我正在试图确定这是否是一个公平的基准测试。目标是查看 Node.js 在各种有效负载大小下能处理多少并发连接。代码如下。
// Benchmark server: loads the whole payload into memory once at startup
// and returns it verbatim for every GET /start request.
var express = require('express');
var Sequelize = require('sequelize');
var fs = require('fs');

var app = express();

// Payload file is the first CLI argument; defaults to ./start.json.
var filename = process.argv[2] || './start.json';
console.log("Using: " + filename);

// Read the entire file synchronously before the server starts accepting
// connections (blocking here is harmless: no requests are in flight yet).
var data = fs.readFileSync(filename);

// Chunk size used by the (commented-out) res.write() variant below.
var blockSize = 250000;

app.get('/start', function (req, res) {
  // Break up data in blocks. Works to super high concurrents.
  // for (var i = 0; i < data.length; i += blockSize)
  //   res.write(data.slice(i, i + blockSize));
  // Can only handle about 600 concurrent requests if datasize > 500KB
  res.send(data);
});

app.listen(3000, function () {
  console.log('Listing on 3000.');
});
如代码注释中所述，如果有效负载大小大于约 500KB 且并发数达到 500，负载测试客户端就会收到“连接被对端重置”（connection reset by peer）错误。如果将数据切片并分块写入，它在出问题前可以承受更高的并发数。原生 Node 和 Express 都表现出这种行为。
答案 0（得分：1）
// The offending line quoted from the question: a blocking, synchronous read.
data = fs.readFileSync(filename);
同步方法是 Node.js 的杀手：它会阻塞事件循环，使**所有**请求都无法得到处理，性能非常糟糕。
试试这个:
// Answer 0: read the payload asynchronously per request so the event loop
// is never blocked while the file is being read from disk.
var express = require('express');
var Sequelize = require('sequelize');
var fs = require('fs');

var app = express();

// Payload file is the first CLI argument; defaults to ./start.json.
var filename = process.argv[2] || './start.json';
var blockSize = 250000; // kept from the original snippet (unused by this variant)

app.get('/start', function (req, res) {
  console.log("Using: " + filename);
  // Asynchronous read: other connections keep being serviced while this
  // request waits for disk I/O.
  fs.readFile(filename, function (err, data) {
    if (err) {
      // Bug fix: the original did `throw err` here. Throwing inside an async
      // callback is an uncaught exception and crashes the whole process;
      // answer this one request with a 500 instead.
      console.error(err);
      res.status(500).send('Internal Server Error');
      return;
    }
    res.send(data);
  });
});

app.listen(3000, function () {
  // Typo fixed: "Listing" -> "Listening".
  console.log('Listening on 3000.');
});
答案 1（得分：0）
作为替代方案，您可以创建一个读取流并将其通过管道（pipe）传输到响应。下面是基于您代码的示例。
// Answer 1: stream the payload to each response instead of buffering it.
var express = require('express');
var fs = require('fs');

var app = express();

// Payload file is the first CLI argument; defaults to ./data.json.
var filename = process.argv[2] || './data.json';
console.log("Using: " + filename);

app.get('/start', function (req, res) {
  // Bug fix: the original created ONE read stream at startup and piped it
  // into every response. A readable stream can be consumed only once, so
  // only the first request would receive data and later requests would
  // hang. Create a fresh stream per request instead.
  // (The original's blocking `fs.readFileSync` into an unused `data`
  // variable was also removed — it defeated the point of streaming.)
  var readStream = fs.createReadStream(filename);
  readStream.on('error', function (err) {
    // Without this handler, a missing or unreadable file would emit an
    // unhandled 'error' event and crash the process.
    console.error(err);
    res.status(500).send('Internal Server Error');
  });
  readStream.pipe(res);
});