var crypto = require('crypto');
var sha = crypto.createHash('sha512').update(String(s));
var result = sha.digest('hex');
This is my current code.
How do I make this asynchronous? I plan to run SHA-512 100,000 times.
Answer 0 (score: 1)
If you cannot find a better solution, this trick may help:
You can create a standalone SHA-512 generator application that receives your string "s" on standard input, computes the hash, and writes it to standard output.
In your application, you can run that program through the child_process module and capture its response with an event handler. There is also a Stack Overflow thread on child_process that may be useful.
This way you can wrap the synchronous function in an asynchronous context. :)
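A minimal sketch of that idea (the file name sha512-worker.js and the sha512Async helper are illustrative, not part of the answer) could look like this:

// sha512-worker.js: a standalone hasher that reads stdin and writes the hex digest to stdout
var crypto = require('crypto');
var hash = crypto.createHash('sha512');
process.stdin.on('data', function (chunk) { hash.update(chunk); });
process.stdin.on('end', function () { process.stdout.write(hash.digest('hex')); });

// parent.js: spawn the worker, pipe the string in, collect the digest asynchronously
var spawn = require('child_process').spawn;

function sha512Async(s, callback) {
    var child = spawn(process.execPath, ['sha512-worker.js']);
    var output = '';
    child.stdout.on('data', function (chunk) { output += chunk; });
    child.on('close', function (code) {
        if (code !== 0) return callback(new Error('worker exited with code ' + code));
        callback(null, output);
    });
    child.stdin.end(String(s));
}

sha512Async('hello', function (err, digest) {
    if (err) throw err;
    console.log(digest);
});

Keep in mind that spawning a process has noticeable overhead, so for 100,000 small strings you would want to batch many strings per child rather than spawn one child per hash.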
Answer 1 (score: 1)
Node's crypto module does not currently offer asynchronous SHA-512 hashing, and although the createHash() stream interface looks asynchronous, it also runs in the main thread and blocks the event loop.
There is an open issue about this: https://github.com/nodejs/node/issues/678
In the meantime, you can use @ronomon/crypto-async to perform SHA-512 asynchronously and concurrently in the thread pool, without blocking the event loop, for multi-core throughput.
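A short sketch of how that might look, assuming the hash(algorithm, sourceBuffer, callback) interface described in the module's README:

var cryptoAsync = require('@ronomon/crypto-async');

// Hash a buffer off the main thread; the callback receives the digest as a Buffer.
var source = Buffer.from('some string to hash', 'utf8');
cryptoAsync.hash('sha512', source, function (error, hash) {
    if (error) throw error;
    // Convert to hex to match createHash('sha512').digest('hex')
    console.log(hash.toString('hex'));
});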
Answer 2 (score: 0)
Node.js runs in a single thread, so if you want asynchronous processing you have to either use threads (through a native add-on) or spawn multiple processes. The approach I present below uses the latter.
The Node.js API provides a module called cluster that lets you fork your process, much as you would in C.
My approach splits the input data (the strings you want to hash) into chunks, and each chunk is handed to a child worker process. When a worker finishes its chunk, it signals the master process and sends the result back.
The master node keeps running while the workers do their job, so it can carry out any unrelated asynchronous work without being blocked. When all workers have finished, the master is signaled and is free to process the final results further.
To run my test, simply execute:
node parhash
My tests were run on an Intel Core i5 4670 with 8 GB of DDR3 RAM.
For your 100,000 strings, 1 worker finished in 450 ms, while 10 workers took 350 ms.
In a test with one million strings, 1 worker did the job in 4.5 s, while 10 workers did it in 3.5 s.
Here is the code:
var
    crypto = require('crypto'),
    cluster = require('cluster');

var
    STRING_COUNT = 1000000,
    STRINGS_PER_WORKER = 100000,
    WORKER_COUNT = Math.ceil(STRING_COUNT / STRINGS_PER_WORKER),
    chunks = [],
    nextChunkId = 0,
    results = [],
    startTime,
    pendingWorkers = WORKER_COUNT;

/**
 * Generates strings partitioned in WORKER_COUNT chunks.
 * Each of these chunks will later be passed to a child process to be parsed asynchronously.
 *
 * You should replace this with your working data.
 */
function generateDemoStringChunks() {
    var
        si, wi,
        chunk;

    for (wi = 0; wi < WORKER_COUNT; wi++) {
        chunk = [];
        for (si = STRINGS_PER_WORKER * wi; (si < STRINGS_PER_WORKER * (wi + 1)) && (si < STRING_COUNT); si++) {
            chunk.push(si.toString());
        }
        chunks.push(chunk);
    }
}

/**
 * After all workers finish processing, this will be executed.
 *
 * Here you should do whatever you want to process the resulting hashes.
 */
function mergeResults() {
    results.sort(function compare(a, b) {
        return a.id - b.id;
    });

    console.info('Summary:');
    results.forEach(function (result) {
        console.info('\tChunk %d: %d hashes (here is the first hash: "%s")', result.id, result.data.length, result.data[0]);
    });
}

/**
 * This will be called on the master side every time a worker finishes working.
 *
 * @param {object} worker the Worker that finished
 * @param {{id: number, data: [string]}} result the result
 */
function processWorkerResult(worker, result) {
    console.info('Worker %d finished computing %d hashes.', worker.id, result.data.length);
    results.push(result);
    worker.kill();

    if (--pendingWorkers == 0) {
        console.info('Work is done. Whole process took %d seconds.', process.hrtime(startTime)[0]);
        mergeResults();
    }
}

/**
 * Gets a chunk of data available for processing.
 *
 * @returns {{id: number, data: [string]}} the chunk to be passed to the worker
 */
function getNextAvailableChunk() {
    var chunk = {
        id: nextChunkId,
        data: chunks[nextChunkId]
    };
    nextChunkId++;
    return chunk;
}

/**
 * The master node will send a chunk of data every time a worker node
 * signals it's ready to work.
 */
function waitForWorkers() {
    cluster.on('online', function (worker) {
        console.info('Worker %d is online.', worker.id);

        worker.on('message', processWorkerResult.bind(null, worker));
        worker.send(getNextAvailableChunk());
    });
}

/**
 * Start workers.
 */
function spawnWorkers() {
    var wi;
    for (wi = 0; wi < WORKER_COUNT; wi++) {
        cluster.fork();
    }
}

/**
 * The hash function.
 *
 * @param {string} s a string to be hashed
 * @returns {string} the hash string
 */
function hashString(s) {
    return crypto.createHash('sha512').update(s).digest('hex');
}

/**
 * A worker will wait for the master to send a chunk of data and will
 * start processing as soon as it arrives.
 */
function processChunk() {
    cluster.worker.on('message', function (chunk) {
        var
            result = [];

        console.info('Worker %d received chunk %d with a load of %d strings.', cluster.worker.id, chunk.id, chunk.data.length);

        chunk.data.forEach(function processChunk(s) {
            result.push(hashString(s));
        });

        cluster.worker.send({
            id: chunk.id,
            data: result
        });
    });
}

function main() {
    if (cluster.isMaster) {
        /*
           The master node will instantiate all required workers
           and then pass a chunk of data for each one.
           It will then wait for all of them to finish so it can
           merge the results.
         */
        startTime = process.hrtime();
        generateDemoStringChunks();
        spawnWorkers();
        waitForWorkers();
    } else {
        /*
           A worker node will wait for a chunk to arrive and
           then will start processing it. When finished, it will
           send a message back to the master node with the
           resulting hashes.
         */
        console.info('Worker %d is starting.', cluster.worker.id);
        processChunk();
    }
}

main();
I can't tell how well it would perform if implemented with threads instead, because I haven't tested that. If you want to benchmark it, you could try WebWorker Threads (note: I haven't tried the WebWorkers module myself and cannot guarantee it works; you're on your own here).
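For comparison, here is a minimal thread-based sketch. It uses Node's built-in worker_threads module (available in newer Node versions) rather than the WebWorker Threads package mentioned above, so treat it as an assumption on my part, not something the answer benchmarked:

// Single-file example: the main thread spawns a worker thread that hashes
// an array of strings and posts the resulting digests back.
var { Worker, isMainThread, parentPort, workerData } = require('worker_threads');
var crypto = require('crypto');

if (isMainThread) {
    var strings = ['0', '1', '2']; // replace with your real data
    var worker = new Worker(__filename, { workerData: strings });
    worker.on('message', function (hashes) {
        console.log('Received %d hashes, first one: %s', hashes.length, hashes[0]);
    });
    worker.on('error', function (err) { console.error(err); });
} else {
    var hashes = workerData.map(function (s) {
        return crypto.createHash('sha512').update(s).digest('hex');
    });
    parentPort.postMessage(hashes);
}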