我按照快速入门指南:http://kafka.apache.org/documentation.html#quickstart操作,并想在node.js中编写一个消费者。主题'test'已成功创建,我可以使用kafka-console-producer.sh发送消息,并通过kafka-console-consumer.sh接收消息
我写了一个简单的消费者(live.js):
// Minimal kafka-node consumer: connects to ZooKeeper at localhost:2181
// and logs every message published to partition 0 of the 'test' topic.
const kafka = require('kafka-node');

const client = new kafka.Client('localhost:2181/');
const consumer = new kafka.Consumer(
  client,
  [{ topic: 'test', partition: 0 }],
  { autoCommit: true }
);

client.on('ready', function () {
  console.log('Client ready!');
});

// An unhandled 'error' event on a Node EventEmitter throws and kills the
// process; log client-level (ZooKeeper/broker) errors instead of crashing.
client.on('error', function (err) {
  console.log('Kafka Error: Client - ' + err);
});

console.log(client);
console.log(consumer);

consumer.on('error', function (err) {
  console.log("Kafka Error: Consumer - " + err);
});

consumer.on('offsetOutOfRange', function (err) {
  console.log("Kafka offsetOutOfRange: " + err);
});

consumer.on('message', function (message) {
  console.log(message);
});
在运行node live.js
时,我会收到之前发送的所有消息。但是当live.js正在运行并且我通过Kafka提供的脚本生成消息时,live.js不会收到该消息(但Kafka附带的消费者脚本可以收到)。重新启动live.js后,我会收到这些消息,但我希望能够实时收到消息。我使用默认配置,这是来自live.js的日志:
EventEmitter {
connectionString: 'localhost:2181/',
clientId: 'kafka-node-client',
zkOptions: undefined,
noAckBatchOptions: undefined,
brokers: {},
longpollingBrokers: {},
topicMetadata: {},
topicPartitions: {},
correlationId: 0,
_socketId: 0,
cbqueue: {},
brokerMetadata: {},
ready: false,
zk:
EventEmitter {
client:
Client {
domain: null,
_events: [Object],
_eventsCount: 3,
_maxListeners: undefined,
connectionManager: [Object],
options: [Object],
state: [Object] },
_events:
{ init: [Object],
brokersChanged: [Function],
disconnected: [Object],
error: [Function] },
_eventsCount: 4 },
_events:
{ ready: [ [Function], [Function] ],
error: [Function],
close: [Function],
brokersChanged: [Function] },
_eventsCount: 4 }
EventEmitter {
fetchCount: 0,
client:
EventEmitter {
connectionString: 'localhost:2181/',
clientId: 'kafka-node-client',
zkOptions: undefined,
noAckBatchOptions: undefined,
brokers: {},
longpollingBrokers: {},
topicMetadata: {},
topicPartitions: {},
correlationId: 0,
_socketId: 0,
cbqueue: {},
brokerMetadata: {},
ready: false,
zk: EventEmitter { client: [Object], _events: [Object], _eventsCount: 4 },
_events:
{ ready: [Object],
error: [Function],
close: [Function],
brokersChanged: [Function] },
_eventsCount: 4 },
options:
{ autoCommit: true,
groupId: 'kafka-node-group',
autoCommitMsgCount: 100,
autoCommitIntervalMs: 5000,
fetchMaxWaitMs: 100,
fetchMinBytes: 1,
fetchMaxBytes: 1048576,
fromOffset: false,
encoding: 'utf8' },
ready: false,
paused: undefined,
id: 0,
payloads:
[ { topic: 'test',
partition: 0,
offset: 0,
maxBytes: 1048576,
metadata: 'm' } ],
_events: { done: [Function] },
_eventsCount: 1,
encoding: 'utf8' }
Client ready!
- 编辑 -
停止live.js并再次启动后,Zookeeper日志显示以下内容:
[2016-01-27 15:53:20,135] INFO Accepted socket connection from /127.0.0.1:38166 (org.apache.zookeeper.server.NIOServerCnxnFactory)
[2016-01-27 15:53:20,139] WARN Connection request from old client /127.0.0.1:38166; will be dropped if server is in r-o mode (org.apache.zookeeper.server.ZooKeeperServer)
[2016-01-27 15:53:20,140] INFO Client attempting to establish new session at /127.0.0.1:38166 (org.apache.zookeeper.server.ZooKeeperServer)
[2016-01-27 15:53:20,166] INFO Established session 0x1528384e45e0007 with negotiated timeout 30000 for client /127.0.0.1:38166 (org.apache.zookeeper.server.ZooKeeperServer)
答案 0 :(得分:0)
除非您有特定需求,否则建议尝试使用HighLevel消费者。我正在为高级消费者使用以下选项:
// Options object passed to kafka-node's HighLevelConsumer.
{
groupId: "Consumer group",
// Auto commit config
autoCommit: true,
autoCommitMsgCount: 100,
autoCommitIntervalMs: 5000,
// Fetch message config
fetchMaxWaitMs: 100,
fetchMinBytes: 1,
fetchMaxBytes: 1024 * 10,
fromOffset: true,
fromBeginning: false, // do not re-read the topic from the beginning
encoding:'utf8'
}
答案 1 :(得分:0)
我也遇到了同样的问题。将kafka-node降级到版本0.2.27解决了它。