I am new to Go, and I am trying to make some adjustments to an open source library that consumes messages from Kafka using the Sarama library. The original code can be found here.
The original package implements a PartitionConsumer, which works fine if you don't need read consistency across multiple consumers of the same topic, but that does not work for me.
I have done some work within the same application to implement the sarama NewConsumerGroup package, using some examples I found online.
Below is the code I currently have running:
package main

import (
    "context"
    // "flag"
    "os"
    "os/signal"
    "sync"
    "syscall"
    "encoding/json"
    "log"
    "strings"

    "github.com/Shopify/sarama"
    // "github.com/Shopify/sarama/mocks"
)
// KafkaInput is used for receiving Kafka messages and
// transforming them into HTTP payloads.
type KafkaInput struct {
    config *KafkaConfig
    // consumers []sarama.PartitionConsumer
    messages chan *sarama.ConsumerMessage
}
var (
    brokers  = ""
    version  = ""
    group    = ""
    topics   = ""
    assignor = ""
    oldest   = true
    verbose  = false
)
// Consumer represents a Sarama consumer group consumer
type Consumer struct {
    ready chan bool
}
// NewKafkaInput creates instance of kafka consumer client.
func NewKafkaInput(address string, config *KafkaConfig) *KafkaInput {
    /**
     * Construct a new Sarama configuration.
     * The Kafka cluster version has to be defined before the consumer/producer is initialized.
     */
    c := sarama.NewConfig()
    // Configuration options go here
    log.Println("Starting a new Sarama consumer")
    if verbose {
        sarama.Logger = log.New(os.Stdout, "[sarama] ", log.LstdFlags)
    }
    version, err := sarama.ParseKafkaVersion("2.1.1")
    if err != nil {
        log.Panicf("Error parsing Kafka version: %v", err)
    }
    c.Version = version
    if oldest {
        c.Consumer.Offsets.Initial = sarama.OffsetOldest
    }
    /**
     * Setup a new Sarama consumer group
     */
    consumer := Consumer{ready: make(chan bool)}
    ctx, cancel := context.WithCancel(context.Background())
    client, err := sarama.NewConsumerGroup(strings.Split(config.host, ","), config.group, c)
    if err != nil {
        log.Panicf("Error creating consumer group client: %v", err)
    }
    wg := &sync.WaitGroup{}
    wg.Add(1)
    go func() {
        defer wg.Done()
        for {
            if err := client.Consume(ctx, []string{config.topic}, &consumer); err != nil {
                log.Panicf("Error from consumer: %v", err)
            }
            // check if context was cancelled, signaling that the consumer should stop
            if ctx.Err() != nil {
                return
            }
            consumer.ready = make(chan bool)
        }
    }()
    <-consumer.ready // Await till the consumer has been set up
    log.Println("Sarama consumer up and running!...")
    sigterm := make(chan os.Signal, 1)
    signal.Notify(sigterm, syscall.SIGINT, syscall.SIGTERM)
    select {
    case <-ctx.Done():
        log.Println("terminating: context cancelled")
    case <-sigterm:
        log.Println("terminating: via signal")
    }
    cancel()
    wg.Wait()
    if err = client.Close(); err != nil {
        log.Panicf("Error closing client: %v", err)
    }
    i := &KafkaInput{
        config: config,
        // consumers: make([]sarama.PartitionConsumer, len(partitions)),
        // messages: make(chan *sarama.ConsumerMessage, 256),
        messages: make(chan *sarama.ConsumerMessage, 256),
    }
    return i
}
// ConsumeClaim must start a consumer loop of ConsumerGroupClaim's Messages().
func (consumer *Consumer) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
    // NOTE:
    // Do not move the code below to a goroutine.
    // The `ConsumeClaim` itself is called within a goroutine, see:
    // https://github.com/Shopify/sarama/blob/master/consumer_group.go#L27-L29
    for message := range claim.Messages() {
        log.Printf("Message claimed: value = %s, timestamp = %v, topic = %s", string(message.Value), message.Timestamp, message.Topic)
        session.MarkMessage(message, "")
    }
    return nil
}
// ErrorHandler should receive errors
func (i *KafkaInput) ErrorHandler(consumer sarama.PartitionConsumer) {
    for err := range consumer.Errors() {
        log.Println("Failed to read access log entry:", err)
    }
}
// Read copies the next consumed message into data.
func (i *KafkaInput) Read(data []byte) (int, error) {
    message := <-i.messages
    if !i.config.useJSON {
        copy(data, message.Value)
        return len(message.Value), nil
    }
    var kafkaMessage KafkaMessage
    json.Unmarshal(message.Value, &kafkaMessage)
    buf, err := kafkaMessage.Dump()
    if err != nil {
        log.Println("Failed to decode access log entry:", err)
        return 0, err
    }
    copy(data, buf)
    return len(buf), nil
}
func (i *KafkaInput) String() string {
    return "Kafka Input: " + i.config.host + "/" + i.config.topic
}
// Setup is run at the beginning of a new session, before ConsumeClaim
func (consumer *Consumer) Setup(sarama.ConsumerGroupSession) error {
    // Mark the consumer as ready
    close(consumer.ready)
    return nil
}
// Cleanup is run at the end of a session, once all ConsumeClaim goroutines have exited
func (consumer *Consumer) Cleanup(sarama.ConsumerGroupSession) error {
    return nil
}
The KafkaConfig carries the group ID and topic for the consumer. When I run this program, the consumer starts and reads from the correct topic with the correct group, and prints the messages to STDOUT from the ConsumeClaim created in this function:
func (consumer *Consumer) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
    for message := range claim.Messages() {
        log.Printf("Message claimed: value = %s, timestamp = %v, topic = %s", string(message.Value), message.Timestamp, message.Topic)
        session.MarkMessage(message, "")
    }
    return nil
}
What I believe I need, however, is for the NewKafkaInput function to return a *KafkaInput with the messages from the claim added to the struct (forgive me if I am using the wrong terminology here; this is my first rodeo).
...
    i := &KafkaInput{
        config: config,
        // consumers: make([]sarama.PartitionConsumer, len(partitions)),
        // messages: make(chan *sarama.ConsumerMessage, 256),
        messages: make(chan *sarama.ConsumerMessage, 256),
    }
    return i
}
In the original example this is done here:
func NewKafkaInput(address string, config *KafkaConfig) *KafkaInput {
    ...
    go func(consumer sarama.PartitionConsumer) {
        defer consumer.Close()
        for message := range consumer.Messages() {
            i.messages <- message
        }
    }(consumer)
    ...
}
I have spent days moving functions in and out of the NewKafkaInput function, trying to add the messages to the KafkaInput struct from outside the function, and everything in between. I just cannot get it to work. The NewKafkaInput function needs to return a *KafkaInput carrying any messages, so that this function can do its job:
func (i *KafkaInput) Read(data []byte) (int, error) {
    message := <-i.messages
    if !i.config.useJSON {
        copy(data, message.Value)
        return len(message.Value), nil
    }
    var kafkaMessage KafkaMessage
    json.Unmarshal(message.Value, &kafkaMessage)
    buf, err := kafkaMessage.Dump()
    if err != nil {
        log.Println("Failed to decode access log entry:", err)
        return 0, err
    }
    copy(data, buf)
    return len(buf), nil
}
It is entirely possible that I have gotten this completely wrong, but any help and advice is appreciated.
Thanks
Answer 0 (score: 0)
Here is the solution to my problem. I had goroutines blocking the main function (NewKafkaInput waited on the signal handler and the WaitGroup before it could return), so the consume loop needed to be broken out into its own function. If the code below does not make sense, here is a link to the program I was modifying: https://github.com/buger/goreplay. If I can get a response from the owner I plan on cleaning up the code and submitting a pull request, or possibly publishing a fork.
package main

import (
    "context"
    "encoding/json"
    "strings"
    "os"
    "log"

    "github.com/Shopify/sarama"
)
// KafkaInput is used for receiving Kafka messages and
// transforming them into HTTP payloads.
type KafkaInput struct {
    sarama.ConsumerGroup
    config   *KafkaConfig
    consumer Consumer
    messages chan *sarama.ConsumerMessage
}
// Consumer represents a Sarama consumer group consumer
type Consumer struct {
    ready    chan bool
    messages chan *sarama.ConsumerMessage
}
var (
    brokers  = ""
    version  = ""
    group    = ""
    topics   = ""
    assignor = ""
    oldest   = true
    verbose  = false
)
// NewKafkaInput creates instance of kafka consumer client.
func NewKafkaInput(address string, config *KafkaConfig) *KafkaInput {
    /**
     * Construct a new Sarama configuration.
     * The Kafka cluster version has to be defined before the consumer/producer is initialized.
     */
    c := sarama.NewConfig()
    // Configuration options go here
    log.Printf("KafkaConfig: %s", config.host)
    log.Printf("KafkaConfig: %s", config.group)
    log.Printf("KafkaConfig: %s", config.topic)
    log.Println("Starting a new Sarama consumer")
    if verbose {
        sarama.Logger = log.New(os.Stdout, "[sarama] ", log.LstdFlags)
    }
    version, err := sarama.ParseKafkaVersion("2.1.1")
    if err != nil {
        log.Panicf("Error parsing Kafka version: %v", err)
    }
    c.Version = version
    if oldest {
        c.Consumer.Offsets.Initial = sarama.OffsetOldest
    }
    group, err := sarama.NewConsumerGroup(strings.Split(config.host, ","), config.group, c)
    if err != nil {
        log.Panicf("Error creating consumer group client: %v", err)
    }
    /**
     * Setup a new Sarama consumer group
     */
    consumer := Consumer{
        ready:    make(chan bool),
        messages: make(chan *sarama.ConsumerMessage, 256),
    }
    i := &KafkaInput{
        ConsumerGroup: group,
        config:        config,
        messages:      make(chan *sarama.ConsumerMessage, 256),
        consumer:      consumer,
    }
    go i.loop([]string{config.topic})
    // Expose the consumer's channel through the input.
    i.messages = consumer.messages
    return i
}
// ConsumeClaim marks each claimed message and pushes it onto the messages channel.
func (i *KafkaInput) ConsumeClaim(s sarama.ConsumerGroupSession, c sarama.ConsumerGroupClaim) error {
    for msg := range c.Messages() {
        s.MarkMessage(msg, "")
        i.Push(msg)
    }
    return nil
}
// loop runs the consumer group session in the background, rejoining after each rebalance.
func (i *KafkaInput) loop(topic []string) {
    ctx := context.Background()
    for {
        if err := i.Consume(ctx, topic, i); err != nil {
            return
        }
    }
}
// Push delivers a claimed message to the consumer's messages channel.
func (i *KafkaInput) Push(m *sarama.ConsumerMessage) {
    if i.consumer.messages != nil {
        log.Printf("MSGPUSH: %s", m)
        i.consumer.messages <- m
    }
}
func (i *KafkaInput) Read(data []byte) (int, error) {
    message := <-i.messages
    log.Printf("Msg: %s", string(message.Value))
    if !i.config.useJSON {
        copy(data, message.Value)
        return len(message.Value), nil
    }
    var kafkaMessage KafkaMessage
    if err := json.Unmarshal(message.Value, &kafkaMessage); err != nil {
        log.Println("Failed to decode access log entry:", err)
        return 0, err
    }
    buf, err := kafkaMessage.Dump()
    if err != nil {
        log.Println("Failed to decode access log entry:", err)
        return 0, err
    }
    copy(data, buf)
    return len(buf), nil
}
func (i *KafkaInput) String() string {
    return "Kafka Input: " + i.config.host + "/" + i.config.topic
}

// Setup is run at the beginning of a new session, before ConsumeClaim
func (i *KafkaInput) Setup(s sarama.ConsumerGroupSession) error {
    return nil
}

// Cleanup is run at the end of a session, once all ConsumeClaim goroutines have exited
func (i *KafkaInput) Cleanup(s sarama.ConsumerGroupSession) error {
    return nil
}
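For anyone trying this out, here is a minimal sketch of how the returned *KafkaInput might be driven from a caller. It assumes the KafkaConfig type (not shown above) exposes the host, group, topic, and useJSON fields the code references; the concrete broker, group, and topic values are placeholders:

package main

import "log"

func main() {
    // Hypothetical configuration values; the KafkaConfig type itself is
    // defined elsewhere in the program being modified.
    config := &KafkaConfig{
        host:    "localhost:9092",
        group:   "example-group",
        topic:   "example-topic",
        useJSON: false,
    }
    // The address parameter is unused by NewKafkaInput above.
    input := NewKafkaInput("", config)
    log.Println(input.String())

    buf := make([]byte, 64*1024) // must be large enough for a single message payload
    for {
        n, err := input.Read(buf)
        if err != nil {
            log.Println("read error:", err)
            continue
        }
        log.Printf("payload: %s", buf[:n])
    }
}

Read blocks on the messages channel until ConsumeClaim pushes something onto it, so the consumer group session keeps running in the background across rebalances while callers simply drain the channel.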