A single Kafka consumer listening to multiple Kafka topics

Date: 2017-03-06 11:12:46

Tags: apache-kafka kafka-consumer-api

I am new to Kafka. I am using a single Kafka broker (0.10.2) and a single ZooKeeper (3.4.9). I have two topics, each with five partitions. I am running one consumer, and in the consumer.subscribe call I pass both topics as a list.

  

kafkaConsumer.subscribe([topic1, topic2], new HandleRebalance())

From what I have observed, however, this consumer keeps consuming messages from one topic continuously, and only switches to the next topic once the first one has no more messages. Is it possible for the consumer to listen to the partitions in a balanced way?

Note: By "balanced" I mean, say, that it consumes from topic1 for 10 seconds and then switches to topic2 (see the sketch below).
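To illustrate, here is a minimal sketch of the alternation I have in mind, using the consumer's pause()/resume() API to round-robin between the two topics on a fixed time slice. It assumes the kafkaConsumer from my class below is already subscribed to both topics; the 10-second window, topic names, and processing body are placeholders, not my real code:

    import org.apache.kafka.common.TopicPartition

    // Sketch only: alternate between two subscribed topics on a fixed time slice
    long sliceMs = 10000                        // placeholder: 10-second slice
    List<String> topics = ['topic1', 'topic2']  // placeholder topic names
    int active = 0
    long sliceStart = System.currentTimeMillis()
    while (true) {
        // Pause partitions of the inactive topic so poll() only returns the active one
        Set<TopicPartition> assigned = kafkaConsumer.assignment()
        kafkaConsumer.pause(assigned.findAll { it.topic() != topics[active] })
        kafkaConsumer.resume(assigned.findAll { it.topic() == topics[active] })
        kafkaConsumer.poll(100).each { record ->
            // process record ...
        }
        if (System.currentTimeMillis() - sliceStart >= sliceMs) {
            active = (active + 1) % topics.size()  // switch to the other topic
            sliceStart = System.currentTimeMillis()
        }
    }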

Consumer code:

import com.mongodb.DBObject
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.clients.consumer.ConsumerRecords
import org.apache.kafka.clients.consumer.KafkaConsumer
import org.apache.kafka.clients.consumer.OffsetAndMetadata
import org.apache.kafka.clients.consumer.OffsetCommitCallback
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.errors.InterruptException
import org.apache.kafka.common.errors.WakeupException
import org.slf4j.Logger
import org.slf4j.LoggerFactory
import java.util.regex.Pattern
class KafkaPollingConsumer implements Runnable {
        private static final Logger logger = LoggerFactory.getLogger(KafkaPollingConsumer.class)
            private static final String TAG = "[KafkaPollingConsumer]"
            private final KafkaConsumer<String, byte []> kafkaConsumer
            private Map<TopicPartition,OffsetAndMetadata> currentOffsetsMap = new HashMap<>()
            List topicNameList
            Map kafkaTopicConfigMap = new HashMap<String,Object>()
            Map kafkaTopicMessageListMap = new HashMap<String,List>()

            public KafkaPollingConsumer(String serverType, String groupName, String topicNameRegex){
                logger.debug("{} [Constructor] [Enter] Thread Name {} serverType group Name TopicNameRegex",TAG,Thread.currentThread().getName(),serverType,groupName,topicNameRegex)
                logger.debug("Populating Property for kafak consumer")
                Properties kafkaConsumerProperties = new Properties()
                kafkaConsumerProperties.put("group.id", groupName)
                kafkaConsumerProperties.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer")
                kafkaConsumerProperties.put("value.deserializer", "com.custom.kafkaconsumer.deserializer.CustomObjectDeserializer")
                switch(serverType){
                    case KafkaTopicConfigEntity.KAFKA_NODE_TYPE_ENUM.Priority.toString() :
                        kafkaConsumerProperties.put("bootstrap.servers",ConfigLoader.conf.kafkaServer.priority.kafkaNode)
                        kafkaConsumerProperties.put("enable.auto.commit",ConfigLoader.conf.kafkaServer.priority.consumer.enable.auto.commit)
                        kafkaConsumerProperties.put("auto.offset.reset",ConfigLoader.conf.kafkaServer.priority.consumer.auto.offset.reset)
                        break
                    case KafkaTopicConfigEntity.KAFKA_NODE_TYPE_ENUM.Bulk.toString() :
                        kafkaConsumerProperties.put("bootstrap.servers",ConfigLoader.conf.kafkaServer.bulk.kafkaNode)
                        kafkaConsumerProperties.put("enable.auto.commit",ConfigLoader.conf.kafkaServer.bulk.consumer.enable.auto.commit)
                        kafkaConsumerProperties.put("auto.offset.reset",ConfigLoader.conf.kafkaServer.bulk.consumer.auto.offset.reset)
                        kafkaConsumerProperties.put("max.poll.records",10)
                        kafkaConsumerProperties.put("max.poll.interval.ms",900000)
                        kafkaConsumerProperties.put("request.timeout.ms",900000)
                        break
                    default :
                        // a bare String is not a Throwable, so throw a proper exception
                        throw new IllegalArgumentException("Invalid server type")
                }
                logger.debug("{} [Constructor] KafkaConsumer Property Populated {}",properties.toString())
                kafkaConsumer = new KafkaConsumer<String, byte []>(kafkaConsumerProperties)
                topicNameList = topicNameRegex.split(Pattern.quote('|'))
                logger.debug("{} [Constructor] Kafkatopic List {}",topicNameList.toString())
                logger.debug("{} [Constructor] Exit",TAG)
            }

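            // Rebalance listener: commit the offsets tracked so far before partitions are
            // revoked, so whichever consumer takes them over does not re-process those records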
            private class HandleRebalance implements ConsumerRebalanceListener {
                public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
                }

                public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
                    if(currentOffsetsMap != null && !currentOffsetsMap.isEmpty()) {
                        logger.debug("{} In onPartitionsRevoked Rebalanced ",TAG)
                        kafkaConsumer.commitSync(currentOffsetsMap)
                    }
                }
            }

            @Override
            void run() {
                logger.debug("{} Starting Thread ThreadName {}",TAG,Thread.currentThread().getName())
                populateKafkaConfigMap()
                initializeKafkaTopicMessageListMap()
                String topicName
                String consumerClassName
                String consumerMethodName
                Boolean isBatchJob
                Integer batchSize = 0
                final Thread mainThread = Thread.currentThread()
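                // Shutdown hook: wakeup() makes a blocked poll() throw WakeupException,
                // letting the main loop fall through to the finally block for a clean exit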
                Runtime.getRuntime().addShutdownHook(new Thread() {
                    public void run() {
                        logger.error("{},gracefully shutdowning thread {}",TAG,mainThread.getName())
                        kafkaConsumer.wakeup()
                        try {
                            mainThread.join()
                        } catch (InterruptedException exception) {
                            logger.error("{} Error : {}",TAG,exception.getStackTrace().join("\n"))
                        }
                    }
                })
                kafkaConsumer.subscribe(topicNameList , new HandleRebalance())
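                // Poll loop: records from all subscribed topics arrive interleaved in each
                // batch; which topic dominates depends on what the brokers return per fetch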
                try{
                    while(true){
                        logger.debug("{} Starting Consumer with polling time in ms 100",TAG)
                        ConsumerRecords kafkaRecords = kafkaConsumer.poll(100)
                        for(ConsumerRecord record: kafkaRecords){
                            topicName = record.topic()
                            DBObject kafkaTopicConfigDBObject = kafkaTopicConfigMap.get(topicName)
                            consumerClassName = kafkaTopicConfigDBObject.get(KafkaTopicConfigEntity.CLASS_NAME_KEY)
                            consumerMethodName = kafkaTopicConfigDBObject.get(KafkaTopicConfigEntity.METHOD_NAME_KEY)
                            isBatchJob = kafkaTopicConfigDBObject.get(KafkaTopicConfigEntity.IS_BATCH_JOB_KEY)
                            logger.debug("Details about Message")
                            logger.debug("Thread {}",mainThread.getName())
                            logger.debug("Topic {}",topicName)
                            logger.debug("Partition {}",record.partition().toString())
                            logger.debug("Offset {}",record.offset().toString())
                            logger.debug("clasName {}",consumerClassName)
                            logger.debug("methodName {}",consumerMethodName)
                            logger.debug("isBatchJob {}",isBatchJob.toString())
                            if(isBatchJob == true){
                                batchSize = Integer.parseInt(kafkaTopicConfigDBObject.get(KafkaTopicConfigEntity.BATCH_SIZE_KEY).toString())
                                logger.debug("batchSize {}",batchSize.toString())
                            }
                            Object message = record.value()
                            logger.debug("message {}",message.toString())
                            publishMessageToConsumers(consumerClassName,consumerMethodName,isBatchJob,batchSize,message,topicName)
                            Thread.sleep(60000) // sleeps 60 s after each record; max.poll.interval.ms must stay above total batch time
                            currentOffsetsMap.put(new TopicPartition(record.topic(), record.partition()),new OffsetAndMetadata(record.offset() +1))
                        }
                        logger.debug("{} Commiting Messages to Kafka",TAG)
                        kafkaConsumer.commitSync(currentOffsetsMap)
                    }
                }
                catch(InterruptException exception){
                    logger.error("{} In InterruptException",TAG)
                    logger.error("{} Exception {}",TAG,exception.getStackTrace().join("\n"))
                }
                catch (WakeupException exception) {
                    logger.error("{} In WakeUp Exception",TAG)
                    logger.error("{} Exception {}",TAG,exception.getStackTrace().join("\n"))
                }
                catch(Exception exception){
                    logger.error("{} In Exception",TAG)
                    logger.error("{} Exception {}",TAG,exception.getStackTrace().join("\n"))
                }
                finally {
                    logger.error("{} In finally commiting remaining offset ",TAG)
                    publishAllKafkaTopicBatchMessages()
                    kafkaConsumer.commitSync(currentOffsetsMap)
                    kafkaConsumer.close()
                    logger.error("{} Exiting Consumer",TAG)
                }
            }


    private void publishMessageToConsumers(String consumerClassName,String consumerMethodName,Boolean isBatchJob,Integer batchSize,Object message, String topicName){
        logger.debug("{} [publishMessageToConsumer] Enter",TAG)
        if(isBatchJob == true){
            publishMessageToBatchConsumer(consumerClassName, consumerMethodName,batchSize, message, topicName)
        }
        else{
            publishMessageToNonBatchConsumer(consumerClassName, consumerMethodName, message)
        }
        logger.debug("{} [publishMessageToConsumer] Exit",TAG)
    }

    private void publishMessageToNonBatchConsumer(String consumerClassName, String consumerMethodName, message){
        logger.debug("{} [publishMessageToNonBatchConsumer] Enter",TAG)
        executeConsumerMethod(consumerClassName,consumerMethodName,message)
        logger.debug("{} [publishMessageToNonBatchConsumer] Exit",TAG)
    }

    private void publishMessageToBatchConsumer(String consumerClassName, String consumerMethodName, Integer batchSize, Object message, String topicName){
        logger.debug("{} [publishMessageToBatchConsumer] Enter",TAG)
        List consumerMessageList = kafkaTopicMessageListMap.get(topicName)
        consumerMessageList.add(message)
        if(consumerMessageList.size() == batchSize){
            logger.debug("{} [publishMessageToBatchConsumer] Pushing Messages In Batches",TAG)
            executeConsumerMethod(consumerClassName, consumerMethodName, consumerMessageList)
            consumerMessageList.clear()
        }
        kafkaTopicMessageListMap.put(topicName,consumerMessageList)
        logger.debug("{} [publishMessageToBatchConsumer] Exit",TAG)
    }

    private void populateKafkaConfigMap(){
        logger.debug("{} [populateKafkaConfigMap] Enter",TAG)
        KafkaTopicConfigDBService kafkaTopicConfigDBService = KafkaTopicConfigDBService.getInstance()
        topicNameList.each { topicName ->
            DBObject kafkaTopicDBObject = kafkaTopicConfigDBService.findByTopicName(topicName)
            kafkaTopicConfigMap.put(topicName,kafkaTopicDBObject)
        }
        logger.debug("{} [populateKafkaConfigMap] kafkaConfigMap {}",TAG,kafkaTopicConfigMap.toString())
        logger.debug("{} [populateKafkaConfigMap] Exit",TAG)
    }

    private void initializeKafkaTopicMessageListMap(){
        logger.debug("{} [initializeKafkaTopicMessageListMap] Enter",TAG)
        topicNameList.each { topicName ->
            kafkaTopicMessageListMap.put(topicName,[])
        }
        logger.debug("{} [populateKafkaConfigMap] kafkaTopicMessageListMap {}",TAG,kafkaTopicMessageListMap.toString())
        logger.debug("{} [initializeKafkaTopicMessageListMap] Exit",TAG)
    }

    private void executeConsumerMethod(String className, String methodName, def messages){
        try{
            logger.debug("{} [executeConsumerMethod] Enter",TAG)
            logger.debug("{} [executeConsumerMethod] className  {} methodName {} messages {}",TAG,className,methodName,messages.toString())
            Class.forName(className)."$methodName"(messages)
        } catch (Exception exception){
            logger.error("{} [{}] Error while executing method : {} of class: {} with params : {} - {}", TAG, Thread.currentThread().getName(), methodName,
                    className, messages.toString(), exception.getStackTrace().join("\n"))
        }
        logger.debug("{} [executeConsumerMethod] Exit",TAG)
    }

    private void publishAllKafkaTopicBatchMessages(){
        logger.debug("{} [publishAllKafkaTopicBatchMessages] Enter",TAG)
        String consumerClassName = null
        String consumerMethodName = null
        kafkaTopicMessageListMap.each { topicName,messageList ->
            DBObject kafkaTopicDBObject = kafkaTopicConfigMap.get(topicName)
            consumerClassName = kafkaTopicDBObject.get(KafkaTopicConfigEntity.CLASS_NAME_KEY)
            consumerMethodName = kafkaTopicDBObject.get(KafkaTopicConfigEntity.METHOD_NAME_KEY)
            logger.debug("{} Pushing message in topic {} className {} methodName {} ",TAG,topicName,consumerClassName,consumerMethodName)
            if(messageList != null && messageList.size() > 0){
                executeConsumerMethod(consumerClassName, consumerMethodName, messageList)
                messageList.clear()
                kafkaTopicMessageListMap.put(topicName,messageList)
            }
        }
        logger.debug("{} [publishAllKafkaTopicBatchMessages] Exit",TAG)
    }
}
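
As an aside, since the constructor already receives a pipe-delimited topicNameRegex, I believe the consumer could also subscribe by pattern instead of me splitting the string myself; something like this sketch (untested on my setup):

    // Sketch: subscribe by regex; the consumer matches existing topics against the pattern
    kafkaConsumer.subscribe(Pattern.compile(topicNameRegex), new HandleRebalance())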

0 Answers:

No answers yet.