Multiple consumers consuming messages from different partitions of the same topic

Date: 2019-05-05 09:22:47

Tags: java kafka-consumer-api

My cluster has 3 nodes, and I created a topic named test:

Topic: test    PartitionCount: 3    ReplicationFactor: 3    Configs:
    Topic: test    Partition: 0    Leader: 2    Replicas: 0,1,2    Isr: 2,1,0
    Topic: test    Partition: 1    Leader: 2    Replicas: 1,2,0    Isr: 2,1,0
    Topic: test    Partition: 2    Leader: 2    Replicas: 2,0,1    Isr: 2,1,0

I want to create 3 consumers, one for each of the three partitions of topic test, each consuming that partition's messages.

I based my work on this blog: https://blog.csdn.net/honglei915/article/details/37697655. I used the MyProducer.java class below to produce 209 messages to the topic test.

Message194
Message197
Message200
Message203
Message206
^CProcessed a total of 209 messages

Using the low-level consumer API (MySimpleConsumer2.java) to consume messages, only one message is printed:

Leader for topic test, partition 0 is 10.0.119.125:10008
partition : 3, offset : 439  mess : Message0

Using the high-level consumer API (MyHighLevelConsumer.java) to consume messages from topic test, the result is as follows.

16:39:52,217 INFO  kafka.utils.ZkUtils$                                          - conflict in /consumers/myconsumergroup/ids/myconsumergroup_myconsumer1 data: {version:1,subscription:{test:3},pattern:static,timestamp:1557045592132} stored data: {version:1,subscription:{testone:3},pattern:static,timestamp:1557042195140}
16:39:52,220 INFO  kafka.utils.ZkUtils$                                         - I wrote this conflicted ephemeral node [{version:1,subscription:{test:3},pattern:static,timestamp:1557045592132}] at /consumers/myconsumergroup/ids/myconsumergroup_myconsumer1 a while back in a different session, hence I will backoff for this node to be deleted by Zookeeper and retry



// MyProducer.java
package test.wzh;

import kafka.producer.KeyedMessage;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
import kafka.javaapi.producer.Producer;
import kafka.producer.ProducerConfig;

public class MyProducer {
    public static void main(String[] args) throws InterruptedException {
        Properties props = new Properties();
        props.put("serializer.class", "kafka.serializer.StringEncoder");
        // props.put("metadata.broker.list", KafkaProperties.BROKER_CONNECT);
        props.put("metadata.broker.list", "******");
        props.put("partitioner.class", "test.wzh.MyPartitioner");
        props.put("request.required.acks", "1");
        ProducerConfig config = new ProducerConfig(props);
        Producer<String, String> producer = new Producer<String, String>(config);

        // Produce 209 messages ("Message0" .. "Message208"), keyed by their
        // index so MyPartitioner can route them, sending in batches.
        List<KeyedMessage<String, String>> messages = new ArrayList<KeyedMessage<String, String>>(100);
        for (int i = 0; i <= 208; i++) {
            KeyedMessage<String, String> message =
                    new KeyedMessage<String, String>("test", i + "", "Message" + i);
            messages.add(message);
            if (i % 100 == 0) {
                producer.send(messages);
                messages.clear();
            }
        }
        producer.send(messages); // flush the final partial batch
        System.out.println(messages);
        producer.close();
    }

}

//MyPartitioner.java 

package test.wzh;

import kafka.producer.Partitioner;
import kafka.utils.VerifiableProperties;


public class MyPartitioner implements Partitioner {
    public MyPartitioner(VerifiableProperties props) {

    }

    @Override
    public int partition(Object key, int numPartitions) {
        /*try {
            long partitionNum = Long.parseLong((String) key);
            return (int) Math.abs(partitionNum % numPartitions);
        } catch (Exception e) {
            return Math.abs(key.hashCode() % numPartitions);
        }*/

        // The key is the message index as a string (see MyProducer), so
        // message i goes to partition i % numPartitions. A non-numeric key
        // would throw a NumberFormatException here.
        return Integer.valueOf((String) key) % numPartitions;
    }
}
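
Given those keys, the mapping is deterministic: message i lands in partition i % 3. A quick check (a hypothetical throwaway class I am adding only to illustrate, not part of the original project):

// PartitionMappingCheck.java -- hypothetical, illustration only.
// Message194/197/200/203/206 all satisfy i % 3 == 2, so they sit in
// partition 2, which matches the console output above.
package test.wzh;

public class PartitionMappingCheck {
    public static void main(String[] args) {
        MyPartitioner p = new MyPartitioner(null); // the ctor ignores its argument
        for (int i : new int[] {194, 197, 200, 203, 206}) {
            System.out.println("Message" + i + " -> partition " + p.partition(String.valueOf(i), 3));
        }
    }
}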

//MyHighLevelConsumer.java

package test.wzh;

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.message.MessageAndMetadata;

public class MyHighLevelConsumer {


    private String groupid;

    private String consumerid;

    private int threadPerTopic;

    public class KafkaProperties {

        public static final String ZK_CONNECT = "******";

        public static final String TOPIC = "test";

        public static final String BROKER_CONNECT = "******";

        public static final String GROUP_ID = "test_group1";

    }

    public MyHighLevelConsumer(String groupid, String consumerid, int threadPerTopic) {
        super();
        this.groupid = groupid;
        this.consumerid = consumerid;
        this.threadPerTopic = threadPerTopic;
    }

    public void consume() {
        Properties props = new Properties();
        props.put("group.id", groupid);
        props.put("consumer.id", consumerid);
        props.put("zookeeper.connect", KafkaProperties.ZK_CONNECT);
        props.put("zookeeper.session.timeout.ms", "60000");
        props.put("zookeeper.sync.time.ms", "2000");
        // props.put("auto.commit.interval.ms", "1000");

        ConsumerConfig config = new ConsumerConfig(props);
        ConsumerConnector connector = Consumer.createJavaConsumerConnector(config);

        Map<String, Integer> topicCountMap = new HashMap<String, Integer>();

        topicCountMap.put(KafkaProperties.TOPIC, threadPerTopic);

        Map<String, List<KafkaStream<byte[], byte[]>>> streams = connector.createMessageStreams(topicCountMap);

        for (KafkaStream<byte[], byte[]> stream : streams.get(KafkaProperties.TOPIC)) {
            new MyStreamThread(stream).start();
        }
    }

    private class MyStreamThread extends Thread {
        private KafkaStream<byte[], byte[]> stream;

        public MyStreamThread(KafkaStream<byte[], byte[]> stream) {
            super();
            this.stream = stream;
        }

        @Override
        public void run() {
            ConsumerIterator<byte[], byte[]> streamIterator = stream.iterator();

            while (streamIterator.hasNext()) {
                MessageAndMetadata<byte[], byte[]> message = streamIterator.next();
                String topic = message.topic();
                int partition = message.partition();
                long offset = message.offset();
                String key = new String(message.key());
                String msg = new String(message.message());

                System.out.println("consumerid:" + consumerid + ", thread : " + Thread.currentThread().getName()
                        + ", topic : " + topic + ", partition : " + partition + ", offset : " + offset + " , key : "
                        + key + " , mess : " + msg);
            }
        }
    }

    public static void main(String[] args) {
        String groupid = "myconsumergroup";
        // Two consumer instances, each asking for 3 streams: that is 6 streams
        // in one group competing for only 3 partitions, so at least 3 streams
        // never receive anything.
        MyHighLevelConsumer consumer1 = new MyHighLevelConsumer(groupid, "myconsumer1", 3);
        MyHighLevelConsumer consumer2 = new MyHighLevelConsumer(groupid, "myconsumer2", 3);

        consumer1.consume();
        consumer2.consume();
    }
}
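
If the goal is strictly one partition per consumer, a variant of the main() above (a sketch only; I have not verified the assignment on a live cluster) would create three instances with a single stream each, so the group has exactly three streams for the three partitions:

// Sketch: an alternative main() for MyHighLevelConsumer -- three consumers in
// the same group, one stream each, leaving the rebalance exactly three streams
// to spread across the three partitions.
public static void main(String[] args) {
    String groupid = "myconsumergroup";
    new MyHighLevelConsumer(groupid, "myconsumer1", 1).consume();
    new MyHighLevelConsumer(groupid, "myconsumer2", 1).consume();
    new MyHighLevelConsumer(groupid, "myconsumer3", 1).consume();
}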

//MySimpleConsumer2.java

package test.wzh;

import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import kafka.api.FetchRequest;
import kafka.api.FetchRequestBuilder;
import kafka.api.PartitionOffsetRequestInfo;
import kafka.cluster.Broker;
import kafka.common.TopicAndPartition;
import kafka.javaapi.FetchResponse;
import kafka.javaapi.OffsetRequest;
import kafka.javaapi.OffsetResponse;
import kafka.javaapi.PartitionMetadata;
import kafka.javaapi.TopicMetadata;
import kafka.javaapi.TopicMetadataRequest;
import kafka.javaapi.TopicMetadataResponse;
import kafka.javaapi.consumer.SimpleConsumer;
import kafka.javaapi.message.ByteBufferMessageSet;
import kafka.message.Message;
import kafka.message.MessageAndOffset;

public class MySimpleConsumer2 {

    public class KafkaProperties {

        public static final String ZK = "******";

        public static final String TOPIC = "test";

        public static final String BROKER_CONNECT = "******";

        public static final String GROUP_ID = "test_group1";

    }


    public static void main(String[] args) {
        new MySimpleConsumer2().consume();
    }


    public void consume() {
        int partition = 0;

        Broker leaderBroker = findLeader(KafkaProperties.BROKER_CONNECT, KafkaProperties.TOPIC, partition);

        SimpleConsumer simpleConsumer =
                new SimpleConsumer(leaderBroker.host(), leaderBroker.port(), 20000, 100, "mySimpleConsumer");
        long startOffset = 0;
        // fetchSize is a size in bytes, not a message count; 100 bytes may be
        // too small to hold the next message, in which case the message set
        // iterates empty and the loop makes no further progress -- which would
        // explain why only a single message is printed.
        int fetchSize = 100;

        while (true) {
            long offset = startOffset;
            FetchRequest req =
                    new FetchRequestBuilder().addFetch(KafkaProperties.TOPIC, partition, startOffset, fetchSize).build();

            FetchResponse fetchResponse = simpleConsumer.fetch(req);

            ByteBufferMessageSet messageSet = fetchResponse.messageSet(KafkaProperties.TOPIC, partition);
            for (MessageAndOffset messageAndOffset : messageSet) {
                Message mess = messageAndOffset.message();
                ByteBuffer payload = mess.payload();
                byte[] bytes = new byte[payload.limit()];
                payload.get(bytes);
                String msg = new String(bytes);

                offset = messageAndOffset.offset();
                // Note: "3" is hardcoded in this print statement, which is why
                // the output above reports partition 3 even though partition 0
                // is the one being fetched.
                System.out.println("partition : " + 3 + ", offset : " + offset + "  mess : " + msg);
            }

            startOffset = offset + 1;
        }
    }



    public Broker findLeader(String brokerHosts, String topic, int partition) {
        Broker leader = findPartitionMetadata(brokerHosts, topic, partition).leader();
        System.out.println(String.format("Leader for topic %s, partition %d is %s:%d", topic, partition, leader.host(),
                leader.port()));
        return leader;
    }


    private PartitionMetadata findPartitionMetadata(String brokerHosts, String topic, int partition) {
        PartitionMetadata returnMetaData = null;
        for (String brokerHost : brokerHosts.split(",")) {
            SimpleConsumer consumer = null;
            String[] splits = brokerHost.split(":");
            consumer = new SimpleConsumer(splits[0], Integer.valueOf(splits[1]), 100000, 64 * 1024, "leaderLookup");
            List<String> topics = Collections.singletonList(topic);
            TopicMetadataRequest request = new TopicMetadataRequest(topics);
            TopicMetadataResponse response = consumer.send(request);
            List<TopicMetadata> topicMetadatas = response.topicsMetadata();
            for (TopicMetadata topicMetadata : topicMetadatas) {
                for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
                    if (partitionMetadata.partitionId() == partition) {
                        returnMetaData = partitionMetadata;
                    }
                }
                }
            }
            if (consumer != null)
                consumer.close();
        }
        return returnMetaData;
    }


    /**
     * Find the offset for a client's consumption at a given timestamp.
     *
     * @param consumer SimpleConsumer
     * @param topic topic
     * @param partition partition
     * @param clientID client ID
     * @param whichTime timestamp
     * @return offset
     */
    public long getLastOffset(SimpleConsumer consumer, String topic, int partition, String clientID, long whichTime) {
        TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
        Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo =
                new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
        requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(whichTime, 1));
        OffsetRequest request = new OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientID);
        OffsetResponse response = consumer.getOffsetsBefore(request);
        long[] offsets = response.offsets(topic, partition);
        return offsets[0];
    }
}
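
Side note: getLastOffset() above is defined but never called. A sketch (untested) of wiring it into consume(), so the loop starts from the earliest offset the broker still holds instead of a hard-coded 0 (EarliestTime() comes from the old kafka.api.OffsetRequest object):

// Sketch: replace "long startOffset = 0;" inside consume() with this lookup.
long startOffset = getLastOffset(simpleConsumer, KafkaProperties.TOPIC, partition,
        "mySimpleConsumer", kafka.api.OffsetRequest.EarliestTime());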

I want to create 3 consumers in the same consumer group, one for each of the three partitions of topic test, and print the consumed messages to the console to verify. The IDE I am using is IDEA.
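
For reference, with the newer consumer API (org.apache.kafka.clients.consumer, available since 0.9; the sketch below assumes a 2.0+ client for poll(Duration) and uses a placeholder bootstrap address), each consumer can be pinned to exactly one partition with assign(). This is a minimal sketch, not the code above; run three copies with arguments 0, 1 and 2:

// OnePartitionConsumer.java -- minimal sketch with the newer consumer API.
package test.wzh;

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

public class OnePartitionConsumer {
    public static void main(String[] args) {
        int partition = Integer.parseInt(args[0]); // 0, 1 or 2
        Properties props = new Properties();
        props.put("bootstrap.servers", "******"); // placeholder, as above
        props.put("group.id", "myconsumergroup");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props);
        // assign() bypasses group rebalancing: this consumer owns exactly this partition.
        consumer.assign(Collections.singletonList(new TopicPartition("test", partition)));
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(500));
            for (ConsumerRecord<String, String> record : records) {
                System.out.println("partition : " + record.partition() + ", offset : "
                        + record.offset() + ", key : " + record.key() + ", mess : " + record.value());
            }
        }
    }
}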
