了解 Kafka ZooKeeper 自动重置(auto.offset.reset)

时间:2017-04-25 05:22:43

标签: apache-kafka

我仍然怀疑kafka ZOOKEPER_AUTO_RESET。我已经看到很多关于这方面的问题。如果相同的是重复的查询,请原谅。

我有一个高级别的java消费者继续消费。 我有多个主题,所有主题都有一个分区。

我关注的是以下内容。

我用消费者组名“ncdev1”和 ZOOKEPER_AUTO_RESET = smallest 启动了 consumerkafka.jar。可以观察到初始偏移量(init offset)被设置为 -1。一段时间后我停止并重新启动该 jar 文件。此时,它选择了分配给该消费者组(ncdev1)的最新偏移量,即 36。一段时间后我再次重启,初始偏移量被设置为 39,这是最新值。

然后我将组名改为ZOOKEPER_GROUP_ID = ncdev2。并重新启动jar文件,这次将偏移量设置为-1。在进一步重新启动时,它跳转到最新值,即39

然后我设置了 ZOOKEPER_AUTO_RESET = largest,并且 ZOOKEPER_GROUP_ID = ncdev3。

然后尝试使用组名ncdev3重新启动jar文件。它重新启动时选择偏移量的方式没有区别。也就是说它重新启动时选择39,这与之前的配置相同。

为什么不从头开始选择偏移量的任何想法。还有其他任何配置要使它从头开始读取?(来自What determines Kafka consumer offset?的最大和最小理解)

先谢谢

已添加代码

/**
 * Old (ZooKeeper-based) high-level Kafka consumer that streams messages from a
 * single topic to a TCP socket and, when logging is enabled, also appends each
 * message to a per-topic log file.
 *
 * All endpoints and consumer settings are read via {@code PropertyUtils}.
 */
public class ConsumerForKafka {
    private final ConsumerConnector consumer;
    private final String topic;
    private ExecutorService executor;   // currently unused; kept for compatibility
    ServerSocket soketToWrite;          // currently unused; kept for compatibility
    Socket s_Accept;
    OutputStream s1out;
    DataOutputStream dos;
    static boolean logEnabled;          // when true, messages are tee'd to fileName
    static File fileName;               // per-topic log file, set by createDirectory()


    private static final Logger logger = Logger.getLogger(ConsumerForKafka.class);


    /**
     * Creates the high-level consumer connector.
     *
     * @param a_zookeeper     ZooKeeper connect string (host:port[,host:port...])
     * @param a_groupId       consumer group id
     * @param a_topic         topic to consume
     * @param session_timeout value for zookeeper.session.timeout.ms
     * @param auto_reset      value for auto.offset.reset ("smallest" or "largest")
     * @param a_commitEnable  value for auto.commit.enable ("true" or "false")
     */
    public ConsumerForKafka(String a_zookeeper, String a_groupId, String a_topic,String session_timeout,String auto_reset,String a_commitEnable) {
        consumer = kafka.consumer.Consumer.createJavaConsumerConnector(
                createConsumerConfig(a_zookeeper, a_groupId,session_timeout,auto_reset,a_commitEnable));
        this.topic = a_topic;
    }


    /**
     * Consumes messages forever, forwarding each one to the configured socket and,
     * if {@link #logEnabled} is set, appending it to {@link #fileName}.
     *
     * A ConsumerTimeoutException (consumer.timeout.ms elapsed with no message)
     * simply re-enters the outer poll loop; this method only returns by throwing.
     *
     * @param a_numThreads number of streams requested for the topic
     * @param a_zookeeper  unused; kept for signature compatibility
     * @param a_topic      unused; the field set in the constructor is consumed
     * @throws IOException if the forwarding socket or log file cannot be written
     */
    public void run(int a_numThreads,String a_zookeeper,  String a_topic) throws InterruptedException, IOException {
        Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
        topicCountMap.put(topic, Integer.valueOf(a_numThreads));
        Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
        String socketURL = PropertyUtils.getProperty("SOCKET_CONNECT_HOST");
        int socketPort = Integer.parseInt(PropertyUtils.getProperty("SOCKET_CONNECT_PORT"));
        Socket socks = new Socket(socketURL, socketPort);

        PrintWriter outWriter = null;
        BufferedWriter bw = null;
        try {
            outWriter = new PrintWriter(socks.getOutputStream(), true);
            logger.info("logged");
            if (logEnabled) {
                // append mode so restarts do not truncate earlier messages
                bw = new BufferedWriter(new FileWriter(fileName, true));
            }

            for (;;) {
                List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);
                boolean keepRunningThread = true;

                for (final KafkaStream stream : streams) {

                    ConsumerIterator<byte[], byte[]> it = stream.iterator();

                    while (keepRunningThread) {
                        try {
                            if (it.hasNext()) {
                                String data = new String(it.next().message()) + "\n";
                                if (logEnabled) {
                                    bw.write(data);
                                    bw.flush();
                                    outWriter.print(data);
                                    outWriter.flush();
                                    // explicit commit so offsets advance even when
                                    // auto.commit.enable is false
                                    consumer.commitOffsets();
                                    logger.info("Explicit commit ......");
                                } else {
                                    outWriter.print(data);
                                    outWriter.flush();
                                }
                            }
                        } catch (ConsumerTimeoutException ex) {
                            // no message within consumer.timeout.ms: fall back to
                            // the outer poll loop
                            keepRunningThread = false;
                        } catch (NullPointerException npe) {
                            npe.printStackTrace();
                        } catch (IllegalStateException ile) {
                            ile.printStackTrace();
                        }
                    }
                }
            }
        } finally {
            // FIX: previously nothing was ever closed; release the socket and file
            // handles if the loop exits via an exception
            if (bw != null) {
                try { bw.close(); } catch (IOException ignored) { }
            }
            if (outWriter != null) {
                outWriter.close();
            }
            try { socks.close(); } catch (IOException ignored) { }
        }
    }

    /**
     * Builds the old-consumer configuration.
     *
     * NOTE(review): consumer.timeout.ms is a very aggressive 30 ms, so the stream
     * iterator times out almost continuously when the topic is idle — confirm this
     * is intentional before tuning.
     */
    private static ConsumerConfig createConsumerConfig(String a_zookeeper, String a_groupId,String session_timeout,String auto_reset,String commitEnable) {
        Properties props = new Properties();
        props.put("zookeeper.connect", a_zookeeper);
        props.put("group.id", a_groupId);
        props.put("zookeeper.session.timeout.ms", session_timeout);
        props.put("zookeeper.sync.time.ms", "2000");
        // only applies when the group has NO committed offset (or it is out of range)
        props.put("auto.offset.reset", auto_reset);
        props.put("auto.commit.interval.ms", "60000");
        props.put("consumer.timeout.ms", "30");
        props.put("auto.commit.enable", commitEnable);
        //props.put("rebalance.max.retries", "4");

        return new ConsumerConfig(props);
    }

    /**
     * Entry point: {@code ConsumerForKafka <topic>}. Probes the forwarding socket,
     * then consumes the given topic forever.
     */
    public static void main(String[] args) throws InterruptedException {

        // FIX: guard against a missing argument instead of throwing
        // ArrayIndexOutOfBoundsException later
        if (args.length < 1) {
            System.err.println("Usage: ConsumerForKafka <topic>");
            System.exit(1);
        }

        String zooKeeper = PropertyUtils.getProperty("ZOOKEEPER_URL_PORT");
        String groupId = PropertyUtils.getProperty("ZOOKEPER_GROUP_ID");
        String session_timeout = PropertyUtils.getProperty("ZOOKEPER_SESSION_TIMOUT_MS"); //6400
        String auto_reset = PropertyUtils.getProperty("ZOOKEPER_AUTO_RESET");  //smallest
        String enableLogging = PropertyUtils.getProperty("ENABLE_LOG");
        String directoryPath = PropertyUtils.getProperty("LOG_DIRECTORY");
        String log4jpath = PropertyUtils.getProperty("LOG_DIR");
        String commitEnable = PropertyUtils.getProperty("ZOOKEPER_COMMIT"); //false
        PropertyConfigurator.configure(log4jpath);

        String socketURL = PropertyUtils.getProperty("SOCKET_CONNECT_HOST");
        int socketPort = Integer.parseInt(PropertyUtils.getProperty("SOCKET_CONNECT_PORT"));
        try {
            // connectivity probe only; FIX: close the probe socket so it is not leaked
            Socket socks = new Socket(socketURL, socketPort);
            boolean connected = socks.isConnected() && !socks.isClosed();
            socks.close();
            if (!connected) {
                logger.info("Not able to connect to socket ..Exiting...");
                System.exit(0);
            }
        } catch (UnknownHostException e1) {
            e1.printStackTrace();
        } catch (java.net.ConnectException cne) {
            logger.info("Not able to connect to socket ..Exiting...");
            System.exit(0);
        } catch (IOException e1) {
            e1.printStackTrace();
        }

        String topic = args[0];
        int threads = 1;

        logEnabled = Boolean.parseBoolean(enableLogging);
        if (logEnabled)
            createDirectory(topic, directoryPath);

        ConsumerForKafka example = new ConsumerForKafka(zooKeeper, groupId, topic, session_timeout, auto_reset, commitEnable);
        try {
            example.run(threads, zooKeeper, topic);
        } catch (java.net.ConnectException cne) {
            cne.printStackTrace();
            System.exit(0);
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /**
     * Ensures the log directory exists and creates the per-topic log file
     * (d_Path/topic.log), storing it in the static {@link #fileName} field.
     */
    private static void createDirectory(String topic, String d_Path) {

        try {
            File dir = new File(d_Path);
            // FIX: mkdirs() so missing parent directories are created as well
            if (!dir.exists()) {
                if (dir.mkdirs()) {
                    logger.info("Directory Created " + dir.getPath());
                } else {
                    logger.info("Directory Creation failed");
                }
            }

            // FIX: use the (parent, child) File constructor so a missing trailing
            // separator in d_Path cannot silently produce a wrong path like
            // "/logsmytopic.log"
            fileName = new File(dir, topic + ".log");
            if (!fileName.exists()) {
                fileName.createNewFile();
            }

        } catch (IOException ioe) {
            // FIX: was silently swallowed; at least record the failure
            logger.info("IOException during directory/file creation: " + ioe.getMessage());
        }
    }
}

1 个答案:

答案 0 :(得分:0)

仔细阅读你的帖子后,我认为你遇到的应该是预期的。

  

我用消费者组名“ncdev1”和 ZOOKEPER_AUTO_RESET = smallest 启动了 consumerkafka.jar。可以观察到初始偏移量被设置为 -1。一段时间后我停止并重新启动该 jar 文件。此时,它选择了分配给该消费者组(ncdev1)的最新偏移量,即 36。

auto.offset.reset 仅在没有初始偏移量、或偏移量超出范围时才生效。由于日志中只有 36 条消息,消费者组很可能非常快地读完了全部记录,这就是为什么你看到消费者组每次重新启动时都会选到最新的偏移量。