Kafka DefaultPartitioner could not be found error?

Time: 2018-03-11 05:33:52

Tags: java apache-kafka

I am trying to create a simple Kafka consumer and producer so that I can reach them from another application running on Cytoscape, a data-visualization tool. So I wrote a simple application to send and receive messages: the producer generates a message and the consumer shows it as an information dialog. These are the classes I use:

Consumer class:

public Consumer(String topicName, String groupID, CySwingAppAdapter adapter){
    this.topicName = topicName;
    this.groupID = groupID;
    this.consumerThread = new ConsumerThread(topicName,groupID,adapter);
    this.adapter = adapter;
}

public void startKafka(){
    // Start the thread created in the constructor, so that stopKafka()
    // wakes up and joins the same thread instance.
    consumerThread.start();
}

public void stopKafka() {
    try {
        consumerThread.getKafkaConsumer().wakeup();
        consumerThread.join();
    }catch (Exception e){
        e.printStackTrace();
    }
}
private static class ConsumerThread extends Thread {

    private String topicName;
    private String groupID;
    private KafkaConsumer<String, String> kafkaConsumer;
    private CySwingAppAdapter adapter;

    public ConsumerThread(String topicName, String groupID, CySwingAppAdapter adapter) {
        this.topicName = topicName;
        this.groupID = groupID;
        this.adapter = adapter;
    }

    public void run() {
        Properties configProperties = new Properties();
        configProperties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:32768");
        configProperties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        configProperties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        configProperties.put(ConsumerConfig.GROUP_ID_CONFIG, groupID);

        kafkaConsumer = new KafkaConsumer<String, String>(configProperties);
        kafkaConsumer.subscribe(Arrays.asList(topicName), new ConsumerRebalanceListener() {
            public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
                System.out.println("Information: This consumer does not listen" +
                        Arrays.toString(partitions.toArray()) + " partition anymore.");
            }
            public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
                System.out.println("Information: This consumer is now listening: " + Arrays.toString(partitions.toArray())
                        + " partition(s)");
            }
        });

        try {
            while (true) {
                ConsumerRecords<String, String> records = kafkaConsumer.poll(100);
                for (ConsumerRecord<String, String> record : records)
                    JOptionPane.showMessageDialog(adapter.getCySwingApplication().getJFrame(),record.value(),
                            "Information!", JOptionPane.INFORMATION_MESSAGE);
            }
        } catch (WakeupException ex) {
            ex.printStackTrace();
        }

        kafkaConsumer.close();
    }

    public KafkaConsumer<String, String> getKafkaConsumer() {
        return this.kafkaConsumer;
    }
}
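
As a side note, the poll loop above already uses the wakeup() pattern from the KafkaConsumer documentation. A minimal sketch of that pattern, written against the same fields as the thread above, treats WakeupException as the expected shutdown signal and closes the consumer in a finally block instead of after the try:

try {
    while (true) {
        // poll() throws WakeupException once stopKafka() has called wakeup() on this consumer
        ConsumerRecords<String, String> records = kafkaConsumer.poll(100);
        for (ConsumerRecord<String, String> record : records) {
            // hand the record to the UI here (e.g. the JOptionPane call above)
        }
    }
} catch (WakeupException ex) {
    // expected on shutdown; nothing to log
} finally {
    kafkaConsumer.close();
}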

Producer class:

public Producer(String topicName, String input, CySwingAppAdapter adapter){
    this.adapter = adapter;
    this.topicName = topicName;
    this.input = input;

    this.configProperties = new Properties();
    configProperties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,"localhost:32768");
    configProperties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,"org.apache.kafka.common.serialization.StringSerializer");
    configProperties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,"org.apache.kafka.common.serialization.StringSerializer");
    configProperties.put(ProducerConfig.PARTITIONER_CLASS_CONFIG, KafkaPartitioner.class.getCanonicalName());
    configProperties.put("Partition0","a");
    configProperties.put("Partition1","b");
    this.producer = new KafkaProducer<String, String>(configProperties);
}

public void produceSmth(){
    ProducerRecord<String, String> rec = new ProducerRecord<String, String>(topicName, input);
    producer.send(rec, new Callback() {
        public void onCompletion(RecordMetadata metadata, Exception exception) {
            System.out.println("Sended record metadata:\nMessage: " + input + "\nTopic: " +
                    metadata.topic() + "\nPartition: " + metadata.partition());
        }

    });

}

public void stopProducer(){
    producer.close();
}
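
One note on the configuration above: partitioner.class is set to the string returned by getCanonicalName(), so the Kafka client has to resolve the custom class by name through a classloader. As far as I know, the client's config parser also accepts a Class object directly, which avoids that by-name lookup for the custom partitioner (this is only an aside about KafkaPartitioner; it does not by itself explain the DefaultPartitioner message below). A sketch, assuming the same KafkaPartitioner class as in this question:

// Assumption: KafkaPartitioner is the custom partitioner class from this question.
// Passing the Class object avoids resolving it by name through the context classloader.
configProperties.put(ProducerConfig.PARTITIONER_CLASS_CONFIG, KafkaPartitioner.class);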

Partitioner class:

public void configure(Map<String, ?> configs) {
    for(Map.Entry<String,?> entry: configs.entrySet()){
        if(entry.getKey().startsWith("Partition")){
            key = entry.getKey();
            value = (String)entry.getValue();
            int partitionId = Integer.parseInt(key.substring(9));
            kafkaPartitioner.put(value, partitionId);
        }
    }
}

public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes,
                     Cluster cluster) {
    partitionList = cluster.availablePartitionsForTopic(topic);
    input = (String)value;
    prefix = ((String) value).split("-")[0];
    if(kafkaPartitioner.containsKey(prefix)){
        return kafkaPartitioner.get(prefix);
    }else {
        int noOfPartitions = cluster.topics().size();
        return  value.hashCode()%noOfPartitions + kafkaPartitioner.size() ;
    }
}
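
The partitioner shown above is only a fragment: a custom partitioner has to implement org.apache.kafka.clients.producer.Partitioner (configure, partition and close) and must sit on a classpath the Kafka client can see. A self-contained skeleton along the lines of the question's code might look like the sketch below; the field names and the fallback branch are assumptions (the question's fallback counts topics via cluster.topics().size() rather than partitions), so treat it as an illustration, not the asker's exact class.

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.producer.Partitioner;
import org.apache.kafka.common.Cluster;

public class KafkaPartitioner implements Partitioner {

    // maps a message prefix ("a", "b", ...) to an explicit partition id
    private final Map<String, Integer> kafkaPartitioner = new HashMap<String, Integer>();

    public void configure(Map<String, ?> configs) {
        // pick up entries such as Partition0=a, Partition1=b from the producer properties
        for (Map.Entry<String, ?> entry : configs.entrySet()) {
            if (entry.getKey().startsWith("Partition")) {
                int partitionId = Integer.parseInt(entry.getKey().substring("Partition".length()));
                kafkaPartitioner.put((String) entry.getValue(), partitionId);
            }
        }
    }

    public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes,
                         Cluster cluster) {
        String prefix = ((String) value).split("-")[0];
        if (kafkaPartitioner.containsKey(prefix)) {
            return kafkaPartitioner.get(prefix);
        }
        // fallback (assumption): hash unmapped messages over the remaining partitions
        int noOfPartitions = cluster.partitionsForTopic(topic).size();
        int unmapped = noOfPartitions - kafkaPartitioner.size();
        if (unmapped <= 0) {
            return (value.hashCode() & 0x7fffffff) % noOfPartitions;
        }
        return kafkaPartitioner.size() + (value.hashCode() & 0x7fffffff) % unmapped;
    }

    public void close() {
        // nothing to clean up
    }
}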

The class from which I call the classes above:

this.consumer = new Consumer("demo", "group1", adapter);
this.producer = new Producer("demo", "a-hello world", adapter);
consumer.startKafka();
producer.produceSmth();
producer.stopProducer();
consumer.stopKafka();

When I run the code, it fails with an error. The whole error message is below. Where am I going wrong?

Error message:

DefaultPartitioner could not be found

0 Answers:

No answers yet
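
For what it's worth, this exact "DefaultPartitioner could not be found" message is frequently reported when kafka-clients runs inside a plugin or OSGi container (Cytoscape apps run inside OSGi): the client resolves configured class names through the thread context classloader, and inside the container that loader may not see the Kafka classes. A commonly suggested workaround, shown here only as a sketch against the Producer fields above and not verified for this setup, is to swap the context classloader while constructing the client:

// Sketch only: assumes the failure comes from the thread context classloader inside the
// OSGi host not seeing the kafka-clients classes. Uses the fields of the Producer above.
ClassLoader original = Thread.currentThread().getContextClassLoader();
try {
    // with a null context classloader, the Kafka client falls back to its own classloader
    Thread.currentThread().setContextClassLoader(null);
    this.producer = new KafkaProducer<String, String>(configProperties);
} finally {
    Thread.currentThread().setContextClassLoader(original);
}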