Spring Kafka transactions enabled, but the consumer still receives rolled-back messages

Asked: 2019-10-30 11:18:09

Tags: apache-kafka spring-kafka

I am using Spring Kafka transactions in both my producer and consumer applications.

The producer side involves multiple steps: send the message to Kafka, then save it to the database. If the save to the database fails, the message that was sent to Kafka should be rolled back as well.

On the consumer side I therefore set isolation.level to read_committed, so that if a message's Kafka transaction is rolled back, the consumer should never read it.

The producer application code is:

@Configuration
@EnableKafka
public class KafkaConfiguration {

  @Bean
  public ProducerFactory<String, Customer> producerFactory() {
    DefaultKafkaProducerFactory<String, Customer> pf = new DefaultKafkaProducerFactory<>(producerConfigs());
    pf.setTransactionIdPrefix("customer.txn.tx-");
    return pf;
  }

  @Bean
  public Map<String, Object> producerConfigs() {
    Map<String, Object> props = new HashMap<>();
    // minimal producer configs
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "127.0.0.1:9092"); // host:port pairs, no URL scheme
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, KafkaAvroSerializer.class);
    props.put("schema.registry.url", "http://127.0.0.1:8081");

    // create safe Producer
    props.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true");
    props.put(ProducerConfig.ACKS_CONFIG, "all");
    props.put(ProducerConfig.RETRIES_CONFIG, Integer.toString(Integer.MAX_VALUE));
    props.put(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "5"); // safe to keep at 5 with the idempotent producer on Kafka >= 1.1; use 1 on older brokers

    // high throughput producer (at the expense of a bit of latency and CPU usage)
    props.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, "snappy");
    props.put(ProducerConfig.LINGER_MS_CONFIG, "20");
    props.put(ProducerConfig.BATCH_SIZE_CONFIG, Integer.toString(32 * 1024)); // 32 KB batch size
    return props;
  }

  @Bean
  public KafkaTemplate<String, Customer> kafkaTemplate() {
    return new KafkaTemplate<>(producerFactory());
  }

  @Bean
  public KafkaTransactionManager<String, Customer> kafkaTransactionManager(ProducerFactory<String, Customer> producerFactory) {
    KafkaTransactionManager<String, Customer> ktm = new KafkaTransactionManager<>(producerFactory);
    ktm.setTransactionSynchronization(AbstractPlatformTransactionManager.SYNCHRONIZATION_ON_ACTUAL_TRANSACTION);
    return ktm;
  }

  @Bean
  @Primary
  public JpaTransactionManager jpaTransactionManager(EntityManagerFactory entityManagerFactory) {
    return new JpaTransactionManager(entityManagerFactory);
  }

  @Bean(name = "chainedTransactionManager")
  public ChainedTransactionManager chainedTransactionManager(JpaTransactionManager jpaTransactionManager,
                                                             KafkaTransactionManager<String, Customer> kafkaTransactionManager) {
    return new ChainedTransactionManager(kafkaTransactionManager, jpaTransactionManager);
  }
}


@Component
@Slf4j
public class KafkaProducerService {

  private KafkaTemplate<String, Customer> kafkaTemplate;
  private CustomerConverter customerConverter;
  private CustomerRepository customerRepository;

  public KafkaProducerService(KafkaTemplate<String, Customer> kafkaTemplate, CustomerConverter customerConverter, CustomerRepository customerRepository) {
    this.kafkaTemplate = kafkaTemplate;
    this.customerConverter = customerConverter;
    this.customerRepository = customerRepository;
  }

  @Transactional(transactionManager = "chainedTransactionManager", rollbackFor = Exception.class)
  public void sendEvents(String topic, CustomerModel customer) {
    LOGGER.info("Sending to Kafka: topic: {}, key: {}, customer: {}", topic, customer.getKey(), customer);
//    kafkaTemplate.send(topic, customer.getKey(), customerConverter.convertToAvro(customer));
    kafkaTemplate.executeInTransaction(kt -> kt.send(topic, customer.getKey(), customerConverter.convertToAvro(customer)));
    customerRepository.saveToDb();
  }
}


To test this, I explicitly throw an exception in the saveToDb method, and I can see the exception being thrown. But the consumer application still sees the message.
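
For reference, saveToDb is just a stub that throws, so the chained transaction always rolls back (a hypothetical sketch; the real repository method is not shown here):

@Repository
public class CustomerRepository {

  // Simulates a DB failure so the surrounding chained transaction rolls back.
  public void saveToDb() {
    throw new RuntimeException("simulated DB failure");
  }
}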

The consumer code:

@Slf4j
@Configuration
@EnableKafka
public class KafkaConfiguration {

  @Bean
  ConcurrentKafkaListenerContainerFactory<String, Customer> kafkaListenerContainerFactory() {
    ConcurrentKafkaListenerContainerFactory<String, Customer> factory = new ConcurrentKafkaListenerContainerFactory<>();
    factory.setConsumerFactory(consumerFactory());
    factory.setAfterRollbackProcessor(new DefaultAfterRollbackProcessor<String, Customer>(-1));


//    SeekToCurrentErrorHandler errorHandler =
//        new SeekToCurrentErrorHandler((record, exception) -> {
//          // recover after 3 failures - e.g. send to a dead-letter topic
////          LOGGER.info("***in error handler data, {}", record);
////          LOGGER.info("***in error handler headers, {}", record.headers());
////          LOGGER.info("value: {}", new String(record.headers().headers("springDeserializerExceptionValue").iterator().next().value()));
//        }, 3);
//
//    factory.setErrorHandler(errorHandler);

    return factory;
  }

  @Bean
  public ConsumerFactory<String, Customer> consumerFactory() {
    return new DefaultKafkaConsumerFactory<>(consumerConfigs());
  }

  @Bean
  public Map<String, Object> consumerConfigs() {
    Map<String, Object> props = new HashMap<>();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
//    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
//    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, KafkaAvroDeserializer.class);

    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ErrorHandlingDeserializer2.class);
    props.put(ErrorHandlingDeserializer2.VALUE_DESERIALIZER_CLASS, KafkaAvroDeserializer.class);

    props.put("schema.registry.url", "http://127.0.0.1:8081");
    props.put("specific.avro.reader", "true");
    props.put("isolation.level", "read_committed");

//    props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false"); // disable auto commit of offsets
    props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, "100"); // cap the number of records returned per poll
    return props;
  }
}

@Component
@Slf4j
public class KafkaConsumerService {

  @KafkaListener(id = "demo-consumer-stream-group", topics = "customer.txn")
  @Transactional
  public void process(ConsumerRecord<String, Customer> record) {
    LOGGER.info("Customer key: {} and value: {}", record.key(), record.value());
    LOGGER.info("topic: {}, partition: {}, offset: {}", record.topic(), record.partition(), record.offset());
  }
}

Am I missing something here?

1 Answer:

Answer 0 (score: 0)

executeInTransaction runs in a separate local transaction: the send is committed on its own as soon as the callback returns, independently of the chained (Kafka + JPA) transaction that later rolls back, which is why a read_committed consumer still sees the record. See the javadocs:

/**
 * Execute some arbitrary operation(s) on the operations and return the result.
 * The operations are invoked within a local transaction and do not participate
 * in a global transaction (if present).
 * @param callback the callback.
 * @param <T> the result type.
 * @return the result.
 * @since 1.1
 */
<T> T executeInTransaction(OperationsCallback<K, V, T> callback);

Simply use send() to participate in the existing transaction.
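
For illustration, the producer service from the question would then look something like this (a minimal sketch reusing the names from the question):

  @Transactional(transactionManager = "chainedTransactionManager", rollbackFor = Exception.class)
  public void sendEvents(String topic, CustomerModel customer) {
    LOGGER.info("Sending to Kafka: topic: {}, key: {}, customer: {}", topic, customer.getKey(), customer);
    // send() participates in the transaction started by @Transactional;
    // if saveToDb() throws, the Kafka transaction is aborted too and a
    // read_committed consumer will never see the record.
    kafkaTemplate.send(topic, customer.getKey(), customerConverter.convertToAvro(customer));
    customerRepository.saveToDb();
  }

executeInTransaction is intended for the opposite case: publishing in its own short-lived Kafka transaction when there is no outer transaction to join.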