Versions:
spring-boot : 2.2.2.RELEASE
spring-kafka : 2.3.7.RELEASE
kafka broker : 2.3.1 (via amazon MSK)
Properties:
auto.offset.reset: earliest
enable.auto.commit: false
isolation.level: read_committed
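For reference, those three properties correspond to the following kafka-clients ConsumerConfig keys. A minimal sketch of how they could be assembled in code (e.g. if one were building the consumer properties by hand rather than through Boot configuration; the method name is just for illustration):

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerConfig;

static Map<String, Object> consumerOverrides() {
    Map<String, Object> props = new HashMap<>();
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");      // auto.offset.reset
    props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);          // enable.auto.commit
    props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");  // isolation.level
    return props;
}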
I have a KafkaListener using a ConcurrentKafkaListenerContainerFactory, configured with a custom implementation of ConsumerRecordRecoverer. I have noticed that when this container does recover from an exception, the consumer offset for the recovered message is never committed. Offsets are only committed when a message is eventually processed successfully (i.e., one that required no recovery). However, the listener/consumer/container does seem to keep the real offset in memory, since it moves past the recovered message as long as the application keeps running.

This causes a problem if the Spring Boot application is restarted while the last message was not processed successfully: consumption resumes from the last offset that was actually committed, potentially reprocessing messages that were already recovered but whose offsets were never committed. I confirmed this with a local test against an empty topic.

At this point I assume I am missing some key configuration or setter on one of the Spring artifacts, but it is not clear to me what. I thought this was the purpose of setting DefaultAfterRollbackProcessor#setCommitRecovered to true.
KafkaConfiguration
@Configuration
public class KafkaConfig {

    // Not shown in the original snippet; presumably injected elsewhere in the class.
    // Declared here so that avroMessageProducerFactory below compiles (assumption).
    @Autowired(required = false)
    private Serializer<SpecificRecord> kafkaAvroSerializer;

    @Bean
    ConsumerRetryConfig retryConfig() {
        return new ConsumerRetryConfig();
    }
    @Bean
    public RetryTemplate consumerRetryTemplate(ConsumerRetryConfig consumerRetryConfig) {
        RetryTemplate retryTemplate = new RetryTemplate();

        FixedBackOffPolicy fixedBackOffPolicy = new FixedBackOffPolicy();
        fixedBackOffPolicy.setBackOffPeriod(consumerRetryConfig.getRetryWaitInterval());
        retryTemplate.setBackOffPolicy(fixedBackOffPolicy);

        SimpleRetryPolicy retryPolicy = new SimpleRetryPolicy();
        retryPolicy.setMaxAttempts(consumerRetryConfig.getMaxRetries());
        retryTemplate.setRetryPolicy(retryPolicy);

        return retryTemplate;
    }

    @Bean
    @Lazy
    FiniteRequeueingRecovererConfig finiteRequeueingRecovererConfig() {
        return new FiniteRequeueingRecovererConfig();
    }

    @Bean
    @Lazy
    FiniteRequeueingRecordRecoverer finiteRequeueingRecordRecoverer(
            KafkaTemplate<String, SpecificRecord> kafkaTemplate,
            FiniteRequeueingRecovererConfig finiteRequeueingRecovererConfig
    ) {
        return new FiniteRequeueingRecordRecoverer(kafkaTemplate,
                finiteRequeueingRecovererConfig.getMaxRequeues());
    }

    @Bean
    @Lazy
    DefaultAfterRollbackProcessor finiteRequeueingRollbackProcessor(
            FiniteRequeueingRecordRecoverer finiteRequeueingRecordRecoverer,
            ConsumerRetryConfig consumerRetryConfig
    ) {
        DefaultAfterRollbackProcessor ret = new DefaultAfterRollbackProcessor(
                finiteRequeueingRecordRecoverer,
                new FixedBackOff(
                        consumerRetryConfig.getRetryWaitInterval(),
                        consumerRetryConfig.getMaxRetries()
                )
        );
        ret.setCommitRecovered(true);
        return ret;
    }

    @Bean
    public ProducerFactory<String, SpecificRecord> avroMessageProducerFactory(KafkaProperties kafkaProperties) {
        Map<String, Object> props = MapBuilder.<String, Object>builder()
                .putAll(kafkaProperties.buildProducerProperties())
                .put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, UUID.randomUUID().toString())
                .build();
        return (kafkaAvroSerializer == null) ?
                new DefaultKafkaProducerFactory<>(props) :
                new DefaultKafkaProducerFactory(props, new StringSerializer(), kafkaAvroSerializer);
    }

    @Bean
    public KafkaTemplate<String, SpecificRecord> avroMessageKafkaTemplate(ProducerFactory<String, SpecificRecord> avroMessageProducerFactory) {
        return new KafkaTemplate<>(avroMessageProducerFactory);
    }

    @Bean
    public KafkaTransactionManager<?, ?> kafkaTransactionManager(ProducerFactory<String, SpecificRecord> avroMessageProducerFactory) {
        return new KafkaTransactionManager<>(avroMessageProducerFactory);
    }

    @Bean
    public ConcurrentKafkaListenerContainerFactory<?, ?> finiteRequeueingKafkaListenerContainerFactory(
            ConsumerFactory<Object, Object> consumerFactory,
            ConcurrentKafkaListenerContainerFactoryConfigurer configurer,
            KafkaTransactionManager<Object, Object> kafkaTransactionManager,
            DefaultAfterRollbackProcessor finiteRequeueingRollbackProcessor
    ) {
        ConcurrentKafkaListenerContainerFactory<Object, Object> factory =
                new ConcurrentKafkaListenerContainerFactory<>();
        configurer.configure(factory, consumerFactory);
        factory.getContainerProperties().setTransactionManager(kafkaTransactionManager);
        factory.setStatefulRetry(true);
        factory.setAfterRollbackProcessor(finiteRequeueingRollbackProcessor);
        return factory;
    }

    @KafkaListener(
            id = "${some.listener-id}",
            topics = "${some.topic}",
            groupId = "${some.group-id}",
            containerFactory = "finiteRequeueingKafkaListenerContainerFactory"
    )
    public void consume(
            @Payload WebhookNotificationMessage message,
            @Header(KafkaHeaders.RECEIVED_MESSAGE_KEY) String key,
            @Header(KafkaHeaders.RECEIVED_PARTITION_ID) int partition,
            @Header(KafkaHeaders.RECEIVED_TOPIC) String topic,
            @Header(KafkaHeaders.RECEIVED_TIMESTAMP) long ts
    ) throws Exception {
        // Do the thing, maybe throw an exception
    }
}
FiniteRequeueingRecordRecoverer
public class FiniteRequeueingRecordRecoverer implements ConsumerRecordRecoverer {

    private final Logger logger = LoggerLike.getLogger(FiniteRequeueingRecordRecoverer.class);

    private KafkaTemplate<String, SpecificRecord> kafkaTemplate;
    private Integer maxRequeues;

    public FiniteRequeueingRecordRecoverer(KafkaTemplate<String, SpecificRecord> kafkaTemplate, Integer maxRequeues) {
        this.kafkaTemplate = kafkaTemplate;
        this.maxRequeues = maxRequeues;
    }

    @Override
    public void accept(ConsumerRecord<?, ?> consumerRecord, Exception e) {
        // Not sure the substance of this recoverer is relevant...but if so:
        // If the retry number in the Avro record is < this.maxRequeues,
        // increment the retries and re-enqueue this message, then move on.
        // If retries have been exhausted, do not requeue; send to a dead
        // letter or just abandon.
    }
}
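For concreteness, a minimal sketch of what the elided accept(...) body could look like. It assumes WebhookNotificationMessage is the Avro SpecificRecord type consumed above; the getRetries()/setRetries() accessors and the ".DLT" dead-letter suffix are hypothetical, not from the original. The sends go through executeInTransaction because the template's producer factory is transactional (transactional.id is set), so a plain send() outside a transaction would fail:

@Override
public void accept(ConsumerRecord<?, ?> consumerRecord, Exception e) {
    WebhookNotificationMessage value = (WebhookNotificationMessage) consumerRecord.value();
    String key = (String) consumerRecord.key();
    if (value.getRetries() < this.maxRequeues) { // hypothetical accessor
        value.setRetries(value.getRetries() + 1); // hypothetical accessor
        // Re-enqueue on the same topic for another pass.
        this.kafkaTemplate.executeInTransaction(t -> t.send(consumerRecord.topic(), key, value));
    } else {
        // Retries exhausted: route to a dead-letter topic (suffix is an assumption).
        this.kafkaTemplate.executeInTransaction(t -> t.send(consumerRecord.topic() + ".DLT", key, value));
        logger.error("Exhausted requeues for " + consumerRecord.topic() + "-"
                + consumerRecord.partition() + "@" + consumerRecord.offset(), e);
    }
}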
Answer (score: 0):
The DefaultAfterRollbackProcessor needs a KafkaTemplate in order to send the recovered record's offset to a new transaction. We should log a warning if commitRecovered is true and there is no KafkaTemplate.
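Based on that answer, a sketch of how the finiteRequeueingRollbackProcessor bean above could be rewired, assuming the constructor overload taking (recoverer, backOff, KafkaOperations, commitRecovered) that spring-kafka 2.3.x provides:

@Bean
@Lazy
DefaultAfterRollbackProcessor finiteRequeueingRollbackProcessor(
        FiniteRequeueingRecordRecoverer finiteRequeueingRecordRecoverer,
        ConsumerRetryConfig consumerRetryConfig,
        KafkaTemplate<String, SpecificRecord> avroMessageKafkaTemplate
) {
    // With the template available, the processor can send the recovered record's
    // offset to a new transaction, so commitRecovered=true actually takes effect.
    return new DefaultAfterRollbackProcessor(
            finiteRequeueingRecordRecoverer,
            new FixedBackOff(
                    consumerRetryConfig.getRetryWaitInterval(),
                    consumerRetryConfig.getMaxRetries()
            ),
            avroMessageKafkaTemplate,
            true // commitRecovered
    );
}

The recovered record's offset should then be committed in its own transaction, so a restart would resume past already-recovered records instead of reprocessing them.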