Usage example of org.springframework.kafka.support.Acknowledgment from the spring-projects/spring-kafka repository: class KafkaMessageListenerContainerTests, method testCommitFailsOnRevoke.
/**
 * Verifies that a {@link CommitFailedException} thrown while committing during a
 * partition revocation does not break the container: after the failed commit the
 * container must still track the remaining assignment.
 */
@SuppressWarnings({ "unchecked", "rawtypes" })
@Test
void testCommitFailsOnRevoke() throws Exception {
    // Mock factory/consumer wiring: the container requests a consumer for
    // group "grp" with client id "clientId".
    ConsumerFactory<Integer, String> cf = mock(ConsumerFactory.class);
    Consumer<Integer, String> consumer = mock(Consumer.class);
    given(cf.createConsumer(eq("grp"), eq("clientId"), isNull(), any())).willReturn(consumer);
    Map<String, Object> cfProps = new LinkedHashMap<>();
    given(cf.getConfigurationProperties()).willReturn(cfProps);
    // Two partitions with two records each; only the first poll returns data.
    final Map<TopicPartition, List<ConsumerRecord<Integer, String>>> records = new HashMap<>();
    TopicPartition topicPartition0 = new TopicPartition("foo", 0);
    records.put(topicPartition0, Arrays.asList(
            new ConsumerRecord<>("foo", 0, 0L, 1, "foo"),
            new ConsumerRecord<>("foo", 0, 1L, 1, "bar")));
    records.put(new TopicPartition("foo", 1), Arrays.asList(
            new ConsumerRecord<>("foo", 1, 0L, 1, "foo"),
            new ConsumerRecord<>("foo", 1, 1L, 1, "bar")));
    ConsumerRecords<Integer, String> consumerRecords = new ConsumerRecords<>(records);
    ConsumerRecords<Integer, String> emptyRecords = new ConsumerRecords<>(Collections.emptyMap());
    AtomicBoolean first = new AtomicBoolean(true);
    AtomicInteger rebalance = new AtomicInteger();
    AtomicReference<ConsumerRebalanceListener> rebal = new AtomicReference<>();
    CountDownLatch latch = new CountDownLatch(2);
    // Poll 1: initial rebalance assigns both partitions.
    // Poll 2: partition 0 is revoked, which triggers the commit that fails.
    given(consumer.poll(any(Duration.class))).willAnswer(i -> {
        Thread.sleep(50);
        int call = rebalance.getAndIncrement();
        if (call == 0) {
            rebal.get().onPartitionsRevoked(Collections.emptyList());
            rebal.get().onPartitionsAssigned(records.keySet());
        }
        else if (call == 1) {
            rebal.get().onPartitionsRevoked(Collections.singletonList(topicPartition0));
            rebal.get().onPartitionsAssigned(Collections.emptyList());
        }
        latch.countDown();
        return first.getAndSet(false) ? consumerRecords : emptyRecords;
    });
    // Capture the rebalance listener the container registers via subscribe().
    willAnswer(invoc -> {
        rebal.set(invoc.getArgument(1));
        return null;
    }).given(consumer).subscribe(any(Collection.class), any(ConsumerRebalanceListener.class));
    // Record commits for diagnostics; every commit after the first throws
    // CommitFailedException to simulate a commit racing the rebalance.
    // (Removed the unused 'commitCount' AtomicInteger that was never read.)
    List<Map<TopicPartition, OffsetAndMetadata>> commits = new ArrayList<>();
    AtomicBoolean firstCommit = new AtomicBoolean(true);
    willAnswer(invoc -> {
        commits.add(invoc.getArgument(0, Map.class));
        if (!firstCommit.getAndSet(false)) {
            throw new CommitFailedException();
        }
        return null;
    }).given(consumer).commitSync(any(), any());
    ContainerProperties containerProps = new ContainerProperties("foo");
    containerProps.setGroupId("grp");
    containerProps.setAckMode(AckMode.MANUAL);
    containerProps.setClientId("clientId");
    containerProps.setIdleEventInterval(100L);
    // The listener only stashes the Acknowledgment; the rebalance listener
    // below acknowledges during revocation, producing the failing commit.
    AtomicReference<Acknowledgment> acknowledgment = new AtomicReference<>();
    containerProps.setMessageListener(
            (AcknowledgingMessageListener<Object, Object>) (rec, ack) -> acknowledgment.set(ack));
    containerProps.setConsumerRebalanceListener(new ConsumerAwareRebalanceListener() {

        @Override
        public void onPartitionsRevokedBeforeCommit(Consumer<?, ?> consumer, Collection<TopicPartition> partitions) {
            if (acknowledgment.get() != null) {
                acknowledgment.get().acknowledge();
            }
        }

    });
    Properties consumerProps = new Properties();
    containerProps.setKafkaConsumerProperties(consumerProps);
    KafkaMessageListenerContainer<Integer, String> container =
            new KafkaMessageListenerContainer<>(cf, containerProps);
    container.start();
    assertThat(latch.await(10, TimeUnit.SECONDS)).isTrue();
    // Despite the failed commit, the container must reflect the post-revocation
    // assignment: only partition 1 remains.
    assertThat(container.getAssignedPartitions()).hasSize(1);
    container.stop();
}
Usage example of org.springframework.kafka.support.Acknowledgment from the spring-projects/spring-kafka repository: class KafkaMessageListenerContainerTests, method testInOrderAckPauseUntilAcked.
/**
 * Shared driver for async (out-of-order) manual-ack tests: records 1-3 are
 * acknowledged out of order while record 0 is held back, so the partition is
 * paused until record 0 is acked; offsets 4 and then 5 must then be committed.
 * @param ackMode the manual ack mode under test (MANUAL or MANUAL_IMMEDIATE).
 * @param batch true to use a batch listener, false for a record listener.
 */
@SuppressWarnings("unchecked")
private void testInOrderAckPauseUntilAcked(AckMode ackMode, boolean batch) throws Exception {
    ConsumerFactory<Integer, String> cf = mock(ConsumerFactory.class);
    Consumer<Integer, String> consumer = mock(Consumer.class);
    given(cf.createConsumer(eq("grp"), eq("clientId"), isNull(), any())).willReturn(consumer);
    // First poll delivers offsets 0-3; the second delivers offset 4.
    Map<TopicPartition, List<ConsumerRecord<Integer, String>>> records1 = new HashMap<>();
    records1.put(new TopicPartition("foo", 0), Arrays.asList(
            new ConsumerRecord<>("foo", 0, 0L, 1, "foo"),
            new ConsumerRecord<>("foo", 0, 1L, 1, "bar"),
            new ConsumerRecord<>("foo", 0, 2L, 1, "baz"),
            new ConsumerRecord<>("foo", 0, 3L, 1, "qux")));
    ConsumerRecords<Integer, String> consumerRecords1 = new ConsumerRecords<>(records1);
    Map<TopicPartition, List<ConsumerRecord<Integer, String>>> records2 = new HashMap<>();
    records2.put(new TopicPartition("foo", 0),
            Arrays.asList(new ConsumerRecord<>("foo", 0, 4L, 1, "fiz")));
    ConsumerRecords<Integer, String> consumerRecords2 = new ConsumerRecords<>(records2);
    ConsumerRecords<Integer, String> empty = new ConsumerRecords<>(Collections.emptyMap());
    // Mirror the consumer's pause/resume state so the poll stub can report
    // whether the container polled while the partition was paused.
    AtomicBoolean paused = new AtomicBoolean();
    AtomicBoolean polledWhilePaused = new AtomicBoolean();
    AtomicReference<Collection<TopicPartition>> pausedParts = new AtomicReference<>(Collections.emptySet());
    final CountDownLatch pauseLatch = new CountDownLatch(1);
    willAnswer(inv -> {
        paused.set(true);
        pausedParts.set(inv.getArgument(0));
        pauseLatch.countDown();
        return null;
    }).given(consumer).pause(any());
    willAnswer(inv -> {
        paused.set(false);
        pausedParts.set(Collections.emptySet());
        return null;
    }).given(consumer).resume(any());
    willAnswer(inv -> {
        return pausedParts.get();
    }).given(consumer).paused();
    willAnswer(inv -> {
        return Collections.singleton(new TopicPartition("foo", 0));
    }).given(consumer).assignment();
    AtomicInteger polled = new AtomicInteger();
    given(consumer.poll(any(Duration.class))).willAnswer(i -> {
        Thread.sleep(50);
        if (paused.get()) {
            polledWhilePaused.set(true);
            return empty;
        }
        else {
            if (polled.incrementAndGet() == 1) {
                return consumerRecords1;
            }
            else if (polled.get() == 2) {
                return consumerRecords2;
            }
            return empty;
        }
    });
    TopicPartitionOffset topicPartition = new TopicPartitionOffset("foo", 0);
    ContainerProperties containerProps = new ContainerProperties(topicPartition);
    containerProps.setGroupId("grp");
    // Fix: honor the ackMode parameter - this was hardcoded to AckMode.MANUAL,
    // silently ignoring the mode the caller asked to test.
    containerProps.setAckMode(ackMode);
    containerProps.setAsyncAcks(true);
    containerProps.setCommitLogLevel(Level.WARN);
    final CountDownLatch latch1 = new CountDownLatch(4);
    final CountDownLatch latch2 = new CountDownLatch(5);
    final List<Acknowledgment> acks = new ArrayList<>();
    if (batch) {
        // Batch flavor: collect the batch ack and acknowledge only once all
        // five records have been seen.
        BatchAcknowledgingMessageListener<Integer, String> batchML = (data, ack) -> {
            acks.add(ack);
            data.forEach(rec -> {
                latch1.countDown();
                latch2.countDown();
                if (latch2.getCount() == 0) {
                    ack.acknowledge();
                }
            });
        };
        containerProps.setMessageListener(batchML);
    }
    else {
        // Record flavor: after all 4 records of the first poll arrive, ack
        // records 3, 2, 1 out of order (record 0 is acked by the test body
        // later, via acks.get(0)); record 4 is acked when it arrives.
        AcknowledgingMessageListener<Integer, String> messageListener = (data, ack) -> {
            latch1.countDown();
            latch2.countDown();
            acks.add(ack);
            if (latch1.getCount() == 0 && records1.values().size() > 0
                    && records1.values().iterator().next().size() == 4) {
                acks.get(3).acknowledge();
                acks.get(2).acknowledge();
                acks.get(1).acknowledge();
            }
            if (latch2.getCount() == 0) {
                acks.get(4).acknowledge();
            }
        };
        containerProps.setMessageListener(messageListener);
    }
    // Capture each committed offset for the exact-value assertions below.
    final CountDownLatch commitLatch = new CountDownLatch(2);
    final List<Long> committed = new ArrayList<>();
    willAnswer(inv -> {
        Map<TopicPartition, OffsetAndMetadata> offsets = inv.getArgument(0);
        committed.add(offsets.values().iterator().next().offset());
        commitLatch.countDown();
        return null;
    }).given(consumer).commitSync(anyMap(), any());
    containerProps.setClientId("clientId");
    KafkaMessageListenerContainer<Integer, String> container =
            new KafkaMessageListenerContainer<>(cf, containerProps);
    container.start();
    assertThat(latch1.await(10, TimeUnit.SECONDS)).isTrue();
    // With record 0 unacked the partition must be paused...
    assertThat(pauseLatch.await(10, TimeUnit.SECONDS)).isTrue();
    // ...and acking record 0 releases the gap: offsets 4 then 5 get committed.
    acks.get(0).acknowledge();
    assertThat(latch2.await(10, TimeUnit.SECONDS)).isTrue();
    assertThat(commitLatch.await(10, TimeUnit.SECONDS)).isTrue();
    assertThat(committed.get(0)).isEqualTo(4L);
    assertThat(committed.get(1)).isEqualTo(5L);
    assertThat(polledWhilePaused.get()).isTrue();
    verify(consumer, times(2)).commitSync(any(), any());
    verify(consumer).commitSync(Map.of(new TopicPartition("foo", 0), new OffsetAndMetadata(4L)), Duration.ofMinutes(1));
    verify(consumer).commitSync(Map.of(new TopicPartition("foo", 0), new OffsetAndMetadata(5L)), Duration.ofMinutes(1));
    verify(consumer).pause(any());
    verify(consumer).resume(any());
    container.stop();
}
Usage example of org.springframework.kafka.support.Acknowledgment from the spring-projects/spring-kafka repository: class KafkaMessageListenerContainerTests, method testDelegateType.
// Verifies listener-type detection: the container must classify the ultimate
// delegate behind nested FilteringMessageListenerAdapters (SIMPLE vs
// ACKNOWLEDGING_CONSUMER_AWARE) and publish partition positions on stop.
@Test
public void testDelegateType() throws Exception {
// Real consumer against the embedded broker; group "delegate", auto-commit "false".
Map<String, Object> props = KafkaTestUtils.consumerProps("delegate", "false", embeddedKafka);
DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(props);
ContainerProperties containerProps = new ContainerProperties(topic3);
containerProps.setShutdownTimeout(60_000L);
// Captures the listener invocation stack; the frame assertions further down
// are commented out because stack traces are environment dependent.
final AtomicReference<StackTraceElement[]> trace = new AtomicReference<>();
final CountDownLatch latch1 = new CountDownLatch(1);
containerProps.setMessageListener((MessageListener<Integer, String>) record -> {
trace.set(new RuntimeException().getStackTrace());
latch1.countDown();
});
ThreadPoolTaskScheduler scheduler = new ThreadPoolTaskScheduler();
scheduler.setPoolSize(10);
scheduler.initialize();
containerProps.setConsumerTaskExecutor(scheduler);
KafkaMessageListenerContainer<Integer, String> container = new KafkaMessageListenerContainer<>(cf, containerProps);
container.setBeanName("delegate");
// Snapshot each partition's position when the ConsumerStoppingEvent fires,
// for the offset assertions after the first stop().
AtomicReference<List<TopicPartitionOffset>> offsets = new AtomicReference<>();
container.setApplicationEventPublisher(e -> {
if (e instanceof ConsumerStoppingEvent) {
ConsumerStoppingEvent event = (ConsumerStoppingEvent) e;
offsets.set(event.getPartitions().stream().map(p -> new TopicPartitionOffset(p.topic(), p.partition(), event.getConsumer().position(p, Duration.ofMillis(10_000)))).collect(Collectors.toList()));
}
});
// No group id was set on containerProps, so "delegate" presumably comes from
// the consumer factory props / bean name - NOTE(review): confirm the source.
assertThat(container.getGroupId()).isEqualTo("delegate");
container.start();
// Send one record and wait for the plain MessageListener to receive it.
Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf);
template.setDefaultTopic(topic3);
template.sendDefault(0, 0, "foo");
template.flush();
assertThat(latch1.await(10, TimeUnit.SECONDS)).isTrue();
// Stack traces are environment dependent - verified in eclipse
// assertThat(trace.get()[1].getMethodName()).contains("invokeRecordListener");
container.stop();
// Partition 0 consumed one record, so its position is 1; others remain at 0.
List<TopicPartitionOffset> list = offsets.get();
assertThat(list).isNotNull();
list.forEach(tpio -> {
if (tpio.getPartition() == 0) {
assertThat(tpio.getOffset()).isEqualTo(1);
} else {
assertThat(tpio.getOffset()).isEqualTo(0);
}
});
// A plain delegate behind two filtering adapters must still be detected as SIMPLE.
final CountDownLatch latch2 = new CountDownLatch(1);
FilteringMessageListenerAdapter<Integer, String> filtering = new FilteringMessageListenerAdapter<>(m -> {
trace.set(new RuntimeException().getStackTrace());
latch2.countDown();
}, d -> false);
// two levels of nesting
filtering = new FilteringMessageListenerAdapter<>(filtering, d -> false);
container.getContainerProperties().setMessageListener(filtering);
container.start();
assertThat(KafkaTestUtils.getPropertyValue(container, "listenerConsumer.listenerType")).isEqualTo(ListenerType.SIMPLE);
template.sendDefault(0, 0, "foo");
assertThat(latch2.await(10, TimeUnit.SECONDS)).isTrue();
// verify that the container called the right method - avoiding the creation of an Acknowledgment
// assertThat(trace.get()[1].getMethodName()).contains("onMessage"); // onMessage(d, a, c) (inner)
// assertThat(trace.get()[2].getMethodName()).contains("onMessage"); // bridge
// assertThat(trace.get()[3].getMethodName()).contains("onMessage"); // onMessage(d, a, c) (outer)
// assertThat(trace.get()[4].getMethodName()).contains("onMessage"); // onMessage(d)
// assertThat(trace.get()[5].getMethodName()).contains("onMessage"); // bridge
// assertThat(trace.get()[6].getMethodName()).contains("invokeRecordListener");
container.stop();
// An AcknowledgingConsumerAwareMessageListener delegate must be detected as
// ACKNOWLEDGING_CONSUMER_AWARE even when wrapped in a filtering adapter.
final CountDownLatch latch3 = new CountDownLatch(1);
filtering = new FilteringMessageListenerAdapter<>((AcknowledgingConsumerAwareMessageListener<Integer, String>) (d, a, c) -> {
trace.set(new RuntimeException().getStackTrace());
latch3.countDown();
}, d -> false);
container.getContainerProperties().setMessageListener(filtering);
container.start();
assertThat(KafkaTestUtils.getPropertyValue(container, "listenerConsumer.listenerType")).isEqualTo(ListenerType.ACKNOWLEDGING_CONSUMER_AWARE);
template.sendDefault(0, 0, "foo");
assertThat(latch3.await(10, TimeUnit.SECONDS)).isTrue();
// verify that the container called the 3 arg method directly
// int i = 0;
// if (trace.get()[1].getClassName().endsWith("AcknowledgingConsumerAwareMessageListener")) {
// // this frame does not appear in eclise, but does in gradle.\
// i++;
// }
// assertThat(trace.get()[i + 1].getMethodName()).contains("onMessage"); // onMessage(d, a, c)
// assertThat(trace.get()[i + 2].getMethodName()).contains("onMessage"); // bridge
// assertThat(trace.get()[i + 3].getMethodName()).contains("invokeRecordListener");
container.stop();
// A second stop() on an already-stopped container must return promptly,
// not wait out the 60s shutdown timeout.
long t = System.currentTimeMillis();
container.stop();
assertThat(System.currentTimeMillis() - t).isLessThan(5000L);
pf.destroy();
scheduler.shutdown();
}
Usage example of org.springframework.kafka.support.Acknowledgment from the spring-projects/spring-kafka repository: class MessagingMessageListenerAdapterTests, method testFallbackType.
// Verifies that a configured fallback type (String.class) is passed through to
// the record message converter when the adapter converts an incoming record.
@Test
void testFallbackType() {
    // Minimal adapter that funnels records through toMessagingMessage(), so the
    // converter interaction can be observed.
    final class FallbackAdapter extends MessagingMessageListenerAdapter<String, String>
            implements AcknowledgingMessageListener<String, String> {

        private FallbackAdapter() {
            super(null, null);
        }

        @Override
        public void onMessage(ConsumerRecord<String, String> data, Acknowledgment acknowledgment) {
            toMessagingMessage(data, acknowledgment, null);
        }

    }

    RecordMessageConverter converter = mock(RecordMessageConverter.class);
    Acknowledgment ack = mock(Acknowledgment.class);
    ConsumerRecord<String, String> consumerRecord = new ConsumerRecord<>("foo", 1, 1L, null, null);
    willReturn(new GenericMessage<>("foo")).given(converter).toMessage(consumerRecord, ack, null, String.class);
    FallbackAdapter adapter = new FallbackAdapter();
    adapter.setFallbackType(String.class);
    adapter.setMessageConverter(converter);
    adapter.onMessage(consumerRecord, ack);
    // The converter must have been invoked with the String fallback type.
    verify(converter).toMessage(consumerRecord, ack, null, String.class);
}
Usage example of org.springframework.kafka.support.Acknowledgment from the spring-projects/spring-kafka repository: class KafkaTemplateTests, method testWithMessage.
// Verifies spring-messaging Message sends through KafkaTemplate: header mapping
// on the outbound record, timestamp propagation, and the inbound mapping that
// MessagingMessageConverter.toMessage() produces from a consumed record.
@Test
void testWithMessage() {
    Map<String, Object> producerProps = KafkaTestUtils.producerProps(embeddedKafka);
    DefaultKafkaProducerFactory<Integer, String> producerFactory =
            new DefaultKafkaProducerFactory<>(producerProps);
    KafkaTemplate<Integer, String> kafkaTemplate = new KafkaTemplate<>(producerFactory, true);
    // First message: note only "foo" and the JSON types header are asserted on
    // the record below - the inbound-only RECEIVED_TOPIC header set here does
    // not appear on the outbound record.
    Message<String> message1 = MessageBuilder.withPayload("foo-message")
            .setHeader(KafkaHeaders.TOPIC, INT_KEY_TOPIC)
            .setHeader(KafkaHeaders.PARTITION_ID, 0)
            .setHeader("foo", "bar")
            .setHeader(KafkaHeaders.RECEIVED_TOPIC, "dummy")
            .build();
    kafkaTemplate.send(message1);
    ConsumerRecord<Integer, String> record1 = KafkaTestUtils.getSingleRecord(consumer, INT_KEY_TOPIC);
    assertThat(record1).has(value("foo-message"));
    Iterator<Header> iterator = record1.headers().iterator();
    assertThat(iterator.hasNext()).isTrue();
    Header next = iterator.next();
    assertThat(next.key()).isEqualTo("foo");
    assertThat(new String(next.value())).isEqualTo("bar");
    assertThat(iterator.hasNext()).isTrue();
    next = iterator.next();
    assertThat(next.key()).isEqualTo(DefaultKafkaHeaderMapper.JSON_TYPES);
    assertThat(iterator.hasNext()).as("Expected no more headers").isFalse();
    // Second message: an explicit TIMESTAMP header must propagate to the record.
    Message<String> message2 = MessageBuilder.withPayload("foo-message-2")
            .setHeader(KafkaHeaders.TOPIC, INT_KEY_TOPIC)
            .setHeader(KafkaHeaders.PARTITION_ID, 0)
            .setHeader(KafkaHeaders.TIMESTAMP, 1487694048615L)
            .setHeader("foo", "bar")
            .build();
    kafkaTemplate.send(message2);
    ConsumerRecord<Integer, String> record2 = KafkaTestUtils.getSingleRecord(consumer, INT_KEY_TOPIC);
    assertThat(record2).has(value("foo-message-2"));
    assertThat(record2).has(timestamp(1487694048615L));
    // Convert the consumed record back to a Message: the converter must expose
    // the Kafka metadata headers plus the acknowledgment, consumer and group id.
    MessagingMessageConverter messageConverter = new MessagingMessageConverter();
    Acknowledgment ack = mock(Acknowledgment.class);
    Consumer<?, ?> mockConsumer = mock(Consumer.class);
    KafkaUtils.setConsumerGroupId("test.group.id");
    Message<?> recordToMessage = messageConverter.toMessage(record2, ack, mockConsumer, String.class);
    assertThat(recordToMessage.getHeaders().get(KafkaHeaders.TIMESTAMP_TYPE)).isEqualTo("CREATE_TIME");
    assertThat(recordToMessage.getHeaders().get(KafkaHeaders.RECEIVED_TIMESTAMP)).isEqualTo(1487694048615L);
    assertThat(recordToMessage.getHeaders().get(KafkaHeaders.RECEIVED_TOPIC)).isEqualTo(INT_KEY_TOPIC);
    assertThat(recordToMessage.getHeaders().get(KafkaHeaders.ACKNOWLEDGMENT)).isSameAs(ack);
    assertThat(recordToMessage.getHeaders().get(KafkaHeaders.CONSUMER)).isSameAs(mockConsumer);
    assertThat(recordToMessage.getHeaders().get("foo")).isEqualTo("bar");
    assertThat(recordToMessage.getPayload()).isEqualTo("foo-message-2");
    assertThat(recordToMessage.getHeaders().get(KafkaHeaders.GROUP_ID)).isEqualTo("test.group.id");
    KafkaUtils.clearConsumerGroupId();
    producerFactory.destroy();
}
Aggregations