Use of org.springframework.kafka.core.ProducerFactory in project spring-kafka by spring-projects.
From the class KafkaMessageListenerContainerTests, method testDefinedPartitions, which verifies that containers seeded with explicit TopicPartitionOffset assignments consume only their partitions and honor absolute and relative initial offsets:
@Test
public void testDefinedPartitions() throws Exception {
    this.logger.info("Start defined parts");
    Map<String, Object> props = KafkaTestUtils.consumerProps("test13", "false", embeddedKafka);
    TopicPartitionOffset topic1Partition0 = new TopicPartitionOffset(topic13, 0, 0L);
    CountDownLatch initialConsumersLatch = new CountDownLatch(2);
    // factory whose consumers signal the latch on every poll()
    DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<Integer, String>(props) {

        @Override
        protected KafkaConsumer<Integer, String> createKafkaConsumer(Map<String, Object> configs) {
            assertThat(configs).containsKey(ConsumerConfig.MAX_POLL_RECORDS_CONFIG);
            return new KafkaConsumer<Integer, String>(props) {

                @Override
                public ConsumerRecords<Integer, String> poll(Duration timeout) {
                    try {
                        return super.poll(timeout);
                    }
                    finally {
                        initialConsumersLatch.countDown();
                    }
                }

            };
        }

    };
    ContainerProperties container1Props = new ContainerProperties(topic1Partition0);
    CountDownLatch latch1 = new CountDownLatch(2);
    container1Props.setMessageListener((MessageListener<Integer, String>) message -> {
        logger.info("defined part: " + message);
        latch1.countDown();
    });
    Properties defaultProperties = new Properties();
    defaultProperties.setProperty(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, "42");
    // chained Properties: max.poll.records is only visible through the defaults
    Properties consumerProperties = new Properties(defaultProperties);
    container1Props.setKafkaConsumerProperties(consumerProperties);
    CountDownLatch stubbingComplete1 = new CountDownLatch(1);
    KafkaMessageListenerContainer<Integer, String> container1 =
            spyOnContainer(new KafkaMessageListenerContainer<>(cf, container1Props), stubbingComplete1);
    container1.setBeanName("b1");
    container1.start();
    CountDownLatch stopLatch1 = new CountDownLatch(1);
    willAnswer(invocation -> {
        try {
            return invocation.callRealMethod();
        }
        finally {
            stopLatch1.countDown();
        }
    }).given(spyOnConsumer(container1)).commitSync(anyMap(), any());
    stubbingComplete1.countDown();
    TopicPartitionOffset topic1Partition1 = new TopicPartitionOffset(topic13, 1, 0L);
    ContainerProperties container2Props = new ContainerProperties(topic1Partition1);
    CountDownLatch latch2 = new CountDownLatch(2);
    container2Props.setMessageListener((MessageListener<Integer, String>) message -> {
        logger.info("defined part: " + message);
        latch2.countDown();
    });
    container2Props.setKafkaConsumerProperties(consumerProperties);
    CountDownLatch stubbingComplete2 = new CountDownLatch(1);
    KafkaMessageListenerContainer<Integer, String> container2 =
            spyOnContainer(new KafkaMessageListenerContainer<>(cf, container2Props), stubbingComplete2);
    container2.setBeanName("b2");
    container2.start();
    CountDownLatch stopLatch2 = new CountDownLatch(1);
    willAnswer(invocation -> {
        try {
            return invocation.callRealMethod();
        }
        finally {
            stopLatch2.countDown();
        }
    }).given(spyOnConsumer(container2)).commitSync(anyMap(), any());
    stubbingComplete2.countDown();
    assertThat(initialConsumersLatch.await(20, TimeUnit.SECONDS)).isTrue();
    Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
    ProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
    KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf);
    template.setDefaultTopic(topic13);
    // two records per partition
    template.sendDefault(0, 0, "foo");
    template.sendDefault(1, 2, "bar");
    template.sendDefault(0, 0, "baz");
    template.sendDefault(1, 2, "qux");
    template.flush();
    assertThat(latch1.await(60, TimeUnit.SECONDS)).isTrue();
    assertThat(latch2.await(60, TimeUnit.SECONDS)).isTrue();
    assertThat(stopLatch1.await(60, TimeUnit.SECONDS)).isTrue();
    container1.stop();
    assertThat(stopLatch2.await(60, TimeUnit.SECONDS)).isTrue();
    container2.stop();
    cf = new DefaultKafkaConsumerFactory<>(props);
    // reset earliest
    ContainerProperties container3Props = new ContainerProperties(topic1Partition0, topic1Partition1);
    CountDownLatch latch3 = new CountDownLatch(4);
    container3Props.setMessageListener((MessageListener<Integer, String>) message -> {
        logger.info("defined part e: " + message);
        latch3.countDown();
    });
    final CountDownLatch listenerConsumerAvailableLatch = new CountDownLatch(1);
    final CountDownLatch listenerConsumerStartLatch = new CountDownLatch(1);
    CountDownLatch stubbingComplete3 = new CountDownLatch(1);
    KafkaMessageListenerContainer<Integer, String> resettingContainer =
            spyOnContainer(new KafkaMessageListenerContainer<Integer, String>(cf, container3Props), stubbingComplete3);
    stubSetRunning(listenerConsumerAvailableLatch, listenerConsumerStartLatch, resettingContainer);
    resettingContainer.setBeanName("b3");
    Executors.newSingleThreadExecutor().submit(resettingContainer::start);
    CountDownLatch stopLatch3 = new CountDownLatch(1);
    assertThat(listenerConsumerAvailableLatch.await(60, TimeUnit.SECONDS)).isTrue();
    willAnswer(invocation -> {
        try {
            return invocation.callRealMethod();
        }
        finally {
            stopLatch3.countDown();
        }
    }).given(spyOnConsumer(resettingContainer)).commitSync(anyMap(), any());
    stubbingComplete3.countDown();
    listenerConsumerStartLatch.countDown();
    assertThat(latch3.await(60, TimeUnit.SECONDS)).isTrue();
    assertThat(stopLatch3.await(60, TimeUnit.SECONDS)).isTrue();
    resettingContainer.stop();
    assertThat(latch3.getCount()).isEqualTo(0L);
    cf = new DefaultKafkaConsumerFactory<>(props);
    // reset beginning for part 0, minus one for part 1
    topic1Partition0 = new TopicPartitionOffset(topic13, 0, -1000L);
    topic1Partition1 = new TopicPartitionOffset(topic13, 1, -1L);
    ContainerProperties container4Props = new ContainerProperties(topic1Partition0, topic1Partition1);
    CountDownLatch latch4 = new CountDownLatch(3);
    AtomicReference<String> receivedMessage = new AtomicReference<>();
    container4Props.setMessageListener((MessageListener<Integer, String>) message -> {
        logger.info("defined part 0, -1: " + message);
        receivedMessage.set(message.value());
        latch4.countDown();
    });
    CountDownLatch stubbingComplete4 = new CountDownLatch(1);
    resettingContainer = spyOnContainer(new KafkaMessageListenerContainer<>(cf, container4Props), stubbingComplete4);
    resettingContainer.setBeanName("b4");
    resettingContainer.start();
    CountDownLatch stopLatch4 = new CountDownLatch(1);
    willAnswer(invocation -> {
        try {
            return invocation.callRealMethod();
        }
        finally {
            stopLatch4.countDown();
        }
    }).given(spyOnConsumer(resettingContainer)).commitSync(anyMap(), any());
    stubbingComplete4.countDown();
    assertThat(latch4.await(60, TimeUnit.SECONDS)).isTrue();
    assertThat(stopLatch4.await(60, TimeUnit.SECONDS)).isTrue();
    resettingContainer.stop();
    assertThat(receivedMessage.get()).isIn("baz", "qux");
    assertThat(latch4.getCount()).isEqualTo(0L);
    // reset plus one
    template.sendDefault(0, 0, "FOO");
    template.sendDefault(1, 2, "BAR");
    template.flush();
    topic1Partition0 = new TopicPartitionOffset(topic13, 0, 1L);
    topic1Partition1 = new TopicPartitionOffset(topic13, 1, 1L);
    ContainerProperties container5Props = new ContainerProperties(topic1Partition0, topic1Partition1);
    final CountDownLatch latch5 = new CountDownLatch(4);
    final List<String> messages = new ArrayList<>();
    container5Props.setMessageListener((MessageListener<Integer, String>) message -> {
        logger.info("defined part 1: " + message);
        messages.add(message.value());
        latch5.countDown();
    });
    CountDownLatch stubbingComplete5 = new CountDownLatch(1);
    resettingContainer = spyOnContainer(new KafkaMessageListenerContainer<>(cf, container5Props), stubbingComplete5);
    resettingContainer.setBeanName("b5");
    resettingContainer.start();
    CountDownLatch stopLatch5 = new CountDownLatch(1);
    willAnswer(invocation -> {
        try {
            return invocation.callRealMethod();
        }
        finally {
            stopLatch5.countDown();
        }
    }).given(spyOnConsumer(resettingContainer)).commitSync(anyMap(), any());
    stubbingComplete5.countDown();
    assertThat(latch5.await(60, TimeUnit.SECONDS)).isTrue();
    assertThat(stopLatch5.await(60, TimeUnit.SECONDS)).isTrue();
    resettingContainer.stop();
    assertThat(messages).contains("baz", "qux", "FOO", "BAR");
    this.logger.info("+++++++++++++++++++++ Start relative reset");
    template.sendDefault(0, 0, "BAZ");
    template.sendDefault(1, 2, "QUX");
    template.sendDefault(0, 0, "FIZ");
    template.sendDefault(1, 2, "BUZ");
    template.flush();
    // relative seeks: +1 from current position for part 0, -1 for part 1
    topic1Partition0 = new TopicPartitionOffset(topic13, 0, 1L, true);
    topic1Partition1 = new TopicPartitionOffset(topic13, 1, -1L, true);
    ContainerProperties container6Props = new ContainerProperties(topic1Partition0, topic1Partition1);
    final CountDownLatch latch6 = new CountDownLatch(4);
    final List<String> messages6 = new ArrayList<>();
    container6Props.setMessageListener((MessageListener<Integer, String>) message -> {
        logger.info("defined part relative: " + message);
        messages6.add(message.value());
        latch6.countDown();
    });
    CountDownLatch stubbingComplete6 = new CountDownLatch(1);
    resettingContainer = spyOnContainer(new KafkaMessageListenerContainer<>(cf, container6Props), stubbingComplete6);
    resettingContainer.setBeanName("b6");
    resettingContainer.start();
    CountDownLatch stopLatch6 = new CountDownLatch(1);
    willAnswer(invocation -> {
        try {
            return invocation.callRealMethod();
        }
        finally {
            stopLatch6.countDown();
        }
    }).given(spyOnConsumer(resettingContainer)).commitSync(anyMap(), any());
    stubbingComplete6.countDown();
    assertThat(latch6.await(60, TimeUnit.SECONDS)).isTrue();
    assertThat(stopLatch6.await(60, TimeUnit.SECONDS)).isTrue();
    resettingContainer.stop();
    assertThat(messages6).hasSize(4);
    assertThat(messages6).contains("FIZ", "BAR", "QUX", "BUZ");
    this.logger.info("Stop auto parts");
}
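The key ingredient above is TopicPartitionOffset: the container skips consumer-group assignment and seeks each listed partition itself, treating a plain offset as absolute (negative values count back from the end of the partition) and, with the extra boolean, as relative to the current position. A minimal sketch of the same wiring outside the test harness; the broker address, topic name, and group id are placeholder assumptions:

import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.IntegerDeserializer;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.listener.ContainerProperties;
import org.springframework.kafka.listener.KafkaMessageListenerContainer;
import org.springframework.kafka.listener.MessageListener;
import org.springframework.kafka.support.TopicPartitionOffset;

public class DefinedPartitionsSketch {

    public static void main(String[] args) {
        Map<String, Object> props = Map.of(
                ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092",  // assumption
                ConsumerConfig.GROUP_ID_CONFIG, "sketch-group",             // assumption
                ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, IntegerDeserializer.class,
                ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(props);
        ContainerProperties containerProps = new ContainerProperties(
                new TopicPartitionOffset("some-topic", 0, 0L),    // partition 0 from the beginning
                new TopicPartitionOffset("some-topic", 1, -1L));  // partition 1 from one before the end
        containerProps.setMessageListener((MessageListener<Integer, String>) record ->
                System.out.println("received: " + record));
        KafkaMessageListenerContainer<Integer, String> container =
                new KafkaMessageListenerContainer<>(cf, containerProps);
        container.start();
    }
}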
Use of org.springframework.kafka.core.ProducerFactory in project spring-kafka by spring-projects.
From the class KafkaMessageListenerContainerTests, method testBatchAck, which verifies that AckMode.BATCH commits offsets once per poll, after the whole batch has been processed:
@Test
public void testBatchAck() throws Exception {
    logger.info("Start batch ack");
    Map<String, Object> props = KafkaTestUtils.consumerProps("test6", "false", embeddedKafka);
    DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(props);
    ContainerProperties containerProps = new ContainerProperties(topic7);
    containerProps.setMessageListener((MessageListener<Integer, String>) message -> {
        logger.info("batch ack: " + message);
    });
    containerProps.setSyncCommits(true);
    containerProps.setAckMode(AckMode.BATCH);
    containerProps.setPollTimeout(100);
    CountDownLatch stubbingComplete = new CountDownLatch(1);
    KafkaMessageListenerContainer<Integer, String> container =
            spyOnContainer(new KafkaMessageListenerContainer<>(cf, containerProps), stubbingComplete);
    container.setBeanName("testBatchAcks");
    container.start();
    Consumer<?, ?> containerConsumer = spyOnConsumer(container);
    final CountDownLatch firstBatchLatch = new CountDownLatch(1);
    final CountDownLatch latch = new CountDownLatch(2);
    // count down whenever a commit carries offset 2, i.e. a whole partition's batch
    willAnswer(invocation -> {
        Map<TopicPartition, OffsetAndMetadata> map = invocation.getArgument(0);
        for (Entry<TopicPartition, OffsetAndMetadata> entry : map.entrySet()) {
            if (entry.getValue().offset() == 2) {
                firstBatchLatch.countDown();
            }
        }
        try {
            return invocation.callRealMethod();
        }
        finally {
            for (Entry<TopicPartition, OffsetAndMetadata> entry : map.entrySet()) {
                if (entry.getValue().offset() == 2) {
                    latch.countDown();
                }
            }
        }
    }).given(containerConsumer).commitSync(anyMap(), any());
    stubbingComplete.countDown();
    Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
    ProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
    KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf);
    template.setDefaultTopic(topic7);
    template.sendDefault(0, 0, "foo");
    template.sendDefault(0, 0, "baz");
    template.sendDefault(1, 0, "bar");
    template.sendDefault(1, 0, "qux");
    template.flush();
    assertThat(firstBatchLatch.await(9, TimeUnit.SECONDS)).isTrue();
    assertThat(latch.await(60, TimeUnit.SECONDS)).isTrue();
    // both partitions should now be committed at offset 2
    Consumer<Integer, String> consumer = cf.createConsumer();
    consumer.assign(Arrays.asList(new TopicPartition(topic7, 0), new TopicPartition(topic7, 1)));
    assertThat(consumer.position(new TopicPartition(topic7, 0))).isEqualTo(2);
    assertThat(consumer.position(new TopicPartition(topic7, 1))).isEqualTo(2);
    container.stop();
    consumer.close();
    logger.info("Stop batch ack");
}
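As a reference point, the configuration this test exercises reduces to three container settings; a minimal sketch (the topic name is a placeholder) showing what makes the container commit synchronously, once per poll, for the whole batch:

import java.util.Map;

import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.listener.ContainerProperties;
import org.springframework.kafka.listener.ContainerProperties.AckMode;
import org.springframework.kafka.listener.KafkaMessageListenerContainer;
import org.springframework.kafka.listener.MessageListener;

public class BatchAckSketch {

    public static KafkaMessageListenerContainer<Integer, String> build(Map<String, Object> consumerProps) {
        // consumerProps should set enable.auto.commit=false so the container owns commits
        DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(consumerProps);
        ContainerProperties containerProps = new ContainerProperties("some-topic");  // placeholder topic
        containerProps.setSyncCommits(true);       // commitSync() rather than commitAsync()
        containerProps.setAckMode(AckMode.BATCH);  // one commit per poll() batch
        containerProps.setPollTimeout(100);
        containerProps.setMessageListener((MessageListener<Integer, String>) record ->
                System.out.println("batch ack: " + record));
        return new KafkaMessageListenerContainer<>(cf, containerProps);
    }
}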
Use of org.springframework.kafka.core.ProducerFactory in project spring-kafka by spring-projects.
From the class KafkaMessageListenerContainerTests, method testExceptionWhenCommitAfterRebalance, which blocks the listener past max.poll.interval.ms to force a rebalance and verifies that consumption resumes afterwards:
@Test
public void testExceptionWhenCommitAfterRebalance() throws Exception {
    final CountDownLatch rebalanceLatch = new CountDownLatch(2);
    final CountDownLatch consumeFirstLatch = new CountDownLatch(1);
    final CountDownLatch consumeLatch = new CountDownLatch(2);
    Map<String, Object> props = KafkaTestUtils.consumerProps("test19", "false", embeddedKafka);
    props.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, 3_000);
    DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(props);
    ContainerProperties containerProps = new ContainerProperties(topic19);
    containerProps.setMessageListener((MessageListener<Integer, String>) message -> {
        logger.warn("listener: " + message);
        consumeFirstLatch.countDown();
        if (consumeLatch.getCount() > 1) {
            try {
                // block longer than max.poll.interval.ms to trigger a rebalance
                Thread.sleep(5_000);
            }
            catch (InterruptedException e1) {
                Thread.currentThread().interrupt();
            }
        }
        consumeLatch.countDown();
    });
    containerProps.setSyncCommits(true);
    containerProps.setAckMode(AckMode.BATCH);
    containerProps.setPollTimeout(100);
    Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
    ProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
    KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf);
    template.setDefaultTopic(topic19);
    containerProps.setConsumerRebalanceListener(new ConsumerRebalanceListener() {

        @Override
        public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
        }

        @Override
        public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
            logger.warn("rebalance occurred.");
            rebalanceLatch.countDown();
        }

    });
    KafkaMessageListenerContainer<Integer, String> container = new KafkaMessageListenerContainer<>(cf, containerProps);
    container.setBeanName("testContainerException");
    container.start();
    ContainerTestUtils.waitForAssignment(container, embeddedKafka.getPartitionsPerTopic());
    template.sendDefault(0, 0, "a");
    assertThat(consumeFirstLatch.await(60, TimeUnit.SECONDS)).isTrue();
    // should be rebalanced and consume again
    boolean rebalancedForTooLongBetweenPolls = rebalanceLatch.await(60, TimeUnit.SECONDS);
    int n = 0;
    while (!rebalancedForTooLongBetweenPolls && n++ < 3) {
        // try a few times in case the rebalance was delayed
        template.sendDefault(0, 0, "a");
        rebalancedForTooLongBetweenPolls = rebalanceLatch.await(60, TimeUnit.SECONDS);
    }
    if (!rebalancedForTooLongBetweenPolls) {
        logger.error("Rebalance did not occur - perhaps the CI server is too busy, don't fail the test");
    }
    assertThat(consumeLatch.await(60, TimeUnit.SECONDS)).isTrue();
    container.stop();
}
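The rebalance here is induced, not mocked: the listener sleeps for 5 seconds while max.poll.interval.ms is 3 seconds, so the broker evicts the consumer and reassigns its partitions. A hedged sketch of the two pieces involved, the interval override and the rebalance callback (the printed messages are placeholders):

import java.util.Collection;
import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.common.TopicPartition;
import org.springframework.kafka.listener.ContainerProperties;

public class RebalanceSketch {

    public static void configure(Map<String, Object> consumerProps, ContainerProperties containerProps) {
        // a listener that blocks between polls for longer than this is evicted from the group
        consumerProps.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, 3_000);
        containerProps.setConsumerRebalanceListener(new ConsumerRebalanceListener() {

            @Override
            public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
                System.out.println("revoked: " + partitions);
            }

            @Override
            public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
                System.out.println("assigned: " + partitions);
            }

        });
    }
}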
Use of org.springframework.kafka.core.ProducerFactory in project spring-kafka by spring-projects.
From the class KafkaMessageListenerContainerTests, method testCommitsAreFlushedOnStop, which verifies that a pending offset commit is flushed when the container stops:
@SuppressWarnings("unchecked")
@Test
public void testCommitsAreFlushedOnStop() throws Exception {
    Map<String, Object> props = KafkaTestUtils.consumerProps("flushedOnStop", "false", embeddedKafka);
    DefaultKafkaConsumerFactory<Integer, String> cf = spy(new DefaultKafkaConsumerFactory<>(props));
    AtomicReference<Consumer<Integer, String>> consumer = new AtomicReference<>();
    // capture a spy on the consumer the factory creates so commits can be verified
    willAnswer(inv -> {
        consumer.set((Consumer<Integer, String>) spy(inv.callRealMethod()));
        return consumer.get();
    }).given(cf).createConsumer(any(), any(), any(), any());
    ContainerProperties containerProps = new ContainerProperties(topic5);
    containerProps.setAckCount(1);
    // set large values, ensuring that commits don't happen before `stop()`
    containerProps.setAckTime(20000);
    containerProps.setAckCount(20000);
    containerProps.setAckMode(AckMode.COUNT_TIME);
    containerProps.setAssignmentCommitOption(AssignmentCommitOption.ALWAYS);
    final CountDownLatch latch = new CountDownLatch(4);
    containerProps.setMessageListener((MessageListener<Integer, String>) message -> {
        logger.info("flushed: " + message);
        latch.countDown();
    });
    KafkaMessageListenerContainer<Integer, String> container = new KafkaMessageListenerContainer<>(cf, containerProps);
    container.setBeanName("testManualFlushed");
    container.start();
    ContainerTestUtils.waitForAssignment(container, embeddedKafka.getPartitionsPerTopic());
    Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
    ProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
    KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf);
    template.setDefaultTopic(topic5);
    template.sendDefault(0, 0, "foo");
    template.sendDefault(1, 2, "bar");
    template.flush();
    Thread.sleep(300);
    template.sendDefault(0, 0, "fiz");
    template.sendDefault(1, 2, "buz");
    template.flush();
    // Wait until all four records have been delivered to the listener
    assertThat(latch.await(60, TimeUnit.SECONDS)).isTrue();
    // Verify that just the initial commit is processed before stop
    verify(consumer.get(), times(1)).commitSync(anyMap(), any());
    container.stop();
    // Verify that a commit has been made on stop
    verify(consumer.get(), times(2)).commitSync(anyMap(), any());
}
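The trick in this test is to make both COUNT_TIME thresholds unreachably large so that no commit can fire while the container is running; the only commits left are the initial one from AssignmentCommitOption.ALWAYS and the flush performed by stop(). A minimal sketch of that configuration:

import org.springframework.kafka.listener.ContainerProperties;
import org.springframework.kafka.listener.ContainerProperties.AckMode;
import org.springframework.kafka.listener.ContainerProperties.AssignmentCommitOption;

public final class FlushOnStopSketch {

    public static void apply(ContainerProperties containerProps) {
        containerProps.setAckMode(AckMode.COUNT_TIME);  // commit when either threshold trips
        containerProps.setAckTime(20_000);              // ...after 20 seconds
        containerProps.setAckCount(20_000);             // ...or after 20,000 records
        // commit the initial position as soon as partitions are assigned
        containerProps.setAssignmentCommitOption(AssignmentCommitOption.ALWAYS);
    }
}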
Use of org.springframework.kafka.core.ProducerFactory in project spring-kafka by spring-projects.
From the class TransactionalContainerTests, method testConsumeAndProduceTransactionGuts, which drives a fully mocked consumer/producer pair through a consume-process-produce transaction and verifies the transactional call sequence:
@SuppressWarnings({ "rawtypes", "unchecked", "deprecation" })
private void testConsumeAndProduceTransactionGuts(boolean handleError, AckMode ackMode, EOSMode eosMode,
        boolean stopWhenFenced) throws Exception {

    Consumer consumer = mock(Consumer.class);
    AtomicBoolean assigned = new AtomicBoolean();
    final TopicPartition topicPartition = new TopicPartition("foo", 0);
    // deliver an assignment as soon as the container subscribes
    willAnswer(i -> {
        ((ConsumerRebalanceListener) i.getArgument(1))
                .onPartitionsAssigned(Collections.singletonList(topicPartition));
        assigned.set(true);
        return null;
    }).given(consumer).subscribe(any(Collection.class), any(ConsumerRebalanceListener.class));
    ConsumerRecords records = new ConsumerRecords(Collections.singletonMap(topicPartition,
            Collections.singletonList(new ConsumerRecord<>("foo", 0, 0, "key", "value"))));
    ConsumerRecords empty = new ConsumerRecords(Collections.emptyMap());
    final AtomicBoolean done = new AtomicBoolean();
    // first poll returns a single record, subsequent polls return nothing
    willAnswer(i -> {
        if (done.compareAndSet(false, true)) {
            return records;
        }
        else {
            Thread.sleep(500);
            return empty;
        }
    }).given(consumer).poll(any(Duration.class));
    ConsumerFactory cf = mock(ConsumerFactory.class);
    willReturn(consumer).given(cf).createConsumer("group", "", null, KafkaTestUtils.defaultPropertyOverrides());
    Producer producer = mock(Producer.class);
    if (stopWhenFenced) {
        willAnswer(inv -> {
            if (assigned.get()) {
                throw new ProducerFencedException("fenced");
            }
            return null;
        }).given(producer).sendOffsetsToTransaction(any(), any(ConsumerGroupMetadata.class));
    }
    given(producer.send(any(), any())).willReturn(new SettableListenableFuture<>());
    final CountDownLatch closeLatch = new CountDownLatch(2);
    willAnswer(i -> {
        closeLatch.countDown();
        return null;
    }).given(producer).close(any());
    ProducerFactory pf = mock(ProducerFactory.class);
    given(pf.isProducerPerConsumerPartition()).willReturn(true);
    given(pf.transactionCapable()).willReturn(true);
    final List<String> transactionalIds = new ArrayList<>();
    willAnswer(i -> {
        transactionalIds.add(TransactionSupport.getTransactionIdSuffix());
        return producer;
    }).given(pf).createProducer(isNull());
    KafkaTransactionManager tm = new KafkaTransactionManager(pf);
    ContainerProperties props = new ContainerProperties("foo");
    props.setAckMode(ackMode);
    props.setGroupId("group");
    props.setTransactionManager(tm);
    props.setAssignmentCommitOption(AssignmentCommitOption.ALWAYS);
    props.setEosMode(eosMode);
    props.setStopContainerWhenFenced(stopWhenFenced);
    ConsumerGroupMetadata consumerGroupMetadata = new ConsumerGroupMetadata("group");
    given(consumer.groupMetadata()).willReturn(consumerGroupMetadata);
    final KafkaTemplate template = new KafkaTemplate(pf);
    if (AckMode.MANUAL_IMMEDIATE.equals(ackMode)) {
        props.setMessageListener((AcknowledgingMessageListener<Object, Object>) (data, acknowledgment) -> {
            template.send("bar", "baz");
            if (handleError) {
                throw new RuntimeException("fail");
            }
            acknowledgment.acknowledge();
        });
    }
    else {
        props.setMessageListener((MessageListener) m -> {
            template.send("bar", "baz");
            if (handleError) {
                throw new RuntimeException("fail");
            }
        });
    }
    KafkaMessageListenerContainer container = new KafkaMessageListenerContainer<>(cf, props);
    container.setBeanName("commit");
    if (handleError) {
        container.setCommonErrorHandler(new CommonErrorHandler() {
        });
    }
    CountDownLatch stopEventLatch = new CountDownLatch(1);
    AtomicReference<ConsumerStoppedEvent> stopEvent = new AtomicReference<>();
    container.setApplicationEventPublisher(event -> {
        if (event instanceof ConsumerStoppedEvent) {
            stopEvent.set((ConsumerStoppedEvent) event);
            stopEventLatch.countDown();
        }
    });
    container.start();
    assertThat(closeLatch.await(10, TimeUnit.SECONDS)).isTrue();
    InOrder inOrder = inOrder(producer);
    inOrder.verify(producer).beginTransaction();
    inOrder.verify(producer).sendOffsetsToTransaction(
            Collections.singletonMap(topicPartition, new OffsetAndMetadata(0)), consumerGroupMetadata);
    if (stopWhenFenced) {
        assertThat(stopEventLatch.await(10, TimeUnit.SECONDS)).isTrue();
        assertThat(stopEvent.get().getReason()).isEqualTo(Reason.FENCED);
    }
    else {
        inOrder.verify(producer).commitTransaction();
        inOrder.verify(producer).close(any());
        inOrder.verify(producer).beginTransaction();
        ArgumentCaptor<ProducerRecord> captor = ArgumentCaptor.forClass(ProducerRecord.class);
        inOrder.verify(producer).send(captor.capture(), any(Callback.class));
        assertThat(captor.getValue()).isEqualTo(new ProducerRecord("bar", "baz"));
        inOrder.verify(producer).sendOffsetsToTransaction(
                Collections.singletonMap(topicPartition, new OffsetAndMetadata(1)), consumerGroupMetadata);
        inOrder.verify(producer).commitTransaction();
        inOrder.verify(producer).close(any());
        container.stop();
        verify(pf, times(2)).createProducer(isNull());
        verifyNoMoreInteractions(producer);
        // both producers were created with the same transactional id suffix
        assertThat(transactionalIds.get(0)).isEqualTo("group.foo.0");
        assertThat(transactionalIds.get(1)).isEqualTo("group.foo.0");
        assertThat(stopEventLatch.await(10, TimeUnit.SECONDS)).isTrue();
        assertThat(stopEvent.get().getReason()).isEqualTo(Reason.NORMAL);
    }
    assertThat(stopEvent.get().getSource()).isSameAs(container);
}
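Outside a test, the same consume-process-produce pattern needs only a transaction-capable producer factory and a KafkaTransactionManager on the container; sends from the listener and the consumed offsets then commit or roll back as one unit. A sketch under those assumptions (topic names and the transactional-id prefix are placeholders):

import java.util.Map;

import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.listener.ContainerProperties;
import org.springframework.kafka.listener.MessageListener;
import org.springframework.kafka.transaction.KafkaTransactionManager;

public class ConsumeAndProduceTxSketch {

    public static ContainerProperties containerProps(Map<String, Object> producerConfig) {
        DefaultKafkaProducerFactory<Object, Object> pf = new DefaultKafkaProducerFactory<>(producerConfig);
        pf.setTransactionIdPrefix("tx-");  // makes the factory transaction-capable
        KafkaTemplate<Object, Object> template = new KafkaTemplate<>(pf);
        ContainerProperties props = new ContainerProperties("in-topic");  // placeholder
        props.setGroupId("group");
        // the listener's sends and the consumed offsets commit (or abort) together
        props.setTransactionManager(new KafkaTransactionManager<>(pf));
        props.setMessageListener((MessageListener<Object, Object>) record ->
                template.send("out-topic", record.value()));
        return props;
    }
}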