Use of org.apache.kafka.clients.consumer.MockConsumer in project kafka by Apache.
From the class StreamThreadTest, method shouldNotCloseTaskAndRemoveFromTaskManagerIfProducerWasFencedWhileProcessing: the test fences a task's transactional producer while records are being processed and asserts that the resulting TaskMigratedException leaves the zombie task in the task manager until the next rebalance.
@Test
public void shouldNotCloseTaskAndRemoveFromTaskManagerIfProducerWasFencedWhileProcessing() throws Exception {
    internalTopologyBuilder.addSource(null, "source", null, null, null, topic1);
    internalTopologyBuilder.addSink("sink", "dummyTopic", null, null, null, "source");

    // exactly-once semantics are enabled via configProps(true)
    final StreamThread thread = createStreamThread(CLIENT_ID, new StreamsConfig(configProps(true)), true);

    final MockConsumer<byte[], byte[]> consumer = clientSupplier.consumer;
    consumer.updatePartitions(topic1, Collections.singletonList(new PartitionInfo(topic1, 1, null, null, null)));

    thread.setState(StreamThread.State.STARTING);
    thread.rebalanceListener().onPartitionsRevoked(Collections.emptySet());

    final Map<TaskId, Set<TopicPartition>> activeTasks = new HashMap<>();
    final List<TopicPartition> assignedPartitions = new ArrayList<>();

    // assign single partition
    assignedPartitions.add(t1p1);
    activeTasks.put(task1, Collections.singleton(t1p1));

    thread.taskManager().handleAssignment(activeTasks, emptyMap());

    final MockConsumer<byte[], byte[]> mockConsumer = (MockConsumer<byte[], byte[]>) thread.mainConsumer();
    mockConsumer.assign(assignedPartitions);
    mockConsumer.updateBeginningOffsets(Collections.singletonMap(t1p1, 0L));
    thread.rebalanceListener().onPartitionsAssigned(assignedPartitions);

    thread.runOnce();
    assertThat(thread.activeTasks().size(), equalTo(1));

    final MockProducer<byte[], byte[]> producer = clientSupplier.producers.get(0);

    // change consumer subscription from "pattern" to "manual" to be able to call .addRecords()
    consumer.updateBeginningOffsets(Collections.singletonMap(assignedPartitions.iterator().next(), 0L));
    consumer.unsubscribe();
    consumer.assign(new HashSet<>(assignedPartitions));

    consumer.addRecord(new ConsumerRecord<>(topic1, 1, 0, new byte[0], new byte[0]));
    // advance past the commit interval so the next runOnce() processes and commits
    mockTime.sleep(config.getLong(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG) + 1L);
    thread.runOnce();
    assertThat(producer.history().size(), equalTo(1));

    mockTime.sleep(config.getLong(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG) + 1L);
    TestUtils.waitForCondition(() -> producer.commitCount() == 1, "StreamsThread did not commit transaction.");

    // fence the producer: every subsequent transactional call throws ProducerFencedException
    producer.fenceProducer();
    mockTime.sleep(config.getLong(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG) + 1L);
    consumer.addRecord(new ConsumerRecord<>(topic1, 1, 1, new byte[0], new byte[0]));
    try {
        thread.runOnce();
        fail("Should have thrown TaskMigratedException");
    } catch (final KafkaException expected) {
        assertTrue(expected instanceof TaskMigratedException);
        assertTrue(
            "StreamsThread removed the fenced zombie task already, should wait for rebalance to close all zombies together.",
            thread.activeTasks().stream().anyMatch(task -> task.id().equals(task1)));
    }

    assertThat(producer.commitCount(), equalTo(1L));
}
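As background for the pattern above: MockConsumer.addRecord() only accepts records for partitions the consumer is manually assigned to, and poll() needs a seeded beginning offset for a valid starting position, which is why the test switches the consumer from pattern subscription to manual assignment first. A minimal standalone sketch of that contract (the topic name, class name, and offsets here are illustrative, not part of the Kafka test):

import java.time.Duration;
import java.util.Collections;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.common.TopicPartition;

public class MockConsumerSketch {
    public static void main(final String[] args) {
        final MockConsumer<byte[], byte[]> consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
        final TopicPartition tp = new TopicPartition("input", 0);

        // addRecord() rejects records for partitions the consumer is not assigned to
        consumer.assign(Collections.singleton(tp));
        // seed a beginning offset so poll() has a valid starting position
        consumer.updateBeginningOffsets(Collections.singletonMap(tp, 0L));

        consumer.addRecord(new ConsumerRecord<>("input", 0, 0L, new byte[0], new byte[0]));
        final ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(100));
        System.out.println(records.count()); // 1
    }
}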
Use of org.apache.kafka.clients.consumer.MockConsumer in project kafka by Apache.
From the class StoreChangelogReaderTest, method shouldRequestCommittedOffsetsAndHandleTimeoutException: a MockConsumer subclass makes the first committed() call throw a TimeoutException, and the test verifies that the changelog reader recovers on a subsequent restore() pass.
@Test
public void shouldRequestCommittedOffsetsAndHandleTimeoutException() {
    final TaskId taskId = new TaskId(0, 0);

    final Task mockTask = mock(Task.class);
    if (type == ACTIVE) {
        mockTask.clearTaskTimeout();
    }
    mockTask.maybeInitTaskTimeoutOrThrow(anyLong(), anyObject());
    EasyMock.expectLastCall();

    EasyMock.expect(stateManager.changelogAsSource(tp)).andReturn(true).anyTimes();
    EasyMock.expect(storeMetadata.offset()).andReturn(5L).anyTimes();
    EasyMock.expect(stateManager.changelogOffsets()).andReturn(singletonMap(tp, 5L));
    EasyMock.expect(stateManager.taskId()).andReturn(taskId).anyTimes();
    EasyMock.replay(mockTask, stateManager, storeMetadata, store);

    final AtomicBoolean functionCalled = new AtomicBoolean(false);
    // the first committed() call times out; every later call returns offset 10
    final MockConsumer<byte[], byte[]> consumer = new MockConsumer<byte[], byte[]>(OffsetResetStrategy.EARLIEST) {
        @Override
        public Map<TopicPartition, OffsetAndMetadata> committed(final Set<TopicPartition> partitions) {
            if (functionCalled.get()) {
                return partitions.stream().collect(Collectors.toMap(Function.identity(), partition -> new OffsetAndMetadata(10L)));
            } else {
                functionCalled.set(true);
                throw new TimeoutException("KABOOM!");
            }
        }
    };

    adminClient.updateEndOffsets(Collections.singletonMap(tp, 20L));

    final StoreChangelogReader changelogReader = new StoreChangelogReader(time, config, logContext, adminClient, consumer, callback);
    changelogReader.setMainConsumer(consumer);
    changelogReader.register(tp, stateManager);

    changelogReader.restore(Collections.singletonMap(taskId, mockTask));

    // after the timeout an active changelog is still REGISTERED with no end offset; a standby moves on to RESTORING
    assertEquals(
        type == ACTIVE ? StoreChangelogReader.ChangelogState.REGISTERED : StoreChangelogReader.ChangelogState.RESTORING,
        changelogReader.changelogMetadata(tp).state());
    if (type == ACTIVE) {
        assertNull(changelogReader.changelogMetadata(tp).endOffset());
    } else {
        assertEquals(0L, (long) changelogReader.changelogMetadata(tp).endOffset());
    }
    assertTrue(functionCalled.get());
    verify(mockTask);

    // second attempt: committed() now succeeds, so restoration can proceed
    resetToDefault(mockTask);
    if (type == ACTIVE) {
        mockTask.clearTaskTimeout();
        mockTask.clearTaskTimeout();
        expectLastCall();
    }
    replay(mockTask);

    changelogReader.restore(Collections.singletonMap(taskId, mockTask));

    assertEquals(StoreChangelogReader.ChangelogState.RESTORING, changelogReader.changelogMetadata(tp).state());
    assertEquals(type == ACTIVE ? 10L : 0L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(6L, consumer.position(tp));
    verify(mockTask);
}
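The two-phase mock setup above follows EasyMock's record/replay/verify lifecycle: expectations are recorded first, replay() arms the mock, verify() checks that every expected call actually happened, and resetToDefault() clears the mock so a fresh round of expectations can be recorded, exactly as the test does between its two restore() calls. A minimal sketch with a hypothetical Dependency interface (all names below are illustrative):

import static org.easymock.EasyMock.expect;
import static org.easymock.EasyMock.expectLastCall;
import static org.easymock.EasyMock.mock;
import static org.easymock.EasyMock.replay;
import static org.easymock.EasyMock.resetToDefault;
import static org.easymock.EasyMock.verify;

public class EasyMockLifecycleSketch {
    interface Dependency { // hypothetical collaborator
        long offset();
        void clearTimeout();
    }

    public static void main(final String[] args) {
        final Dependency dep = mock(Dependency.class);

        // phase 1: record expectations, then arm the mock
        expect(dep.offset()).andReturn(5L).anyTimes();
        dep.clearTimeout();   // record the void-method call...
        expectLastCall();     // ...and register it as an expectation
        replay(dep);

        dep.offset();
        dep.clearTimeout();
        verify(dep);          // fails if an expected call never happened

        // phase 2: reset and record a fresh set of expectations
        resetToDefault(dep);
        dep.clearTimeout();
        expectLastCall();
        replay(dep);

        dep.clearTimeout();
        verify(dep);
    }
}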
Use of org.apache.kafka.clients.consumer.MockConsumer in project kafka by Apache.
From the class StoreChangelogReaderTest, method shouldRequestEndOffsetsAndHandleTimeoutException: a MockAdminClient subclass makes the first listOffsets() call throw a TimeoutException, while the MockConsumer asserts that committed() is never queried.
@Test
public void shouldRequestEndOffsetsAndHandleTimeoutException() {
    final TaskId taskId = new TaskId(0, 0);

    final Task mockTask = mock(Task.class);
    mockTask.maybeInitTaskTimeoutOrThrow(anyLong(), anyObject());
    EasyMock.expectLastCall();

    EasyMock.expect(storeMetadata.offset()).andReturn(5L).anyTimes();
    EasyMock.expect(activeStateManager.changelogOffsets()).andReturn(singletonMap(tp, 5L));
    EasyMock.expect(activeStateManager.taskId()).andReturn(taskId).anyTimes();
    EasyMock.replay(mockTask, activeStateManager, storeMetadata, store);

    final AtomicBoolean functionCalled = new AtomicBoolean(false);
    // the first listOffsets() call times out; retries fall through to the mock's default behavior
    final MockAdminClient adminClient = new MockAdminClient() {
        @Override
        public ListOffsetsResult listOffsets(final Map<TopicPartition, OffsetSpec> topicPartitionOffsets, final ListOffsetsOptions options) {
            if (functionCalled.get()) {
                return super.listOffsets(topicPartitionOffsets, options);
            } else {
                functionCalled.set(true);
                throw new TimeoutException("KABOOM!");
            }
        }
    };
    adminClient.updateEndOffsets(Collections.singletonMap(tp, 10L));

    // end offsets come from the admin client, so committed() must never be hit on this path
    final MockConsumer<byte[], byte[]> consumer = new MockConsumer<byte[], byte[]>(OffsetResetStrategy.EARLIEST) {
        @Override
        public Map<TopicPartition, OffsetAndMetadata> committed(final Set<TopicPartition> partitions) {
            throw new AssertionError("Should not trigger this function");
        }
    };

    final StoreChangelogReader changelogReader = new StoreChangelogReader(time, config, logContext, adminClient, consumer, callback);
    changelogReader.register(tp, activeStateManager);

    changelogReader.restore(Collections.singletonMap(taskId, mockTask));

    assertEquals(StoreChangelogReader.ChangelogState.REGISTERED, changelogReader.changelogMetadata(tp).state());
    assertNull(changelogReader.changelogMetadata(tp).endOffset());
    assertTrue(functionCalled.get());
    verify(mockTask);

    // second attempt: listOffsets() succeeds and the end offset is picked up
    EasyMock.resetToDefault(mockTask);
    mockTask.clearTaskTimeout();
    EasyMock.replay(mockTask);

    changelogReader.restore(Collections.singletonMap(taskId, mockTask));

    assertEquals(StoreChangelogReader.ChangelogState.RESTORING, changelogReader.changelogMetadata(tp).state());
    assertEquals(10L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(6L, consumer.position(tp));
    verify(mockTask);
}
Use of org.apache.kafka.clients.consumer.MockConsumer in project kafka by Apache.
From the class StoreChangelogReaderTest, method shouldRequestPositionAndHandleTimeoutException: a MockConsumer subclass makes position() throw a TimeoutException until a flag is flipped, and the changelog only completes once the call succeeds.
@Test
public void shouldRequestPositionAndHandleTimeoutException() {
    final TaskId taskId = new TaskId(0, 0);

    final Task mockTask = mock(Task.class);
    mockTask.clearTaskTimeout();
    mockTask.maybeInitTaskTimeoutOrThrow(anyLong(), anyObject());
    EasyMock.expectLastCall();

    EasyMock.expect(storeMetadata.offset()).andReturn(10L).anyTimes();
    EasyMock.expect(activeStateManager.changelogOffsets()).andReturn(singletonMap(tp, 10L));
    EasyMock.expect(activeStateManager.taskId()).andReturn(taskId).anyTimes();
    EasyMock.replay(mockTask, activeStateManager, storeMetadata, store);

    final AtomicBoolean clearException = new AtomicBoolean(false);
    // position() keeps timing out until clearException is flipped to true
    final MockConsumer<byte[], byte[]> consumer = new MockConsumer<byte[], byte[]>(OffsetResetStrategy.EARLIEST) {
        @Override
        public long position(final TopicPartition partition) {
            if (clearException.get()) {
                return 10L;
            } else {
                throw new TimeoutException("KABOOM!");
            }
        }
    };

    adminClient.updateEndOffsets(Collections.singletonMap(tp, 10L));

    final StoreChangelogReader changelogReader = new StoreChangelogReader(time, config, logContext, adminClient, consumer, callback);
    changelogReader.register(tp, activeStateManager);

    changelogReader.restore(Collections.singletonMap(taskId, mockTask));

    // the position() timeout prevents completion even though the end offset is already known
    assertEquals(StoreChangelogReader.ChangelogState.RESTORING, changelogReader.changelogMetadata(tp).state());
    assertTrue(changelogReader.completedChangelogs().isEmpty());
    assertEquals(10L, (long) changelogReader.changelogMetadata(tp).endOffset());
    verify(mockTask);

    // let position() succeed: the changelog can now be marked completed
    clearException.set(true);
    resetToDefault(mockTask);
    mockTask.clearTaskTimeout();
    EasyMock.expectLastCall();
    EasyMock.replay(mockTask);

    changelogReader.restore(Collections.singletonMap(taskId, mockTask));

    assertEquals(StoreChangelogReader.ChangelogState.COMPLETED, changelogReader.changelogMetadata(tp).state());
    assertEquals(10L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(Collections.singleton(tp), changelogReader.completedChangelogs());
    assertEquals(10L, consumer.position(tp));
    verify(mockTask);
}
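The tests above seed beginning and end offsets into their mock clients; a plain, un-subclassed MockConsumer offers the same hooks and resolves position() after a seek from the seeded values rather than from a broker. A small standalone sketch of that mechanism (the topic name, class name, and offsets are illustrative):

import java.util.Collections;
import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.common.TopicPartition;

public class MockConsumerOffsetsSketch {
    public static void main(final String[] args) {
        final MockConsumer<byte[], byte[]> consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
        final TopicPartition tp = new TopicPartition("changelog", 0);

        consumer.assign(Collections.singleton(tp));
        consumer.updateBeginningOffsets(Collections.singletonMap(tp, 0L));
        // seed the log-end offset that endOffsets() and seekToEnd() will report
        consumer.updateEndOffsets(Collections.singletonMap(tp, 10L));

        consumer.seekToEnd(Collections.singleton(tp));
        System.out.println(consumer.position(tp)); // 10
    }
}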
Use of org.apache.kafka.clients.consumer.MockConsumer in project kafka by Apache.
From the class StreamThreadTest, method shouldNotCloseTaskAndRemoveFromTaskManagerIfProducerGotFencedInCommitTransactionWhenCommitting: a ProducerFencedException is injected into commitTransaction(), and the test asserts that the failed task is neither committed nor closed before the rebalance.
@Test
public void shouldNotCloseTaskAndRemoveFromTaskManagerIfProducerGotFencedInCommitTransactionWhenCommitting() {
    // only have source but no sink so that we would not get fenced in producer.send
    internalTopologyBuilder.addSource(null, "source", null, null, null, topic1);

    final StreamThread thread = createStreamThread(CLIENT_ID, new StreamsConfig(configProps(true)), true);

    final MockConsumer<byte[], byte[]> consumer = clientSupplier.consumer;
    consumer.updatePartitions(topic1, Collections.singletonList(new PartitionInfo(topic1, 1, null, null, null)));

    thread.setState(StreamThread.State.STARTING);
    thread.rebalanceListener().onPartitionsRevoked(Collections.emptySet());

    final Map<TaskId, Set<TopicPartition>> activeTasks = new HashMap<>();
    final List<TopicPartition> assignedPartitions = new ArrayList<>();

    // assign single partition
    assignedPartitions.add(t1p1);
    activeTasks.put(task1, Collections.singleton(t1p1));

    thread.taskManager().handleAssignment(activeTasks, emptyMap());

    final MockConsumer<byte[], byte[]> mockConsumer = (MockConsumer<byte[], byte[]>) thread.mainConsumer();
    mockConsumer.assign(assignedPartitions);
    mockConsumer.updateBeginningOffsets(Collections.singletonMap(t1p1, 0L));
    thread.rebalanceListener().onPartitionsAssigned(assignedPartitions);

    thread.runOnce();
    assertThat(thread.activeTasks().size(), equalTo(1));

    final MockProducer<byte[], byte[]> producer = clientSupplier.producers.get(0);

    // make the next commitTransaction() throw, simulating a producer fenced during commit
    producer.commitTransactionException = new ProducerFencedException("Producer is fenced");
    mockTime.sleep(config.getLong(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG) + 1L);
    consumer.addRecord(new ConsumerRecord<>(topic1, 1, 1, new byte[0], new byte[0]));
    try {
        thread.runOnce();
        fail("Should have thrown TaskMigratedException");
    } catch (final KafkaException expected) {
        assertTrue(expected instanceof TaskMigratedException);
        assertTrue(
            "StreamsThread removed the fenced zombie task already, should wait for rebalance to close all zombies together.",
            thread.activeTasks().stream().anyMatch(task -> task.id().equals(task1)));
    }

    // nothing was committed, the transaction is still open, and the zombie task survives
    assertThat(producer.commitCount(), equalTo(0L));
    assertTrue(clientSupplier.producers.get(0).transactionInFlight());
    assertFalse(clientSupplier.producers.get(0).transactionCommitted());
    assertFalse(clientSupplier.producers.get(0).closed());
    assertEquals(1, thread.activeTasks().size());
}
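Both StreamThreadTest methods rely on MockProducer's error-injection hooks: fenceProducer() makes every subsequent transactional call throw ProducerFencedException, while the public commitTransactionException field arms a one-shot failure for commitTransaction(). A standalone sketch of the second hook (the topic name and class name are illustrative); note that the exception is thrown before MockProducer marks the transaction committed, which is why the transaction stays in flight, matching the assertions in the test above:

import org.apache.kafka.clients.producer.MockProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.errors.ProducerFencedException;
import org.apache.kafka.common.serialization.ByteArraySerializer;

public class MockProducerFencingSketch {
    public static void main(final String[] args) {
        final MockProducer<byte[], byte[]> producer =
            new MockProducer<>(true, new ByteArraySerializer(), new ByteArraySerializer());
        producer.initTransactions();
        producer.beginTransaction();
        producer.send(new ProducerRecord<>("out", new byte[0], new byte[0]));

        // arm the hook: the next commitTransaction() throws instead of committing
        producer.commitTransactionException = new ProducerFencedException("fenced");
        try {
            producer.commitTransaction();
        } catch (final ProducerFencedException expected) {
            // a real caller would treat this as a zombie signal and rejoin the group
        }
        System.out.println(producer.transactionCommitted()); // false
        System.out.println(producer.transactionInFlight());  // true
    }
}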