Example usage of org.apache.kafka.clients.consumer.ConsumerGroupMetadata in the Apache Kafka project — class StreamThreadTest, method shouldNotCommitNonRunningNonRestoringTasks:
@Test
public void shouldNotCommitNonRunningNonRestoringTasks() {
    // maybeCommit() must only commit tasks that are RUNNING or RESTORING;
    // a task still in CREATED state must be excluded from the commit set.
    final TaskManager taskManager = EasyMock.createNiceMock(TaskManager.class);
    final Consumer<byte[], byte[]> consumer = mock(Consumer.class);
    final ConsumerGroupMetadata consumerGroupMetadata = mock(ConsumerGroupMetadata.class);
    expect(consumer.groupMetadata()).andStubReturn(consumerGroupMetadata);
    expect(consumerGroupMetadata.groupInstanceId()).andReturn(Optional.empty());
    EasyMock.replay(consumer, consumerGroupMetadata);

    // Three tasks in three different states.
    final Task task1 = mock(Task.class);
    final Task task2 = mock(Task.class);
    final Task task3 = mock(Task.class);
    final TaskId taskId1 = new TaskId(0, 1);
    final TaskId taskId2 = new TaskId(0, 2);
    final TaskId taskId3 = new TaskId(0, 3);
    expect(task1.state()).andReturn(Task.State.RUNNING).anyTimes();
    expect(task1.id()).andReturn(taskId1).anyTimes();
    expect(task2.state()).andReturn(Task.State.RESTORING).anyTimes();
    expect(task2.id()).andReturn(taskId2).anyTimes();
    expect(task3.state()).andReturn(Task.State.CREATED).anyTimes();
    expect(task3.id()).andReturn(taskId3).anyTimes();
    expect(taskManager.tasks())
        .andReturn(mkMap(mkEntry(taskId1, task1), mkEntry(taskId2, task2), mkEntry(taskId3, task3)))
        .anyTimes();
    // Expect exactly one commit containing task1 and task2 but NOT task3,
    // because task3 is neither running nor restoring.
    expect(taskManager.commit(mkSet(task1, task2))).andReturn(2).times(1);

    // NOTE: the original test also built a StreamsMetricsImpl here that was
    // never used (buildStreamThread doesn't take it); the dead local was removed.
    final TopologyMetadata topologyMetadata = new TopologyMetadata(internalTopologyBuilder, config);
    topologyMetadata.buildAndRewriteTopology();
    final StreamThread thread = buildStreamThread(consumer, taskManager, config, topologyMetadata);

    EasyMock.replay(task1, task2, task3, taskManager);
    thread.setNow(mockTime.milliseconds());
    thread.maybeCommit();
    verify(taskManager);
}
Example usage of org.apache.kafka.clients.consumer.ConsumerGroupMetadata in the Apache Kafka project — class StreamThreadTest, method shouldCatchTimeoutExceptionFromHandleCorruptionAndInvokeExceptionHandler:
@Test
@SuppressWarnings("unchecked")
public void shouldCatchTimeoutExceptionFromHandleCorruptionAndInvokeExceptionHandler() {
// Scenario: runOnce() throws TaskCorruptedException; the TaskManager's
// corruption handling then throws a TimeoutException. That TimeoutException
// must reach the streams uncaught-exception handler instead of being lost.
final TaskManager taskManager = EasyMock.createNiceMock(TaskManager.class);
expect(taskManager.producerClientIds()).andStubReturn(Collections.emptySet());
final Consumer<byte[], byte[]> consumer = mock(Consumer.class);
final ConsumerGroupMetadata consumerGroupMetadata = mock(ConsumerGroupMetadata.class);
expect(consumer.groupMetadata()).andStubReturn(consumerGroupMetadata);
expect(consumerGroupMetadata.groupInstanceId()).andReturn(Optional.empty());
// The thread is expected to subscribe and later unsubscribe the consumer
// at least once while running / shutting down.
consumer.subscribe((Collection<String>) anyObject(), anyObject());
EasyMock.expectLastCall().atLeastOnce();
consumer.unsubscribe();
EasyMock.expectLastCall().atLeastOnce();
EasyMock.replay(consumerGroupMetadata);
final Task task1 = mock(Task.class);
final Task task2 = mock(Task.class);
final TaskId taskId1 = new TaskId(0, 0);
final TaskId taskId2 = new TaskId(0, 2);
// Only task1 is reported as corrupted.
final Set<TaskId> corruptedTasks = singleton(taskId1);
expect(task1.state()).andStubReturn(Task.State.RUNNING);
expect(task1.id()).andStubReturn(taskId1);
expect(task2.state()).andStubReturn(Task.State.RUNNING);
expect(task2.id()).andStubReturn(taskId2);
// handleCorruption() itself fails with a TimeoutException — the behavior
// under test.
taskManager.handleCorruption(corruptedTasks);
expectLastCall().andThrow(new TimeoutException());
EasyMock.replay(task1, task2, taskManager, consumer);
final StreamsMetricsImpl streamsMetrics = new StreamsMetricsImpl(metrics, CLIENT_ID, StreamsConfig.METRICS_LATEST, mockTime);
final TopologyMetadata topologyMetadata = new TopologyMetadata(internalTopologyBuilder, config);
topologyMetadata.buildAndRewriteTopology();
// runOnce() is overridden to throw TaskCorruptedException on the first loop
// iteration while also moving the thread to PENDING_SHUTDOWN, so run()
// terminates after the corruption-handling path has executed once.
final StreamThread thread = new StreamThread(mockTime, config, null, consumer, consumer, null, null, taskManager, streamsMetrics, topologyMetadata, CLIENT_ID, new LogContext(""), new AtomicInteger(), new AtomicLong(Long.MAX_VALUE), new LinkedList<>(), null, HANDLER, null) {
@Override
void runOnce() {
setState(State.PENDING_SHUTDOWN);
throw new TaskCorruptedException(corruptedTasks);
}
}.updateThreadMetadata(getSharedAdminClientId(CLIENT_ID));
final AtomicBoolean exceptionHandlerInvoked = new AtomicBoolean(false);
thread.setStreamsUncaughtExceptionHandler((e, b) -> exceptionHandlerInvoked.set(true));
thread.run();
// The TimeoutException thrown from handleCorruption() must have been routed
// to the uncaught-exception handler.
verify(taskManager);
assertThat(exceptionHandlerInvoked.get(), is(true));
}
Example usage of org.apache.kafka.clients.consumer.ConsumerGroupMetadata in the Apache Kafka project — class StreamThreadTest, method shouldEnforceRebalanceAfterNextScheduledProbingRebalanceTime:
@Test
public void shouldEnforceRebalanceAfterNextScheduledProbingRebalanceTime() throws InterruptedException {
    // The thread must call Consumer#enforceRebalance() once the scheduled
    // probing-rebalance deadline has passed.
    final StreamsConfig config = new StreamsConfig(configProps(false));
    internalTopologyBuilder.buildTopology();
    final StreamsMetricsImpl streamsMetrics = new StreamsMetricsImpl(metrics, APPLICATION_ID, config.getString(StreamsConfig.BUILT_IN_METRICS_VERSION_CONFIG), mockTime);

    final Consumer<byte[], byte[]> mockConsumer = EasyMock.createNiceMock(Consumer.class);
    expect(mockConsumer.poll(anyObject())).andStubReturn(ConsumerRecords.empty());
    final ConsumerGroupMetadata consumerGroupMetadata = mock(ConsumerGroupMetadata.class);
    expect(mockConsumer.groupMetadata()).andStubReturn(consumerGroupMetadata);
    expect(consumerGroupMetadata.groupInstanceId()).andReturn(Optional.empty());
    EasyMock.replay(consumerGroupMetadata);

    final EasyMockConsumerClientSupplier mockClientSupplier = new EasyMockConsumerClientSupplier(mockConsumer);
    mockClientSupplier.setCluster(createCluster());

    // FIX: record the enforceRebalance() expectation BEFORE switching the mock
    // to replay mode. In the original code this call came after replay(), so it
    // ran as an ignored invocation on the nice mock instead of becoming an
    // expectation, and verify(mockConsumer) below never actually asserted that
    // the thread enforced a rebalance.
    mockConsumer.enforceRebalance();
    EasyMock.replay(mockConsumer);

    final TopologyMetadata topologyMetadata = new TopologyMetadata(internalTopologyBuilder, config);
    topologyMetadata.buildAndRewriteTopology();
    final StreamThread thread = StreamThread.create(topologyMetadata, config, mockClientSupplier, mockClientSupplier.getAdmin(config.getAdminConfigs(CLIENT_ID)), PROCESS_ID, CLIENT_ID, streamsMetrics, mockTime, streamsMetadataState, 0, stateDirectory, new MockStateRestoreListener(), threadIdx, null, null);

    // Make the scheduled probing rebalance already overdue before starting.
    mockClientSupplier.nextRebalanceMs().set(mockTime.milliseconds() - 1L);

    thread.start();
    TestUtils.waitForCondition(() -> thread.state() == StreamThread.State.STARTING, 10 * 1000, "Thread never started.");
    // The loop should eventually hit the overdue deadline and call
    // enforceRebalance(), at which point verify() succeeds.
    TestUtils.retryOnExceptionWithTimeout(() -> verify(mockConsumer));

    thread.shutdown();
    TestUtils.waitForCondition(() -> thread.state() == StreamThread.State.DEAD, 10 * 1000, "Thread never shut down.");
}
Example usage of org.apache.kafka.clients.consumer.ConsumerGroupMetadata in the Apache Kafka project — class TaskManagerTest, method shouldCloseAndReviveUncorruptedTasksWhenTimeoutExceptionThrownFromCommitDuringHandleCorruptedWithEOS:
@Test
public void shouldCloseAndReviveUncorruptedTasksWhenTimeoutExceptionThrownFromCommitDuringHandleCorruptedWithEOS() {
// Under EXACTLY_ONCE_V2, if the transactional commit performed while handling
// a corrupted task times out, the so-far-uncorrupted tasks must also be
// closed dirty and revived (back to CREATED), with their changelogs marked
// corrupted, since the failed commit leaves their progress in doubt.
setUpTaskManager(ProcessingMode.EXACTLY_ONCE_V2);
final StreamsProducer producer = mock(StreamsProducer.class);
expect(activeTaskCreator.threadProducer()).andStubReturn(producer);
final ProcessorStateManager stateManager = EasyMock.createMock(ProcessorStateManager.class);
// Flags flipped by the task subclasses below so we can assert that
// markChangelogAsCorrupted() was invoked on each task.
final AtomicBoolean corruptedTaskChangelogMarkedAsCorrupted = new AtomicBoolean(false);
final StateMachineTask corruptedActiveTask = new StateMachineTask(taskId00, taskId00Partitions, true, stateManager) {
@Override
public void markChangelogAsCorrupted(final Collection<TopicPartition> partitions) {
super.markChangelogAsCorrupted(partitions);
corruptedTaskChangelogMarkedAsCorrupted.set(true);
}
};
stateManager.markChangelogAsCorrupted(taskId00ChangelogPartitions);
final AtomicBoolean uncorruptedTaskChangelogMarkedAsCorrupted = new AtomicBoolean(false);
final StateMachineTask uncorruptedActiveTask = new StateMachineTask(taskId01, taskId01Partitions, true, stateManager) {
@Override
public void markChangelogAsCorrupted(final Collection<TopicPartition> partitions) {
super.markChangelogAsCorrupted(partitions);
uncorruptedTaskChangelogMarkedAsCorrupted.set(true);
}
};
// The uncorrupted task has offsets to commit, which forces the (failing)
// transactional commit during handleCorruption().
final Map<TopicPartition, OffsetAndMetadata> offsets = singletonMap(t1p1, new OffsetAndMetadata(0L, null));
uncorruptedActiveTask.setCommittableOffsetsAndMetadata(offsets);
stateManager.markChangelogAsCorrupted(taskId01ChangelogPartitions);
// handleAssignment
final Map<TaskId, Set<TopicPartition>> assignment = new HashMap<>();
assignment.putAll(taskId00Assignment);
assignment.putAll(taskId01Assignment);
expect(activeTaskCreator.createTasks(anyObject(), eq(assignment))).andStubReturn(asList(corruptedActiveTask, uncorruptedActiveTask));
topologyBuilder.addSubscribedTopicsFromAssignment(anyObject(), anyString());
expectLastCall().anyTimes();
expectRestoreToBeCompleted(consumer, changeLogReader);
final ConsumerGroupMetadata groupMetadata = new ConsumerGroupMetadata("appId");
expect(consumer.groupMetadata()).andReturn(groupMetadata);
// The transactional commit itself times out — the behavior under test.
producer.commitTransaction(offsets, groupMetadata);
expectLastCall().andThrow(new TimeoutException());
expect(consumer.assignment()).andStubReturn(union(HashSet::new, taskId00Partitions, taskId01Partitions));
replay(activeTaskCreator, standbyTaskCreator, topologyBuilder, consumer, changeLogReader, stateManager, producer);
taskManager.handleAssignment(assignment, emptyMap());
assertThat(taskManager.tryToCompleteRestoration(time.milliseconds(), null), is(true));
assertThat(uncorruptedActiveTask.state(), is(Task.State.RUNNING));
assertThat(corruptedActiveTask.state(), is(Task.State.RUNNING));
// make sure this will be committed and throw
uncorruptedActiveTask.setCommitNeeded();
final Map<TopicPartition, Long> corruptedActiveTaskChangelogOffsets = singletonMap(t1p0changelog, 0L);
corruptedActiveTask.setChangelogOffsets(corruptedActiveTaskChangelogOffsets);
final Map<TopicPartition, Long> uncorruptedActiveTaskChangelogOffsets = singletonMap(t1p1changelog, 0L);
uncorruptedActiveTask.setChangelogOffsets(uncorruptedActiveTaskChangelogOffsets);
// Sanity-check pre-conditions before triggering corruption handling.
assertThat(uncorruptedActiveTask.commitPrepared, is(false));
assertThat(uncorruptedActiveTask.commitNeeded, is(true));
assertThat(uncorruptedActiveTask.commitCompleted, is(false));
assertThat(corruptedActiveTask.commitPrepared, is(false));
assertThat(corruptedActiveTask.commitNeeded, is(false));
assertThat(corruptedActiveTask.commitCompleted, is(false));
// Trigger: only taskId00 is reported corrupted; the commit for the
// uncorrupted task then times out (see producer expectation above).
taskManager.handleCorruption(singleton(taskId00));
assertThat(uncorruptedActiveTask.commitPrepared, is(true));
assertThat(uncorruptedActiveTask.commitNeeded, is(false));
// if corrupted due to timeout on commit, should enforce checkpoint with corrupted tasks removed
assertThat(uncorruptedActiveTask.commitCompleted, is(true));
assertThat(corruptedActiveTask.commitPrepared, is(true));
assertThat(corruptedActiveTask.commitNeeded, is(false));
// if corrupted, should enforce checkpoint with corrupted tasks removed
assertThat(corruptedActiveTask.commitCompleted, is(true));
// Both tasks must have been closed dirty and revived back to CREATED.
assertThat(corruptedActiveTask.state(), is(Task.State.CREATED));
assertThat(uncorruptedActiveTask.state(), is(Task.State.CREATED));
assertThat(corruptedTaskChangelogMarkedAsCorrupted.get(), is(true));
assertThat(uncorruptedTaskChangelogMarkedAsCorrupted.get(), is(true));
verify(consumer);
}
Example usage of org.apache.kafka.clients.consumer.ConsumerGroupMetadata in the Apache Kafka project — class StreamsProducerTest, method testThrowTaskMigratedExceptionOnEos:
private void testThrowTaskMigratedExceptionOnEos(final RuntimeException exception) {
    // Cannot use `eosMockProducer.fenceProducer()` here, since that would
    // already fire during `beginTransaction()` rather than during the commit.
    eosAlphaMockProducer.commitTransactionException = exception;

    final ConsumerGroupMetadata groupMetadata = new ConsumerGroupMetadata("appId");
    final TaskMigratedException thrown = assertThrows(
        TaskMigratedException.class,
        () -> eosAlphaStreamsProducer.commitTransaction(offsetsAndMetadata, groupMetadata)
    );

    // Offsets must have been sent to the transaction before the commit failed,
    // and the injected exception must be wrapped as the migration cause.
    assertThat(eosAlphaMockProducer.sentOffsets(), is(true));
    assertThat(thrown.getCause(), is(eosAlphaMockProducer.commitTransactionException));
    assertThat(
        thrown.getMessage(),
        is("Producer got fenced trying to commit a transaction [test];"
            + " it means all tasks belonging to this thread should be migrated.")
    );
}
Aggregations