Use of org.apache.kafka.clients.consumer.ConsumerGroupMetadata in the Apache Kafka project.
Class StreamThreadTest, method shouldConstructAdminMetrics.
@Test
public void shouldConstructAdminMetrics() {
    // Two dummy brokers so the mock admin client has a cluster to report on.
    final Node firstBroker = new Node(0, "dummyHost-1", 1234);
    final Node secondBroker = new Node(1, "dummyHost-2", 1234);
    final MockAdminClient adminClient = new MockAdminClient.Builder()
        .brokers(Arrays.asList(firstBroker, secondBroker))
        .clusterId(null)
        .build();

    // Stub the consumer: the thread only ever asks it for group metadata.
    final Consumer<byte[], byte[]> consumer = EasyMock.createNiceMock(Consumer.class);
    final ConsumerGroupMetadata groupMetadata = mock(ConsumerGroupMetadata.class);
    expect(consumer.groupMetadata()).andStubReturn(groupMetadata);
    expect(groupMetadata.groupInstanceId()).andReturn(Optional.empty());
    EasyMock.replay(consumer, groupMetadata);

    final TaskManager taskManager = EasyMock.createNiceMock(TaskManager.class);
    final StreamsMetricsImpl streamsMetrics =
        new StreamsMetricsImpl(metrics, CLIENT_ID, StreamsConfig.METRICS_LATEST, mockTime);
    final TopologyMetadata topologyMetadata = new TopologyMetadata(internalTopologyBuilder, config);
    topologyMetadata.buildAndRewriteTopology();
    final StreamThread thread = new StreamThread(
        mockTime,
        config,
        adminClient,
        consumer,
        consumer,
        null,
        null,
        taskManager,
        streamsMetrics,
        topologyMetadata,
        CLIENT_ID,
        new LogContext(""),
        new AtomicInteger(),
        new AtomicLong(Long.MAX_VALUE),
        new LinkedList<>(),
        null,
        HANDLER,
        null
    );

    // Register one metric on the admin client, then verify the thread surfaces it
    // under the exact same metric name.
    final MetricName expectedName = new MetricName("test_metric", "", "", new HashMap<>());
    final Metric expectedMetric =
        new KafkaMetric(new Object(), expectedName, (Measurable) (c, now) -> 0, null, new MockTime());
    EasyMock.replay(taskManager);
    adminClient.setMockMetrics(expectedName, expectedMetric);

    final Map<MetricName, Metric> adminClientMetrics = thread.adminClientMetrics();
    assertEquals(expectedName, adminClientMetrics.get(expectedName).metricName());
}
Use of org.apache.kafka.clients.consumer.ConsumerGroupMetadata in the Apache Kafka project.
Class TaskManagerTest, method shouldCommitViaProducerIfEosAlphaEnabled.
@Test
public void shouldCommitViaProducerIfEosAlphaEnabled() {
    // Under eos-alpha every task owns its own producer, so the task manager must
    // look the producer up per task and issue one transactional commit per task.
    final StreamsProducer producer = mock(StreamsProducer.class);
    expect(activeTaskCreator.streamsProducerForTask(anyObject(TaskId.class)))
        .andReturn(producer)
        .andReturn(producer);

    final Map<TopicPartition, OffsetAndMetadata> task01Offsets =
        singletonMap(t1p1, new OffsetAndMetadata(0L, null));
    final Map<TopicPartition, OffsetAndMetadata> task02Offsets =
        singletonMap(t1p2, new OffsetAndMetadata(1L, null));

    // Expect each task's offsets to be committed in its own transaction.
    producer.commitTransaction(task01Offsets, new ConsumerGroupMetadata("appId"));
    expectLastCall();
    producer.commitTransaction(task02Offsets, new ConsumerGroupMetadata("appId"));
    expectLastCall();

    shouldCommitViaProducerIfEosEnabled(ProcessingMode.EXACTLY_ONCE_ALPHA, producer, task01Offsets, task02Offsets);
}
Use of org.apache.kafka.clients.consumer.ConsumerGroupMetadata in the Apache Kafka project.
Class TaskManagerTest, method shouldCloseAndReviveUncorruptedTasksWhenTimeoutExceptionThrownFromCommitDuringRevocationWithEOS.
@Test
public void shouldCloseAndReviveUncorruptedTasksWhenTimeoutExceptionThrownFromCommitDuringRevocationWithEOS() {
// Scenario: under exactly-once-v2 a single thread-level producer commits offsets
// for all tasks at once. If that commit times out during partition revocation,
// every task whose offsets were in the failed commit must be treated as corrupted
// (changelog marked corrupted, task revived), while tasks with nothing to commit
// are left untouched.
setUpTaskManager(ProcessingMode.EXACTLY_ONCE_V2);
final StreamsProducer producer = mock(StreamsProducer.class);
expect(activeTaskCreator.threadProducer()).andStubReturn(producer);
final ProcessorStateManager stateManager = EasyMock.createMock(ProcessorStateManager.class);
// Task 00: will be revoked; has committable offsets pending.
final StateMachineTask revokedActiveTask = new StateMachineTask(taskId00, taskId00Partitions, true, stateManager);
final Map<TopicPartition, OffsetAndMetadata> revokedActiveTaskOffsets = singletonMap(t1p0, new OffsetAndMetadata(0L, null));
revokedActiveTask.setCommittableOffsetsAndMetadata(revokedActiveTaskOffsets);
revokedActiveTask.setCommitNeeded();
// Task 01: not revoked, but also has pending offsets, so it participates in the
// failing commit. The flag records that its changelog gets marked corrupted.
final AtomicBoolean unrevokedTaskChangelogMarkedAsCorrupted = new AtomicBoolean(false);
final StateMachineTask unrevokedActiveTask = new StateMachineTask(taskId01, taskId01Partitions, true, stateManager) {
@Override
public void markChangelogAsCorrupted(final Collection<TopicPartition> partitions) {
super.markChangelogAsCorrupted(partitions);
unrevokedTaskChangelogMarkedAsCorrupted.set(true);
}
};
final Map<TopicPartition, OffsetAndMetadata> unrevokedTaskOffsets = singletonMap(t1p1, new OffsetAndMetadata(1L, null));
unrevokedActiveTask.setCommittableOffsetsAndMetadata(unrevokedTaskOffsets);
unrevokedActiveTask.setCommitNeeded();
// Task 02: not revoked and nothing to commit — must be unaffected by the failure.
final StateMachineTask unrevokedActiveTaskWithoutCommitNeeded = new StateMachineTask(taskId02, taskId02Partitions, true, stateManager);
// The single eos-v2 commit is expected to carry the offsets of both committing tasks.
final Map<TopicPartition, OffsetAndMetadata> expectedCommittedOffsets = new HashMap<>();
expectedCommittedOffsets.putAll(revokedActiveTaskOffsets);
expectedCommittedOffsets.putAll(unrevokedTaskOffsets);
// Both committing tasks' changelogs are expected to be marked corrupted after the timeout.
stateManager.markChangelogAsCorrupted(taskId00ChangelogPartitions);
stateManager.markChangelogAsCorrupted(taskId01ChangelogPartitions);
final Map<TaskId, Set<TopicPartition>> assignmentActive = mkMap(mkEntry(taskId00, taskId00Partitions), mkEntry(taskId01, taskId01Partitions), mkEntry(taskId02, taskId02Partitions));
expectRestoreToBeCompleted(consumer, changeLogReader);
expect(activeTaskCreator.createTasks(anyObject(), eq(assignmentActive))).andReturn(asList(revokedActiveTask, unrevokedActiveTask, unrevokedActiveTaskWithoutCommitNeeded));
activeTaskCreator.closeAndRemoveTaskProducerIfNeeded(taskId00);
expectLastCall();
final ConsumerGroupMetadata groupMetadata = new ConsumerGroupMetadata("appId");
expect(consumer.groupMetadata()).andReturn(groupMetadata);
// The transactional commit itself throws, simulating a broker-side timeout.
producer.commitTransaction(expectedCommittedOffsets, groupMetadata);
expectLastCall().andThrow(new TimeoutException());
expect(consumer.assignment()).andStubReturn(union(HashSet::new, taskId00Partitions, taskId01Partitions, taskId02Partitions));
replay(activeTaskCreator, standbyTaskCreator, consumer, changeLogReader, producer, stateManager);
// Bring all three tasks up to RUNNING before triggering the revocation.
taskManager.handleAssignment(assignmentActive, emptyMap());
assertThat(taskManager.tryToCompleteRestoration(time.milliseconds(), null), is(true));
assertThat(revokedActiveTask.state(), is(Task.State.RUNNING));
assertThat(unrevokedActiveTask.state(), is(Task.State.RUNNING));
assertThat(unrevokedActiveTaskWithoutCommitNeeded.state(), is(State.RUNNING));
final Map<TopicPartition, Long> revokedActiveTaskChangelogOffsets = singletonMap(t1p0changelog, 0L);
revokedActiveTask.setChangelogOffsets(revokedActiveTaskChangelogOffsets);
final Map<TopicPartition, Long> unrevokedActiveTaskChangelogOffsets = singletonMap(t1p1changelog, 0L);
unrevokedActiveTask.setChangelogOffsets(unrevokedActiveTaskChangelogOffsets);
// Revoke task 00's partitions — the pre-revocation commit times out here.
taskManager.handleRevocation(taskId00Partitions);
// The unrevoked committing task must have had its changelog marked corrupted...
assertThat(unrevokedTaskChangelogMarkedAsCorrupted.get(), is(true));
// ...the revoked task ends up suspended, the corrupted one is revived (back to
// CREATED), and the task with no pending commit keeps running.
assertThat(revokedActiveTask.state(), is(State.SUSPENDED));
assertThat(unrevokedActiveTask.state(), is(State.CREATED));
assertThat(unrevokedActiveTaskWithoutCommitNeeded.state(), is(State.RUNNING));
}
Use of org.apache.kafka.clients.consumer.ConsumerGroupMetadata in the Apache Kafka project.
Class StreamThreadTest, method shouldNotCauseExceptionIfNothingCommitted.
@Test
public void shouldNotCauseExceptionIfNothingCommitted() {
    // Configure a 1s commit interval so the second maybeCommit() below still
    // falls inside the interval and must be a no-op.
    final long commitInterval = 1000L;
    final Properties streamsProps = configProps(false);
    streamsProps.setProperty(StreamsConfig.STATE_DIR_CONFIG, stateDir);
    streamsProps.setProperty(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, Long.toString(commitInterval));
    final StreamsConfig config = new StreamsConfig(streamsProps);

    // Stubbed consumer: only group metadata is ever requested.
    final Consumer<byte[], byte[]> consumer = EasyMock.createNiceMock(Consumer.class);
    final ConsumerGroupMetadata groupMetadata = mock(ConsumerGroupMetadata.class);
    expect(consumer.groupMetadata()).andStubReturn(groupMetadata);
    expect(groupMetadata.groupInstanceId()).andReturn(Optional.empty());
    EasyMock.replay(consumer, groupMetadata);

    // Task manager mock records exactly one commit attempt that commits nothing.
    final TaskManager taskManager = mockTaskManagerCommit(consumer, 1, 0);
    final TopologyMetadata topologyMetadata = new TopologyMetadata(internalTopologyBuilder, config);
    topologyMetadata.buildAndRewriteTopology();
    final StreamThread thread = buildStreamThread(consumer, taskManager, config, topologyMetadata);

    thread.setNow(mockTime.milliseconds());
    thread.maybeCommit();

    // Advance time but stay within the commit interval; no further commit expected.
    mockTime.sleep(commitInterval - 10L);
    thread.setNow(mockTime.milliseconds());
    thread.maybeCommit();

    verify(taskManager);
}
Use of org.apache.kafka.clients.consumer.ConsumerGroupMetadata in the Apache Kafka project.
Class StreamThreadTest, method shouldNotEnforceRebalanceWhenTaskCorruptedExceptionIsThrownForAnInactiveTask.
@Test
@SuppressWarnings("unchecked")
public void shouldNotEnforceRebalanceWhenTaskCorruptedExceptionIsThrownForAnInactiveTask() {
// Scenario: a TaskCorruptedException surfaces from runOnce() but
// handleCorruption() reports that none of the corrupted tasks are active
// (returns false) — the thread must NOT enforce a rebalance on the consumer.
final TaskManager taskManager = EasyMock.createNiceMock(TaskManager.class);
expect(taskManager.producerClientIds()).andStubReturn(Collections.emptySet());
final Consumer<byte[], byte[]> consumer = mock(Consumer.class);
final ConsumerGroupMetadata consumerGroupMetadata = mock(ConsumerGroupMetadata.class);
expect(consumer.groupMetadata()).andStubReturn(consumerGroupMetadata);
expect(consumerGroupMetadata.groupInstanceId()).andReturn(Optional.empty());
// subscribe/unsubscribe may happen any number of times during run loop setup/teardown.
consumer.subscribe((Collection<String>) anyObject(), anyObject());
EasyMock.expectLastCall().anyTimes();
consumer.unsubscribe();
EasyMock.expectLastCall().anyTimes();
EasyMock.replay(consumerGroupMetadata);
final Task task1 = mock(Task.class);
final Task task2 = mock(Task.class);
final TaskId taskId1 = new TaskId(0, 0);
final TaskId taskId2 = new TaskId(0, 2);
// Only task1 is reported corrupted; both tasks are CLOSED (inactive).
final Set<TaskId> corruptedTasks = singleton(taskId1);
expect(task1.state()).andReturn(Task.State.CLOSED).anyTimes();
expect(task1.id()).andReturn(taskId1).anyTimes();
expect(task2.state()).andReturn(Task.State.CLOSED).anyTimes();
expect(task2.id()).andReturn(taskId2).anyTimes();
// handleCorruption returning false means no active task was corrupted, which is
// the condition under which no rebalance should be enforced.
expect(taskManager.handleCorruption(corruptedTasks)).andReturn(false);
EasyMock.replay(task1, task2, taskManager, consumer);
final StreamsMetricsImpl streamsMetrics = new StreamsMetricsImpl(metrics, CLIENT_ID, StreamsConfig.METRICS_LATEST, mockTime);
final TopologyMetadata topologyMetadata = new TopologyMetadata(internalTopologyBuilder, config);
topologyMetadata.buildAndRewriteTopology();
// Anonymous subclass: the first runOnce() moves the thread to PENDING_SHUTDOWN
// (so the loop exits) and throws the corruption exception under test.
final StreamThread thread = new StreamThread(mockTime, eosEnabledConfig, null, consumer, consumer, null, null, taskManager, streamsMetrics, topologyMetadata, CLIENT_ID, new LogContext(""), new AtomicInteger(), new AtomicLong(Long.MAX_VALUE), new LinkedList<>(), null, HANDLER, null) {
@Override
void runOnce() {
setState(State.PENDING_SHUTDOWN);
throw new TaskCorruptedException(corruptedTasks);
}
}.updateThreadMetadata(getSharedAdminClientId(CLIENT_ID));
thread.setState(StreamThread.State.STARTING);
thread.runLoop();
// verify(consumer) fails if any unexpected call (e.g. enforceRebalance) occurred.
verify(taskManager);
verify(consumer);
}
Aggregations