Use of org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl in project kafka by apache.
From the class GlobalStreamThreadTest, the method shouldThrowStreamsExceptionOnStartupIfExceptionOccurred.
@Test
public void shouldThrowStreamsExceptionOnStartupIfExceptionOccurred() throws Exception {
    final MockConsumer<byte[], byte[]> mockConsumer = new MockConsumer<byte[], byte[]>(OffsetResetStrategy.EARLIEST) {

        @Override
        public List<PartitionInfo> partitionsFor(final String topic) {
            throw new RuntimeException("KABOOM!");
        }
    };
    final StateStore globalStore = builder.globalStateStores().get(GLOBAL_STORE_NAME);
    globalStreamThread = new GlobalStreamThread(
        builder.buildGlobalStateTopology(),
        config,
        mockConsumer,
        new StateDirectory(config, time, true, false),
        0,
        new StreamsMetricsImpl(new Metrics(), "test-client", StreamsConfig.METRICS_LATEST, time),
        time,
        "clientId",
        stateRestoreListener,
        e -> { }
    );
    try {
        globalStreamThread.start();
        fail("Should have thrown StreamsException if start up failed");
    } catch (final StreamsException e) {
        assertThat(e.getCause(), instanceOf(RuntimeException.class));
        assertThat(e.getCause().getMessage(), equalTo("KABOOM!"));
    }
    globalStreamThread.join();
    assertThat(globalStore.isOpen(), is(false));
    assertFalse(globalStreamThread.stillRunning());
}
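All of the examples on this page build StreamsMetricsImpl the same way, so the construction pattern is shown once below in isolation. This is a minimal, self-contained sketch, not code from the test above; the class name StreamsMetricsImplSketch, the client id "demo-client", and the use of Time.SYSTEM are illustrative assumptions.

import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.utils.Time;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl;

public class StreamsMetricsImplSketch {

    public static void main(final String[] args) {
        // A fresh registry plus the four-argument constructor used throughout these tests:
        // the Metrics registry, a client id, the built-in metrics version, and a Time source.
        final Metrics metrics = new Metrics();
        final StreamsMetricsImpl streamsMetrics =
            new StreamsMetricsImpl(metrics, "demo-client", StreamsConfig.METRICS_LATEST, Time.SYSTEM);
        // Sensors created through this instance land in the underlying registry and can be
        // inspected through streamsMetrics.metrics(), as the assertions further down do.
        System.out.println("registered metrics: " + streamsMetrics.metrics().size());
        metrics.close();
    }
}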
Use of org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl in project kafka by apache.
From the class StreamThreadTest, the method shouldRecordCommitLatency.
@Test
public void shouldRecordCommitLatency() {
    final Consumer<byte[], byte[]> consumer = EasyMock.createNiceMock(Consumer.class);
    final ConsumerGroupMetadata consumerGroupMetadata = mock(ConsumerGroupMetadata.class);
    expect(consumer.groupMetadata()).andStubReturn(consumerGroupMetadata);
    expect(consumerGroupMetadata.groupInstanceId()).andReturn(Optional.empty());
    expect(consumer.poll(anyObject())).andStubReturn(new ConsumerRecords<>(Collections.emptyMap()));
    final Task task = niceMock(Task.class);
    expect(task.id()).andStubReturn(task1);
    expect(task.inputPartitions()).andStubReturn(Collections.singleton(t1p1));
    expect(task.committedOffsets()).andStubReturn(Collections.emptyMap());
    expect(task.highWaterMark()).andStubReturn(Collections.emptyMap());
    final ActiveTaskCreator activeTaskCreator = mock(ActiveTaskCreator.class);
    expect(activeTaskCreator.createTasks(anyObject(), anyObject())).andStubReturn(Collections.singleton(task));
    expect(activeTaskCreator.producerClientIds()).andStubReturn(Collections.singleton("producerClientId"));
    expect(activeTaskCreator.uncreatedTasksForTopologies(anyObject())).andStubReturn(emptyMap());
    activeTaskCreator.removeRevokedUnknownTasks(singleton(task1));
    final StandbyTaskCreator standbyTaskCreator = mock(StandbyTaskCreator.class);
    expect(standbyTaskCreator.uncreatedTasksForTopologies(anyObject())).andStubReturn(emptyMap());
    standbyTaskCreator.removeRevokedUnknownTasks(emptySet());
    EasyMock.replay(consumer, consumerGroupMetadata, task, activeTaskCreator, standbyTaskCreator);
    final StreamsMetricsImpl streamsMetrics = new StreamsMetricsImpl(metrics, CLIENT_ID, StreamsConfig.METRICS_LATEST, mockTime);
    final TopologyMetadata topologyMetadata = new TopologyMetadata(internalTopologyBuilder, config);
    topologyMetadata.buildAndRewriteTopology();
    final TaskManager taskManager = new TaskManager(
        null, null, null, null, null,
        activeTaskCreator, standbyTaskCreator, topologyMetadata,
        null, null
    ) {

        @Override
        int commit(final Collection<Task> tasksToCommit) {
            mockTime.sleep(10L);
            return 1;
        }
    };
    taskManager.setMainConsumer(consumer);
    final StreamThread thread = buildStreamThread(consumer, taskManager, config, topologyMetadata);
    thread.updateThreadMetadata("adminClientId");
    thread.setState(StreamThread.State.STARTING);
    final Map<TaskId, Set<TopicPartition>> activeTasks = new HashMap<>();
    activeTasks.put(task1, Collections.singleton(t1p1));
    thread.taskManager().handleAssignment(activeTasks, emptyMap());
    thread.rebalanceListener().onPartitionsAssigned(Collections.singleton(t1p1));
    assertTrue(Double.isNaN((Double) streamsMetrics.metrics().get(
        new MetricName("commit-latency-max", "stream-thread-metrics", "", Collections.singletonMap("thread-id", CLIENT_ID))).metricValue()));
    assertTrue(Double.isNaN((Double) streamsMetrics.metrics().get(
        new MetricName("commit-latency-avg", "stream-thread-metrics", "", Collections.singletonMap("thread-id", CLIENT_ID))).metricValue()));
    thread.runOnce();
    assertThat(streamsMetrics.metrics().get(
        new MetricName("commit-latency-max", "stream-thread-metrics", "", Collections.singletonMap("thread-id", CLIENT_ID))).metricValue(), equalTo(10.0));
    assertThat(streamsMetrics.metrics().get(
        new MetricName("commit-latency-avg", "stream-thread-metrics", "", Collections.singletonMap("thread-id", CLIENT_ID))).metricValue(), equalTo(10.0));
}
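The four assertions above repeat the same MetricName construction. A small helper like the following shows the lookup pattern on its own; it is a sketch rather than code from StreamThreadTest, and it assumes the usual imports for MetricName, Collections, and StreamsMetricsImpl. The metric is keyed by its name, the group "stream-thread-metrics", and a "thread-id" tag; the empty description string mirrors the test, since the description does not participate in the lookup.

// Sketch of the thread-level metric lookup used in the assertions above.
private static double threadMetricValue(final StreamsMetricsImpl streamsMetrics,
                                        final String metricName,
                                        final String threadId) {
    final MetricName name = new MetricName(
        metricName,
        "stream-thread-metrics",
        "",
        Collections.singletonMap("thread-id", threadId));
    return (Double) streamsMetrics.metrics().get(name).metricValue();
}

With such a helper, the final assertion could read assertThat(threadMetricValue(streamsMetrics, "commit-latency-max", CLIENT_ID), equalTo(10.0)).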
Use of org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl in project kafka by apache.
From the class StreamThreadTest, the method shouldShutdownTaskManagerOnClose.
@Test
public void shouldShutdownTaskManagerOnClose() {
    final Consumer<byte[], byte[]> consumer = EasyMock.createNiceMock(Consumer.class);
    final ConsumerGroupMetadata consumerGroupMetadata = mock(ConsumerGroupMetadata.class);
    expect(consumer.groupMetadata()).andStubReturn(consumerGroupMetadata);
    expect(consumerGroupMetadata.groupInstanceId()).andReturn(Optional.empty());
    EasyMock.replay(consumerGroupMetadata);
    final TaskManager taskManager = EasyMock.createNiceMock(TaskManager.class);
    expect(taskManager.producerClientIds()).andStubReturn(Collections.emptySet());
    taskManager.shutdown(true);
    EasyMock.expectLastCall();
    EasyMock.replay(taskManager, consumer);
    final StreamsMetricsImpl streamsMetrics = new StreamsMetricsImpl(metrics, CLIENT_ID, StreamsConfig.METRICS_LATEST, mockTime);
    final TopologyMetadata topologyMetadata = new TopologyMetadata(internalTopologyBuilder, config);
    topologyMetadata.buildAndRewriteTopology();
    final StreamThread thread = buildStreamThread(consumer, taskManager, config, topologyMetadata)
        .updateThreadMetadata(getSharedAdminClientId(CLIENT_ID));
    thread.setStateListener((t, newState, oldState) -> {
        if (oldState == StreamThread.State.CREATED && newState == StreamThread.State.STARTING) {
            thread.shutdown();
        }
    });
    thread.run();
    verify(taskManager);
}
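The verify(taskManager) call at the end only passes because the expected interaction was recorded before replay. As a stand-alone illustration of that EasyMock record/replay/verify cycle (a sketch, not code from the test):

final TaskManager taskManager = EasyMock.createNiceMock(TaskManager.class);
taskManager.shutdown(true);      // record the interaction the test cares about
EasyMock.expectLastCall();
EasyMock.replay(taskManager);    // switch the mock into replay mode
taskManager.shutdown(true);      // in the test, StreamThread.run() triggers this indirectly
EasyMock.verify(taskManager);    // fails if shutdown(true) was never invoked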
Use of org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl in project kafka by apache.
From the class StreamThreadTest, the helper method createStreamThread.
private StreamThread createStreamThread(@SuppressWarnings("SameParameterValue") final String clientId,
                                        final StreamsConfig config,
                                        final boolean eosEnabled) {
    if (eosEnabled) {
        clientSupplier.setApplicationIdForProducer(APPLICATION_ID);
    }
    clientSupplier.setCluster(createCluster());
    final StreamsMetricsImpl streamsMetrics =
        new StreamsMetricsImpl(metrics, APPLICATION_ID, config.getString(StreamsConfig.BUILT_IN_METRICS_VERSION_CONFIG), mockTime);
    final TopologyMetadata topologyMetadata = new TopologyMetadata(internalTopologyBuilder, config);
    topologyMetadata.buildAndRewriteTopology();
    return StreamThread.create(
        topologyMetadata, config, clientSupplier,
        clientSupplier.getAdmin(config.getAdminConfigs(clientId)),
        PROCESS_ID, clientId, streamsMetrics, mockTime, streamsMetadataState,
        0, stateDirectory, new MockStateRestoreListener(), threadIdx, null, HANDLER);
}
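For context, a typical call into this helper from a test method looks like the following. This is an illustrative usage sketch rather than a quoted test: it reuses the CLIENT_ID and config fields referenced above, and passes eosEnabled as false so no transactional producer is configured.

final StreamThread thread = createStreamThread(CLIENT_ID, config, false);
thread.setState(StreamThread.State.STARTING);
// ... drive the thread (for example via runOnce) and assert on its behaviour ...
thread.shutdown();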
Use of org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl in project kafka by apache.
From the class StreamThreadTest, the method shouldThrowTaskMigratedExceptionHandlingTaskLost.
@Test
public void shouldThrowTaskMigratedExceptionHandlingTaskLost() {
    final Set<TopicPartition> assignedPartitions = Collections.singleton(t1p1);
    final TaskManager taskManager = EasyMock.createNiceMock(TaskManager.class);
    expect(taskManager.producerClientIds()).andStubReturn(Collections.emptySet());
    final MockConsumer<byte[], byte[]> consumer = new MockConsumer<>(OffsetResetStrategy.LATEST);
    consumer.assign(assignedPartitions);
    consumer.updateBeginningOffsets(Collections.singletonMap(t1p1, 0L));
    consumer.updateEndOffsets(Collections.singletonMap(t1p1, 10L));
    taskManager.handleLostAll();
    EasyMock.expectLastCall().andThrow(new TaskMigratedException("Task lost exception", new RuntimeException()));
    EasyMock.replay(taskManager);
    final StreamsMetricsImpl streamsMetrics = new StreamsMetricsImpl(metrics, CLIENT_ID, StreamsConfig.METRICS_LATEST, mockTime);
    final TopologyMetadata topologyMetadata = new TopologyMetadata(internalTopologyBuilder, config);
    topologyMetadata.buildAndRewriteTopology();
    final StreamThread thread = buildStreamThread(consumer, taskManager, config, topologyMetadata)
        .updateThreadMetadata(getSharedAdminClientId(CLIENT_ID));
    consumer.schedulePollTask(() -> {
        thread.setState(StreamThread.State.PARTITIONS_REVOKED);
        thread.rebalanceListener().onPartitionsLost(assignedPartitions);
    });
    thread.setState(StreamThread.State.STARTING);
    assertThrows(TaskMigratedException.class, thread::runOnce);
}
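If a test also wanted to inspect the propagated exception, assertThrows returns it. A variation on the final assertion (a sketch, not from the source) could check that the wrapped cause is the RuntimeException passed to handleLostAll() above:

final TaskMigratedException thrown = assertThrows(TaskMigratedException.class, thread::runOnce);
assertThat(thrown.getCause(), instanceOf(RuntimeException.class));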