Use of org.apache.kafka.common.metrics.Metrics in the Apache Kafka project.
From the class StreamThreadTest, method shouldNotNullPointerWhenStandbyTasksAssignedAndNoStateStoresForTopology.
@Test
public void shouldNotNullPointerWhenStandbyTasksAssignedAndNoStateStoresForTopology() throws Exception {
    final TopologyBuilder builder = new TopologyBuilder();
    builder.setApplicationId(applicationId).addSource("name", "topic").addSink("out", "output");
    final StreamsConfig config = new StreamsConfig(configProps());
    final StreamThread thread = new StreamThread(builder, config, new MockClientSupplier(), applicationId, clientId, processId,
        new Metrics(), new MockTime(), new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0);
    thread.partitionAssignor(new StreamPartitionAssignor() {

        @Override
        Map<TaskId, Set<TopicPartition>> standbyTasks() {
            return Collections.singletonMap(new TaskId(0, 0), Utils.mkSet(new TopicPartition("topic", 0)));
        }
    });
    thread.rebalanceListener.onPartitionsRevoked(Collections.<TopicPartition>emptyList());
    thread.rebalanceListener.onPartitionsAssigned(Collections.<TopicPartition>emptyList());
}
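The configProps() helper called above is not shown on this page. Below is a minimal sketch of the kind of properties such a helper would have to supply; the keys APPLICATION_ID_CONFIG, BOOTSTRAP_SERVERS_CONFIG and STATE_DIR_CONFIG come from StreamsConfig, but the values and the exact contents of the real helper in StreamThreadTest are assumptions.

    // Hypothetical sketch of a configProps() helper; values are placeholders,
    // not taken from the actual Kafka test class.
    private Properties configProps() {
        final Properties props = new Properties();
        props.setProperty(StreamsConfig.APPLICATION_ID_CONFIG, applicationId);
        props.setProperty(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.setProperty(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getAbsolutePath());
        return props;
    }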
Use of org.apache.kafka.common.metrics.Metrics in the Apache Kafka project.
From the class StreamThreadTest, method shouldNotViolateAtLeastOnceWhenExceptionOccursDuringFlushStateWhileSuspendingState.
@Test
public void shouldNotViolateAtLeastOnceWhenExceptionOccursDuringFlushStateWhileSuspendingState() throws Exception {
    final KStreamBuilder builder = new KStreamBuilder();
    builder.setApplicationId(applicationId);
    builder.stream("t1").groupByKey();
    final StreamsConfig config = new StreamsConfig(configProps());
    final MockClientSupplier clientSupplier = new MockClientSupplier();
    final TestStreamTask testStreamTask = new TestStreamTask(new TaskId(0, 0), applicationId, Utils.mkSet(new TopicPartition("t1", 0)),
        builder.build(0), clientSupplier.consumer, clientSupplier.producer, clientSupplier.restoreConsumer, config,
        new MockStreamsMetrics(new Metrics()),
        new StateDirectory(applicationId, config.getString(StreamsConfig.STATE_DIR_CONFIG), time)) {

        @Override
        public void flushState() {
            throw new RuntimeException("KABOOM!");
        }
    };
    final StreamsConfig config1 = new StreamsConfig(configProps());
    final StreamThread thread = new StreamThread(builder, config1, clientSupplier, applicationId, clientId, processId,
        new Metrics(), new MockTime(), new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0) {

        @Override
        protected StreamTask createStreamTask(final TaskId id, final Collection<TopicPartition> partitions) {
            return testStreamTask;
        }
    };
    final Map<TaskId, Set<TopicPartition>> activeTasks = new HashMap<>();
    activeTasks.put(testStreamTask.id, testStreamTask.partitions);
    thread.partitionAssignor(new MockStreamsPartitionAssignor(activeTasks));
    thread.rebalanceListener.onPartitionsRevoked(Collections.<TopicPartition>emptyList());
    thread.rebalanceListener.onPartitionsAssigned(testStreamTask.partitions);
    try {
        thread.rebalanceListener.onPartitionsRevoked(Collections.<TopicPartition>emptyList());
        fail("should have thrown exception");
    } catch (Exception e) {
        // expected
    }
    assertFalse(testStreamTask.committed);
}
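MockStreamsPartitionAssignor is used above but not shown on this page. A plausible minimal sketch, assuming it simply returns a fixed active-task assignment the same way the anonymous StreamPartitionAssignor in the first example returns fixed standby tasks, is:

    // Hypothetical sketch; the real helper class in StreamThreadTest may differ.
    private static class MockStreamsPartitionAssignor extends StreamPartitionAssignor {

        private final Map<TaskId, Set<TopicPartition>> activeTaskAssignment;

        MockStreamsPartitionAssignor(final Map<TaskId, Set<TopicPartition>> activeTaskAssignment) {
            this.activeTaskAssignment = activeTaskAssignment;
        }

        @Override
        Map<TaskId, Set<TopicPartition>> activeTasks() {
            return activeTaskAssignment;
        }
    }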
Use of org.apache.kafka.common.metrics.Metrics in the Apache Kafka project.
From the class StreamsMetricsImplTest, method testRemoveNullSensor.
@Test(expected = NullPointerException.class)
public void testRemoveNullSensor() {
    String groupName = "doesNotMatter";
    Map<String, String> tags = new HashMap<>();
    StreamsMetricsImpl streamsMetrics = new StreamsMetricsImpl(new Metrics(), groupName, tags);
    streamsMetrics.removeSensor(null);
}
Use of org.apache.kafka.common.metrics.Metrics in the Apache Kafka project.
From the class StreamsMetricsImplTest, method testRemoveSensor.
@Test
public void testRemoveSensor() {
    String groupName = "doesNotMatter";
    String sensorName = "sensor1";
    String scope = "scope";
    String entity = "entity";
    String operation = "put";
    Map<String, String> tags = new HashMap<>();
    StreamsMetricsImpl streamsMetrics = new StreamsMetricsImpl(new Metrics(), groupName, tags);
    Sensor sensor1 = streamsMetrics.addSensor(sensorName, Sensor.RecordingLevel.DEBUG);
    streamsMetrics.removeSensor(sensor1);
    Sensor sensor1a = streamsMetrics.addSensor(sensorName, Sensor.RecordingLevel.DEBUG, sensor1);
    streamsMetrics.removeSensor(sensor1a);
    Sensor sensor2 = streamsMetrics.addLatencyAndThroughputSensor(scope, entity, operation, Sensor.RecordingLevel.DEBUG);
    streamsMetrics.removeSensor(sensor2);
    Sensor sensor3 = streamsMetrics.addThroughputSensor(scope, entity, operation, Sensor.RecordingLevel.DEBUG);
    streamsMetrics.removeSensor(sensor3);
}
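For context, sensors created through addLatencyAndThroughputSensor and addThroughputSensor are normally driven through the recording methods of the StreamsMetrics interface. The following sketch assumes the recordLatency and recordThroughput methods of that interface; the scope, entity and operation names are placeholders, not taken from the test above.

    // Sketch of typical sensor usage under the assumptions stated above.
    final Sensor latencySensor = streamsMetrics.addLatencyAndThroughputSensor("my-scope", "my-entity", "put", Sensor.RecordingLevel.DEBUG);
    final long startNs = System.nanoTime();
    // ... the operation being measured ...
    streamsMetrics.recordLatency(latencySensor, startNs, System.nanoTime());

    final Sensor throughputSensor = streamsMetrics.addThroughputSensor("my-scope", "my-entity", "process", Sensor.RecordingLevel.DEBUG);
    streamsMetrics.recordThroughput(throughputSensor, 1);  // one record processed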
Use of org.apache.kafka.common.metrics.Metrics in the Apache Kafka project.
From the class StreamsMetricsImplTest, method testThroughputMetrics.
@Test
public void testThroughputMetrics() {
    String groupName = "doesNotMatter";
    String scope = "scope";
    String entity = "entity";
    String operation = "put";
    Map<String, String> tags = new HashMap<>();
    StreamsMetricsImpl streamsMetrics = new StreamsMetricsImpl(new Metrics(), groupName, tags);
    Sensor sensor1 = streamsMetrics.addThroughputSensor(scope, entity, operation, Sensor.RecordingLevel.DEBUG);
    Map<MetricName, ? extends Metric> metrics = streamsMetrics.metrics();
    // 2 metrics plus a common metric that keeps track of total registered metrics in Metrics() constructor
    assertEquals(metrics.size(), 3);
    streamsMetrics.removeSensor(sensor1);
    metrics = streamsMetrics.metrics();
    assertEquals(metrics.size(), 1);
}
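The "common metric" mentioned in the test's comment is the one a plain Metrics registry registers for itself (a running count of all registered metrics). The following standalone sketch, which only assumes that behaviour of the Metrics() constructor, shows the baseline size of 1 that the expected values of 3 and 1 above are built on.

    // A fresh Metrics registry already contains one metric (the registry-wide metric count),
    // so a throughput sensor that contributes two metrics brings the total to 3.
    final Metrics metrics = new Metrics();
    assertEquals(1, metrics.metrics().size());

    final StreamsMetricsImpl streamsMetrics = new StreamsMetricsImpl(metrics, "doesNotMatter", new HashMap<String, String>());
    assertEquals(1, streamsMetrics.metrics().size());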