Use of org.apache.kafka.common.metrics.Metrics in project kafka by apache.
From the class ProcessorNodeTest, method testMetrics:
@Test
public void testMetrics() {
    final StateSerdes anyStateSerde = StateSerdes.withBuiltinTypes("anyName", Bytes.class, Bytes.class);
    final MockProcessorContext context = new MockProcessorContext(anyStateSerde, new RecordCollectorImpl(null, null));
    final ProcessorNode node = new ProcessorNode("name", new NoOpProcessor(), Collections.emptySet());
    node.init(context);

    Metrics metrics = context.baseMetrics();
    String name = "task." + context.taskId() + "." + node.name();
    String[] entities = { "all", name };
    String[] latencyOperations = { "process", "punctuate", "create", "destroy" };
    String throughputOperation = "forward";
    String groupName = "stream-processor-node-metrics";
    Map<String, String> tags = Collections.singletonMap("processor-node-id", node.name());

    // init() should have registered a global ("all") sensor and a per-node sensor for each latency operation
    for (String operation : latencyOperations) {
        assertNotNull(metrics.getSensor(operation));
        assertNotNull(metrics.getSensor(name + "-" + operation));
    }
    assertNotNull(metrics.getSensor(throughputOperation));

    // each sensor should carry avg/max latency and a rate metric under the expected names
    for (String entity : entities) {
        for (String operation : latencyOperations) {
            assertNotNull(metrics.metrics().get(metrics.metricName(entity + "-" + operation + "-latency-avg", groupName, "The average latency in milliseconds of " + entity + " " + operation + " operation.", tags)));
            assertNotNull(metrics.metrics().get(metrics.metricName(entity + "-" + operation + "-latency-max", groupName, "The max latency in milliseconds of " + entity + " " + operation + " operation.", tags)));
            assertNotNull(metrics.metrics().get(metrics.metricName(entity + "-" + operation + "-rate", groupName, "The average number of occurrence of " + entity + " " + operation + " operation per second.", tags)));
        }
        assertNotNull(metrics.metrics().get(metrics.metricName(entity + "-" + throughputOperation + "-rate", groupName, "The average number of occurrence of " + entity + " " + throughputOperation + " operation per second.", tags)));
    }
}
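For context, here is a minimal self-contained sketch of how a latency sensor of this shape can be registered through the public Metrics API alone; the sensor name, metric names, and recorded value are illustrative, not Kafka Streams' actual registration code:

import java.util.Collections;
import java.util.Map;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.common.metrics.stats.Avg;
import org.apache.kafka.common.metrics.stats.Max;
import org.apache.kafka.common.metrics.stats.Rate;

public class LatencySensorSketch {
    public static void main(final String[] args) {
        final Metrics metrics = new Metrics();
        final String groupName = "stream-processor-node-metrics";
        final Map<String, String> tags = Collections.singletonMap("processor-node-id", "name");

        // register a sensor named after the operation, with the three stats the test asserts on
        final Sensor sensor = metrics.sensor("process");
        sensor.add(metrics.metricName("all-process-latency-avg", groupName, "The average latency in milliseconds of all process operation.", tags), new Avg());
        sensor.add(metrics.metricName("all-process-latency-max", groupName, "The max latency in milliseconds of all process operation.", tags), new Max());
        sensor.add(metrics.metricName("all-process-rate", groupName, "The average number of occurrence of all process operation per second.", tags), new Rate());

        sensor.record(42.0);  // record one latency measurement in milliseconds
        System.out.println(metrics.metrics().keySet());
        metrics.close();
    }
}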
Use of org.apache.kafka.common.metrics.Metrics in project kafka by apache.
From the class StreamPartitionAssignorTest, method testOnAssignment:
@Test
public void testOnAssignment() throws Exception {
    TopicPartition t2p3 = new TopicPartition("topic2", 3);

    TopologyBuilder builder = new TopologyBuilder();
    builder.addSource("source1", "topic1");
    builder.addSource("source2", "topic2");
    builder.addProcessor("processor", new MockProcessorSupplier(), "source1", "source2");

    UUID uuid = UUID.randomUUID();
    String client1 = "client1";
    StreamThread thread = new StreamThread(builder, config, mockClientSupplier, "test", client1, uuid, new Metrics(), Time.SYSTEM, new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0);
    partitionAssignor.configure(config.getConsumerConfigs(thread, "test", client1));

    List<TaskId> activeTaskList = Utils.mkList(task0, task3);
    Map<TaskId, Set<TopicPartition>> activeTasks = new HashMap<>();
    Map<TaskId, Set<TopicPartition>> standbyTasks = new HashMap<>();
    activeTasks.put(task0, Utils.mkSet(t1p0));
    activeTasks.put(task3, Utils.mkSet(t2p3));
    standbyTasks.put(task1, Utils.mkSet(t1p0));
    standbyTasks.put(task2, Utils.mkSet(t2p0));

    // build the assignment a group leader would send and feed it to onAssignment()
    AssignmentInfo info = new AssignmentInfo(activeTaskList, standbyTasks, new HashMap<HostInfo, Set<TopicPartition>>());
    PartitionAssignor.Assignment assignment = new PartitionAssignor.Assignment(Utils.mkList(t1p0, t2p3), info.encode());
    partitionAssignor.onAssignment(assignment);

    assertEquals(activeTasks, partitionAssignor.activeTasks());
    assertEquals(standbyTasks, partitionAssignor.standbyTasks());
}
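The userData bytes carry the task assignment: onAssignment() decodes them back into an AssignmentInfo. A minimal round-trip sketch, assuming this Kafka version's internal AssignmentInfo API (the three-argument constructor, public activeTasks/standbyTasks fields, and static decode(ByteBuffer)) as exercised by the test above; the task ids and topic are illustrative:

import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.streams.processor.TaskId;
import org.apache.kafka.streams.processor.internals.assignment.AssignmentInfo;
import org.apache.kafka.streams.state.HostInfo;

public class AssignmentInfoRoundTrip {
    public static void main(final String[] args) {
        final Map<TaskId, Set<TopicPartition>> standbyTasks = new HashMap<>();
        standbyTasks.put(new TaskId(0, 1), Collections.singleton(new TopicPartition("topic1", 0)));

        final AssignmentInfo info = new AssignmentInfo(
                Collections.singletonList(new TaskId(0, 0)),
                standbyTasks,
                new HashMap<HostInfo, Set<TopicPartition>>());

        // encode() packs the task assignment into the userData of the group assignment;
        // onAssignment() reverses this with decode()
        final ByteBuffer userData = info.encode();
        final AssignmentInfo decoded = AssignmentInfo.decode(userData);
        System.out.println(decoded.activeTasks + " " + decoded.standbyTasks);
    }
}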
Use of org.apache.kafka.common.metrics.Metrics in project kafka by apache.
From the class StreamPartitionAssignorTest, method testAssignWithStandbyReplicas:
@Test
public void testAssignWithStandbyReplicas() throws Exception {
    Properties props = configProps();
    props.setProperty(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG, "1");
    StreamsConfig config = new StreamsConfig(props);

    builder.addSource("source1", "topic1");
    builder.addSource("source2", "topic2");
    builder.addProcessor("processor", new MockProcessorSupplier(), "source1", "source2");

    List<String> topics = Utils.mkList("topic1", "topic2");
    Set<TaskId> allTasks = Utils.mkSet(task0, task1, task2);

    final Set<TaskId> prevTasks00 = Utils.mkSet(task0);
    final Set<TaskId> prevTasks01 = Utils.mkSet(task1);
    final Set<TaskId> prevTasks02 = Utils.mkSet(task2);
    final Set<TaskId> standbyTasks01 = Utils.mkSet(task1);
    final Set<TaskId> standbyTasks02 = Utils.mkSet(task2);
    final Set<TaskId> standbyTasks00 = Utils.mkSet(task0);

    UUID uuid1 = UUID.randomUUID();
    UUID uuid2 = UUID.randomUUID();
    String client1 = "client1";
    StreamThread thread10 = new StreamThread(builder, config, mockClientSupplier, "test", client1, uuid1, new Metrics(), Time.SYSTEM, new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0);
    partitionAssignor.configure(config.getConsumerConfigs(thread10, "test", client1));
    partitionAssignor.setInternalTopicManager(new MockInternalTopicManager(thread10.config, mockClientSupplier.restoreConsumer));

    Map<String, PartitionAssignor.Subscription> subscriptions = new HashMap<>();
    subscriptions.put("consumer10", new PartitionAssignor.Subscription(topics, new SubscriptionInfo(uuid1, prevTasks00, standbyTasks01, userEndPoint).encode()));
    subscriptions.put("consumer11", new PartitionAssignor.Subscription(topics, new SubscriptionInfo(uuid1, prevTasks01, standbyTasks02, userEndPoint).encode()));
    subscriptions.put("consumer20", new PartitionAssignor.Subscription(topics, new SubscriptionInfo(uuid2, prevTasks02, standbyTasks00, "any:9097").encode()));

    Map<String, PartitionAssignor.Assignment> assignments = partitionAssignor.assign(metadata, subscriptions);

    Set<TaskId> allActiveTasks = new HashSet<>();
    Set<TaskId> allStandbyTasks = new HashSet<>();

    // the first consumer
    AssignmentInfo info10 = checkAssignment(allTopics, assignments.get("consumer10"));
    allActiveTasks.addAll(info10.activeTasks);
    allStandbyTasks.addAll(info10.standbyTasks.keySet());

    // the second consumer
    AssignmentInfo info11 = checkAssignment(allTopics, assignments.get("consumer11"));
    allActiveTasks.addAll(info11.activeTasks);
    allStandbyTasks.addAll(info11.standbyTasks.keySet());

    // two consumers on the same process must not hold the same set of standby tasks
    assertNotEquals("same processId has same set of standby tasks", info11.standbyTasks.keySet(), info10.standbyTasks.keySet());

    // check active tasks assigned to the first client
    assertEquals(Utils.mkSet(task0, task1), new HashSet<>(allActiveTasks));
    assertEquals(Utils.mkSet(task2), new HashSet<>(allStandbyTasks));

    // the third consumer
    AssignmentInfo info20 = checkAssignment(allTopics, assignments.get("consumer20"));
    allActiveTasks.addAll(info20.activeTasks);
    allStandbyTasks.addAll(info20.standbyTasks.keySet());

    // all task ids are in the active tasks and also in the standby tasks
    assertEquals(3, allActiveTasks.size());
    assertEquals(allTasks, allActiveTasks);
    assertEquals(3, allStandbyTasks.size());
    assertEquals(allTasks, allStandbyTasks);
}
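Outside of tests, standby replicas are enabled the same way: through NUM_STANDBY_REPLICAS_CONFIG on an ordinary StreamsConfig. A minimal sketch, with an illustrative application id and bootstrap address:

import java.util.Properties;
import org.apache.kafka.streams.StreamsConfig;

public class StandbyConfigSketch {
    public static void main(final String[] args) {
        final Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "test");               // illustrative app id
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");  // illustrative address

        // one standby replica: each stateful task keeps a warm copy on another instance
        props.put(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG, "1");

        final StreamsConfig config = new StreamsConfig(props);
        System.out.println(config.getInt(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG));
    }
}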
Use of org.apache.kafka.common.metrics.Metrics in project kafka by apache.
From the class StreamTaskTest, method shouldCheckpointOffsetsOnCommit:
@SuppressWarnings("unchecked")
@Test
public void shouldCheckpointOffsetsOnCommit() throws Exception {
    final String storeName = "test";
    final String changelogTopic = ProcessorStateManager.storeChangelogTopic("appId", storeName);

    // a store that claims to be persistent, so the task will checkpoint its changelog offset
    final InMemoryKeyValueStore inMemoryStore = new InMemoryKeyValueStore(storeName, null, null) {
        @Override
        public void init(final ProcessorContext context, final StateStore root) {
            context.register(root, true, null);
        }

        @Override
        public boolean persistent() {
            return true;
        }
    };
    final ProcessorTopology topology = new ProcessorTopology(Collections.<ProcessorNode>emptyList(), Collections.<String, SourceNode>emptyMap(), Collections.<String, SinkNode>emptyMap(), Collections.<StateStore>singletonList(inMemoryStore), Collections.singletonMap(storeName, changelogTopic), Collections.<StateStore>emptyList());

    final TopicPartition partition = new TopicPartition(changelogTopic, 0);
    final NoOpRecordCollector recordCollector = new NoOpRecordCollector() {
        @Override
        public Map<TopicPartition, Long> offsets() {
            return Collections.singletonMap(partition, 543L);
        }
    };

    restoreStateConsumer.updatePartitions(changelogTopic, Collections.singletonList(new PartitionInfo(changelogTopic, 0, null, new Node[0], new Node[0])));
    restoreStateConsumer.updateEndOffsets(Collections.singletonMap(partition, 0L));
    restoreStateConsumer.updateBeginningOffsets(Collections.singletonMap(partition, 0L));

    final StreamsMetrics streamsMetrics = new MockStreamsMetrics(new Metrics());
    final TaskId taskId = new TaskId(0, 0);
    final MockTime time = new MockTime();
    final StreamsConfig config = createConfig(baseDir);
    final StreamTask streamTask = new StreamTask(taskId, "appId", partitions, topology, consumer, changelogReader, config, streamsMetrics, stateDirectory, new ThreadCache("testCache", 0, streamsMetrics), time, recordCollector);

    time.sleep(config.getLong(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG));
    streamTask.commit();

    // the checkpoint stores the collector offset plus one: the next offset to restore from
    final OffsetCheckpoint checkpoint = new OffsetCheckpoint(new File(stateDirectory.directoryForTask(taskId), ProcessorStateManager.CHECKPOINT_FILE_NAME));
    assertThat(checkpoint.read(), equalTo(Collections.singletonMap(partition, 544L)));
}
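The 544L expectation is the collector's offset 543 plus one, i.e. the next changelog offset to resume restoration from. A minimal round-trip sketch of the checkpoint file itself, assuming the internal OffsetCheckpoint class used by the test (its package may differ across Kafka versions); the topic name is illustrative:

import java.io.File;
import java.util.Collections;
import java.util.Map;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.streams.state.internals.OffsetCheckpoint;

public class CheckpointSketch {
    public static void main(final String[] args) throws Exception {
        final File file = File.createTempFile("task-0_0", ".checkpoint");
        final OffsetCheckpoint checkpoint = new OffsetCheckpoint(file);

        // the task writes collector offset + 1: the next offset to resume restoration from
        checkpoint.write(Collections.singletonMap(new TopicPartition("appId-test-changelog", 0), 544L));
        final Map<TopicPartition, Long> read = checkpoint.read();
        System.out.println(read);  // {appId-test-changelog-0=544}
    }
}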
Use of org.apache.kafka.common.metrics.Metrics in project kafka by apache.
From the class StreamTaskTest, method shouldFlushRecordCollectorOnFlushState:
@Test
public void shouldFlushRecordCollectorOnFlushState() throws Exception {
    final AtomicBoolean flushed = new AtomicBoolean(false);

    // probe collector: records whether flush() was invoked
    final NoOpRecordCollector recordCollector = new NoOpRecordCollector() {
        @Override
        public void flush() {
            flushed.set(true);
        }
    };
    final StreamsMetrics streamsMetrics = new MockStreamsMetrics(new Metrics());
    final StreamTask streamTask = new StreamTask(taskId00, "appId", partitions, topology, consumer, changelogReader, createConfig(baseDir), streamsMetrics, stateDirectory, testCache, time, recordCollector);

    streamTask.flushState();

    assertTrue(flushed.get());
}
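Each of these tests seeds MockStreamsMetrics with a fresh Metrics registry. If you want to observe what a component registers against such a registry, a throwaway MetricsReporter works; a minimal sketch using only the public Metrics API, with illustrative sensor and metric names:

import java.util.Collections;
import java.util.List;
import java.util.Map;
import org.apache.kafka.common.metrics.KafkaMetric;
import org.apache.kafka.common.metrics.MetricConfig;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.metrics.MetricsReporter;
import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.common.metrics.stats.Rate;
import org.apache.kafka.common.utils.Time;

public class ReporterSketch {
    public static void main(final String[] args) {
        final MetricsReporter reporter = new MetricsReporter() {
            @Override public void init(final List<KafkaMetric> metrics) { }
            @Override public void metricChange(final KafkaMetric metric) {
                // called whenever a metric is added to or updated in the registry
                System.out.println("metric registered: " + metric.metricName());
            }
            @Override public void metricRemoval(final KafkaMetric metric) { }
            @Override public void close() { }
            @Override public void configure(final Map<String, ?> configs) { }
        };
        final Metrics metrics = new Metrics(new MetricConfig(), Collections.singletonList(reporter), Time.SYSTEM);
        final Sensor sensor = metrics.sensor("flush");  // illustrative sensor name
        sensor.add(metrics.metricName("flush-rate", "test-group", "illustrative rate metric"), new Rate());
        metrics.close();
    }
}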