
Example 16 with MockClientSupplier

Use of org.apache.kafka.test.MockClientSupplier in project kafka by apache.

From the class StreamThreadTest, method testHandingOverTaskFromOneToAnotherThread.

@Test
public void testHandingOverTaskFromOneToAnotherThread() throws Exception {
    final TopologyBuilder builder = new TopologyBuilder();
    builder.addStateStore(Stores.create("store").withByteArrayKeys().withByteArrayValues().persistent().build());
    final StreamsConfig config = new StreamsConfig(configProps());
    final MockClientSupplier mockClientSupplier = new MockClientSupplier();
    mockClientSupplier.consumer.assign(Arrays.asList(new TopicPartition(TOPIC, 0), new TopicPartition(TOPIC, 1)));
    final StreamThread thread1 = new StreamThread(builder, config, mockClientSupplier, applicationId, clientId + 1, processId, new Metrics(), Time.SYSTEM, new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0);
    final StreamThread thread2 = new StreamThread(builder, config, mockClientSupplier, applicationId, clientId + 2, processId, new Metrics(), Time.SYSTEM, new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0);
    final Map<TaskId, Set<TopicPartition>> task0 = Collections.singletonMap(new TaskId(0, 0), task0Assignment);
    final Map<TaskId, Set<TopicPartition>> task1 = Collections.singletonMap(new TaskId(0, 1), task1Assignment);
    final Map<TaskId, Set<TopicPartition>> thread1Assignment = new HashMap<>(task0);
    final Map<TaskId, Set<TopicPartition>> thread2Assignment = new HashMap<>(task1);
    thread1.partitionAssignor(new MockStreamsPartitionAssignor(thread1Assignment));
    thread2.partitionAssignor(new MockStreamsPartitionAssignor(thread2Assignment));
    // revoke (to get threads in correct state)
    thread1.rebalanceListener.onPartitionsRevoked(Collections.<TopicPartition>emptySet());
    thread2.rebalanceListener.onPartitionsRevoked(Collections.<TopicPartition>emptySet());
    // assign
    thread1.rebalanceListener.onPartitionsAssigned(task0Assignment);
    thread2.rebalanceListener.onPartitionsAssigned(task1Assignment);
    final Set<TaskId> originalTaskAssignmentThread1 = new HashSet<>(thread1.tasks().keySet());
    final Set<TaskId> originalTaskAssignmentThread2 = new HashSet<>(thread2.tasks().keySet());
    // revoke (task will be suspended)
    thread1.rebalanceListener.onPartitionsRevoked(task0Assignment);
    thread2.rebalanceListener.onPartitionsRevoked(task1Assignment);
    // assign reverted
    thread1Assignment.clear();
    thread1Assignment.putAll(task1);
    thread2Assignment.clear();
    thread2Assignment.putAll(task0);
    Thread runIt = new Thread(new Runnable() {

        @Override
        public void run() {
            thread1.rebalanceListener.onPartitionsAssigned(task1Assignment);
        }
    });
    runIt.start();
    thread2.rebalanceListener.onPartitionsAssigned(task0Assignment);
    runIt.join();
    assertThat(thread1.tasks().keySet(), equalTo(originalTaskAssignmentThread2));
    assertThat(thread2.tasks().keySet(), equalTo(originalTaskAssignmentThread1));
    assertThat(thread1.prevActiveTasks(), equalTo(originalTaskAssignmentThread1));
    assertThat(thread2.prevActiveTasks(), equalTo(originalTaskAssignmentThread2));
}
Also used : TaskId(org.apache.kafka.streams.processor.TaskId) Set(java.util.Set) HashSet(java.util.HashSet) TopologyBuilder(org.apache.kafka.streams.processor.TopologyBuilder) HashMap(java.util.HashMap) Metrics(org.apache.kafka.common.metrics.Metrics) StreamsMetrics(org.apache.kafka.streams.StreamsMetrics) MockClientSupplier(org.apache.kafka.test.MockClientSupplier) TopicPartition(org.apache.kafka.common.TopicPartition) StreamsConfig(org.apache.kafka.streams.StreamsConfig) HashSet(java.util.HashSet) Test(org.junit.Test)
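
The StreamThreadTest examples above and below build their StreamsConfig from a configProps() helper that these snippets do not show. The sketch below is a hypothetical stand-in, not the helper from Kafka's test sources: it only sets the keys a StreamsConfig minimally requires (application.id and bootstrap.servers) plus the state directory that Example 20 later reads back via StreamsConfig.STATE_DIR_CONFIG.

import java.util.Properties;

import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.test.TestUtils;

// Hypothetical stand-in for the configProps() helper used by the tests above;
// the real StreamThreadTest defines its own version with additional settings.
public class TestConfigSketch {

    static Properties configProps() {
        final Properties props = new Properties();
        // StreamsConfig requires an application id and bootstrap servers, even though
        // MockClientSupplier guarantees no broker is ever contacted.
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "stream-thread-test");
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        // Example 20 reads this value back to build its StateDirectory.
        props.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getAbsolutePath());
        return props;
    }
}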

Example 17 with MockClientSupplier

Use of org.apache.kafka.test.MockClientSupplier in project kafka by apache.

From the class StreamThreadTest, method testMetrics.

@Test
public void testMetrics() throws Exception {
    TopologyBuilder builder = new TopologyBuilder().setApplicationId("MetricsApp");
    StreamsConfig config = new StreamsConfig(configProps());
    MockClientSupplier clientSupplier = new MockClientSupplier();
    Metrics metrics = new Metrics();
    StreamThread thread = new StreamThread(builder, config, clientSupplier, applicationId, clientId, processId, metrics, new MockTime(), new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0);
    String defaultGroupName = "stream-metrics";
    String defaultPrefix = "thread." + thread.threadClientId();
    Map<String, String> defaultTags = Collections.singletonMap("client-id", thread.threadClientId());
    assertNotNull(metrics.getSensor(defaultPrefix + ".commit-latency"));
    assertNotNull(metrics.getSensor(defaultPrefix + ".poll-latency"));
    assertNotNull(metrics.getSensor(defaultPrefix + ".process-latency"));
    assertNotNull(metrics.getSensor(defaultPrefix + ".punctuate-latency"));
    assertNotNull(metrics.getSensor(defaultPrefix + ".task-created"));
    assertNotNull(metrics.getSensor(defaultPrefix + ".task-closed"));
    assertNotNull(metrics.getSensor(defaultPrefix + ".skipped-records"));
    assertNotNull(metrics.metrics().get(metrics.metricName("commit-latency-avg", defaultGroupName, "The average commit time in ms", defaultTags)));
    assertNotNull(metrics.metrics().get(metrics.metricName("commit-latency-max", defaultGroupName, "The maximum commit time in ms", defaultTags)));
    assertNotNull(metrics.metrics().get(metrics.metricName("commit-rate", defaultGroupName, "The average per-second number of commit calls", defaultTags)));
    assertNotNull(metrics.metrics().get(metrics.metricName("poll-latency-avg", defaultGroupName, "The average poll time in ms", defaultTags)));
    assertNotNull(metrics.metrics().get(metrics.metricName("poll-latency-max", defaultGroupName, "The maximum poll time in ms", defaultTags)));
    assertNotNull(metrics.metrics().get(metrics.metricName("poll-rate", defaultGroupName, "The average per-second number of record-poll calls", defaultTags)));
    assertNotNull(metrics.metrics().get(metrics.metricName("process-latency-avg", defaultGroupName, "The average process time in ms", defaultTags)));
    assertNotNull(metrics.metrics().get(metrics.metricName("process-latency-max", defaultGroupName, "The maximum process time in ms", defaultTags)));
    assertNotNull(metrics.metrics().get(metrics.metricName("process-rate", defaultGroupName, "The average per-second number of process calls", defaultTags)));
    assertNotNull(metrics.metrics().get(metrics.metricName("punctuate-latency-avg", defaultGroupName, "The average punctuate time in ms", defaultTags)));
    assertNotNull(metrics.metrics().get(metrics.metricName("punctuate-latency-max", defaultGroupName, "The maximum punctuate time in ms", defaultTags)));
    assertNotNull(metrics.metrics().get(metrics.metricName("punctuate-rate", defaultGroupName, "The average per-second number of punctuate calls", defaultTags)));
    assertNotNull(metrics.metrics().get(metrics.metricName("task-created-rate", defaultGroupName, "The average per-second number of newly created tasks", defaultTags)));
    assertNotNull(metrics.metrics().get(metrics.metricName("task-closed-rate", defaultGroupName, "The average per-second number of closed tasks", defaultTags)));
    assertNotNull(metrics.metrics().get(metrics.metricName("skipped-records-rate", defaultGroupName, "The average per-second number of skipped records.", defaultTags)));
}
Also used : Metrics(org.apache.kafka.common.metrics.Metrics) StreamsMetrics(org.apache.kafka.streams.StreamsMetrics) TopologyBuilder(org.apache.kafka.streams.processor.TopologyBuilder) MockClientSupplier(org.apache.kafka.test.MockClientSupplier) MockTime(org.apache.kafka.common.utils.MockTime) StreamsConfig(org.apache.kafka.streams.StreamsConfig) Test(org.junit.Test)
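
The sensor and metric names asserted above live in the Kafka Metrics registry. As a minimal illustration of why the getSensor and metricName lookups succeed, the hypothetical snippet below registers one latency sensor by hand through the public Metrics API; the names mirror the test, but none of this is Streams code.

import java.util.Collections;
import java.util.Map;

import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.common.metrics.stats.Avg;
import org.apache.kafka.common.metrics.stats.Max;

// Minimal sketch of how a sensor and its metrics end up in the registry that
// testMetrics() inspects; the names copy the test, the wiring is illustrative only.
public class SensorRegistrationSketch {

    public static void main(final String[] args) {
        final Metrics metrics = new Metrics();
        final Map<String, String> tags = Collections.singletonMap("client-id", "client-1");

        // A sensor is registered under a dotted name ...
        final Sensor commitLatency = metrics.sensor("thread.client-1.commit-latency");
        // ... and each stat added to it becomes a named metric in the same registry.
        commitLatency.add(metrics.metricName("commit-latency-avg", "stream-metrics",
                "The average commit time in ms", tags), new Avg());
        commitLatency.add(metrics.metricName("commit-latency-max", "stream-metrics",
                "The maximum commit time in ms", tags), new Max());

        commitLatency.record(12.0);

        // Both lookup styles used by the test now return non-null values.
        System.out.println(metrics.getSensor("thread.client-1.commit-latency") != null); // true
        final MetricName avg = metrics.metricName("commit-latency-avg", "stream-metrics",
                "The average commit time in ms", tags);
        System.out.println(metrics.metrics().get(avg) != null); // true
    }
}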

Example 18 with MockClientSupplier

Use of org.apache.kafka.test.MockClientSupplier in project kafka by apache.

From the class StreamPartitionAssignorTest, method testAssignEmptyMetadata.

@Test
public void testAssignEmptyMetadata() throws Exception {
    builder.addSource("source1", "topic1");
    builder.addSource("source2", "topic2");
    builder.addProcessor("processor", new MockProcessorSupplier(), "source1", "source2");
    List<String> topics = Utils.mkList("topic1", "topic2");
    Set<TaskId> allTasks = Utils.mkSet(task0, task1, task2);
    final Set<TaskId> prevTasks10 = Utils.mkSet(task0);
    final Set<TaskId> standbyTasks10 = Utils.mkSet(task1);
    final Cluster emptyMetadata = new Cluster("cluster", Collections.singletonList(Node.noNode()), Collections.<PartitionInfo>emptySet(), Collections.<String>emptySet(), Collections.<String>emptySet());
    UUID uuid1 = UUID.randomUUID();
    String client1 = "client1";
    StreamThread thread10 = new StreamThread(builder, config, new MockClientSupplier(), "test", client1, uuid1, new Metrics(), Time.SYSTEM, new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0);
    partitionAssignor.configure(config.getConsumerConfigs(thread10, "test", client1));
    Map<String, PartitionAssignor.Subscription> subscriptions = new HashMap<>();
    subscriptions.put("consumer10", new PartitionAssignor.Subscription(topics, new SubscriptionInfo(uuid1, prevTasks10, standbyTasks10, userEndPoint).encode()));
    // initially metadata is empty
    Map<String, PartitionAssignor.Assignment> assignments = partitionAssignor.assign(emptyMetadata, subscriptions);
    // check assigned partitions
    assertEquals(Collections.<TopicPartition>emptySet(), new HashSet<>(assignments.get("consumer10").partitions()));
    // check assignment info
    Set<TaskId> allActiveTasks = new HashSet<>();
    AssignmentInfo info10 = checkAssignment(Collections.<String>emptySet(), assignments.get("consumer10"));
    allActiveTasks.addAll(info10.activeTasks);
    assertEquals(0, allActiveTasks.size());
    assertEquals(Collections.<TaskId>emptySet(), new HashSet<>(allActiveTasks));
    // then metadata gets populated
    assignments = partitionAssignor.assign(metadata, subscriptions);
    // check assigned partitions
    assertEquals(Utils.mkSet(t1p0, t2p0, t1p1, t2p1, t1p2, t2p2), new HashSet<>(assignments.get("consumer10").partitions()));
    // the first consumer
    info10 = checkAssignment(allTopics, assignments.get("consumer10"));
    allActiveTasks.addAll(info10.activeTasks);
    assertEquals(3, allActiveTasks.size());
    assertEquals(allTasks, new HashSet<>(allActiveTasks));
    assertEquals(3, allActiveTasks.size());
    assertEquals(allTasks, allActiveTasks);
}
Also used : TaskId(org.apache.kafka.streams.processor.TaskId) HashMap(java.util.HashMap) Cluster(org.apache.kafka.common.Cluster) SubscriptionInfo(org.apache.kafka.streams.processor.internals.assignment.SubscriptionInfo) AssignmentInfo(org.apache.kafka.streams.processor.internals.assignment.AssignmentInfo) Metrics(org.apache.kafka.common.metrics.Metrics) MockProcessorSupplier(org.apache.kafka.test.MockProcessorSupplier) MockClientSupplier(org.apache.kafka.test.MockClientSupplier) PartitionAssignor(org.apache.kafka.clients.consumer.internals.PartitionAssignor) UUID(java.util.UUID) HashSet(java.util.HashSet) Test(org.junit.Test)
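
The test drives the assignor with metadata that names a cluster and a placeholder node but carries no topic partitions, so the first assignment is necessarily empty. Below is a small, self-contained illustration of that "empty metadata" Cluster; the constructor call simply mirrors the one in the test, and none of this is assignor code.

import java.util.Collections;

import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.PartitionInfo;

// Sketch of the empty-metadata Cluster used above: one placeholder bootstrap node,
// no partition info, no topics, so an assignor has nothing to hand out yet.
public class EmptyMetadataSketch {

    public static void main(final String[] args) {
        final Cluster emptyMetadata = new Cluster("cluster",
                Collections.singletonList(Node.noNode()),
                Collections.<PartitionInfo>emptySet(),
                Collections.<String>emptySet(),
                Collections.<String>emptySet());

        System.out.println(emptyMetadata.topics().isEmpty()); // true: no topic metadata yet
        System.out.println(emptyMetadata.nodes());            // only the noNode() placeholder
    }
}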

Example 19 with MockClientSupplier

Use of org.apache.kafka.test.MockClientSupplier in project kafka by apache.

From the class StreamThreadTest, method testInjectClients.

@Test
public void testInjectClients() {
    TopologyBuilder builder = new TopologyBuilder().setApplicationId("X");
    StreamsConfig config = new StreamsConfig(configProps());
    MockClientSupplier clientSupplier = new MockClientSupplier();
    StreamThread thread = new StreamThread(builder, config, clientSupplier, applicationId, clientId, processId, new Metrics(), new MockTime(), new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0);
    assertSame(clientSupplier.producer, thread.producer);
    assertSame(clientSupplier.consumer, thread.consumer);
    assertSame(clientSupplier.restoreConsumer, thread.restoreConsumer);
}
Also used : Metrics(org.apache.kafka.common.metrics.Metrics) StreamsMetrics(org.apache.kafka.streams.StreamsMetrics) TopologyBuilder(org.apache.kafka.streams.processor.TopologyBuilder) MockClientSupplier(org.apache.kafka.test.MockClientSupplier) MockTime(org.apache.kafka.common.utils.MockTime) StreamsConfig(org.apache.kafka.streams.StreamsConfig) Test(org.junit.Test)
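
testInjectClients shows the whole point of MockClientSupplier: StreamThread obtains its producer, consumer, and restore consumer from a KafkaClientSupplier, so handing in a supplier backed by MockProducer and MockConsumer lets the test reach those clients directly with assertSame. The sketch below is a conceptual equivalent, not the class from Kafka's test sources, and it assumes the three-method KafkaClientSupplier of the Kafka version these tests target (0.10.x); newer versions add further factory methods such as one for an admin client.

import java.util.Map;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.clients.producer.MockProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.common.serialization.ByteArraySerializer;
import org.apache.kafka.streams.KafkaClientSupplier;

// Conceptual sketch of a MockClientSupplier-style class: every factory method hands
// back the same pre-built mock, so a test can assert on and manipulate the clients
// a StreamThread ends up using.
public class SketchClientSupplier implements KafkaClientSupplier {

    public final MockProducer<byte[], byte[]> producer =
            new MockProducer<>(true, new ByteArraySerializer(), new ByteArraySerializer());
    public final MockConsumer<byte[], byte[]> consumer =
            new MockConsumer<>(OffsetResetStrategy.EARLIEST);
    public final MockConsumer<byte[], byte[]> restoreConsumer =
            new MockConsumer<>(OffsetResetStrategy.LATEST);

    @Override
    public Producer<byte[], byte[]> getProducer(final Map<String, Object> config) {
        return producer;
    }

    @Override
    public Consumer<byte[], byte[]> getConsumer(final Map<String, Object> config) {
        return consumer;
    }

    @Override
    public Consumer<byte[], byte[]> getRestoreConsumer(final Map<String, Object> config) {
        return restoreConsumer;
    }
}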

Example 20 with MockClientSupplier

Use of org.apache.kafka.test.MockClientSupplier in project kafka by apache.

From the class StreamThreadTest, method shouldNotViolateAtLeastOnceWhenAnExceptionOccursOnTaskFlushDuringShutdown.

@Test
public void shouldNotViolateAtLeastOnceWhenAnExceptionOccursOnTaskFlushDuringShutdown() throws Exception {
    final KStreamBuilder builder = new KStreamBuilder();
    builder.setApplicationId(applicationId);
    final MockStateStoreSupplier.MockStateStore stateStore = new MockStateStoreSupplier.MockStateStore("foo", false);
    builder.stream("t1").groupByKey().count(new MockStateStoreSupplier(stateStore));
    final StreamsConfig config = new StreamsConfig(configProps());
    final MockClientSupplier clientSupplier = new MockClientSupplier();
    final TestStreamTask testStreamTask = new TestStreamTask(new TaskId(0, 0), applicationId, Utils.mkSet(new TopicPartition("t1", 0)), builder.build(0), clientSupplier.consumer, clientSupplier.producer, clientSupplier.restoreConsumer, config, new MockStreamsMetrics(new Metrics()), new StateDirectory(applicationId, config.getString(StreamsConfig.STATE_DIR_CONFIG), time)) {

        @Override
        public void flushState() {
            throw new RuntimeException("KABOOM!");
        }
    };
    final StreamThread thread = new StreamThread(builder, config, clientSupplier, applicationId, clientId, processId, new Metrics(), new MockTime(), new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0) {

        @Override
        protected StreamTask createStreamTask(final TaskId id, final Collection<TopicPartition> partitions) {
            return testStreamTask;
        }
    };
    final Map<TaskId, Set<TopicPartition>> activeTasks = new HashMap<>();
    activeTasks.put(testStreamTask.id, testStreamTask.partitions);
    thread.partitionAssignor(new MockStreamsPartitionAssignor(activeTasks));
    thread.rebalanceListener.onPartitionsRevoked(Collections.<TopicPartition>emptyList());
    thread.rebalanceListener.onPartitionsAssigned(testStreamTask.partitions);
    // store should have been opened
    assertTrue(stateStore.isOpen());
    thread.start();
    thread.close();
    thread.join();
    assertFalse("task shouldn't have been committed as there was an exception during shutdown", testStreamTask.committed);
    // store should be closed even if we had an exception
    assertFalse(stateStore.isOpen());
}
Also used : KStreamBuilder(org.apache.kafka.streams.kstream.KStreamBuilder) TaskId(org.apache.kafka.streams.processor.TaskId) Set(java.util.Set) HashSet(java.util.HashSet) HashMap(java.util.HashMap) Metrics(org.apache.kafka.common.metrics.Metrics) StreamsMetrics(org.apache.kafka.streams.StreamsMetrics) MockClientSupplier(org.apache.kafka.test.MockClientSupplier) TopicPartition(org.apache.kafka.common.TopicPartition) Collection(java.util.Collection) MockStateStoreSupplier(org.apache.kafka.test.MockStateStoreSupplier) MockTime(org.apache.kafka.common.utils.MockTime) StreamsConfig(org.apache.kafka.streams.StreamsConfig) Test(org.junit.Test)
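
The guarantee this test pins down is purely about ordering: offsets may only be committed after state has been flushed, so a flush failure during shutdown must leave the task uncommitted while its stores are still closed. Below is a tiny stand-alone illustration of that invariant in plain Java; none of it is Streams code.

// Stand-alone illustration of the at-least-once shutdown invariant checked above;
// nothing here touches Kafka classes.
public class AtLeastOnceSketch {

    interface Task {
        void flushState();
    }

    static boolean committed = false;

    static void closeTask(final Task task) {
        task.flushState();   // may throw, like the "KABOOM!" task in the test
        committed = true;    // only reached when the flush succeeded
    }

    public static void main(final String[] args) {
        try {
            closeTask(new Task() {
                @Override
                public void flushState() {
                    throw new RuntimeException("KABOOM!");
                }
            });
        } catch (final RuntimeException expected) {
            // swallowed here so the example completes, mirroring how the test's
            // shutdown still manages to close the state store
        }
        System.out.println("committed = " + committed); // prints: committed = false
    }
}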

Aggregations

MockClientSupplier (org.apache.kafka.test.MockClientSupplier): 28 uses
Test (org.junit.Test): 23 uses
TaskId (org.apache.kafka.streams.processor.TaskId): 19 uses
StreamsConfig (org.apache.kafka.streams.StreamsConfig): 17 uses
HashMap (java.util.HashMap): 16 uses
Metrics (org.apache.kafka.common.metrics.Metrics): 16 uses
MockTime (org.apache.kafka.common.utils.MockTime): 16 uses
StreamsMetrics (org.apache.kafka.streams.StreamsMetrics): 14 uses
TopicPartition (org.apache.kafka.common.TopicPartition): 13 uses
HashSet (java.util.HashSet): 12 uses
Map (java.util.Map): 10 uses
Set (java.util.Set): 9 uses
Collection (java.util.Collection): 8 uses
TopologyBuilder (org.apache.kafka.streams.processor.TopologyBuilder): 8 uses
KStreamBuilder (org.apache.kafka.streams.kstream.KStreamBuilder): 7 uses
Properties (java.util.Properties): 5 uses
Utils.mkMap (org.apache.kafka.common.utils.Utils.mkMap): 5 uses
PartitionInfo (org.apache.kafka.common.PartitionInfo): 4 uses
MockProcessorSupplier (org.apache.kafka.test.MockProcessorSupplier): 4 uses
Before (org.junit.Before): 4 uses