
Example 1 with StreamsConfig

Use of org.apache.kafka.streams.StreamsConfig in project kafka by apache.

From class KTableKTableJoinIntegrationTest, method prepareTopology:

private KafkaStreams prepareTopology() {
    final KStreamBuilder builder = new KStreamBuilder();
    final KTable<String, String> table1 = builder.table(TABLE_1, TABLE_1);
    final KTable<String, String> table2 = builder.table(TABLE_2, TABLE_2);
    final KTable<String, String> table3 = builder.table(TABLE_3, TABLE_3);
    join(join(table1, table2, joinType1), table3, joinType2).to(OUTPUT);
    return new KafkaStreams(builder, new StreamsConfig(streamsConfig));
}
Also used: KStreamBuilder (org.apache.kafka.streams.kstream.KStreamBuilder), KafkaStreams (org.apache.kafka.streams.KafkaStreams), StreamsConfig (org.apache.kafka.streams.StreamsConfig)
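
This example uses the old (pre-1.0) Streams API, in which KafkaStreams is constructed from a KStreamBuilder plus a StreamsConfig. The streamsConfig Properties object is populated elsewhere in the test class; as a minimal sketch, it needs at least the two mandatory settings below (the application id and broker address are placeholder values, and the serde lines are optional defaults using this era's KEY_SERDE/VALUE_SERDE config names):

Properties streamsConfig = new Properties();
// Mandatory: StreamsConfig rejects a configuration missing either of these.
streamsConfig.put(StreamsConfig.APPLICATION_ID_CONFIG, "ktable-ktable-join-test");
streamsConfig.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
// Optional: default String serdes, so table() and to() need no explicit Serde arguments.
streamsConfig.put(StreamsConfig.KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
streamsConfig.put(StreamsConfig.VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());

Also used: Properties (java.util.Properties), Serdes (org.apache.kafka.common.serialization.Serdes), StreamsConfig (org.apache.kafka.streams.StreamsConfig)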

Example 2 with StreamsConfig

Use of org.apache.kafka.streams.StreamsConfig in project kafka by apache.

From class TopologyBuilderTest, method shouldThroughOnUnassignedStateStoreAccess:

@Test(expected = TopologyBuilderException.class)
public void shouldThroughOnUnassignedStateStoreAccess() {
    final String sourceNodeName = "source";
    final String goodNodeName = "goodGuy";
    final String badNodeName = "badGuy";
    final Properties config = new Properties();
    config.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "host:1");
    config.put(StreamsConfig.APPLICATION_ID_CONFIG, "appId");
    final StreamsConfig streamsConfig = new StreamsConfig(config);
    try {
        final TopologyBuilder builder = new TopologyBuilder();
        builder.addSource(sourceNodeName, "topic")
            .addProcessor(goodNodeName, new LocalMockProcessorSupplier(), sourceNodeName)
            .addStateStore(Stores.create(LocalMockProcessorSupplier.STORE_NAME)
                .withStringKeys()
                .withStringValues()
                .inMemory()
                .build(), goodNodeName)
            .addProcessor(badNodeName, new LocalMockProcessorSupplier(), sourceNodeName);
        final ProcessorTopologyTestDriver driver = new ProcessorTopologyTestDriver(streamsConfig, builder);
        driver.process("topic", null, null);
    } catch (final StreamsException e) {
        final Throwable cause = e.getCause();
        if (cause instanceof TopologyBuilderException
            && cause.getMessage().equals("Invalid topology building: Processor " + badNodeName
                + " has no access to StateStore " + LocalMockProcessorSupplier.STORE_NAME)) {
            throw (TopologyBuilderException) cause;
        } else {
            throw new RuntimeException("Expected a different exception, but caught:", e);
        }
    }
}
Also used: TopologyBuilderException (org.apache.kafka.streams.errors.TopologyBuilderException), ProcessorTopologyTestDriver (org.apache.kafka.test.ProcessorTopologyTestDriver), StreamsException (org.apache.kafka.streams.errors.StreamsException), Properties (java.util.Properties), StreamsConfig (org.apache.kafka.streams.StreamsConfig), Test (org.junit.Test)
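
The driver call fails because badGuy was added without being connected to the state store; the test unwraps the StreamsException and rethrows the expected TopologyBuilderException. For contrast, a minimal sketch of the repaired topology, assuming this era's TopologyBuilder.connectProcessorAndStateStores method and the test's own LocalMockProcessorSupplier helper:

final TopologyBuilder builder = new TopologyBuilder();
builder.addSource(sourceNodeName, "topic")
    .addProcessor(goodNodeName, new LocalMockProcessorSupplier(), sourceNodeName)
    .addStateStore(Stores.create(LocalMockProcessorSupplier.STORE_NAME)
        .withStringKeys().withStringValues().inMemory().build(), goodNodeName)
    .addProcessor(badNodeName, new LocalMockProcessorSupplier(), sourceNodeName)
    // The missing step: grant badNodeName access to the store so that process()
    // no longer fails with "... has no access to StateStore ...".
    .connectProcessorAndStateStores(badNodeName, LocalMockProcessorSupplier.STORE_NAME);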

Example 3 with StreamsConfig

Use of org.apache.kafka.streams.StreamsConfig in project kafka by apache.

From class StreamPartitionAssignorTest, method testAssignWithStandbyReplicas:

@Test
public void testAssignWithStandbyReplicas() throws Exception {
    Properties props = configProps();
    props.setProperty(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG, "1");
    StreamsConfig config = new StreamsConfig(props);
    builder.addSource("source1", "topic1");
    builder.addSource("source2", "topic2");
    builder.addProcessor("processor", new MockProcessorSupplier(), "source1", "source2");
    List<String> topics = Utils.mkList("topic1", "topic2");
    Set<TaskId> allTasks = Utils.mkSet(task0, task1, task2);
    final Set<TaskId> prevTasks00 = Utils.mkSet(task0);
    final Set<TaskId> prevTasks01 = Utils.mkSet(task1);
    final Set<TaskId> prevTasks02 = Utils.mkSet(task2);
    final Set<TaskId> standbyTasks01 = Utils.mkSet(task1);
    final Set<TaskId> standbyTasks02 = Utils.mkSet(task2);
    final Set<TaskId> standbyTasks00 = Utils.mkSet(task0);
    UUID uuid1 = UUID.randomUUID();
    UUID uuid2 = UUID.randomUUID();
    String client1 = "client1";
    StreamThread thread10 = new StreamThread(builder, config, mockClientSupplier, "test", client1, uuid1,
        new Metrics(), Time.SYSTEM, new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0);
    partitionAssignor.configure(config.getConsumerConfigs(thread10, "test", client1));
    partitionAssignor.setInternalTopicManager(new MockInternalTopicManager(thread10.config, mockClientSupplier.restoreConsumer));
    Map<String, PartitionAssignor.Subscription> subscriptions = new HashMap<>();
    subscriptions.put("consumer10", new PartitionAssignor.Subscription(topics, new SubscriptionInfo(uuid1, prevTasks00, standbyTasks01, userEndPoint).encode()));
    subscriptions.put("consumer11", new PartitionAssignor.Subscription(topics, new SubscriptionInfo(uuid1, prevTasks01, standbyTasks02, userEndPoint).encode()));
    subscriptions.put("consumer20", new PartitionAssignor.Subscription(topics, new SubscriptionInfo(uuid2, prevTasks02, standbyTasks00, "any:9097").encode()));
    Map<String, PartitionAssignor.Assignment> assignments = partitionAssignor.assign(metadata, subscriptions);
    Set<TaskId> allActiveTasks = new HashSet<>();
    Set<TaskId> allStandbyTasks = new HashSet<>();
    // the first consumer
    AssignmentInfo info10 = checkAssignment(allTopics, assignments.get("consumer10"));
    allActiveTasks.addAll(info10.activeTasks);
    allStandbyTasks.addAll(info10.standbyTasks.keySet());
    // the second consumer
    AssignmentInfo info11 = checkAssignment(allTopics, assignments.get("consumer11"));
    allActiveTasks.addAll(info11.activeTasks);
    allStandbyTasks.addAll(info11.standbyTasks.keySet());
    assertNotEquals("same processId has same set of standby tasks", info11.standbyTasks.keySet(), info10.standbyTasks.keySet());
    // check active tasks assigned to the first client
    assertEquals(Utils.mkSet(task0, task1), new HashSet<>(allActiveTasks));
    assertEquals(Utils.mkSet(task2), new HashSet<>(allStandbyTasks));
    // the third consumer
    AssignmentInfo info20 = checkAssignment(allTopics, assignments.get("consumer20"));
    allActiveTasks.addAll(info20.activeTasks);
    allStandbyTasks.addAll(info20.standbyTasks.keySet());
    // all task ids are in the active tasks and also in the standby tasks
    assertEquals(3, allActiveTasks.size());
    assertEquals(allTasks, allActiveTasks);
    assertEquals(3, allStandbyTasks.size());
    assertEquals(allTasks, allStandbyTasks);
}
Also used: TaskId (org.apache.kafka.streams.processor.TaskId), HashMap (java.util.HashMap), MockInternalTopicManager (org.apache.kafka.test.MockInternalTopicManager), SubscriptionInfo (org.apache.kafka.streams.processor.internals.assignment.SubscriptionInfo), Properties (java.util.Properties), AssignmentInfo (org.apache.kafka.streams.processor.internals.assignment.AssignmentInfo), Metrics (org.apache.kafka.common.metrics.Metrics), MockProcessorSupplier (org.apache.kafka.test.MockProcessorSupplier), PartitionAssignor (org.apache.kafka.clients.consumer.internals.PartitionAssignor), UUID (java.util.UUID), StreamsConfig (org.apache.kafka.streams.StreamsConfig), HashSet (java.util.HashSet), Test (org.junit.Test)
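
The only setting this test changes is NUM_STANDBY_REPLICAS_CONFIG, which defaults to 0. With one standby replica, the assignor aims to keep a warm copy of each task's state on a process other than the one running the active task, which is what the final assertions verify (all three tasks appear once as active and once as standby). A minimal sketch of enabling standbys in an application's own configuration (the application id and broker address are placeholders):

Properties props = new Properties();
props.put(StreamsConfig.APPLICATION_ID_CONFIG, "standby-demo");
props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
// One warm replica per task: the replica tails the task's changelog so a
// failover does not have to replay it from the beginning.
props.put(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG, "1");
StreamsConfig config = new StreamsConfig(props);
// NUM_STANDBY_REPLICAS_CONFIG is an int-typed config, so getInt() reads it back.
int numStandbys = config.getInt(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG);  // 1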

Example 4 with StreamsConfig

Use of org.apache.kafka.streams.StreamsConfig in project kafka by apache.

From class StreamTaskTest, method shouldCheckpointOffsetsOnCommit:

@SuppressWarnings("unchecked")
@Test
public void shouldCheckpointOffsetsOnCommit() throws Exception {
    final String storeName = "test";
    final String changelogTopic = ProcessorStateManager.storeChangelogTopic("appId", storeName);
    final InMemoryKeyValueStore inMemoryStore = new InMemoryKeyValueStore(storeName, null, null) {

        @Override
        public void init(final ProcessorContext context, final StateStore root) {
            context.register(root, true, null);
        }

        @Override
        public boolean persistent() {
            return true;
        }
    };
    final ProcessorTopology topology = new ProcessorTopology(
        Collections.<ProcessorNode>emptyList(),
        Collections.<String, SourceNode>emptyMap(),
        Collections.<String, SinkNode>emptyMap(),
        Collections.<StateStore>singletonList(inMemoryStore),
        Collections.singletonMap(storeName, changelogTopic),
        Collections.<StateStore>emptyList());
    final TopicPartition partition = new TopicPartition(changelogTopic, 0);
    final NoOpRecordCollector recordCollector = new NoOpRecordCollector() {

        @Override
        public Map<TopicPartition, Long> offsets() {
            return Collections.singletonMap(partition, 543L);
        }
    };
    restoreStateConsumer.updatePartitions(changelogTopic,
        Collections.singletonList(new PartitionInfo(changelogTopic, 0, null, new Node[0], new Node[0])));
    restoreStateConsumer.updateEndOffsets(Collections.singletonMap(partition, 0L));
    restoreStateConsumer.updateBeginningOffsets(Collections.singletonMap(partition, 0L));
    final StreamsMetrics streamsMetrics = new MockStreamsMetrics(new Metrics());
    final TaskId taskId = new TaskId(0, 0);
    final MockTime time = new MockTime();
    final StreamsConfig config = createConfig(baseDir);
    final StreamTask streamTask = new StreamTask(taskId, "appId", partitions, topology, consumer,
        changelogReader, config, streamsMetrics, stateDirectory,
        new ThreadCache("testCache", 0, streamsMetrics), time, recordCollector);
    time.sleep(config.getLong(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG));
    streamTask.commit();
    final OffsetCheckpoint checkpoint = new OffsetCheckpoint(
        new File(stateDirectory.directoryForTask(taskId), ProcessorStateManager.CHECKPOINT_FILE_NAME));
    assertThat(checkpoint.read(), equalTo(Collections.singletonMap(partition, 544L)));
}
Also used: OffsetCheckpoint (org.apache.kafka.streams.state.internals.OffsetCheckpoint), TaskId (org.apache.kafka.streams.processor.TaskId), NoOpRecordCollector (org.apache.kafka.test.NoOpRecordCollector), StateStore (org.apache.kafka.streams.processor.StateStore), ProcessorContext (org.apache.kafka.streams.processor.ProcessorContext), NoOpProcessorContext (org.apache.kafka.test.NoOpProcessorContext), Metrics (org.apache.kafka.common.metrics.Metrics), StreamsMetrics (org.apache.kafka.streams.StreamsMetrics), TopicPartition (org.apache.kafka.common.TopicPartition), ThreadCache (org.apache.kafka.streams.state.internals.ThreadCache), PartitionInfo (org.apache.kafka.common.PartitionInfo), File (java.io.File), InMemoryKeyValueStore (org.apache.kafka.streams.state.internals.InMemoryKeyValueStore), MockTime (org.apache.kafka.common.utils.MockTime), StreamsConfig (org.apache.kafka.streams.StreamsConfig), Test (org.junit.Test)
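
Two details are worth noting: the checkpoint is written only because the store overrides persistent() to return true, and the checkpointed value is 544, one past the collector's reported offset 543, i.e. the next changelog offset to resume restoration from. A minimal sketch of the OffsetCheckpoint round trip on its own, with stateDir as a hypothetical writable directory:

final File stateDir = new File("/tmp/example-task-dir");  // hypothetical directory
final OffsetCheckpoint checkpoint = new OffsetCheckpoint(
    new File(stateDir, ProcessorStateManager.CHECKPOINT_FILE_NAME));
// write() persists the offset map to the checkpoint file.
checkpoint.write(Collections.singletonMap(new TopicPartition("appId-test-changelog", 0), 544L));
// read() parses the file back; a missing file yields an empty map.
final Map<TopicPartition, Long> offsets = checkpoint.read();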

Example 5 with StreamsConfig

Use of org.apache.kafka.streams.StreamsConfig in project kafka by apache.

From class StreamThreadTest, method testMaybeCommit:

@Test
public void testMaybeCommit() throws Exception {
    File baseDir = Files.createTempDirectory("test").toFile();
    try {
        final long commitInterval = 1000L;
        Properties props = configProps();
        props.setProperty(StreamsConfig.STATE_DIR_CONFIG, baseDir.getCanonicalPath());
        props.setProperty(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, Long.toString(commitInterval));
        StreamsConfig config = new StreamsConfig(props);
        final MockTime mockTime = new MockTime();
        TopologyBuilder builder = new TopologyBuilder().setApplicationId("X");
        builder.addSource("source1", "topic1");
        MockClientSupplier mockClientSupplier = new MockClientSupplier();
        StreamThread thread = new StreamThread(builder, config, mockClientSupplier, applicationId, clientId,
            processId, new Metrics(), mockTime,
            new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0) {

            @Override
            public void maybeCommit(long now) {
                super.maybeCommit(now);
            }

            @Override
            protected StreamTask createStreamTask(TaskId id, Collection<TopicPartition> partitionsForTask) {
                ProcessorTopology topology = builder.build(id.topicGroupId);
                return new TestStreamTask(id, applicationId, partitionsForTask, topology, consumer, producer,
                    restoreConsumer, config, new MockStreamsMetrics(new Metrics()), stateDirectory);
            }
        };
        initPartitionGrouper(config, thread, mockClientSupplier);
        ConsumerRebalanceListener rebalanceListener = thread.rebalanceListener;
        List<TopicPartition> revokedPartitions;
        List<TopicPartition> assignedPartitions;
        //
        // Assign t1p1 and t1p2. This should create Task 1 & 2
        //
        revokedPartitions = Collections.emptyList();
        assignedPartitions = Arrays.asList(t1p1, t1p2);
        rebalanceListener.onPartitionsRevoked(revokedPartitions);
        rebalanceListener.onPartitionsAssigned(assignedPartitions);
        assertEquals(2, thread.tasks().size());
        // no task is committed before the commit interval
        mockTime.sleep(commitInterval - 10L);
        thread.maybeCommit(mockTime.milliseconds());
        for (StreamTask task : thread.tasks().values()) {
            assertFalse(((TestStreamTask) task).committed);
        }
        // all tasks are committed after the commit interval
        mockTime.sleep(11L);
        thread.maybeCommit(mockTime.milliseconds());
        for (StreamTask task : thread.tasks().values()) {
            assertTrue(((TestStreamTask) task).committed);
            ((TestStreamTask) task).committed = false;
        }
        // no task is committed before the commit interval, again
        mockTime.sleep(commitInterval - 10L);
        thread.maybeCommit(mockTime.milliseconds());
        for (StreamTask task : thread.tasks().values()) {
            assertFalse(((TestStreamTask) task).committed);
        }
        // all tasks are committed after the commit interval, again
        mockTime.sleep(11L);
        thread.maybeCommit(mockTime.milliseconds());
        for (StreamTask task : thread.tasks().values()) {
            assertTrue(((TestStreamTask) task).committed);
            ((TestStreamTask) task).committed = false;
        }
    } finally {
        Utils.delete(baseDir);
    }
}
Also used: TaskId (org.apache.kafka.streams.processor.TaskId), TopologyBuilder (org.apache.kafka.streams.processor.TopologyBuilder), Properties (java.util.Properties), ConsumerRebalanceListener (org.apache.kafka.clients.consumer.ConsumerRebalanceListener), Metrics (org.apache.kafka.common.metrics.Metrics), StreamsMetrics (org.apache.kafka.streams.StreamsMetrics), MockClientSupplier (org.apache.kafka.test.MockClientSupplier), TopicPartition (org.apache.kafka.common.TopicPartition), Collection (java.util.Collection), File (java.io.File), MockTime (org.apache.kafka.common.utils.MockTime), StreamsConfig (org.apache.kafka.streams.StreamsConfig), Test (org.junit.Test)
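
The pattern to note is MockTime: sleep() advances only the mock clock, so the test can place maybeCommit() calls exactly on either side of the 1000 ms commit interval. A minimal condensed sketch of the interval gate being exercised, where lastCommitMs and commitAllTasks() are hypothetical stand-ins for the thread's internal bookkeeping:

private long lastCommitMs = 0L;

void maybeCommit(final long nowMs, final long commitIntervalMs) {
    if (nowMs - lastCommitMs >= commitIntervalMs) {
        commitAllTasks();      // hypothetical helper: commit every task in tasks()
        lastCommitMs = nowMs;  // the next interval is measured from this commit
    }
}

With commitIntervalMs = 1000, the calls at 990 and 1991 ms commit nothing, while the calls at 1001 and 2002 ms commit every task, matching the alternating assertFalse/assertTrue blocks above.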

Aggregations

StreamsConfig (org.apache.kafka.streams.StreamsConfig): 219
Test (org.junit.Test): 173
Properties (java.util.Properties): 84
HashMap (java.util.HashMap): 69
TopicPartition (org.apache.kafka.common.TopicPartition): 66
TaskId (org.apache.kafka.streams.processor.TaskId): 54
MockTime (org.apache.kafka.common.utils.MockTime): 53
Set (java.util.Set): 36
ArrayList (java.util.ArrayList): 33
HashSet (java.util.HashSet): 33
Metrics (org.apache.kafka.common.metrics.Metrics): 33
File (java.io.File): 32
AdminClient (org.apache.kafka.clients.admin.AdminClient): 31
MockAdminClient (org.apache.kafka.clients.admin.MockAdminClient): 31
LogContext (org.apache.kafka.common.utils.LogContext): 31
Map (java.util.Map): 30
TimeoutException (org.apache.kafka.common.errors.TimeoutException): 30
KafkaFutureImpl (org.apache.kafka.common.internals.KafkaFutureImpl): 30
Before (org.junit.Before): 27
List (java.util.List): 22