
Example 21 with StreamsConfig

Use of org.apache.kafka.streams.StreamsConfig in project kafka by apache: ProcessorTopologyTest#setup builds a test StreamsConfig that points at a temporary state directory and uses String serdes and a custom timestamp extractor.

@Before
public void setup() {
    // Create a new directory in which we'll put all of the state for this test, enabling running tests in parallel ...
    File localState = TestUtils.tempDirectory();
    Properties props = new Properties();
    props.setProperty(StreamsConfig.APPLICATION_ID_CONFIG, "processor-topology-test");
    props.setProperty(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9091");
    props.setProperty(StreamsConfig.STATE_DIR_CONFIG, localState.getAbsolutePath());
    props.setProperty(StreamsConfig.KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    props.setProperty(StreamsConfig.VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    props.setProperty(StreamsConfig.TIMESTAMP_EXTRACTOR_CLASS_CONFIG, CustomTimestampExtractor.class.getName());
    this.config = new StreamsConfig(props);
}
Also used : Properties(java.util.Properties) File(java.io.File) StreamsConfig(org.apache.kafka.streams.StreamsConfig) Before(org.junit.Before)
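
Note that KEY_SERDE_CLASS_CONFIG and VALUE_SERDE_CLASS_CONFIG are the pre-1.0 property names; on newer Kafka Streams versions the equivalent setup uses the DEFAULT_* keys instead. A minimal sketch, assuming Kafka Streams 1.0 or later (only the renamed keys differ from the test above):

    Properties props = new Properties();
    props.setProperty(StreamsConfig.APPLICATION_ID_CONFIG, "processor-topology-test");
    props.setProperty(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9091");
    props.setProperty(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getAbsolutePath());
    // Renamed in 1.0: default serdes and default timestamp extractor
    props.setProperty(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    props.setProperty(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    props.setProperty(StreamsConfig.DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG, CustomTimestampExtractor.class.getName());
    StreamsConfig config = new StreamsConfig(props);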

Example 22 with StreamsConfig

Use of org.apache.kafka.streams.StreamsConfig in project kafka by apache: RegexSourceIntegrationTest#testMultipleConsumersCanReadFromPartitionedTopic verifies that two KafkaStreams instances subscribing to topics by regex pattern are each assigned both partitioned topics.

@Test
public void testMultipleConsumersCanReadFromPartitionedTopic() throws Exception {
    final Serde<String> stringSerde = Serdes.String();
    final KStreamBuilder builderLeader = new KStreamBuilder();
    final KStreamBuilder builderFollower = new KStreamBuilder();
    final List<String> expectedAssignment = Arrays.asList(PARTITIONED_TOPIC_1, PARTITIONED_TOPIC_2);
    final KStream<String, String> partitionedStreamLeader = builderLeader.stream(Pattern.compile("partitioned-\\d"));
    final KStream<String, String> partitionedStreamFollower = builderFollower.stream(Pattern.compile("partitioned-\\d"));
    partitionedStreamLeader.to(stringSerde, stringSerde, DEFAULT_OUTPUT_TOPIC);
    partitionedStreamFollower.to(stringSerde, stringSerde, DEFAULT_OUTPUT_TOPIC);
    final KafkaStreams partitionedStreamsLeader = new KafkaStreams(builderLeader, streamsConfiguration);
    final KafkaStreams partitionedStreamsFollower = new KafkaStreams(builderFollower, streamsConfiguration);
    final StreamsConfig streamsConfig = new StreamsConfig(streamsConfiguration);
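    // Swap the first internal StreamThread for a TestStreamThread (via reflection) so the test can observe which topic partitions get assigned.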
    final Field leaderStreamThreadsField = partitionedStreamsLeader.getClass().getDeclaredField("threads");
    leaderStreamThreadsField.setAccessible(true);
    final StreamThread[] leaderStreamThreads = (StreamThread[]) leaderStreamThreadsField.get(partitionedStreamsLeader);
    final StreamThread originalLeaderThread = leaderStreamThreads[0];
    final TestStreamThread leaderTestStreamThread = new TestStreamThread(builderLeader, streamsConfig, new DefaultKafkaClientSupplier(), originalLeaderThread.applicationId, originalLeaderThread.clientId, originalLeaderThread.processId, new Metrics(), Time.SYSTEM);
    leaderStreamThreads[0] = leaderTestStreamThread;
    final TestCondition bothTopicsAddedToLeader = new TestCondition() {

        @Override
        public boolean conditionMet() {
            return leaderTestStreamThread.assignedTopicPartitions.equals(expectedAssignment);
        }
    };
    final Field followerStreamThreadsField = partitionedStreamsFollower.getClass().getDeclaredField("threads");
    followerStreamThreadsField.setAccessible(true);
    final StreamThread[] followerStreamThreads = (StreamThread[]) followerStreamThreadsField.get(partitionedStreamsFollower);
    final StreamThread originalFollowerThread = followerStreamThreads[0];
    final TestStreamThread followerTestStreamThread = new TestStreamThread(builderFollower, streamsConfig, new DefaultKafkaClientSupplier(), originalFollowerThread.applicationId, originalFollowerThread.clientId, originalFollowerThread.processId, new Metrics(), Time.SYSTEM);
    followerStreamThreads[0] = followerTestStreamThread;
    final TestCondition bothTopicsAddedToFollower = new TestCondition() {

        @Override
        public boolean conditionMet() {
            return followerTestStreamThread.assignedTopicPartitions.equals(expectedAssignment);
        }
    };
    partitionedStreamsLeader.start();
    TestUtils.waitForCondition(bothTopicsAddedToLeader, "Topics never assigned to leader stream");
    partitionedStreamsFollower.start();
    TestUtils.waitForCondition(bothTopicsAddedToFollower, "Topics never assigned to follower stream");
    partitionedStreamsLeader.close();
    partitionedStreamsFollower.close();
}
Also used : KStreamBuilder(org.apache.kafka.streams.kstream.KStreamBuilder) KafkaStreams(org.apache.kafka.streams.KafkaStreams) DefaultKafkaClientSupplier(org.apache.kafka.streams.processor.internals.DefaultKafkaClientSupplier) StreamThread(org.apache.kafka.streams.processor.internals.StreamThread) Field(java.lang.reflect.Field) Metrics(org.apache.kafka.common.metrics.Metrics) TestCondition(org.apache.kafka.test.TestCondition) StreamsConfig(org.apache.kafka.streams.StreamsConfig) Test(org.junit.Test)

Example 23 with StreamsConfig

Use of org.apache.kafka.streams.StreamsConfig in project kafka by apache: StreamThreadStateStoreProviderTest#before wires a StreamsConfig into a StreamThread with two stream tasks so that a StreamThreadStateStoreProvider can be tested.

@Before
public void before() throws IOException {
    final TopologyBuilder builder = new TopologyBuilder();
    builder.addSource("the-source", "the-source");
    builder.addProcessor("the-processor", new MockProcessorSupplier(), "the-source");
    builder.addStateStore(Stores.create("kv-store").withStringKeys().withStringValues().inMemory().build(), "the-processor");
    builder.addStateStore(Stores.create("window-store").withStringKeys().withStringValues().persistent().windowed(10, 10, 2, false).build(), "the-processor");
    final Properties properties = new Properties();
    final String applicationId = "applicationId";
    properties.put(StreamsConfig.APPLICATION_ID_CONFIG, applicationId);
    properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    stateDir = TestUtils.tempDirectory();
    final String stateConfigDir = stateDir.getPath();
    properties.put(StreamsConfig.STATE_DIR_CONFIG, stateConfigDir);
    final StreamsConfig streamsConfig = new StreamsConfig(properties);
    final MockClientSupplier clientSupplier = new MockClientSupplier();
    configureRestoreConsumer(clientSupplier, "applicationId-kv-store-changelog");
    configureRestoreConsumer(clientSupplier, "applicationId-window-store-changelog");
    builder.setApplicationId(applicationId);
    final ProcessorTopology topology = builder.build(null);
    final Map<TaskId, StreamTask> tasks = new HashMap<>();
    stateDirectory = new StateDirectory(applicationId, stateConfigDir, new MockTime());
    taskOne = createStreamsTask(applicationId, streamsConfig, clientSupplier, topology, new TaskId(0, 0));
    tasks.put(new TaskId(0, 0), taskOne);
    taskTwo = createStreamsTask(applicationId, streamsConfig, clientSupplier, topology, new TaskId(0, 1));
    tasks.put(new TaskId(0, 1), taskTwo);
    storesAvailable = true;
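    // Anonymous StreamThread: expose the prepared tasks and report store readiness so the provider can be exercised without running a real thread.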
    thread = new StreamThread(builder, streamsConfig, clientSupplier, applicationId, "clientId", UUID.randomUUID(), new Metrics(), Time.SYSTEM, new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0) {

        @Override
        public Map<TaskId, StreamTask> tasks() {
            return tasks;
        }

        @Override
        public boolean isInitialized() {
            return storesAvailable;
        }
    };
    provider = new StreamThreadStateStoreProvider(thread);
}
Also used : ProcessorTopology(org.apache.kafka.streams.processor.internals.ProcessorTopology) TaskId(org.apache.kafka.streams.processor.TaskId) TopologyBuilder(org.apache.kafka.streams.processor.TopologyBuilder) HashMap(java.util.HashMap) StreamThread(org.apache.kafka.streams.processor.internals.StreamThread) Properties(java.util.Properties) MockStreamsMetrics(org.apache.kafka.streams.processor.internals.MockStreamsMetrics) Metrics(org.apache.kafka.common.metrics.Metrics) StreamsMetadataState(org.apache.kafka.streams.processor.internals.StreamsMetadataState) MockProcessorSupplier(org.apache.kafka.test.MockProcessorSupplier) MockClientSupplier(org.apache.kafka.test.MockClientSupplier) HashMap(java.util.HashMap) Map(java.util.Map) StreamTask(org.apache.kafka.streams.processor.internals.StreamTask) MockTime(org.apache.kafka.common.utils.MockTime) StreamsConfig(org.apache.kafka.streams.StreamsConfig) StateDirectory(org.apache.kafka.streams.processor.internals.StateDirectory) Before(org.junit.Before)

Example 24 with StreamsConfig

Use of org.apache.kafka.streams.StreamsConfig in project kafka by apache: NoOpProcessorContext#streamsConfig builds a minimal StreamsConfig with only the two required settings, application id and bootstrap servers.

static StreamsConfig streamsConfig() {
    final Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "appId");
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "boot");
    return new StreamsConfig(props);
}
Also used : Properties(java.util.Properties) StreamsConfig(org.apache.kafka.streams.StreamsConfig)
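
The resulting StreamsConfig validates the required settings at construction time and can be read back through the typed getters it inherits from AbstractConfig. A small usage sketch (variable names are illustrative):

    StreamsConfig config = streamsConfig();
    String appId = config.getString(StreamsConfig.APPLICATION_ID_CONFIG);          // "appId"
    List<String> servers = config.getList(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG); // ["boot"]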

Example 25 with StreamsConfig

Use of org.apache.kafka.streams.StreamsConfig in project kafka by apache: StreamThreadTest#shouldCloseActiveTasksThatAreAssignedToThisStreamThreadButAssignmentHasChangedBeforeCreatingNewTasks checks that an active task is closed and recreated when its partition assignment changes between rebalances.

@Test
public void shouldCloseActiveTasksThatAreAssignedToThisStreamThreadButAssignmentHasChangedBeforeCreatingNewTasks() throws Exception {
    final KStreamBuilder builder = new KStreamBuilder();
    builder.setApplicationId(applicationId);
    builder.stream(Pattern.compile("t.*")).to("out");
    final StreamsConfig config = new StreamsConfig(configProps());
    final MockClientSupplier clientSupplier = new MockClientSupplier();
    final Map<Collection<TopicPartition>, TestStreamTask> createdTasks = new HashMap<>();
    final StreamThread thread = new StreamThread(builder, config, clientSupplier, applicationId, clientId, processId, new Metrics(), new MockTime(), new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0) {

        @Override
        protected StreamTask createStreamTask(final TaskId id, final Collection<TopicPartition> partitions) {
            final ProcessorTopology topology = builder.build(id.topicGroupId);
            final TestStreamTask task = new TestStreamTask(id, applicationId, partitions, topology, consumer, producer, restoreConsumer, config, new MockStreamsMetrics(new Metrics()), stateDirectory);
            createdTasks.put(partitions, task);
            return task;
        }
    };
    final Map<TaskId, Set<TopicPartition>> activeTasks = new HashMap<>();
    final TopicPartition t1 = new TopicPartition("t1", 0);
    final Set<TopicPartition> task00Partitions = new HashSet<>();
    task00Partitions.add(t1);
    final TaskId taskId = new TaskId(0, 0);
    activeTasks.put(taskId, task00Partitions);
    thread.partitionAssignor(new StreamPartitionAssignor() {

        @Override
        Map<TaskId, Set<TopicPartition>> activeTasks() {
            return activeTasks;
        }
    });
    // should create task for id 0_0 with a single partition
    thread.rebalanceListener.onPartitionsRevoked(Collections.<TopicPartition>emptyList());
    thread.rebalanceListener.onPartitionsAssigned(task00Partitions);
    final TestStreamTask firstTask = createdTasks.get(task00Partitions);
    assertThat(firstTask.id(), is(taskId));
    // update assignment for the task 0_0 so it now has 2 partitions
    task00Partitions.add(new TopicPartition("t2", 0));
    thread.rebalanceListener.onPartitionsRevoked(Collections.<TopicPartition>emptyList());
    thread.rebalanceListener.onPartitionsAssigned(task00Partitions);
    // should close the first task as the assignment has changed
    assertTrue("task should have been closed as assignment has changed", firstTask.closed);
    assertTrue("tasks state manager should have been closed as assignment has changed", firstTask.closedStateManager);
    // should have created a new task for 00
    assertThat(createdTasks.get(task00Partitions).id(), is(taskId));
}
Also used : KStreamBuilder(org.apache.kafka.streams.kstream.KStreamBuilder) TaskId(org.apache.kafka.streams.processor.TaskId) Set(java.util.Set) HashSet(java.util.HashSet) HashMap(java.util.HashMap) Metrics(org.apache.kafka.common.metrics.Metrics) StreamsMetrics(org.apache.kafka.streams.StreamsMetrics) MockClientSupplier(org.apache.kafka.test.MockClientSupplier) TopicPartition(org.apache.kafka.common.TopicPartition) Collection(java.util.Collection) Map(java.util.Map) HashMap(java.util.HashMap) MockTime(org.apache.kafka.common.utils.MockTime) StreamsConfig(org.apache.kafka.streams.StreamsConfig) HashSet(java.util.HashSet) Test(org.junit.Test)

Aggregations

StreamsConfig (org.apache.kafka.streams.StreamsConfig): 39
Test (org.junit.Test): 33
Metrics (org.apache.kafka.common.metrics.Metrics): 28
HashMap (java.util.HashMap): 20
TopicPartition (org.apache.kafka.common.TopicPartition): 19
TaskId (org.apache.kafka.streams.processor.TaskId): 19
MockTime (org.apache.kafka.common.utils.MockTime): 16
StreamsMetrics (org.apache.kafka.streams.StreamsMetrics): 15
MockClientSupplier (org.apache.kafka.test.MockClientSupplier): 15
Properties (java.util.Properties): 14
KStreamBuilder (org.apache.kafka.streams.kstream.KStreamBuilder): 13
HashSet (java.util.HashSet): 12
Set (java.util.Set): 9
Collection (java.util.Collection): 8
TopologyBuilder (org.apache.kafka.streams.processor.TopologyBuilder): 8
File (java.io.File): 7
Map (java.util.Map): 7
UUID (java.util.UUID): 7
PartitionInfo (org.apache.kafka.common.PartitionInfo): 6
MockProcessorSupplier (org.apache.kafka.test.MockProcessorSupplier): 6
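
Outside of these test fixtures, the same Properties are typically handed straight to KafkaStreams rather than wrapped in a StreamsConfig by the caller. A minimal sketch against the same era of the API (KStreamBuilder), with hypothetical application id, broker address, and topic names:

    Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "my-streams-app");    // hypothetical application id
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // hypothetical broker
    KStreamBuilder builder = new KStreamBuilder();
    builder.stream("input-topic").to("output-topic");                    // hypothetical topics, default serdes
    KafkaStreams streams = new KafkaStreams(builder, props);             // a StreamsConfig can be passed here as well
    streams.start();
    // ... process records ...
    streams.close();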