Example 16 with StreamsConfig

Use of org.apache.kafka.streams.StreamsConfig in project apache/kafka.

From the class StandbyTaskTest, method testUpdate.

@SuppressWarnings("unchecked")
@Test
public void testUpdate() throws Exception {
    StreamsConfig config = createConfig(baseDir);
    StandbyTask task = new StandbyTask(taskId, applicationId, topicPartitions, topology, consumer, changelogReader, config, null, stateDirectory);
    restoreStateConsumer.assign(new ArrayList<>(task.changeLogPartitions()));
    for (ConsumerRecord<Integer, Integer> record : Arrays.asList(
            new ConsumerRecord<>(partition2.topic(), partition2.partition(), 10, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, 1, 100),
            new ConsumerRecord<>(partition2.topic(), partition2.partition(), 20, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, 2, 100),
            new ConsumerRecord<>(partition2.topic(), partition2.partition(), 30, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, 3, 100))) {
        restoreStateConsumer.bufferRecord(record);
    }
    for (Map.Entry<TopicPartition, Long> entry : task.checkpointedOffsets().entrySet()) {
        TopicPartition partition = entry.getKey();
        long offset = entry.getValue();
        if (offset >= 0) {
            restoreStateConsumer.seek(partition, offset);
        } else {
            restoreStateConsumer.seekToBeginning(singleton(partition));
        }
    }
    task.update(partition2, restoreStateConsumer.poll(100).records(partition2));
    StandbyContextImpl context = (StandbyContextImpl) task.context();
    MockStateStoreSupplier.MockStateStore store1 = (MockStateStoreSupplier.MockStateStore) context.getStateMgr().getStore(storeName1);
    MockStateStoreSupplier.MockStateStore store2 = (MockStateStoreSupplier.MockStateStore) context.getStateMgr().getStore(storeName2);
    assertEquals(Collections.emptyList(), store1.keys);
    assertEquals(Utils.mkList(1, 2, 3), store2.keys);
    task.closeStateManager(true);
    File taskDir = stateDirectory.directoryForTask(taskId);
    OffsetCheckpoint checkpoint = new OffsetCheckpoint(new File(taskDir, ProcessorStateManager.CHECKPOINT_FILE_NAME));
    Map<TopicPartition, Long> offsets = checkpoint.read();
    assertEquals(1, offsets.size());
    // the checkpointed offset should be one past the last restored record (30 + 1 = 31)
    assertEquals(Long.valueOf(30L + 1L), offsets.get(partition2));
}
Also used: OffsetCheckpoint(org.apache.kafka.streams.state.internals.OffsetCheckpoint), TopicPartition(org.apache.kafka.common.TopicPartition), MockStateStoreSupplier(org.apache.kafka.test.MockStateStoreSupplier), HashMap(java.util.HashMap), Map(java.util.Map), File(java.io.File), StreamsConfig(org.apache.kafka.streams.StreamsConfig), Test(org.junit.Test)
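
Both StandbyTaskTest examples call a createConfig(baseDir) helper that the snippets do not show. A minimal sketch of what such a helper might look like, assuming only the required StreamsConfig settings plus a state directory rooted at the test's base directory; the values are illustrative, not the actual test fixture:

private StreamsConfig createConfig(final File baseDir) throws Exception {
    final Properties props = new Properties();
    // application.id is required; it also prefixes internal topic and state store names
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "standby-task-test");
    // bootstrap.servers is required even though these tests never contact a broker
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    // point the state directory at the per-test directory so checkpoint files land there
    props.put(StreamsConfig.STATE_DIR_CONFIG, baseDir.getCanonicalPath());
    return new StreamsConfig(props);
}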

Example 17 with StreamsConfig

Use of org.apache.kafka.streams.StreamsConfig in project apache/kafka.

From the class StandbyTaskTest, method testUpdateKTable.

@SuppressWarnings("unchecked")
@Test
public void testUpdateKTable() throws Exception {
    consumer.assign(Utils.mkList(ktable));
    Map<TopicPartition, OffsetAndMetadata> committedOffsets = new HashMap<>();
    committedOffsets.put(new TopicPartition(ktable.topic(), ktable.partition()), new OffsetAndMetadata(0L));
    consumer.commitSync(committedOffsets);
    restoreStateConsumer.updatePartitions("ktable1", Utils.mkList(
            new PartitionInfo("ktable1", 0, Node.noNode(), new Node[0], new Node[0]),
            new PartitionInfo("ktable1", 1, Node.noNode(), new Node[0], new Node[0]),
            new PartitionInfo("ktable1", 2, Node.noNode(), new Node[0], new Node[0])));
    StreamsConfig config = createConfig(baseDir);
    StandbyTask task = new StandbyTask(taskId, applicationId, ktablePartitions, ktableTopology, consumer, changelogReader, config, null, stateDirectory);
    restoreStateConsumer.assign(new ArrayList<>(task.changeLogPartitions()));
    for (ConsumerRecord<Integer, Integer> record : Arrays.asList(
            new ConsumerRecord<>(ktable.topic(), ktable.partition(), 10, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, 1, 100),
            new ConsumerRecord<>(ktable.topic(), ktable.partition(), 20, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, 2, 100),
            new ConsumerRecord<>(ktable.topic(), ktable.partition(), 30, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, 3, 100),
            new ConsumerRecord<>(ktable.topic(), ktable.partition(), 40, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, 4, 100),
            new ConsumerRecord<>(ktable.topic(), ktable.partition(), 50, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, 5, 100))) {
        restoreStateConsumer.bufferRecord(record);
    }
    for (Map.Entry<TopicPartition, Long> entry : task.checkpointedOffsets().entrySet()) {
        TopicPartition partition = entry.getKey();
        long offset = entry.getValue();
        if (offset >= 0) {
            restoreStateConsumer.seek(partition, offset);
        } else {
            restoreStateConsumer.seekToBeginning(singleton(partition));
        }
    }
    // The committed offset is 0, so no records should be processed yet.
    List<ConsumerRecord<byte[], byte[]>> remaining = task.update(ktable, restoreStateConsumer.poll(100).records(ktable));
    assertEquals(5, remaining.size());
    committedOffsets.put(new TopicPartition(ktable.topic(), ktable.partition()), new OffsetAndMetadata(10L));
    consumer.commitSync(committedOffsets);
    // update offset limits
    task.commit();
    // The offset limit (10) still excludes the record at offset 10, so nothing is processed.
    remaining = task.update(ktable, remaining);
    assertEquals(5, remaining.size());
    committedOffsets.put(new TopicPartition(ktable.topic(), ktable.partition()), new OffsetAndMetadata(11L));
    consumer.commitSync(committedOffsets);
    // update offset limits
    task.commit();
    // The offset limit is now 11, so exactly one record (offset 10) should be processed.
    remaining = task.update(ktable, remaining);
    assertEquals(4, remaining.size());
    committedOffsets.put(new TopicPartition(ktable.topic(), ktable.partition()), new OffsetAndMetadata(45L));
    consumer.commitSync(committedOffsets);
    // update offset limits
    task.commit();
    // The offset limit is now 45. All remaining records except the last one (offset 50) should be processed.
    remaining = task.update(ktable, remaining);
    assertEquals(1, remaining.size());
    committedOffsets.put(new TopicPartition(ktable.topic(), ktable.partition()), new OffsetAndMetadata(50L));
    consumer.commitSync(committedOffsets);
    // update offset limits
    task.commit();
    // The offset limit is now 50, which still excludes the record at offset 50, so it remains.
    remaining = task.update(ktable, remaining);
    assertEquals(1, remaining.size());
    committedOffsets.put(new TopicPartition(ktable.topic(), ktable.partition()), new OffsetAndMetadata(60L));
    consumer.commitSync(committedOffsets);
    // update offset limits
    task.commit();
    // The offset limit is now 60. No records should be left.
    remaining = task.update(ktable, remaining);
    assertNull(remaining);
    task.closeStateManager(true);
    File taskDir = stateDirectory.directoryForTask(taskId);
    OffsetCheckpoint checkpoint = new OffsetCheckpoint(new File(taskDir, ProcessorStateManager.CHECKPOINT_FILE_NAME));
    Map<TopicPartition, Long> offsets = checkpoint.read();
    assertEquals(1, offsets.size());
    // one past the last applied record (50 + 1 = 51)
    assertEquals(Long.valueOf(51L), offsets.get(ktable));
}
Also used: OffsetCheckpoint(org.apache.kafka.streams.state.internals.OffsetCheckpoint), HashMap(java.util.HashMap), ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord), TopicPartition(org.apache.kafka.common.TopicPartition), OffsetAndMetadata(org.apache.kafka.clients.consumer.OffsetAndMetadata), PartitionInfo(org.apache.kafka.common.PartitionInfo), Map(java.util.Map), File(java.io.File), StreamsConfig(org.apache.kafka.streams.StreamsConfig), Test(org.junit.Test)
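
The assertions above all follow one rule: a standby task backed by a KTable only applies changelog records whose offsets lie below the committed offset of the source topic, and update() hands back the rest. A minimal sketch of that filtering rule, written as a standalone helper rather than the actual StandbyTask internals; the method name is hypothetical:

import java.util.ArrayList;
import java.util.List;
import org.apache.kafka.clients.consumer.ConsumerRecord;

// Hypothetical illustration of the offset-limit rule the test observes:
// records below the limit would be applied to the standby store,
// records at or above it are returned for a later update() call.
static <K, V> List<ConsumerRecord<K, V>> filterByOffsetLimit(final List<ConsumerRecord<K, V>> records, final long limit) {
    final List<ConsumerRecord<K, V>> remaining = new ArrayList<>();
    for (final ConsumerRecord<K, V> record : records) {
        if (record.offset() >= limit) {
            remaining.add(record);
        }
        // else: the record would be applied to the state store
    }
    // the test sees null, not an empty list, once every record has been applied
    return remaining.isEmpty() ? null : remaining;
}

Tracing the test through this rule: a limit of 11 applies only the record at offset 10 (four remain), a limit of 50 still holds back the record at offset 50, and a limit of 60 applies everything, which is why the checkpoint finally reads 51, one past the last applied offset.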

Example 18 with StreamsConfig

Use of org.apache.kafka.streams.StreamsConfig in project apache/kafka.

From the class StreamPartitionAssignorTest, method shouldAddUserDefinedEndPointToSubscription.

@Test
public void shouldAddUserDefinedEndPointToSubscription() throws Exception {
    final Properties properties = configProps();
    properties.put(StreamsConfig.APPLICATION_SERVER_CONFIG, "localhost:8080");
    final StreamsConfig config = new StreamsConfig(properties);
    final String applicationId = "application-id";
    builder.setApplicationId(applicationId);
    builder.addSource("source", "input");
    builder.addProcessor("processor", new MockProcessorSupplier(), "source");
    builder.addSink("sink", "output", "processor");
    final UUID uuid1 = UUID.randomUUID();
    final String client1 = "client1";
    final StreamThread streamThread = new StreamThread(builder, config, mockClientSupplier, applicationId, client1, uuid1, new Metrics(), Time.SYSTEM, new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0);
    partitionAssignor.configure(config.getConsumerConfigs(streamThread, applicationId, client1));
    final PartitionAssignor.Subscription subscription = partitionAssignor.subscription(Utils.mkSet("input"));
    final SubscriptionInfo subscriptionInfo = SubscriptionInfo.decode(subscription.userData());
    assertEquals("localhost:8080", subscriptionInfo.userEndPoint);
}
Also used: Metrics(org.apache.kafka.common.metrics.Metrics), MockProcessorSupplier(org.apache.kafka.test.MockProcessorSupplier), SubscriptionInfo(org.apache.kafka.streams.processor.internals.assignment.SubscriptionInfo), PartitionAssignor(org.apache.kafka.clients.consumer.internals.PartitionAssignor), Properties(java.util.Properties), UUID(java.util.UUID), StreamsConfig(org.apache.kafka.streams.StreamsConfig), Test(org.junit.Test)
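
Outside of tests, the endpoint travels the same way: set application.server on the instance's configuration before the threads are built, and every other instance in the group can read it back from the subscription metadata. A minimal sketch, assuming "localhost:8080" is where this instance serves interactive queries (the server itself is out of scope here):

final Properties props = new Properties();
props.put(StreamsConfig.APPLICATION_ID_CONFIG, "application-id");
props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:9092");
// the host:port this instance advertises to the rest of the group;
// it is embedded in the subscription userData, as the test asserts
props.put(StreamsConfig.APPLICATION_SERVER_CONFIG, "localhost:8080");
final StreamsConfig config = new StreamsConfig(props);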

Example 19 with StreamsConfig

Use of org.apache.kafka.streams.StreamsConfig in project apache/kafka.

From the class StreamPartitionAssignorTest, method shouldNotAddStandbyTaskPartitionsToPartitionsForHost.

@Test
public void shouldNotAddStandbyTaskPartitionsToPartitionsForHost() throws Exception {
    final Properties props = configProps();
    props.setProperty(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG, "1");
    final StreamsConfig config = new StreamsConfig(props);
    final KStreamBuilder builder = new KStreamBuilder();
    final String applicationId = "appId";
    builder.setApplicationId(applicationId);
    builder.stream("topic1").groupByKey().count("count");
    final UUID uuid = UUID.randomUUID();
    final String client = "client1";
    final StreamThread streamThread = new StreamThread(builder, config, mockClientSupplier, applicationId, client, uuid, new Metrics(), Time.SYSTEM, new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0);
    partitionAssignor.configure(config.getConsumerConfigs(streamThread, applicationId, client));
    partitionAssignor.setInternalTopicManager(new MockInternalTopicManager(streamThread.config, mockClientSupplier.restoreConsumer));
    final Map<String, PartitionAssignor.Subscription> subscriptions = new HashMap<>();
    final Set<TaskId> emptyTasks = Collections.emptySet();
    subscriptions.put("consumer1", new PartitionAssignor.Subscription(Collections.singletonList("topic1"), new SubscriptionInfo(uuid, emptyTasks, emptyTasks, userEndPoint).encode()));
    subscriptions.put("consumer2", new PartitionAssignor.Subscription(Collections.singletonList("topic1"), new SubscriptionInfo(UUID.randomUUID(), emptyTasks, emptyTasks, "other:9090").encode()));
    final Set<TopicPartition> allPartitions = Utils.mkSet(t1p0, t1p1, t1p2);
    final Map<String, PartitionAssignor.Assignment> assign = partitionAssignor.assign(metadata, subscriptions);
    final PartitionAssignor.Assignment consumer1Assignment = assign.get("consumer1");
    final AssignmentInfo assignmentInfo = AssignmentInfo.decode(consumer1Assignment.userData());
    final Set<TopicPartition> consumer1Partitions = assignmentInfo.partitionsByHost.get(new HostInfo("localhost", 2171));
    final Set<TopicPartition> consumer2Partitions = assignmentInfo.partitionsByHost.get(new HostInfo("other", 9090));
    final HashSet<TopicPartition> allAssignedPartitions = new HashSet<>(consumer1Partitions);
    allAssignedPartitions.addAll(consumer2Partitions);
    assertThat(consumer1Partitions, not(allPartitions));
    assertThat(consumer2Partitions, not(allPartitions));
    assertThat(allAssignedPartitions, equalTo(allPartitions));
}
Also used: TaskId(org.apache.kafka.streams.processor.TaskId), HashMap(java.util.HashMap), MockInternalTopicManager(org.apache.kafka.test.MockInternalTopicManager), SubscriptionInfo(org.apache.kafka.streams.processor.internals.assignment.SubscriptionInfo), Properties(java.util.Properties), AssignmentInfo(org.apache.kafka.streams.processor.internals.assignment.AssignmentInfo), Metrics(org.apache.kafka.common.metrics.Metrics), PartitionAssignor(org.apache.kafka.clients.consumer.internals.PartitionAssignor), UUID(java.util.UUID), StreamsConfig(org.apache.kafka.streams.StreamsConfig), HashSet(java.util.HashSet), KStreamBuilder(org.apache.kafka.streams.kstream.KStreamBuilder), TopicPartition(org.apache.kafka.common.TopicPartition), HostInfo(org.apache.kafka.streams.state.HostInfo), Test(org.junit.Test)
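
Standby replicas are off by default (num.standby.replicas is 0); the test switches them on to prove that standby partitions stay out of partitionsByHost, which only advertises active tasks for interactive queries. A minimal sketch of enabling the same setting in application code:

final Properties props = new Properties();
props.put(StreamsConfig.APPLICATION_ID_CONFIG, "appId");
props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "broker1:9092");
// keep one warm standby copy of each task's state on another instance;
// standby partitions are excluded from the host metadata, as asserted above
props.put(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG, "1");
final StreamsConfig config = new StreamsConfig(props);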

Example 20 with StreamsConfig

Use of org.apache.kafka.streams.StreamsConfig in project apache/kafka.

From the class StreamPartitionAssignorTest, method shouldThrowExceptionIfApplicationServerConfigPortIsNotAnInteger.

@Test
public void shouldThrowExceptionIfApplicationServerConfigPortIsNotAnInteger() throws Exception {
    final Properties properties = configProps();
    final String myEndPoint = "localhost:j87yhk";
    properties.put(StreamsConfig.APPLICATION_SERVER_CONFIG, myEndPoint);
    final StreamsConfig config = new StreamsConfig(properties);
    final UUID uuid1 = UUID.randomUUID();
    final String client1 = "client1";
    final String applicationId = "application-id";
    builder.setApplicationId(applicationId);
    final StreamThread streamThread = new StreamThread(builder, config, mockClientSupplier, applicationId, client1, uuid1, new Metrics(), Time.SYSTEM, new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0);
    try {
        partitionAssignor.configure(config.getConsumerConfigs(streamThread, applicationId, client1));
        Assert.fail("expected to an exception due to invalid config");
    } catch (ConfigException e) {
    // pass
    }
}
Also used: Metrics(org.apache.kafka.common.metrics.Metrics), ConfigException(org.apache.kafka.common.config.ConfigException), Properties(java.util.Properties), UUID(java.util.UUID), StreamsConfig(org.apache.kafka.streams.StreamsConfig), Test(org.junit.Test)
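
The failure the test expects comes from the port half of the host:port pair: "8080" parses as an integer, "j87yhk" does not. A minimal standalone sketch of that validation, independent of the assignor's internal parsing; the helper name is hypothetical:

import org.apache.kafka.common.config.ConfigException;

// Hypothetical check mirroring why "localhost:j87yhk" is rejected.
static void validateEndPoint(final String endPoint) {
    final String[] parts = endPoint.split(":");
    if (parts.length != 2) {
        throw new ConfigException("Endpoint must be host:port, got: " + endPoint);
    }
    try {
        Integer.parseInt(parts[1]); // "8080" passes; "j87yhk" throws NumberFormatException
    } catch (final NumberFormatException e) {
        throw new ConfigException("Port must be an integer in endpoint: " + endPoint);
    }
}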

Aggregations

StreamsConfig (org.apache.kafka.streams.StreamsConfig): 39 uses
Test (org.junit.Test): 33 uses
Metrics (org.apache.kafka.common.metrics.Metrics): 28 uses
HashMap (java.util.HashMap): 20 uses
TopicPartition (org.apache.kafka.common.TopicPartition): 19 uses
TaskId (org.apache.kafka.streams.processor.TaskId): 19 uses
MockTime (org.apache.kafka.common.utils.MockTime): 16 uses
StreamsMetrics (org.apache.kafka.streams.StreamsMetrics): 15 uses
MockClientSupplier (org.apache.kafka.test.MockClientSupplier): 15 uses
Properties (java.util.Properties): 14 uses
KStreamBuilder (org.apache.kafka.streams.kstream.KStreamBuilder): 13 uses
HashSet (java.util.HashSet): 12 uses
Set (java.util.Set): 9 uses
Collection (java.util.Collection): 8 uses
TopologyBuilder (org.apache.kafka.streams.processor.TopologyBuilder): 8 uses
File (java.io.File): 7 uses
Map (java.util.Map): 7 uses
UUID (java.util.UUID): 7 uses
PartitionInfo (org.apache.kafka.common.PartitionInfo): 6 uses
MockProcessorSupplier (org.apache.kafka.test.MockProcessorSupplier): 6 uses