
Example 11 with PartitionInfo

Use of org.apache.kafka.common.PartitionInfo in the apache/kafka project.

From the class StandbyTaskTest, method testUpdateKTable.

@SuppressWarnings("unchecked")
@Test
public void testUpdateKTable() throws Exception {
    consumer.assign(Utils.mkList(ktable));
    Map<TopicPartition, OffsetAndMetadata> committedOffsets = new HashMap<>();
    committedOffsets.put(new TopicPartition(ktable.topic(), ktable.partition()), new OffsetAndMetadata(0L));
    consumer.commitSync(committedOffsets);
    restoreStateConsumer.updatePartitions("ktable1", Utils.mkList(
        new PartitionInfo("ktable1", 0, Node.noNode(), new Node[0], new Node[0]),
        new PartitionInfo("ktable1", 1, Node.noNode(), new Node[0], new Node[0]),
        new PartitionInfo("ktable1", 2, Node.noNode(), new Node[0], new Node[0])));
    StreamsConfig config = createConfig(baseDir);
    StandbyTask task = new StandbyTask(taskId, applicationId, ktablePartitions, ktableTopology, consumer, changelogReader, config, null, stateDirectory);
    restoreStateConsumer.assign(new ArrayList<>(task.changeLogPartitions()));
    for (ConsumerRecord<Integer, Integer> record : Arrays.asList(
        new ConsumerRecord<>(ktable.topic(), ktable.partition(), 10, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, 1, 100),
        new ConsumerRecord<>(ktable.topic(), ktable.partition(), 20, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, 2, 100),
        new ConsumerRecord<>(ktable.topic(), ktable.partition(), 30, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, 3, 100),
        new ConsumerRecord<>(ktable.topic(), ktable.partition(), 40, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, 4, 100),
        new ConsumerRecord<>(ktable.topic(), ktable.partition(), 50, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, 5, 100))) {
        restoreStateConsumer.bufferRecord(record);
    }
    for (Map.Entry<TopicPartition, Long> entry : task.checkpointedOffsets().entrySet()) {
        TopicPartition partition = entry.getKey();
        long offset = entry.getValue();
        if (offset >= 0) {
            restoreStateConsumer.seek(partition, offset);
        } else {
            restoreStateConsumer.seekToBeginning(singleton(partition));
        }
    }
    // The commit offset is at 0L. No records should be processed yet.
    List<ConsumerRecord<byte[], byte[]>> remaining = task.update(ktable, restoreStateConsumer.poll(100).records(ktable));
    assertEquals(5, remaining.size());
    committedOffsets.put(new TopicPartition(ktable.topic(), ktable.partition()), new OffsetAndMetadata(10L));
    consumer.commitSync(committedOffsets);
    // update offset limits
    task.commit();
    // The commit offset has not been reached yet.
    remaining = task.update(ktable, remaining);
    assertEquals(5, remaining.size());
    committedOffsets.put(new TopicPartition(ktable.topic(), ktable.partition()), new OffsetAndMetadata(11L));
    consumer.commitSync(committedOffsets);
    // update offset limits
    task.commit();
    // One record should be processed.
    remaining = task.update(ktable, remaining);
    assertEquals(4, remaining.size());
    committedOffsets.put(new TopicPartition(ktable.topic(), ktable.partition()), new OffsetAndMetadata(45L));
    consumer.commitSync(committedOffsets);
    // update offset limits
    task.commit();
    // The commit offset is now 45. All records except the last one should be processed.
    remaining = task.update(ktable, remaining);
    assertEquals(1, remaining.size());
    committedOffsets.put(new TopicPartition(ktable.topic(), ktable.partition()), new OffsetAndMetadata(50L));
    consumer.commitSync(committedOffsets);
    // update offset limits
    task.commit();
    // The commit offset is now 50. The last record still remains.
    remaining = task.update(ktable, remaining);
    assertEquals(1, remaining.size());
    committedOffsets.put(new TopicPartition(ktable.topic(), ktable.partition()), new OffsetAndMetadata(60L));
    consumer.commitSync(committedOffsets);
    // update offset limits
    task.commit();
    // The commit offset is now 60. No record should be left.
    remaining = task.update(ktable, remaining);
    assertNull(remaining);
    task.closeStateManager(true);
    File taskDir = stateDirectory.directoryForTask(taskId);
    OffsetCheckpoint checkpoint = new OffsetCheckpoint(new File(taskDir, ProcessorStateManager.CHECKPOINT_FILE_NAME));
    Map<TopicPartition, Long> offsets = checkpoint.read();
    assertEquals(1, offsets.size());
    assertEquals(new Long(51L), offsets.get(ktable));
}
Also used: OffsetCheckpoint (org.apache.kafka.streams.state.internals.OffsetCheckpoint), HashMap (java.util.HashMap), ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord), TopicPartition (org.apache.kafka.common.TopicPartition), OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata), PartitionInfo (org.apache.kafka.common.PartitionInfo), Map (java.util.Map), File (java.io.File), StreamsConfig (org.apache.kafka.streams.StreamsConfig), Test (org.junit.Test)
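
A minimal, self-contained sketch of the updatePartitions pattern the test above relies on, for readers who want to reproduce it outside the test harness. The topic name and partition count are arbitrary, and Node.noNode() with empty replica arrays are test placeholders rather than real broker assignments:

import java.util.Arrays;

import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.PartitionInfo;

public class ChangelogMetadataSketch {
    public static void main(String[] args) {
        MockConsumer<byte[], byte[]> restoreConsumer =
                new MockConsumer<>(OffsetResetStrategy.EARLIEST);
        // Register three partitions of a hypothetical changelog topic so that
        // metadata lookups against the mock succeed.
        restoreConsumer.updatePartitions("ktable1", Arrays.asList(
                new PartitionInfo("ktable1", 0, Node.noNode(), new Node[0], new Node[0]),
                new PartitionInfo("ktable1", 1, Node.noNode(), new Node[0], new Node[0]),
                new PartitionInfo("ktable1", 2, Node.noNode(), new Node[0], new Node[0])));
        // Prints the three PartitionInfo entries registered above.
        System.out.println(restoreConsumer.partitionsFor("ktable1"));
    }
}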

Example 12 with PartitionInfo

Use of org.apache.kafka.common.PartitionInfo in the apache/kafka project.

From the class StreamPartitionAssignorTest, method shouldSetClusterMetadataOnAssignment.

@Test
public void shouldSetClusterMetadataOnAssignment() throws Exception {
    final List<TopicPartition> topic = Collections.singletonList(new TopicPartition("topic", 0));
    final Map<HostInfo, Set<TopicPartition>> hostState = Collections.singletonMap(new HostInfo("localhost", 80), Collections.singleton(new TopicPartition("topic", 0)));
    final AssignmentInfo assignmentInfo = new AssignmentInfo(Collections.singletonList(new TaskId(0, 0)), Collections.<TaskId, Set<TopicPartition>>emptyMap(), hostState);
    partitionAssignor.onAssignment(new PartitionAssignor.Assignment(topic, assignmentInfo.encode()));
    final Cluster cluster = partitionAssignor.clusterMetadata();
    final List<PartitionInfo> partitionInfos = cluster.partitionsForTopic("topic");
    final PartitionInfo partitionInfo = partitionInfos.get(0);
    assertEquals(1, partitionInfos.size());
    assertEquals("topic", partitionInfo.topic());
    assertEquals(0, partitionInfo.partition());
}
Also used: AssignmentInfo (org.apache.kafka.streams.processor.internals.assignment.AssignmentInfo), HashSet (java.util.HashSet), Set (java.util.Set), TaskId (org.apache.kafka.streams.processor.TaskId), TopicPartition (org.apache.kafka.common.TopicPartition), Cluster (org.apache.kafka.common.Cluster), PartitionAssignor (org.apache.kafka.clients.consumer.internals.PartitionAssignor), PartitionInfo (org.apache.kafka.common.PartitionInfo), HostInfo (org.apache.kafka.streams.state.HostInfo), Test (org.junit.Test)
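
Building a Cluster by hand is another way to exercise the same accessors the assertions above check. A hedged sketch, assuming the five-argument Cluster constructor available in this Kafka version; the cluster id and topic name are made up for illustration:

import java.util.Collections;

import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.PartitionInfo;

public class ClusterMetadataSketch {
    public static void main(String[] args) {
        PartitionInfo info = new PartitionInfo("topic", 0, Node.noNode(), new Node[0], new Node[0]);
        Cluster cluster = new Cluster("test-cluster",
                Collections.singletonList(Node.noNode()),
                Collections.singletonList(info),
                Collections.<String>emptySet(),
                Collections.<String>emptySet());
        // partitionsForTopic returns the PartitionInfo list for the topic,
        // mirroring what the assignor exposes via clusterMetadata().
        for (PartitionInfo pi : cluster.partitionsForTopic("topic")) {
            System.out.println(pi.topic() + "-" + pi.partition());
        }
    }
}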

Example 13 with PartitionInfo

Use of org.apache.kafka.common.PartitionInfo in the apache/kafka project.

From the class StoreChangelogReaderTest, method shouldFallbackToPartitionsForIfPartitionNotInAllPartitionsList.

@SuppressWarnings("unchecked")
@Test
public void shouldFallbackToPartitionsForIfPartitionNotInAllPartitionsList() throws Exception {
    final MockConsumer<byte[], byte[]> consumer = new MockConsumer(OffsetResetStrategy.EARLIEST) {

        @Override
        public List<PartitionInfo> partitionsFor(final String topic) {
            return Collections.singletonList(partitionInfo);
        }
    };
    final StoreChangelogReader changelogReader = new StoreChangelogReader(consumer, new MockTime(), 10);
    changelogReader.validatePartitionExists(topicPartition, "store");
}
Also used: PartitionInfo (org.apache.kafka.common.PartitionInfo), MockConsumer (org.apache.kafka.clients.consumer.MockConsumer), MockTime (org.apache.kafka.common.utils.MockTime), Test (org.junit.Test)
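
Subclassing MockConsumer, as above, is one way to control what partitionsFor returns; seeding the mock's metadata with updatePartitions achieves the same visible result without an override. A sketch, with a hypothetical topic name:

import java.util.Collections;

import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.PartitionInfo;

public class PartitionsForSketch {
    public static void main(String[] args) {
        MockConsumer<byte[], byte[]> consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
        // Seed the mock so partitionsFor("store-changelog") returns one entry,
        // instead of overriding the method in an anonymous subclass.
        consumer.updatePartitions("store-changelog", Collections.singletonList(
                new PartitionInfo("store-changelog", 0, Node.noNode(), new Node[0], new Node[0])));
        System.out.println(consumer.partitionsFor("store-changelog"));
    }
}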

Example 14 with PartitionInfo

Use of org.apache.kafka.common.PartitionInfo in the apache/kafka project.

From the class StoreChangelogReaderTest, method shouldThrowStreamsExceptionIfTimeoutOccursDuringPartitionsFor.

@SuppressWarnings("unchecked")
@Test
public void shouldThrowStreamsExceptionIfTimeoutOccursDuringPartitionsFor() throws Exception {
    final MockConsumer<byte[], byte[]> consumer = new MockConsumer(OffsetResetStrategy.EARLIEST) {

        @Override
        public List<PartitionInfo> partitionsFor(final String topic) {
            throw new TimeoutException("KABOOM!");
        }
    };
    final StoreChangelogReader changelogReader = new StoreChangelogReader(consumer, new MockTime(), 5);
    try {
        changelogReader.validatePartitionExists(topicPartition, "store");
        fail("Should have thrown streams exception");
    } catch (final StreamsException e) {
    // pass
    }
}
Also used: StreamsException (org.apache.kafka.streams.errors.StreamsException), PartitionInfo (org.apache.kafka.common.PartitionInfo), MockConsumer (org.apache.kafka.clients.consumer.MockConsumer), MockTime (org.apache.kafka.common.utils.MockTime), TimeoutException (org.apache.kafka.common.errors.TimeoutException), Test (org.junit.Test)
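
The contract this test pins down is that a metadata TimeoutException surfaces as a StreamsException. A rough sketch of that wrapping, offered as an assumption about the shape of the logic rather than the actual StoreChangelogReader code (which also retries within its deadline before giving up):

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.errors.TimeoutException;
import org.apache.kafka.streams.errors.StreamsException;

final class MetadataLookupSketch {
    // Illustrative only: translate a consumer metadata timeout into the
    // StreamsException the test above expects to catch.
    static void validatePartitionExists(final Consumer<byte[], byte[]> consumer, final String topic) {
        try {
            consumer.partitionsFor(topic);
        } catch (final TimeoutException e) {
            throw new StreamsException("Could not fetch partitions for topic " + topic, e);
        }
    }
}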

Example 15 with PartitionInfo

Use of org.apache.kafka.common.PartitionInfo in the apache/kafka project.

From the class StreamsMetadataStateTest, method shouldGetInstanceWithKeyAndCustomPartitioner.

@Test
public void shouldGetInstanceWithKeyAndCustomPartitioner() throws Exception {
    final TopicPartition tp4 = new TopicPartition("topic-three", 1);
    hostToPartitions.put(hostTwo, Utils.mkSet(topic2P0, tp4));
    discovery.onChange(hostToPartitions, cluster.withPartitions(Collections.singletonMap(tp4, new PartitionInfo("topic-three", 1, null, null, null))));
    final StreamsMetadata expected = new StreamsMetadata(hostTwo, Utils.mkSet(globalTable, "table-two", "table-three", "merged-table"), Utils.mkSet(topic2P0, tp4));
    StreamsMetadata actual = discovery.getMetadataWithKey("table-three", "the-key", partitioner);
    assertEquals(expected, actual);
}
Also used: TopicPartition (org.apache.kafka.common.TopicPartition), StreamsMetadata (org.apache.kafka.streams.state.StreamsMetadata), PartitionInfo (org.apache.kafka.common.PartitionInfo), Test (org.junit.Test)
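
The cluster.withPartitions call above returns a copy of the cluster with the extra PartitionInfo mappings added; the null leader and replica arrays are tolerated because only the topic and partition fields are consulted by the metadata lookup. A small sketch of that idiom, starting from an empty cluster:

import java.util.Collections;

import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;

public class WithPartitionsSketch {
    public static void main(String[] args) {
        TopicPartition tp = new TopicPartition("topic-three", 1);
        Cluster cluster = Cluster.empty().withPartitions(
                Collections.singletonMap(tp, new PartitionInfo("topic-three", 1, null, null, null)));
        // Look the partition back up by TopicPartition.
        PartitionInfo pi = cluster.partition(tp);
        System.out.println(pi.topic() + "-" + pi.partition());
    }
}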

Aggregations

PartitionInfo (org.apache.kafka.common.PartitionInfo): 49
TopicPartition (org.apache.kafka.common.TopicPartition): 30
Test (org.junit.Test): 23
HashMap (java.util.HashMap): 17
ArrayList (java.util.ArrayList): 15
Node (org.apache.kafka.common.Node): 12
Map (java.util.Map): 11
Cluster (org.apache.kafka.common.Cluster): 11
HashSet (java.util.HashSet): 10
Set (java.util.Set): 7
TaskId (org.apache.kafka.streams.processor.TaskId): 7
StreamsConfig (org.apache.kafka.streams.StreamsConfig): 6
MockTime (org.apache.kafka.common.utils.MockTime): 5
List (java.util.List): 4
Properties (java.util.Properties): 4
KStreamBuilder (org.apache.kafka.streams.kstream.KStreamBuilder): 4
HostInfo (org.apache.kafka.streams.state.HostInfo): 4
StreamsMetadata (org.apache.kafka.streams.state.StreamsMetadata): 4
File (java.io.File): 3
MockConsumer (org.apache.kafka.clients.consumer.MockConsumer): 3