
Example 66 with PartitionInfo

Use of org.apache.kafka.common.PartitionInfo in project storm by apache.

In class PatternTopicFilter, method getAllSubscribedPartitions.

@Override
public Set<TopicPartition> getAllSubscribedPartitions(Consumer<?, ?> consumer) {
    topics.clear();
    Set<TopicPartition> allPartitions = new HashSet<>();
    for (Map.Entry<String, List<PartitionInfo>> entry : consumer.listTopics().entrySet()) {
        if (pattern.matcher(entry.getKey()).matches()) {
            for (PartitionInfo partitionInfo : entry.getValue()) {
                allPartitions.add(new TopicPartition(partitionInfo.topic(), partitionInfo.partition()));
                topics.add(partitionInfo.topic());
            }
        }
    }
    return allPartitions;
}
Also used : TopicPartition(org.apache.kafka.common.TopicPartition) List(java.util.List) PartitionInfo(org.apache.kafka.common.PartitionInfo) Map(java.util.Map) HashSet(java.util.HashSet)
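
A minimal standalone sketch of the same listTopics() walk outside Storm. The broker address, deserializer settings, topic pattern, and class name below are illustrative assumptions, not part of the Storm code above.

import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.regex.Pattern;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;

public class PatternPartitionLookup {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // assumed broker address
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        Pattern pattern = Pattern.compile("events-.*"); // illustrative topic pattern
        Set<TopicPartition> matched = new HashSet<>();
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            // listTopics() returns a Map<topic, List<PartitionInfo>> of every topic visible to the consumer.
            for (Map.Entry<String, List<PartitionInfo>> entry : consumer.listTopics().entrySet()) {
                if (pattern.matcher(entry.getKey()).matches()) {
                    for (PartitionInfo info : entry.getValue()) {
                        matched.add(new TopicPartition(info.topic(), info.partition()));
                    }
                }
            }
        }
        System.out.println("Matched partitions: " + matched);
    }
}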

Example 67 with PartitionInfo

Use of org.apache.kafka.common.PartitionInfo in project flink by apache.

In class FlinkKafkaProducerBaseTest, method testPartitionerInvokedWithDeterminatePartitionList.

/**
 * Tests that the partition list is determinate and correctly provided to the custom partitioner.
 */
@SuppressWarnings("unchecked")
@Test
public void testPartitionerInvokedWithDeterminatePartitionList() throws Exception {
    FlinkKafkaPartitioner<String> mockPartitioner = mock(FlinkKafkaPartitioner.class);
    RuntimeContext mockRuntimeContext = mock(StreamingRuntimeContext.class);
    when(mockRuntimeContext.getIndexOfThisSubtask()).thenReturn(0);
    when(mockRuntimeContext.getNumberOfParallelSubtasks()).thenReturn(1);
    // out-of-order list of 4 partitions
    List<PartitionInfo> mockPartitionsList = new ArrayList<>(4);
    mockPartitionsList.add(new PartitionInfo(DummyFlinkKafkaProducer.DUMMY_TOPIC, 3, null, null, null));
    mockPartitionsList.add(new PartitionInfo(DummyFlinkKafkaProducer.DUMMY_TOPIC, 1, null, null, null));
    mockPartitionsList.add(new PartitionInfo(DummyFlinkKafkaProducer.DUMMY_TOPIC, 0, null, null, null));
    mockPartitionsList.add(new PartitionInfo(DummyFlinkKafkaProducer.DUMMY_TOPIC, 2, null, null, null));
    final DummyFlinkKafkaProducer<String> producer = new DummyFlinkKafkaProducer<>(FakeStandardProducerConfig.get(), new KeyedSerializationSchemaWrapper<>(new SimpleStringSchema()), mockPartitioner);
    producer.setRuntimeContext(mockRuntimeContext);
    final KafkaProducer mockProducer = producer.getMockKafkaProducer();
    when(mockProducer.partitionsFor(anyString())).thenReturn(mockPartitionsList);
    when(mockProducer.metrics()).thenReturn(null);
    producer.open(new Configuration());
    verify(mockPartitioner, times(1)).open(0, 1);
    producer.invoke("foobar", SinkContextUtil.forTimestamp(0));
    verify(mockPartitioner, times(1)).partition("foobar", null, "foobar".getBytes(), DummyFlinkKafkaProducer.DUMMY_TOPIC, new int[] { 0, 1, 2, 3 });
}
Also used : KafkaProducer(org.apache.kafka.clients.producer.KafkaProducer) Configuration(org.apache.flink.configuration.Configuration) ArrayList(java.util.ArrayList) SimpleStringSchema(org.apache.flink.api.common.serialization.SimpleStringSchema) Mockito.anyString(org.mockito.Mockito.anyString) PartitionInfo(org.apache.kafka.common.PartitionInfo) RuntimeContext(org.apache.flink.api.common.functions.RuntimeContext) StreamingRuntimeContext(org.apache.flink.streaming.api.operators.StreamingRuntimeContext) Test(org.junit.Test)
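
For reference, the PartitionInfo constructor used above is PartitionInfo(topic, partition, leader, replicas, inSyncReplicas). A small sketch, using a hypothetical helper that is not part of Flink, of turning such an out-of-order list into the determinate, ascending partition-id array the test verifies:

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import org.apache.kafka.common.PartitionInfo;

// Hypothetical helper, not Flink API: produce a determinate, ascending array of partition ids.
static int[] sortedPartitionIds(List<PartitionInfo> partitions) {
    List<PartitionInfo> copy = new ArrayList<>(partitions);
    copy.sort(Comparator.comparingInt(PartitionInfo::partition));
    int[] ids = new int[copy.size()];
    for (int i = 0; i < copy.size(); i++) {
        ids[i] = copy.get(i).partition();
    }
    return ids;
}

For the four mock partitions above this yields { 0, 1, 2, 3 }, the array expected in the final verify call.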

Example 68 with PartitionInfo

Use of org.apache.kafka.common.PartitionInfo in project kafka by apache.

In class MockProducerTest, method testPartitioner.

@Test
public void testPartitioner() throws Exception {
    PartitionInfo partitionInfo0 = new PartitionInfo(topic, 0, null, null, null);
    PartitionInfo partitionInfo1 = new PartitionInfo(topic, 1, null, null, null);
    Cluster cluster = new Cluster(null, new ArrayList<>(0), asList(partitionInfo0, partitionInfo1), Collections.emptySet(), Collections.emptySet());
    MockProducer<String, String> producer = new MockProducer<>(cluster, true, new DefaultPartitioner(), new StringSerializer(), new StringSerializer());
    ProducerRecord<String, String> record = new ProducerRecord<>(topic, "key", "value");
    Future<RecordMetadata> metadata = producer.send(record);
    assertEquals(1, metadata.get().partition(), "Partition should be correct");
    producer.clear();
    assertEquals(0, producer.history().size(), "Clear should erase our history");
    producer.close();
}
Also used : DefaultPartitioner(org.apache.kafka.clients.producer.internals.DefaultPartitioner) Cluster(org.apache.kafka.common.Cluster) PartitionInfo(org.apache.kafka.common.PartitionInfo) StringSerializer(org.apache.kafka.common.serialization.StringSerializer) Test(org.junit.jupiter.api.Test)
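
MockProducer also offers a constructor without an explicit Cluster; a minimal sketch (the topic name and test method name are assumptions) showing an auto-completed send recorded in history():

import static org.junit.jupiter.api.Assertions.assertEquals;
import org.apache.kafka.clients.producer.MockProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;
import org.junit.jupiter.api.Test;

@Test
public void testMockProducerHistory() {
    // autoComplete = true completes each send future immediately.
    MockProducer<String, String> producer =
        new MockProducer<>(true, new StringSerializer(), new StringSerializer());
    producer.send(new ProducerRecord<>("demo-topic", "key", "value")); // illustrative topic
    assertEquals(1, producer.history().size(), "Every send should be recorded");
    producer.close();
}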

Example 69 with PartitionInfo

Use of org.apache.kafka.common.PartitionInfo in project kafka by apache.

In class UniformStickyPartitionerTest, method testRoundRobinWithUnavailablePartitions.

@Test
public void testRoundRobinWithUnavailablePartitions() {
    // Intentionally make the partition list not in partition order to test the edge cases.
    List<PartitionInfo> partitions = asList(new PartitionInfo("test", 1, null, NODES, NODES), new PartitionInfo("test", 2, NODES[1], NODES, NODES), new PartitionInfo("test", 0, NODES[0], NODES, NODES));
    // When there are some unavailable partitions, we want to make sure that (1) we always pick
    // an available partition, and (2) the available partitions are selected in a sticky way.
    int countForPart0 = 0;
    int countForPart2 = 0;
    int part = 0;
    Partitioner partitioner = new UniformStickyPartitioner();
    Cluster cluster = new Cluster("clusterId", asList(NODES[0], NODES[1], NODES[2]), partitions, Collections.<String>emptySet(), Collections.<String>emptySet());
    for (int i = 0; i < 50; i++) {
        part = partitioner.partition("test", null, null, null, null, cluster);
        assertTrue(part == 0 || part == 2, "We should never choose a leader-less node in round robin");
        if (part == 0)
            countForPart0++;
        else
            countForPart2++;
    }
    // Simulates switching the sticky partition on a new batch.
    partitioner.onNewBatch("test", cluster, part);
    for (int i = 1; i <= 50; i++) {
        part = partitioner.partition("test", null, null, null, null, cluster);
        assertTrue(part == 0 || part == 2, "We should never choose a leader-less node in round robin");
        if (part == 0)
            countForPart0++;
        else
            countForPart2++;
    }
    assertEquals(countForPart0, countForPart2, "The distribution between two available partitions should be even");
}
Also used : Cluster(org.apache.kafka.common.Cluster) PartitionInfo(org.apache.kafka.common.PartitionInfo) Test(org.junit.jupiter.api.Test)
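
The leader-less partition 1 above is exactly what Cluster.availablePartitionsForTopic() filters out while partitionsForTopic() keeps it; a small sketch with an assumed two-node fixture and test method name:

import static java.util.Arrays.asList;
import static org.junit.jupiter.api.Assertions.assertEquals;
import java.util.Collections;
import java.util.List;
import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.PartitionInfo;
import org.junit.jupiter.api.Test;

@Test
public void testAvailablePartitionsSkipLeaderless() {
    Node[] nodes = { new Node(0, "localhost", 9092), new Node(1, "localhost", 9093) }; // assumed nodes
    List<PartitionInfo> partitions = asList(
        new PartitionInfo("test", 0, nodes[0], nodes, nodes),
        new PartitionInfo("test", 1, null, nodes, nodes)); // partition 1 has no leader
    Cluster cluster = new Cluster("clusterId", asList(nodes), partitions,
        Collections.<String>emptySet(), Collections.<String>emptySet());
    assertEquals(2, cluster.partitionsForTopic("test").size());
    assertEquals(1, cluster.availablePartitionsForTopic("test").size());
}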

Example 70 with PartitionInfo

Use of org.apache.kafka.common.PartitionInfo in project kafka by apache.

In class UniformStickyPartitionerTest, method testRoundRobinWithKeyBytes.

@Test
public void testRoundRobinWithKeyBytes() throws InterruptedException {
    List<PartitionInfo> allPartitions = asList(new PartitionInfo(TOPIC_A, 0, NODES[0], NODES, NODES), new PartitionInfo(TOPIC_A, 1, NODES[1], NODES, NODES), new PartitionInfo(TOPIC_A, 2, NODES[1], NODES, NODES), new PartitionInfo(TOPIC_B, 0, NODES[0], NODES, NODES));
    Cluster testCluster = new Cluster("clusterId", asList(NODES[0], NODES[1], NODES[2]), allPartitions, Collections.<String>emptySet(), Collections.<String>emptySet());
    final Map<Integer, Integer> partitionCount = new HashMap<>();
    final byte[] keyBytes = "key".getBytes();
    int partition = 0;
    Partitioner partitioner = new UniformStickyPartitioner();
    for (int i = 0; i < 30; ++i) {
        partition = partitioner.partition(TOPIC_A, null, keyBytes, null, null, testCluster);
        Integer count = partitionCount.get(partition);
        if (null == count)
            count = 0;
        partitionCount.put(partition, count + 1);
        if (i % 5 == 0) {
            partitioner.partition(TOPIC_B, null, keyBytes, null, null, testCluster);
        }
    }
    // Simulate a batch filling up and switching the sticky partition.
    partitioner.onNewBatch(TOPIC_A, testCluster, partition);
    partitioner.onNewBatch(TOPIC_B, testCluster, 0);
    // Save old partition to ensure that the wrong partition does not trigger a new batch.
    int oldPart = partition;
    for (int i = 0; i < 30; ++i) {
        partition = partitioner.partition(TOPIC_A, null, keyBytes, null, null, testCluster);
        Integer count = partitionCount.get(partition);
        if (null == count)
            count = 0;
        partitionCount.put(partition, count + 1);
        if (i % 5 == 0) {
            partitioner.partition(TOPIC_B, null, keyBytes, null, null, testCluster);
        }
    }
    int newPart = partition;
    // Attempt to switch the partition with the wrong previous partition. Sticky partition should not change.
    partitioner.onNewBatch(TOPIC_A, testCluster, oldPart);
    for (int i = 0; i < 30; ++i) {
        partition = partitioner.partition(TOPIC_A, null, keyBytes, null, null, testCluster);
        Integer count = partitionCount.get(partition);
        if (null == count)
            count = 0;
        partitionCount.put(partition, count + 1);
        if (i % 5 == 0) {
            partitioner.partition(TOPIC_B, null, keyBytes, null, null, testCluster);
        }
    }
    assertEquals(30, partitionCount.get(oldPart).intValue());
    assertEquals(60, partitionCount.get(newPart).intValue());
}
Also used : HashMap(java.util.HashMap) Cluster(org.apache.kafka.common.Cluster) PartitionInfo(org.apache.kafka.common.PartitionInfo) Test(org.junit.jupiter.api.Test)
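
A hedged, much-simplified sketch of a custom Partitioner implementing the same partition()/onNewBatch() contract the test exercises; the class name and the random choice are assumptions, and this is not the UniformStickyPartitioner implementation itself:

import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.kafka.clients.producer.Partitioner;
import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.PartitionInfo;

// Hypothetical, simplified sticky partitioner: one sticky partition per topic,
// switched only when onNewBatch() reports that the current sticky partition's batch filled up.
public class SimpleStickyPartitioner implements Partitioner {

    private final ConcurrentHashMap<String, Integer> sticky = new ConcurrentHashMap<>();

    @Override
    public int partition(String topic, Object key, byte[] keyBytes,
                         Object value, byte[] valueBytes, Cluster cluster) {
        return sticky.computeIfAbsent(topic, t -> pick(t, cluster));
    }

    @Override
    public void onNewBatch(String topic, Cluster cluster, int prevPartition) {
        // Ignore stale callbacks, mirroring the "wrong previous partition" check in the test above.
        sticky.computeIfPresent(topic, (t, current) ->
            current == prevPartition ? pick(t, cluster) : current);
    }

    private int pick(String topic, Cluster cluster) {
        List<PartitionInfo> available = cluster.availablePartitionsForTopic(topic);
        if (available.isEmpty()) {
            available = cluster.partitionsForTopic(topic); // fall back to all partitions if none has a leader
        }
        return available.get(ThreadLocalRandom.current().nextInt(available.size())).partition();
    }

    @Override
    public void close() {
    }

    @Override
    public void configure(Map<String, ?> configs) {
    }
}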

Aggregations

PartitionInfo (org.apache.kafka.common.PartitionInfo): 227 usages
TopicPartition (org.apache.kafka.common.TopicPartition): 142 usages
HashMap (java.util.HashMap): 87 usages
Node (org.apache.kafka.common.Node): 85 usages
Test (org.junit.Test): 82 usages
Cluster (org.apache.kafka.common.Cluster): 80 usages
ArrayList (java.util.ArrayList): 73 usages
HashSet (java.util.HashSet): 67 usages
Set (java.util.Set): 38 usages
Map (java.util.Map): 34 usages
Test (org.junit.jupiter.api.Test): 31 usages
List (java.util.List): 30 usages
TaskId (org.apache.kafka.streams.processor.TaskId): 25 usages
StreamsConfig (org.apache.kafka.streams.StreamsConfig): 16 usages
ParameterizedTest (org.junit.jupiter.params.ParameterizedTest): 16 usages
MockConsumer (org.apache.kafka.clients.consumer.MockConsumer): 15 usages
Properties (java.util.Properties): 13 usages
MockTime (org.apache.kafka.common.utils.MockTime): 13 usages
OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata): 11 usages
HostInfo (org.apache.kafka.streams.state.HostInfo): 11 usages