Example 11 with TopicPartition

Use of org.apache.kafka.common.TopicPartition in project kafka by apache.

From the class CollectionUtils, method groupDataByTopic.

/**
 * Groups partitions by topic.
 *
 * @param partitions the topic partitions to group
 * @return a map from topic name to the list of partition ids for that topic
 */
public static Map<String, List<Integer>> groupDataByTopic(List<TopicPartition> partitions) {
    Map<String, List<Integer>> partitionsByTopic = new HashMap<>();
    for (TopicPartition tp : partitions) {
        String topic = tp.topic();
        List<Integer> topicData = partitionsByTopic.get(topic);
        if (topicData == null) {
            topicData = new ArrayList<>();
            partitionsByTopic.put(topic, topicData);
        }
        topicData.add(tp.partition());
    }
    return partitionsByTopic;
}
Also used: HashMap (java.util.HashMap), TopicPartition (org.apache.kafka.common.TopicPartition), List (java.util.List), ArrayList (java.util.ArrayList)
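
For comparison, the same grouping can be written more compactly on Java 8+ with Map.computeIfAbsent. A minimal, self-contained sketch; the class and method names here are ours, not Kafka's:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.kafka.common.TopicPartition;

public class GroupByTopicExample {

    // Same behavior as groupDataByTopic above, using computeIfAbsent instead of
    // an explicit null check to create the per-topic list on first access.
    static Map<String, List<Integer>> groupByTopic(List<TopicPartition> partitions) {
        Map<String, List<Integer>> byTopic = new HashMap<>();
        for (TopicPartition tp : partitions) {
            byTopic.computeIfAbsent(tp.topic(), t -> new ArrayList<>()).add(tp.partition());
        }
        return byTopic;
    }

    public static void main(String[] args) {
        List<TopicPartition> partitions = Arrays.asList(
                new TopicPartition("foo", 0), new TopicPartition("foo", 1), new TopicPartition("bar", 0));
        // Prints {bar=[0], foo=[0, 1]} (HashMap iteration order is not guaranteed).
        System.out.println(groupByTopic(partitions));
    }
}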

Example 12 with TopicPartition

Use of org.apache.kafka.common.TopicPartition in project kafka by apache.

From the class ConsumerProtocolTest, method serializeDeserializeAssignment.

@Test
public void serializeDeserializeAssignment() {
    List<TopicPartition> partitions = Arrays.asList(new TopicPartition("foo", 0), new TopicPartition("bar", 2));
    ByteBuffer buffer = ConsumerProtocol.serializeAssignment(new PartitionAssignor.Assignment(partitions));
    PartitionAssignor.Assignment parsedAssignment = ConsumerProtocol.deserializeAssignment(buffer);
    assertEquals(toSet(partitions), toSet(parsedAssignment.partitions()));
}
Also used: TopicPartition (org.apache.kafka.common.TopicPartition), ByteBuffer (java.nio.ByteBuffer), Test (org.junit.Test)
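
ConsumerProtocol is an internal Kafka class, so its exact wire format is not part of the public API. To illustrate the round-trip pattern the test exercises, here is a sketch that serializes a partition list to a ByteBuffer and reads it back; the layout (entry count, then topic length, topic bytes, and partition id per entry) is made up for this example and is not Kafka's actual assignment schema:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;

import org.apache.kafka.common.TopicPartition;

public class AssignmentRoundTrip {

    // Illustrative encoding only: entry count, then (topic length, topic bytes, partition id).
    static ByteBuffer serialize(List<TopicPartition> partitions) {
        List<byte[]> topics = new ArrayList<>();
        int size = 4;
        for (TopicPartition tp : partitions) {
            byte[] topic = tp.topic().getBytes(StandardCharsets.UTF_8);
            topics.add(topic);
            size += 4 + topic.length + 4;
        }
        ByteBuffer buffer = ByteBuffer.allocate(size);
        buffer.putInt(partitions.size());
        for (int i = 0; i < partitions.size(); i++) {
            buffer.putInt(topics.get(i).length);
            buffer.put(topics.get(i));
            buffer.putInt(partitions.get(i).partition());
        }
        buffer.flip(); // prepare the buffer for reading
        return buffer;
    }

    static List<TopicPartition> deserialize(ByteBuffer buffer) {
        int count = buffer.getInt();
        List<TopicPartition> partitions = new ArrayList<>(count);
        for (int i = 0; i < count; i++) {
            byte[] topic = new byte[buffer.getInt()];
            buffer.get(topic);
            partitions.add(new TopicPartition(new String(topic, StandardCharsets.UTF_8), buffer.getInt()));
        }
        return partitions;
    }
}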

Example 13 with TopicPartition

Use of org.apache.kafka.common.TopicPartition in project kafka by apache.

From the class FetcherTest, method testGetOffsetsForTimesWithError.

private void testGetOffsetsForTimesWithError(Errors errorForTp0, Errors errorForTp1, long offsetForTp0, long offsetForTp1, Long expectedOffsetForTp0, Long expectedOffsetForTp1) {
    client.reset();
    TopicPartition tp0 = tp;
    TopicPartition tp1 = new TopicPartition(topicName, 1);
// Ensure metadata has both partitions.
    Cluster cluster = TestUtils.clusterWith(2, topicName, 2);
    metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());
    // First try should fail due to metadata error.
    client.prepareResponseFrom(listOffsetResponse(tp0, errorForTp0, offsetForTp0, offsetForTp0), cluster.leaderFor(tp0));
    client.prepareResponseFrom(listOffsetResponse(tp1, errorForTp1, offsetForTp1, offsetForTp1), cluster.leaderFor(tp1));
    // Second try should succeed.
    client.prepareResponseFrom(listOffsetResponse(tp0, Errors.NONE, offsetForTp0, offsetForTp0), cluster.leaderFor(tp0));
    client.prepareResponseFrom(listOffsetResponse(tp1, Errors.NONE, offsetForTp1, offsetForTp1), cluster.leaderFor(tp1));
    Map<TopicPartition, Long> timestampToSearch = new HashMap<>();
    timestampToSearch.put(tp0, 0L);
    timestampToSearch.put(tp1, 0L);
    Map<TopicPartition, OffsetAndTimestamp> offsetAndTimestampMap = fetcher.getOffsetsByTimes(timestampToSearch, Long.MAX_VALUE);
    if (expectedOffsetForTp0 == null)
        assertNull(offsetAndTimestampMap.get(tp0));
    else {
        assertEquals(expectedOffsetForTp0.longValue(), offsetAndTimestampMap.get(tp0).timestamp());
        assertEquals(expectedOffsetForTp0.longValue(), offsetAndTimestampMap.get(tp0).offset());
    }
    if (expectedOffsetForTp1 == null)
        assertNull(offsetAndTimestampMap.get(tp1));
    else {
        assertEquals(expectedOffsetForTp1.longValue(), offsetAndTimestampMap.get(tp1).timestamp());
        assertEquals(expectedOffsetForTp1.longValue(), offsetAndTimestampMap.get(tp1).offset());
    }
}
Also used: HashMap (java.util.HashMap), LinkedHashMap (java.util.LinkedHashMap), TopicPartition (org.apache.kafka.common.TopicPartition), Cluster (org.apache.kafka.common.Cluster), OffsetAndTimestamp (org.apache.kafka.clients.consumer.OffsetAndTimestamp)
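
The fetcher method under test sits behind the public KafkaConsumer.offsetsForTimes API. A minimal sketch of calling it from application code; the broker address, group id, and topic name are placeholders:

import java.util.HashMap;
import java.util.Map;
import java.util.Properties;

import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndTimestamp;
import org.apache.kafka.common.TopicPartition;

public class OffsetsForTimesExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder broker address
        props.put("group.id", "offsets-for-times-demo");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            // For each partition, look up the earliest offset whose timestamp is >= the given time.
            Map<TopicPartition, Long> timestampsToSearch = new HashMap<>();
            timestampsToSearch.put(new TopicPartition("my-topic", 0), 0L);
            timestampsToSearch.put(new TopicPartition("my-topic", 1), 0L);

            Map<TopicPartition, OffsetAndTimestamp> offsets = consumer.offsetsForTimes(timestampsToSearch);
            // A partition can map to null when no matching offset exists, mirroring
            // the null checks in the test above.
            offsets.forEach((tp, oat) -> System.out.println(
                    tp + " -> " + (oat == null ? "none" : oat.offset() + " @ " + oat.timestamp())));
        }
    }
}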

Example 14 with TopicPartition

Use of org.apache.kafka.common.TopicPartition in project kafka by apache.

From the class ConsumerCoordinatorTest, method testUpdateMetadataDuringRebalance.

@Test
public void testUpdateMetadataDuringRebalance() {
    final String topic1 = "topic1";
    final String topic2 = "topic2";
    TopicPartition tp1 = new TopicPartition(topic1, 0);
    TopicPartition tp2 = new TopicPartition(topic2, 0);
    final String consumerId = "leader";
    List<String> topics = Arrays.asList(topic1, topic2);
    subscriptions.subscribe(new HashSet<>(topics), rebalanceListener);
    metadata.setTopics(topics);
    // we only have metadata for one topic initially
    metadata.update(TestUtils.singletonCluster(topic1, 1), Collections.<String>emptySet(), time.milliseconds());
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady();
    // prepare initial rebalance
    Map<String, List<String>> memberSubscriptions = Collections.singletonMap(consumerId, topics);
    partitionAssignor.prepare(Collections.singletonMap(consumerId, Collections.singletonList(tp1)));
    client.prepareResponse(joinGroupLeaderResponse(1, consumerId, memberSubscriptions, Errors.NONE));
    client.prepareResponse(new MockClient.RequestMatcher() {

        @Override
        public boolean matches(AbstractRequest body) {
            SyncGroupRequest sync = (SyncGroupRequest) body;
            if (sync.memberId().equals(consumerId) && sync.generationId() == 1 && sync.groupAssignment().containsKey(consumerId)) {
                // trigger the metadata update including both topics after the sync group request has been sent
                Map<String, Integer> topicPartitionCounts = new HashMap<>();
                topicPartitionCounts.put(topic1, 1);
                topicPartitionCounts.put(topic2, 1);
                metadata.update(TestUtils.singletonCluster(topicPartitionCounts), Collections.<String>emptySet(), time.milliseconds());
                return true;
            }
            return false;
        }
    }, syncGroupResponse(Collections.singletonList(tp1), Errors.NONE));
    // the metadata update should trigger a second rebalance
    client.prepareResponse(joinGroupLeaderResponse(2, consumerId, memberSubscriptions, Errors.NONE));
    client.prepareResponse(syncGroupResponse(Arrays.asList(tp1, tp2), Errors.NONE));
    coordinator.poll(time.milliseconds());
    assertFalse(coordinator.needRejoin());
    assertEquals(new HashSet<>(Arrays.asList(tp1, tp2)), subscriptions.assignedPartitions());
}
Also used: AbstractRequest (org.apache.kafka.common.requests.AbstractRequest), SyncGroupRequest (org.apache.kafka.common.requests.SyncGroupRequest), TopicPartition (org.apache.kafka.common.TopicPartition), Collections.singletonList (java.util.Collections.singletonList), List (java.util.List), Map (java.util.Map), HashMap (java.util.HashMap), Collections.singletonMap (java.util.Collections.singletonMap), MockClient (org.apache.kafka.clients.MockClient), Test (org.junit.Test)
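
From the application side, the rebalances this test drives are observable through a ConsumerRebalanceListener. A minimal sketch of subscribing with a listener that logs assignment changes; the broker address, group id, and topic names are placeholders:

import java.util.Arrays;
import java.util.Collection;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

public class RebalanceListenerExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder broker address
        props.put("group.id", "rebalance-demo");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        consumer.subscribe(Arrays.asList("topic1", "topic2"), new ConsumerRebalanceListener() {
            @Override
            public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
                // Called before a rebalance takes partitions away; commit offsets here if needed.
                System.out.println("Revoked: " + partitions);
            }

            @Override
            public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
                // Called after each rebalance completes. A metadata change (e.g. a newly
                // visible topic in the subscription) can trigger another rebalance, so this
                // may fire more than once, as the test above demonstrates.
                System.out.println("Assigned: " + partitions);
            }
        });

        try {
            // poll() drives the group protocol; the listener callbacks fire inside poll().
            for (int i = 0; i < 10; i++) {
                consumer.poll(1000);
            }
        } finally {
            consumer.close();
        }
    }
}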

Example 15 with TopicPartition

Use of org.apache.kafka.common.TopicPartition in project kafka by apache.

From the class ConsumerInterceptorsTest, method testOnCommitChain.

@Test
public void testOnCommitChain() {
    List<ConsumerInterceptor<Integer, Integer>> interceptorList = new ArrayList<>();
    // we are testing two different interceptors by configuring the same interceptor differently, which is not
    // how it would be done in KafkaConsumer, but ok for testing interceptor callbacks
    FilterConsumerInterceptor<Integer, Integer> interceptor1 = new FilterConsumerInterceptor<>(filterPartition1);
    FilterConsumerInterceptor<Integer, Integer> interceptor2 = new FilterConsumerInterceptor<>(filterPartition2);
    interceptorList.add(interceptor1);
    interceptorList.add(interceptor2);
    ConsumerInterceptors<Integer, Integer> interceptors = new ConsumerInterceptors<>(interceptorList);
    // verify that onCommit is called for all interceptors in the chain
    Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
    offsets.put(tp, new OffsetAndMetadata(0));
    interceptors.onCommit(offsets);
    assertEquals(2, onCommitCount);
    // verify that even if one of the interceptors throws an exception, all interceptors' onCommit are called
    interceptor1.injectOnCommitError(true);
    interceptors.onCommit(offsets);
    assertEquals(4, onCommitCount);
    interceptors.close();
}
Also used: HashMap (java.util.HashMap), TopicPartition (org.apache.kafka.common.TopicPartition), ConsumerInterceptor (org.apache.kafka.clients.consumer.ConsumerInterceptor), ArrayList (java.util.ArrayList), OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata), Test (org.junit.Test)
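
For reference, a minimal ConsumerInterceptor implementation of our own (the class name and the logging are illustrative). onCommit receives the same Map<TopicPartition, OffsetAndMetadata> the test builds above:

import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerInterceptor;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

public class LoggingConsumerInterceptor<K, V> implements ConsumerInterceptor<K, V> {

    @Override
    public void configure(Map<String, ?> configs) {
        // No configuration needed for this example.
    }

    @Override
    public ConsumerRecords<K, V> onConsume(ConsumerRecords<K, V> records) {
        // Pass records through unchanged; a filtering interceptor would return a reduced set.
        return records;
    }

    @Override
    public void onCommit(Map<TopicPartition, OffsetAndMetadata> offsets) {
        // Exceptions thrown here are caught by the interceptor chain so that the
        // remaining interceptors still run, which is what the test above verifies.
        offsets.forEach((tp, om) -> System.out.println("Committed " + tp + " at offset " + om.offset()));
    }

    @Override
    public void close() {
        // Release any resources held by the interceptor.
    }
}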

Aggregations

TopicPartition (org.apache.kafka.common.TopicPartition): 243
HashMap (java.util.HashMap): 128
Test (org.junit.Test): 96
Map (java.util.Map): 59
ArrayList (java.util.ArrayList): 45
HashSet (java.util.HashSet): 43
LinkedHashMap (java.util.LinkedHashMap): 39
TaskId (org.apache.kafka.streams.processor.TaskId): 33
PartitionInfo (org.apache.kafka.common.PartitionInfo): 30
OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata): 28
Set (java.util.Set): 27
List (java.util.List): 23
Metrics (org.apache.kafka.common.metrics.Metrics): 20
StreamsConfig (org.apache.kafka.streams.StreamsConfig): 19
Node (org.apache.kafka.common.Node): 18
Properties (java.util.Properties): 17
ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord): 17
MockTime (org.apache.kafka.common.utils.MockTime): 17
PrepareForTest (org.powermock.core.classloader.annotations.PrepareForTest): 14
Collection (java.util.Collection): 13