Example 6 with Cluster

use of org.apache.kafka.common.Cluster in project kafka by apache.

From the class KafkaConsumerTest, method testManualAssignmentChangeWithAutoCommitEnabled:

@Test
public void testManualAssignmentChangeWithAutoCommitEnabled() {
    int rebalanceTimeoutMs = 60000;
    int sessionTimeoutMs = 30000;
    int heartbeatIntervalMs = 3000;
    int autoCommitIntervalMs = 1000;
    Time time = new MockTime();
    Map<String, Integer> tpCounts = new HashMap<>();
    tpCounts.put(topic, 1);
    tpCounts.put(topic2, 1);
    Cluster cluster = TestUtils.singletonCluster(tpCounts);
    Node node = cluster.nodes().get(0);
    Metadata metadata = new Metadata(0, Long.MAX_VALUE);
    metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());
    MockClient client = new MockClient(time, metadata);
    client.setNode(node);
    PartitionAssignor assignor = new RangeAssignor();
    final KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor, rebalanceTimeoutMs, sessionTimeoutMs, heartbeatIntervalMs, true, autoCommitIntervalMs);
    // lookup coordinator
    client.prepareResponseFrom(new GroupCoordinatorResponse(Errors.NONE, node), node);
    Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());
    // manual assignment
    consumer.assign(Arrays.asList(tp0));
    consumer.seekToBeginning(Arrays.asList(tp0));
    // fetch offset for one topic
    client.prepareResponseFrom(offsetResponse(Collections.singletonMap(tp0, 0L), Errors.NONE), coordinator);
    assertEquals(0, consumer.committed(tp0).offset());
    // verify that assignment immediately changes
    assertTrue(consumer.assignment().equals(Collections.singleton(tp0)));
    // there shouldn't be any need to lookup the coordinator or fetch committed offsets.
    // we just lookup the starting position and send the record fetch.
    client.prepareResponse(listOffsetsResponse(Collections.singletonMap(tp0, 10L), Errors.NONE));
    client.prepareResponse(fetchResponse(tp0, 10L, 1));
    ConsumerRecords<String, String> records = consumer.poll(0);
    assertEquals(1, records.count());
    assertEquals(11L, consumer.position(tp0));
    // mock the offset commit response for the partition about to be revoked
    AtomicBoolean commitReceived = prepareOffsetCommitResponse(client, coordinator, tp0, 11);
    // new manual assignment
    consumer.assign(Arrays.asList(t2p0));
    // verify that assignment immediately changes
    assertTrue(consumer.assignment().equals(Collections.singleton(t2p0)));
    // verify that the offset commits occurred as expected
    assertTrue(commitReceived.get());
    client.requests().clear();
    consumer.close();
}
Also used: HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) Node(org.apache.kafka.common.Node) Metadata(org.apache.kafka.clients.Metadata) Cluster(org.apache.kafka.common.Cluster) MockTime(org.apache.kafka.common.utils.MockTime) Time(org.apache.kafka.common.utils.Time) GroupCoordinatorResponse(org.apache.kafka.common.requests.GroupCoordinatorResponse) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) PartitionAssignor(org.apache.kafka.clients.consumer.internals.PartitionAssignor) MockClient(org.apache.kafka.clients.MockClient) Test(org.junit.Test)
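
TestUtils.singletonCluster is a helper from Kafka's test sources. If that helper is not on the classpath, a hand-rolled approximation looks roughly like the sketch below: a single node that leads every partition of every requested topic. The host, port, and the method name singleNodeCluster are placeholders, not the values the real helper uses.

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;

import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.PartitionInfo;

// Approximation of TestUtils.singletonCluster(tpCounts): one node, every topic
// gets the requested number of partitions, all led by that node.
static Cluster singleNodeCluster(Map<String, Integer> partitionCounts) {
    Node node = new Node(0, "localhost", 9092);
    List<PartitionInfo> partitions = new ArrayList<>();
    for (Map.Entry<String, Integer> entry : partitionCounts.entrySet())
        for (int p = 0; p < entry.getValue(); p++)
            partitions.add(new PartitionInfo(entry.getKey(), p, node,
                    new Node[]{node}, new Node[]{node}));
    return new Cluster(null, Collections.singletonList(node), partitions,
            Collections.<String>emptySet(), Collections.<String>emptySet());
}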

Example 7 with Cluster

use of org.apache.kafka.common.Cluster in project kafka by apache.

From the class FetcherTest, method testGetOffsetsForTimesWithError:

private void testGetOffsetsForTimesWithError(Errors errorForTp0, Errors errorForTp1, long offsetForTp0, long offsetForTp1, Long expectedOffsetForTp0, Long expectedOffsetForTp1) {
    client.reset();
    TopicPartition tp0 = tp;
    TopicPartition tp1 = new TopicPartition(topicName, 1);
    // Ensure metadata has both partitions.
    Cluster cluster = TestUtils.clusterWith(2, topicName, 2);
    metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());
    // First try should fail due to metadata error.
    client.prepareResponseFrom(listOffsetResponse(tp0, errorForTp0, offsetForTp0, offsetForTp0), cluster.leaderFor(tp0));
    client.prepareResponseFrom(listOffsetResponse(tp1, errorForTp1, offsetForTp1, offsetForTp1), cluster.leaderFor(tp1));
    // Second try should succeed.
    client.prepareResponseFrom(listOffsetResponse(tp0, Errors.NONE, offsetForTp0, offsetForTp0), cluster.leaderFor(tp0));
    client.prepareResponseFrom(listOffsetResponse(tp1, Errors.NONE, offsetForTp1, offsetForTp1), cluster.leaderFor(tp1));
    Map<TopicPartition, Long> timestampToSearch = new HashMap<>();
    timestampToSearch.put(tp0, 0L);
    timestampToSearch.put(tp1, 0L);
    Map<TopicPartition, OffsetAndTimestamp> offsetAndTimestampMap = fetcher.getOffsetsByTimes(timestampToSearch, Long.MAX_VALUE);
    if (expectedOffsetForTp0 == null)
        assertNull(offsetAndTimestampMap.get(tp0));
    else {
        assertEquals(expectedOffsetForTp0.longValue(), offsetAndTimestampMap.get(tp0).timestamp());
        assertEquals(expectedOffsetForTp0.longValue(), offsetAndTimestampMap.get(tp0).offset());
    }
    if (expectedOffsetForTp1 == null)
        assertNull(offsetAndTimestampMap.get(tp1));
    else {
        assertEquals(expectedOffsetForTp1.longValue(), offsetAndTimestampMap.get(tp1).timestamp());
        assertEquals(expectedOffsetForTp1.longValue(), offsetAndTimestampMap.get(tp1).offset());
    }
}
Also used: HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) TopicPartition(org.apache.kafka.common.TopicPartition) Cluster(org.apache.kafka.common.Cluster) OffsetAndTimestamp(org.apache.kafka.clients.consumer.OffsetAndTimestamp)
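
Here TestUtils.clusterWith(2, topicName, 2) produces a two-node cluster in which the topic has two partitions, and the test routes each prepared ListOffsets response to whichever broker cluster.leaderFor(tp) reports as the partition leader. A hand-built equivalent, as a sketch (the topic name and ports are assumptions):

import java.util.Arrays;
import java.util.Collections;

import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;

Node node0 = new Node(0, "localhost", 9092);
Node node1 = new Node(1, "localhost", 9093);
// Partition 0 is led by node0 and partition 1 by node1, so each prepared
// response can be targeted at the right broker via leaderFor().
Cluster cluster = new Cluster(null, Arrays.asList(node0, node1),
        Arrays.asList(
                new PartitionInfo("test-topic", 0, node0, new Node[]{node0, node1}, new Node[]{node0}),
                new PartitionInfo("test-topic", 1, node1, new Node[]{node1, node0}, new Node[]{node1})),
        Collections.<String>emptySet(), Collections.<String>emptySet());
Node leaderForP0 = cluster.leaderFor(new TopicPartition("test-topic", 0)); // node0
Node leaderForP1 = cluster.leaderFor(new TopicPartition("test-topic", 1)); // node1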

Example 8 with Cluster

use of org.apache.kafka.common.Cluster in project kafka by apache.

From the class KafkaConsumerTest, method testOffsetOfPausedPartitions:

@Test
public void testOffsetOfPausedPartitions() {
    int rebalanceTimeoutMs = 60000;
    int sessionTimeoutMs = 30000;
    int heartbeatIntervalMs = 3000;
    int autoCommitIntervalMs = 1000;
    Time time = new MockTime();
    Cluster cluster = TestUtils.singletonCluster(topic, 2);
    Node node = cluster.nodes().get(0);
    Metadata metadata = new Metadata(0, Long.MAX_VALUE);
    metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());
    MockClient client = new MockClient(time, metadata);
    client.setNode(node);
    PartitionAssignor assignor = new RangeAssignor();
    final KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor, rebalanceTimeoutMs, sessionTimeoutMs, heartbeatIntervalMs, true, autoCommitIntervalMs);
    // lookup coordinator
    client.prepareResponseFrom(new GroupCoordinatorResponse(Errors.NONE, node), node);
    Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());
    // manual assignment
    Set<TopicPartition> partitions = Utils.mkSet(tp0, tp1);
    consumer.assign(partitions);
    // verify consumer's assignment
    assertTrue(consumer.assignment().equals(partitions));
    consumer.pause(partitions);
    consumer.seekToEnd(partitions);
    // fetch and verify committed offset of two partitions
    Map<TopicPartition, Long> offsets = new HashMap<>();
    offsets.put(tp0, 0L);
    offsets.put(tp1, 0L);
    client.prepareResponseFrom(offsetResponse(offsets, Errors.NONE), coordinator);
    assertEquals(0, consumer.committed(tp0).offset());
    assertEquals(0, consumer.committed(tp1).offset());
    // fetch and verify consumer's position in the two partitions
    client.prepareResponse(listOffsetsResponse(Collections.singletonMap(tp0, 3L), Errors.NONE));
    client.prepareResponse(listOffsetsResponse(Collections.singletonMap(tp1, 3L), Errors.NONE));
    assertEquals(3L, consumer.position(tp0));
    assertEquals(3L, consumer.position(tp1));
    client.requests().clear();
    consumer.unsubscribe();
    consumer.close();
}
Also used: HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) Node(org.apache.kafka.common.Node) Metadata(org.apache.kafka.clients.Metadata) Cluster(org.apache.kafka.common.Cluster) MockTime(org.apache.kafka.common.utils.MockTime) Time(org.apache.kafka.common.utils.Time) GroupCoordinatorResponse(org.apache.kafka.common.requests.GroupCoordinatorResponse) TopicPartition(org.apache.kafka.common.TopicPartition) PartitionAssignor(org.apache.kafka.clients.consumer.internals.PartitionAssignor) MockClient(org.apache.kafka.clients.MockClient) Test(org.junit.Test)
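
The behavior exercised above is also visible through the public consumer API: positions of paused partitions can still be seeked and queried even though poll() would return no records for them. A minimal sketch against a running broker; the bootstrap address, group id, and topic name below are assumptions:

import java.util.Arrays;
import java.util.HashSet;
import java.util.Properties;
import java.util.Set;

import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

Properties props = new Properties();
props.put("bootstrap.servers", "localhost:9092"); // assumed local broker
props.put("group.id", "paused-partitions-demo");  // assumed group id

try (KafkaConsumer<String, String> consumer =
         new KafkaConsumer<>(props, new StringDeserializer(), new StringDeserializer())) {
    TopicPartition p0 = new TopicPartition("test-topic", 0);
    TopicPartition p1 = new TopicPartition("test-topic", 1);
    Set<TopicPartition> partitions = new HashSet<>(Arrays.asList(p0, p1));
    consumer.assign(partitions);
    consumer.pause(partitions);       // poll() returns no records for paused partitions
    consumer.seekToEnd(partitions);   // evaluated lazily, on the next position() or poll()
    long pos0 = consumer.position(p0);              // offset lookup still works while paused
    Set<TopicPartition> paused = consumer.paused(); // still contains p0 and p1
    consumer.resume(partitions);
}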

Example 9 with Cluster

use of org.apache.kafka.common.Cluster in project kafka by apache.

From the class KafkaConsumerTest, method testRegexSubscription:

@Test
public void testRegexSubscription() {
    int rebalanceTimeoutMs = 60000;
    int sessionTimeoutMs = 30000;
    int heartbeatIntervalMs = 3000;
    int autoCommitIntervalMs = 1000;
    String unmatchedTopic = "unmatched";
    Time time = new MockTime();
    Map<String, Integer> topicMetadata = new HashMap<>();
    topicMetadata.put(topic, 1);
    topicMetadata.put(unmatchedTopic, 1);
    Cluster cluster = TestUtils.clusterWith(1, topicMetadata);
    Metadata metadata = new Metadata(0, Long.MAX_VALUE);
    Node node = cluster.nodes().get(0);
    MockClient client = new MockClient(time, metadata);
    client.setNode(node);
    PartitionAssignor assignor = new RoundRobinAssignor();
    final KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor, rebalanceTimeoutMs, sessionTimeoutMs, heartbeatIntervalMs, true, autoCommitIntervalMs);
    prepareRebalance(client, node, singleton(topic), assignor, singletonList(tp0), null);
    consumer.subscribe(Pattern.compile(topic), getConsumerRebalanceListener(consumer));
    client.prepareMetadataUpdate(cluster, Collections.<String>emptySet());
    consumer.poll(0);
    assertEquals(singleton(topic), consumer.subscription());
    assertEquals(singleton(tp0), consumer.assignment());
}
Also used: HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) Node(org.apache.kafka.common.Node) Metadata(org.apache.kafka.clients.Metadata) Cluster(org.apache.kafka.common.Cluster) MockTime(org.apache.kafka.common.utils.MockTime) Time(org.apache.kafka.common.utils.Time) PartitionAssignor(org.apache.kafka.clients.consumer.internals.PartitionAssignor) MockClient(org.apache.kafka.clients.MockClient) Test(org.junit.Test)
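
For comparison, the same pattern subscription through the public API: only topics whose names match the regex end up in the subscription, which is what the test asserts against unmatchedTopic. The regex, group id, and bootstrap address below are assumptions.

import java.util.Collection;
import java.util.Properties;
import java.util.regex.Pattern;

import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

Properties props = new Properties();
props.put("bootstrap.servers", "localhost:9092"); // assumed local broker
props.put("group.id", "regex-subscription-demo"); // assumed group id

KafkaConsumer<String, String> consumer =
        new KafkaConsumer<>(props, new StringDeserializer(), new StringDeserializer());

// Subscribe to every topic whose name matches the pattern; non-matching topics
// (the "unmatched" topic in the test above) are never assigned.
consumer.subscribe(Pattern.compile("orders\\..*"), new ConsumerRebalanceListener() {
    @Override
    public void onPartitionsRevoked(Collection<TopicPartition> partitions) { }
    @Override
    public void onPartitionsAssigned(Collection<TopicPartition> partitions) { }
});
consumer.poll(0); // drives the metadata refresh and the group rebalance
consumer.close();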

Example 10 with Cluster

use of org.apache.kafka.common.Cluster in project kafka by apache.

From the class MetadataTest, method testUpdateWithNeedMetadataForAllTopics:

@Test
public void testUpdateWithNeedMetadataForAllTopics() {
    long time = 0;
    metadata.update(Cluster.empty(), Collections.<String>emptySet(), time);
    metadata.needMetadataForAllTopics(true);
    final List<String> expectedTopics = Collections.singletonList("topic");
    metadata.setTopics(expectedTopics);
    metadata.update(new Cluster(null, Collections.singletonList(new Node(0, "host1", 1000)), Arrays.asList(new PartitionInfo("topic", 0, null, null, null), new PartitionInfo("topic1", 0, null, null, null)), Collections.<String>emptySet(), Collections.<String>emptySet()), Collections.<String>emptySet(), 100);
    assertArrayEquals("Metadata got updated with wrong set of topics.", expectedTopics.toArray(), metadata.topics().toArray());
    metadata.needMetadataForAllTopics(false);
}
Also used: Node(org.apache.kafka.common.Node) Cluster(org.apache.kafka.common.Cluster) PartitionInfo(org.apache.kafka.common.PartitionInfo) Test(org.junit.Test)
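
Once built, a Cluster like the one constructed inline above can be inspected directly. A short sketch of the read-side accessors, using the same constructor arguments as the test (the leaders are null there, so leaderFor returns null):

import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Set;

import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;

Cluster cluster = new Cluster(null,
        Collections.singletonList(new Node(0, "host1", 1000)),
        Arrays.asList(
                new PartitionInfo("topic", 0, null, null, null),
                new PartitionInfo("topic1", 0, null, null, null)),
        Collections.<String>emptySet(), Collections.<String>emptySet());

Set<String> topics = cluster.topics();                           // contains "topic" and "topic1"
List<PartitionInfo> infos = cluster.partitionsForTopic("topic"); // one entry, partition 0
Node leader = cluster.leaderFor(new TopicPartition("topic", 0)); // null, no leader was supplied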

Aggregations

Cluster (org.apache.kafka.common.Cluster) - 37 usages
Node (org.apache.kafka.common.Node) - 28 usages
Test (org.junit.Test) - 27 usages
Metadata (org.apache.kafka.clients.Metadata) - 19 usages
PartitionAssignor (org.apache.kafka.clients.consumer.internals.PartitionAssignor) - 18 usages
MockClient (org.apache.kafka.clients.MockClient) - 17 usages
MockTime (org.apache.kafka.common.utils.MockTime) - 17 usages
Time (org.apache.kafka.common.utils.Time) - 16 usages
HashMap (java.util.HashMap) - 14 usages
LinkedHashMap (java.util.LinkedHashMap) - 12 usages
PartitionInfo (org.apache.kafka.common.PartitionInfo) - 11 usages
TopicPartition (org.apache.kafka.common.TopicPartition) - 11 usages
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean) - 5 usages
KafkaException (org.apache.kafka.common.KafkaException) - 5 usages
HashSet (java.util.HashSet) - 4 usages
GroupCoordinatorResponse (org.apache.kafka.common.requests.GroupCoordinatorResponse) - 4 usages
List (java.util.List) - 3 usages
TimeoutException (org.apache.kafka.common.errors.TimeoutException) - 3 usages
TopicAuthorizationException (org.apache.kafka.common.errors.TopicAuthorizationException) - 3 usages
Metrics (org.apache.kafka.common.metrics.Metrics) - 3 usages