Example 16 with Metadata

Use of org.apache.kafka.clients.Metadata in project kafka by apache.

From class KafkaConsumerTest, method verifyHeartbeatSent:

@Test
public void verifyHeartbeatSent() throws Exception {
    int rebalanceTimeoutMs = 60000;
    int sessionTimeoutMs = 30000;
    int heartbeatIntervalMs = 1000;
    int autoCommitIntervalMs = 10000;
    Time time = new MockTime();
    Cluster cluster = TestUtils.singletonCluster(topic, 1);
    Node node = cluster.nodes().get(0);
    Metadata metadata = new Metadata(0, Long.MAX_VALUE);
    metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());
    MockClient client = new MockClient(time, metadata);
    client.setNode(node);
    PartitionAssignor assignor = new RoundRobinAssignor();
    final KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor, rebalanceTimeoutMs, sessionTimeoutMs, heartbeatIntervalMs, true, autoCommitIntervalMs);
    consumer.subscribe(Arrays.asList(topic), getConsumerRebalanceListener(consumer));
    Node coordinator = prepareRebalance(client, node, assignor, Arrays.asList(tp0), null);
    // initial fetch
    client.prepareResponseFrom(fetchResponse(tp0, 0, 0), node);
    consumer.poll(0);
    assertEquals(Collections.singleton(tp0), consumer.assignment());
    AtomicBoolean heartbeatReceived = prepareHeartbeatResponse(client, coordinator);
    // advance the mock clock past the heartbeat interval (1 second here) so a heartbeat is due,
    // then sleep on the wall clock so the background heartbeat thread actually gets to run
    time.sleep(heartbeatIntervalMs);
    Thread.sleep(heartbeatIntervalMs);
    consumer.poll(0);
    assertTrue(heartbeatReceived.get());
}
Also used: AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Node(org.apache.kafka.common.Node) Metadata(org.apache.kafka.clients.Metadata) Cluster(org.apache.kafka.common.Cluster) MockTime(org.apache.kafka.common.utils.MockTime) Time(org.apache.kafka.common.utils.Time) PartitionAssignor(org.apache.kafka.clients.consumer.internals.PartitionAssignor) MockClient(org.apache.kafka.clients.MockClient) Test(org.junit.Test)
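The snippet above depends on private helpers of KafkaConsumerTest (newConsumer, prepareRebalance, fetchResponse, prepareHeartbeatResponse) that are not reproduced here. As a rough sketch of the idea behind prepareHeartbeatResponse, assuming MockClient's RequestMatcher hook and a HeartbeatResponse constructor taking an Errors value (both of which have shifted across Kafka versions), it queues a heartbeat response from the coordinator and flips a flag when the request actually arrives:

// Hypothetical reconstruction, not the verbatim helper from KafkaConsumerTest
private AtomicBoolean prepareHeartbeatResponse(MockClient client, Node coordinator) {
    final AtomicBoolean heartbeatReceived = new AtomicBoolean(false);
    client.prepareResponseFrom(new MockClient.RequestMatcher() {
        @Override
        public boolean matches(AbstractRequest body) {
            // record that the consumer really sent a heartbeat to the coordinator
            heartbeatReceived.set(true);
            return body instanceof HeartbeatRequest;
        }
    }, new HeartbeatResponse(Errors.NONE), coordinator);
    return heartbeatReceived;
}

The test then asserts heartbeatReceived.get() after advancing the clock, which fails if no heartbeat went out within the interval.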

Example 17 with Metadata

Use of org.apache.kafka.clients.Metadata in project kafka by apache.

From class KafkaProducerTest, method testMetadataFetchOnStaleMetadata:

@PrepareOnlyThisForTest(Metadata.class)
@Test
public void testMetadataFetchOnStaleMetadata() throws Exception {
    Properties props = new Properties();
    props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
    KafkaProducer<String, String> producer = new KafkaProducer<>(props, new StringSerializer(), new StringSerializer());
    Metadata metadata = PowerMock.createNiceMock(Metadata.class);
    MemberModifier.field(KafkaProducer.class, "metadata").set(producer, metadata);
    String topic = "topic";
    ProducerRecord<String, String> initialRecord = new ProducerRecord<>(topic, "value");
    // Create a record with a partition higher than the initial (outdated) partition range
    ProducerRecord<String, String> extendedRecord = new ProducerRecord<>(topic, 2, null, "value");
    Collection<Node> nodes = Collections.singletonList(new Node(0, "host1", 1000));
    final Cluster emptyCluster = new Cluster(null, nodes,
            Collections.<PartitionInfo>emptySet(),
            Collections.<String>emptySet(),
            Collections.<String>emptySet());
    final Cluster initialCluster = new Cluster("dummy", nodes,
            Arrays.asList(new PartitionInfo(topic, 0, null, null, null)),
            Collections.<String>emptySet(),
            Collections.<String>emptySet());
    final Cluster extendedCluster = new Cluster("dummy", nodes,
            Arrays.asList(
                    new PartitionInfo(topic, 0, null, null, null),
                    new PartitionInfo(topic, 1, null, null, null),
                    new PartitionInfo(topic, 2, null, null, null)),
            Collections.<String>emptySet(),
            Collections.<String>emptySet());
    // Expect exactly one fetch for each attempt to refresh while topic metadata is not available
    final int refreshAttempts = 5;
    EasyMock.expect(metadata.fetch()).andReturn(emptyCluster).times(refreshAttempts - 1);
    EasyMock.expect(metadata.fetch()).andReturn(initialCluster).once();
    EasyMock.expect(metadata.fetch()).andThrow(new IllegalStateException("Unexpected call to metadata.fetch()")).anyTimes();
    PowerMock.replay(metadata);
    producer.send(initialRecord);
    PowerMock.verify(metadata);
    // Expect exactly one fetch if topic metadata is available and records are still within range
    PowerMock.reset(metadata);
    EasyMock.expect(metadata.fetch()).andReturn(initialCluster).once();
    EasyMock.expect(metadata.fetch()).andThrow(new IllegalStateException("Unexpected call to metadata.fetch()")).anyTimes();
    PowerMock.replay(metadata);
    producer.send(initialRecord, null);
    PowerMock.verify(metadata);
    // Expect exactly two fetches if topic metadata is available but metadata response still returns
    // the same partition size (either because metadata are still stale at the broker too or because
    // there weren't any partitions added in the first place).
    PowerMock.reset(metadata);
    EasyMock.expect(metadata.fetch()).andReturn(initialCluster).once();
    EasyMock.expect(metadata.fetch()).andReturn(initialCluster).once();
    EasyMock.expect(metadata.fetch()).andThrow(new IllegalStateException("Unexpected call to metadata.fetch()")).anyTimes();
    PowerMock.replay(metadata);
    try {
        producer.send(extendedRecord, null);
        fail("Expected KafkaException to be raised");
    } catch (KafkaException e) {
        // expected: the requested partition is still out of range even after a metadata refresh
    }
    PowerMock.verify(metadata);
    // Expect exactly two fetches if topic metadata is available but outdated for the given record
    PowerMock.reset(metadata);
    EasyMock.expect(metadata.fetch()).andReturn(initialCluster).once();
    EasyMock.expect(metadata.fetch()).andReturn(extendedCluster).once();
    EasyMock.expect(metadata.fetch()).andThrow(new IllegalStateException("Unexpected call to metadata.fetch()")).anyTimes();
    PowerMock.replay(metadata);
    producer.send(extendedRecord, null);
    PowerMock.verify(metadata);
}
Also used: Node(org.apache.kafka.common.Node) Metadata(org.apache.kafka.clients.Metadata) Cluster(org.apache.kafka.common.Cluster) Properties(java.util.Properties) KafkaException(org.apache.kafka.common.KafkaException) PartitionInfo(org.apache.kafka.common.PartitionInfo) StringSerializer(org.apache.kafka.common.serialization.StringSerializer) PrepareOnlyThisForTest(org.powermock.core.classloader.annotations.PrepareOnlyThisForTest) Test(org.junit.Test)
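The replay/verify/reset cycle above is plain EasyMock: expectations are scripted, replay arms the mock, verify asserts that exactly the scripted calls happened, and reset re-arms the mock for the next scenario. A minimal self-contained sketch of that lifecycle, using a hypothetical Lookup interface in place of Metadata:

import org.easymock.EasyMock;

public class EasyMockLifecycleSketch {

    // hypothetical stand-in for Metadata, just to show the mock lifecycle
    interface Lookup {
        String fetch();
    }

    public static void main(String[] args) {
        Lookup lookup = EasyMock.createNiceMock(Lookup.class);
        // script: first call returns "stale", second returns "fresh", any extra call fails loudly
        EasyMock.expect(lookup.fetch()).andReturn("stale").once();
        EasyMock.expect(lookup.fetch()).andReturn("fresh").once();
        EasyMock.expect(lookup.fetch()).andThrow(new IllegalStateException("unexpected fetch")).anyTimes();
        EasyMock.replay(lookup);

        System.out.println(lookup.fetch()); // "stale"
        System.out.println(lookup.fetch()); // "fresh"

        EasyMock.verify(lookup); // passes: both scripted calls were consumed
        EasyMock.reset(lookup);  // re-arm for the next scenario, as the producer test does
    }
}

The andThrow(...).anyTimes() tail is the same trick the test uses to turn any surplus metadata.fetch() call into an immediate failure.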

Example 18 with Metadata

Use of org.apache.kafka.clients.Metadata in project kafka by apache.

From class KafkaConsumerTest, method testCommitsFetchedDuringAssign:

@Test
public void testCommitsFetchedDuringAssign() {
    long offset1 = 10000;
    long offset2 = 20000;
    int rebalanceTimeoutMs = 6000;
    int sessionTimeoutMs = 3000;
    int heartbeatIntervalMs = 2000;
    int autoCommitIntervalMs = 1000;
    Time time = new MockTime();
    Cluster cluster = TestUtils.singletonCluster(topic, 1);
    Node node = cluster.nodes().get(0);
    Metadata metadata = new Metadata(0, Long.MAX_VALUE);
    metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());
    MockClient client = new MockClient(time, metadata);
    client.setNode(node);
    PartitionAssignor assignor = new RoundRobinAssignor();
    final KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor, rebalanceTimeoutMs, sessionTimeoutMs, heartbeatIntervalMs, true, autoCommitIntervalMs);
    consumer.assign(singletonList(tp0));
    // look up the group coordinator; the client models the coordinator as a synthetic
    // node whose id is Integer.MAX_VALUE minus the broker id
    client.prepareResponseFrom(new GroupCoordinatorResponse(Errors.NONE, node), node);
    Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());
    // fetch offset for one topic
    client.prepareResponseFrom(offsetResponse(Collections.singletonMap(tp0, offset1), Errors.NONE), coordinator);
    assertEquals(offset1, consumer.committed(tp0).offset());
    consumer.assign(Arrays.asList(tp0, tp1));
    // fetch offset for two topics
    Map<TopicPartition, Long> offsets = new HashMap<>();
    offsets.put(tp0, offset1);
    offsets.put(tp1, offset2);
    client.prepareResponseFrom(offsetResponse(offsets, Errors.NONE), coordinator);
    assertEquals(offset1, consumer.committed(tp0).offset());
    assertEquals(offset2, consumer.committed(tp1).offset());
}
Also used: HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) Node(org.apache.kafka.common.Node) Metadata(org.apache.kafka.clients.Metadata) Cluster(org.apache.kafka.common.Cluster) MockTime(org.apache.kafka.common.utils.MockTime) Time(org.apache.kafka.common.utils.Time) GroupCoordinatorResponse(org.apache.kafka.common.requests.GroupCoordinatorResponse) TopicPartition(org.apache.kafka.common.TopicPartition) PartitionAssignor(org.apache.kafka.clients.consumer.internals.PartitionAssignor) MockClient(org.apache.kafka.clients.MockClient) Test(org.junit.Test)
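One detail worth calling out is the coordinator node construction: the client gives the group coordinator a synthetic node id of Integer.MAX_VALUE minus the broker id, so the coordinator connection stays distinct from the regular broker connection in the network client. A small sketch of just that arithmetic convention, using nothing beyond what the test itself shows:

import org.apache.kafka.common.Node;

public class CoordinatorIdSketch {

    // derive the synthetic coordinator node for a broker, mirroring the test above
    static Node coordinatorFor(Node broker) {
        return new Node(Integer.MAX_VALUE - broker.id(), broker.host(), broker.port());
    }

    // recover the underlying broker id from a coordinator node id
    static int brokerIdOf(Node coordinator) {
        return Integer.MAX_VALUE - coordinator.id();
    }

    public static void main(String[] args) {
        Node broker = new Node(0, "localhost", 9092);
        Node coordinator = coordinatorFor(broker);
        System.out.println(coordinator.id());        // 2147483647
        System.out.println(brokerIdOf(coordinator)); // 0
    }
}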

Example 19 with Metadata

Use of org.apache.kafka.clients.Metadata in project kafka by apache.

From class KafkaConsumerTest, method testAutoCommitSentBeforePositionUpdate:

@Test
public void testAutoCommitSentBeforePositionUpdate() {
    int rebalanceTimeoutMs = 60000;
    int sessionTimeoutMs = 30000;
    int heartbeatIntervalMs = 3000;
    // adjust auto commit interval lower than heartbeat so we don't need to deal with
    // a concurrent heartbeat request
    int autoCommitIntervalMs = 1000;
    Time time = new MockTime();
    Cluster cluster = TestUtils.singletonCluster(topic, 1);
    Node node = cluster.nodes().get(0);
    Metadata metadata = new Metadata(0, Long.MAX_VALUE);
    metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());
    MockClient client = new MockClient(time, metadata);
    client.setNode(node);
    PartitionAssignor assignor = new RoundRobinAssignor();
    final KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor, rebalanceTimeoutMs, sessionTimeoutMs, heartbeatIntervalMs, true, autoCommitIntervalMs);
    consumer.subscribe(Arrays.asList(topic), getConsumerRebalanceListener(consumer));
    Node coordinator = prepareRebalance(client, node, assignor, Arrays.asList(tp0), null);
    consumer.poll(0);
    // respond to the outstanding fetch so that we have data available on the next poll
    client.respondFrom(fetchResponse(tp0, 0, 5), node);
    client.poll(0, time.milliseconds());
    time.sleep(autoCommitIntervalMs);
    client.prepareResponseFrom(fetchResponse(tp0, 5, 0), node);
    // no data has been returned to the user yet, so the committed offset should be 0
    AtomicBoolean commitReceived = prepareOffsetCommitResponse(client, coordinator, tp0, 0);
    consumer.poll(0);
    assertTrue(commitReceived.get());
}
Also used: AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Node(org.apache.kafka.common.Node) Metadata(org.apache.kafka.clients.Metadata) Cluster(org.apache.kafka.common.Cluster) MockTime(org.apache.kafka.common.utils.MockTime) Time(org.apache.kafka.common.utils.Time) PartitionAssignor(org.apache.kafka.clients.consumer.internals.PartitionAssignor) MockClient(org.apache.kafka.clients.MockClient) Test(org.junit.Test)
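These tests mix two clocks, and it is easy to conflate them: time.sleep() on a MockTime advances the mock clock instantly and never blocks, while Thread.sleep() blocks the real thread (needed only when a background thread, like the heartbeat thread in Example 16, must actually get CPU time). A minimal sketch of the MockTime behavior:

import org.apache.kafka.common.utils.MockTime;

public class MockTimeSketch {
    public static void main(String[] args) {
        MockTime time = new MockTime();
        long start = time.milliseconds();
        time.sleep(1000); // advances the mock clock by one second and returns immediately
        System.out.println(time.milliseconds() - start); // 1000
    }
}

That is why time.sleep(autoCommitIntervalMs) above is enough to make the auto-commit due on the next poll without the test actually waiting a second.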

Example 20 with Metadata

Use of org.apache.kafka.clients.Metadata in project kafka by apache.

From class KafkaConsumerTest, method verifyNoCoordinatorLookupForManualAssignmentWithSeek:

@Test
public void verifyNoCoordinatorLookupForManualAssignmentWithSeek() {
    int rebalanceTimeoutMs = 60000;
    int sessionTimeoutMs = 3000;
    int heartbeatIntervalMs = 2000;
    int autoCommitIntervalMs = 1000;
    Time time = new MockTime();
    Cluster cluster = TestUtils.singletonCluster(topic, 1);
    Node node = cluster.nodes().get(0);
    Metadata metadata = new Metadata(0, Long.MAX_VALUE);
    metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());
    MockClient client = new MockClient(time, metadata);
    client.setNode(node);
    PartitionAssignor assignor = new RoundRobinAssignor();
    final KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor, rebalanceTimeoutMs, sessionTimeoutMs, heartbeatIntervalMs, true, autoCommitIntervalMs);
    consumer.assign(Arrays.asList(tp0));
    consumer.seekToBeginning(Arrays.asList(tp0));
    // there should be no need to look up the coordinator or fetch committed offsets;
    // we just look up the starting position and send the record fetch
    client.prepareResponse(listOffsetsResponse(Collections.singletonMap(tp0, 50L), Errors.NONE));
    client.prepareResponse(fetchResponse(tp0, 50L, 5));
    ConsumerRecords<String, String> records = consumer.poll(0);
    assertEquals(5, records.count());
    assertEquals(55L, consumer.position(tp0));
}
Also used: Node(org.apache.kafka.common.Node) Metadata(org.apache.kafka.clients.Metadata) Cluster(org.apache.kafka.common.Cluster) MockTime(org.apache.kafka.common.utils.MockTime) Time(org.apache.kafka.common.utils.Time) PartitionAssignor(org.apache.kafka.clients.consumer.internals.PartitionAssignor) MockClient(org.apache.kafka.clients.MockClient) Test(org.junit.Test)
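Outside the mock harness, the same manual-assignment flow uses only public consumer API: assign() bypasses group management entirely, so no coordinator lookup or committed-offset fetch happens before records are fetched. A hedged sketch against a real broker (the bootstrap address and topic name are placeholders):

import java.util.Arrays;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

public class ManualAssignSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder address
        try (KafkaConsumer<String, String> consumer =
                new KafkaConsumer<>(props, new StringDeserializer(), new StringDeserializer())) {
            TopicPartition tp = new TopicPartition("my-topic", 0); // placeholder topic
            consumer.assign(Arrays.asList(tp));          // manual assignment: no group, no coordinator
            consumer.seekToBeginning(Arrays.asList(tp)); // start from the earliest offset
            ConsumerRecords<String, String> records = consumer.poll(1000);
            System.out.println(records.count() + " records, position now " + consumer.position(tp));
        }
    }
}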

Aggregations

Metadata (org.apache.kafka.clients.Metadata): 22
Node (org.apache.kafka.common.Node): 20
MockClient (org.apache.kafka.clients.MockClient): 19
Cluster (org.apache.kafka.common.Cluster): 19
MockTime (org.apache.kafka.common.utils.MockTime): 19
Test (org.junit.Test): 17
PartitionAssignor (org.apache.kafka.clients.consumer.internals.PartitionAssignor): 16
Time (org.apache.kafka.common.utils.Time): 16
HashMap (java.util.HashMap): 10
LinkedHashMap (java.util.LinkedHashMap): 9
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 5
TopicPartition (org.apache.kafka.common.TopicPartition): 5
GroupCoordinatorResponse (org.apache.kafka.common.requests.GroupCoordinatorResponse): 4
Metrics (org.apache.kafka.common.metrics.Metrics): 3
Before (org.junit.Before): 3
Properties (java.util.Properties): 2
ClientRequest (org.apache.kafka.clients.ClientRequest): 2
KafkaException (org.apache.kafka.common.KafkaException): 2
PartitionInfo (org.apache.kafka.common.PartitionInfo): 2
WakeupException (org.apache.kafka.common.errors.WakeupException): 2