Use of org.apache.kafka.clients.consumer.internals.PartitionAssignor in project kafka by apache.
From the class KafkaConsumerTest, method testManualAssignmentChangeWithAutoCommitEnabled.
@Test
public void testManualAssignmentChangeWithAutoCommitEnabled() {
    int rebalanceTimeoutMs = 60000;
    int sessionTimeoutMs = 30000;
    int heartbeatIntervalMs = 3000;
    int autoCommitIntervalMs = 1000;
    Time time = new MockTime();
    Map<String, Integer> tpCounts = new HashMap<>();
    tpCounts.put(topic, 1);
    tpCounts.put(topic2, 1);
    Cluster cluster = TestUtils.singletonCluster(tpCounts);
    Node node = cluster.nodes().get(0);
    Metadata metadata = new Metadata(0, Long.MAX_VALUE);
    metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());
    MockClient client = new MockClient(time, metadata);
    client.setNode(node);
    PartitionAssignor assignor = new RangeAssignor();
    final KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor, rebalanceTimeoutMs, sessionTimeoutMs, heartbeatIntervalMs, true, autoCommitIntervalMs);
    // lookup coordinator
    client.prepareResponseFrom(new GroupCoordinatorResponse(Errors.NONE, node), node);
    Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());
    // manual assignment
    consumer.assign(Arrays.asList(tp0));
    consumer.seekToBeginning(Arrays.asList(tp0));
    // fetch offset for one topic
    client.prepareResponseFrom(offsetResponse(Collections.singletonMap(tp0, 0L), Errors.NONE), coordinator);
    assertEquals(0, consumer.committed(tp0).offset());
    // verify that the assignment immediately changes
    assertTrue(consumer.assignment().equals(Collections.singleton(tp0)));
    // there shouldn't be any need to look up the coordinator or fetch committed offsets;
    // we just look up the starting position and send the record fetch
    client.prepareResponse(listOffsetsResponse(Collections.singletonMap(tp0, 10L), Errors.NONE));
    client.prepareResponse(fetchResponse(tp0, 10L, 1));
    ConsumerRecords<String, String> records = consumer.poll(0);
    assertEquals(1, records.count());
    assertEquals(11L, consumer.position(tp0));
    // mock the offset commit response for the to-be-revoked partitions
    AtomicBoolean commitReceived = prepareOffsetCommitResponse(client, coordinator, tp0, 11);
    // new manual assignment
    consumer.assign(Arrays.asList(t2p0));
    // verify that the assignment immediately changes
    assertTrue(consumer.assignment().equals(Collections.singleton(t2p0)));
    // verify that the offset commits occurred as expected
    assertTrue(commitReceived.get());
    client.requests().clear();
    consumer.close();
}
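For context, the manual-assignment flow exercised by this test corresponds roughly to the application-level usage below. This is a minimal sketch, not part of the test: the broker address, group id, and topic name are illustrative placeholders, and it assumes a reachable broker with auto-commit enabled, as in the test.

    import java.util.Collections;
    import java.util.Properties;
    import org.apache.kafka.clients.consumer.ConsumerRecord;
    import org.apache.kafka.clients.consumer.ConsumerRecords;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.common.TopicPartition;

    public class ManualAssignmentExample {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put("bootstrap.servers", "localhost:9092");   // placeholder broker address
            props.put("group.id", "example-group");             // placeholder group id
            props.put("enable.auto.commit", "true");            // auto-commit enabled, as in the test
            props.put("auto.commit.interval.ms", "1000");
            props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
            props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

            KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
            TopicPartition partition = new TopicPartition("example-topic", 0); // placeholder topic
            // manual assignment bypasses group rebalancing; switching the assignment later
            // triggers an auto-commit of the offsets consumed so far (the behavior the test verifies)
            consumer.assign(Collections.singletonList(partition));
            consumer.seekToBeginning(Collections.singletonList(partition));
            ConsumerRecords<String, String> records = consumer.poll(1000);
            for (ConsumerRecord<String, String> record : records)
                System.out.println(record.offset() + ": " + record.value());
            consumer.close();
        }
    }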
Use of org.apache.kafka.clients.consumer.internals.PartitionAssignor in project kafka by apache.
From the class KafkaConsumerTest, method testOffsetOfPausedPartitions.
@Test
public void testOffsetOfPausedPartitions() {
    int rebalanceTimeoutMs = 60000;
    int sessionTimeoutMs = 30000;
    int heartbeatIntervalMs = 3000;
    int autoCommitIntervalMs = 1000;
    Time time = new MockTime();
    Cluster cluster = TestUtils.singletonCluster(topic, 2);
    Node node = cluster.nodes().get(0);
    Metadata metadata = new Metadata(0, Long.MAX_VALUE);
    metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());
    MockClient client = new MockClient(time, metadata);
    client.setNode(node);
    PartitionAssignor assignor = new RangeAssignor();
    final KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor, rebalanceTimeoutMs, sessionTimeoutMs, heartbeatIntervalMs, true, autoCommitIntervalMs);
    // lookup coordinator
    client.prepareResponseFrom(new GroupCoordinatorResponse(Errors.NONE, node), node);
    Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());
    // manual assignment
    Set<TopicPartition> partitions = Utils.mkSet(tp0, tp1);
    consumer.assign(partitions);
    // verify consumer's assignment
    assertTrue(consumer.assignment().equals(partitions));
    consumer.pause(partitions);
    consumer.seekToEnd(partitions);
    // fetch and verify committed offset of two partitions
    Map<TopicPartition, Long> offsets = new HashMap<>();
    offsets.put(tp0, 0L);
    offsets.put(tp1, 0L);
    client.prepareResponseFrom(offsetResponse(offsets, Errors.NONE), coordinator);
    assertEquals(0, consumer.committed(tp0).offset());
    assertEquals(0, consumer.committed(tp1).offset());
    // fetch and verify consumer's position in the two partitions
    client.prepareResponse(listOffsetsResponse(Collections.singletonMap(tp0, 3L), Errors.NONE));
    client.prepareResponse(listOffsetsResponse(Collections.singletonMap(tp1, 3L), Errors.NONE));
    assertEquals(3L, consumer.position(tp0));
    assertEquals(3L, consumer.position(tp1));
    client.requests().clear();
    consumer.unsubscribe();
    consumer.close();
}
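The same pause-then-inspect pattern can be used from an application: pausing only stops poll() from returning records for those partitions, while offset lookups such as committed() and position() keep working. The sketch below is illustrative only; the broker address, group id, and topic name are placeholders and are not taken from the test.

    import java.util.Arrays;
    import java.util.List;
    import java.util.Properties;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.common.TopicPartition;

    public class PausedPositionExample {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put("bootstrap.servers", "localhost:9092"); // placeholder
            props.put("group.id", "example-group");           // placeholder
            props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
            props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

            KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
            List<TopicPartition> partitions = Arrays.asList(
                    new TopicPartition("example-topic", 0),
                    new TopicPartition("example-topic", 1));
            consumer.assign(partitions);
            // pause fetching, then jump to the end of each partition
            consumer.pause(partitions);
            consumer.seekToEnd(partitions);
            // position() and committed() still resolve offsets even though fetching is paused
            for (TopicPartition tp : partitions)
                System.out.println(tp + " position=" + consumer.position(tp)
                        + " committed=" + consumer.committed(tp));
            consumer.close();
        }
    }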
Use of org.apache.kafka.clients.consumer.internals.PartitionAssignor in project kafka by apache.
From the class KafkaConsumerTest, method testRegexSubscription.
@Test
public void testRegexSubscription() {
    int rebalanceTimeoutMs = 60000;
    int sessionTimeoutMs = 30000;
    int heartbeatIntervalMs = 3000;
    int autoCommitIntervalMs = 1000;
    String unmatchedTopic = "unmatched";
    Time time = new MockTime();
    Map<String, Integer> topicMetadata = new HashMap<>();
    topicMetadata.put(topic, 1);
    topicMetadata.put(unmatchedTopic, 1);
    Cluster cluster = TestUtils.clusterWith(1, topicMetadata);
    Metadata metadata = new Metadata(0, Long.MAX_VALUE);
    Node node = cluster.nodes().get(0);
    MockClient client = new MockClient(time, metadata);
    client.setNode(node);
    PartitionAssignor assignor = new RoundRobinAssignor();
    final KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor, rebalanceTimeoutMs, sessionTimeoutMs, heartbeatIntervalMs, true, autoCommitIntervalMs);
    prepareRebalance(client, node, singleton(topic), assignor, singletonList(tp0), null);
    consumer.subscribe(Pattern.compile(topic), getConsumerRebalanceListener(consumer));
    client.prepareMetadataUpdate(cluster, Collections.<String>emptySet());
    consumer.poll(0);
    assertEquals(singleton(topic), consumer.subscription());
    assertEquals(singleton(tp0), consumer.assignment());
}
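The pattern-based subscribe call being exercised here is part of the public consumer API. The sketch below shows the same call from an application's point of view; the regular expression, topic naming, and connection properties are placeholders, not values from the test. Topics that do not match the pattern (like "unmatched" above) are simply ignored.

    import java.util.Collection;
    import java.util.Properties;
    import java.util.regex.Pattern;
    import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.common.TopicPartition;

    public class RegexSubscriptionExample {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put("bootstrap.servers", "localhost:9092"); // placeholder
            props.put("group.id", "example-group");           // placeholder
            props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
            props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

            final KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
            // subscribe to every topic whose name matches the pattern
            consumer.subscribe(Pattern.compile("metrics-.*"), new ConsumerRebalanceListener() {
                @Override
                public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
                    System.out.println("Revoked: " + partitions);
                }

                @Override
                public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
                    System.out.println("Assigned: " + partitions);
                }
            });
            // the matched subscription and assignment become visible once poll() drives a rebalance
            consumer.poll(0);
            System.out.println("Subscription: " + consumer.subscription());
            System.out.println("Assignment: " + consumer.assignment());
            consumer.close();
        }
    }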
Use of org.apache.kafka.clients.consumer.internals.PartitionAssignor in project kafka by apache.
From the class KafkaConsumerTest, method verifyHeartbeatSentWhenFetchedDataReady.
@Test
public void verifyHeartbeatSentWhenFetchedDataReady() throws Exception {
    int rebalanceTimeoutMs = 60000;
    int sessionTimeoutMs = 30000;
    int heartbeatIntervalMs = 1000;
    int autoCommitIntervalMs = 10000;
    Time time = new MockTime();
    Cluster cluster = TestUtils.singletonCluster(topic, 1);
    Node node = cluster.nodes().get(0);
    Metadata metadata = new Metadata(0, Long.MAX_VALUE);
    metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());
    MockClient client = new MockClient(time, metadata);
    client.setNode(node);
    PartitionAssignor assignor = new RoundRobinAssignor();
    final KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor, rebalanceTimeoutMs, sessionTimeoutMs, heartbeatIntervalMs, true, autoCommitIntervalMs);
    consumer.subscribe(Arrays.asList(topic), getConsumerRebalanceListener(consumer));
    Node coordinator = prepareRebalance(client, node, assignor, Arrays.asList(tp0), null);
    consumer.poll(0);
    // respond to the outstanding fetch so that we have data available on the next poll
    client.respondFrom(fetchResponse(tp0, 0, 5), node);
    client.poll(0, time.milliseconds());
    client.prepareResponseFrom(fetchResponse(tp0, 5, 0), node);
    AtomicBoolean heartbeatReceived = prepareHeartbeatResponse(client, coordinator);
    time.sleep(heartbeatIntervalMs);
    Thread.sleep(heartbeatIntervalMs);
    consumer.poll(0);
    assertTrue(heartbeatReceived.get());
}
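The point of this test is that a heartbeat is still sent even when poll() returns quickly because fetched data is already buffered. From an application's perspective no heartbeat code is required at all: heartbeats to the group coordinator are handled by the consumer itself, and the application only has to keep polling. The loop below is a generic sketch with placeholder connection settings, not code from the test.

    import java.util.Collections;
    import java.util.Properties;
    import org.apache.kafka.clients.consumer.ConsumerRecord;
    import org.apache.kafka.clients.consumer.ConsumerRecords;
    import org.apache.kafka.clients.consumer.KafkaConsumer;

    public class PollLoopExample {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put("bootstrap.servers", "localhost:9092"); // placeholder
            props.put("group.id", "example-group");           // placeholder
            props.put("heartbeat.interval.ms", "1000");       // same interval the test uses
            props.put("session.timeout.ms", "30000");
            props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
            props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

            KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
            consumer.subscribe(Collections.singletonList("example-topic")); // placeholder topic
            try {
                while (true) {
                    // heartbeats are sent by the consumer; the application just polls regularly
                    ConsumerRecords<String, String> records = consumer.poll(1000);
                    for (ConsumerRecord<String, String> record : records)
                        System.out.println(record.offset() + ": " + record.value());
                }
            } finally {
                consumer.close();
            }
        }
    }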
Use of org.apache.kafka.clients.consumer.internals.PartitionAssignor in project kafka by apache.
From the class KafkaConsumerTest, method testSubscriptionChangesWithAutoCommitDisabled.
/**
 * Verify that when a consumer changes its topic subscription its assigned partitions
 * do not immediately change, and the consumed offsets of its to-be-revoked partitions
 * are not committed (when auto-commit is disabled).
 * Upon unsubscribing from subscribed topics, the assigned partitions immediately
 * change, but if auto-commit is disabled the consumer offsets are not committed.
 */
@Test
public void testSubscriptionChangesWithAutoCommitDisabled() {
    int rebalanceTimeoutMs = 60000;
    int sessionTimeoutMs = 30000;
    int heartbeatIntervalMs = 3000;
    int autoCommitIntervalMs = 1000;
    Time time = new MockTime();
    Map<String, Integer> tpCounts = new HashMap<>();
    tpCounts.put(topic, 1);
    tpCounts.put(topic2, 1);
    Cluster cluster = TestUtils.singletonCluster(tpCounts);
    Node node = cluster.nodes().get(0);
    Metadata metadata = new Metadata(0, Long.MAX_VALUE);
    metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());
    MockClient client = new MockClient(time, metadata);
    client.setNode(node);
    PartitionAssignor assignor = new RangeAssignor();
    final KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor, rebalanceTimeoutMs, sessionTimeoutMs, heartbeatIntervalMs, false, autoCommitIntervalMs);
    // initial subscription
    consumer.subscribe(Arrays.asList(topic), getConsumerRebalanceListener(consumer));
    // verify that the subscription has changed but the assignment is still unchanged
    assertTrue(consumer.subscription().equals(Collections.singleton(topic)));
    assertTrue(consumer.assignment().isEmpty());
    // mock rebalance responses
    prepareRebalance(client, node, assignor, Arrays.asList(tp0), null);
    consumer.poll(0);
    // verify that the subscription is still the same, and the assignment has now caught up
    assertTrue(consumer.subscription().equals(Collections.singleton(topic)));
    assertTrue(consumer.assignment().equals(Collections.singleton(tp0)));
    consumer.poll(0);
    // subscription change
    consumer.subscribe(Arrays.asList(topic2), getConsumerRebalanceListener(consumer));
    // verify that the subscription has changed but the assignment is still unchanged
    assertTrue(consumer.subscription().equals(Collections.singleton(topic2)));
    assertTrue(consumer.assignment().equals(Collections.singleton(tp0)));
    // auto-commit is disabled, so no offset commit request should be sent
    for (ClientRequest req : client.requests()) assertTrue(req.requestBuilder().apiKey() != ApiKeys.OFFSET_COMMIT);
    // subscription change
    consumer.unsubscribe();
    // verify that subscription and assignment are both updated
    assertTrue(consumer.subscription().isEmpty());
    assertTrue(consumer.assignment().isEmpty());
    // auto-commit is disabled, so no offset commit request should be sent
    for (ClientRequest req : client.requests()) assertTrue(req.requestBuilder().apiKey() != ApiKeys.OFFSET_COMMIT);
    client.requests().clear();
    consumer.close();
}
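When auto-commit is disabled as in this test, an application that wants its offsets preserved across subscription changes typically commits them itself from a rebalance listener. The test's getConsumerRebalanceListener helper is not shown here; the listener below is only an assumption about how such manual committing is usually done, not the helper's actual implementation.

    import java.util.Collection;
    import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.common.TopicPartition;

    public class CommitOnRevokeListener implements ConsumerRebalanceListener {
        private final KafkaConsumer<String, String> consumer;

        public CommitOnRevokeListener(KafkaConsumer<String, String> consumer) {
            this.consumer = consumer;
        }

        @Override
        public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
            // with enable.auto.commit=false nothing is committed automatically on a
            // subscription change, so commit the current offsets before losing ownership
            consumer.commitSync();
        }

        @Override
        public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
            // positions for newly assigned partitions are restored from the last committed offsets
        }
    }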