Use of org.apache.kafka.clients.consumer.internals.PartitionAssignor in project kafka by apache.
The class KafkaConsumerTest, method testPollThrowsInterruptExceptionIfInterrupted.
@Test
public void testPollThrowsInterruptExceptionIfInterrupted() throws Exception {
    int rebalanceTimeoutMs = 60000;
    int sessionTimeoutMs = 30000;
    int heartbeatIntervalMs = 3000;
    final Time time = new MockTime();
    Cluster cluster = TestUtils.singletonCluster(topic, 1);
    final Node node = cluster.nodes().get(0);
    Metadata metadata = new Metadata(0, Long.MAX_VALUE);
    metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());
    final MockClient client = new MockClient(time, metadata);
    client.setNode(node);
    final PartitionAssignor assignor = new RoundRobinAssignor();
    final KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor,
            rebalanceTimeoutMs, sessionTimeoutMs, heartbeatIntervalMs, false, 0);
    consumer.subscribe(Arrays.asList(topic), getConsumerRebalanceListener(consumer));
    prepareRebalance(client, node, assignor, Arrays.asList(tp0), null);
    consumer.poll(0);
    // interrupt the thread and call poll
    try {
        Thread.currentThread().interrupt();
        expectedException.expect(InterruptException.class);
        consumer.poll(0);
    } finally {
        // clear interrupted state again since this thread may be reused by JUnit
        Thread.interrupted();
    }
}
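The tests in this listing rely on a getConsumerRebalanceListener helper that is not shown in the excerpt. A minimal sketch of what such a helper might look like, assuming it only needs to satisfy the subscribe() signature (the no-op behavior here is an assumption, not the project's actual implementation):

private ConsumerRebalanceListener getConsumerRebalanceListener(final KafkaConsumer<String, String> consumer) {
    return new ConsumerRebalanceListener() {
        @Override
        public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
            // no-op: these tests drive the rebalance entirely through MockClient responses
        }

        @Override
        public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
            // no-op
        }
    };
}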
Use of org.apache.kafka.clients.consumer.internals.PartitionAssignor in project kafka by apache.
The class KafkaConsumerTest, helper method consumerCloseTest.
private void consumerCloseTest(final long closeTimeoutMs, List<? extends AbstractResponse> responses, long waitMs, boolean interrupt) throws Exception {
    int rebalanceTimeoutMs = 60000;
    int sessionTimeoutMs = 30000;
    int heartbeatIntervalMs = 5000;
    Time time = new MockTime();
    Cluster cluster = TestUtils.singletonCluster(topic, 1);
    Node node = cluster.nodes().get(0);
    Metadata metadata = new Metadata(0, Long.MAX_VALUE);
    metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());
    MockClient client = new MockClient(time, metadata);
    client.setNode(node);
    PartitionAssignor assignor = new RoundRobinAssignor();
    final KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor,
            rebalanceTimeoutMs, sessionTimeoutMs, heartbeatIntervalMs, false, 1000);
    consumer.subscribe(Arrays.asList(topic), getConsumerRebalanceListener(consumer));
    Node coordinator = prepareRebalance(client, node, assignor, Arrays.asList(tp0), null);
    client.prepareMetadataUpdate(cluster, Collections.<String>emptySet());
    // Poll with responses
    client.prepareResponseFrom(fetchResponse(tp0, 0, 1), node);
    client.prepareResponseFrom(fetchResponse(tp0, 1, 0), node);
    consumer.poll(0);
    // Initiate close() after a commit request on another thread.
    // Kafka consumer is single-threaded, but the implementation allows calls on a
    // different thread as long as the calls are not executed concurrently. So this is safe.
    ExecutorService executor = Executors.newSingleThreadExecutor();
    final AtomicReference<Exception> closeException = new AtomicReference<Exception>();
    try {
        Future<?> future = executor.submit(new Runnable() {
            @Override
            public void run() {
                consumer.commitAsync();
                try {
                    consumer.close(closeTimeoutMs, TimeUnit.MILLISECONDS);
                } catch (Exception e) {
                    closeException.set(e);
                }
            }
        });
        // The close task should not complete until the commit succeeds or close times out,
        // if the close timeout is not zero.
        try {
            future.get(100, TimeUnit.MILLISECONDS);
            if (closeTimeoutMs != 0)
                fail("Close completed without waiting for commit or leave response");
        } catch (TimeoutException e) {
            // Expected exception
        }
        // Ensure close has started and queued at least one more request after commitAsync
        client.waitForRequests(2, 1000);
        // In non-graceful mode, close() times out without an exception even though the commit response is pending
        for (int i = 0; i < responses.size(); i++) {
            client.waitForRequests(1, 1000);
            client.respondFrom(responses.get(i), coordinator);
            if (i != responses.size() - 1) {
                try {
                    future.get(100, TimeUnit.MILLISECONDS);
                    fail("Close completed without waiting for response");
                } catch (TimeoutException e) {
                    // Expected exception
                }
            }
        }
        if (waitMs > 0)
            time.sleep(waitMs);
        if (interrupt)
            assertTrue("Close terminated prematurely", future.cancel(true));
        // Make sure that the close task completes and another task can be run on the single-threaded executor
        executor.submit(new Runnable() {
            @Override
            public void run() {
            }
        }).get(500, TimeUnit.MILLISECONDS);
        if (!interrupt) {
            // Should succeed without TimeoutException or ExecutionException
            future.get(500, TimeUnit.MILLISECONDS);
            assertNull("Unexpected exception during close", closeException.get());
        } else
            assertTrue("Expected exception not thrown " + closeException, closeException.get() instanceof InterruptException);
    } finally {
        executor.shutdownNow();
    }
}
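consumerCloseTest is a parameterized helper rather than a test itself. A hypothetical caller might look like the following; the method name, the zero timeout, and the empty response list are illustrative assumptions about how the helper could be exercised:

@Test
public void testCloseNoWait() throws Exception {
    // hypothetical invocation: close(0) should return immediately,
    // without waiting for the pending commit or leave-group responses
    consumerCloseTest(0, Collections.<AbstractResponse>emptyList(), 0, false);
}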
Use of org.apache.kafka.clients.consumer.internals.PartitionAssignor in project kafka by apache.
The class KafkaConsumerTest, method fetchResponseWithUnexpectedPartitionIsIgnored.
@Test
public void fetchResponseWithUnexpectedPartitionIsIgnored() {
    int rebalanceTimeoutMs = 60000;
    int sessionTimeoutMs = 30000;
    int heartbeatIntervalMs = 3000;
    // adjust the auto commit interval lower than the heartbeat so we don't need to deal with
    // a concurrent heartbeat request
    int autoCommitIntervalMs = 1000;
    Time time = new MockTime();
    Cluster cluster = TestUtils.singletonCluster(singletonMap(topic, 1));
    Node node = cluster.nodes().get(0);
    Metadata metadata = new Metadata(0, Long.MAX_VALUE);
    metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());
    MockClient client = new MockClient(time, metadata);
    client.setNode(node);
    PartitionAssignor assignor = new RangeAssignor();
    final KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor,
            rebalanceTimeoutMs, sessionTimeoutMs, heartbeatIntervalMs, true, autoCommitIntervalMs);
    consumer.subscribe(singletonList(topic), getConsumerRebalanceListener(consumer));
    prepareRebalance(client, node, assignor, singletonList(tp0), null);
    Map<TopicPartition, FetchInfo> fetches1 = new HashMap<>();
    fetches1.put(tp0, new FetchInfo(0, 1));
    // not assigned and not fetched
    fetches1.put(t2p0, new FetchInfo(0, 10));
    client.prepareResponseFrom(fetchResponse(fetches1), node);
    ConsumerRecords<String, String> records = consumer.poll(0);
    assertEquals(0, records.count());
}
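FetchInfo is a small holder used by these tests to describe, per partition, the starting offset and record count of a mocked fetch response. A minimal sketch consistent with its usage here; the field names are assumptions:

private static class FetchInfo {
    final long offset; // first offset of the records returned for the partition
    final int count;   // number of records returned

    FetchInfo(long offset, int count) {
        this.offset = offset;
        this.count = count;
    }
}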
Use of org.apache.kafka.clients.consumer.internals.PartitionAssignor in project kafka by apache.
The class KafkaConsumerTest, method testManualAssignmentChangeWithAutoCommitDisabled.
@Test
public void testManualAssignmentChangeWithAutoCommitDisabled() {
    int rebalanceTimeoutMs = 60000;
    int sessionTimeoutMs = 30000;
    int heartbeatIntervalMs = 3000;
    int autoCommitIntervalMs = 1000;
    Time time = new MockTime();
    Map<String, Integer> tpCounts = new HashMap<>();
    tpCounts.put(topic, 1);
    tpCounts.put(topic2, 1);
    Cluster cluster = TestUtils.singletonCluster(tpCounts);
    Node node = cluster.nodes().get(0);
    Metadata metadata = new Metadata(0, Long.MAX_VALUE);
    metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());
    MockClient client = new MockClient(time, metadata);
    client.setNode(node);
    PartitionAssignor assignor = new RangeAssignor();
    final KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor,
            rebalanceTimeoutMs, sessionTimeoutMs, heartbeatIntervalMs, false, autoCommitIntervalMs);
    // look up the coordinator
    client.prepareResponseFrom(new GroupCoordinatorResponse(Errors.NONE, node), node);
    Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());
    // manual assignment
    consumer.assign(Arrays.asList(tp0));
    consumer.seekToBeginning(Arrays.asList(tp0));
    // fetch offset for one topic
    client.prepareResponseFrom(offsetResponse(Collections.singletonMap(tp0, 0L), Errors.NONE), coordinator);
    assertEquals(0, consumer.committed(tp0).offset());
    // verify that the assignment immediately changes
    assertTrue(consumer.assignment().equals(Collections.singleton(tp0)));
    // there shouldn't be any need to look up the coordinator or fetch committed offsets;
    // we just look up the starting position and send the record fetch
    client.prepareResponse(listOffsetsResponse(Collections.singletonMap(tp0, 10L), Errors.NONE));
    client.prepareResponse(fetchResponse(tp0, 10L, 1));
    ConsumerRecords<String, String> records = consumer.poll(0);
    assertEquals(1, records.count());
    assertEquals(11L, consumer.position(tp0));
    // new manual assignment
    consumer.assign(Arrays.asList(t2p0));
    // verify that the assignment immediately changes
    assertTrue(consumer.assignment().equals(Collections.singleton(t2p0)));
    // auto commit is disabled, so no offset commit request should be sent
    for (ClientRequest req : client.requests())
        assertTrue(req.requestBuilder().apiKey() != ApiKeys.OFFSET_COMMIT);
    client.requests().clear();
    consumer.close();
}
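prepareRebalance, used throughout these tests, is also not shown in the excerpt. Based on the coordinator lookup visible in the previous test, a sketch might queue the coordinator lookup followed by join-group and sync-group responses; joinGroupFollowerResponse and syncGroupResponse are assumed helper names, not confirmed API:

private Node prepareRebalance(MockClient client, Node node, PartitionAssignor assignor,
                              List<TopicPartition> partitions, Node coordinator) {
    if (coordinator == null) {
        // look up the coordinator, mirroring testManualAssignmentChangeWithAutoCommitDisabled
        client.prepareResponseFrom(new GroupCoordinatorResponse(Errors.NONE, node), node);
        coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());
    }
    // join the group as a follower, then receive the assignment in the sync response
    // (joinGroupFollowerResponse and syncGroupResponse are assumed test helpers)
    client.prepareResponseFrom(joinGroupFollowerResponse(assignor, 1, "memberId", "leaderId", Errors.NONE), coordinator);
    client.prepareResponseFrom(syncGroupResponse(partitions, Errors.NONE), coordinator);
    return coordinator;
}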
Use of org.apache.kafka.clients.consumer.internals.PartitionAssignor in project kafka by apache.
The class KafkaConsumerTest, method testSubscriptionChangesWithAutoCommitEnabled.
/**
 * Verify that when a consumer changes its topic subscription its assigned partitions
 * do not immediately change, and the latest consumed offsets of its to-be-revoked
 * partitions are properly committed (when auto-commit is enabled).
 * Upon unsubscribing from subscribed topics the consumer subscription and assignment
 * are both updated right away but its consumed offsets are not auto committed.
 */
@Test
public void testSubscriptionChangesWithAutoCommitEnabled() {
    int rebalanceTimeoutMs = 60000;
    int sessionTimeoutMs = 30000;
    int heartbeatIntervalMs = 3000;
    // adjust the auto commit interval lower than the heartbeat so we don't need to deal with
    // a concurrent heartbeat request
    int autoCommitIntervalMs = 1000;
    Time time = new MockTime();
    Map<String, Integer> tpCounts = new HashMap<>();
    tpCounts.put(topic, 1);
    tpCounts.put(topic2, 1);
    tpCounts.put(topic3, 1);
    Cluster cluster = TestUtils.singletonCluster(tpCounts);
    Node node = cluster.nodes().get(0);
    Metadata metadata = new Metadata(0, Long.MAX_VALUE);
    metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());
    MockClient client = new MockClient(time, metadata);
    client.setNode(node);
    PartitionAssignor assignor = new RangeAssignor();
    final KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor,
            rebalanceTimeoutMs, sessionTimeoutMs, heartbeatIntervalMs, true, autoCommitIntervalMs);
    // initial subscription
    consumer.subscribe(Arrays.asList(topic, topic2), getConsumerRebalanceListener(consumer));
    // verify that the subscription has changed but the assignment is still unchanged
    assertTrue(consumer.subscription().size() == 2);
    assertTrue(consumer.subscription().contains(topic) && consumer.subscription().contains(topic2));
    assertTrue(consumer.assignment().isEmpty());
    // mock rebalance responses
    Node coordinator = prepareRebalance(client, node, assignor, Arrays.asList(tp0, t2p0), null);
    consumer.poll(0);
    // verify that the subscription is still the same, and now the assignment has caught up
    assertTrue(consumer.subscription().size() == 2);
    assertTrue(consumer.subscription().contains(topic) && consumer.subscription().contains(topic2));
    assertTrue(consumer.assignment().size() == 2);
    assertTrue(consumer.assignment().contains(tp0) && consumer.assignment().contains(t2p0));
    // mock a response to the outstanding fetch so that we have data available on the next poll
    Map<TopicPartition, FetchInfo> fetches1 = new HashMap<>();
    fetches1.put(tp0, new FetchInfo(0, 1));
    fetches1.put(t2p0, new FetchInfo(0, 10));
    client.respondFrom(fetchResponse(fetches1), node);
    client.poll(0, time.milliseconds());
    ConsumerRecords<String, String> records = consumer.poll(0);
    // clear out the prefetch so it doesn't interfere with the rest of the test
    fetches1.put(tp0, new FetchInfo(1, 0));
    fetches1.put(t2p0, new FetchInfo(10, 0));
    client.respondFrom(fetchResponse(fetches1), node);
    client.poll(0, time.milliseconds());
    // verify that the fetch occurred as expected
    assertEquals(11, records.count());
    assertEquals(1L, consumer.position(tp0));
    assertEquals(10L, consumer.position(t2p0));
    // subscription change
    consumer.subscribe(Arrays.asList(topic, topic3), getConsumerRebalanceListener(consumer));
    // verify that the subscription has changed but the assignment is still unchanged
    assertTrue(consumer.subscription().size() == 2);
    assertTrue(consumer.subscription().contains(topic) && consumer.subscription().contains(topic3));
    assertTrue(consumer.assignment().size() == 2);
    assertTrue(consumer.assignment().contains(tp0) && consumer.assignment().contains(t2p0));
    // mock the offset commit response for the to-be-revoked partitions
    Map<TopicPartition, Long> partitionOffsets1 = new HashMap<>();
    partitionOffsets1.put(tp0, 1L);
    partitionOffsets1.put(t2p0, 10L);
    AtomicBoolean commitReceived = prepareOffsetCommitResponse(client, coordinator, partitionOffsets1);
    // mock rebalance responses
    prepareRebalance(client, node, assignor, Arrays.asList(tp0, t3p0), coordinator);
    // mock a response to the next fetch from the new assignment
    Map<TopicPartition, FetchInfo> fetches2 = new HashMap<>();
    fetches2.put(tp0, new FetchInfo(1, 1));
    fetches2.put(t3p0, new FetchInfo(0, 100));
    client.prepareResponse(fetchResponse(fetches2));
    records = consumer.poll(0);
    // verify that the fetch occurred as expected
    assertEquals(101, records.count());
    assertEquals(2L, consumer.position(tp0));
    assertEquals(100L, consumer.position(t3p0));
    // verify that the offset commits occurred as expected
    assertTrue(commitReceived.get());
    // verify that the subscription is still the same, and now the assignment has caught up
    assertTrue(consumer.subscription().size() == 2);
    assertTrue(consumer.subscription().contains(topic) && consumer.subscription().contains(topic3));
    assertTrue(consumer.assignment().size() == 2);
    assertTrue(consumer.assignment().contains(tp0) && consumer.assignment().contains(t3p0));
    consumer.unsubscribe();
    // verify that subscription and assignment are both cleared
    assertTrue(consumer.subscription().isEmpty());
    assertTrue(consumer.assignment().isEmpty());
    client.requests().clear();
    consumer.close();
}
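Outside of tests, the assignor is not passed to a constructor directly; applications select an implementation by class name through the public consumer configuration. A minimal sketch of choosing RoundRobinAssignor this way, using the standard client API (the bootstrap address and group id are placeholders):

import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.RoundRobinAssignor;
import org.apache.kafka.common.serialization.StringDeserializer;

public class AssignorConfigExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder address
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group");           // placeholder group id
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        // select the partition assignment strategy by class name; this is the public way to
        // choose among assignor implementations such as RangeAssignor or RoundRobinAssignor
        props.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, RoundRobinAssignor.class.getName());
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        consumer.close();
    }
}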