Use of org.apache.kafka.common.requests.FindCoordinatorResponse in project apache-kafka-on-k8s by banzaicloud.
From the class KafkaConsumerTest, method testManualAssignmentChangeWithAutoCommitEnabled:
@Test
public void testManualAssignmentChangeWithAutoCommitEnabled() {
    Time time = new MockTime();
    Map<String, Integer> tpCounts = new HashMap<>();
    tpCounts.put(topic, 1);
    tpCounts.put(topic2, 1);
    Cluster cluster = TestUtils.singletonCluster(tpCounts);
    Node node = cluster.nodes().get(0);
    Metadata metadata = createMetadata();
    metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());
    MockClient client = new MockClient(time, metadata);
    client.setNode(node);
    PartitionAssignor assignor = new RangeAssignor();
    KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor, true);
    // lookup coordinator
    client.prepareResponseFrom(new FindCoordinatorResponse(Errors.NONE, node), node);
    Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());
    // manual assignment
    consumer.assign(singleton(tp0));
    consumer.seekToBeginning(singleton(tp0));
    // fetch offset for one topic
    client.prepareResponseFrom(offsetResponse(Collections.singletonMap(tp0, 0L), Errors.NONE), coordinator);
    assertEquals(0, consumer.committed(tp0).offset());
    // verify that the assignment changes immediately
    assertTrue(consumer.assignment().equals(singleton(tp0)));
    // there shouldn't be any need to look up the coordinator or fetch committed offsets;
    // we just look up the starting position and send the record fetch.
    client.prepareResponse(listOffsetsResponse(Collections.singletonMap(tp0, 10L)));
    client.prepareResponse(fetchResponse(tp0, 10L, 1));
    ConsumerRecords<String, String> records = consumer.poll(5);
    assertEquals(1, records.count());
    assertEquals(11L, consumer.position(tp0));
    // mock the offset commit response for the partitions about to be revoked
    AtomicBoolean commitReceived = prepareOffsetCommitResponse(client, coordinator, tp0, 11);
    // new manual assignment
    consumer.assign(singleton(t2p0));
    // verify that the assignment changes immediately
    assertTrue(consumer.assignment().equals(singleton(t2p0)));
    // verify that the offset commits occurred as expected
    assertTrue(commitReceived.get());
    client.requests().clear();
    consumer.close();
}
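The prepareOffsetCommitResponse helper used above both enqueues the commit response and records whether a matching commit request was actually sent. A minimal sketch of how such a helper can be written against MockClient, assuming the pre-2.0 OffsetCommitRequest/OffsetCommitResponse classes used in this project (imports omitted to match the excerpts above); the real helper in KafkaConsumerTest may validate the request differently:

// A minimal sketch, not the project's exact helper: enqueue an OffsetCommitResponse
// from the coordinator and flip a flag once a commit request carrying the expected
// offset for the given partition is observed. PartitionData's public 'offset' field
// and the OffsetCommitResponse constructor are assumptions based on the pre-2.0 API.
private AtomicBoolean prepareOffsetCommitResponse(MockClient client, Node coordinator,
                                                  final TopicPartition partition, final long offset) {
    final AtomicBoolean commitReceived = new AtomicBoolean(false);
    client.prepareResponseFrom(new MockClient.RequestMatcher() {
        @Override
        public boolean matches(AbstractRequest body) {
            OffsetCommitRequest commitRequest = (OffsetCommitRequest) body;
            OffsetCommitRequest.PartitionData data = commitRequest.offsetData().get(partition);
            if (data != null && data.offset == offset)
                commitReceived.set(true);
            return true;
        }
    }, new OffsetCommitResponse(Collections.singletonMap(partition, Errors.NONE)), coordinator);
    return commitReceived;
}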
Use of org.apache.kafka.common.requests.FindCoordinatorResponse in project apache-kafka-on-k8s by banzaicloud.
From the class KafkaConsumerTest, method prepareRebalance:
private Node prepareRebalance(MockClient client, Node node, PartitionAssignor assignor, List<TopicPartition> partitions, Node coordinator) {
    if (coordinator == null) {
        // lookup coordinator
        client.prepareResponseFrom(new FindCoordinatorResponse(Errors.NONE, node), node);
        coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());
    }
    // join group
    client.prepareResponseFrom(joinGroupFollowerResponse(assignor, 1, "memberId", "leaderId", Errors.NONE), coordinator);
    // sync group
    client.prepareResponseFrom(syncGroupResponse(partitions, Errors.NONE), coordinator);
    return coordinator;
}
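For context, a subscribe-based test would typically call this helper just before the poll that drives the group join. A hypothetical usage, reusing the fixtures (consumer, client, node, assignor, topic, tp0) from the tests above:

// Hypothetical usage: queue the FindCoordinator/JoinGroup/SyncGroup round-trip,
// then poll once so the consumer completes the rebalance and picks up tp0.
consumer.subscribe(singleton(topic));
Node coordinator = prepareRebalance(client, node, assignor, singletonList(tp0), null);
consumer.poll(0);
assertEquals(singleton(tp0), consumer.assignment());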
Use of org.apache.kafka.common.requests.FindCoordinatorResponse in project apache-kafka-on-k8s by banzaicloud.
From the class KafkaConsumerTest, method testResetToCommittedOffset:
@Test
public void testResetToCommittedOffset() {
    Time time = new MockTime();
    Cluster cluster = TestUtils.singletonCluster(topic, 1);
    Node node = cluster.nodes().get(0);
    Metadata metadata = createMetadata();
    metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());
    MockClient client = new MockClient(time, metadata);
    client.setNode(node);
    PartitionAssignor assignor = new RoundRobinAssignor();
    KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor, OffsetResetStrategy.NONE, true);
    consumer.assign(singletonList(tp0));
    // lookup coordinator
    client.prepareResponseFrom(new FindCoordinatorResponse(Errors.NONE, node), node);
    Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());
    // fetch the committed offset (539) and resume from it
    client.prepareResponseFrom(offsetResponse(Collections.singletonMap(tp0, 539L), Errors.NONE), coordinator);
    consumer.poll(0);
    assertEquals(539L, consumer.position(tp0));
}
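The offsetResponse helper wraps per-partition committed offsets into the OffsetFetchResponse that the consumer's coordinator lookup expects. A minimal sketch, with the constructor and PartitionData shapes assumed from the pre-2.0 request classes:

// A minimal sketch of an offsetResponse-style helper; the real one in
// KafkaConsumerTest may differ. Empty metadata ("") is a placeholder.
private OffsetFetchResponse offsetResponse(Map<TopicPartition, Long> offsets, Errors error) {
    Map<TopicPartition, OffsetFetchResponse.PartitionData> partitionData = new HashMap<>();
    for (Map.Entry<TopicPartition, Long> entry : offsets.entrySet())
        partitionData.put(entry.getKey(),
            new OffsetFetchResponse.PartitionData(entry.getValue(), "", error));
    return new OffsetFetchResponse(Errors.NONE, partitionData);
}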
Use of org.apache.kafka.common.requests.FindCoordinatorResponse in project apache-kafka-on-k8s by banzaicloud.
From the class KafkaConsumerTest, method testMissingOffsetNoResetPolicy:
@Test(expected = NoOffsetForPartitionException.class)
public void testMissingOffsetNoResetPolicy() {
    Time time = new MockTime();
    Cluster cluster = TestUtils.singletonCluster(topic, 1);
    Node node = cluster.nodes().get(0);
    Metadata metadata = createMetadata();
    metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());
    MockClient client = new MockClient(time, metadata);
    client.setNode(node);
    PartitionAssignor assignor = new RoundRobinAssignor();
    KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor, OffsetResetStrategy.NONE, true);
    consumer.assign(singletonList(tp0));
    // lookup coordinator
    client.prepareResponseFrom(new FindCoordinatorResponse(Errors.NONE, node), node);
    Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());
    // look up the committed offset and find nothing (-1 is the "no offset" sentinel)
    client.prepareResponseFrom(offsetResponse(Collections.singletonMap(tp0, -1L), Errors.NONE), coordinator);
    consumer.poll(0);
}
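Because the consumer was built with OffsetResetStrategy.NONE, there is no reset fallback when the offset fetch returns the -1 sentinel, so poll() throws NoOffsetForPartitionException. Outside of tests, a consumer opts into the same behavior through configuration. A minimal sketch (imports omitted; the bootstrap server and group id are placeholders):

// Minimal sketch: "none" maps to OffsetResetStrategy.NONE, so the consumer throws
// NoOffsetForPartitionException instead of resetting when no committed offset exists.
Properties props = new Properties();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group");           // placeholder
props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "none");
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);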
Use of org.apache.kafka.common.requests.FindCoordinatorResponse in project kafka by apache.
From the class KafkaAdminClientTest, method testRetryDescribeTransactionsAfterNotCoordinatorError:
@Test
public void testRetryDescribeTransactionsAfterNotCoordinatorError() throws Exception {
    MockTime time = new MockTime();
    int retryBackoffMs = 100;
    Cluster cluster = mockCluster(3, 0);
    Map<String, Object> configOverride = newStrMap(AdminClientConfig.RETRY_BACKOFF_MS_CONFIG, "" + retryBackoffMs);
    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(time, cluster, configOverride)) {
        String transactionalId = "foo";
        Iterator<Node> nodeIterator = env.cluster().nodes().iterator();
        Node coordinator1 = nodeIterator.next();
        Node coordinator2 = nodeIterator.next();
        // first FindCoordinator lookup resolves to coordinator1
        env.kafkaClient().prepareResponse(request -> request instanceof FindCoordinatorRequest,
            new FindCoordinatorResponse(new FindCoordinatorResponseData().setCoordinators(Arrays.asList(
                new FindCoordinatorResponseData.Coordinator().setKey(transactionalId).setErrorCode(Errors.NONE.code())
                    .setNodeId(coordinator1.id()).setHost(coordinator1.host()).setPort(coordinator1.port())))));
        // coordinator1 answers DescribeTransactions with NOT_COORDINATOR, forcing a retry
        env.kafkaClient().prepareResponseFrom(request -> {
            if (!(request instanceof DescribeTransactionsRequest)) {
                return false;
            } else {
                // Backoff needed here for the retry of FindCoordinator
                time.sleep(retryBackoffMs);
                return true;
            }
        }, new DescribeTransactionsResponse(new DescribeTransactionsResponseData().setTransactionStates(singletonList(
            new DescribeTransactionsResponseData.TransactionState().setErrorCode(Errors.NOT_COORDINATOR.code())
                .setTransactionalId(transactionalId)))), coordinator1);
        // retried FindCoordinator lookup resolves to coordinator2
        env.kafkaClient().prepareResponse(request -> request instanceof FindCoordinatorRequest,
            new FindCoordinatorResponse(new FindCoordinatorResponseData().setCoordinators(Arrays.asList(
                new FindCoordinatorResponseData.Coordinator().setKey(transactionalId).setErrorCode(Errors.NONE.code())
                    .setNodeId(coordinator2.id()).setHost(coordinator2.host()).setPort(coordinator2.port())))));
        TransactionDescription expected = new TransactionDescription(coordinator2.id(), TransactionState.COMPLETE_COMMIT,
            12345L, 15, 10000L, OptionalLong.empty(), emptySet());
        env.kafkaClient().prepareResponseFrom(request -> request instanceof DescribeTransactionsRequest,
            new DescribeTransactionsResponse(new DescribeTransactionsResponseData().setTransactionStates(singletonList(
                new DescribeTransactionsResponseData.TransactionState().setErrorCode(Errors.NONE.code())
                    .setProducerEpoch((short) expected.producerEpoch()).setProducerId(expected.producerId())
                    .setTransactionalId(transactionalId).setTransactionTimeoutMs(10000).setTransactionStartTimeMs(-1)
                    .setTransactionState(expected.state().toString())))), coordinator2);
        DescribeTransactionsResult result = env.adminClient().describeTransactions(singleton(transactionalId));
        KafkaFuture<TransactionDescription> future = result.description(transactionalId);
        assertEquals(expected, future.get());
    }
}
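Note that the FindCoordinatorResponse constructions here differ from the consumer tests above: newer protocol versions carry a list of coordinators keyed by transactional id or group id. Current apache/kafka test code also offers a static factory that builds the same single-coordinator payload more compactly; a sketch, assuming the FindCoordinatorResponse.prepareResponse helper present in recent apache/kafka trees (verify against your version):

// Assumed helper: builds a one-coordinator response for the given key in one call,
// equivalent to the hand-built FindCoordinatorResponseData above.
FindCoordinatorResponse response =
    FindCoordinatorResponse.prepareResponse(Errors.NONE, transactionalId, coordinator1);
env.kafkaClient().prepareResponse(request -> request instanceof FindCoordinatorRequest, response);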