Use of org.apache.kafka.common.requests.FindCoordinatorResponse in project kafka by apache.
From the class CoordinatorStrategyTest, method testHandleOldResponseRequiresOneKey:
@Test
public void testHandleOldResponseRequiresOneKey() {
    FindCoordinatorResponseData responseData = new FindCoordinatorResponseData().setErrorCode(Errors.NONE.code());
    FindCoordinatorResponse response = new FindCoordinatorResponse(responseData);
    CoordinatorStrategy strategy = new CoordinatorStrategy(CoordinatorType.GROUP, new LogContext());
    strategy.disableBatch();

    // with batching disabled, the response must be matched against exactly one key:
    // zero keys is rejected...
    assertThrows(IllegalArgumentException.class, () -> strategy.handleResponse(Collections.emptySet(), response));

    // ...and so is more than one key
    CoordinatorKey group1 = CoordinatorKey.byGroupId("foo");
    CoordinatorKey group2 = CoordinatorKey.byGroupId("bar");
    assertThrows(IllegalArgumentException.class, () -> strategy.handleResponse(mkSet(group1, group2), response));
}
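For contrast with the two failure cases above, here is a minimal sketch (not part of the test) of how a successful non-batched lookup response could be built from the same FindCoordinatorResponseData; the setNodeId/setHost/setPort calls are the usual generated-data setters, and the node id, host, and port values are made up for illustration.

// Sketch with assumed values: an error-free non-batched lookup response carrying the coordinator endpoint.
FindCoordinatorResponseData okData = new FindCoordinatorResponseData()
    .setErrorCode(Errors.NONE.code())
    .setNodeId(1)            // illustrative broker id
    .setHost("localhost")    // illustrative host
    .setPort(9092);          // illustrative port
FindCoordinatorResponse okResponse = new FindCoordinatorResponse(okData);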
Use of org.apache.kafka.common.requests.FindCoordinatorResponse in project apache-kafka-on-k8s by banzaicloud.
From the class KafkaConsumerTest, method testCommitsFetchedDuringAssign:
@Test
public void testCommitsFetchedDuringAssign() {
    long offset1 = 10000;
    long offset2 = 20000;
    Time time = new MockTime();
    Cluster cluster = TestUtils.singletonCluster(topic, 2);
    Node node = cluster.nodes().get(0);
    Metadata metadata = createMetadata();
    metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());
    MockClient client = new MockClient(time, metadata);
    client.setNode(node);
    PartitionAssignor assignor = new RoundRobinAssignor();
    KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor, true);
    consumer.assign(singletonList(tp0));

    // lookup coordinator
    client.prepareResponseFrom(new FindCoordinatorResponse(Errors.NONE, node), node);
    Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());

    // fetch offset for one topic
    client.prepareResponseFrom(offsetResponse(Collections.singletonMap(tp0, offset1), Errors.NONE), coordinator);
    assertEquals(offset1, consumer.committed(tp0).offset());

    consumer.assign(Arrays.asList(tp0, tp1));

    // fetch offset for two topics
    Map<TopicPartition, Long> offsets = new HashMap<>();
    offsets.put(tp0, offset1);
    client.prepareResponseFrom(offsetResponse(offsets, Errors.NONE), coordinator);
    assertEquals(offset1, consumer.committed(tp0).offset());

    offsets.remove(tp0);
    offsets.put(tp1, offset2);
    client.prepareResponseFrom(offsetResponse(offsets, Errors.NONE), coordinator);
    assertEquals(offset2, consumer.committed(tp1).offset());

    consumer.close(0, TimeUnit.MILLISECONDS);
}
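The offsetResponse helper used throughout these tests is not included in this excerpt. A plausible sketch, assuming the OffsetFetchResponse and OffsetFetchResponse.PartitionData constructors of this 1.x-era branch, would build one PartitionData per requested partition; treat the exact signatures as an assumption rather than the project's actual helper.

// Hedged sketch of an offsetResponse-style helper (assumed constructors; not the project's code).
private OffsetFetchResponse offsetResponse(Map<TopicPartition, Long> offsets, Errors error) {
    Map<TopicPartition, OffsetFetchResponse.PartitionData> partitionData = new HashMap<>();
    for (Map.Entry<TopicPartition, Long> entry : offsets.entrySet()) {
        // one committed offset per partition, empty metadata, per-partition error from the caller
        partitionData.put(entry.getKey(), new OffsetFetchResponse.PartitionData(entry.getValue(), "", error));
    }
    // no top-level error in this sketch
    return new OffsetFetchResponse(Errors.NONE, partitionData);
}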
Use of org.apache.kafka.common.requests.FindCoordinatorResponse in project apache-kafka-on-k8s by banzaicloud.
From the class KafkaConsumerTest, method testManualAssignmentChangeWithAutoCommitDisabled:
@Test
public void testManualAssignmentChangeWithAutoCommitDisabled() {
    Time time = new MockTime();
    Map<String, Integer> tpCounts = new HashMap<>();
    tpCounts.put(topic, 1);
    tpCounts.put(topic2, 1);
    Cluster cluster = TestUtils.singletonCluster(tpCounts);
    Node node = cluster.nodes().get(0);
    Metadata metadata = createMetadata();
    metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());
    MockClient client = new MockClient(time, metadata);
    client.setNode(node);
    PartitionAssignor assignor = new RangeAssignor();
    KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor, false);

    // lookup coordinator
    client.prepareResponseFrom(new FindCoordinatorResponse(Errors.NONE, node), node);
    Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());

    // manual assignment
    consumer.assign(singleton(tp0));
    consumer.seekToBeginning(singleton(tp0));

    // fetch offset for one topic
    client.prepareResponseFrom(offsetResponse(Collections.singletonMap(tp0, 0L), Errors.NONE), coordinator);
    assertEquals(0, consumer.committed(tp0).offset());

    // verify that assignment immediately changes
    assertTrue(consumer.assignment().equals(singleton(tp0)));

    // there shouldn't be any need to lookup the coordinator or fetch committed offsets.
    // we just lookup the starting position and send the record fetch.
    client.prepareResponse(listOffsetsResponse(Collections.singletonMap(tp0, 10L)));
    client.prepareResponse(fetchResponse(tp0, 10L, 1));
    ConsumerRecords<String, String> records = consumer.poll(5);
    assertEquals(1, records.count());
    assertEquals(11L, consumer.position(tp0));

    // new manual assignment
    consumer.assign(singleton(t2p0));

    // verify that assignment immediately changes
    assertTrue(consumer.assignment().equals(singleton(t2p0)));

    // the auto commit is disabled, so no offset commit request should be sent
    for (ClientRequest req : client.requests()) {
        assertTrue(req.requestBuilder().apiKey() != ApiKeys.OFFSET_COMMIT);
    }
    client.requests().clear();

    consumer.close();
}
Use of org.apache.kafka.common.requests.FindCoordinatorResponse in project apache-kafka-on-k8s by banzaicloud.
From the class KafkaConsumerTest, method shouldAttemptToRejoinGroupAfterSyncGroupFailed:
@Test
public void shouldAttemptToRejoinGroupAfterSyncGroupFailed() throws Exception {
    Time time = new MockTime();
    Cluster cluster = TestUtils.singletonCluster(topic, 1);
    Node node = cluster.nodes().get(0);
    Metadata metadata = new Metadata(0, Long.MAX_VALUE, false);
    metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());
    MockClient client = new MockClient(time, metadata);
    client.setNode(node);
    PartitionAssignor assignor = new RoundRobinAssignor();
    KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor, false);
    consumer.subscribe(singleton(topic), getConsumerRebalanceListener(consumer));

    client.prepareResponseFrom(new FindCoordinatorResponse(Errors.NONE, node), node);
    Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());
    client.prepareResponseFrom(joinGroupFollowerResponse(assignor, 1, "memberId", "leaderId", Errors.NONE), coordinator);
    client.prepareResponseFrom(syncGroupResponse(singletonList(tp0), Errors.NONE), coordinator);
    client.prepareResponseFrom(fetchResponse(tp0, 0, 1), node);
    client.prepareResponseFrom(fetchResponse(tp0, 1, 0), node);
    consumer.poll(0);

    // heartbeat fails due to rebalance in progress
    client.prepareResponseFrom(new MockClient.RequestMatcher() {
        @Override
        public boolean matches(AbstractRequest body) {
            return true;
        }
    }, new HeartbeatResponse(Errors.REBALANCE_IN_PROGRESS), coordinator);

    // join group
    final ByteBuffer byteBuffer = ConsumerProtocol.serializeSubscription(new PartitionAssignor.Subscription(singletonList(topic)));

    // This member becomes the leader
    final JoinGroupResponse leaderResponse = new JoinGroupResponse(Errors.NONE, 1, assignor.name(), "memberId", "memberId", Collections.singletonMap("memberId", byteBuffer));
    client.prepareResponseFrom(leaderResponse, coordinator);

    // sync group fails due to disconnect
    client.prepareResponseFrom(syncGroupResponse(singletonList(tp0), Errors.NONE), coordinator, true);

    // should try and find the new coordinator
    client.prepareResponseFrom(new FindCoordinatorResponse(Errors.NONE, node), node);

    // rejoin group
    client.prepareResponseFrom(joinGroupFollowerResponse(assignor, 1, "memberId", "leaderId", Errors.NONE), coordinator);
    client.prepareResponseFrom(syncGroupResponse(singletonList(tp0), Errors.NONE), coordinator);

    client.prepareResponseFrom(new MockClient.RequestMatcher() {
        @Override
        public boolean matches(final AbstractRequest body) {
            return body instanceof FetchRequest && ((FetchRequest) body).fetchData().containsKey(tp0);
        }
    }, fetchResponse(tp0, 1, 1), node);

    time.sleep(heartbeatIntervalMs);
    Thread.sleep(heartbeatIntervalMs);

    final ConsumerRecords<String, String> records = consumer.poll(0);
    assertFalse(records.isEmpty());

    consumer.close(0, TimeUnit.MILLISECONDS);
}
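Since MockClient.RequestMatcher declares a single matches(AbstractRequest) method, a build targeting Java 8 or later could express the fetch matcher above as a lambda; this is only a stylistic sketch of the same prepared response, not a change the project made.

// Same fetch matcher as above, written as a lambda (Java 8+; behavior unchanged).
client.prepareResponseFrom(
    body -> body instanceof FetchRequest && ((FetchRequest) body).fetchData().containsKey(tp0),
    fetchResponse(tp0, 1, 1), node);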
Use of org.apache.kafka.common.requests.FindCoordinatorResponse in project apache-kafka-on-k8s by banzaicloud.
From the class KafkaConsumerTest, method prepareRebalance:
private Node prepareRebalance(MockClient client, Node node, final Set<String> subscribedTopics, PartitionAssignor assignor, List<TopicPartition> partitions, Node coordinator) {
    if (coordinator == null) {
        // lookup coordinator
        client.prepareResponseFrom(new FindCoordinatorResponse(Errors.NONE, node), node);
        coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());
    }

    // join group
    client.prepareResponseFrom(new MockClient.RequestMatcher() {
        @Override
        public boolean matches(AbstractRequest body) {
            JoinGroupRequest joinGroupRequest = (JoinGroupRequest) body;
            PartitionAssignor.Subscription subscription = ConsumerProtocol.deserializeSubscription(joinGroupRequest.groupProtocols().get(0).metadata());
            return subscribedTopics.equals(new HashSet<>(subscription.topics()));
        }
    }, joinGroupFollowerResponse(assignor, 1, "memberId", "leaderId", Errors.NONE), coordinator);

    // sync group
    client.prepareResponseFrom(syncGroupResponse(partitions, Errors.NONE), coordinator);

    return coordinator;
}
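A typical call site (not shown in this excerpt) would pass null for the coordinator so the helper also prepares the FindCoordinatorResponse lookup; the following one-liner is a hypothetical usage sketch based on the signature above.

// Hypothetical usage: prepare lookup, join, and sync for a single-topic, single-partition rebalance.
Node coordinator = prepareRebalance(client, node, singleton(topic), assignor, singletonList(tp0), null);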