Example usage of org.apache.kafka.common.requests.MetadataResponse in the Apache Kafka project, taken from class KafkaConsumerTest, method testPartitionsForNonExistingTopic.
@Test
public void testPartitionsForNonExistingTopic() {
    // Wire up a consumer whose metadata initially knows about one existing topic.
    ConsumerMetadata consumerMetadata = createMetadata(subscription);
    MockClient mockClient = new MockClient(time, consumerMetadata);
    initMetadata(mockClient, Collections.singletonMap(topic, 1));

    // Queue a metadata response carrying the cluster info but an empty topic list,
    // which is what the broker returns for a lookup of a topic that does not exist.
    Cluster cluster = consumerMetadata.fetch();
    MetadataResponse emptyTopicsResponse = RequestTestUtils.metadataResponse(
            cluster.nodes(),
            cluster.clusterResource().clusterId(),
            cluster.controller().id(),
            Collections.emptyList());
    mockClient.prepareResponse(emptyTopicsResponse);

    // partitionsFor on an unknown topic must come back as an empty list.
    KafkaConsumer<String, String> consumer =
            newConsumer(time, mockClient, subscription, consumerMetadata, assignor, true, groupInstanceId);
    assertEquals(Collections.emptyList(), consumer.partitionsFor("non-exist-topic"));
}
Example usage of org.apache.kafka.common.requests.MetadataResponse in the Apache Kafka project, taken from class ConsumerCoordinatorTest, method testForceMetadataDeleteForPatternSubscriptionDuringRebalance.
@Test
public void testForceMetadataDeleteForPatternSubscriptionDuringRebalance() {
    try (ConsumerCoordinator coordinator = buildCoordinator(rebalanceConfig, new Metrics(), assignors, true, subscriptions)) {
        subscriptions.subscribe(Pattern.compile("test.*"), rebalanceListener);

        // Initial metadata contains both topics, so the pattern subscription resolves to both.
        // Plain HashMap instead of double-brace initialization: the anonymous-subclass idiom
        // silently captures the enclosing test instance and creates a throwaway class.
        HashMap<String, Integer> initialTopicCounts = new HashMap<>();
        initialTopicCounts.put(topic1, 1);
        initialTopicCounts.put(topic2, 1);
        client.updateMetadata(RequestTestUtils.metadataUpdateWith(1, initialTopicCounts));
        coordinator.maybeUpdateSubscriptionMetadata();
        assertEquals(new HashSet<>(Arrays.asList(topic1, topic2)), subscriptions.subscription());

        client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
        coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));

        HashMap<String, Integer> remainingTopicCounts = new HashMap<>();
        remainingTopicCounts.put(topic1, 1);
        MetadataResponse deletedMetadataResponse = RequestTestUtils.metadataUpdateWith(1, remainingTopicCounts);
        // Instrument the test so that metadata will contain only one topic after next refresh.
        client.prepareMetadataUpdate(deletedMetadataResponse);

        client.prepareResponse(joinGroupFollowerResponse(1, consumerId, "leader", Errors.NONE));
        client.prepareResponse(body -> {
            SyncGroupRequest sync = (SyncGroupRequest) body;
            return sync.data().memberId().equals(consumerId)
                    && sync.data().generationId() == 1
                    && sync.groupAssignments().isEmpty();
        }, syncGroupResponse(singletonList(t1p), Errors.NONE));
        partitionAssignor.prepare(singletonMap(consumerId, singletonList(t1p)));

        // This will trigger rebalance.
        coordinator.poll(time.timer(Long.MAX_VALUE));

        // Make sure that the metadata was refreshed during the rebalance and thus
        // subscriptions now contain only one topic.
        assertEquals(singleton(topic1), subscriptions.subscription());

        // Refresh the metadata again. Since there have been no changes since the last refresh,
        // it won't trigger rebalance again.
        metadata.requestUpdate();
        consumerClient.poll(time.timer(Long.MAX_VALUE));
        assertFalse(coordinator.rejoinNeededOrPending());
    }
}
Example usage of org.apache.kafka.common.requests.MetadataResponse in the Apache Kafka project, taken from class ConsumerMetadataTest, method testBasicSubscription.
private void testBasicSubscription(Set<String> expectedTopics, Set<String> expectedInternalTopics) {
    // The metadata request is expected to ask for the union of regular and internal topics.
    Set<String> union = new HashSet<>(expectedTopics);
    union.addAll(expectedInternalTopics);

    ConsumerMetadata metadata = newConsumerMetadata(false);
    MetadataRequest.Builder builder = metadata.newMetadataRequestBuilder();
    assertEquals(union, new HashSet<>(builder.topics()));

    // Answer with metadata for every requested topic, flagging the internal ones as such.
    List<MetadataResponse.TopicMetadata> topicMetadataList = new ArrayList<>();
    for (String regularTopic : expectedTopics)
        topicMetadataList.add(topicMetadata(regularTopic, false));
    for (String internalTopic : expectedInternalTopics)
        topicMetadataList.add(topicMetadata(internalTopic, true));

    MetadataResponse response = RequestTestUtils.metadataResponse(singletonList(node), "clusterId", node.id(), topicMetadataList);
    metadata.updateWithCurrentRequestVersion(response, false, time.milliseconds());

    // Both regular and internal topics must be visible in the fetched cluster view.
    assertEquals(union, metadata.fetch().topics());
}
Example usage of org.apache.kafka.common.requests.MetadataResponse in the Apache Kafka project, taken from class ConsumerMetadataTest, method testPatternSubscription.
private void testPatternSubscription(boolean includeInternalTopics) {
    subscription.subscribe(Pattern.compile("__.*"), new NoOpConsumerRebalanceListener());
    ConsumerMetadata metadata = newConsumerMetadata(includeInternalTopics);

    // A pattern subscription cannot enumerate its topics up front,
    // so the consumer must build an all-topics metadata request.
    MetadataRequest.Builder builder = metadata.newMetadataRequestBuilder();
    assertTrue(builder.isAllTopics());

    // Respond with one internal matching topic, one regular matching topic,
    // and one topic that does not match the pattern at all.
    List<MetadataResponse.TopicMetadata> topicMetadataList = new ArrayList<>();
    topicMetadataList.add(topicMetadata("__consumer_offsets", true));
    topicMetadataList.add(topicMetadata("__matching_topic", false));
    topicMetadataList.add(topicMetadata("non_matching_topic", false));
    MetadataResponse response = RequestTestUtils.metadataResponse(singletonList(node), "clusterId", node.id(), topicMetadataList);
    metadata.updateWithCurrentRequestVersion(response, false, time.milliseconds());

    // Only pattern-matching topics are retained; the internal one only when explicitly included.
    Set<String> expectedRetained = includeInternalTopics
            ? Utils.mkSet("__matching_topic", "__consumer_offsets")
            : Collections.singleton("__matching_topic");
    assertEquals(expectedRetained, metadata.fetch().topics());
}
Example usage of org.apache.kafka.common.requests.MetadataResponse in the Apache Kafka project, taken from class KafkaAdminClientTest, method testAlterPartitionReassignments.
@Test
public void testAlterPartitionReassignments() throws Exception {
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        TopicPartition tp1 = new TopicPartition("A", 0);
        TopicPartition tp2 = new TopicPartition("B", 0);
        Map<TopicPartition, Optional<NewPartitionReassignment>> reassignments = new HashMap<>();
        reassignments.put(tp1, Optional.empty());
        reassignments.put(tp2, Optional.of(new NewPartitionReassignment(Arrays.asList(1, 2, 3))));
        // Shared partition-level response with no error, reused across scenarios below.
        ReassignablePartitionResponse normalPartitionResponse = new ReassignablePartitionResponse().setPartitionIndex(0);

        // 1. server returns less responses than number of partitions we sent
        AlterPartitionReassignmentsResponseData responseData1 = new AlterPartitionReassignmentsResponseData();
        responseData1.setResponses(Collections.singletonList(singlePartitionTopicResponse("A", normalPartitionResponse)));
        env.kafkaClient().prepareResponse(new AlterPartitionReassignmentsResponse(responseData1));
        AlterPartitionReassignmentsResult result1 = env.adminClient().alterPartitionReassignments(reassignments);
        Future<Void> future1 = result1.all();
        Future<Void> future2 = result1.values().get(tp1);
        TestUtils.assertFutureError(future1, UnknownServerException.class);
        TestUtils.assertFutureError(future2, UnknownServerException.class);

        // 2. NOT_CONTROLLER error handling: after the error the client is fed a metadata
        // response naming node 1 as controller, then a normal response; all futures succeed.
        AlterPartitionReassignmentsResponseData controllerErrResponseData = new AlterPartitionReassignmentsResponseData()
                .setErrorCode(Errors.NOT_CONTROLLER.code())
                .setErrorMessage(Errors.NOT_CONTROLLER.message())
                .setResponses(Arrays.asList(
                        singlePartitionTopicResponse("A", normalPartitionResponse),
                        singlePartitionTopicResponse("B", normalPartitionResponse)));
        MetadataResponse controllerNodeResponse = RequestTestUtils.metadataResponse(env.cluster().nodes(), env.cluster().clusterResource().clusterId(), 1, Collections.emptyList());
        AlterPartitionReassignmentsResponseData normalResponse = new AlterPartitionReassignmentsResponseData()
                .setResponses(Arrays.asList(
                        singlePartitionTopicResponse("A", normalPartitionResponse),
                        singlePartitionTopicResponse("B", normalPartitionResponse)));
        env.kafkaClient().prepareResponse(new AlterPartitionReassignmentsResponse(controllerErrResponseData));
        env.kafkaClient().prepareResponse(controllerNodeResponse);
        env.kafkaClient().prepareResponse(new AlterPartitionReassignmentsResponse(normalResponse));
        AlterPartitionReassignmentsResult controllerErrResult = env.adminClient().alterPartitionReassignments(reassignments);
        controllerErrResult.all().get();
        controllerErrResult.values().get(tp1).get();
        controllerErrResult.values().get(tp2).get();

        // 3. partition-level error: only the affected partition's future fails
        AlterPartitionReassignmentsResponseData partitionLevelErrData = new AlterPartitionReassignmentsResponseData()
                .setResponses(Arrays.asList(
                        singlePartitionTopicResponse("A", new ReassignablePartitionResponse()
                                .setPartitionIndex(0)
                                .setErrorMessage(Errors.INVALID_REPLICA_ASSIGNMENT.message())
                                .setErrorCode(Errors.INVALID_REPLICA_ASSIGNMENT.code())),
                        singlePartitionTopicResponse("B", normalPartitionResponse)));
        env.kafkaClient().prepareResponse(new AlterPartitionReassignmentsResponse(partitionLevelErrData));
        AlterPartitionReassignmentsResult partitionLevelErrResult = env.adminClient().alterPartitionReassignments(reassignments);
        TestUtils.assertFutureError(partitionLevelErrResult.values().get(tp1), Errors.INVALID_REPLICA_ASSIGNMENT.exception().getClass());
        partitionLevelErrResult.values().get(tp2).get();

        // 4. top-level error: every future fails with the server-provided message
        String errorMessage = "this is custom error message";
        AlterPartitionReassignmentsResponseData topLevelErrResponseData = new AlterPartitionReassignmentsResponseData()
                .setErrorCode(Errors.CLUSTER_AUTHORIZATION_FAILED.code())
                .setErrorMessage(errorMessage)
                .setResponses(Arrays.asList(
                        singlePartitionTopicResponse("A", normalPartitionResponse),
                        singlePartitionTopicResponse("B", normalPartitionResponse)));
        env.kafkaClient().prepareResponse(new AlterPartitionReassignmentsResponse(topLevelErrResponseData));
        AlterPartitionReassignmentsResult topLevelErrResult = env.adminClient().alterPartitionReassignments(reassignments);
        assertEquals(errorMessage, TestUtils.assertFutureThrows(topLevelErrResult.all(), Errors.CLUSTER_AUTHORIZATION_FAILED.exception().getClass()).getMessage());
        assertEquals(errorMessage, TestUtils.assertFutureThrows(topLevelErrResult.values().get(tp1), Errors.CLUSTER_AUTHORIZATION_FAILED.exception().getClass()).getMessage());
        assertEquals(errorMessage, TestUtils.assertFutureThrows(topLevelErrResult.values().get(tp2), Errors.CLUSTER_AUTHORIZATION_FAILED.exception().getClass()).getMessage());

        // 5. unrepresentable topic name / partition index: these entries fail with
        // InvalidTopicException while the valid entry still succeeds (note only one
        // topic's response is prepared, so the invalid entries never reach the broker).
        TopicPartition invalidTopicTP = new TopicPartition("", 0);
        TopicPartition invalidPartitionTP = new TopicPartition("ABC", -1);
        Map<TopicPartition, Optional<NewPartitionReassignment>> invalidTopicReassignments = new HashMap<>();
        invalidTopicReassignments.put(invalidPartitionTP, Optional.of(new NewPartitionReassignment(Arrays.asList(1, 2, 3))));
        invalidTopicReassignments.put(invalidTopicTP, Optional.of(new NewPartitionReassignment(Arrays.asList(1, 2, 3))));
        invalidTopicReassignments.put(tp1, Optional.of(new NewPartitionReassignment(Arrays.asList(1, 2, 3))));
        AlterPartitionReassignmentsResponseData singlePartResponseData = new AlterPartitionReassignmentsResponseData()
                .setResponses(Collections.singletonList(singlePartitionTopicResponse("A", normalPartitionResponse)));
        env.kafkaClient().prepareResponse(new AlterPartitionReassignmentsResponse(singlePartResponseData));
        AlterPartitionReassignmentsResult unrepresentableTopicResult = env.adminClient().alterPartitionReassignments(invalidTopicReassignments);
        TestUtils.assertFutureError(unrepresentableTopicResult.values().get(invalidTopicTP), InvalidTopicException.class);
        TestUtils.assertFutureError(unrepresentableTopicResult.values().get(invalidPartitionTP), InvalidTopicException.class);
        unrepresentableTopicResult.values().get(tp1).get();

        // Test success scenario
        AlterPartitionReassignmentsResponseData noErrResponseData = new AlterPartitionReassignmentsResponseData()
                .setErrorCode(Errors.NONE.code())
                .setErrorMessage(Errors.NONE.message())
                .setResponses(Arrays.asList(
                        singlePartitionTopicResponse("A", normalPartitionResponse),
                        singlePartitionTopicResponse("B", normalPartitionResponse)));
        env.kafkaClient().prepareResponse(new AlterPartitionReassignmentsResponse(noErrResponseData));
        AlterPartitionReassignmentsResult noErrResult = env.adminClient().alterPartitionReassignments(reassignments);
        noErrResult.all().get();
        noErrResult.values().get(tp1).get();
        noErrResult.values().get(tp2).get();
    }
}

// Builds a ReassignableTopicResponse holding exactly one partition response under the given
// topic name. Extracted to remove the builder chain repeated eleven times above.
private static ReassignableTopicResponse singlePartitionTopicResponse(String name, ReassignablePartitionResponse partitionResponse) {
    return new ReassignableTopicResponse().setName(name).setPartitions(Collections.singletonList(partitionResponse));
}
Aggregations