Use of org.apache.kafka.common.requests.MetadataResponse in project kafka by apache.
The class MetadataTest, method testStaleMetadata.
@Test
public void testStaleMetadata() {
    // Seed the cache with a partition at leader epoch 10, then replay a
    // response carrying an older epoch (9) and a shrunken ISR. The stale
    // update must be ignored and the epoch-10 state retained.
    TopicPartition topicPartition = new TopicPartition("topic", 0);
    MetadataResponsePartition partitionState = new MetadataResponsePartition()
            .setPartitionIndex(topicPartition.partition())
            .setLeaderId(1)
            .setLeaderEpoch(10)
            .setReplicaNodes(Arrays.asList(1, 2, 3))
            .setIsrNodes(Arrays.asList(1, 2, 3))
            .setOfflineReplicas(Collections.emptyList())
            .setErrorCode(Errors.NONE.code());
    MetadataResponseTopic topicState = new MetadataResponseTopic()
            .setName(topicPartition.topic())
            .setErrorCode(Errors.NONE.code())
            .setPartitions(Collections.singletonList(partitionState))
            .setIsInternal(false);
    MetadataResponseTopicCollection topicCollection = new MetadataResponseTopicCollection();
    topicCollection.add(topicState);
    MetadataResponseData responseData = new MetadataResponseData()
            .setClusterId("clusterId")
            .setControllerId(0)
            .setTopics(topicCollection)
            .setBrokers(new MetadataResponseBrokerCollection());
    metadata.updateWithCurrentRequestVersion(
            new MetadataResponse(responseData, ApiKeys.METADATA.latestVersion()), false, 100);

    // Older epoch with changed ISR should be ignored. Mutating partitionState
    // in place also mutates responseData, which is replayed below.
    partitionState.setPartitionIndex(topicPartition.partition())
            .setLeaderId(1)
            .setLeaderEpoch(9)
            .setReplicaNodes(Arrays.asList(1, 2, 3))
            .setIsrNodes(Arrays.asList(1, 2))
            .setOfflineReplicas(Collections.emptyList())
            .setErrorCode(Errors.NONE.code());
    metadata.updateWithCurrentRequestVersion(
            new MetadataResponse(responseData, ApiKeys.METADATA.latestVersion()), false, 101);

    assertEquals(Optional.of(10), metadata.lastSeenLeaderEpoch(topicPartition));
    assertTrue(metadata.partitionMetadataIfCurrent(topicPartition).isPresent());
    MetadataResponse.PartitionMetadata currentPartition =
            this.metadata.partitionMetadataIfCurrent(topicPartition).get();
    assertEquals(Arrays.asList(1, 2, 3), currentPartition.inSyncReplicaIds);
    assertEquals(Optional.of(10), currentPartition.leaderEpoch);
}
Use of org.apache.kafka.common.requests.MetadataResponse in project kafka by apache.
The class MetadataTest, method testLeaderMetadataInconsistentWithBrokerMetadata.
@Test
public void testLeaderMetadataInconsistentWithBrokerMetadata() {
    // Tests a reordering scenario which can lead to inconsistent leader state.
    // A partition initially has one broker offline. That broker comes online and
    // is elected leader. The client sees these two events in the opposite order:
    // the newer metadata (epoch 10, broker 0 online and leading) arrives first,
    // then stale metadata (epoch 8, broker 0 offline) arrives second.
    TopicPartition partition = new TopicPartition("topic", 0);
    Node broker0 = new Node(0, "localhost", 9092);
    Node broker1 = new Node(1, "localhost", 9093);
    Node broker2 = new Node(2, "localhost", 9094);

    // The first metadata received by broker (epoch=10).
    MetadataResponsePartition newerPartitionState = new MetadataResponsePartition()
            .setPartitionIndex(partition.partition())
            .setErrorCode(Errors.NONE.code())
            .setLeaderEpoch(10)
            .setLeaderId(0)
            .setReplicaNodes(Arrays.asList(0, 1, 2))
            .setIsrNodes(Arrays.asList(0, 1, 2))
            .setOfflineReplicas(Collections.emptyList());

    // The second metadata received has stale metadata (epoch=8).
    MetadataResponsePartition stalePartitionState = new MetadataResponsePartition()
            .setPartitionIndex(partition.partition())
            .setErrorCode(Errors.NONE.code())
            .setLeaderEpoch(8)
            .setLeaderId(1)
            .setReplicaNodes(Arrays.asList(0, 1, 2))
            .setIsrNodes(Arrays.asList(1, 2))
            .setOfflineReplicas(Collections.singletonList(0));

    metadata.updateWithCurrentRequestVersion(
            new MetadataResponse(
                    new MetadataResponseData()
                            .setTopics(buildTopicCollection(partition.topic(), newerPartitionState))
                            .setBrokers(buildBrokerCollection(Arrays.asList(broker0, broker1, broker2))),
                    ApiKeys.METADATA.latestVersion()),
            false, 10L);
    metadata.updateWithCurrentRequestVersion(
            new MetadataResponse(
                    new MetadataResponseData()
                            .setTopics(buildTopicCollection(partition.topic(), stalePartitionState))
                            .setBrokers(buildBrokerCollection(Arrays.asList(broker1, broker2))),
                    ApiKeys.METADATA.latestVersion()),
            false, 20L);

    // The stale update is rejected, but broker 0 is no longer in the broker
    // list, so no leader node can be resolved for the partition.
    assertNull(metadata.fetch().leaderFor(partition));
    assertEquals(Optional.of(10), metadata.lastSeenLeaderEpoch(partition));
    assertFalse(metadata.currentLeader(partition).leader.isPresent());
}
Use of org.apache.kafka.common.requests.MetadataResponse in project kafka by apache.
The class KafkaAdminClientTest, method testListPartitionReassignments.
@Test
public void testListPartitionReassignments() throws Exception {
    // Exercises listPartitionReassignments() in three scenarios:
    //   1. NOT_CONTROLLER triggers a controller re-lookup and a retry that succeeds.
    //   2. UNKNOWN_TOPIC_OR_PARTITION surfaces as a future failure.
    //   3. A successful response maps each ongoing reassignment to its TopicPartition.
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        TopicPartition tp1 = new TopicPartition("A", 0);
        OngoingPartitionReassignment tp1PartitionReassignment = new OngoingPartitionReassignment()
                .setPartitionIndex(0)
                .setRemovingReplicas(Arrays.asList(1, 2, 3))
                .setAddingReplicas(Arrays.asList(4, 5, 6))
                .setReplicas(Arrays.asList(1, 2, 3, 4, 5, 6));
        OngoingTopicReassignment tp1Reassignment = new OngoingTopicReassignment()
                .setName("A")
                .setPartitions(Collections.singletonList(tp1PartitionReassignment));
        TopicPartition tp2 = new TopicPartition("B", 0);
        OngoingPartitionReassignment tp2PartitionReassignment = new OngoingPartitionReassignment()
                .setPartitionIndex(0)
                .setRemovingReplicas(Arrays.asList(1, 2, 3))
                .setAddingReplicas(Arrays.asList(4, 5, 6))
                .setReplicas(Arrays.asList(1, 2, 3, 4, 5, 6));
        OngoingTopicReassignment tp2Reassignment = new OngoingTopicReassignment()
                .setName("B")
                .setPartitions(Collections.singletonList(tp2PartitionReassignment));

        // 1. NOT_CONTROLLER error handling: the client should refresh metadata
        // to find the new controller and retry the request there.
        ListPartitionReassignmentsResponseData notControllerData = new ListPartitionReassignmentsResponseData()
                .setErrorCode(Errors.NOT_CONTROLLER.code())
                .setErrorMessage(Errors.NOT_CONTROLLER.message());
        MetadataResponse controllerNodeResponse = RequestTestUtils.metadataResponse(
                env.cluster().nodes(), env.cluster().clusterResource().clusterId(), 1, Collections.emptyList());
        ListPartitionReassignmentsResponseData reassignmentsData = new ListPartitionReassignmentsResponseData()
                .setTopics(Arrays.asList(tp1Reassignment, tp2Reassignment));
        env.kafkaClient().prepareResponse(new ListPartitionReassignmentsResponse(notControllerData));
        env.kafkaClient().prepareResponse(controllerNodeResponse);
        env.kafkaClient().prepareResponse(new ListPartitionReassignmentsResponse(reassignmentsData));
        ListPartitionReassignmentsResult noControllerResult = env.adminClient().listPartitionReassignments();
        // no error
        noControllerResult.reassignments().get();

        // 2. UNKNOWN_TOPIC_OR_PARTITION surfaces as the matching exception.
        ListPartitionReassignmentsResponseData unknownTpData = new ListPartitionReassignmentsResponseData()
                .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code())
                .setErrorMessage(Errors.UNKNOWN_TOPIC_OR_PARTITION.message());
        env.kafkaClient().prepareResponse(new ListPartitionReassignmentsResponse(unknownTpData));
        ListPartitionReassignmentsResult unknownTpResult =
                env.adminClient().listPartitionReassignments(new HashSet<>(Arrays.asList(tp1, tp2)));
        TestUtils.assertFutureError(unknownTpResult.reassignments(), UnknownTopicOrPartitionException.class);

        // 3. Success: each partition's reassignment data round-trips intact.
        ListPartitionReassignmentsResponseData responseData = new ListPartitionReassignmentsResponseData()
                .setTopics(Arrays.asList(tp1Reassignment, tp2Reassignment));
        env.kafkaClient().prepareResponse(new ListPartitionReassignmentsResponse(responseData));
        ListPartitionReassignmentsResult responseResult = env.adminClient().listPartitionReassignments();
        Map<TopicPartition, PartitionReassignment> reassignments = responseResult.reassignments().get();
        assertReassignmentMatches(tp1PartitionReassignment, reassignments.get(tp1));
        assertReassignmentMatches(tp2PartitionReassignment, reassignments.get(tp2));
    }
}

/**
 * Asserts that a returned {@link PartitionReassignment} carries the same adding,
 * removing, and full replica lists as the reassignment the broker reported.
 * (The original inline assertions duplicated the replicas() check twice per
 * partition; this helper performs each check exactly once.)
 */
private static void assertReassignmentMatches(OngoingPartitionReassignment expected,
                                              PartitionReassignment actual) {
    assertEquals(expected.addingReplicas(), actual.addingReplicas());
    assertEquals(expected.removingReplicas(), actual.removingReplicas());
    assertEquals(expected.replicas(), actual.replicas());
}
Use of org.apache.kafka.common.requests.MetadataResponse in project kafka by apache.
The class KafkaAdminClientTest, method testListTransactions.
@Test
public void testListTransactions() throws Exception {
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        // Advertise every cluster node in the metadata response so the admin
        // client fans the ListTransactions request out to all brokers.
        MetadataResponseData.MetadataResponseBrokerCollection brokerCollection =
                new MetadataResponseData.MetadataResponseBrokerCollection();
        for (Node node : env.cluster().nodes()) {
            brokerCollection.add(new MetadataResponseData.MetadataResponseBroker()
                    .setHost(node.host())
                    .setNodeId(node.id())
                    .setPort(node.port())
                    .setRack(node.rack()));
        }
        env.kafkaClient().prepareResponse(
                request -> request instanceof MetadataRequest,
                new MetadataResponse(new MetadataResponseData().setBrokers(brokerCollection),
                        MetadataResponseData.HIGHEST_SUPPORTED_VERSION));

        List<TransactionListing> expectedListings = Arrays.asList(
                new TransactionListing("foo", 12345L, TransactionState.ONGOING),
                new TransactionListing("bar", 98765L, TransactionState.PREPARE_ABORT),
                new TransactionListing("baz", 13579L, TransactionState.COMPLETE_COMMIT));
        // The per-node responses below index expectedListings by node id,
        // so the cluster must contain exactly nodes 0, 1, and 2.
        assertEquals(Utils.mkSet(0, 1, 2),
                env.cluster().nodes().stream().map(Node::id).collect(Collectors.toSet()));

        // Each broker answers with exactly one transaction listing.
        for (Node node : env.cluster().nodes()) {
            ListTransactionsResponseData perNodeResponse =
                    new ListTransactionsResponseData().setErrorCode(Errors.NONE.code());
            TransactionListing listing = expectedListings.get(node.id());
            perNodeResponse.transactionStates().add(new ListTransactionsResponseData.TransactionState()
                    .setTransactionalId(listing.transactionalId())
                    .setProducerId(listing.producerId())
                    .setTransactionState(listing.state().toString()));
            env.kafkaClient().prepareResponseFrom(
                    request -> request instanceof ListTransactionsRequest,
                    new ListTransactionsResponse(perNodeResponse), node);
        }

        // The aggregated result should contain every listing, order-independent.
        ListTransactionsResult result = env.adminClient().listTransactions();
        assertEquals(new HashSet<>(expectedListings), new HashSet<>(result.all().get()));
    }
}
Use of org.apache.kafka.common.requests.MetadataResponse in project kafka by apache.
The class MetadataTest, method testNoEpoch.
@Test
public void testNoEpoch() {
    // A metadata response without leader epochs should still be usable, and a
    // later updateLastSeenEpochIfNewer() must not invent an epoch for it.
    metadata.updateWithCurrentRequestVersion(emptyMetadataResponse(), false, 0L);
    MetadataResponse epochlessResponse = RequestTestUtils.metadataUpdateWith(
            "dummy", 1, Collections.emptyMap(), Collections.singletonMap("topic-1", 1));
    metadata.updateWithCurrentRequestVersion(epochlessResponse, false, 10L);

    TopicPartition partition = new TopicPartition("topic-1", 0);

    // no epoch
    assertFalse(metadata.lastSeenLeaderEpoch(partition).isPresent());

    // still works
    assertTrue(metadata.partitionMetadataIfCurrent(partition).isPresent());
    assertEquals(0, metadata.partitionMetadataIfCurrent(partition).get().partition());
    assertEquals(Optional.of(0), metadata.partitionMetadataIfCurrent(partition).get().leaderId);

    // Since epoch was null, this shouldn't update it
    metadata.updateLastSeenEpochIfNewer(partition, 10);
    assertTrue(metadata.partitionMetadataIfCurrent(partition).isPresent());
    assertFalse(metadata.partitionMetadataIfCurrent(partition).get().leaderEpoch.isPresent());
}
Aggregations