Use of org.apache.kafka.common.message.MetadataResponseData.MetadataResponsePartition in project kafka by apache.
From the class GetListOffsetsCallsBenchmark, method setup:
@Setup(Level.Trial)
public void setup() {
    MetadataResponseData data = new MetadataResponseData();
    List<MetadataResponseTopic> mrTopicList = new ArrayList<>();
    Set<String> topics = new HashSet<>();
    // Build topicCount topics, each with partitionCount partitions
    for (int topicIndex = 0; topicIndex < topicCount; topicIndex++) {
        Uuid topicId = Uuid.randomUuid();
        String topicName = "topic-" + topicIndex;
        MetadataResponseTopic mrTopic = new MetadataResponseTopic()
            .setTopicId(topicId).setName(topicName)
            .setErrorCode((short) 0).setIsInternal(false);
        List<MetadataResponsePartition> mrPartitionList = new ArrayList<>();
        for (int partition = 0; partition < partitionCount; partition++) {
            TopicPartition tp = new TopicPartition(topicName, partition);
            topics.add(tp.topic());
            futures.put(tp, new KafkaFutureImpl<>());
            topicPartitionOffsets.put(tp, OffsetSpec.latest());
            MetadataResponsePartition mrPartition = new MetadataResponsePartition()
                .setLeaderId(partition % numNodes).setPartitionIndex(partition)
                .setIsrNodes(Arrays.asList(0, 1, 2)).setReplicaNodes(Arrays.asList(0, 1, 2))
                .setOfflineReplicas(Collections.emptyList()).setErrorCode((short) 0);
            mrPartitionList.add(mrPartition);
        }
        mrTopic.setPartitions(mrPartitionList);
        mrTopicList.add(mrTopic);
    }
    data.setTopics(new MetadataResponseData.MetadataResponseTopicCollection(mrTopicList.listIterator()));
    long deadline = 0L;
    short version = 0;
    // Pre-populate the operation context with the metadata response so the benchmark does not need a live broker
    context = new MetadataOperationContext<>(topics, new ListOffsetsOptions(), deadline, futures);
    context.setResponse(Optional.of(new MetadataResponse(data, version)));
    AdminClientUnitTestEnv adminEnv = new AdminClientUnitTestEnv(mockCluster());
    admin = (KafkaAdminClient) adminEnv.adminClient();
}
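The @Benchmark method that consumes this state is not part of the excerpt. A plausible shape for it, assuming the internal KafkaAdminClient.getListOffsetsCalls method takes the context, offset specs, and futures prepared above (this signature is an assumption, not taken from this excerpt), would be:

@Benchmark
public Object testGetListOffsetsCalls() {
    // Builds (but does not send) the ListOffsets calls from the pre-populated metadata context
    return admin.getListOffsetsCalls(context, topicPartitionOffsets, futures);
}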
Use of org.apache.kafka.common.message.MetadataResponseData.MetadataResponsePartition in project kafka by apache.
From the class KafkaAdminClientTest, method prepareMetadataResponse:
private static MetadataResponse prepareMetadataResponse(Cluster cluster, Errors topicError, Errors partitionError) {
    List<MetadataResponseTopic> metadata = new ArrayList<>();
    for (String topic : cluster.topics()) {
        List<MetadataResponsePartition> pms = new ArrayList<>();
        for (PartitionInfo pInfo : cluster.availablePartitionsForTopic(topic)) {
            MetadataResponsePartition pm = new MetadataResponsePartition()
                .setErrorCode(partitionError.code()).setPartitionIndex(pInfo.partition())
                .setLeaderId(pInfo.leader().id()).setLeaderEpoch(234)
                .setReplicaNodes(Arrays.stream(pInfo.replicas()).map(Node::id).collect(Collectors.toList()))
                .setIsrNodes(Arrays.stream(pInfo.inSyncReplicas()).map(Node::id).collect(Collectors.toList()))
                .setOfflineReplicas(Arrays.stream(pInfo.offlineReplicas()).map(Node::id).collect(Collectors.toList()));
            pms.add(pm);
        }
        MetadataResponseTopic tm = new MetadataResponseTopic()
            .setErrorCode(topicError.code()).setName(topic)
            .setIsInternal(false).setPartitions(pms);
        metadata.add(tm);
    }
    return MetadataResponse.prepareResponse(true, 0, cluster.nodes(), cluster.clusterResource().clusterId(),
        cluster.controller().id(), metadata, MetadataResponse.AUTHORIZED_OPERATIONS_OMITTED);
}
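Tests in this class typically enqueue the prepared response on the mock network client before issuing an Admin API call that needs topic metadata. A minimal usage sketch, assuming the mockCluster helper and the AdminClientUnitTestEnv wiring used elsewhere in KafkaAdminClientTest:

try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) {
    env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
    // The next metadata request the admin client sends will receive this canned response
    env.kafkaClient().prepareResponse(prepareMetadataResponse(env.cluster(), Errors.NONE, Errors.NONE));
    // ... then invoke an Admin API call (e.g. listOffsets) that first fetches topic metadata
}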
Use of org.apache.kafka.common.message.MetadataResponseData.MetadataResponsePartition in project kafka by apache.
From the class MetadataTest, method testIgnoreLeaderEpochInOlderMetadataResponse:
/**
* Prior to Kafka version 2.4 (which coincides with Metadata version 9), the broker does not propagate leader epoch
* information accurately while a reassignment is in progress, so we cannot rely on it. This is explained in more
* detail in MetadataResponse's constructor.
*/
@Test
public void testIgnoreLeaderEpochInOlderMetadataResponse() {
    TopicPartition tp = new TopicPartition("topic", 0);
    MetadataResponsePartition partitionMetadata = new MetadataResponsePartition()
        .setPartitionIndex(tp.partition()).setLeaderId(5).setLeaderEpoch(10)
        .setReplicaNodes(Arrays.asList(1, 2, 3)).setIsrNodes(Arrays.asList(1, 2, 3))
        .setOfflineReplicas(Collections.emptyList()).setErrorCode(Errors.NONE.code());
    MetadataResponseTopic topicMetadata = new MetadataResponseTopic()
        .setName(tp.topic()).setErrorCode(Errors.NONE.code())
        .setPartitions(Collections.singletonList(partitionMetadata)).setIsInternal(false);
    MetadataResponseTopicCollection topics = new MetadataResponseTopicCollection();
    topics.add(topicMetadata);
    MetadataResponseData data = new MetadataResponseData()
        .setClusterId("clusterId").setControllerId(0)
        .setTopics(topics).setBrokers(new MetadataResponseBrokerCollection());
    for (short version = ApiKeys.METADATA.oldestVersion(); version < 9; version++) {
        ByteBuffer buffer = MessageUtil.toByteBuffer(data, version);
        MetadataResponse response = MetadataResponse.parse(buffer, version);
        assertFalse(response.hasReliableLeaderEpochs());
        metadata.updateWithCurrentRequestVersion(response, false, 100);
        assertTrue(metadata.partitionMetadataIfCurrent(tp).isPresent());
        MetadataResponse.PartitionMetadata responseMetadata = metadata.partitionMetadataIfCurrent(tp).get();
        assertEquals(Optional.empty(), responseMetadata.leaderEpoch);
    }
    for (short version = 9; version <= ApiKeys.METADATA.latestVersion(); version++) {
        ByteBuffer buffer = MessageUtil.toByteBuffer(data, version);
        MetadataResponse response = MetadataResponse.parse(buffer, version);
        assertTrue(response.hasReliableLeaderEpochs());
        metadata.updateWithCurrentRequestVersion(response, false, 100);
        assertTrue(metadata.partitionMetadataIfCurrent(tp).isPresent());
        MetadataResponse.PartitionMetadata responseMetadata = metadata.partitionMetadataIfCurrent(tp).get();
        assertEquals(Optional.of(10), responseMetadata.leaderEpoch);
    }
}
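The hasReliableLeaderEpochs flag asserted above is, in essence, a version gate: epochs are only trusted for MetadataResponse v9 (Kafka 2.4) and later. A simplified standalone sketch of that rule, with the helper name assumed rather than taken from the Kafka source:

// Sketch of the version gate this test exercises (assumed helper, not Kafka's actual API)
static boolean hasReliableLeaderEpochs(short metadataResponseVersion) {
    // Brokers before Metadata v9 may report stale epochs during reassignments, so ignore them
    return metadataResponseVersion >= 9;
}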
Use of org.apache.kafka.common.message.MetadataResponseData.MetadataResponsePartition in project kafka by apache.
From the class MetadataTest, method testStaleMetadata:
@Test
public void testStaleMetadata() {
    TopicPartition tp = new TopicPartition("topic", 0);
    MetadataResponsePartition partitionMetadata = new MetadataResponsePartition()
        .setPartitionIndex(tp.partition()).setLeaderId(1).setLeaderEpoch(10)
        .setReplicaNodes(Arrays.asList(1, 2, 3)).setIsrNodes(Arrays.asList(1, 2, 3))
        .setOfflineReplicas(Collections.emptyList()).setErrorCode(Errors.NONE.code());
    MetadataResponseTopic topicMetadata = new MetadataResponseTopic()
        .setName(tp.topic()).setErrorCode(Errors.NONE.code())
        .setPartitions(Collections.singletonList(partitionMetadata)).setIsInternal(false);
    MetadataResponseTopicCollection topics = new MetadataResponseTopicCollection();
    topics.add(topicMetadata);
    MetadataResponseData data = new MetadataResponseData()
        .setClusterId("clusterId").setControllerId(0)
        .setTopics(topics).setBrokers(new MetadataResponseBrokerCollection());
    metadata.updateWithCurrentRequestVersion(new MetadataResponse(data, ApiKeys.METADATA.latestVersion()), false, 100);
    // Older epoch with changed ISR should be ignored
    partitionMetadata.setPartitionIndex(tp.partition()).setLeaderId(1).setLeaderEpoch(9)
        .setReplicaNodes(Arrays.asList(1, 2, 3)).setIsrNodes(Arrays.asList(1, 2))
        .setOfflineReplicas(Collections.emptyList()).setErrorCode(Errors.NONE.code());
    metadata.updateWithCurrentRequestVersion(new MetadataResponse(data, ApiKeys.METADATA.latestVersion()), false, 101);
    assertEquals(Optional.of(10), metadata.lastSeenLeaderEpoch(tp));
    assertTrue(metadata.partitionMetadataIfCurrent(tp).isPresent());
    MetadataResponse.PartitionMetadata responseMetadata = metadata.partitionMetadataIfCurrent(tp).get();
    assertEquals(Arrays.asList(1, 2, 3), responseMetadata.inSyncReplicaIds);
    assertEquals(Optional.of(10), responseMetadata.leaderEpoch);
}
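The second update is dropped because its leader epoch (9) is older than the last seen epoch (10). A simplified standalone sketch of the acceptance rule this test exercises (the helper name and the tie-breaking behaviour on equal epochs are assumptions, not the client's actual implementation):

// Accept new partition metadata only if its epoch is not older than the last seen epoch
static boolean shouldRetainNewMetadata(Optional<Integer> lastSeenEpoch, int newEpoch) {
    // If no epoch has been seen yet, accept the new metadata unconditionally
    return lastSeenEpoch.map(last -> newEpoch >= last).orElse(true);
}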
Use of org.apache.kafka.common.message.MetadataResponseData.MetadataResponsePartition in project kafka by apache.
From the class MetadataTest, method testLeaderMetadataInconsistentWithBrokerMetadata:
@Test
public void testLeaderMetadataInconsistentWithBrokerMetadata() {
    // Tests a reordering scenario which can lead to inconsistent leader state.
    // A partition initially has one broker offline. That broker comes online and
    // is elected leader. The client sees these two events in the opposite order.
    TopicPartition tp = new TopicPartition("topic", 0);
    Node node0 = new Node(0, "localhost", 9092);
    Node node1 = new Node(1, "localhost", 9093);
    Node node2 = new Node(2, "localhost", 9094);
    // The first metadata received by the client (epoch=10): broker 0 leads and is online
    MetadataResponsePartition firstPartitionMetadata = new MetadataResponsePartition()
        .setPartitionIndex(tp.partition()).setErrorCode(Errors.NONE.code())
        .setLeaderEpoch(10).setLeaderId(0)
        .setReplicaNodes(Arrays.asList(0, 1, 2)).setIsrNodes(Arrays.asList(0, 1, 2))
        .setOfflineReplicas(Collections.emptyList());
    // The second metadata received is stale (epoch=8): broker 0 is offline and broker 1 leads
    MetadataResponsePartition secondPartitionMetadata = new MetadataResponsePartition()
        .setPartitionIndex(tp.partition()).setErrorCode(Errors.NONE.code())
        .setLeaderEpoch(8).setLeaderId(1)
        .setReplicaNodes(Arrays.asList(0, 1, 2)).setIsrNodes(Arrays.asList(1, 2))
        .setOfflineReplicas(Collections.singletonList(0));
    metadata.updateWithCurrentRequestVersion(new MetadataResponse(new MetadataResponseData()
        .setTopics(buildTopicCollection(tp.topic(), firstPartitionMetadata))
        .setBrokers(buildBrokerCollection(Arrays.asList(node0, node1, node2))),
        ApiKeys.METADATA.latestVersion()), false, 10L);
    metadata.updateWithCurrentRequestVersion(new MetadataResponse(new MetadataResponseData()
        .setTopics(buildTopicCollection(tp.topic(), secondPartitionMetadata))
        .setBrokers(buildBrokerCollection(Arrays.asList(node1, node2))),
        ApiKeys.METADATA.latestVersion()), false, 20L);
    assertNull(metadata.fetch().leaderFor(tp));
    assertEquals(Optional.of(10), metadata.lastSeenLeaderEpoch(tp));
    assertFalse(metadata.currentLeader(tp).leader.isPresent());
}
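The buildTopicCollection and buildBrokerCollection helpers referenced above are not part of this excerpt. A plausible reconstruction, given how they are called here (an assumption, not the actual MetadataTest code):

// Wrap a single partition's metadata in a one-topic collection
private static MetadataResponseTopicCollection buildTopicCollection(String topic, MetadataResponsePartition partitionMetadata) {
    MetadataResponseTopic topicMetadata = new MetadataResponseTopic()
        .setErrorCode(Errors.NONE.code()).setName(topic).setIsInternal(false)
        .setPartitions(Collections.singletonList(partitionMetadata));
    MetadataResponseTopicCollection topics = new MetadataResponseTopicCollection();
    topics.add(topicMetadata);
    return topics;
}

// Convert a list of Nodes into the broker collection carried by MetadataResponseData
private static MetadataResponseBrokerCollection buildBrokerCollection(List<Node> nodes) {
    MetadataResponseBrokerCollection brokers = new MetadataResponseBrokerCollection();
    for (Node node : nodes) {
        brokers.add(new MetadataResponseData.MetadataResponseBroker()
            .setNodeId(node.id()).setHost(node.host())
            .setPort(node.port()).setRack(node.rack()));
    }
    return brokers;
}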