Usage example of org.apache.kafka.common.Uuid in the Apache Kafka project:
class UpdateMetadataRequestTest, method testVersionLogic.
/**
 * Verifies the logic we have in UpdateMetadataRequest to present a unified interface across the various versions
 * works correctly. For example, `UpdateMetadataPartitionState.topicName` is not serialized/deserialized in
 * recent versions, but we set it manually so that we can always present the ungrouped partition states
 * independently of the version.
 */
@Test
public void testVersionLogic() {
String topic0 = "topic0";
String topic1 = "topic1";
// Exercise every wire version of the UPDATE_METADATA RPC.
for (short version : UPDATE_METADATA.allVersions()) {
// Three partitions across two topics; only topic0-partition0 carries offline replicas.
List<UpdateMetadataPartitionState> partitionStates = asList(new UpdateMetadataPartitionState().setTopicName(topic0).setPartitionIndex(0).setControllerEpoch(2).setLeader(0).setLeaderEpoch(10).setIsr(asList(0, 1)).setZkVersion(10).setReplicas(asList(0, 1, 2)).setOfflineReplicas(asList(2)), new UpdateMetadataPartitionState().setTopicName(topic0).setPartitionIndex(1).setControllerEpoch(2).setLeader(1).setLeaderEpoch(11).setIsr(asList(1, 2, 3)).setZkVersion(11).setReplicas(asList(1, 2, 3)).setOfflineReplicas(emptyList()), new UpdateMetadataPartitionState().setTopicName(topic1).setPartitionIndex(0).setControllerEpoch(2).setLeader(2).setLeaderEpoch(11).setIsr(asList(2, 3)).setZkVersion(11).setReplicas(asList(2, 3, 4)).setOfflineReplicas(emptyList()));
List<UpdateMetadataEndpoint> broker0Endpoints = new ArrayList<>();
broker0Endpoints.add(new UpdateMetadataEndpoint().setHost("host0").setPort(9090).setSecurityProtocol(SecurityProtocol.PLAINTEXT.id));
// Non plaintext endpoints are only supported from version 1
if (version >= 1) {
broker0Endpoints.add(new UpdateMetadataEndpoint().setHost("host0").setPort(9091).setSecurityProtocol(SecurityProtocol.SSL.id));
}
// Custom listeners are only supported from version 3
if (version >= 3) {
broker0Endpoints.get(0).setListener("listener0");
broker0Endpoints.get(1).setListener("listener1");
}
List<UpdateMetadataBroker> liveBrokers = asList(new UpdateMetadataBroker().setId(0).setRack("rack0").setEndpoints(broker0Endpoints), new UpdateMetadataBroker().setId(1).setEndpoints(asList(new UpdateMetadataEndpoint().setHost("host1").setPort(9090).setSecurityProtocol(SecurityProtocol.PLAINTEXT.id).setListener("PLAINTEXT"))));
Map<String, Uuid> topicIds = new HashMap<>();
topicIds.put(topic0, Uuid.randomUuid());
topicIds.put(topic1, Uuid.randomUuid());
UpdateMetadataRequest request = new UpdateMetadataRequest.Builder(version, 1, 2, 3, partitionStates, liveBrokers, topicIds).build();
// The in-memory request must reflect everything we passed in, regardless of version.
assertEquals(new HashSet<>(partitionStates), iterableToSet(request.partitionStates()));
assertEquals(liveBrokers, request.liveBrokers());
assertEquals(1, request.controllerId());
assertEquals(2, request.controllerEpoch());
assertEquals(3, request.brokerEpoch());
// Round-trip through the wire format to see what older versions actually preserve.
ByteBuffer byteBuffer = request.serialize();
UpdateMetadataRequest deserializedRequest = new UpdateMetadataRequest(new UpdateMetadataRequestData(new ByteBufferAccessor(byteBuffer), version), version);
// From here on we mutate the expected objects in place to match what the
// chosen version is able to carry over the wire.
// Rack is only supported from version 2
if (version < 2) {
for (UpdateMetadataBroker liveBroker : liveBrokers) liveBroker.setRack("");
}
// Non plaintext listener name is only supported from version 3
if (version < 3) {
// Older versions derive the listener name from the security protocol.
for (UpdateMetadataBroker liveBroker : liveBrokers) {
for (UpdateMetadataEndpoint endpoint : liveBroker.endpoints()) {
SecurityProtocol securityProtocol = SecurityProtocol.forId(endpoint.securityProtocol());
endpoint.setListener(ListenerName.forSecurityProtocol(securityProtocol).value());
}
}
}
// Offline replicas are only supported from version 4
if (version < 4)
partitionStates.get(0).setOfflineReplicas(emptyList());
assertEquals(new HashSet<>(partitionStates), iterableToSet(deserializedRequest.partitionStates()));
assertEquals(liveBrokers, deserializedRequest.liveBrokers());
assertEquals(1, deserializedRequest.controllerId());
assertEquals(2, deserializedRequest.controllerEpoch());
// Broker epoch is only supported from version 5
if (version >= 5)
assertEquals(3, deserializedRequest.brokerEpoch());
else
assertEquals(-1, deserializedRequest.brokerEpoch());
// Topic IDs are only serialized from version 7; older versions come back as ZERO_UUID.
long topicIdCount = deserializedRequest.data().topicStates().stream().map(UpdateMetadataRequestData.UpdateMetadataTopicState::topicId).filter(topicId -> topicId != Uuid.ZERO_UUID).count();
if (version >= 7)
assertEquals(2, topicIdCount);
else
assertEquals(0, topicIdCount);
}
}
Usage example of org.apache.kafka.common.Uuid in the Apache Kafka project:
class RequestResponseTest, method testFetchResponseV4.
/**
 * Round-trips a FetchResponse through wire version 4 and checks that the
 * deserialized per-partition data matches what was serialized. Version 4
 * predates topic IDs, so every TopicIdPartition uses the zero UUID.
 */
@Test
public void testFetchResponseV4() {
    LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> partitions = new LinkedHashMap<>();
    // Map used on the read side to resolve topic names; IDs are random because
    // v4 responses never carry them anyway.
    Map<Uuid, String> idToName = new HashMap<>();
    idToName.put(Uuid.randomUuid(), "bar");
    idToName.put(Uuid.randomUuid(), "foo");
    MemoryRecords emptyRecords = MemoryRecords.readableRecords(ByteBuffer.allocate(10));
    FetchResponseData.AbortedTransaction txn1 = new FetchResponseData.AbortedTransaction().setProducerId(10).setFirstOffset(100);
    FetchResponseData.AbortedTransaction txn2 = new FetchResponseData.AbortedTransaction().setProducerId(15).setFirstOffset(50);
    // Use zero UUID since this is an old request version.
    partitions.put(new TopicIdPartition(Uuid.ZERO_UUID, new TopicPartition("bar", 0)),
        new FetchResponseData.PartitionData()
            .setPartitionIndex(0)
            .setHighWatermark(1000000)
            .setAbortedTransactions(asList(txn1, txn2))
            .setRecords(emptyRecords));
    partitions.put(new TopicIdPartition(Uuid.ZERO_UUID, new TopicPartition("bar", 1)),
        new FetchResponseData.PartitionData()
            .setPartitionIndex(1)
            .setHighWatermark(900000)
            .setLastStableOffset(5)
            .setRecords(emptyRecords));
    partitions.put(new TopicIdPartition(Uuid.ZERO_UUID, new TopicPartition("foo", 0)),
        new FetchResponseData.PartitionData()
            .setPartitionIndex(0)
            .setHighWatermark(70000)
            .setLastStableOffset(6)
            .setRecords(emptyRecords));
    FetchResponse original = FetchResponse.of(Errors.NONE, 10, INVALID_SESSION_ID, partitions);
    FetchResponse roundTripped = FetchResponse.parse(original.serialize((short) 4), (short) 4);
    // Expected view is keyed by plain TopicPartition, which is how v4 consumers see it.
    Map<TopicPartition, FetchResponseData.PartitionData> expected = partitions.entrySet().stream()
        .collect(Collectors.toMap(e -> e.getKey().topicPartition(), Map.Entry::getValue));
    assertEquals(expected, roundTripped.responseData(idToName, (short) 4));
}
Usage example of org.apache.kafka.common.Uuid in the Apache Kafka project:
class KafkaConsumerTest, method initMetadata.
/**
 * Seeds the given mock client with an initial metadata response that covers
 * the supplied topics, looking up each topic's ID from the test's topicIds map.
 */
private void initMetadata(MockClient mockClient, Map<String, Integer> partitionCounts) {
    Map<String, Uuid> metadataIds = new HashMap<>();
    partitionCounts.keySet().forEach(topic -> metadataIds.put(topic, topicIds.get(topic)));
    mockClient.updateMetadata(RequestTestUtils.metadataUpdateWithIds(1, partitionCounts, metadataIds));
}
Usage example of org.apache.kafka.common.Uuid in the Apache Kafka project:
class MockAdminClient, method addTopic.
/**
 * Registers a topic with the mock admin client.
 *
 * @param internal    whether the topic is an internal (Kafka-managed) topic
 * @param name        topic name; must not already be registered
 * @param partitions  partition layout; leader, replicas and ISR must all be known brokers
 * @param configs     topic-level configs to store
 * @param usesTopicId when true, a random topic ID is generated and indexed; otherwise ZERO_UUID is used
 * @throws IllegalArgumentException if the topic exists or references unknown brokers
 */
public synchronized void addTopic(boolean internal, String name, List<TopicPartitionInfo> partitions, Map<String, String> configs, boolean usesTopicId) {
    if (allTopics.containsKey(name)) {
        throw new IllegalArgumentException(String.format("Topic %s was already added.", name));
    }
    // Validate every partition before mutating any state.
    for (TopicPartitionInfo info : partitions) {
        if (!brokers.contains(info.leader())) {
            throw new IllegalArgumentException("Leader broker unknown");
        }
        if (!brokers.containsAll(info.replicas())) {
            throw new IllegalArgumentException("Unknown brokers in replica list");
        }
        if (!brokers.containsAll(info.isr())) {
            throw new IllegalArgumentException("Unknown brokers in isr list");
        }
    }
    // Record one log dir per led partition: the leader broker's first configured dir.
    ArrayList<String> logDirs = new ArrayList<>();
    for (TopicPartitionInfo info : partitions) {
        if (info.leader() != null) {
            logDirs.add(brokerLogDirs.get(info.leader().id()).get(0));
        }
    }
    Uuid topicId = Uuid.ZERO_UUID;
    if (usesTopicId) {
        topicId = Uuid.randomUuid();
        // Keep the bidirectional name <-> id indexes in sync.
        topicIds.put(name, topicId);
        topicNames.put(topicId, name);
    }
    allTopics.put(name, new TopicMetadata(topicId, internal, partitions, logDirs, configs));
}
Usage example of org.apache.kafka.common.Uuid in the Apache Kafka project:
class MockAdminClient, method handleDeleteTopicsUsingIds.
/**
 * Deletes topics identified by topic ID, returning a future per requested ID.
 *
 * <p>If {@code timeoutNextRequests} is set, every future fails with a
 * {@link TimeoutException} and no topic state is touched. Otherwise each ID is
 * removed from {@code topicNames} and {@code allTopics}; unknown IDs complete
 * exceptionally with {@link UnknownTopicOrPartitionException}.
 *
 * @param topicIdCollection topic IDs to delete (defensively copied)
 * @param options           delete options (currently unused by the mock)
 * @return map from topic ID to the future tracking its deletion
 */
private Map<Uuid, KafkaFuture<Void>> handleDeleteTopicsUsingIds(Collection<Uuid> topicIdCollection, DeleteTopicsOptions options) {
    Map<Uuid, KafkaFuture<Void>> deleteTopicsResult = new HashMap<>();
    // NOTE: this local was previously named "topicIds", shadowing the
    // Map<String, Uuid> field of the same name. That made the cleanup call
    // below ("topicIds.remove(name)") remove a String from a Collection<Uuid>
    // — always a no-op — so the name->id index kept stale entries after
    // deletion. Renaming the local fixes the shadowing.
    Collection<Uuid> idsToDelete = new ArrayList<>(topicIdCollection);
    if (timeoutNextRequests > 0) {
        // Simulate a request-level timeout: fail every future, consume one token.
        for (final Uuid topicId : idsToDelete) {
            KafkaFutureImpl<Void> future = new KafkaFutureImpl<>();
            future.completeExceptionally(new TimeoutException());
            deleteTopicsResult.put(topicId, future);
        }
        --timeoutNextRequests;
        return deleteTopicsResult;
    }
    for (final Uuid topicId : idsToDelete) {
        KafkaFutureImpl<Void> future = new KafkaFutureImpl<>();
        String name = topicNames.remove(topicId);
        if (name == null || allTopics.remove(name) == null) {
            future.completeExceptionally(new UnknownTopicOrPartitionException(String.format("Topic %s does not exist.", topicId)));
        } else {
            // Drop the name->id mapping from the field map so the mock's
            // indexes stay consistent with allTopics/topicNames.
            topicIds.remove(name);
            future.complete(null);
        }
        deleteTopicsResult.put(topicId, future);
    }
    return deleteTopicsResult;
}
End of aggregated usage examples.