Use of org.apache.kafka.common.Uuid in project kafka by apache.
The class GetListOffsetsCallsBenchmark, method setup.
@Setup(Level.Trial)
public void setup() {
    MetadataResponseData data = new MetadataResponseData();
    List<MetadataResponseTopic> mrTopicList = new ArrayList<>();
    Set<String> topics = new HashSet<>();
    // Build a synthetic MetadataResponse: topicCount topics, each with a freshly generated topic ID.
    for (int topicIndex = 0; topicIndex < topicCount; topicIndex++) {
        Uuid topicId = Uuid.randomUuid();
        String topicName = "topic-" + topicIndex;
        MetadataResponseTopic mrTopic = new MetadataResponseTopic()
            .setTopicId(topicId)
            .setName(topicName)
            .setErrorCode((short) 0)
            .setIsInternal(false);
        List<MetadataResponsePartition> mrPartitionList = new ArrayList<>();
        for (int partition = 0; partition < partitionCount; partition++) {
            TopicPartition tp = new TopicPartition(topicName, partition);
            topics.add(tp.topic());
            futures.put(tp, new KafkaFutureImpl<>());
            topicPartitionOffsets.put(tp, OffsetSpec.latest());
            MetadataResponsePartition mrPartition = new MetadataResponsePartition()
                .setLeaderId(partition % numNodes)
                .setPartitionIndex(partition)
                .setIsrNodes(Arrays.asList(0, 1, 2))
                .setReplicaNodes(Arrays.asList(0, 1, 2))
                .setOfflineReplicas(Collections.emptyList())
                .setErrorCode((short) 0);
            mrPartitionList.add(mrPartition);
        }
        mrTopic.setPartitions(mrPartitionList);
        mrTopicList.add(mrTopic);
    }
    data.setTopics(new MetadataResponseData.MetadataResponseTopicCollection(mrTopicList.listIterator()));
    long deadline = 0L;
    short version = 0;
    // Wire the canned response into the operation context consumed by the benchmark.
    context = new MetadataOperationContext<>(topics, new ListOffsetsOptions(), deadline, futures);
    context.setResponse(Optional.of(new MetadataResponse(data, version)));
    AdminClientUnitTestEnv adminEnv = new AdminClientUnitTestEnv(mockCluster());
    admin = (KafkaAdminClient) adminEnv.adminClient();
}
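All of these snippets rely on a small surface of org.apache.kafka.common.Uuid. A minimal, self-contained sketch of that surface (the class and main method here are invented for illustration; the Uuid calls are the real ones used above):

import org.apache.kafka.common.Uuid;

public class UuidRoundTrip {
    public static void main(String[] args) {
        // A random, non-reserved 128-bit topic ID, as the setup above generates per topic.
        Uuid id = Uuid.randomUuid();
        // Uuid renders as a URL-safe base64 string, not the dashed java.util.UUID form.
        String encoded = id.toString();
        // fromString() restores the identical value, so IDs survive a text round trip.
        Uuid decoded = Uuid.fromString(encoded);
        System.out.println(encoded + " round-trips: " + id.equals(decoded));
    }
}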
Use of org.apache.kafka.common.Uuid in project kafka by apache.
The class FetchRequestBenchmark, method setup.
@Setup(Level.Trial)
public void setup() {
    this.fetchData = new HashMap<>();
    this.topicNames = new HashMap<>();
    for (int topicIdx = 0; topicIdx < topicCount; topicIdx++) {
        // Use a random Uuid string as the topic name and a second Uuid as its topic ID.
        String topic = Uuid.randomUuid().toString();
        Uuid id = Uuid.randomUuid();
        topicNames.put(id, topic);
        for (int partitionId = 0; partitionId < partitionCount; partitionId++) {
            FetchRequest.PartitionData partitionData =
                new FetchRequest.PartitionData(id, 0, 0, 4096, Optional.empty());
            fetchData.put(new TopicPartition(topic, partitionId), partitionData);
        }
    }
    this.header = new RequestHeader(ApiKeys.FETCH, ApiKeys.FETCH.latestVersion(), "jmh-benchmark", 100);
    this.consumerRequest = FetchRequest.Builder
        .forConsumer(ApiKeys.FETCH.latestVersion(), 0, 0, fetchData)
        .build(ApiKeys.FETCH.latestVersion());
    this.replicaRequest = FetchRequest.Builder
        .forReplica(ApiKeys.FETCH.latestVersion(), 1, 0, 0, fetchData)
        .build(ApiKeys.FETCH.latestVersion());
    this.requestBuffer = this.consumerRequest.serialize();
}
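To try the request-building path outside JMH, here is a pared-down sketch of the same consumer fetch construction for a single partition (the topic name and maxBytes value are placeholders; maxWait and minBytes of 0 mirror the benchmark, not a production consumer; the calls mirror those in the snippet above):

import java.util.LinkedHashMap;
import java.util.Optional;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.requests.FetchRequest;

public class SingleFetchSketch {
    public static void main(String[] args) {
        Uuid topicId = Uuid.randomUuid();
        LinkedHashMap<TopicPartition, FetchRequest.PartitionData> fetchData = new LinkedHashMap<>();
        // fetchOffset = 0, logStartOffset = 0, maxBytes = 4096, no leader epoch.
        fetchData.put(new TopicPartition("demo-topic", 0),
            new FetchRequest.PartitionData(topicId, 0, 0, 4096, Optional.empty()));
        FetchRequest request = FetchRequest.Builder
            .forConsumer(ApiKeys.FETCH.latestVersion(), 0, 0, fetchData)
            .build(ApiKeys.FETCH.latestVersion());
        System.out.println("serialized fetch request bytes: " + request.serialize().remaining());
    }
}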
Use of org.apache.kafka.common.Uuid in project kafka by apache.
The class FetchSessionBenchmark, method setUp.
@Setup(Level.Trial)
public void setUp() {
    fetches = new LinkedHashMap<>();
    handler = new FetchSessionHandler(LOG_CONTEXT, 1);
    topicIds = new HashMap<>();
    FetchSessionHandler.Builder builder = handler.newBuilder();
    Uuid id = Uuid.randomUuid();
    topicIds.put("foo", id);
    LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> respMap = new LinkedHashMap<>();
    for (int i = 0; i < partitionCount; i++) {
        TopicPartition tp = new TopicPartition("foo", i);
        FetchRequest.PartitionData partitionData = new FetchRequest.PartitionData(id, 0, 0, 200, Optional.empty());
        fetches.put(tp, partitionData);
        builder.add(tp, partitionData);
        respMap.put(new TopicIdPartition(id, tp),
            new FetchResponseData.PartitionData()
                .setPartitionIndex(tp.partition())
                .setLastStableOffset(0)
                .setLogStartOffset(0));
    }
    builder.build();
    // Build and handle an initial response so that the next fetch will be incremental.
    handler.handleResponse(FetchResponse.of(Errors.NONE, 0, 1, respMap), ApiKeys.FETCH.latestVersion());
    int counter = 0;
    for (TopicPartition topicPartition : new ArrayList<>(fetches.keySet())) {
        if (updatedPercentage != 0 && counter % (100 / updatedPercentage) == 0) {
            // Move the entry to the end of the LinkedHashMap and advance its fetch and
            // log start offsets, so updatedPercentage of partitions look changed to the session.
            fetches.remove(topicPartition);
            fetches.put(topicPartition, new FetchRequest.PartitionData(id, 50, 40, 200, Optional.empty()));
        }
        counter++;
    }
}
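What the benchmark then measures is how FetchSessionHandler folds this mutated map into an incremental fetch. A hedged sketch of that step, continuing from the fields set up above (the accessor names are those of FetchSessionHandler.FetchRequestData):

// Rebuild from the mutated map; the handler diffs it against the established session.
FetchSessionHandler.Builder incremental = handler.newBuilder();
fetches.forEach(incremental::add);
FetchSessionHandler.FetchRequestData requestData = incremental.build();
// Only partitions whose PartitionData changed since the initial response are sent;
// the rest are implied by the session state already held on the broker.
System.out.println("partitions to send: " + requestData.toSend().size());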
Use of org.apache.kafka.common.Uuid in project kafka by apache.
The class KafkaAdminClientTest, method testDeleteTopicsRetryThrottlingExceptionWhenEnabled.
@Test
public void testDeleteTopicsRetryThrottlingExceptionWhenEnabled() throws Exception {
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        // First attempt: topic1 succeeds, topic2 is throttled, topic3 fails permanently.
        env.kafkaClient().prepareResponse(expectDeleteTopicsRequestWithTopics("topic1", "topic2", "topic3"),
            prepareDeleteTopicsResponse(1000,
                deletableTopicResult("topic1", Errors.NONE),
                deletableTopicResult("topic2", Errors.THROTTLING_QUOTA_EXCEEDED),
                deletableTopicResult("topic3", Errors.TOPIC_ALREADY_EXISTS)));
        // topic2 is retried until the quota violation clears.
        env.kafkaClient().prepareResponse(expectDeleteTopicsRequestWithTopics("topic2"),
            prepareDeleteTopicsResponse(1000, deletableTopicResult("topic2", Errors.THROTTLING_QUOTA_EXCEEDED)));
        env.kafkaClient().prepareResponse(expectDeleteTopicsRequestWithTopics("topic2"),
            prepareDeleteTopicsResponse(0, deletableTopicResult("topic2", Errors.NONE)));
        DeleteTopicsResult result = env.adminClient().deleteTopics(asList("topic1", "topic2", "topic3"),
            new DeleteTopicsOptions().retryOnQuotaViolation(true));
        assertNull(result.topicNameValues().get("topic1").get());
        assertNull(result.topicNameValues().get("topic2").get());
        TestUtils.assertFutureThrows(result.topicNameValues().get("topic3"), TopicExistsException.class);
        // Same scenario, addressing the topics by ID instead of by name.
        Uuid topicId1 = Uuid.randomUuid();
        Uuid topicId2 = Uuid.randomUuid();
        Uuid topicId3 = Uuid.randomUuid();
        env.kafkaClient().prepareResponse(expectDeleteTopicsRequestWithTopicIds(topicId1, topicId2, topicId3),
            prepareDeleteTopicsResponse(1000,
                deletableTopicResultWithId(topicId1, Errors.NONE),
                deletableTopicResultWithId(topicId2, Errors.THROTTLING_QUOTA_EXCEEDED),
                deletableTopicResultWithId(topicId3, Errors.UNKNOWN_TOPIC_ID)));
        env.kafkaClient().prepareResponse(expectDeleteTopicsRequestWithTopicIds(topicId2),
            prepareDeleteTopicsResponse(1000, deletableTopicResultWithId(topicId2, Errors.THROTTLING_QUOTA_EXCEEDED)));
        env.kafkaClient().prepareResponse(expectDeleteTopicsRequestWithTopicIds(topicId2),
            prepareDeleteTopicsResponse(0, deletableTopicResultWithId(topicId2, Errors.NONE)));
        DeleteTopicsResult resultIds = env.adminClient().deleteTopics(
            TopicCollection.ofTopicIds(asList(topicId1, topicId2, topicId3)),
            new DeleteTopicsOptions().retryOnQuotaViolation(true));
        assertNull(resultIds.topicIdValues().get(topicId1).get());
        assertNull(resultIds.topicIdValues().get(topicId2).get());
        TestUtils.assertFutureThrows(resultIds.topicIdValues().get(topicId3), UnknownTopicIdException.class);
    }
}
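Outside the mocked environment, the by-ID deletion path looks roughly like this against a live cluster. This is a sketch only: the bootstrap address and topic name are placeholders, and the topic's Uuid is resolved through describeTopics() rather than hard-coded (allTopicNames() assumes a reasonably recent Admin client):

import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.DeleteTopicsOptions;
import org.apache.kafka.common.TopicCollection;
import org.apache.kafka.common.Uuid;

public class DeleteByIdSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        try (Admin admin = Admin.create(props)) {
            // Resolve the topic's Uuid from its name first.
            Uuid topicId = admin.describeTopics(Collections.singleton("demo-topic"))
                .allTopicNames().get().get("demo-topic").topicId();
            admin.deleteTopics(TopicCollection.ofTopicIds(Collections.singleton(topicId)),
                    new DeleteTopicsOptions().retryOnQuotaViolation(true))
                .all().get(); // blocks until deletion (with quota retries) completes
        }
    }
}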
Use of org.apache.kafka.common.Uuid in project kafka by apache.
The class MetadataTest, method testMetadataMerge.
@Test
public void testMetadataMerge() {
    Time time = new MockTime();
    Map<String, Uuid> topicIds = new HashMap<>();
    final AtomicReference<Set<String>> retainTopics = new AtomicReference<>(new HashSet<>());
    metadata = new Metadata(refreshBackoffMs, metadataExpireMs, new LogContext(), new ClusterResourceListeners()) {
        @Override
        protected boolean retainTopic(String topic, boolean isInternal, long nowMs) {
            return retainTopics.get().contains(topic);
        }
    };
    // Initialize a metadata instance with two topic variants, "old" and "keep". Both will be retained.
    String oldClusterId = "oldClusterId";
    int oldNodes = 2;
    Map<String, Errors> oldTopicErrors = new HashMap<>();
    oldTopicErrors.put("oldInvalidTopic", Errors.INVALID_TOPIC_EXCEPTION);
    oldTopicErrors.put("keepInvalidTopic", Errors.INVALID_TOPIC_EXCEPTION);
    oldTopicErrors.put("oldUnauthorizedTopic", Errors.TOPIC_AUTHORIZATION_FAILED);
    oldTopicErrors.put("keepUnauthorizedTopic", Errors.TOPIC_AUTHORIZATION_FAILED);
    Map<String, Integer> oldTopicPartitionCounts = new HashMap<>();
    oldTopicPartitionCounts.put("oldValidTopic", 2);
    oldTopicPartitionCounts.put("keepValidTopic", 3);
    retainTopics.set(Utils.mkSet("oldInvalidTopic", "keepInvalidTopic", "oldUnauthorizedTopic",
        "keepUnauthorizedTopic", "oldValidTopic", "keepValidTopic"));
    topicIds.put("oldValidTopic", Uuid.randomUuid());
    topicIds.put("keepValidTopic", Uuid.randomUuid());
    MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWithIds(oldClusterId, oldNodes,
        oldTopicErrors, oldTopicPartitionCounts, _tp -> 100, topicIds);
    metadata.updateWithCurrentRequestVersion(metadataResponse, true, time.milliseconds());
    Map<String, Uuid> metadataTopicIds1 = metadata.topicIds();
    retainTopics.get().forEach(topic -> assertEquals(metadataTopicIds1.get(topic), topicIds.get(topic)));
    // Verify the cluster view produced by the first update.
    Cluster cluster = metadata.fetch();
    assertEquals(cluster.clusterResource().clusterId(), oldClusterId);
    assertEquals(cluster.nodes().size(), oldNodes);
    assertEquals(cluster.invalidTopics(), new HashSet<>(Arrays.asList("oldInvalidTopic", "keepInvalidTopic")));
    assertEquals(cluster.unauthorizedTopics(), new HashSet<>(Arrays.asList("oldUnauthorizedTopic", "keepUnauthorizedTopic")));
    assertEquals(cluster.topics(), new HashSet<>(Arrays.asList("oldValidTopic", "keepValidTopic")));
    assertEquals(cluster.partitionsForTopic("oldValidTopic").size(), 2);
    assertEquals(cluster.partitionsForTopic("keepValidTopic").size(), 3);
    assertEquals(new HashSet<>(cluster.topicIds()), new HashSet<>(topicIds.values()));
    // Update the metadata to add a new topic variant, "new", which will be retained with "keep".
    // Note this means that all of the "old" topics should be dropped.
    String newClusterId = "newClusterId";
    int newNodes = oldNodes + 1;
    Map<String, Errors> newTopicErrors = new HashMap<>();
    newTopicErrors.put("newInvalidTopic", Errors.INVALID_TOPIC_EXCEPTION);
    newTopicErrors.put("newUnauthorizedTopic", Errors.TOPIC_AUTHORIZATION_FAILED);
    Map<String, Integer> newTopicPartitionCounts = new HashMap<>();
    newTopicPartitionCounts.put("keepValidTopic", 2);
    newTopicPartitionCounts.put("newValidTopic", 4);
    retainTopics.set(Utils.mkSet("keepInvalidTopic", "newInvalidTopic", "keepUnauthorizedTopic",
        "newUnauthorizedTopic", "keepValidTopic", "newValidTopic"));
    topicIds.put("newValidTopic", Uuid.randomUuid());
    metadataResponse = RequestTestUtils.metadataUpdateWithIds(newClusterId, newNodes,
        newTopicErrors, newTopicPartitionCounts, _tp -> 200, topicIds);
    metadata.updateWithCurrentRequestVersion(metadataResponse, true, time.milliseconds());
    topicIds.remove("oldValidTopic");
    Map<String, Uuid> metadataTopicIds2 = metadata.topicIds();
    retainTopics.get().forEach(topic -> assertEquals(metadataTopicIds2.get(topic), topicIds.get(topic)));
    assertNull(metadataTopicIds2.get("oldValidTopic"));
    cluster = metadata.fetch();
    assertEquals(cluster.clusterResource().clusterId(), newClusterId);
    assertEquals(cluster.nodes().size(), newNodes);
    assertEquals(cluster.invalidTopics(), new HashSet<>(Arrays.asList("keepInvalidTopic", "newInvalidTopic")));
    assertEquals(cluster.unauthorizedTopics(), new HashSet<>(Arrays.asList("keepUnauthorizedTopic", "newUnauthorizedTopic")));
    assertEquals(cluster.topics(), new HashSet<>(Arrays.asList("keepValidTopic", "newValidTopic")));
    assertEquals(cluster.partitionsForTopic("keepValidTopic").size(), 2);
    assertEquals(cluster.partitionsForTopic("newValidTopic").size(), 4);
    assertEquals(new HashSet<>(cluster.topicIds()), new HashSet<>(topicIds.values()));
    // Perform another metadata update, but this time all topic metadata should be cleared.
    retainTopics.set(Collections.emptySet());
    metadataResponse = RequestTestUtils.metadataUpdateWithIds(newClusterId, newNodes,
        newTopicErrors, newTopicPartitionCounts, _tp -> 300, topicIds);
    metadata.updateWithCurrentRequestVersion(metadataResponse, true, time.milliseconds());
    Map<String, Uuid> metadataTopicIds3 = metadata.topicIds();
    topicIds.forEach((topicName, topicId) -> assertNull(metadataTopicIds3.get(topicName)));
    cluster = metadata.fetch();
    assertEquals(cluster.clusterResource().clusterId(), newClusterId);
    assertEquals(cluster.nodes().size(), newNodes);
    assertEquals(cluster.invalidTopics(), Collections.emptySet());
    assertEquals(cluster.unauthorizedTopics(), Collections.emptySet());
    assertEquals(cluster.topics(), Collections.emptySet());
    assertTrue(cluster.topicIds().isEmpty());
}
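The merge semantics verified above are what back the client-side name/ID lookups. A short hedged sketch of querying the merged view (Cluster.topicId() and Cluster.topicName() are the accessors on org.apache.kafka.common.Cluster; the topic name is one from the test):

Cluster merged = metadata.fetch();
// Returns Uuid.ZERO_UUID for topics the merge dropped or never knew about.
Uuid keepId = merged.topicId("keepValidTopic");
// The reverse lookup yields null once an ID has been merged away.
String name = merged.topicName(keepId);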