Use of org.apache.kafka.common.Uuid in project kafka by apache: class MetadataTest, method testEpochUpdateOnChangedTopicIds.
@Test
public void testEpochUpdateOnChangedTopicIds() {
    TopicPartition tp = new TopicPartition("topic-1", 0);
    Map<String, Uuid> topicIds = Collections.singletonMap("topic-1", Uuid.randomUuid());

    MetadataResponse metadataResponse = emptyMetadataResponse();
    metadata.updateWithCurrentRequestVersion(metadataResponse, false, 0L);

    // Start with a topic with no topic ID
    metadataResponse = RequestTestUtils.metadataUpdateWith("dummy", 1, Collections.emptyMap(),
        Collections.singletonMap("topic-1", 1), _tp -> 100);
    metadata.updateWithCurrentRequestVersion(metadataResponse, false, 1L);
    assertEquals(Optional.of(100), metadata.lastSeenLeaderEpoch(tp));

    // If the older topic ID is null, accept the new topic ID and take its epoch as the last seen leader epoch
    metadataResponse = RequestTestUtils.metadataUpdateWithIds("dummy", 1, Collections.emptyMap(),
        Collections.singletonMap("topic-1", 1), _tp -> 10, topicIds);
    metadata.updateWithCurrentRequestVersion(metadataResponse, false, 2L);
    assertEquals(Optional.of(10), metadata.lastSeenLeaderEpoch(tp));

    // Don't update if the same topic ID and epoch are seen again
    metadataResponse = RequestTestUtils.metadataUpdateWithIds("dummy", 1, Collections.emptyMap(),
        Collections.singletonMap("topic-1", 1), _tp -> 10, topicIds);
    metadata.updateWithCurrentRequestVersion(metadataResponse, false, 3L);
    assertEquals(Optional.of(10), metadata.lastSeenLeaderEpoch(tp));

    // Update if we see a newer epoch
    metadataResponse = RequestTestUtils.metadataUpdateWithIds("dummy", 1, Collections.emptyMap(),
        Collections.singletonMap("topic-1", 1), _tp -> 12, topicIds);
    metadata.updateWithCurrentRequestVersion(metadataResponse, false, 4L);
    assertEquals(Optional.of(12), metadata.lastSeenLeaderEpoch(tp));

    // We should also update if we see a new topic ID, even if the epoch is lower
    Map<String, Uuid> newTopicIds = Collections.singletonMap("topic-1", Uuid.randomUuid());
    metadataResponse = RequestTestUtils.metadataUpdateWithIds("dummy", 1, Collections.emptyMap(),
        Collections.singletonMap("topic-1", 1), _tp -> 3, newTopicIds);
    metadata.updateWithCurrentRequestVersion(metadataResponse, false, 5L);
    assertEquals(Optional.of(3), metadata.lastSeenLeaderEpoch(tp));

    // Finally, update when the topic ID is new and the epoch is higher
    Map<String, Uuid> newTopicIds2 = Collections.singletonMap("topic-1", Uuid.randomUuid());
    metadataResponse = RequestTestUtils.metadataUpdateWithIds("dummy", 1, Collections.emptyMap(),
        Collections.singletonMap("topic-1", 1), _tp -> 20, newTopicIds2);
    metadata.updateWithCurrentRequestVersion(metadataResponse, false, 6L);
    assertEquals(Optional.of(20), metadata.lastSeenLeaderEpoch(tp));
}
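For context, the Uuid calls this test leans on are small and self-contained. The following standalone sketch is not from the Kafka test suite; the class name is illustrative and it only assumes the kafka-clients jar on the classpath. It shows the pieces of org.apache.kafka.common.Uuid used above: generating a random topic ID, comparing against the reserved zero ID, and round-tripping an ID through its string form.

import org.apache.kafka.common.Uuid;

public class UuidBasicsExample {
    public static void main(String[] args) {
        // Generate a fresh ID, as the test does for each new topic ID it simulates
        Uuid topicId = Uuid.randomUuid();

        // Uuid.ZERO_UUID is the reserved "no ID" value used when a topic has no ID assigned
        System.out.println(topicId.equals(Uuid.ZERO_UUID)); // false for a freshly generated ID

        // toString()/fromString() round-trip the ID through its textual form
        String encoded = topicId.toString();
        Uuid decoded = Uuid.fromString(encoded);
        System.out.println(topicId.equals(decoded)); // true
    }
}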
Use of org.apache.kafka.common.Uuid in project kafka by apache: class MetadataTest, method testMetadataMergeOnIdDowngrade.
@Test
public void testMetadataMergeOnIdDowngrade() {
    Time time = new MockTime();
    Map<String, Uuid> topicIds = new HashMap<>();

    final AtomicReference<Set<String>> retainTopics = new AtomicReference<>(new HashSet<>());
    metadata = new Metadata(refreshBackoffMs, metadataExpireMs, new LogContext(), new ClusterResourceListeners()) {
        @Override
        protected boolean retainTopic(String topic, boolean isInternal, long nowMs) {
            return retainTopics.get().contains(topic);
        }
    };

    // Initialize a metadata instance with two topics. Both will be retained.
    String clusterId = "clusterId";
    int nodes = 2;
    Map<String, Integer> topicPartitionCounts = new HashMap<>();
    topicPartitionCounts.put("validTopic1", 2);
    topicPartitionCounts.put("validTopic2", 3);
    retainTopics.set(Utils.mkSet("validTopic1", "validTopic2"));
    topicIds.put("validTopic1", Uuid.randomUuid());
    topicIds.put("validTopic2", Uuid.randomUuid());
    MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWithIds(clusterId, nodes,
        Collections.emptyMap(), topicPartitionCounts, _tp -> 100, topicIds);
    metadata.updateWithCurrentRequestVersion(metadataResponse, true, time.milliseconds());
    Map<String, Uuid> metadataTopicIds1 = metadata.topicIds();
    retainTopics.get().forEach(topic -> assertEquals(metadataTopicIds1.get(topic), topicIds.get(topic)));

    // Try removing the topic ID from validTopic1 (simulating a metadata response from a broker using an older IBP)
    topicIds.remove("validTopic1");
    metadataResponse = RequestTestUtils.metadataUpdateWithIds(clusterId, nodes,
        Collections.emptyMap(), topicPartitionCounts, _tp -> 200, topicIds);
    metadata.updateWithCurrentRequestVersion(metadataResponse, true, time.milliseconds());
    Map<String, Uuid> metadataTopicIds2 = metadata.topicIds();
    retainTopics.get().forEach(topic -> assertEquals(metadataTopicIds2.get(topic), topicIds.get(topic)));

    Cluster cluster = metadata.fetch();
    // We still have the topic, but it just doesn't have an ID.
    assertEquals(Utils.mkSet("validTopic1", "validTopic2"), cluster.topics());
    assertEquals(2, cluster.partitionsForTopic("validTopic1").size());
    assertEquals(new HashSet<>(topicIds.values()), new HashSet<>(cluster.topicIds()));
    assertEquals(Uuid.ZERO_UUID, cluster.topicId("validTopic1"));
}
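The final assertion is the case an application-side caller of Cluster has to be ready for: after the downgrade the topic is still present, but its ID reads as Uuid.ZERO_UUID. A minimal hedged sketch of that guard follows; the helper class and method names are illustrative and not part of the Kafka API.

import java.util.Optional;

import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.Uuid;

final class TopicIdLookup {
    // Treat the reserved zero ID reported by Cluster.topicId() as "no ID assigned"
    // rather than as a real identifier.
    static Optional<Uuid> topicIdOrEmpty(Cluster cluster, String topic) {
        Uuid id = cluster.topicId(topic);
        return Uuid.ZERO_UUID.equals(id) ? Optional.empty() : Optional.of(id);
    }
}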
Use of org.apache.kafka.common.Uuid in project kafka by apache: class KafkaAdminClientTest, method testDeleteTopicsRetryThrottlingExceptionWhenEnabledUntilRequestTimeOut.
@Test
public void testDeleteTopicsRetryThrottlingExceptionWhenEnabledUntilRequestTimeOut() throws Exception {
    long defaultApiTimeout = 60000;
    MockTime time = new MockTime();
    try (AdminClientUnitTestEnv env = mockClientEnv(time,
            AdminClientConfig.DEFAULT_API_TIMEOUT_MS_CONFIG, String.valueOf(defaultApiTimeout))) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().prepareResponse(
            expectDeleteTopicsRequestWithTopics("topic1", "topic2", "topic3"),
            prepareDeleteTopicsResponse(1000,
                deletableTopicResult("topic1", Errors.NONE),
                deletableTopicResult("topic2", Errors.THROTTLING_QUOTA_EXCEEDED),
                deletableTopicResult("topic3", Errors.TOPIC_ALREADY_EXISTS)));
        env.kafkaClient().prepareResponse(
            expectDeleteTopicsRequestWithTopics("topic2"),
            prepareDeleteTopicsResponse(1000,
                deletableTopicResult("topic2", Errors.THROTTLING_QUOTA_EXCEEDED)));
        DeleteTopicsResult result = env.adminClient().deleteTopics(
            asList("topic1", "topic2", "topic3"),
            new DeleteTopicsOptions().retryOnQuotaViolation(true));

        // Wait until the prepared responses have been consumed
        TestUtils.waitForCondition(() -> env.kafkaClient().numAwaitingResponses() == 0,
            "Failed awaiting DeleteTopics requests");
        // Wait until the next request is sent out
        TestUtils.waitForCondition(() -> env.kafkaClient().inFlightRequestCount() == 1,
            "Failed awaiting next DeleteTopics request");
        // Advance time past the default api timeout to time out the inflight request
        time.sleep(defaultApiTimeout + 1);

        assertNull(result.topicNameValues().get("topic1").get());
        ThrottlingQuotaExceededException e = TestUtils.assertFutureThrows(
            result.topicNameValues().get("topic2"), ThrottlingQuotaExceededException.class);
        assertEquals(0, e.throttleTimeMs());
        TestUtils.assertFutureThrows(result.topicNameValues().get("topic3"), TopicExistsException.class);

        // With topic IDs
        Uuid topicId1 = Uuid.randomUuid();
        Uuid topicId2 = Uuid.randomUuid();
        Uuid topicId3 = Uuid.randomUuid();
        env.kafkaClient().prepareResponse(
            expectDeleteTopicsRequestWithTopicIds(topicId1, topicId2, topicId3),
            prepareDeleteTopicsResponse(1000,
                deletableTopicResultWithId(topicId1, Errors.NONE),
                deletableTopicResultWithId(topicId2, Errors.THROTTLING_QUOTA_EXCEEDED),
                deletableTopicResultWithId(topicId3, Errors.UNKNOWN_TOPIC_ID)));
        env.kafkaClient().prepareResponse(
            expectDeleteTopicsRequestWithTopicIds(topicId2),
            prepareDeleteTopicsResponse(1000,
                deletableTopicResultWithId(topicId2, Errors.THROTTLING_QUOTA_EXCEEDED)));
        DeleteTopicsResult resultIds = env.adminClient().deleteTopics(
            TopicCollection.ofTopicIds(asList(topicId1, topicId2, topicId3)),
            new DeleteTopicsOptions().retryOnQuotaViolation(true));

        // Wait until the prepared responses have been consumed
        TestUtils.waitForCondition(() -> env.kafkaClient().numAwaitingResponses() == 0,
            "Failed awaiting DeleteTopics requests");
        // Wait until the next request is sent out
        TestUtils.waitForCondition(() -> env.kafkaClient().inFlightRequestCount() == 1,
            "Failed awaiting next DeleteTopics request");
        // Advance time past the default api timeout to time out the inflight request
        time.sleep(defaultApiTimeout + 1);

        assertNull(resultIds.topicIdValues().get(topicId1).get());
        e = TestUtils.assertFutureThrows(resultIds.topicIdValues().get(topicId2), ThrottlingQuotaExceededException.class);
        assertEquals(0, e.throttleTimeMs());
        TestUtils.assertFutureThrows(resultIds.topicIdValues().get(topicId3), UnknownTopicIdException.class);
    }
}
Use of org.apache.kafka.common.Uuid in project kafka by apache: class KafkaAdminClientTest, method testDeleteTopicsPartialResponse.
@Test
public void testDeleteTopicsPartialResponse() throws Exception {
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().prepareResponse(
            expectDeleteTopicsRequestWithTopics("myTopic", "myOtherTopic"),
            prepareDeleteTopicsResponse(1000, deletableTopicResult("myTopic", Errors.NONE)));
        DeleteTopicsResult result = env.adminClient().deleteTopics(
            asList("myTopic", "myOtherTopic"), new DeleteTopicsOptions());
        result.topicNameValues().get("myTopic").get();
        TestUtils.assertFutureThrows(result.topicNameValues().get("myOtherTopic"), ApiException.class);

        // With topic IDs
        Uuid topicId1 = Uuid.randomUuid();
        Uuid topicId2 = Uuid.randomUuid();
        env.kafkaClient().prepareResponse(
            expectDeleteTopicsRequestWithTopicIds(topicId1, topicId2),
            prepareDeleteTopicsResponse(1000, deletableTopicResultWithId(topicId1, Errors.NONE)));
        DeleteTopicsResult resultIds = env.adminClient().deleteTopics(
            TopicCollection.ofTopicIds(asList(topicId1, topicId2)), new DeleteTopicsOptions());
        resultIds.topicIdValues().get(topicId1).get();
        TestUtils.assertFutureThrows(resultIds.topicIdValues().get(topicId2), ApiException.class);
    }
}
Use of org.apache.kafka.common.Uuid in project kafka by apache: class KafkaAdminClientTest, method testDeleteTopicsDontRetryThrottlingExceptionWhenDisabled.
@Test
public void testDeleteTopicsDontRetryThrottlingExceptionWhenDisabled() throws Exception {
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().prepareResponse(
            expectDeleteTopicsRequestWithTopics("topic1", "topic2", "topic3"),
            prepareDeleteTopicsResponse(1000,
                deletableTopicResult("topic1", Errors.NONE),
                deletableTopicResult("topic2", Errors.THROTTLING_QUOTA_EXCEEDED),
                deletableTopicResult("topic3", Errors.TOPIC_ALREADY_EXISTS)));
        DeleteTopicsResult result = env.adminClient().deleteTopics(
            asList("topic1", "topic2", "topic3"),
            new DeleteTopicsOptions().retryOnQuotaViolation(false));
        assertNull(result.topicNameValues().get("topic1").get());
        ThrottlingQuotaExceededException e = TestUtils.assertFutureThrows(
            result.topicNameValues().get("topic2"), ThrottlingQuotaExceededException.class);
        assertEquals(1000, e.throttleTimeMs());
        TestUtils.assertFutureError(result.topicNameValues().get("topic3"), TopicExistsException.class);

        // With topic IDs
        Uuid topicId1 = Uuid.randomUuid();
        Uuid topicId2 = Uuid.randomUuid();
        Uuid topicId3 = Uuid.randomUuid();
        env.kafkaClient().prepareResponse(
            expectDeleteTopicsRequestWithTopicIds(topicId1, topicId2, topicId3),
            prepareDeleteTopicsResponse(1000,
                deletableTopicResultWithId(topicId1, Errors.NONE),
                deletableTopicResultWithId(topicId2, Errors.THROTTLING_QUOTA_EXCEEDED),
                deletableTopicResultWithId(topicId3, Errors.UNKNOWN_TOPIC_ID)));
        DeleteTopicsResult resultIds = env.adminClient().deleteTopics(
            TopicCollection.ofTopicIds(asList(topicId1, topicId2, topicId3)),
            new DeleteTopicsOptions().retryOnQuotaViolation(false));
        assertNull(resultIds.topicIdValues().get(topicId1).get());
        e = TestUtils.assertFutureThrows(resultIds.topicIdValues().get(topicId2), ThrottlingQuotaExceededException.class);
        assertEquals(1000, e.throttleTimeMs());
        TestUtils.assertFutureError(resultIds.topicIdValues().get(topicId3), UnknownTopicIdException.class);
    }
}
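Outside the mocked test environment, the same topic-ID code path is reached through Admin.deleteTopics with a TopicCollection. Below is a hedged application-level sketch, assuming Kafka 3.x clients, a broker at localhost:9092, and a topic ID passed on the command line; the class name, address, and error handling are illustrative and not taken from the tests above.

import java.util.Collections;
import java.util.Properties;
import java.util.concurrent.ExecutionException;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.DeleteTopicsOptions;
import org.apache.kafka.clients.admin.DeleteTopicsResult;
import org.apache.kafka.common.TopicCollection;
import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.errors.ThrottlingQuotaExceededException;

public class DeleteTopicsByIdExample {
    public static void main(String[] args) throws InterruptedException {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // illustrative address

        Uuid topicId = Uuid.fromString(args[0]); // topic ID supplied by the caller

        try (Admin admin = Admin.create(props)) {
            // As in the last test above, do not retry automatically on quota violations
            DeleteTopicsResult result = admin.deleteTopics(
                TopicCollection.ofTopicIds(Collections.singletonList(topicId)),
                new DeleteTopicsOptions().retryOnQuotaViolation(false));
            try {
                result.topicIdValues().get(topicId).get();
                System.out.println("Deleted topic " + topicId);
            } catch (ExecutionException e) {
                if (e.getCause() instanceof ThrottlingQuotaExceededException) {
                    ThrottlingQuotaExceededException quota = (ThrottlingQuotaExceededException) e.getCause();
                    System.err.println("Throttled by the broker; retry after " + quota.throttleTimeMs() + " ms");
                } else {
                    System.err.println("Delete failed: " + e.getCause());
                }
            }
        }
    }
}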