Use of org.apache.kafka.common.message.DeleteTopicsRequestData in project kafka by apache.
From the class DeleteTopicsRequestTest, method testDeleteTopicsRequestNumTopics:
@Test
public void testDeleteTopicsRequestNumTopics() {
    for (short version : DELETE_TOPICS.allVersions()) {
        DeleteTopicsRequest request = new DeleteTopicsRequest.Builder(
            new DeleteTopicsRequestData()
                .setTopicNames(Arrays.asList("topic1", "topic2"))
                .setTimeoutMs(1000)).build(version);
        DeleteTopicsRequest serializedRequest = DeleteTopicsRequest.parse(request.serialize(), version);
        // createDeleteTopicsRequest sets 2 topics
        assertEquals(2, request.numberOfTopics());
        assertEquals(2, serializedRequest.numberOfTopics());

        // Test using IDs
        if (version >= 6) {
            DeleteTopicsRequest requestWithIds = new DeleteTopicsRequest.Builder(
                new DeleteTopicsRequestData().setTopics(Arrays.asList(
                    new DeleteTopicsRequestData.DeleteTopicState().setTopicId(Uuid.randomUuid()),
                    new DeleteTopicsRequestData.DeleteTopicState().setTopicId(Uuid.randomUuid())))).build(version);
            DeleteTopicsRequest serializedRequestWithIds = DeleteTopicsRequest.parse(requestWithIds.serialize(), version);
            assertEquals(2, requestWithIds.numberOfTopics());
            assertEquals(2, serializedRequestWithIds.numberOfTopics());
        }
    }
}
Use of org.apache.kafka.common.message.DeleteTopicsRequestData in project kafka by apache.
From the class KafkaAdminClient, method getDeleteTopicsCall:
private Call getDeleteTopicsCall(final DeleteTopicsOptions options,
                                 final Map<String, KafkaFutureImpl<Void>> futures,
                                 final List<String> topics,
                                 final Map<String, ThrottlingQuotaExceededException> quotaExceededExceptions,
                                 final long now,
                                 final long deadline) {
    return new Call("deleteTopics", deadline, new ControllerNodeProvider()) {

        @Override
        DeleteTopicsRequest.Builder createRequest(int timeoutMs) {
            return new DeleteTopicsRequest.Builder(
                new DeleteTopicsRequestData()
                    .setTopicNames(topics)
                    .setTimeoutMs(timeoutMs));
        }

        @Override
        void handleResponse(AbstractResponse abstractResponse) {
            // Check for controller change
            handleNotControllerError(abstractResponse);
            // Handle server responses for particular topics.
            final DeleteTopicsResponse response = (DeleteTopicsResponse) abstractResponse;
            final List<String> retryTopics = new ArrayList<>();
            final Map<String, ThrottlingQuotaExceededException> retryTopicQuotaExceededExceptions = new HashMap<>();
            for (DeletableTopicResult result : response.data().responses()) {
                KafkaFutureImpl<Void> future = futures.get(result.name());
                if (future == null) {
                    log.warn("Server response mentioned unknown topic {}", result.name());
                } else {
                    ApiError error = new ApiError(result.errorCode(), result.errorMessage());
                    if (error.isFailure()) {
                        if (error.is(Errors.THROTTLING_QUOTA_EXCEEDED)) {
                            ThrottlingQuotaExceededException quotaExceededException =
                                new ThrottlingQuotaExceededException(response.throttleTimeMs(), error.messageWithFallback());
                            if (options.shouldRetryOnQuotaViolation()) {
                                retryTopics.add(result.name());
                                retryTopicQuotaExceededExceptions.put(result.name(), quotaExceededException);
                            } else {
                                future.completeExceptionally(quotaExceededException);
                            }
                        } else {
                            future.completeExceptionally(error.exception());
                        }
                    } else {
                        future.complete(null);
                    }
                }
            }
            // If there are topics to retry, retry them; otherwise complete the unrealized futures.
            if (retryTopics.isEmpty()) {
                // The server should send back a response for every topic. But do a sanity check anyway.
                completeUnrealizedFutures(futures.entrySet().stream(),
                    topic -> "The controller response did not contain a result for topic " + topic);
            } else {
                final long now = time.milliseconds();
                final Call call = getDeleteTopicsCall(options, futures, retryTopics,
                    retryTopicQuotaExceededExceptions, now, deadline);
                runnable.call(call, now);
            }
        }

        @Override
        void handleFailure(Throwable throwable) {
            // If any topics were retried due to a quota exceeded exception, we propagate
            // the initial error back to the caller if the request timed out.
            maybeCompleteQuotaExceededException(options.shouldRetryOnQuotaViolation(), throwable,
                futures, quotaExceededExceptions, (int) (time.milliseconds() - now));
            // Fail all the other remaining futures
            completeAllExceptionally(futures.values(), throwable);
        }
    };
}
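For context, this internal Call backs the public Admin#deleteTopics API. Below is a minimal, hedged sketch of how a caller typically reaches this code path; the class name, bootstrap address, and topic name are placeholders, and error handling is omitted:

import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.DeleteTopicsOptions;
import org.apache.kafka.clients.admin.DeleteTopicsResult;

public class DeleteTopicsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder address
        try (Admin admin = Admin.create(props)) {
            // Opt out of the automatic retry on THROTTLING_QUOTA_EXCEEDED handled above.
            DeleteTopicsOptions options = new DeleteTopicsOptions().retryOnQuotaViolation(false);
            DeleteTopicsResult result = admin.deleteTopics(Collections.singletonList("topic1"), options);
            // all() completes only once every per-topic future has completed.
            result.all().get();
        }
    }
}

With retryOnQuotaViolation left at its default of true, quota errors are retried internally by the handleResponse logic shown above instead of being surfaced to the caller immediately.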
Use of org.apache.kafka.common.message.DeleteTopicsRequestData in project kafka by apache.
From the class DeleteTopicsRequestTest, method testTopicIdsField:
@Test
public void testTopicIdsField() {
    for (short version : DELETE_TOPICS.allVersions()) {
        // Check topic IDs are handled correctly. We should only use this field on versions 6+.
        Uuid topicId1 = Uuid.randomUuid();
        Uuid topicId2 = Uuid.randomUuid();
        List<Uuid> topicIds = Arrays.asList(topicId1, topicId2);
        DeleteTopicsRequest requestWithIds = new DeleteTopicsRequest.Builder(
            new DeleteTopicsRequestData().setTopics(Arrays.asList(
                new DeleteTopicsRequestData.DeleteTopicState().setTopicId(topicId1),
                new DeleteTopicsRequestData.DeleteTopicState().setTopicId(topicId2)))).build(version);

        if (version >= 6) {
            DeleteTopicsRequest requestWithIdsSerialized = DeleteTopicsRequest.parse(requestWithIds.serialize(), version);
            assertEquals(topicIds, requestWithIds.topicIds());
            assertEquals(topicIds, requestWithIdsSerialized.topicIds());

            // All topic names should be replaced with null
            requestWithIds.data().topics().forEach(topic -> assertNull(topic.name()));
            requestWithIdsSerialized.data().topics().forEach(topic -> assertNull(topic.name()));
        } else {
            // We should fail if version is less than 6.
            assertThrows(UnsupportedVersionException.class, () -> requestWithIds.serialize());
        }
    }
}
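Deletion by topic ID is also reachable through the Admin client on client and broker versions new enough to support topic IDs (DeleteTopics v6+). A hedged sketch; the class name, helper method, and topic name are illustrative, and the ID is resolved via a describeTopics lookup first:

import java.util.Collections;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.common.TopicCollection;
import org.apache.kafka.common.Uuid;

public class DeleteTopicByIdExample {
    // Sketch: resolve the topic's ID, then delete by ID rather than by name.
    static void deleteById(Admin admin, String name) throws Exception {
        Uuid topicId = admin.describeTopics(Collections.singletonList(name))
            .allTopicNames().get().get(name).topicId();
        admin.deleteTopics(TopicCollection.ofTopicIds(Collections.singletonList(topicId)))
            .all().get();
    }
}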
Use of org.apache.kafka.common.message.DeleteTopicsRequestData in project kafka by apache.
From the class DeleteTopicsRequestTest, method testNewTopicsField:
@Test
public void testNewTopicsField() {
    for (short version : DELETE_TOPICS.allVersions()) {
        String topic1 = "topic1";
        String topic2 = "topic2";
        List<String> topics = Arrays.asList(topic1, topic2);
        DeleteTopicsRequest requestWithNames = new DeleteTopicsRequest.Builder(
            new DeleteTopicsRequestData().setTopics(Arrays.asList(
                new DeleteTopicsRequestData.DeleteTopicState().setName(topic1),
                new DeleteTopicsRequestData.DeleteTopicState().setName(topic2)))).build(version);
        // Ensure we only use the new topics field on versions 6+.
        if (version >= 6) {
            DeleteTopicsRequest requestWithNamesSerialized = DeleteTopicsRequest.parse(requestWithNames.serialize(), version);
            assertEquals(topics, requestWithNames.topicNames());
            assertEquals(topics, requestWithNamesSerialized.topicNames());
        } else {
            // We should fail if version is less than 6.
            assertThrows(UnsupportedVersionException.class, () -> requestWithNames.serialize());
        }
    }
}
Use of org.apache.kafka.common.message.DeleteTopicsRequestData in project kafka by apache.
From the class DeleteTopicsRequestTest, method testTopicNormalization:
@Test
public void testTopicNormalization() {
    for (short version : DELETE_TOPICS.allVersions()) {
        // Check topic names are in the correct place when using topicNames.
        String topic1 = "topic1";
        String topic2 = "topic2";
        List<String> topics = Arrays.asList(topic1, topic2);
        DeleteTopicsRequest requestWithNames = new DeleteTopicsRequest.Builder(
            new DeleteTopicsRequestData().setTopicNames(topics)).build(version);
        DeleteTopicsRequest requestWithNamesSerialized = DeleteTopicsRequest.parse(requestWithNames.serialize(), version);
        assertEquals(topics, requestWithNames.topicNames());
        assertEquals(topics, requestWithNamesSerialized.topicNames());

        if (version < 6) {
            assertEquals(topics, requestWithNames.data().topicNames());
            assertEquals(topics, requestWithNamesSerialized.data().topicNames());
        } else {
            // Topics in TopicNames are moved to the new topics field
            assertEquals(topics, requestWithNames.data().topics().stream()
                .map(DeleteTopicState::name).collect(Collectors.toList()));
            assertEquals(topics, requestWithNamesSerialized.data().topics().stream()
                .map(DeleteTopicState::name).collect(Collectors.toList()));
        }
    }
}
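All four tests above repeat the same serialize-then-parse round trip inline. A tiny helper along these lines (the name roundTrip is ours, not part of DeleteTopicsRequestTest) captures the pattern:

// Hypothetical helper: serialize the request at the given version and parse
// the resulting bytes back, exactly as the inline assertions above do.
private static DeleteTopicsRequest roundTrip(DeleteTopicsRequest request, short version) {
    return DeleteTopicsRequest.parse(request.serialize(), version);
}

The version-gated branches would then read roundTrip(requestWithNames, version) in place of the repeated parse calls.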