Use of org.apache.kafka.common.errors.TopicExistsException in project kafka by apache.
The class InternalTopicManager, method makeReady.
/**
* Prepares a set of given internal topics.
*
* If a topic does not exist, a new topic is created.
* If a topic already exists with the correct number of partitions, it is ignored.
* If a topic exists but has a different number of partitions, we fail and throw an exception asking the user to reset the application before restarting it.
* @return the set of topics which had to be newly created
*/
public Set<String> makeReady(final Map<String, InternalTopicConfig> topics) {
// we will do the validation / topic-creation in a loop, until we have confirmed that all topics
// exist with the expected number of partitions, or some topic creation returns a fatal error.
log.debug("Starting to validate internal topics {} in partition assignor.", topics);
long currentWallClockMs = time.milliseconds();
final long deadlineMs = currentWallClockMs + retryTimeoutMs;
Set<String> topicsNotReady = new HashSet<>(topics.keySet());
final Set<String> newlyCreatedTopics = new HashSet<>();
while (!topicsNotReady.isEmpty()) {
final Set<String> tempUnknownTopics = new HashSet<>();
topicsNotReady = validateTopics(topicsNotReady, topics, tempUnknownTopics);
newlyCreatedTopics.addAll(topicsNotReady);
if (!topicsNotReady.isEmpty()) {
final Set<NewTopic> newTopics = new HashSet<>();
for (final String topicName : topicsNotReady) {
if (tempUnknownTopics.contains(topicName)) {
// we'll check again later if remaining retries > 0
continue;
}
final InternalTopicConfig internalTopicConfig = Objects.requireNonNull(topics.get(topicName));
final Map<String, String> topicConfig = internalTopicConfig.getProperties(defaultTopicConfigs, windowChangeLogAdditionalRetention);
log.debug("Going to create topic {} with {} partitions and config {}.", internalTopicConfig.name(), internalTopicConfig.numberOfPartitions(), topicConfig);
newTopics.add(new NewTopic(internalTopicConfig.name(), internalTopicConfig.numberOfPartitions(), Optional.of(replicationFactor)).configs(topicConfig));
}
// the new topics to create may be empty and hence we can skip here
if (!newTopics.isEmpty()) {
final CreateTopicsResult createTopicsResult = adminClient.createTopics(newTopics);
for (final Map.Entry<String, KafkaFuture<Void>> createTopicResult : createTopicsResult.values().entrySet()) {
final String topicName = createTopicResult.getKey();
try {
createTopicResult.getValue().get();
topicsNotReady.remove(topicName);
} catch (final InterruptedException fatalException) {
// this should not happen; if it ever does, it indicates a bug
Thread.currentThread().interrupt();
log.error(INTERRUPTED_ERROR_MESSAGE, fatalException);
throw new IllegalStateException(INTERRUPTED_ERROR_MESSAGE, fatalException);
} catch (final ExecutionException executionException) {
final Throwable cause = executionException.getCause();
if (cause instanceof TopicExistsException) {
// The topic did not exist earlier, or its leader was not known before; just retain it for the next round of validation.
log.info("Could not create topic {}. Topic is probably marked for deletion (number of partitions is unknown).\n" + "Will retry to create this topic in {} ms (to let broker finish async delete operation first).\n" + "Error message was: {}", topicName, retryBackOffMs, cause.toString());
} else {
log.error("Unexpected error during topic creation for {}.\n" + "Error message was: {}", topicName, cause.toString());
if (cause instanceof UnsupportedVersionException) {
final String errorMessage = cause.getMessage();
if (errorMessage != null && errorMessage.startsWith("Creating topics with default partitions/replication factor are only supported in CreateTopicRequest version 4+")) {
throw new StreamsException(String.format("Could not create topic %s, because brokers don't support configuration replication.factor=-1." + " You can change the replication.factor config or upgrade your brokers to version 2.4 or newer to avoid this error.", topicName));
}
} else {
throw new StreamsException(String.format("Could not create topic %s.", topicName), cause);
}
}
} catch (final TimeoutException retriableException) {
log.error("Creating topic {} timed out.\n" + "Error message was: {}", topicName, retriableException.toString());
}
}
}
}
if (!topicsNotReady.isEmpty()) {
currentWallClockMs = time.milliseconds();
if (currentWallClockMs >= deadlineMs) {
final String timeoutError = String.format("Could not create topics within %d milliseconds. " + "This can happen if the Kafka cluster is temporarily not available.", retryTimeoutMs);
log.error(timeoutError);
throw new TimeoutException(timeoutError);
}
log.info("Topics {} could not be made ready. Will retry in {} milliseconds. Remaining time in milliseconds: {}", topicsNotReady, retryBackOffMs, deadlineMs - currentWallClockMs);
Utils.sleep(retryBackOffMs);
}
}
log.debug("Completed validating internal topics and created {}", newlyCreatedTopics);
return newlyCreatedTopics;
}
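For orientation, here is a minimal, hypothetical sketch of how makeReady might be driven. It reuses only constructors and setters that appear in the test below (InternalTopicManager, StreamsConfig, UnwindowedChangelogTopicConfig, setNumberOfPartitions, Utils.mkMap/mkEntry); the time, adminClient and streamsProps objects are assumed to exist in the caller's scope, and the topic names are illustrative. Since these are internal Streams classes, such code only compiles from within the same package.
// Hypothetical usage sketch; `time`, `adminClient` and `streamsProps` are assumed to be in scope.
final InternalTopicManager topicManager =
    new InternalTopicManager(time, adminClient, new StreamsConfig(streamsProps));

final InternalTopicConfig changelog1 = new UnwindowedChangelogTopicConfig("app-store1-changelog", Collections.emptyMap());
changelog1.setNumberOfPartitions(4);
final InternalTopicConfig changelog2 = new UnwindowedChangelogTopicConfig("app-store2-changelog", Collections.emptyMap());
changelog2.setNumberOfPartitions(4);

// makeReady creates whatever is missing and returns only the topics it actually created;
// a concurrent TopicExistsException is treated as retriable and re-validated on the next loop.
final Set<String> newlyCreated = topicManager.makeReady(mkMap(
    mkEntry(changelog1.name(), changelog1),
    mkEntry(changelog2.name(), changelog2)));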
Use of org.apache.kafka.common.errors.TopicExistsException in project kafka by apache.
The class InternalTopicManagerTest, method shouldCompleteTopicValidationOnRetry.
@Test
public void shouldCompleteTopicValidationOnRetry() {
final AdminClient admin = EasyMock.createNiceMock(AdminClient.class);
final InternalTopicManager topicManager = new InternalTopicManager(time, admin, new StreamsConfig(config));
final TopicPartitionInfo partitionInfo = new TopicPartitionInfo(0, broker1, Collections.singletonList(broker1), Collections.singletonList(broker1));
final KafkaFutureImpl<TopicDescription> topicDescriptionSuccessFuture = new KafkaFutureImpl<>();
final KafkaFutureImpl<TopicDescription> topicDescriptionFailFuture = new KafkaFutureImpl<>();
topicDescriptionSuccessFuture.complete(new TopicDescription(topic1, false, Collections.singletonList(partitionInfo), Collections.emptySet()));
topicDescriptionFailFuture.completeExceptionally(new UnknownTopicOrPartitionException("KABOOM!"));
final KafkaFutureImpl<CreateTopicsResult.TopicMetadataAndConfig> topicCreationFuture = new KafkaFutureImpl<>();
topicCreationFuture.completeExceptionally(new TopicExistsException("KABOOM!"));
// let the first describe succeed on topic1 and fail on topic2, then let the creation throw topic-exists;
// it should retry with just topic2 and then let it succeed
EasyMock.expect(admin.describeTopics(mkSet(topic1, topic2))).andReturn(new MockDescribeTopicsResult(mkMap(mkEntry(topic1, topicDescriptionSuccessFuture), mkEntry(topic2, topicDescriptionFailFuture)))).once();
EasyMock.expect(admin.createTopics(Collections.singleton(new NewTopic(topic2, Optional.of(1), Optional.of((short) 1)).configs(mkMap(mkEntry(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT), mkEntry(TopicConfig.MESSAGE_TIMESTAMP_TYPE_CONFIG, "CreateTime")))))).andReturn(new MockCreateTopicsResult(Collections.singletonMap(topic2, topicCreationFuture))).once();
EasyMock.expect(admin.describeTopics(Collections.singleton(topic2))).andReturn(new MockDescribeTopicsResult(Collections.singletonMap(topic2, topicDescriptionSuccessFuture)));
EasyMock.replay(admin);
final InternalTopicConfig topicConfig = new UnwindowedChangelogTopicConfig(topic1, Collections.emptyMap());
topicConfig.setNumberOfPartitions(1);
final InternalTopicConfig topic2Config = new UnwindowedChangelogTopicConfig(topic2, Collections.emptyMap());
topic2Config.setNumberOfPartitions(1);
topicManager.makeReady(mkMap(mkEntry(topic1, topicConfig), mkEntry(topic2, topic2Config)));
EasyMock.verify(admin);
}
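The MockDescribeTopicsResult and MockCreateTopicsResult helpers are not part of this excerpt. A plausible reconstruction is sketched below, assuming they are thin subclasses whose only job is to expose the protected constructors of DescribeTopicsResult and CreateTopicsResult so the pre-completed futures can be returned from the EasyMock expectations; the actual helpers in InternalTopicManagerTest may differ.
// Hypothetical test helpers: expose the protected result constructors to the test.
private static class MockCreateTopicsResult extends CreateTopicsResult {
    MockCreateTopicsResult(final Map<String, KafkaFuture<CreateTopicsResult.TopicMetadataAndConfig>> futures) {
        super(futures);
    }
}

private static class MockDescribeTopicsResult extends DescribeTopicsResult {
    MockDescribeTopicsResult(final Map<String, KafkaFuture<TopicDescription>> futures) {
        super(futures);
    }
}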
Use of org.apache.kafka.common.errors.TopicExistsException in project kafka by apache.
The class WorkerUtils, method createTopics.
/**
* Creates Kafka topics and returns a list of topics that already exist
* @param log The logger to use
* @param adminClient AdminClient
* @param topics List of topics to create
* @return Collection of topic names that already exist.
* @throws Throwable if creation of one or more topics fails (except for the topic-exists case).
*/
private static Collection<String> createTopics(Logger log, Admin adminClient, Collection<NewTopic> topics) throws Throwable {
long startMs = Time.SYSTEM.milliseconds();
int tries = 0;
List<String> existingTopics = new ArrayList<>();
Map<String, NewTopic> newTopics = new HashMap<>();
for (NewTopic newTopic : topics) {
newTopics.put(newTopic.name(), newTopic);
}
List<String> topicsToCreate = new ArrayList<>(newTopics.keySet());
while (true) {
log.info("Attempting to create {} topics (try {})...", topicsToCreate.size(), ++tries);
Map<String, Future<Void>> creations = new HashMap<>();
while (!topicsToCreate.isEmpty()) {
List<NewTopic> newTopicsBatch = new ArrayList<>();
for (int i = 0; (i < MAX_CREATE_TOPICS_BATCH_SIZE) && !topicsToCreate.isEmpty(); i++) {
String topicName = topicsToCreate.remove(0);
newTopicsBatch.add(newTopics.get(topicName));
}
creations.putAll(adminClient.createTopics(newTopicsBatch).values());
}
// We retry cases where the topic creation failed with a timeout. This is a workaround for KAFKA-6368.
for (Map.Entry<String, Future<Void>> entry : creations.entrySet()) {
String topicName = entry.getKey();
Future<Void> future = entry.getValue();
try {
future.get();
log.debug("Successfully created {}.", topicName);
} catch (Exception e) {
if ((e.getCause() instanceof TimeoutException) || (e.getCause() instanceof NotEnoughReplicasException)) {
log.warn("Attempt to create topic `{}` failed: {}", topicName, e.getCause().getMessage());
topicsToCreate.add(topicName);
} else if (e.getCause() instanceof TopicExistsException) {
log.info("Topic {} already exists.", topicName);
existingTopics.add(topicName);
} else {
log.warn("Failed to create {}", topicName, e.getCause());
throw e.getCause();
}
}
}
if (topicsToCreate.isEmpty()) {
break;
}
if (Time.SYSTEM.milliseconds() > startMs + CREATE_TOPICS_CALL_TIMEOUT) {
String str = "Unable to create topic(s): " + Utils.join(topicsToCreate, ", ") + " after " + tries + " attempt(s)";
log.warn(str);
throw new TimeoutException(str);
}
}
return existingTopics;
}
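All three call sites above follow the same Admin API pattern: issue createTopics, then inspect each per-topic future and treat TopicExistsException as a benign outcome while surfacing every other failure. The following is a minimal standalone sketch of that pattern, not taken from the Kafka sources; the bootstrap address, topic name, partition count and replication factor are illustrative placeholders.
// Standalone sketch: create a topic and tolerate TopicExistsException.
Properties props = new Properties();
props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
try (Admin admin = Admin.create(props)) {
    NewTopic topic = new NewTopic("example-topic", 3, (short) 1);
    CreateTopicsResult result = admin.createTopics(Collections.singleton(topic));
    try {
        // Per-topic future: completes normally on success, exceptionally on failure.
        result.values().get("example-topic").get();
        System.out.println("Created example-topic");
    } catch (ExecutionException e) {
        if (e.getCause() instanceof TopicExistsException) {
            // Benign: the topic already exists or was created concurrently.
            System.out.println("example-topic already exists");
        } else {
            throw new RuntimeException("Topic creation failed", e.getCause());
        }
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new RuntimeException(e);
    }
}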