Use of org.apache.kafka.clients.admin.NewTopic in project kafka by apache.
From class MirrorSourceConnectorTest, method testNewTopicConfigs.
@Test
public void testNewTopicConfigs() throws Exception {
    Map<String, Object> filterConfig = new HashMap<>();
    filterConfig.put(DefaultConfigPropertyFilter.CONFIG_PROPERTIES_EXCLUDE_CONFIG,
        "follower\\.replication\\.throttled\\.replicas, "
            + "leader\\.replication\\.throttled\\.replicas, "
            + "message\\.timestamp\\.difference\\.max\\.ms, "
            + "message\\.timestamp\\.type, "
            + "unclean\\.leader\\.election\\.enable, "
            + "min\\.insync\\.replicas,"
            + "exclude_param.*");
    DefaultConfigPropertyFilter filter = new DefaultConfigPropertyFilter();
    filter.configure(filterConfig);
    MirrorSourceConnector connector = spy(new MirrorSourceConnector(
        new SourceAndTarget("source", "target"), new DefaultReplicationPolicy(), x -> true, filter));
    final String topic = "testtopic";
    List<ConfigEntry> entries = new ArrayList<>();
    entries.add(new ConfigEntry("name-1", "value-1"));
    entries.add(new ConfigEntry("exclude_param.param1", "value-param1"));
    entries.add(new ConfigEntry("min.insync.replicas", "2"));
    Config config = new Config(entries);
    doReturn(Collections.singletonMap(topic, config)).when(connector).describeTopicConfigs(any());
    doAnswer(invocation -> {
        Map<String, NewTopic> newTopics = invocation.getArgument(0);
        assertNotNull(newTopics.get("source." + topic));
        Map<String, String> targetConfig = newTopics.get("source." + topic).configs();
        // property 'name-1' isn't in the exclude filter -> should be replicated
        assertNotNull(targetConfig.get("name-1"), "should replicate properties");
        // this property is in the default exclude list, just double-check it:
        String prop1 = "min.insync.replicas";
        assertNull(targetConfig.get(prop1), "should not replicate excluded properties " + prop1);
        // this property is only in the exclude filter's custom parameter; also exercises the regex:
        String prop2 = "exclude_param.param1";
        assertNull(targetConfig.get(prop2), "should not replicate excluded properties " + prop2);
        return null;
    }).when(connector).createNewTopics(any());
    connector.createNewTopics(Collections.singleton(topic), Collections.singletonMap(topic, 1L));
    verify(connector).createNewTopics(any(), any());
}
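For reference, each value in the map received by the stubbed createNewTopics(Map<String, NewTopic>) overload is built with the public NewTopic API. A minimal sketch of constructing such a request by hand, with the topic name, partition count, and config values chosen for illustration rather than taken from the test:

import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.clients.admin.NewTopic;

public class NewTopicSketch {
    public static void main(String[] args) {
        Map<String, String> configs = new HashMap<>();
        // only properties that pass the exclude filter would be carried over
        configs.put("name-1", "value-1");
        // one partition, replication factor 1, both explicit
        NewTopic mirrored = new NewTopic("source.testtopic", 1, (short) 1).configs(configs);
        System.out.println(mirrored);
    }
}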
Use of org.apache.kafka.clients.admin.NewTopic in project kafka by apache.
From class KafkaExactlyOnceDemo, method recreateTopics.
private static void recreateTopics(final int numPartitions) throws ExecutionException, InterruptedException {
    Properties props = new Properties();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,
        KafkaProperties.KAFKA_SERVER_URL + ":" + KafkaProperties.KAFKA_SERVER_PORT);
    Admin adminClient = Admin.create(props);
    List<String> topicsToDelete = Arrays.asList(INPUT_TOPIC, OUTPUT_TOPIC);
    deleteTopic(adminClient, topicsToDelete);
    // Check topic existence in a retry loop
    while (true) {
        System.out.println("Making sure the topics are deleted successfully: " + topicsToDelete);
        Set<String> listedTopics = adminClient.listTopics().names().get();
        System.out.println("Current list of topics: " + listedTopics);
        boolean hasTopicInfo = false;
        for (String listedTopic : listedTopics) {
            if (topicsToDelete.contains(listedTopic)) {
                hasTopicInfo = true;
                break;
            }
        }
        if (!hasTopicInfo) {
            break;
        }
        Thread.sleep(1000);
    }
    // Create topics in a retry loop
    while (true) {
        final short replicationFactor = 1;
        final List<NewTopic> newTopics = Arrays.asList(
            new NewTopic(INPUT_TOPIC, numPartitions, replicationFactor),
            new NewTopic(OUTPUT_TOPIC, numPartitions, replicationFactor));
        try {
            adminClient.createTopics(newTopics).all().get();
            System.out.println("Created new topics: " + newTopics);
            break;
        } catch (ExecutionException e) {
            if (!(e.getCause() instanceof TopicExistsException)) {
                throw e;
            }
            System.out.println("Metadata of the old topics is not cleared yet...");
            deleteTopic(adminClient, topicsToDelete);
            Thread.sleep(1000);
        }
    }
}
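The deleteTopic(...) helper is defined elsewhere in the demo and not shown in this excerpt. A plausible reconstruction under the assumption that deletion is best-effort and tolerates topics that are already gone; the exception handling here is our guess, not the demo's actual code:

import java.util.List;
import java.util.concurrent.ExecutionException;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.common.errors.UnknownTopicOrPartitionException;

// hypothetical reconstruction of the helper referenced above
private static void deleteTopic(final Admin adminClient, final List<String> topics)
        throws ExecutionException, InterruptedException {
    try {
        adminClient.deleteTopics(topics).all().get();
    } catch (ExecutionException e) {
        if (!(e.getCause() instanceof UnknownTopicOrPartitionException)) {
            throw e;
        }
        // topics are already gone; nothing left to delete
    }
}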
Use of org.apache.kafka.clients.admin.NewTopic in project kafka by apache.
From class DeadLetterQueueReporter, method createAndSetup.
public static DeadLetterQueueReporter createAndSetup(Map<String, Object> adminProps,
        ConnectorTaskId id, SinkConnectorConfig sinkConfig, Map<String, Object> producerProps,
        ErrorHandlingMetrics errorHandlingMetrics) {
    String topic = sinkConfig.dlqTopicName();
    try (Admin admin = Admin.create(adminProps)) {
        if (!admin.listTopics().names().get().contains(topic)) {
            log.error("Topic {} doesn't exist. Will attempt to create topic.", topic);
            NewTopic schemaTopicRequest = new NewTopic(topic, DLQ_NUM_DESIRED_PARTITIONS, sinkConfig.dlqTopicReplicationFactor());
            admin.createTopics(singleton(schemaTopicRequest)).all().get();
        }
    } catch (InterruptedException e) {
        throw new ConnectException("Could not initialize dead letter queue with topic=" + topic, e);
    } catch (ExecutionException e) {
        if (!(e.getCause() instanceof TopicExistsException)) {
            throw new ConnectException("Could not initialize dead letter queue with topic=" + topic, e);
        }
    }
    KafkaProducer<byte[], byte[]> dlqProducer = new KafkaProducer<>(producerProps);
    return new DeadLetterQueueReporter(dlqProducer, sinkConfig, id, errorHandlingMetrics);
}
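Note the pattern here: the existence check via listTopics() is only an optimization, since another Connect worker can create the DLQ topic between the check and the createTopics() call, and the TopicExistsException branch is what makes creation effectively idempotent. Distilled into a standalone sketch (the class and helper names are ours, not Connect's):

import java.util.Collections;
import java.util.concurrent.ExecutionException;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.common.errors.TopicExistsException;

public final class TopicUtil {
    // hypothetical helper: create a topic, treating "already exists" as success
    public static void ensureTopicExists(Admin admin, String name, int partitions, short replicationFactor)
            throws InterruptedException, ExecutionException {
        try {
            admin.createTopics(Collections.singleton(new NewTopic(name, partitions, replicationFactor))).all().get();
        } catch (ExecutionException e) {
            if (!(e.getCause() instanceof TopicExistsException)) {
                throw e;
            }
            // another client created the topic first; that is fine
        }
    }
}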
Use of org.apache.kafka.clients.admin.NewTopic in project kafka by apache.
From class InternalTopicManager, method setup.
/**
 * Sets up internal topics.
 *
 * Either the given topics are all created or the method fails with an exception.
 *
 * @param topicConfigs internal topics to set up
 */
public void setup(final Map<String, InternalTopicConfig> topicConfigs) {
    log.info("Starting to setup internal topics {}.", topicConfigs.keySet());
    final long now = time.milliseconds();
    final long deadline = now + retryTimeoutMs;
    final Map<String, Map<String, String>> streamsSideTopicConfigs = topicConfigs.values().stream()
        .collect(Collectors.toMap(
            InternalTopicConfig::name,
            topicConfig -> topicConfig.getProperties(defaultTopicConfigs, windowChangeLogAdditionalRetention)));
    final Set<String> createdTopics = new HashSet<>();
    final Set<String> topicStillToCreate = new HashSet<>(topicConfigs.keySet());
    while (!topicStillToCreate.isEmpty()) {
        final Set<NewTopic> newTopics = topicStillToCreate.stream()
            .map(topicName -> new NewTopic(topicName, topicConfigs.get(topicName).numberOfPartitions(), Optional.of(replicationFactor))
                .configs(streamsSideTopicConfigs.get(topicName)))
            .collect(Collectors.toSet());
        log.info("Going to create internal topics: " + newTopics);
        final CreateTopicsResult createTopicsResult = adminClient.createTopics(newTopics);
        processCreateTopicResults(createTopicsResult, topicStillToCreate, createdTopics, deadline);
        maybeSleep(Collections.singletonList(topicStillToCreate), deadline, "created");
    }
    log.info("Completed setup of internal topics {}.", topicConfigs.keySet());
}
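The NewTopic constructor used here takes Optional arguments, which lets the broker fill in defaults: passing Optional.empty() for the partition count or replication factor asks the broker to apply its own num.partitions or default.replication.factor. A small sketch with illustrative topic name and configs:

import java.util.Collections;
import java.util.Optional;
import org.apache.kafka.clients.admin.NewTopic;

public class OptionalNewTopicSketch {
    public static void main(String[] args) {
        // broker-default partition count, explicit replication factor of 3,
        // plus a compacted cleanup policy as Streams uses for changelogs
        NewTopic changelog = new NewTopic("my-app-store-changelog", Optional.empty(), Optional.of((short) 3))
            .configs(Collections.singletonMap("cleanup.policy", "compact"));
        System.out.println(changelog);
    }
}

Broker-side defaults for partitions and replication factor require CreateTopicRequest version 4 or newer, which is why the UnsupportedVersionException branch in makeReady below points users at brokers 2.4+.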
Use of org.apache.kafka.clients.admin.NewTopic in project kafka by apache.
From class InternalTopicManager, method makeReady.
/**
 * Prepares a set of given internal topics.
 *
 * If a topic does not exist, it is created.
 * If a topic exists with the correct number of partitions, it is left as is.
 * If a topic already exists but has a different number of partitions, the method fails with an
 * exception asking the user to reset the application before restarting it.
 *
 * @return the set of topics which had to be newly created
 */
public Set<String> makeReady(final Map<String, InternalTopicConfig> topics) {
    // we will do the validation / topic-creation in a loop, until we have confirmed all topics
    // exist with the expected number of partitions, or some topic creation returns a fatal error.
    log.debug("Starting to validate internal topics {} in partition assignor.", topics);
    long currentWallClockMs = time.milliseconds();
    final long deadlineMs = currentWallClockMs + retryTimeoutMs;
    Set<String> topicsNotReady = new HashSet<>(topics.keySet());
    final Set<String> newlyCreatedTopics = new HashSet<>();
    while (!topicsNotReady.isEmpty()) {
        final Set<String> tempUnknownTopics = new HashSet<>();
        topicsNotReady = validateTopics(topicsNotReady, topics, tempUnknownTopics);
        newlyCreatedTopics.addAll(topicsNotReady);
        if (!topicsNotReady.isEmpty()) {
            final Set<NewTopic> newTopics = new HashSet<>();
            for (final String topicName : topicsNotReady) {
                if (tempUnknownTopics.contains(topicName)) {
                    // we'll check again later if remaining retries > 0
                    continue;
                }
                final InternalTopicConfig internalTopicConfig = Objects.requireNonNull(topics.get(topicName));
                final Map<String, String> topicConfig = internalTopicConfig.getProperties(defaultTopicConfigs, windowChangeLogAdditionalRetention);
                log.debug("Going to create topic {} with {} partitions and config {}.",
                    internalTopicConfig.name(), internalTopicConfig.numberOfPartitions(), topicConfig);
                newTopics.add(new NewTopic(internalTopicConfig.name(), internalTopicConfig.numberOfPartitions(), Optional.of(replicationFactor))
                    .configs(topicConfig));
            }
            // the set of new topics to create may be empty, in which case we can skip the request
            if (!newTopics.isEmpty()) {
                final CreateTopicsResult createTopicsResult = adminClient.createTopics(newTopics);
                for (final Map.Entry<String, KafkaFuture<Void>> createTopicResult : createTopicsResult.values().entrySet()) {
                    final String topicName = createTopicResult.getKey();
                    try {
                        createTopicResult.getValue().get();
                        topicsNotReady.remove(topicName);
                    } catch (final InterruptedException fatalException) {
                        // this should not happen; if it ever does, it indicates a bug
                        Thread.currentThread().interrupt();
                        log.error(INTERRUPTED_ERROR_MESSAGE, fatalException);
                        throw new IllegalStateException(INTERRUPTED_ERROR_MESSAGE, fatalException);
                    } catch (final ExecutionException executionException) {
                        final Throwable cause = executionException.getCause();
                        if (cause instanceof TopicExistsException) {
                            // the topic either didn't exist earlier or its leader was not known before;
                            // just retain it for the next round of validation.
                            log.info("Could not create topic {}. Topic is probably marked for deletion (number of partitions is unknown).\n"
                                + "Will retry to create this topic in {} ms (to let broker finish async delete operation first).\n"
                                + "Error message was: {}", topicName, retryBackOffMs, cause.toString());
                        } else {
                            log.error("Unexpected error during topic creation for {}.\n"
                                + "Error message was: {}", topicName, cause.toString());
                            if (cause instanceof UnsupportedVersionException) {
                                final String errorMessage = cause.getMessage();
                                if (errorMessage != null && errorMessage.startsWith("Creating topics with default partitions/replication factor are only supported in CreateTopicRequest version 4+")) {
                                    throw new StreamsException(String.format(
                                        "Could not create topic %s, because brokers don't support configuration replication.factor=-1."
                                            + " You can change the replication.factor config or upgrade your brokers to version 2.4 or newer to avoid this error.",
                                        topicName));
                                }
                            } else {
                                throw new StreamsException(String.format("Could not create topic %s.", topicName), cause);
                            }
                        }
                    } catch (final TimeoutException retriableException) {
                        log.error("Creating topic {} timed out.\n"
                            + "Error message was: {}", topicName, retriableException.toString());
                    }
                }
            }
        }
        if (!topicsNotReady.isEmpty()) {
            currentWallClockMs = time.milliseconds();
            if (currentWallClockMs >= deadlineMs) {
                final String timeoutError = String.format("Could not create topics within %d milliseconds. "
                    + "This can happen if the Kafka cluster is temporarily not available.", retryTimeoutMs);
                log.error(timeoutError);
                throw new TimeoutException(timeoutError);
            }
            log.info("Topics {} could not be made ready. Will retry in {} milliseconds. Remaining time in milliseconds: {}",
                topicsNotReady, retryBackOffMs, deadlineMs - currentWallClockMs);
            Utils.sleep(retryBackOffMs);
        }
    }
    log.debug("Completed validating internal topics and created {}", newlyCreatedTopics);
    return newlyCreatedTopics;
}
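The core Admin interaction that makeReady retries around is CreateTopicsResult.values(), which exposes one future per requested topic so each creation can succeed or fail independently of the batch. Reduced to a standalone sketch; the bootstrap address and topic names are illustrative:

import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ExecutionException;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.CreateTopicsResult;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.common.KafkaFuture;

public class PerTopicCreateSketch {
    public static void main(String[] args) throws InterruptedException {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        try (Admin admin = Admin.create(props)) {
            List<NewTopic> requests = Arrays.asList(
                new NewTopic("topic-a", 4, (short) 1),
                new NewTopic("topic-b", 4, (short) 1));
            CreateTopicsResult result = admin.createTopics(requests);
            for (Map.Entry<String, KafkaFuture<Void>> entry : result.values().entrySet()) {
                try {
                    entry.getValue().get();
                    System.out.println("Created " + entry.getKey());
                } catch (ExecutionException e) {
                    // e.g. TopicExistsException; handle per topic, as makeReady does
                    System.err.println("Failed to create " + entry.getKey() + ": " + e.getCause());
                }
            }
        }
    }
}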