Use of org.apache.kafka.clients.admin.AdminClient in the Apache Flink project: the createTestTopic method of the KafkaTableTestBase class.
/**
 * Creates a Kafka topic for tests and blocks until the creation has completed.
 *
 * <p>{@code AdminClient.createTopics()} is asynchronous; the original code discarded the
 * returned {@link org.apache.kafka.clients.admin.CreateTopicsResult}, so the method could
 * return before the topic existed and creation failures were silently lost. We now wait on
 * {@code all().get()} and surface any failure to the caller.
 *
 * @param topic             name of the topic to create
 * @param numPartitions     number of partitions for the topic
 * @param replicationFactor replication factor (narrowed to {@code short} for the Kafka API)
 */
public void createTestTopic(String topic, int numPartitions, int replicationFactor) {
    Map<String, Object> properties = new HashMap<>();
    properties.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, getBootstrapServers());
    try (AdminClient admin = AdminClient.create(properties)) {
        admin.createTopics(
                        Collections.singletonList(
                                new NewTopic(topic, numPartitions, (short) replicationFactor)))
                .all()
                .get();
    } catch (InterruptedException e) {
        // Restore the interrupt flag before propagating, per standard practice.
        Thread.currentThread().interrupt();
        throw new IllegalStateException("Interrupted while creating topic " + topic, e);
    } catch (Exception e) {
        throw new IllegalStateException("Failed to create topic " + topic, e);
    }
}
Use of org.apache.kafka.clients.admin.AdminClient in the Apache Flink project: the testDiscoverPartitionsPeriodically method of the KafkaEnumeratorTest class.
// Verifies that the enumerator's periodic partition discovery picks up a topic that is
// created *after* the enumerator has started, and assigns its splits to the readers.
// The outer timeout bounds the polling loop below in case discovery never happens.
@Test(timeout = 30000L)
public void testDiscoverPartitionsPeriodically() throws Throwable {
try (MockSplitEnumeratorContext<KafkaPartitionSplit> context = new MockSplitEnumeratorContext<>(NUM_SUBTASKS);
KafkaSourceEnumerator enumerator = createEnumerator(context, ENABLE_PERIODIC_PARTITION_DISCOVERY, INCLUDE_DYNAMIC_TOPIC);
AdminClient adminClient = KafkaSourceTestEnv.getAdminClient()) {
startEnumeratorAndRegisterReaders(context, enumerator);
// invoke partition discovery callable again and there should be no new assignments.
runPeriodicPartitionDiscovery(context);
assertEquals("No assignments should be made because there is no partition change", 2, context.getSplitsAssignmentSequence().size());
// create the dynamic topic.
// all().get() blocks until the broker has actually created the topic.
adminClient.createTopics(Collections.singleton(new NewTopic(DYNAMIC_TOPIC_NAME, NUM_PARTITIONS_DYNAMIC_TOPIC, (short) 1))).all().get();
// invoke partition discovery callable again.
// Poll until a third assignment round appears — discovery of the new topic may take
// several attempts, so keep retrying (bounded by the @Test timeout above).
while (true) {
runPeriodicPartitionDiscovery(context);
if (context.getSplitsAssignmentSequence().size() < 3) {
Thread.sleep(10);
} else {
break;
}
}
// NOTE(review): the trailing "3" presumably identifies the expected assignment round —
// confirm against verifyLastReadersAssignments' signature.
verifyLastReadersAssignments(context, Arrays.asList(READER0, READER1), Collections.singleton(DYNAMIC_TOPIC_NAME), 3);
} finally {
// Best-effort cleanup: remove the dynamic topic so later tests start from a clean slate.
try (AdminClient adminClient = KafkaSourceTestEnv.getAdminClient()) {
adminClient.deleteTopics(Collections.singleton(DYNAMIC_TOPIC_NAME)).all().get();
} catch (Exception e) {
// Let it go.
}
}
}
Use of org.apache.kafka.clients.admin.AdminClient in the Debezium project: the initializeStorage method of the KafkaDatabaseHistory class.
/**
 * Creates the database history topic if storage initialization requires it.
 *
 * <p>The topic is created with a single partition (the history must be totally ordered)
 * and with retention effectively disabled, since history records must never expire.
 * The replication factor is taken from the broker's default so the topic matches the
 * cluster's durability settings.
 *
 * <p>Fix: the original code discarded the result of {@code createTopics()}, which is
 * asynchronous — the "created" log line fired before (and regardless of whether) the
 * topic was actually created, and creation failures escaped the intended
 * {@link ConnectException} wrapping. We now block on {@code all().get()}.
 */
@Override
public void initializeStorage() {
    super.initializeStorage();
    try (AdminClient admin = AdminClient.create(this.producerConfig.asProperties())) {
        // Find default replication factor from the broker configuration.
        Config brokerConfig = getKafkaBrokerConfig(admin);
        final short replicationFactor = Short.parseShort(brokerConfig.get(DEFAULT_TOPIC_REPLICATION_FACTOR_PROP_NAME).value());
        // Single partition for total ordering; unlimited retention for a permanent history.
        final NewTopic topic = new NewTopic(topicName, (short) 1, replicationFactor);
        topic.configs(Collect.hashMapOf("cleanup.policy", "delete", "retention.ms", Long.toString(Long.MAX_VALUE), "retention.bytes", "-1"));
        // Wait for the creation to complete so failures surface here.
        admin.createTopics(Collections.singleton(topic)).all().get();
        logger.info("Database history topic '{}' created", topic);
    } catch (Exception e) {
        throw new ConnectException("Creation of database history topic failed, please create the topic manually", e);
    }
}
Use of org.apache.kafka.clients.admin.AdminClient in the eventapis project by kloiasoft: the init method of the AutomaticTopicConfiguration class.
/**
 * Scans the classpath for {@code PublishedEvent} subclasses and ensures a Kafka topic
 * exists for each one (topic name = event class simple name), creating missing topics
 * with one partition per cluster node and replication factor 1.
 *
 * <p>Fixes over the original:
 * <ul>
 *   <li>{@code AdminClient} is {@code AutoCloseable} — use try-with-resources instead of
 *       a manual {@code try/finally close()}.</li>
 *   <li>{@code createTopics()} is asynchronous; the original logged success before the
 *       creation actually happened and creation failures bypassed the dedicated catch.
 *       We now block on {@code all().get()}.</li>
 * </ul>
 */
@PostConstruct
public void init() {
    try (AdminClient adminClient = adminClient()) {
        StopWatch stopWatch = new StopWatch("CheckAndCreateTopics");
        stopWatch.start("CheckAndCreateTopics");
        ClassPathScanningCandidateComponentProvider provider = new ClassPathScanningCandidateComponentProvider(false);
        provider.addIncludeFilter(new AssignableTypeFilter(PublishedEvent.class));
        Set<BeanDefinition> candidateComponents = provider.findCandidateComponents(eventApisConfiguration.getBaseEventsPackage());
        // Default to a single node if the cluster size cannot be determined.
        int numberOfNodes = 1;
        try {
            Collection<Node> nodes = adminClient.describeCluster().nodes().get();
            numberOfNodes = nodes.size();
        } catch (InterruptedException | ExecutionException e) {
            log.warn("Error while finding number of Nodes:" + e.getMessage(), e);
        }
        for (BeanDefinition candidateComponent : candidateComponents) {
            try {
                @SuppressWarnings("unchecked") // scanner filter guarantees PublishedEvent subtypes
                Class<PublishedEvent> beanClass = (Class<PublishedEvent>) Class.forName(candidateComponent.getBeanClassName());
                String topicName = beanClass.getSimpleName();
                log.info("Candidate {} to Create Topic:", topicName);
                try {
                    // Probe for the topic; a missing topic surfaces as an ExecutionException
                    // whose cause is UnknownTopicOrPartitionException.
                    adminClient.describeTopics(Collections.singleton(topicName)).all().get();
                } catch (UnknownTopicOrPartitionException | ExecutionException exception) {
                    if (!(exception.getCause() instanceof UnknownTopicOrPartitionException))
                        throw exception;
                    log.warn("Topic {} does not exists, trying to create", topicName);
                    try {
                        // Block until creation completes so the success log is truthful
                        // and failures land in the catch below.
                        adminClient.createTopics(Collections.singleton(new NewTopic(topicName, numberOfNodes, (short) 1))).all().get();
                        log.info("Topic {} is Created Successfully:", topicName);
                    } catch (Exception topicCreationEx) {
                        log.warn("Error while creating Topic:" + topicCreationEx.getMessage(), topicCreationEx);
                    }
                }
            } catch (ClassNotFoundException | InterruptedException | ExecutionException exception) {
                log.warn("Error while checking Topic:" + candidateComponent.toString() + " message: " + exception.getMessage(), exception);
            }
        }
        stopWatch.stop();
        log.debug(stopWatch.prettyPrint());
    }
}
Use of org.apache.kafka.clients.admin.AdminClient in the Apache Drill project: the createTopicHelper method of the TestKafkaSuit class.
/**
 * Creates a Kafka topic with the given name and partition count (replication factor 1),
 * configured with CreateTime message timestamps and unlimited retention, and blocks
 * until the broker has completed the creation.
 *
 * @param topicName  name of the topic to create
 * @param partitions number of partitions
 * @throws ExecutionException   if topic creation fails on the broker
 * @throws InterruptedException if interrupted while waiting for the creation
 */
public static void createTopicHelper(String topicName, int partitions) throws ExecutionException, InterruptedException {
    try (AdminClient client = initAdminClient()) {
        NewTopic topic = new NewTopic(topicName, partitions, (short) 1);
        topic.configs(Map.of(
                TopicConfig.MESSAGE_TIMESTAMP_TYPE_CONFIG, "CreateTime",
                TopicConfig.RETENTION_MS_CONFIG, "-1"));
        client.createTopics(Collections.singletonList(topic)).all().get();
    }
}
Aggregations