Use of org.apache.kafka.clients.admin.AdminClient in project flink by apache:
class KafkaTestEnvironmentImpl, method deleteTestTopic.
@Override
public void deleteTestTopic(String topic) {
    LOG.info("Deleting topic {}", topic);
    Properties props = getSecureProperties();
    props.putAll(getStandardProperties());
    // Random client.id so dangling admin-client threads can be attributed to this call
    // by maybePrintDanglingThreadStacktrace below.
    String clientId = Long.toString(new Random().nextLong());
    props.put("client.id", clientId);
    AdminClient adminClient = AdminClient.create(props);
    // Not try-with-resources: the finally block must both close the client (with a
    // bounded timeout) and then inspect any threads the client left behind.
    try {
        tryDelete(adminClient, topic);
    } catch (Exception e) {
        // Log the full stack trace through the logger instead of printStackTrace(),
        // then fail the test with a descriptive message.
        LOG.error("Deleting topic {} failed", topic, e);
        fail(String.format("Delete test topic : %s failed, %s", topic, e.getMessage()));
    } finally {
        // Bound the close so a wedged broker cannot hang the test teardown.
        adminClient.close(Duration.ofMillis(5000L));
        maybePrintDanglingThreadStacktrace(clientId);
    }
}
Use of org.apache.kafka.clients.admin.AdminClient in project flink by apache:
class KafkaTestEnvironmentImpl, method createTestTopic.
@Override
public void createTestTopic(String topic, int numberOfPartitions, int replicationFactor, Properties properties) {
    LOG.info("Creating topic {}", topic);
    try (AdminClient adminClient = AdminClient.create(getStandardProperties())) {
        NewTopic topicObj = new NewTopic(topic, numberOfPartitions, (short) replicationFactor);
        // Block until the broker has accepted the creation request.
        adminClient.createTopics(Collections.singleton(topicObj)).all().get();
        // Creation being accepted does not mean the topic is queryable yet; poll the
        // topic description until all partitions are visible or the timeout elapses.
        CommonTestUtils.waitUtil(() -> {
            Map<String, TopicDescription> topicDescriptions;
            try {
                topicDescriptions = adminClient.describeTopics(Collections.singleton(topic)).all().get(REQUEST_TIMEOUT_SECONDS, TimeUnit.SECONDS);
            } catch (Exception e) {
                // Transient metadata errors are expected while the topic propagates; retry.
                LOG.warn("Exception caught when describing Kafka topics", e);
                return false;
            }
            if (topicDescriptions == null || !topicDescriptions.containsKey(topic)) {
                return false;
            }
            TopicDescription topicDescription = topicDescriptions.get(topic);
            return topicDescription.partitions().size() == numberOfPartitions;
        }, Duration.ofSeconds(30), String.format("New topic \"%s\" is not ready within timeout", topicObj));
    } catch (Exception e) {
        // Log with the full stack trace instead of printStackTrace(), then fail the test.
        LOG.error("Create test topic {} failed", topic, e);
        fail("Create test topic : " + topic + " failed, " + e.getMessage());
    }
}
Use of org.apache.kafka.clients.admin.AdminClient in project flink by apache:
class KafkaTestEnvironmentImpl, method unpause.
/**
 * Resumes a previously paused Kafka broker container and waits until the broker
 * has rejoined the cluster before returning.
 *
 * @param brokerId id of the broker to resume
 * @throws Exception if the broker does not rejoin the cluster within the timeout
 */
private void unpause(int brokerId) throws Exception {
    // Nothing to do if this broker was never paused.
    if (!pausedBroker.contains(brokerId)) {
        LOG.warn("Broker {} is already running. Skipping unpause operation", brokerId);
        return;
    }
    final String containerId = brokers.get(brokerId).getContainerId();
    DockerClientFactory.instance().client().unpauseContainerCmd(containerId).exec();
    // The container being unpaused does not mean the broker is back in the cluster;
    // poll cluster metadata until the broker id shows up among the live nodes.
    try (AdminClient adminClient = AdminClient.create(getStandardProperties())) {
        CommonTestUtils.waitUtil(
                () -> {
                    try {
                        return adminClient
                                .describeCluster()
                                .nodes()
                                .get()
                                .stream()
                                .anyMatch(node -> node.id() == brokerId);
                    } catch (Exception ignored) {
                        // Metadata may be temporarily unavailable while the broker rejoins.
                        return false;
                    }
                },
                Duration.ofSeconds(30),
                String.format("The paused broker %d is not recovered within timeout", brokerId));
    }
    // Only mark the broker as running after it is confirmed back in the cluster.
    pausedBroker.remove(brokerId);
    LOG.info("Broker {} is resumed", brokerId);
}
Use of org.apache.kafka.clients.admin.AdminClient in project flink by apache:
class KafkaTableTestBase, method getConsumerOffset.
/**
 * Fetches the committed offsets of the given consumer group from the test cluster.
 *
 * @param groupId the consumer group to query
 * @return committed offset and metadata per topic partition
 * @throws IllegalStateException if the offsets cannot be fetched within 20 seconds
 */
public Map<TopicPartition, OffsetAndMetadata> getConsumerOffset(String groupId) {
    // Minimal admin-client config: only the bootstrap servers of the test cluster.
    Map<String, Object> adminConfig = new HashMap<>();
    adminConfig.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, getBootstrapServers());
    try (AdminClient adminClient = AdminClient.create(adminConfig)) {
        return adminClient
                .listConsumerGroupOffsets(groupId)
                .partitionsToOffsetAndMetadata()
                .get(20, TimeUnit.SECONDS);
    } catch (Exception e) {
        throw new IllegalStateException(String.format("Fail to get consumer offsets with the group id [%s].", groupId), e);
    }
}
Use of org.apache.kafka.clients.admin.AdminClient in project flink by apache:
class KafkaTableTestBase, method deleteTestTopic.
/**
 * Deletes the given topic from the test cluster and blocks until the broker has
 * acknowledged the deletion.
 *
 * @param topic name of the topic to delete
 * @throws IllegalStateException if the deletion fails or is interrupted
 */
public void deleteTestTopic(String topic) {
    Map<String, Object> properties = new HashMap<>();
    properties.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, getBootstrapServers());
    try (AdminClient admin = AdminClient.create(properties)) {
        // deleteTopics is asynchronous; the original code discarded the result futures,
        // so the method could return before the topic was gone and errors were lost.
        // Await completion so failures surface and callers see the deletion applied.
        admin.deleteTopics(Collections.singletonList(topic)).all().get();
    } catch (Exception e) {
        // Consistent with getConsumerOffset: wrap with context, preserve the cause.
        throw new IllegalStateException(String.format("Fail to delete the topic [%s].", topic), e);
    }
}
Aggregations