Use of org.apache.kafka.clients.admin.AdminClient in project apache-kafka-on-k8s by banzaicloud.
The class ClientCompatibilityTest, method testAdminClient.
void testAdminClient() throws Throwable {
    Properties adminProps = new Properties();
    adminProps.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, testConfig.bootstrapServer);
    try (final AdminClient client = AdminClient.create(adminProps)) {
        // Wait until the expected number of broker nodes is visible to the client.
        while (true) {
            Collection<Node> nodes = client.describeCluster().nodes().get();
            if (nodes.size() == testConfig.numClusterNodes) {
                break;
            } else if (nodes.size() > testConfig.numClusterNodes) {
                throw new KafkaException("Expected to see " + testConfig.numClusterNodes + " nodes, but saw " + nodes.size());
            }
            Thread.sleep(1);
            log.info("Saw only {} cluster nodes. Waiting to see {}.", nodes.size(), testConfig.numClusterNodes);
        }
        tryFeature("createTopics", testConfig.createTopicsSupported, new Invoker() {
            @Override
            public void invoke() throws Throwable {
                try {
                    client.createTopics(Collections.singleton(new NewTopic("newtopic", 1, (short) 1))).all().get();
                } catch (ExecutionException e) {
                    throw e.getCause();
                }
            }
        }, new ResultTester() {
            @Override
            public void test() throws Throwable {
                // Retry describeTopics until the newly created topic becomes visible.
                while (true) {
                    try {
                        client.describeTopics(Collections.singleton("newtopic")).all().get();
                        break;
                    } catch (ExecutionException e) {
                        if (e.getCause() instanceof UnknownTopicOrPartitionException)
                            continue;
                        throw e;
                    }
                }
            }
        });
        // Verify that listTopics eventually reports the new topic as a non-internal topic.
        while (true) {
            Collection<TopicListing> listings = client.listTopics().listings().get();
            if (!testConfig.createTopicsSupported)
                break;
            boolean foundNewTopic = false;
            for (TopicListing listing : listings) {
                if (listing.name().equals("newtopic")) {
                    if (listing.isInternal())
                        throw new KafkaException("Did not expect newtopic to be an internal topic.");
                    foundNewTopic = true;
                }
            }
            if (foundNewTopic)
                break;
            Thread.sleep(1);
            log.info("Did not see newtopic. Retrying listTopics...");
        }
        tryFeature("describeAclsSupported", testConfig.describeAclsSupported, new Invoker() {
            @Override
            public void invoke() throws Throwable {
                try {
                    client.describeAcls(AclBindingFilter.ANY).values().get();
                } catch (ExecutionException e) {
                    // A SecurityDisabledException just means no authorizer is configured on the broker.
                    if (e.getCause() instanceof SecurityDisabledException)
                        return;
                    throw e.getCause();
                }
            }
        });
    }
}
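The test leaves "newtopic" behind once the feature checks pass. If cleanup is needed, a minimal sketch of a hypothetical helper using the AdminClient deleteTopics API (reusing the client from the try-with-resources block above; not part of the original test):

// Hypothetical cleanup helper; not part of the original test.
void deleteNewTopic(AdminClient client) throws Throwable {
    try {
        client.deleteTopics(Collections.singleton("newtopic")).all().get();
    } catch (ExecutionException e) {
        // Unwrap the ExecutionException, mirroring the pattern used in the test.
        throw e.getCause();
    }
}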
Use of org.apache.kafka.clients.admin.AdminClient in project eventapis by kloiasoft.
The class Eventapis, method main.
public static void main(String[] args) throws ExecutionException, InterruptedException {
    Map<String, Object> props = new HashMap<>();
    // list of host:port pairs used for establishing the initial connections
    // to the Kafka cluster
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "kafka-local:9092");
    // props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, JsonSerializer.class);
    // props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, JsonSerializer.class);
    // maximum time to block, after which the client throws a TimeoutException
    props.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, 50000);
    AdminClient adminClient = AdminClient.create(props);
    adminClient.describeCluster();
    Collection<TopicListing> topicListings = adminClient.listTopics().listings().get();
    System.out.println(topicListings);
}
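Note that describeCluster() above is invoked but its result is discarded. A minimal sketch of awaiting the returned futures with the same adminClient (the printed output is illustrative; imports for DescribeClusterResult and Node are assumed):

DescribeClusterResult cluster = adminClient.describeCluster();
// Each accessor returns a KafkaFuture; get() blocks until the broker responds.
String clusterId = cluster.clusterId().get();
Collection<Node> nodes = cluster.nodes().get();
Node controller = cluster.controller().get();
System.out.println("Cluster " + clusterId + ": " + nodes.size() + " node(s), controller " + controller);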
Use of org.apache.kafka.clients.admin.AdminClient in project flink by apache.
The class KafkaEnumeratorTest, method testKafkaClientProperties.
@Test
public void testKafkaClientProperties() throws Exception {
    Properties properties = new Properties();
    String clientIdPrefix = "test-prefix";
    Integer defaultTimeoutMs = 99999;
    properties.setProperty(KafkaSourceOptions.CLIENT_ID_PREFIX.key(), clientIdPrefix);
    properties.setProperty(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, String.valueOf(defaultTimeoutMs));
    try (MockSplitEnumeratorContext<KafkaPartitionSplit> context = new MockSplitEnumeratorContext<>(NUM_SUBTASKS);
            KafkaSourceEnumerator enumerator = createEnumerator(context, ENABLE_PERIODIC_PARTITION_DISCOVERY, PRE_EXISTING_TOPICS, Collections.emptySet(), properties)) {
        enumerator.start();
        // The enumerator builds its internal AdminClient from the supplied properties;
        // verify that the client id prefix and request timeout took effect.
        AdminClient adminClient = (AdminClient) Whitebox.getInternalState(enumerator, "adminClient");
        assertNotNull(adminClient);
        String clientId = (String) Whitebox.getInternalState(adminClient, "clientId");
        assertNotNull(clientId);
        assertTrue(clientId.startsWith(clientIdPrefix));
        assertEquals(defaultTimeoutMs, Whitebox.getInternalState(adminClient, "defaultApiTimeoutMs"));
    }
}
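The settings exercised by the test correspond to the AdminClient's client.id and request.timeout.ms configuration. A minimal sketch of configuring a standalone AdminClient the same way (the bootstrap address and client id value are assumptions; Flink itself derives the id from the configured prefix):

Properties adminProps = new Properties();
adminProps.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
adminProps.put(AdminClientConfig.CLIENT_ID_CONFIG, "test-prefix-admin-client");
adminProps.put(AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG, "99999");
try (AdminClient adminClient = AdminClient.create(adminProps)) {
    // Any simple request confirms the client is usable with these settings.
    adminClient.describeCluster().nodes().get();
}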
Use of org.apache.kafka.clients.admin.AdminClient in project flink by apache.
The class KafkaSourceReaderTest, method testCommitOffsetsWithoutAliveFetchers.
// -----------------------------------------
@Test
void testCommitOffsetsWithoutAliveFetchers() throws Exception {
    final String groupId = "testCommitOffsetsWithoutAliveFetchers";
    try (KafkaSourceReader<Integer> reader = (KafkaSourceReader<Integer>) createReader(Boundedness.CONTINUOUS_UNBOUNDED, groupId)) {
        KafkaPartitionSplit split = new KafkaPartitionSplit(new TopicPartition(TOPIC, 0), 0, NUM_RECORDS_PER_SPLIT);
        reader.addSplits(Collections.singletonList(split));
        reader.notifyNoMoreSplits();
        ReaderOutput<Integer> output = new TestingReaderOutput<>();
        InputStatus status;
        do {
            status = reader.pollNext(output);
        } while (status != InputStatus.NOTHING_AVAILABLE);
        pollUntil(reader, output, () -> reader.getNumAliveFetchers() == 0, "The split fetcher did not exit before timeout.");
        reader.snapshotState(100L);
        reader.notifyCheckpointComplete(100L);
        // Due to a bug in KafkaConsumer, when the consumer closes, the offset commit callback
        // won't be fired, so the offsetsToCommit map won't be cleaned. To make the test
        // stable, we add a split whose starting offset is the log end offset, so the
        // split fetcher won't become idle and exit after commitOffsetAsync is invoked from
        // notifyCheckpointComplete().
        reader.addSplits(Collections.singletonList(new KafkaPartitionSplit(new TopicPartition(TOPIC, 0), NUM_RECORDS_PER_SPLIT)));
        pollUntil(reader, output, () -> reader.getOffsetsToCommit().isEmpty(), "The offset commit did not finish before timeout.");
    }
    // Verify the committed offsets.
    try (AdminClient adminClient = KafkaSourceTestEnv.getAdminClient()) {
        Map<TopicPartition, OffsetAndMetadata> committedOffsets = adminClient.listConsumerGroupOffsets(groupId).partitionsToOffsetAndMetadata().get();
        assertThat(committedOffsets).hasSize(1);
        assertThat(committedOffsets.values()).extracting(OffsetAndMetadata::offset).allMatch(offset -> offset == NUM_RECORDS_PER_SPLIT);
    }
}
Use of org.apache.kafka.clients.admin.AdminClient in project flink by apache.
The class KafkaSourceReaderTest, method testOffsetCommitOnCheckpointComplete.
@Test
void testOffsetCommitOnCheckpointComplete() throws Exception {
    final String groupId = "testOffsetCommitOnCheckpointComplete";
    try (KafkaSourceReader<Integer> reader = (KafkaSourceReader<Integer>) createReader(Boundedness.CONTINUOUS_UNBOUNDED, groupId)) {
        reader.addSplits(getSplits(numSplits, NUM_RECORDS_PER_SPLIT, Boundedness.CONTINUOUS_UNBOUNDED));
        ValidatingSourceOutput output = new ValidatingSourceOutput();
        long checkpointId = 0;
        do {
            checkpointId++;
            reader.pollNext(output);
            // Create a checkpoint for each message consumption, but do not complete them.
            reader.snapshotState(checkpointId);
        } while (output.count() < totalNumRecords);
        // The completion of the last checkpoint should subsume all the previous checkpoints.
        assertThat(reader.getOffsetsToCommit()).hasSize((int) checkpointId);
        long lastCheckpointId = checkpointId;
        waitUtil(() -> {
            try {
                reader.notifyCheckpointComplete(lastCheckpointId);
            } catch (Exception exception) {
                throw new RuntimeException("Caught unexpected exception when polling from the reader", exception);
            }
            return reader.getOffsetsToCommit().isEmpty();
        }, Duration.ofSeconds(60), Duration.ofSeconds(1), "The offset commit did not finish before timeout.");
    }
    // Verify the committed offsets.
    try (AdminClient adminClient = KafkaSourceTestEnv.getAdminClient()) {
        Map<TopicPartition, OffsetAndMetadata> committedOffsets = adminClient.listConsumerGroupOffsets(groupId).partitionsToOffsetAndMetadata().get();
        assertThat(committedOffsets).hasSize(numSplits);
        assertThat(committedOffsets.values()).extracting(OffsetAndMetadata::offset).allMatch(offset -> offset == NUM_RECORDS_PER_SPLIT);
    }
}
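Both reader tests fetch every committed offset for the group. When only specific partitions matter, the lookup can be narrowed with ListConsumerGroupOffsetsOptions; a minimal sketch, with placeholder group and topic names and an AdminClient obtained as in the tests above:

ListConsumerGroupOffsetsOptions options = new ListConsumerGroupOffsetsOptions()
        .topicPartitions(Collections.singletonList(new TopicPartition("some-topic", 0)));
Map<TopicPartition, OffsetAndMetadata> committed = adminClient
        .listConsumerGroupOffsets("some-group", options)
        .partitionsToOffsetAndMetadata()
        .get();
committed.forEach((tp, meta) -> System.out.println(tp + " -> " + meta.offset()));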