Example usage of org.apache.kafka.clients.admin.Admin in the Apache Kafka project, taken from the class JoinStoreIntegrationTest, method streamJoinChangelogTopicShouldBeConfiguredWithDeleteOnlyCleanupPolicy.
@Test
public void streamJoinChangelogTopicShouldBeConfiguredWithDeleteOnlyCleanupPolicy() throws Exception {
    STREAMS_CONFIG.put(StreamsConfig.APPLICATION_ID_CONFIG, APP_ID + "-changelog-cleanup-policy");

    // Build a stream-stream join topology; the join backs its two window stores with changelog topics.
    final StreamsBuilder topologyBuilder = new StreamsBuilder();
    final KStream<String, Integer> leftInput = topologyBuilder.stream(INPUT_TOPIC_LEFT, Consumed.with(Serdes.String(), Serdes.Integer()));
    final KStream<String, Integer> rightInput = topologyBuilder.stream(INPUT_TOPIC_RIGHT, Consumed.with(Serdes.String(), Serdes.Integer()));
    leftInput.join(
        rightInput,
        Integer::sum,
        JoinWindows.of(ofMillis(100)),
        StreamJoined.with(Serdes.String(), Serdes.Integer(), Serdes.Integer()).withStoreName("join-store"));

    final CountDownLatch runningLatch = new CountDownLatch(1);
    try (final KafkaStreams streams = new KafkaStreams(topologyBuilder.build(), STREAMS_CONFIG);
         final Admin adminClient = Admin.create(ADMIN_CONFIG)) {
        // Release the latch once the application reaches RUNNING, i.e. after the internal topics were created.
        streams.setStateListener((newState, oldState) -> {
            if (newState == KafkaStreams.State.RUNNING) {
                runningLatch.countDown();
            }
        });
        streams.start();
        runningLatch.await();

        // Both join changelog topics must use the "delete" cleanup policy (windowed stores expire by retention).
        final Collection<ConfigResource> changelogResources = Stream.of(
                "join-store-integration-test-changelog-cleanup-policy-join-store-this-join-store-changelog",
                "join-store-integration-test-changelog-cleanup-policy-join-store-other-join-store-changelog")
            .map(topicName -> new ConfigResource(Type.TOPIC, topicName))
            .collect(Collectors.toList());
        final Map<ConfigResource, org.apache.kafka.clients.admin.Config> describedConfigs =
            adminClient.describeConfigs(changelogResources).all().get();
        describedConfigs.values().forEach(topicConfig ->
            assertThat(topicConfig.get("cleanup.policy").value(), is("delete")));
    }
}
Example usage of org.apache.kafka.clients.admin.Admin in the Apache Kafka project, taken from the class InternalTopicIntegrationTest, method getTopicProperties.
/**
 * Fetches the dynamic (topic-level override) configuration of the given changelog topic.
 *
 * @param changelog the name of the changelog topic to describe
 * @return the topic's configuration entries whose source is {@code DYNAMIC_TOPIC_CONFIG};
 *         broker defaults and static configs are excluded
 */
private Properties getTopicProperties(final String changelog) {
    try (final Admin adminClient = createAdminClient()) {
        final ConfigResource configResource = new ConfigResource(ConfigResource.Type.TOPIC, changelog);
        try {
            // Blocks until the broker answers the describe-configs request.
            final Config config = adminClient.describeConfigs(Collections.singletonList(configResource)).values().get(configResource).get();
            final Properties properties = new Properties();
            for (final ConfigEntry configEntry : config.entries()) {
                // Keep only per-topic overrides, not inherited broker defaults.
                if (configEntry.source() == ConfigEntry.ConfigSource.DYNAMIC_TOPIC_CONFIG) {
                    properties.put(configEntry.name(), configEntry.value());
                }
            }
            return properties;
        } catch (final InterruptedException e) {
            // Restore the interrupt status so callers up the stack can observe the interruption.
            Thread.currentThread().interrupt();
            throw new RuntimeException(e);
        } catch (final ExecutionException e) {
            throw new RuntimeException(e);
        }
    }
}
Example usage of org.apache.kafka.clients.admin.Admin in the Apache Kafka project, taken from the class KafkaEmbedded, method createTopic.
/**
 * Create a Kafka topic with the given parameters.
 *
 * @param topic       The name of the topic.
 * @param partitions  The number of partitions for this topic.
 * @param replication The replication factor for (partitions of) this topic.
 * @param topicConfig Additional topic-level configuration settings.
 */
public void createTopic(final String topic, final int partitions, final int replication, final Map<String, String> topicConfig) {
    log.debug("Creating topic { name: {}, partitions: {}, replication: {}, config: {} }", topic, partitions, replication, topicConfig);
    final NewTopic newTopic = new NewTopic(topic, partitions, (short) replication);
    newTopic.configs(topicConfig);
    try (final Admin adminClient = createAdminClient()) {
        // Blocks until the broker confirms that topic creation completed (or failed).
        adminClient.createTopics(Collections.singletonList(newTopic)).all().get();
    } catch (final InterruptedException e) {
        // Restore the interrupt status so callers up the stack can observe the interruption.
        Thread.currentThread().interrupt();
        throw new RuntimeException(e);
    } catch (final ExecutionException e) {
        throw new RuntimeException(e);
    }
}
Example usage of org.apache.kafka.clients.admin.Admin in the Apache Kafka project, taken from the class ClientUtilsTest, method fetchEndOffsetsShouldReturnEmptyMapIfPartitionsAreEmpty.
@Test
public void fetchEndOffsetsShouldReturnEmptyMapIfPartitionsAreEmpty() {
    // With no partitions requested, no admin round trip happens and the result must be empty.
    final Admin mockAdmin = EasyMock.createMock(AdminClient.class);
    assertTrue(fetchEndOffsets(emptySet(), mockAdmin).isEmpty());
}
Example usage of org.apache.kafka.clients.admin.Admin in the Apache Kafka project, taken from the class ClientUtilsTest, method fetchEndOffsetsShouldRethrowInterruptedExceptionAsStreamsException.
@Test
public void fetchEndOffsetsShouldRethrowInterruptedExceptionAsStreamsException() throws Exception {
    final Admin mockAdmin = EasyMock.createMock(AdminClient.class);
    final ListOffsetsResult offsetsResult = EasyMock.createNiceMock(ListOffsetsResult.class);
    final KafkaFuture<Map<TopicPartition, ListOffsetsResultInfo>> offsetsFuture = EasyMock.createMock(KafkaFuture.class);

    // Stub the admin call chain so that waiting on the offsets future throws InterruptedException.
    EasyMock.expect(mockAdmin.listOffsets(EasyMock.anyObject())).andStubReturn(offsetsResult);
    EasyMock.expect(offsetsResult.all()).andStubReturn(offsetsFuture);
    EasyMock.expect(offsetsFuture.get()).andThrow(new InterruptedException());
    replay(mockAdmin, offsetsResult, offsetsFuture);

    // The interruption must surface to callers as a StreamsException.
    assertThrows(StreamsException.class, () -> fetchEndOffsets(PARTITIONS, mockAdmin));
    verify(mockAdmin);
}
Aggregations