Use of org.apache.kafka.common.config.ConfigResource in project kafka by apache.
In the class ConfigurationsDelta, the method replay:
public void replay(RemoveTopicRecord record, String topicName) {
    ConfigResource resource = new ConfigResource(Type.TOPIC, topicName);
    ConfigurationImage configImage =
        image.resourceData().getOrDefault(resource, ConfigurationImage.EMPTY);
    // Register a delta for this topic's config resource and mark every config deleted.
    ConfigurationDelta delta =
        changes.computeIfAbsent(resource, __ -> new ConfigurationDelta(configImage));
    delta.deleteAll();
}
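For reference, ConfigResource is an immutable (type, name) pair, which is why it works as the map key above. A minimal, self-contained sketch; the topic name and broker id below are hypothetical:

import org.apache.kafka.common.config.ConfigResource;

public class ConfigResourceSketch {
    public static void main(String[] args) {
        // Equal (type, name) pairs are equal keys, which is what lets replay()
        // and apply() above index per-resource deltas in a HashMap.
        ConfigResource topic = new ConfigResource(ConfigResource.Type.TOPIC, "orders"); // hypothetical topic
        ConfigResource broker = new ConfigResource(ConfigResource.Type.BROKER, "1");    // hypothetical broker id
        System.out.println(topic.type() + " / " + topic.name());
        System.out.println("default resource? " + broker.isDefault()); // true only for an empty name
    }
}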
Use of org.apache.kafka.common.config.ConfigResource in project kafka by apache.
In the class ConfigurationsDelta, the method apply:
public ConfigurationsImage apply() {
    Map<ConfigResource, ConfigurationImage> newData = new HashMap<>();
    // Pass 1: carry over resources from the existing image, applying any
    // pending delta; drop resources whose configs are now empty.
    for (Entry<ConfigResource, ConfigurationImage> entry : image.resourceData().entrySet()) {
        ConfigResource resource = entry.getKey();
        ConfigurationDelta delta = changes.get(resource);
        if (delta == null) {
            newData.put(resource, entry.getValue());
        } else {
            ConfigurationImage newImage = delta.apply();
            if (!newImage.isEmpty()) {
                newData.put(resource, newImage);
            }
        }
    }
    // Pass 2: add resources that appear only in the delta, i.e. ones created
    // since the last image was taken.
    for (Entry<ConfigResource, ConfigurationDelta> entry : changes.entrySet()) {
        if (!newData.containsKey(entry.getKey())) {
            ConfigurationImage newImage = entry.getValue().apply();
            if (!newImage.isEmpty()) {
                newData.put(entry.getKey(), newImage);
            }
        }
    }
    return new ConfigurationsImage(newData);
}
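The apply method above is an instance of a two-pass image/delta merge: the first loop carries over or updates resources already in the image, the second adds resources that exist only in the delta. A simplified illustration of the same shape with plain string maps; all names are hypothetical, not Kafka API, and the empty-image dropping is omitted:

import java.util.HashMap;
import java.util.Map;

public class TwoPassMergeSketch {
    public static void main(String[] args) {
        Map<String, String> image = Map.of("a", "1", "b", "2");    // existing state
        Map<String, String> changes = Map.of("b", "20", "c", "3"); // pending deltas

        Map<String, String> merged = new HashMap<>();
        // Pass 1: carry over unchanged entries, apply deltas to changed ones.
        for (Map.Entry<String, String> e : image.entrySet()) {
            String delta = changes.get(e.getKey());
            merged.put(e.getKey(), delta == null ? e.getValue() : delta);
        }
        // Pass 2: add entries that exist only in the delta.
        for (Map.Entry<String, String> e : changes.entrySet()) {
            merged.putIfAbsent(e.getKey(), e.getValue());
        }
        System.out.println(merged); // {a=1, b=20, c=3}, in some map order
    }
}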
Use of org.apache.kafka.common.config.ConfigResource in project kafka by apache.
In the class InternalTopicIntegrationTest, the method getTopicProperties:
private Properties getTopicProperties(final String changelog) {
    try (final Admin adminClient = createAdminClient()) {
        final ConfigResource configResource = new ConfigResource(ConfigResource.Type.TOPIC, changelog);
        try {
            final Config config = adminClient.describeConfigs(Collections.singletonList(configResource))
                .values().get(configResource).get();
            final Properties properties = new Properties();
            // Keep only configs set dynamically on the topic itself, ignoring
            // broker defaults and static broker configs.
            for (final ConfigEntry configEntry : config.entries()) {
                if (configEntry.source() == ConfigEntry.ConfigSource.DYNAMIC_TOPIC_CONFIG) {
                    properties.put(configEntry.name(), configEntry.value());
                }
            }
            return properties;
        } catch (final InterruptedException | ExecutionException e) {
            throw new RuntimeException(e);
        }
    }
}
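The same describeConfigs call works outside a test harness. A hedged usage sketch, assuming a broker on localhost:9092 and a topic named "orders" (both hypothetical):

import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.Config;
import org.apache.kafka.common.config.ConfigResource;

public class DescribeTopicConfigSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // hypothetical broker
        try (Admin admin = Admin.create(props)) {
            ConfigResource resource = new ConfigResource(ConfigResource.Type.TOPIC, "orders"); // hypothetical topic
            Config config = admin.describeConfigs(Collections.singletonList(resource))
                .all().get()   // Map<ConfigResource, Config>
                .get(resource);
            config.entries().forEach(e ->
                System.out.println(e.name() + " = " + e.value() + " (" + e.source() + ")"));
        }
    }
}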
Use of org.apache.kafka.common.config.ConfigResource in project kafka by apache.
In the class JoinStoreIntegrationTest, the method streamJoinChangelogTopicShouldBeConfiguredWithDeleteOnlyCleanupPolicy:
@Test
public void streamJoinChangelogTopicShouldBeConfiguredWithDeleteOnlyCleanupPolicy() throws Exception {
    STREAMS_CONFIG.put(StreamsConfig.APPLICATION_ID_CONFIG, APP_ID + "-changelog-cleanup-policy");
    final StreamsBuilder builder = new StreamsBuilder();
    final KStream<String, Integer> left =
        builder.stream(INPUT_TOPIC_LEFT, Consumed.with(Serdes.String(), Serdes.Integer()));
    final KStream<String, Integer> right =
        builder.stream(INPUT_TOPIC_RIGHT, Consumed.with(Serdes.String(), Serdes.Integer()));
    final CountDownLatch latch = new CountDownLatch(1);
    left.join(
        right,
        Integer::sum,
        JoinWindows.of(ofMillis(100)),
        StreamJoined.with(Serdes.String(), Serdes.Integer(), Serdes.Integer()).withStoreName("join-store"));
    try (final KafkaStreams kafkaStreams = new KafkaStreams(builder.build(), STREAMS_CONFIG);
         final Admin admin = Admin.create(ADMIN_CONFIG)) {
        kafkaStreams.setStateListener((newState, oldState) -> {
            if (newState == KafkaStreams.State.RUNNING) {
                latch.countDown();
            }
        });
        kafkaStreams.start();
        latch.await();
        // The two changelog topics backing the join's "this" and "other" window stores.
        final Collection<ConfigResource> changelogTopics = Stream.of(
                "join-store-integration-test-changelog-cleanup-policy-join-store-this-join-store-changelog",
                "join-store-integration-test-changelog-cleanup-policy-join-store-other-join-store-changelog")
            .map(name -> new ConfigResource(Type.TOPIC, name))
            .collect(Collectors.toList());
        final Map<ConfigResource, org.apache.kafka.clients.admin.Config> topicConfig =
            admin.describeConfigs(changelogTopics).all().get();
        topicConfig.values().forEach(tc -> assertThat(tc.get("cleanup.policy").value(), is("delete")));
    }
}
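The test only asserts cleanup.policy; if a topic config ever needed to be changed rather than read, the Admin client's incrementalAlterConfigs takes the same ConfigResource as its key. A sketch under the same hypothetical broker and topic names as above:

import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.AlterConfigOp;
import org.apache.kafka.clients.admin.ConfigEntry;
import org.apache.kafka.common.config.ConfigResource;

public class AlterCleanupPolicySketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // hypothetical broker
        try (Admin admin = Admin.create(props)) {
            ConfigResource topic = new ConfigResource(ConfigResource.Type.TOPIC, "orders"); // hypothetical topic
            AlterConfigOp setPolicy = new AlterConfigOp(
                new ConfigEntry("cleanup.policy", "delete"), AlterConfigOp.OpType.SET);
            admin.incrementalAlterConfigs(
                    Collections.singletonMap(topic, Collections.singletonList(setPolicy)))
                .all().get();
        }
    }
}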
Use of org.apache.kafka.common.config.ConfigResource in project kafka by apache.
In the class InternalTopicManagerTest, the method shouldThrowTimeoutExceptionWhenFuturesNeverCompleteDuringValidation:
@Test
public void shouldThrowTimeoutExceptionWhenFuturesNeverCompleteDuringValidation() {
    final AdminClient admin = EasyMock.createNiceMock(AdminClient.class);
    final MockTime time = new MockTime(
        (Integer) config.get(StreamsConfig.consumerPrefix(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG)) / 3);
    final InternalTopicManager topicManager = new InternalTopicManager(time, admin, new StreamsConfig(config));
    // describeTopics() returns a future that never completes, so validation
    // must eventually give up and throw.
    final KafkaFutureImpl<TopicDescription> topicDescriptionFutureThatNeverCompletes = new KafkaFutureImpl<>();
    EasyMock.expect(admin.describeTopics(Collections.singleton(topic1)))
        .andStubAnswer(() -> new MockDescribeTopicsResult(
            mkMap(mkEntry(topic1, topicDescriptionFutureThatNeverCompletes))));
    // describeConfigs(), by contrast, completes successfully.
    final KafkaFutureImpl<Config> topicConfigSuccessfulFuture = new KafkaFutureImpl<>();
    topicConfigSuccessfulFuture.complete(new Config(repartitionTopicConfig().entrySet().stream()
        .map(entry -> new ConfigEntry(entry.getKey(), entry.getValue()))
        .collect(Collectors.toSet())));
    final ConfigResource topicResource = new ConfigResource(Type.TOPIC, topic1);
    EasyMock.expect(admin.describeConfigs(Collections.singleton(topicResource)))
        .andStubAnswer(() -> new MockDescribeConfigsResult(
            mkMap(mkEntry(topicResource, topicConfigSuccessfulFuture))));
    EasyMock.replay(admin);
    final InternalTopicConfig internalTopicConfig = setupRepartitionTopicConfig(topic1, 1);
    assertThrows(TimeoutException.class,
        () -> topicManager.validate(Collections.singletonMap(topic1, internalTopicConfig)));
}
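The test simulates a hang with a future that never completes; in application code, both the request and the client-side wait can be bounded explicitly. A sketch, assuming the same hypothetical broker and topic as above:

import java.util.Collections;
import java.util.Properties;
import java.util.concurrent.TimeUnit;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.Config;
import org.apache.kafka.clients.admin.DescribeConfigsOptions;
import org.apache.kafka.common.config.ConfigResource;

public class BoundedDescribeSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // hypothetical broker
        try (Admin admin = Admin.create(props)) {
            ConfigResource resource = new ConfigResource(ConfigResource.Type.TOPIC, "orders"); // hypothetical topic
            Config config = admin
                .describeConfigs(Collections.singleton(resource),
                    new DescribeConfigsOptions().timeoutMs(5_000)) // bound the request itself
                .values().get(resource)
                .get(10, TimeUnit.SECONDS); // bound the wait on the future
            System.out.println(config);
        }
    }
}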