Use of org.apache.kafka.clients.admin.ConfigEntry in project ksql by confluentinc.
Class KafkaTopicClientImplTest, method defaultConfigEntry.
private ConfigEntry defaultConfigEntry(final String key, final String value) {
    final ConfigEntry config = mock(ConfigEntry.class);
    expect(config.name()).andReturn(key);
    expect(config.value()).andReturn(value);
    expect(config.source()).andReturn(ConfigEntry.ConfigSource.DEFAULT_CONFIG);
    replay(config);
    return config;
}
Use of org.apache.kafka.clients.admin.ConfigEntry in project ksql by confluentinc.
Class KafkaTopicClientImplTest, method overriddenConfigEntry.
private ConfigEntry overriddenConfigEntry(final String key, final String value) {
    final ConfigEntry config = mock(ConfigEntry.class);
    expect(config.name()).andReturn(key);
    expect(config.value()).andReturn(value);
    expect(config.source()).andReturn(ConfigEntry.ConfigSource.DYNAMIC_TOPIC_CONFIG);
    replay(config);
    return config;
}
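These two EasyMock helpers differ only in the ConfigEntry.ConfigSource they report, which is what lets a test tell broker defaults apart from per-topic overrides. The snippet below is a minimal sketch, not taken from the ksql test itself, of how the mocked entries could be combined into an admin-client Config and filtered down to the overridden entries via source():

// Sketch only: keep just the entries that were explicitly set on the topic.
final Config topicConfig = new Config(Arrays.asList(
        defaultConfigEntry("retention.ms", "604800000"),
        overriddenConfigEntry("cleanup.policy", "compact")));

final Map<String, String> overrides = new HashMap<>();
for (final ConfigEntry entry : topicConfig.entries()) {
    if (entry.source() == ConfigEntry.ConfigSource.DYNAMIC_TOPIC_CONFIG) {
        overrides.put(entry.name(), entry.value());
    }
}
// overrides now holds only {"cleanup.policy" -> "compact"}.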
Use of org.apache.kafka.clients.admin.ConfigEntry in project strimzi by strimzi.
Class TopicSerializationTest, method testFromTopicMetadata.
@Test
public void testFromTopicMetadata() {
    List<ConfigEntry> entries = new ArrayList<>();
    entries.add(new ConfigEntry("foo", "bar"));
    Config topicConfig = new Config(entries);
    TopicMetadata meta = Utils.getTopicMetadata("test-topic", topicConfig);
    Topic topic = TopicSerialization.fromTopicMetadata(meta);
    assertEquals(new TopicName("test-topic"), topic.getTopicName());
    // Null map name because Kafka doesn't know about the map
    assertNull(topic.getMapName());
    assertEquals(singletonMap("foo", "bar"), topic.getConfig());
    assertEquals(2, topic.getNumPartitions());
    assertEquals(3, topic.getNumReplicas());
}
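For the assertion on getConfig() to pass, fromTopicMetadata has to flatten the admin-client Config back into a plain Map<String, String>. The strimzi implementation is not shown in this excerpt; a minimal sketch of that conversion, using only the public ConfigEntry accessors, looks like this:

// Sketch only: flatten an org.apache.kafka.clients.admin.Config into a Map.
private static Map<String, String> configToMap(final Config config) {
    final Map<String, String> map = new HashMap<>();
    for (final ConfigEntry entry : config.entries()) {
        map.put(entry.name(), entry.value());
    }
    return map;
}

// configToMap(new Config(singletonList(new ConfigEntry("foo", "bar"))))
// yields singletonMap("foo", "bar"), matching the assertion above.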
Use of org.apache.kafka.clients.admin.ConfigEntry in project strimzi by strimzi.
Class Utils, method getTopicMetadata.
public static TopicMetadata getTopicMetadata(Topic kubeTopic) {
    List<Node> nodes = new ArrayList<>();
    for (int nodeId = 0; nodeId < kubeTopic.getNumReplicas(); nodeId++) {
        nodes.add(new Node(nodeId, "localhost", 9092 + nodeId));
    }
    List<TopicPartitionInfo> partitions = new ArrayList<>();
    for (int partitionId = 0; partitionId < kubeTopic.getNumPartitions(); partitionId++) {
        partitions.add(new TopicPartitionInfo(partitionId, nodes.get(0), nodes, nodes));
    }
    List<ConfigEntry> configs = new ArrayList<>();
    for (Map.Entry<String, String> entry : kubeTopic.getConfig().entrySet()) {
        configs.add(new ConfigEntry(entry.getKey(), entry.getValue()));
    }
    return new TopicMetadata(new TopicDescription(kubeTopic.getTopicName().toString(), false, partitions),
            new Config(configs));
}
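The returned TopicMetadata wraps a standard TopicDescription and Config; the wrapper's accessors are not part of this excerpt, so the sketch below builds the same kind of Kafka admin objects directly and inspects them, to show what the helper assembles. The topic name and config values here are illustrative only:

// Sketch only: the admin objects the helper constructs, read back via the admin API.
List<Node> nodes = Arrays.asList(new Node(0, "localhost", 9092), new Node(1, "localhost", 9093));
TopicPartitionInfo partition0 = new TopicPartitionInfo(0, nodes.get(0), nodes, nodes);
TopicDescription description = new TopicDescription("my-topic", false, singletonList(partition0));
Config config = new Config(singletonList(new ConfigEntry("cleanup.policy", "compact")));

assertEquals(1, description.partitions().size());
assertEquals(2, description.partitions().get(0).replicas().size());
assertEquals("compact", config.get("cleanup.policy").value());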
Use of org.apache.kafka.clients.admin.ConfigEntry in project strimzi by strimzi.
Class ControllerIT, method alterTopicConfig.
private void alterTopicConfig(TestContext context, String topicName, String configMapName) throws InterruptedException, ExecutionException {
    // Get the topic config
    ConfigResource configResource = topicConfigResource(topicName);
    org.apache.kafka.clients.admin.Config config = getTopicConfig(configResource);
    String key = "compression.type";
    Map<String, ConfigEntry> m = new HashMap<>();
    for (ConfigEntry entry : config.entries()) {
        m.put(entry.name(), entry);
    }
    final String changedValue;
    if ("snappy".equals(m.get(key).value())) {
        changedValue = "lz4";
    } else {
        changedValue = "snappy";
    }
    m.put(key, new ConfigEntry(key, changedValue));
    LOGGER.info("Changing topic config {} to {}", key, changedValue);
    // Update the topic config
    AlterConfigsResult cfg = adminClient.alterConfigs(singletonMap(configResource, new org.apache.kafka.clients.admin.Config(m.values())));
    cfg.all().get();
    // Wait for the configmap to be modified
    waitFor(context, () -> {
        ConfigMap cm = kubeClient.configMaps().inNamespace(NAMESPACE).withName(configMapName).get();
        LOGGER.info("Polled configmap {}, waiting for config change", configMapName);
        String gotValue = TopicSerialization.fromConfigMap(cm).getConfig().get(key);
        LOGGER.info("Got value {}", gotValue);
        return changedValue.equals(gotValue);
    }, timeout, "Expected the configmap to have been modified by now");
}
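The helpers topicConfigResource and getTopicConfig are used above but not shown in this excerpt. Assuming they are thin wrappers over the AdminClient describeConfigs call, a plausible minimal implementation would be:

// Sketch only: plausible implementations of the helpers used above.
private static ConfigResource topicConfigResource(String topicName) {
    return new ConfigResource(ConfigResource.Type.TOPIC, topicName);
}

private org.apache.kafka.clients.admin.Config getTopicConfig(ConfigResource configResource)
        throws InterruptedException, ExecutionException {
    // describeConfigs() returns one future per resource; block for the single topic.
    return adminClient.describeConfigs(singletonList(configResource))
            .values().get(configResource).get();
}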