Use of org.apache.kafka.clients.admin.NewTopic in the project strimzi (by strimzi).
Example taken from the class ControllerIT, method createTopic.
/**
 * Creates a single-partition, replication-factor-1 Kafka topic and blocks until the
 * operator has materialized the corresponding ConfigMap in the {@code NAMESPACE} namespace.
 *
 * @param context   the test context used by {@code waitFor} for failure reporting
 * @param topicName the Kafka topic to create
 * @return the name of the ConfigMap derived from the topic name
 * @throws InterruptedException if interrupted while waiting for topic creation
 * @throws ExecutionException   if the admin client fails to create the topic
 */
private String createTopic(TestContext context, String topicName) throws InterruptedException, ExecutionException {
    LOGGER.info("Creating topic {}", topicName);
    // Derive the expected ConfigMap name from the topic name, then create the topic
    // and block until the broker acknowledges it.
    String configMapName = new TopicName(topicName).asMapName().toString();
    adminClient.createTopics(singletonList(new NewTopic(topicName, 1, (short) 1))).all().get();
    // Poll Kubernetes until the operator creates the ConfigMap for the new topic.
    waitFor(context, () -> {
        ConfigMap topicMap = kubeClient.configMaps().inNamespace(NAMESPACE).withName(configMapName).get();
        LOGGER.info("Polled configmap {} waiting for creation", configMapName);
        return topicMap != null;
    }, timeout, "Expected the configmap to have been created by now");
    LOGGER.info("configmap {} has been created", configMapName);
    return configMapName;
}
Use of org.apache.kafka.clients.admin.NewTopic in the project samza (by apache).
Example taken from the class TestZkLocalApplicationRunner, method setUp.
/**
 * Prepares a fresh, uniquely named test environment before each test:
 * generates per-run app/topic names, builds one config per processor, connects
 * to ZooKeeper, creates the input/output topics, and initializes the ZK metadata store.
 */
@Override
public void setUp() {
    super.setUp();
    // Unique suffix so concurrent/repeated runs never collide on app ids or topic names.
    String uniqueTestId = UUID.randomUUID().toString();
    testStreamAppName = String.format("test-app-name-%s", uniqueTestId);
    testStreamAppId = String.format("test-app-id-%s", uniqueTestId);
    inputKafkaTopic = String.format("test-input-topic-%s", uniqueTestId);
    outputKafkaTopic = String.format("test-output-topic-%s", uniqueTestId);
    inputSinglePartitionKafkaTopic = String.format("test-input-single-partition-topic-%s", uniqueTestId);
    outputSinglePartitionKafkaTopic = String.format("test-output-single-partition-topic-%s", uniqueTestId);
    // Set up stream application config map with the given testStreamAppName, testStreamAppId and test kafka system
    // TODO: processorId should typically come up from a processorID generator as processor.id will be deprecated in 0.14.0+
    Map<String, String> configMap = buildStreamApplicationConfigMap(testStreamAppName, testStreamAppId, false, Optional.empty());
    // Reusing configMap is safe: MapConfig copies the entries, so each ApplicationConfig
    // snapshots its own processor id.
    configMap.put(JobConfig.PROCESSOR_ID, PROCESSOR_IDS[0]);
    applicationConfig1 = new ApplicationConfig(new MapConfig(configMap));
    configMap.put(JobConfig.PROCESSOR_ID, PROCESSOR_IDS[1]);
    applicationConfig2 = new ApplicationConfig(new MapConfig(configMap));
    configMap.put(JobConfig.PROCESSOR_ID, PROCESSOR_IDS[2]);
    applicationConfig3 = new ApplicationConfig(new MapConfig(configMap));
    // ZkUtils takes ownership of the client; it is closed when zkUtils is closed in teardown.
    ZkClient zkClient = new ZkClient(zkConnect(), ZK_CONNECTION_TIMEOUT_MS, ZK_CONNECTION_TIMEOUT_MS, new ZkStringSerializer());
    ZkKeyBuilder zkKeyBuilder = new ZkKeyBuilder(ZkJobCoordinatorFactory.getJobCoordinationZkPath(applicationConfig1));
    zkUtils = new ZkUtils(zkKeyBuilder, zkClient, ZK_CONNECTION_TIMEOUT_MS, ZK_SESSION_TIMEOUT_MS, new NoOpMetricsRegistry());
    zkUtils.connect();
    topicToPartitionCount = ImmutableMap.of(inputSinglePartitionKafkaTopic, 1, outputSinglePartitionKafkaTopic, 1, inputKafkaTopic, NUM_PARTITION, outputKafkaTopic, NUM_PARTITION);
    // Stream the map entries directly instead of building a throwaway ImmutableList
    // and re-looking each topic up in the map.
    List<NewTopic> newTopics = topicToPartitionCount.entrySet().stream()
        .map(entry -> new NewTopic(entry.getKey(), entry.getValue(), (short) 1))
        .collect(Collectors.toList());
    assertTrue("Encountered errors during test setup. Failed to create topics.", createTopics(newTopics));
    zkMetadataStore = new ZkMetadataStore(zkUtils.getKeyBuilder().getRootPath(), new MapConfig(configMap), new NoOpMetricsRegistry());
}
Use of org.apache.kafka.clients.admin.NewTopic in the project samza (by apache).
Example taken from the class TestStartpoint, method setUp.
/**
 * Prepares a fresh, uniquely named test environment before each test:
 * generates per-run app/topic names, builds one config per processor, and
 * creates the four input topics and the output topic.
 */
@Override
public void setUp() {
    super.setUp();
    // Unique suffix so concurrent/repeated runs never collide on app ids or topic names.
    String uniqueTestId = UUID.randomUUID().toString();
    testStreamAppName = String.format("test-app-name-%s", uniqueTestId);
    testStreamAppId = String.format("test-app-id-%s", uniqueTestId);
    inputKafkaTopic1 = String.format("test-input-topic1-%s", uniqueTestId);
    inputKafkaTopic2 = String.format("test-input-topic2-%s", uniqueTestId);
    inputKafkaTopic3 = String.format("test-input-topic3-%s", uniqueTestId);
    inputKafkaTopic4 = String.format("test-input-topic4-%s", uniqueTestId);
    outputKafkaTopic = String.format("test-output-topic-%s", uniqueTestId);
    // Set up stream application config map with the given testStreamAppName, testStreamAppId and test kafka system
    // TODO: processorId should typically come up from a processorID generator as processor.id will be deprecated in 0.14.0+
    Map<String, String> configMap = buildStreamApplicationConfigMap(testStreamAppName, testStreamAppId);
    // Reusing configMap is safe: MapConfig copies the entries, so each ApplicationConfig
    // snapshots its own processor id.
    configMap.put(JobConfig.PROCESSOR_ID, PROCESSOR_IDS[0]);
    applicationConfig1 = new ApplicationConfig(new MapConfig(configMap));
    configMap.put(JobConfig.PROCESSOR_ID, PROCESSOR_IDS[1]);
    applicationConfig2 = new ApplicationConfig(new MapConfig(configMap));
    configMap.put(JobConfig.PROCESSOR_ID, PROCESSOR_IDS[2]);
    applicationConfig3 = new ApplicationConfig(new MapConfig(configMap));
    configMap.put(JobConfig.PROCESSOR_ID, PROCESSOR_IDS[3]);
    applicationConfig4 = new ApplicationConfig(new MapConfig(configMap));
    ImmutableMap<String, Integer> topicToPartitionCount = ImmutableMap.<String, Integer>builder().put(inputKafkaTopic1, ZK_TEST_PARTITION_COUNT).put(inputKafkaTopic2, ZK_TEST_PARTITION_COUNT).put(inputKafkaTopic3, ZK_TEST_PARTITION_COUNT).put(inputKafkaTopic4, ZK_TEST_PARTITION_COUNT).put(outputKafkaTopic, ZK_TEST_PARTITION_COUNT).build();
    // Stream the map entries directly rather than iterating keySet() and re-looking
    // each topic up in the map.
    List<NewTopic> newTopics = topicToPartitionCount.entrySet().stream()
        .map(entry -> new NewTopic(entry.getKey(), entry.getValue(), (short) 1))
        .collect(Collectors.toList());
    assertTrue("Encountered errors during test setup. Failed to create topics.", createTopics(newTopics));
}
Use of org.apache.kafka.clients.admin.NewTopic in the project flink (by apache).
Example taken from the class KafkaSourceExternalContext, method createSinglePartitionTopic.
/**
 * Creates a new single-partition topic named {@code "<topicName>-<topicIndex>"} and
 * returns a data writer bound to its only partition (partition 0).
 *
 * @param topicIndex suffix distinguishing this topic from others created by the context
 * @return a writer targeting partition 0 of the freshly created topic
 * @throws Exception if the admin client fails to create the topic
 */
private KafkaPartitionDataWriter createSinglePartitionTopic(int topicIndex) throws Exception {
    String partitionTopic = topicName + "-" + topicIndex;
    LOG.info("Creating topic '{}'", partitionTopic);
    NewTopic topicSpec = new NewTopic(partitionTopic, 1, (short) 1);
    // Block until the broker acknowledges the topic creation.
    adminClient.createTopics(Collections.singletonList(topicSpec)).all().get();
    TopicPartition onlyPartition = new TopicPartition(partitionTopic, 0);
    return new KafkaPartitionDataWriter(getKafkaProducerProperties(topicIndex), onlyPartition);
}
Use of org.apache.kafka.clients.admin.NewTopic in the project flink (by apache).
Example taken from the class KafkaSourceReaderTest, method setup.
/**
 * One-time suite setup: starts the Kafka test environment, creates the test topic,
 * forces creation of the internal {@code __consumer_offsets} topic so the group
 * coordinator is available, and seeds the topic with test records.
 *
 * @throws Throwable if topic creation, coordinator readiness, or record production fails
 */
@BeforeAll
public static void setup() throws Throwable {
    KafkaSourceTestEnv.setup();
    try (AdminClient adminClient = KafkaSourceTestEnv.getAdminClient()) {
        // Await the result: createTopics is asynchronous, and the original
        // fire-and-forget call could race with produceToKafka below.
        adminClient.createTopics(Collections.singleton(new NewTopic(TOPIC, NUM_PARTITIONS, (short) 1))).all().get();
        // Use the admin client to trigger the creation of internal __consumer_offsets topic.
        // This makes sure that we won't see unavailable coordinator in the tests.
        waitUtil(() -> {
            try {
                adminClient.listConsumerGroupOffsets("AnyGroup").partitionsToOffsetAndMetadata().get();
            } catch (Exception e) {
                // Deliberately swallowed: failure just means the coordinator
                // is not ready yet, so keep polling until the timeout.
                return false;
            }
            return true;
        }, Duration.ofSeconds(60), "Waiting for offsets topic creation failed.");
    }
    KafkaSourceTestEnv.produceToKafka(getRecords(), StringSerializer.class, IntegerSerializer.class);
}
Aggregations