Use of org.apache.kafka.clients.admin.NewTopic in project apache-kafka-on-k8s by banzaicloud.
Class TopicAdminTest, method returnNullWithClusterAuthorizationFailure:
@Test
public void returnNullWithClusterAuthorizationFailure() {
    final NewTopic newTopic = TopicAdmin.defineTopic("myTopic").partitions(1).compacted().build();
    Cluster cluster = createCluster(1);
    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(cluster)) {
        env.kafkaClient().prepareMetadataUpdate(env.cluster(), Collections.<String>emptySet());
        env.kafkaClient().prepareResponse(createTopicResponseWithClusterAuthorizationException(newTopic));
        TopicAdmin admin = new TopicAdmin(null, env.adminClient());
        boolean created = admin.createTopic(newTopic);
        assertFalse(created);
    }
}
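The helpers createCluster(...) and createTopicResponseWithClusterAuthorizationException(...) are fixtures defined elsewhere in TopicAdminTest and are not shown here. As a rough sketch of the behavior under test, the same "authorization failure means the topic was not created" handling could be written directly against Kafka's AdminClient; the class name, broker address, and helper below are assumptions, not Connect's actual TopicAdmin implementation.

import java.util.Collections;
import java.util.Properties;
import java.util.concurrent.ExecutionException;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.common.errors.ClusterAuthorizationException;

public class CreateTopicSketch {
    // Hypothetical helper: returns true if the topic was created, false if the
    // principal lacks the cluster-level CREATE permission.
    static boolean tryCreate(AdminClient admin, NewTopic topic) throws InterruptedException {
        try {
            admin.createTopics(Collections.singleton(topic)).all().get();
            return true;
        } catch (ExecutionException e) {
            if (e.getCause() instanceof ClusterAuthorizationException) {
                return false; // treat a missing ACL as "not created" rather than failing
            }
            throw new RuntimeException(e.getCause());
        }
    }

    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder broker address
        try (AdminClient admin = AdminClient.create(props)) {
            System.out.println(tryCreate(admin, new NewTopic("myTopic", 1, (short) 1)));
        }
    }
}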
Use of org.apache.kafka.clients.admin.NewTopic in project apache-kafka-on-k8s by banzaicloud.
Class TopicAdminTest, method shouldCreateOneTopicWhenProvidedMultipleDefinitionsWithSameTopicName:
@Test
public void shouldCreateOneTopicWhenProvidedMultipleDefinitionsWithSameTopicName() {
    NewTopic newTopic1 = TopicAdmin.defineTopic("myTopic").partitions(1).compacted().build();
    NewTopic newTopic2 = TopicAdmin.defineTopic("myTopic").partitions(1).compacted().build();
    Cluster cluster = createCluster(1);
    try (MockAdminClient mockAdminClient = new MockAdminClient(cluster.nodes(), cluster.nodeById(0))) {
        TopicAdmin admin = new TopicAdmin(null, mockAdminClient);
        Set<String> newTopicNames = admin.createTopics(newTopic1, newTopic2);
        assertEquals(1, newTopicNames.size());
        assertEquals(newTopic2.name(), newTopicNames.iterator().next());
    }
}
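TopicAdmin.createTopics(...) takes any number of NewTopic definitions and, as the assertions show, collapses duplicate names into a single topic. A minimal sketch of that deduplication, using a hypothetical helper that is independent of Connect's TopicAdmin:

import java.util.LinkedHashMap;
import java.util.Map;
import org.apache.kafka.clients.admin.NewTopic;

final class TopicDedupSketch {
    // Hypothetical helper: index definitions by topic name so that repeated
    // definitions of "myTopic" produce exactly one entry to create.
    static Map<String, NewTopic> byName(NewTopic... topics) {
        Map<String, NewTopic> unique = new LinkedHashMap<>();
        for (NewTopic topic : topics) {
            unique.put(topic.name(), topic); // a later definition replaces an earlier one
        }
        return unique;
    }
}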
Use of org.apache.kafka.clients.admin.NewTopic in project apache-kafka-on-k8s by banzaicloud.
Class TopicAdminTest, method shouldNotCreateTopicWhenItAlreadyExists:
@Test
public void shouldNotCreateTopicWhenItAlreadyExists() {
    NewTopic newTopic = TopicAdmin.defineTopic("myTopic").partitions(1).compacted().build();
    Cluster cluster = createCluster(1);
    try (MockAdminClient mockAdminClient = new MockAdminClient(cluster.nodes(), cluster.nodeById(0))) {
        TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, cluster.nodeById(0), cluster.nodes(), Collections.<Node>emptyList());
        mockAdminClient.addTopic(false, "myTopic", Collections.singletonList(topicPartitionInfo), null);
        TopicAdmin admin = new TopicAdmin(null, mockAdminClient);
        assertFalse(admin.createTopic(newTopic));
    }
}
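All three tests build their topic through Connect's TopicAdmin.defineTopic(...).compacted() builder. A plausible equivalent using only the public admin-client API would set cleanup.policy=compact via NewTopic.configs(...); the replication factor of 1 below is an assumption, not necessarily what the builder defaults to.

import java.util.Collections;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.common.config.TopicConfig;

final class CompactedTopicSketch {
    // Hypothetical stand-in for defineTopic(name).partitions(1).compacted().build():
    // one partition, replication factor 1, and log compaction enabled.
    static NewTopic compacted(String name) {
        return new NewTopic(name, 1, (short) 1)
                .configs(Collections.singletonMap(
                        TopicConfig.CLEANUP_POLICY_CONFIG,    // "cleanup.policy"
                        TopicConfig.CLEANUP_POLICY_COMPACT)); // "compact"
    }
}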
Use of org.apache.kafka.clients.admin.NewTopic in project samza by apache.
Class KafkaSystemAdmin, method createStream:
@Override
public boolean createStream(StreamSpec streamSpec) {
    LOG.info("Creating Kafka topic: {} on system: {}", streamSpec.getPhysicalName(), streamSpec.getSystemName());
    final String replFactor = "replication.factor";
    KafkaStreamSpec kafkaStreamSpec = toKafkaSpec(streamSpec);
    String topicName = kafkaStreamSpec.getPhysicalName();
    // create topic.
    NewTopic newTopic = new NewTopic(topicName, kafkaStreamSpec.getPartitionCount(), (short) kafkaStreamSpec.getReplicationFactor());
    // specify the configs
    Map<String, String> streamConfig = new HashMap<>(kafkaStreamSpec.getConfig());
    // HACK - replication.factor is invalid config for AdminClient.createTopics
    if (streamConfig.containsKey(replFactor)) {
        String repl = streamConfig.get(replFactor);
        LOG.warn("Configuration {}={} for topic={} is invalid. Using kSpec repl factor {}", replFactor, repl, kafkaStreamSpec.getPhysicalName(), kafkaStreamSpec.getReplicationFactor());
        streamConfig.remove(replFactor);
    }
    newTopic.configs(new MapConfig(streamConfig));
    CreateTopicsResult result = adminClient.createTopics(ImmutableSet.of(newTopic));
    try {
        result.all().get(KAFKA_ADMIN_OPS_TIMEOUT_MS, TimeUnit.MILLISECONDS);
    } catch (Exception e) {
        if (e instanceof TopicExistsException || e.getCause() instanceof TopicExistsException) {
            LOG.info("Topic {} already exists.", topicName);
            return false;
        }
        throw new SamzaException(String.format("Creation of topic %s failed.", topicName), e);
    }
    LOG.info("Successfully created topic {}", topicName);
    DescribeTopicsResult desc = adminClient.describeTopics(ImmutableSet.of(topicName));
    try {
        TopicDescription td = desc.all().get(KAFKA_ADMIN_OPS_TIMEOUT_MS, TimeUnit.MILLISECONDS).get(topicName);
        LOG.info("Topic {} created with {}", topicName, td);
        return true;
    } catch (Exception e) {
        LOG.error("'Describe after create' failed for topic " + topicName, e);
        return false;
    }
}
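The "describe after create" verification at the end of createStream uses the standard DescribeTopicsResult API. A stripped-down sketch of just that step, with the timeout passed in rather than taken from KAFKA_ADMIN_OPS_TIMEOUT_MS (the class and method names here are assumptions):

import java.util.Collections;
import java.util.concurrent.TimeUnit;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.TopicDescription;

final class DescribeAfterCreateSketch {
    // Hypothetical stand-alone version of the check: fetch the freshly created
    // topic's metadata so the caller can log or inspect its partitions and replicas.
    static TopicDescription describe(AdminClient admin, String topicName, long timeoutMs) throws Exception {
        return admin.describeTopics(Collections.singleton(topicName))
                .all()
                .get(timeoutMs, TimeUnit.MILLISECONDS)
                .get(topicName);
    }
}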
Use of org.apache.kafka.clients.admin.NewTopic in project kafka by apache.
Class MirrorSourceConnectorTest, method testRefreshTopicPartitions:
@Test
public void testRefreshTopicPartitions() throws Exception {
    MirrorSourceConnector connector = new MirrorSourceConnector(new SourceAndTarget("source", "target"), new DefaultReplicationPolicy(), new DefaultTopicFilter(), new DefaultConfigPropertyFilter());
    connector.initialize(mock(ConnectorContext.class));
    connector = spy(connector);
    Config topicConfig = new Config(Arrays.asList(new ConfigEntry("cleanup.policy", "compact"), new ConfigEntry("segment.bytes", "100")));
    Map<String, Config> configs = Collections.singletonMap("topic", topicConfig);
    List<TopicPartition> sourceTopicPartitions = Collections.singletonList(new TopicPartition("topic", 0));
    doReturn(sourceTopicPartitions).when(connector).findSourceTopicPartitions();
    doReturn(Collections.emptyList()).when(connector).findTargetTopicPartitions();
    doReturn(configs).when(connector).describeTopicConfigs(Collections.singleton("topic"));
    doNothing().when(connector).createNewTopics(any());
    connector.refreshTopicPartitions();
    // if the target topic is not created, refreshTopicPartitions() will call computeAndCreateTopicPartitions() again
    connector.refreshTopicPartitions();
    Map<String, Long> expectedPartitionCounts = new HashMap<>();
    expectedPartitionCounts.put("source.topic", 1L);
    Map<String, String> configMap = MirrorSourceConnector.configToMap(topicConfig);
    assertEquals(2, configMap.size(), "configMap has incorrect size");
    Map<String, NewTopic> expectedNewTopics = new HashMap<>();
    expectedNewTopics.put("source.topic", new NewTopic("source.topic", 1, (short) 0).configs(configMap));
    verify(connector, times(2)).computeAndCreateTopicPartitions();
    verify(connector, times(2)).createNewTopics(eq(expectedNewTopics));
    verify(connector, times(0)).createNewPartitions(any());
    List<TopicPartition> targetTopicPartitions = Collections.singletonList(new TopicPartition("source.topic", 0));
    doReturn(targetTopicPartitions).when(connector).findTargetTopicPartitions();
    connector.refreshTopicPartitions();
    // once the target topic is created, refreshTopicPartitions() will NOT call computeAndCreateTopicPartitions() again
    verify(connector, times(2)).computeAndCreateTopicPartitions();
}
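MirrorSourceConnector.configToMap(...) is internal to MirrorMaker 2 and is not reproduced here; a plausible equivalent (an assumption, not the actual implementation) simply flattens the admin Config entries into the Map<String, String> that NewTopic.configs(...) expects.

import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.clients.admin.Config;
import org.apache.kafka.clients.admin.ConfigEntry;

final class ConfigToMapSketch {
    // Hypothetical stand-in for MirrorSourceConnector.configToMap: copy each
    // ConfigEntry's name and value into a plain map for use with NewTopic.configs().
    static Map<String, String> configToMap(Config config) {
        Map<String, String> map = new HashMap<>();
        for (ConfigEntry entry : config.entries()) {
            map.put(entry.name(), entry.value());
        }
        return map;
    }
}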