Use of io.strimzi.api.kafka.model.KafkaTopic in project strimzi by strimzi.
The class TopicST, method testSendingMessagesToNonExistingTopic.
@ParallelTest
@Tag(INTERNAL_CLIENTS_USED)
void testSendingMessagesToNonExistingTopic(ExtensionContext extensionContext) {
    final TestStorage testStorage = new TestStorage(extensionContext, namespace);

    KafkaClients clients = new KafkaClientsBuilder()
        .withProducerName(testStorage.getProducerName())
        .withConsumerName(testStorage.getConsumerName())
        .withBootstrapAddress(KafkaResources.plainBootstrapAddress(TOPIC_CLUSTER_NAME))
        .withNamespaceName(testStorage.getNamespaceName())
        .withTopicName(testStorage.getTopicName())
        .withMessageCount(MESSAGE_COUNT)
        .build();

    LOGGER.info("Checking if {} is on topic list", testStorage.getTopicName());
    assertFalse(hasTopicInKafka(testStorage.getTopicName(), TOPIC_CLUSTER_NAME));
    LOGGER.info("Topic with name {} is not created yet", testStorage.getTopicName());

    LOGGER.info("Trying to send messages to non-existing topic {}", testStorage.getTopicName());
    resourceManager.createResource(extensionContext, clients.producerStrimzi(), clients.consumerStrimzi());
    ClientUtils.waitForClientsSuccess(testStorage.getProducerName(), testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);

    LOGGER.info("Checking if {} is on topic list", testStorage.getTopicName());
    assertTrue(hasTopicInKafka(testStorage.getTopicName(), TOPIC_CLUSTER_NAME));

    KafkaTopicUtils.waitForKafkaTopicCreation(namespace, testStorage.getTopicName());
    KafkaTopic kafkaTopic = KafkaTopicResource.kafkaTopicClient().inNamespace(namespace).withName(testStorage.getTopicName()).get();

    assertThat(kafkaTopic, notNullValue());
    assertThat(kafkaTopic.getStatus(), notNullValue());
    assertThat(kafkaTopic.getStatus().getConditions(), notNullValue());
    assertThat(kafkaTopic.getStatus().getConditions().isEmpty(), is(false));
    assertThat(kafkaTopic.getStatus().getConditions().get(0).getType(), is(Ready.toString()));
    LOGGER.info("Topic successfully created");
}
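The hasTopicInKafka helper called above is not part of this snippet. A minimal sketch of what such a check might look like, assuming the topic list is read from broker pod 0 via KafkaCmdClient.listTopicsUsingPodCli (the same utility used in the MirrorMaker2 tests further down this page; the real helper in TopicST may differ):

// Hypothetical sketch, not the actual TopicST implementation:
// returns true if the given topic name is visible in the given Kafka cluster.
boolean hasTopicInKafka(String topicName, String clusterName) {
    LOGGER.info("Checking Kafka cluster {} for topic {}", clusterName, topicName);
    return KafkaCmdClient.listTopicsUsingPodCli(namespace, clusterName, 0)
        .contains(topicName);
}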
Use of io.strimzi.api.kafka.model.KafkaTopic in project strimzi by strimzi.
The class TopicST, method testCreateTopicViaAdminClient.
@IsolatedTest("Using more than one Kafka cluster in one namespace")
@Tag(NODEPORT_SUPPORTED)
void testCreateTopicViaAdminClient(ExtensionContext extensionContext) throws ExecutionException, InterruptedException, TimeoutException {
    String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());

    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3, 3)
        .editMetadata()
            .withNamespace(namespace)
        .endMetadata()
        .editSpec()
            .editKafka()
                .withListeners(new GenericKafkaListenerBuilder()
                    .withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
                    .withPort(9094)
                    .withType(KafkaListenerType.NODEPORT)
                    .withTls(false)
                    .build())
            .endKafka()
        .endSpec()
        .build());

    Properties properties = new Properties();
    properties.setProperty(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG,
        KafkaResource.kafkaClient().inNamespace(namespace).withName(clusterName).get().getStatus().getListeners().stream()
            .filter(listener -> listener.getType().equals(Constants.EXTERNAL_LISTENER_DEFAULT_NAME))
            .findFirst()
            .orElseThrow(RuntimeException::new)
            .getBootstrapServers());

    try (AdminClient adminClient = AdminClient.create(properties)) {
        // the new KafkaStreamsTopicStore creates topology input topics, so the cluster may already contain topics
        Set<String> topics = adminClient.listTopics().names().get(Constants.GLOBAL_CLIENTS_TIMEOUT, TimeUnit.MILLISECONDS);
        int topicsSize = topics.size();

        LOGGER.info("Asynchronously creating topic {} via Admin client", topicName);
        CreateTopicsResult crt = adminClient.createTopics(singletonList(new NewTopic(topicName, 1, (short) 1)));
        crt.all().get();

        TestUtils.waitFor("Wait until Kafka cluster has " + (topicsSize + 1) + " KafkaTopic", Constants.GLOBAL_POLL_INTERVAL, Constants.GLOBAL_TIMEOUT, () -> {
            Set<String> updatedKafkaTopics = new HashSet<>();
            try {
                updatedKafkaTopics = adminClient.listTopics().names().get(Constants.GLOBAL_CLIENTS_TIMEOUT, TimeUnit.MILLISECONDS);
                LOGGER.info("Verifying that the Kafka cluster contains {} topics", topicsSize + 1);
            } catch (InterruptedException | ExecutionException | TimeoutException e) {
                e.printStackTrace();
            }
            return updatedKafkaTopics.size() == topicsSize + 1 && updatedKafkaTopics.contains(topicName);
        });

        KafkaTopicUtils.waitForKafkaTopicCreation(namespace, topicName);
        KafkaTopicUtils.waitForKafkaTopicReady(namespace, topicName);
    }

    KafkaTopic kafkaTopic = KafkaTopicResource.kafkaTopicClient().inNamespace(namespace).withName(topicName).get();

    LOGGER.info("Verifying that {} corresponding KafkaTopic custom resource was created and the topic is in Ready state", 1);
    assertThat(kafkaTopic.getStatus().getConditions().get(0).getType(), is(Ready.toString()));
    assertThat(kafkaTopic.getSpec().getPartitions(), is(1));
    assertThat(kafkaTopic.getSpec().getReplicas(), is(1));
}
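Outside the Strimzi test harness, the same Admin API flow can be exercised with plain Kafka clients. A minimal sketch, using a hypothetical bootstrap address and topic name (in the test above the address is taken from the NodePort listener status):

import java.util.Properties;
import java.util.Set;
import java.util.concurrent.ExecutionException;

import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.NewTopic;

import static java.util.Collections.singletonList;

public class CreateTopicExample {
    public static void main(String[] args) throws ExecutionException, InterruptedException {
        Properties properties = new Properties();
        // Hypothetical bootstrap address; replace with the address of a reachable broker
        properties.setProperty(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");

        try (AdminClient adminClient = AdminClient.create(properties)) {
            // Create a topic with 1 partition and replication factor 1, then block until the request completes
            adminClient.createTopics(singletonList(new NewTopic("my-topic", 1, (short) 1))).all().get();

            // List topic names to confirm the new topic is visible to the brokers
            Set<String> topics = adminClient.listTopics().names().get();
            System.out.println("Topics: " + topics);
        }
    }
}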
Use of io.strimzi.api.kafka.model.KafkaTopic in project strimzi by strimzi.
The class MirrorMaker2IsolatedST, method testMirrorMaker2.
@ParallelNamespaceTest
void testMirrorMaker2(ExtensionContext extensionContext) {
    final TestStorage testStorage = new TestStorage(extensionContext, clusterOperator.getDeploymentNamespace());
    String kafkaClusterSourceName = testStorage.getClusterName() + "-source";
    String kafkaClusterTargetName = testStorage.getClusterName() + "-target";
    String sourceMirroredTopicName = kafkaClusterSourceName + "." + testStorage.getTopicName();

    Map<String, Object> expectedConfig = StUtils.loadProperties("group.id=mirrormaker2-cluster\n"
        + "key.converter=org.apache.kafka.connect.converters.ByteArrayConverter\n"
        + "value.converter=org.apache.kafka.connect.converters.ByteArrayConverter\n"
        + "header.converter=org.apache.kafka.connect.converters.ByteArrayConverter\n"
        + "config.storage.topic=mirrormaker2-cluster-configs\n"
        + "status.storage.topic=mirrormaker2-cluster-status\n"
        + "offset.storage.topic=mirrormaker2-cluster-offsets\n"
        + "config.storage.replication.factor=-1\n"
        + "status.storage.replication.factor=-1\n"
        + "offset.storage.replication.factor=-1\n"
        + "config.providers=file\n"
        + "config.providers.file.class=org.apache.kafka.common.config.provider.FileConfigProvider\n");

    // Deploy source and target Kafka clusters
    resourceManager.createResource(extensionContext,
        KafkaTemplates.kafkaEphemeral(kafkaClusterSourceName, 1).build(),
        KafkaTemplates.kafkaEphemeral(kafkaClusterTargetName, 1).build());
    // Deploy the source topic
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(kafkaClusterSourceName, testStorage.getTopicName(), 3).build());

    KafkaClients clients = new KafkaClientsBuilder()
        .withProducerName(testStorage.getProducerName())
        .withConsumerName(testStorage.getConsumerName())
        .withBootstrapAddress(KafkaResources.plainBootstrapAddress(kafkaClusterSourceName))
        .withNamespaceName(testStorage.getNamespaceName())
        .withTopicName(testStorage.getTopicName())
        .withMessageCount(MESSAGE_COUNT)
        .build();

    // Check broker availability
    LOGGER.info("Exchanging messages: topic {}, cluster {}, message count {}", testStorage.getTopicName(), kafkaClusterSourceName, MESSAGE_COUNT);
    resourceManager.createResource(extensionContext, clients.producerStrimzi(), clients.consumerStrimzi());
    ClientUtils.waitForClientsSuccess(testStorage.getProducerName(), testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);

    resourceManager.createResource(extensionContext, KafkaMirrorMaker2Templates.kafkaMirrorMaker2(testStorage.getClusterName(), kafkaClusterTargetName, kafkaClusterSourceName, 1, false)
        .editSpec()
            .editFirstMirror()
                .editSourceConnector()
                    .addToConfig("refresh.topics.interval.seconds", "1")
                .endSourceConnector()
            .endMirror()
        .endSpec()
        .build());

    String podName = PodUtils.getPodNameByPrefix(testStorage.getNamespaceName(), KafkaMirrorMaker2Resources.deploymentName(testStorage.getClusterName()));
    String kafkaPodJson = TestUtils.toJsonString(kubeClient().getPod(testStorage.getNamespaceName(), podName));

    assertThat(kafkaPodJson, hasJsonPath(StUtils.globalVariableJsonPathBuilder(0, "KAFKA_CONNECT_BOOTSTRAP_SERVERS"),
        hasItem(KafkaResources.plainBootstrapAddress(kafkaClusterTargetName))));
    assertThat(StUtils.getPropertiesFromJson(0, kafkaPodJson, "KAFKA_CONNECT_CONFIGURATION"), is(expectedConfig));
    testDockerImagesForKafkaMirrorMaker2(testStorage.getClusterName(), clusterOperator.getDeploymentNamespace(), testStorage.getNamespaceName());

    verifyLabelsOnPods(testStorage.getNamespaceName(), testStorage.getClusterName(), "mirrormaker2", KafkaMirrorMaker2.RESOURCE_KIND);
    verifyLabelsForService(testStorage.getNamespaceName(), testStorage.getClusterName(), "mirrormaker2-api", KafkaMirrorMaker2.RESOURCE_KIND);
    verifyLabelsForConfigMaps(testStorage.getNamespaceName(), kafkaClusterSourceName, null, kafkaClusterTargetName);
    verifyLabelsForServiceAccounts(testStorage.getNamespaceName(), kafkaClusterSourceName, null);

    LOGGER.info("Now setting topic to {} and cluster to {} - the messages should be mirrored", sourceMirroredTopicName, kafkaClusterTargetName);
    clients = new KafkaClientsBuilder(clients)
        .withTopicName(sourceMirroredTopicName)
        .withBootstrapAddress(KafkaResources.plainBootstrapAddress(kafkaClusterTargetName))
        .build();

    LOGGER.info("Consumer in target cluster and topic should receive {} messages", MESSAGE_COUNT);
    resourceManager.createResource(extensionContext, clients.consumerStrimzi());
    ClientUtils.waitForClientSuccess(testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);
    LOGGER.info("Messages were successfully mirrored");

    if (!Environment.isKRaftModeEnabled()) {
        KafkaTopic mirroredTopic = KafkaTopicResource.kafkaTopicClient().inNamespace(testStorage.getNamespaceName()).withName(sourceMirroredTopicName).get();
        assertThat(mirroredTopic.getSpec().getPartitions(), is(3));
        assertThat(mirroredTopic.getMetadata().getLabels().get(Labels.STRIMZI_CLUSTER_LABEL), is(kafkaClusterTargetName));

        // Replace the source topic resource with new data and check that MM2 updates the target topic
        KafkaTopicResource.replaceTopicResourceInSpecificNamespace(testStorage.getTopicName(), kt -> kt.getSpec().setPartitions(8), testStorage.getNamespaceName());
        KafkaTopicUtils.waitForKafkaTopicPartitionChange(testStorage.getNamespaceName(), sourceMirroredTopicName, 8);
    }
}
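The sourceMirroredTopicName above (source cluster alias, a dot, then the original topic name) matches what MirrorMaker 2's DefaultReplicationPolicy produces; the two tests below switch to identity policies that keep the original name. A small sketch of the difference, assuming Kafka's connect-mirror-client classes and hypothetical cluster/topic names:

import org.apache.kafka.connect.mirror.DefaultReplicationPolicy;
import org.apache.kafka.connect.mirror.IdentityReplicationPolicy;

public class ReplicationPolicyNaming {
    public static void main(String[] args) {
        String sourceAlias = "my-cluster-source";   // hypothetical source cluster alias
        String topic = "my-topic";                  // hypothetical topic name

        // Default policy prefixes the topic with the source cluster alias: "my-cluster-source.my-topic"
        System.out.println(new DefaultReplicationPolicy().formatRemoteTopic(sourceAlias, topic));

        // Identity policy keeps the original name: "my-topic"
        System.out.println(new IdentityReplicationPolicy().formatRemoteTopic(sourceAlias, topic));
    }
}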
Use of io.strimzi.api.kafka.model.KafkaTopic in project strimzi by strimzi.
The class MirrorMaker2IsolatedST, method testStrimziIdentityReplicationPolicy.
/*
* This test is using the Strimzi Identity Replication policy. This is needed for backwards compatibility for users
* who might still have it configured.
*
* This ST should be deleted once we drop the Strimzi policy completely.
*/
@ParallelNamespaceTest
void testStrimziIdentityReplicationPolicy(ExtensionContext extensionContext) {
    final TestStorage testStorage = new TestStorage(extensionContext, clusterOperator.getDeploymentNamespace());
    String kafkaClusterSourceName = testStorage.getClusterName() + "-source";
    String kafkaClusterTargetName = testStorage.getClusterName() + "-target";

    resourceManager.createResource(extensionContext,
        KafkaTemplates.kafkaEphemeral(kafkaClusterSourceName, 1, 1).build(),
        KafkaTemplates.kafkaEphemeral(kafkaClusterTargetName, 1, 1).build());
    // Create topic
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(kafkaClusterSourceName, testStorage.getTopicName(), 3).build());

    resourceManager.createResource(extensionContext, KafkaMirrorMaker2Templates.kafkaMirrorMaker2(testStorage.getClusterName(), kafkaClusterTargetName, kafkaClusterSourceName, 1, false)
        .editSpec()
            .editMirror(0)
                .editSourceConnector()
                    .addToConfig("replication.policy.class", "io.strimzi.kafka.connect.mirror.IdentityReplicationPolicy")
                    .addToConfig("refresh.topics.interval.seconds", "1")
                .endSourceConnector()
            .endMirror()
        .endSpec()
        .build());

    LOGGER.info("Sending and receiving messages via {}", kafkaClusterSourceName);
    KafkaClients clients = new KafkaClientsBuilder()
        .withProducerName(testStorage.getProducerName())
        .withConsumerName(testStorage.getConsumerName())
        .withBootstrapAddress(KafkaResources.plainBootstrapAddress(kafkaClusterSourceName))
        .withNamespaceName(testStorage.getNamespaceName())
        .withTopicName(testStorage.getTopicName())
        .withMessageCount(MESSAGE_COUNT)
        .build();

    resourceManager.createResource(extensionContext, clients.producerStrimzi(), clients.consumerStrimzi());
    ClientUtils.waitForClientsSuccess(testStorage.getProducerName(), testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);

    LOGGER.info("Changing to {} and trying to receive messages", kafkaClusterTargetName);
    clients = new KafkaClientsBuilder(clients)
        .withBootstrapAddress(KafkaResources.plainBootstrapAddress(kafkaClusterTargetName))
        .build();

    resourceManager.createResource(extensionContext, clients.consumerStrimzi());
    ClientUtils.waitForClientSuccess(testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);

    if (!Environment.isKRaftModeEnabled()) {
        LOGGER.info("Checking if the mirrored topic name is the same as the original one");

        List<String> kafkaTopics = KafkaCmdClient.listTopicsUsingPodCli(testStorage.getNamespaceName(), kafkaClusterTargetName, 0);
        assertTrue(kafkaTopics.contains(testStorage.getTopicName()));

        List<String> kafkaTopicSpec = KafkaCmdClient.describeTopicUsingPodCli(testStorage.getNamespaceName(), kafkaClusterTargetName, 0, testStorage.getTopicName());
        assertThat(kafkaTopicSpec.stream().filter(token -> token.startsWith("Topic:")).findFirst().orElse(null), equalTo("Topic:" + testStorage.getTopicName()));
        assertThat(kafkaTopicSpec.stream().filter(token -> token.startsWith("PartitionCount:")).findFirst().orElse(null), equalTo("PartitionCount:3"));
    }
}
Use of io.strimzi.api.kafka.model.KafkaTopic in project strimzi by strimzi.
The class MirrorMaker2IsolatedST, method testIdentityReplicationPolicy.
/*
* This test is using the Kafka Identity Replication policy. This is what should be used by all new users.
*/
@ParallelNamespaceTest
void testIdentityReplicationPolicy(ExtensionContext extensionContext) {
    final TestStorage testStorage = new TestStorage(extensionContext, clusterOperator.getDeploymentNamespace());
    String kafkaClusterSourceName = testStorage.getClusterName() + "-source";
    String kafkaClusterTargetName = testStorage.getClusterName() + "-target";

    resourceManager.createResource(extensionContext,
        KafkaTemplates.kafkaEphemeral(kafkaClusterSourceName, 1, 1).build(),
        KafkaTemplates.kafkaEphemeral(kafkaClusterTargetName, 1, 1).build());
    // Create topic
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(kafkaClusterSourceName, testStorage.getTopicName(), 3).build());

    resourceManager.createResource(extensionContext, KafkaMirrorMaker2Templates.kafkaMirrorMaker2(testStorage.getClusterName(), kafkaClusterTargetName, kafkaClusterSourceName, 1, false)
        .editSpec()
            .editMirror(0)
                .editSourceConnector()
                    .addToConfig("replication.policy.class", "org.apache.kafka.connect.mirror.IdentityReplicationPolicy")
                    .addToConfig("refresh.topics.interval.seconds", "1")
                .endSourceConnector()
            .endMirror()
        .endSpec()
        .build());

    LOGGER.info("Sending and receiving messages via {}", kafkaClusterSourceName);
    KafkaClients clients = new KafkaClientsBuilder()
        .withProducerName(testStorage.getProducerName())
        .withConsumerName(testStorage.getConsumerName())
        .withBootstrapAddress(KafkaResources.plainBootstrapAddress(kafkaClusterSourceName))
        .withNamespaceName(testStorage.getNamespaceName())
        .withTopicName(testStorage.getTopicName())
        .withMessageCount(MESSAGE_COUNT)
        .build();

    resourceManager.createResource(extensionContext, clients.producerStrimzi(), clients.consumerStrimzi());
    ClientUtils.waitForClientsSuccess(testStorage.getProducerName(), testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);

    LOGGER.info("Changing to {} and trying to receive messages", kafkaClusterTargetName);
    clients = new KafkaClientsBuilder(clients)
        .withBootstrapAddress(KafkaResources.plainBootstrapAddress(kafkaClusterTargetName))
        .build();

    resourceManager.createResource(extensionContext, clients.consumerStrimzi());
    ClientUtils.waitForClientSuccess(testStorage.getConsumerName(), testStorage.getNamespaceName(), MESSAGE_COUNT);

    if (!Environment.isKRaftModeEnabled()) {
        LOGGER.info("Checking if the mirrored topic name is the same as the original one");

        List<String> kafkaTopics = KafkaCmdClient.listTopicsUsingPodCli(testStorage.getNamespaceName(), kafkaClusterTargetName, 0);
        assertTrue(kafkaTopics.contains(testStorage.getTopicName()));

        List<String> kafkaTopicSpec = KafkaCmdClient.describeTopicUsingPodCli(testStorage.getNamespaceName(), kafkaClusterTargetName, 0, testStorage.getTopicName());
        assertThat(kafkaTopicSpec.stream().filter(token -> token.startsWith("Topic:")).findFirst().orElse(null), equalTo("Topic:" + testStorage.getTopicName()));
        assertThat(kafkaTopicSpec.stream().filter(token -> token.startsWith("PartitionCount:")).findFirst().orElse(null), equalTo("PartitionCount:3"));
    }
}