Use of io.strimzi.test.WaitException in project strimzi by strimzi.
The class ExternalKafkaClient, method consumeMessages.
private int consumeMessages(ConsumerProperties properties) {
    Consumer<String, String> consumer = new KafkaConsumer<>(properties.getProperties());
    consumer.subscribe(Collections.singletonList(topicName));

    CompletableFuture<Integer> received = new CompletableFuture<>();
    int[] size = { 0 };

    // Poll repeatedly until the expected number of messages has been received.
    Runnable poll = new Runnable() {
        @Override
        public void run() {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(15000));
            size[0] += records.count();
            records.forEach(record -> LOGGER.debug("Received message: {}", record.value()));
            if (size[0] >= messageCount) {
                received.complete(size[0]);
            } else {
                this.run();
            }
        }
    };

    poll.run();

    try {
        int messagesReceived = received.get(Constants.GLOBAL_CLIENTS_TIMEOUT, TimeUnit.MILLISECONDS);
        LOGGER.info("Received {} messages.", messagesReceived);
        consumer.close();
        return messagesReceived;
    } catch (InterruptedException | ExecutionException | TimeoutException e) {
        // Close the consumer and surface the failure as a WaitException so callers can treat it as a wait failure.
        consumer.close();
        e.printStackTrace();
        throw new WaitException(e);
    }
}
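For reference, a minimal sketch of the pattern this method relies on: block on a CompletableFuture with a timeout and convert any failure into io.strimzi.test.WaitException so the caller sees a single exception type. The waitForValue helper name and the 30-second timeout are illustrative assumptions, not part of ExternalKafkaClient.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

import io.strimzi.test.WaitException;

public class WaitExceptionSketch {

    // Hypothetical helper mirroring the consumeMessages error handling: wait for a future,
    // wrapping interruption, execution failure and timeout in a single WaitException.
    static <T> T waitForValue(CompletableFuture<T> future, long timeoutMs) {
        try {
            return future.get(timeoutMs, TimeUnit.MILLISECONDS);
        } catch (InterruptedException | ExecutionException | TimeoutException e) {
            throw new WaitException(e);
        }
    }

    public static void main(String[] args) {
        CompletableFuture<Integer> received = new CompletableFuture<>();
        received.complete(42);
        System.out.println("Received " + waitForValue(received, 30_000) + " messages");
    }
}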
Use of io.strimzi.test.WaitException in project strimzi by strimzi.
The class OauthPlainIsolatedST, method testProducerConsumerMirrorMaker.
@Description("As an OAuth mirror maker, I should be able to replicate topic data between Kafka clusters")
@IsolatedTest("Using more than one Kafka cluster in one namespace")
@Tag(MIRROR_MAKER)
@Tag(NODEPORT_SUPPORTED)
void testProducerConsumerMirrorMaker(ExtensionContext extensionContext) {
    String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    String producerName = OAUTH_PRODUCER_NAME + "-" + clusterName;
    String consumerName = OAUTH_CONSUMER_NAME + "-" + clusterName;
    String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());

    KafkaOauthClients oauthExampleClients = new KafkaOauthClientsBuilder()
        .withNamespaceName(INFRA_NAMESPACE)
        .withProducerName(producerName)
        .withConsumerName(consumerName)
        .withBootstrapAddress(KafkaResources.plainBootstrapAddress(oauthClusterName))
        .withTopicName(topicName)
        .withMessageCount(MESSAGE_COUNT)
        .withOauthClientId(OAUTH_CLIENT_NAME)
        .withOauthClientSecret(OAUTH_CLIENT_SECRET)
        .withOauthTokenEndpointUri(keycloakInstance.getOauthTokenEndpointUri())
        .build();
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(oauthClusterName, topicName, INFRA_NAMESPACE).build());

    resourceManager.createResource(extensionContext, oauthExampleClients.producerStrimziOauthPlain());
    ClientUtils.waitForClientSuccess(producerName, INFRA_NAMESPACE, MESSAGE_COUNT);
    JobUtils.deleteJobWithWait(INFRA_NAMESPACE, producerName);

    resourceManager.createResource(extensionContext, oauthExampleClients.consumerStrimziOauthPlain());
    ClientUtils.waitForClientSuccess(consumerName, INFRA_NAMESPACE, MESSAGE_COUNT);
    JobUtils.deleteJobWithWait(INFRA_NAMESPACE, consumerName);
    String targetKafkaCluster = clusterName + "-target";

    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(targetKafkaCluster, 1, 1)
        .editMetadata()
            .withNamespace(INFRA_NAMESPACE)
        .endMetadata()
        .editSpec()
            .editKafka()
                .withListeners(
                    new GenericKafkaListenerBuilder()
                        .withName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
                        .withPort(9092)
                        .withType(KafkaListenerType.INTERNAL)
                        .withTls(false)
                        .withNewKafkaListenerAuthenticationOAuth()
                            .withValidIssuerUri(keycloakInstance.getValidIssuerUri())
                            .withJwksEndpointUri(keycloakInstance.getJwksEndpointUri())
                            .withJwksExpirySeconds(keycloakInstance.getJwksExpireSeconds())
                            .withJwksRefreshSeconds(keycloakInstance.getJwksRefreshSeconds())
                            .withUserNameClaim(keycloakInstance.getUserNameClaim())
                        .endKafkaListenerAuthenticationOAuth()
                        .build(),
                    new GenericKafkaListenerBuilder()
                        .withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
                        .withPort(9094)
                        .withType(KafkaListenerType.NODEPORT)
                        .withTls(false)
                        .withNewKafkaListenerAuthenticationOAuth()
                            .withValidIssuerUri(keycloakInstance.getValidIssuerUri())
                            .withJwksExpirySeconds(keycloakInstance.getJwksExpireSeconds())
                            .withJwksRefreshSeconds(keycloakInstance.getJwksRefreshSeconds())
                            .withJwksEndpointUri(keycloakInstance.getJwksEndpointUri())
                            .withUserNameClaim(keycloakInstance.getUserNameClaim())
                        .endKafkaListenerAuthenticationOAuth()
                        .build())
            .endKafka()
        .endSpec()
        .build());
    resourceManager.createResource(extensionContext, KafkaMirrorMakerTemplates.kafkaMirrorMaker(oauthClusterName, oauthClusterName, targetKafkaCluster, ClientUtils.generateRandomConsumerGroup(), 1, false)
        .editMetadata()
            .withNamespace(INFRA_NAMESPACE)
        .endMetadata()
        .editSpec()
            .withNewConsumer()
                .withBootstrapServers(KafkaResources.plainBootstrapAddress(oauthClusterName))
                .withGroupId(ClientUtils.generateRandomConsumerGroup())
                .addToConfig(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
                .withNewKafkaClientAuthenticationOAuth()
                    .withTokenEndpointUri(keycloakInstance.getOauthTokenEndpointUri())
                    .withClientId("kafka-mirror-maker")
                    .withNewClientSecret()
                        .withSecretName(MIRROR_MAKER_OAUTH_SECRET)
                        .withKey(OAUTH_KEY)
                    .endClientSecret()
                    .withConnectTimeoutSeconds(CONNECT_TIMEOUT_S)
                    .withReadTimeoutSeconds(READ_TIMEOUT_S)
                .endKafkaClientAuthenticationOAuth()
                .withTls(null)
            .endConsumer()
            .withNewProducer()
                .withBootstrapServers(KafkaResources.plainBootstrapAddress(targetKafkaCluster))
                .withNewKafkaClientAuthenticationOAuth()
                    .withTokenEndpointUri(keycloakInstance.getOauthTokenEndpointUri())
                    .withClientId("kafka-mirror-maker")
                    .withNewClientSecret()
                        .withSecretName(MIRROR_MAKER_OAUTH_SECRET)
                        .withKey(OAUTH_KEY)
                    .endClientSecret()
                    .withConnectTimeoutSeconds(CONNECT_TIMEOUT_S)
                    .withReadTimeoutSeconds(READ_TIMEOUT_S)
                .endKafkaClientAuthenticationOAuth()
                .addToConfig(ProducerConfig.ACKS_CONFIG, "all")
                .withTls(null)
            .endProducer()
        .endSpec()
        .build());
    final String kafkaMirrorMakerPodName = kubeClient(INFRA_NAMESPACE).listPods(INFRA_NAMESPACE, oauthClusterName, Labels.STRIMZI_KIND_LABEL, KafkaMirrorMaker.RESOURCE_KIND).get(0).getMetadata().getName();
    final String kafkaMirrorMakerLogs = KubeClusterResource.cmdKubeClient(INFRA_NAMESPACE).execInCurrentNamespace(Level.DEBUG, "logs", kafkaMirrorMakerPodName).out();
    verifyOauthConfiguration(kafkaMirrorMakerLogs);
    TestUtils.waitFor("Waiting for Mirror Maker to copy messages from " + oauthClusterName + " to " + targetKafkaCluster,
        Constants.GLOBAL_CLIENTS_POLL, Constants.TIMEOUT_FOR_MIRROR_MAKER_COPY_MESSAGES_BETWEEN_BROKERS, () -> {
            LOGGER.info("Deleting the Job");
            JobUtils.deleteJobWithWait(INFRA_NAMESPACE, OAUTH_CONSUMER_NAME);
            LOGGER.info("Creating a new client with a new consumer group, pointing at the {} cluster", targetKafkaCluster);
            KafkaOauthClients kafkaOauthClientJob = new KafkaOauthClientsBuilder()
                .withNamespaceName(INFRA_NAMESPACE)
                .withProducerName(consumerName)
                .withConsumerName(OAUTH_CONSUMER_NAME)
                .withBootstrapAddress(KafkaResources.plainBootstrapAddress(targetKafkaCluster))
                .withTopicName(topicName)
                .withMessageCount(MESSAGE_COUNT)
                .withOauthClientId(OAUTH_CLIENT_NAME)
                .withOauthClientSecret(OAUTH_CLIENT_SECRET)
                .withOauthTokenEndpointUri(keycloakInstance.getOauthTokenEndpointUri())
                .build();
            resourceManager.createResource(extensionContext, kafkaOauthClientJob.consumerStrimziOauthPlain());
            try {
                ClientUtils.waitForClientSuccess(OAUTH_CONSUMER_NAME, INFRA_NAMESPACE, MESSAGE_COUNT);
                return true;
            } catch (WaitException e) {
                // Messages have not been mirrored yet; report failure so waitFor polls again.
                e.printStackTrace();
                return false;
            }
        });
}
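The retry loop above treats a WaitException from the consumer Job as "messages not mirrored yet" and returns false so that TestUtils.waitFor polls again. A minimal, self-contained sketch of that polling pattern follows; waitUntil, the poll interval and the timeout are illustrative assumptions, not the Strimzi TestUtils API.

import java.util.function.BooleanSupplier;

public class RetryUntilReadySketch {

    // Poll a readiness check until it returns true or the timeout elapses.
    static void waitUntil(String description, long pollMs, long timeoutMs, BooleanSupplier ready) {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (!ready.getAsBoolean()) {
            if (System.currentTimeMillis() > deadline) {
                throw new RuntimeException("Timed out waiting for " + description);
            }
            try {
                Thread.sleep(pollMs);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                throw new RuntimeException(e);
            }
        }
    }

    public static void main(String[] args) {
        int[] attempts = { 0 };
        // The lambda plays the role of the test's consumer check: false means "retry later".
        waitUntil("three attempts to pass", 100, 5_000, () -> ++attempts[0] >= 3);
        System.out.println("Condition met after " + attempts[0] + " attempts");
    }
}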
Use of io.strimzi.test.WaitException in project strimzi by strimzi.
The class ExternalKafkaClient, method sendMessages.
private int sendMessages(ProducerProperties properties) {
    Producer<String, String> producer = new KafkaProducer<>(properties.getProperties());
    int[] messagesSentCounter = { 0 };
    CompletableFuture<Integer> sent = new CompletableFuture<>();

    // Send messages one at a time, completing the future once messageCount messages have been acknowledged.
    Runnable send = new Runnable() {
        @Override
        public void run() {
            ProducerRecord<String, String> record = new ProducerRecord<>(topicName, partition, null, "\"Hello-world - " + messagesSentCounter[0] + "\"");
            try {
                if (messagesSentCounter[0] == messageCount) {
                    sent.complete(messagesSentCounter[0]);
                } else {
                    // Block on the send so the record is acknowledged before the counter is incremented.
                    RecordMetadata metadata = producer.send(record).get();
                    LOGGER.debug("Message " + record.value() + " written on topic=" + metadata.topic() + ", partition=" + metadata.partition() + ", offset=" + metadata.offset());
                    messagesSentCounter[0]++;
                    this.run();
                }
            } catch (Exception e) {
                LOGGER.error("Error sending message {} - {}", messagesSentCounter[0], e.getCause());
                e.printStackTrace();
                sent.completeExceptionally(e);
            }
        }
    };

    send.run();

    try {
        int messagesSent = sent.get(Constants.GLOBAL_CLIENTS_TIMEOUT, TimeUnit.MILLISECONDS);
        LOGGER.info("Sent {} messages.", messagesSent);
        producer.close();
        return messagesSent;
    } catch (InterruptedException | ExecutionException | TimeoutException e) {
        // Close the producer and rethrow as WaitException so callers can handle it as a wait failure.
        producer.close();
        e.printStackTrace();
        throw new WaitException(e);
    }
}
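sendMessages counts a message as sent only after producer.send(record).get() returns, i.e. after the broker has acknowledged the record. A minimal standalone sketch of that synchronous-send step using the plain Kafka producer API; the bootstrap address and topic name are placeholder assumptions.

import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.serialization.StringSerializer;

public class SynchronousSendSketch {
    public static void main(String[] args) throws Exception {
        // Assumed broker address; replace with the bootstrap address of a reachable cluster.
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());

        try (Producer<String, String> producer = new KafkaProducer<>(props)) {
            // send() returns a Future; get() blocks until the broker acknowledges the record,
            // which is why sendMessages only increments its counter after get() returns.
            RecordMetadata metadata = producer.send(new ProducerRecord<>("my-topic", "\"Hello-world - 0\"")).get();
            System.out.printf("Written to topic=%s, partition=%d, offset=%d%n",
                metadata.topic(), metadata.partition(), metadata.offset());
        }
    }
}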
Use of io.strimzi.test.WaitException in project strimzi by strimzi.
The class OauthPlainIsolatedST, method testProducerConsumerMirrorMaker2.
@IsolatedTest("Using more than one Kafka cluster in one namespace")
@Tag(MIRROR_MAKER2)
@Tag(CONNECT_COMPONENTS)
@Tag(NODEPORT_SUPPORTED)
void testProducerConsumerMirrorMaker2(ExtensionContext extensionContext) {
    String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    String producerName = OAUTH_PRODUCER_NAME + "-" + clusterName;
    String consumerName = OAUTH_CONSUMER_NAME + "-" + clusterName;
    String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());

    KafkaOauthClients oauthExampleClients = new KafkaOauthClientsBuilder()
        .withNamespaceName(INFRA_NAMESPACE)
        .withProducerName(producerName)
        .withConsumerName(consumerName)
        .withBootstrapAddress(KafkaResources.plainBootstrapAddress(oauthClusterName))
        .withTopicName(topicName)
        .withMessageCount(MESSAGE_COUNT)
        .withOauthClientId(OAUTH_CLIENT_NAME)
        .withOauthClientSecret(OAUTH_CLIENT_SECRET)
        .withOauthTokenEndpointUri(keycloakInstance.getOauthTokenEndpointUri())
        .build();
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(oauthClusterName, topicName, INFRA_NAMESPACE).build());

    resourceManager.createResource(extensionContext, oauthExampleClients.producerStrimziOauthPlain());
    ClientUtils.waitForClientSuccess(producerName, INFRA_NAMESPACE, MESSAGE_COUNT);
    JobUtils.deleteJobWithWait(INFRA_NAMESPACE, producerName);

    resourceManager.createResource(extensionContext, oauthExampleClients.consumerStrimziOauthPlain());
    ClientUtils.waitForClientSuccess(consumerName, INFRA_NAMESPACE, MESSAGE_COUNT);
    JobUtils.deleteJobWithWait(INFRA_NAMESPACE, consumerName);
    String kafkaSourceClusterName = oauthClusterName;
    String kafkaTargetClusterName = clusterName + "-target";
    // Mirror Maker 2 adds the source cluster alias as a prefix to the mirrored topic,
    // so in this case the mirrored topic will be: my-cluster.my-topic
    String kafkaTargetClusterTopicName = kafkaSourceClusterName + "." + topicName;

    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(kafkaTargetClusterName, 1, 1)
        .editMetadata()
            .withNamespace(INFRA_NAMESPACE)
        .endMetadata()
        .editSpec()
            .editKafka()
                .withListeners(
                    new GenericKafkaListenerBuilder()
                        .withName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
                        .withPort(9092)
                        .withType(KafkaListenerType.INTERNAL)
                        .withTls(false)
                        .withNewKafkaListenerAuthenticationOAuth()
                            .withValidIssuerUri(keycloakInstance.getValidIssuerUri())
                            .withJwksEndpointUri(keycloakInstance.getJwksEndpointUri())
                            .withJwksExpirySeconds(keycloakInstance.getJwksExpireSeconds())
                            .withJwksRefreshSeconds(keycloakInstance.getJwksRefreshSeconds())
                            .withUserNameClaim(keycloakInstance.getUserNameClaim())
                        .endKafkaListenerAuthenticationOAuth()
                        .build(),
                    new GenericKafkaListenerBuilder()
                        .withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
                        .withPort(9094)
                        .withType(KafkaListenerType.NODEPORT)
                        .withTls(false)
                        .withNewKafkaListenerAuthenticationOAuth()
                            .withValidIssuerUri(keycloakInstance.getValidIssuerUri())
                            .withJwksExpirySeconds(keycloakInstance.getJwksExpireSeconds())
                            .withJwksRefreshSeconds(keycloakInstance.getJwksRefreshSeconds())
                            .withJwksEndpointUri(keycloakInstance.getJwksEndpointUri())
                            .withUserNameClaim(keycloakInstance.getUserNameClaim())
                        .endKafkaListenerAuthenticationOAuth()
                        .build())
            .endKafka()
        .endSpec()
        .build());
    // Deploy Mirror Maker 2.0 with oauth
    KafkaMirrorMaker2ClusterSpec sourceClusterWithOauth = new KafkaMirrorMaker2ClusterSpecBuilder()
        .withAlias(kafkaSourceClusterName)
        .withConfig(connectorConfig)
        .withBootstrapServers(KafkaResources.plainBootstrapAddress(kafkaSourceClusterName))
        .withNewKafkaClientAuthenticationOAuth()
            .withTokenEndpointUri(keycloakInstance.getOauthTokenEndpointUri())
            .withClientId("kafka-mirror-maker-2")
            .withNewClientSecret()
                .withSecretName(MIRROR_MAKER_2_OAUTH_SECRET)
                .withKey(OAUTH_KEY)
            .endClientSecret()
            .withConnectTimeoutSeconds(CONNECT_TIMEOUT_S)
            .withReadTimeoutSeconds(READ_TIMEOUT_S)
        .endKafkaClientAuthenticationOAuth()
        .build();

    KafkaMirrorMaker2ClusterSpec targetClusterWithOauth = new KafkaMirrorMaker2ClusterSpecBuilder()
        .withAlias(kafkaTargetClusterName)
        .withConfig(connectorConfig)
        .withBootstrapServers(KafkaResources.plainBootstrapAddress(kafkaTargetClusterName))
        .withNewKafkaClientAuthenticationOAuth()
            .withTokenEndpointUri(keycloakInstance.getOauthTokenEndpointUri())
            .withClientId("kafka-mirror-maker-2")
            .withNewClientSecret()
                .withSecretName(MIRROR_MAKER_2_OAUTH_SECRET)
                .withKey(OAUTH_KEY)
            .endClientSecret()
            .withConnectTimeoutSeconds(CONNECT_TIMEOUT_S)
            .withReadTimeoutSeconds(READ_TIMEOUT_S)
        .endKafkaClientAuthenticationOAuth()
        .build();

    resourceManager.createResource(extensionContext, KafkaMirrorMaker2Templates.kafkaMirrorMaker2(oauthClusterName, kafkaTargetClusterName, kafkaSourceClusterName, 1, false)
        .editMetadata()
            .withNamespace(INFRA_NAMESPACE)
        .endMetadata()
        .editSpec()
            .withClusters(sourceClusterWithOauth, targetClusterWithOauth)
            .editFirstMirror()
                .withSourceCluster(kafkaSourceClusterName)
            .endMirror()
        .endSpec()
        .build());
    final String kafkaMirrorMaker2PodName = kubeClient(INFRA_NAMESPACE).listPods(INFRA_NAMESPACE, oauthClusterName, Labels.STRIMZI_KIND_LABEL, KafkaMirrorMaker2.RESOURCE_KIND).get(0).getMetadata().getName();
    final String kafkaMirrorMaker2Logs = KubeClusterResource.cmdKubeClient(INFRA_NAMESPACE).execInCurrentNamespace(Level.DEBUG, "logs", kafkaMirrorMaker2PodName).out();
    verifyOauthConfiguration(kafkaMirrorMaker2Logs);
    TestUtils.waitFor("Waiting for Mirror Maker 2 to copy messages from " + kafkaSourceClusterName + " to " + kafkaTargetClusterName,
        Duration.ofSeconds(30).toMillis(), Constants.TIMEOUT_FOR_MIRROR_MAKER_COPY_MESSAGES_BETWEEN_BROKERS, () -> {
            LOGGER.info("Deleting the Job {}", consumerName);
            JobUtils.deleteJobWithWait(INFRA_NAMESPACE, consumerName);
            LOGGER.info("Creating a new client with a new consumer group, pointing at the {} cluster", kafkaTargetClusterName);
            KafkaOauthClients kafkaOauthClientJob = new KafkaOauthClientsBuilder()
                .withNamespaceName(INFRA_NAMESPACE)
                .withProducerName(producerName)
                .withConsumerName(consumerName)
                .withBootstrapAddress(KafkaResources.plainBootstrapAddress(kafkaTargetClusterName))
                .withTopicName(kafkaTargetClusterTopicName)
                .withMessageCount(MESSAGE_COUNT)
                .withOauthClientId(OAUTH_CLIENT_NAME)
                .withOauthClientSecret(OAUTH_CLIENT_SECRET)
                .withOauthTokenEndpointUri(keycloakInstance.getOauthTokenEndpointUri())
                .build();
            resourceManager.createResource(extensionContext, kafkaOauthClientJob.consumerStrimziOauthPlain());
            try {
                ClientUtils.waitForClientSuccess(consumerName, INFRA_NAMESPACE, MESSAGE_COUNT);
                return true;
            } catch (WaitException e) {
                // Messages have not been mirrored yet; report failure so waitFor polls again.
                e.printStackTrace();
                return false;
            }
        });
}
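The consumer in the retry loop reads the mirrored topic name built earlier in the test, which follows Mirror Maker 2's default renaming: source cluster alias, a dot, then the original topic name. A tiny sketch of that rule; mirroredTopicName is an illustrative helper, not a Strimzi or Kafka API.

public class MirroredTopicNameSketch {

    // Default Mirror Maker 2 naming the test relies on: "<source-cluster-alias>.<topic>".
    static String mirroredTopicName(String sourceClusterAlias, String topicName) {
        return sourceClusterAlias + "." + topicName;
    }

    public static void main(String[] args) {
        // With the names used in the test, "my-topic" on "my-cluster" is mirrored as "my-cluster.my-topic".
        System.out.println(mirroredTopicName("my-cluster", "my-topic"));
    }
}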