Example usage of io.strimzi.systemtest.annotations.IsolatedTest in the Strimzi project: class KafkaUpgradeDowngradeIsolatedST, method testKafkaClusterDowngradeToOlderMessageFormat.
@IsolatedTest
void testKafkaClusterDowngradeToOlderMessageFormat(ExtensionContext testContext) {
    // All supported Kafka versions, sorted ascending (index 0 = oldest).
    final List<TestKafkaVersion> sortedVersions = TestKafkaVersion.getSupportedKafkaVersions();
    final String clusterName = mapWithClusterNames.get(testContext.getDisplayName());
    final String producerName = clusterName + "-producer";
    final String consumerName = clusterName + "-consumer";

    // Pin the message format and inter-broker protocol to the oldest supported
    // version so they remain valid across every downgrade step in the walk below.
    final TestKafkaVersion oldestVersion = sortedVersions.get(0);
    final String initLogMsgFormat = oldestVersion.messageVersion();
    final String initInterBrokerProtocol = oldestVersion.protocolVersion();

    // Downgrade one step at a time, from the newest version down to the oldest.
    for (int idx = sortedVersions.size() - 1; idx > 0; idx--) {
        runVersionChange(sortedVersions.get(idx), sortedVersions.get(idx - 1),
            producerName, consumerName, initLogMsgFormat, initInterBrokerProtocol, 3, 3, testContext);
    }

    // ##############################
    // Validate that continuous clients finished successfully
    // ##############################
    ClientUtils.waitTillContinuousClientsFinish(producerName, consumerName, INFRA_NAMESPACE, continuousClientsMessageCount);
    // ##############################
}
Example usage of io.strimzi.systemtest.annotations.IsolatedTest in the Strimzi project: class ClusterOperationIsolatedST, method testAvailabilityDuringNodeDrain.
@IsolatedTest
@MultiNodeClusterOnly
@RequiredMinKubeApiVersion(version = 1.15)
void testAvailabilityDuringNodeDrain(ExtensionContext extensionContext) {
    String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    int size = 5;
    // Parallel lists: index i pairs topic i, producer i, consumer i, and consumer group i.
    List<String> topicNames = IntStream.range(0, size).boxed().map(i -> "test-topic-" + i).collect(Collectors.toList());
    List<String> producerNames = IntStream.range(0, size).boxed().map(i -> "hello-world-producer-" + i).collect(Collectors.toList());
    List<String> consumerNames = IntStream.range(0, size).boxed().map(i -> "hello-world-consumer-" + i).collect(Collectors.toList());
    List<String> continuousConsumerGroups = IntStream.range(0, size).boxed().map(i -> "continuous-consumer-group-" + i).collect(Collectors.toList());
    int continuousClientsMessageCount = 300;

    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 3, 3).editOrNewSpec().editEntityOperator().editUserOperator().withReconciliationIntervalSeconds(30).endUserOperator().endEntityOperator().endSpec().build());
    topicNames.forEach(topicName -> resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName, 3, 3, 2).build()));

    // Tight delivery/request timeouts so undelivered messages surface quickly during the drain.
    String producerAdditionConfiguration = "delivery.timeout.ms=20000\nrequest.timeout.ms=20000";
    KafkaClients kafkaBasicClientResource;

    // Start one producer/consumer pair per topic, each in its own consumer group.
    for (int i = 0; i < size; i++) {
        kafkaBasicClientResource = new KafkaClientsBuilder().withProducerName(producerNames.get(i)).withConsumerName(consumerNames.get(i)).withBootstrapAddress(KafkaResources.plainBootstrapAddress(clusterName)).withTopicName(topicNames.get(i)).withMessageCount(continuousClientsMessageCount).withAdditionalConfig(producerAdditionConfiguration).withConsumerGroup(continuousConsumerGroups.get(i)).withDelayMs(1000).build();
        resourceManager.createResource(extensionContext, kafkaBasicClientResource.producerStrimzi());
        resourceManager.createResource(extensionContext, kafkaBasicClientResource.consumerStrimzi());
    }

    // ##############################
    // Nodes draining
    // ##############################
    kubeClient().getClusterWorkers().forEach(node -> {
        NodeUtils.drainNode(node.getMetadata().getName());
        NodeUtils.cordonNode(node.getMetadata().getName(), true);
    });

    // Wait for each producer/consumer PAIR to finish.
    // Bug fix: the original used consumerNames.get(producerName.indexOf(producerName)),
    // and String#indexOf of a string within itself is always 0 — so every producer was
    // paired with consumerNames.get(0) instead of its matching consumer.
    for (int i = 0; i < size; i++) {
        ClientUtils.waitTillContinuousClientsFinish(producerNames.get(i), consumerNames.get(i), NAMESPACE, continuousClientsMessageCount);
    }

    producerNames.forEach(producerName -> kubeClient().deleteJob(producerName));
    consumerNames.forEach(consumerName -> kubeClient().deleteJob(consumerName));
}
Aggregations