Example usage of io.strimzi.test.KafkaCluster in the Strimzi project:
class KafkaClusterTest, method testDeployKafkaOnPersistentStorage.
@Test
@KafkaCluster(name = "my-cluster-persistent", kafkaNodes = 2, zkNodes = 2, config = { @CmData(key = "kafka-storage", value = "{ \"type\": \"persistent-claim\", \"size\": \"1Gi\", \"delete-claim\": false }"), @CmData(key = "zookeeper-storage", value = "{ \"type\": \"persistent-claim\", \"size\": \"1Gi\", \"delete-claim\": false }"), @CmData(key = "zookeeper-healthcheck-delay", value = "30"), @CmData(key = "zookeeper-healthcheck-timeout", value = "15"), @CmData(key = "kafka-healthcheck-delay", value = "30"), @CmData(key = "kafka-healthcheck-timeout", value = "15"), @CmData(key = "KAFKA_DEFAULT_REPLICATION_FACTOR", value = "2"), @CmData(key = "KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR", value = "5"), @CmData(key = "KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR", value = "5") })
@OpenShiftOnly
public void testDeployKafkaOnPersistentStorage() {
    // Verifies that a cluster deployed with persistent-claim storage creates one PVC
    // per Zookeeper node and one per Kafka node, and that the CM keeps the configured values.
    String clusterName = "my-cluster-persistent";
    int expectedZKPods = 2;
    int expectedKafkaPods = 2;
    Oc oc = (Oc) this.kubeClient;
    List<String> persistentVolumeClaimNames = oc.list("pvc");
    // assertEquals (instead of assertTrue on ==) reports both counts on failure.
    assertEquals(expectedZKPods + expectedKafkaPods, persistentVolumeClaimNames.size());
    // Checking Persistent volume claims for Zookeeper nodes
    for (int i = 0; i < expectedZKPods; i++) {
        assertTrue(persistentVolumeClaimNames.contains(zookeeperPVCName(clusterName, i)));
    }
    // Checking Persistent volume claims for Kafka nodes.
    // FIX: loop bound was expectedZKPods (copy-paste error); it must iterate over
    // the Kafka pod count. The values coincide here (both 2), but the bug would
    // hide missing Kafka PVCs whenever kafkaNodes != zkNodes.
    for (int i = 0; i < expectedKafkaPods; i++) {
        assertTrue(persistentVolumeClaimNames.contains(kafkaPVCName(clusterName, i)));
    }
    // The cluster ConfigMap must still carry the values supplied via @CmData.
    String configMap = kubeClient.get("cm", clusterName);
    assertThat(configMap, valueOfCmEquals("zookeeper-healthcheck-delay", "30"));
    assertThat(configMap, valueOfCmEquals("zookeeper-healthcheck-timeout", "15"));
    assertThat(configMap, valueOfCmEquals("kafka-healthcheck-delay", "30"));
    assertThat(configMap, valueOfCmEquals("kafka-healthcheck-timeout", "15"));
    assertThat(configMap, valueOfCmEquals("KAFKA_DEFAULT_REPLICATION_FACTOR", "2"));
    assertThat(configMap, valueOfCmEquals("KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR", "5"));
    assertThat(configMap, valueOfCmEquals("KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR", "5"));
}
Example usage of io.strimzi.test.KafkaCluster in the Strimzi project:
class KafkaClusterTest, method testKafkaScaleUpScaleDown.
@Test
@KafkaCluster(name = "my-cluster", kafkaNodes = 3)
public void testKafkaScaleUpScaleDown() {
    // Scales the Kafka StatefulSet up by one broker via the cluster CM, verifies the
    // new broker joins the cluster, then scales back down and verifies it leaves.
    // kafka cluster already deployed via annotation
    String clusterName = "my-cluster";
    LOGGER.info("Running kafkaScaleUpScaleDown {}", clusterName);
    // DefaultKubernetesClient is AutoCloseable; try-with-resources prevents leaking
    // its underlying HTTP connections (previously the client was never closed).
    try (KubernetesClient client = new DefaultKubernetesClient()) {
        final int initialReplicas = client.apps().statefulSets().inNamespace(NAMESPACE).withName(kafkaStatefulSetName(clusterName)).get().getStatus().getReplicas();
        assertEquals(3, initialReplicas);
        // scale up
        final int scaleTo = initialReplicas + 1;
        final int newPodId = initialReplicas;
        final int newBrokerId = newPodId;
        final String newPodName = kafkaPodName(clusterName, newPodId);
        final String firstPodName = kafkaPodName(clusterName, 0);
        LOGGER.info("Scaling Kafka up to {}", scaleTo);
        // Use the named scaleTo constant consistently (was initialReplicas + 1 inline).
        replaceCm(clusterName, "kafka-nodes", String.valueOf(scaleTo));
        kubeClient.waitForStatefulSet(kafkaStatefulSetName(clusterName), scaleTo);
        // Test that the new broker has joined the kafka cluster by checking it knows about all the other broker's API versions
        // (execute bash because we want the env vars expanded in the pod)
        String versions = getBrokerApiVersions(newPodName);
        for (int brokerId = 0; brokerId < scaleTo; brokerId++) {
            assertTrue(versions, versions.contains("(id: " + brokerId + " rack: "));
        }
        // TODO Check for k8s events, logs for errors
        // scale down
        LOGGER.info("Scaling down");
        replaceCm(clusterName, "kafka-nodes", String.valueOf(initialReplicas));
        kubeClient.waitForStatefulSet(kafkaStatefulSetName(clusterName), initialReplicas);
        final int finalReplicas = client.apps().statefulSets().inNamespace(NAMESPACE).withName(kafkaStatefulSetName(clusterName)).get().getStatus().getReplicas();
        assertEquals(initialReplicas, finalReplicas);
        // The removed broker must no longer be visible to the remaining cluster members.
        versions = getBrokerApiVersions(firstPodName);
        assertTrue("Expect the added broker, " + newBrokerId + ", to no longer be present in output of kafka-broker-api-versions.sh", versions.indexOf("(id: " + newBrokerId + " rack: ") == -1);
        // TODO Check for k8s events, logs for errors
    }
}
Example usage of io.strimzi.test.KafkaCluster in the Strimzi project:
class KafkaClusterTest, method testZookeeperScaleUpScaleDown.
@Test
@KafkaCluster(name = "my-cluster", kafkaNodes = 1, zkNodes = 1)
public void testZookeeperScaleUpScaleDown() {
    // Scales the Zookeeper ensemble from 1 to 3 nodes via the cluster CM, checks every
    // node reaches leader/follower state, then scales back to 1 and checks standalone mode.
    // kafka cluster already deployed via annotation
    String clusterName = "my-cluster";
    LOGGER.info("Running zookeeperScaleUpScaleDown with cluster {}", clusterName);
    // DefaultKubernetesClient is AutoCloseable; try-with-resources prevents leaking
    // its underlying HTTP connections (previously the client was never closed).
    try (KubernetesClient client = new DefaultKubernetesClient()) {
        final int initialReplicas = client.apps().statefulSets().inNamespace(NAMESPACE).withName(zookeeperStatefulSetName(clusterName)).get().getStatus().getReplicas();
        assertEquals(1, initialReplicas);
        // scale up
        final int scaleTo = initialReplicas + 2;
        final int[] newPodIds = { initialReplicas, initialReplicas + 1 };
        final String[] newPodName = { zookeeperPodName(clusterName, newPodIds[0]), zookeeperPodName(clusterName, newPodIds[1]) };
        final String firstPodName = zookeeperPodName(clusterName, 0);
        LOGGER.info("Scaling zookeeper up to {}", scaleTo);
        replaceCm(clusterName, "zookeeper-nodes", String.valueOf(scaleTo));
        kubeClient.waitForPod(newPodName[0]);
        kubeClient.waitForPod(newPodName[1]);
        // Every ensemble member must report either leader or follower state via "mntr".
        waitForZkMntr(firstPodName, Pattern.compile("zk_server_state\\s+(leader|follower)"));
        waitForZkMntr(newPodName[0], Pattern.compile("zk_server_state\\s+(leader|follower)"));
        waitForZkMntr(newPodName[1], Pattern.compile("zk_server_state\\s+(leader|follower)"));
        // TODO Check for k8s events, logs for errors
        // scale down
        LOGGER.info("Scaling down");
        replaceCm(clusterName, "zookeeper-nodes", String.valueOf(1));
        kubeClient.waitForResourceDeletion("po", zookeeperPodName(clusterName, 1));
        // Wait for the one remaining node to enter standalone mode
        waitForZkMntr(firstPodName, Pattern.compile("zk_server_state\\s+standalone"));
        // TODO Check for k8s events, logs for errors
    }
}
Example usage of io.strimzi.test.KafkaCluster in the Strimzi project:
class KafkaClusterTest, method testForUpdateValuesInConfigMap.
@Test
@OpenShiftOnly
@KafkaCluster(name = "my-cluster", kafkaNodes = 2, zkNodes = 2, config = { @CmData(key = "zookeeper-healthcheck-delay", value = "30"), @CmData(key = "zookeeper-healthcheck-timeout", value = "10"), @CmData(key = "kafka-healthcheck-delay", value = "30"), @CmData(key = "kafka-healthcheck-timeout", value = "10"), @CmData(key = "KAFKA_DEFAULT_REPLICATION_FACTOR", value = "2"), @CmData(key = "KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR", value = "5"), @CmData(key = "KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR", value = "5") })
public void testForUpdateValuesInConfigMap() {
    // Updates healthcheck/replication values in the cluster CM and verifies that the
    // operator rolls every Zookeeper and Kafka pod and applies the new values to the
    // pod specs (env vars and livenessProbe.initialDelaySeconds).
    String clusterName = "my-cluster";
    int expectedZKPods = 2;
    int expectedKafkaPods = 2;
    // JSONPath into each pod spec for the liveness-probe delay; hoisted out of the
    // two verification loops that previously duplicated the literal.
    final String initialDelaySecondsPath = "$.spec.containers[*].livenessProbe.initialDelaySeconds";
    // Record creation timestamps so we can detect that each pod was actually rolled.
    List<Date> zkPodStartTime = new ArrayList<>();
    for (int i = 0; i < expectedZKPods; i++) {
        zkPodStartTime.add(kubeClient.getResourceCreateTimestamp("pod", zookeeperPodName(clusterName, i)));
    }
    List<Date> kafkaPodStartTime = new ArrayList<>();
    for (int i = 0; i < expectedKafkaPods; i++) {
        kafkaPodStartTime.add(kubeClient.getResourceCreateTimestamp("pod", kafkaPodName(clusterName, i)));
    }
    replaceCm(clusterName, "zookeeper-healthcheck-delay", "23");
    replaceCm(clusterName, "kafka-healthcheck-delay", "23");
    replaceCm(clusterName, "KAFKA_DEFAULT_REPLICATION_FACTOR", "1");
    for (int i = 0; i < expectedZKPods; i++) {
        kubeClient.waitForResourceUpdate("pod", zookeeperPodName(clusterName, i), zkPodStartTime.get(i));
        kubeClient.waitForPod(zookeeperPodName(clusterName, i));
    }
    for (int i = 0; i < expectedKafkaPods; i++) {
        kubeClient.waitForResourceUpdate("pod", kafkaPodName(clusterName, i), kafkaPodStartTime.get(i));
        kubeClient.waitForPod(kafkaPodName(clusterName, i));
    }
    String configMap = kubeClient.get("cm", clusterName);
    assertThat(configMap, valueOfCmEquals("zookeeper-healthcheck-delay", "23"));
    assertThat(configMap, valueOfCmEquals("kafka-healthcheck-delay", "23"));
    assertThat(configMap, valueOfCmEquals("KAFKA_DEFAULT_REPLICATION_FACTOR", "1"));
    LOGGER.info("Verified CM and Testing kafka pods");
    for (int i = 0; i < expectedKafkaPods; i++) {
        // CONSISTENCY FIX: use kubeClient.getResourceAsJson directly (as the Zookeeper
        // loop below already did) instead of downcasting this.kubeClient to Oc —
        // the cast added nothing, since getResourceAsJson is available on kubeClient.
        String kafkaPodJson = kubeClient.getResourceAsJson("pod", kafkaPodName(clusterName, i));
        assertEquals("1", getValueFromJson(kafkaPodJson, globalVariableJsonPathBuilder("KAFKA_DEFAULT_REPLICATION_FACTOR")));
        assertEquals("23", getValueFromJson(kafkaPodJson, initialDelaySecondsPath));
    }
    LOGGER.info("Testing Zookeepers");
    for (int i = 0; i < expectedZKPods; i++) {
        String zkPodJson = kubeClient.getResourceAsJson("pod", zookeeperPodName(clusterName, i));
        assertEquals("23", getValueFromJson(zkPodJson, initialDelaySecondsPath));
    }
}
Aggregations