Example use of io.strimzi.api.kafka.model.Rack in the strimzi project (by strimzi).
From the class KafkaConnectClusterTest, method checkRack:
/**
 * Asserts that a rack-aware KafkaConnect deployment carries the Kafka init
 * container (used to look up the node's rack label). Does nothing when the
 * KafkaConnect resource has no rack configured.
 *
 * @param deployment the generated Connect Deployment under test
 * @param resource   the KafkaConnect custom resource the Deployment was built from
 */
private void checkRack(Deployment deployment, KafkaConnect resource) {
    Rack rack = resource.getSpec().getRack();
    if (rack == null) {
        // Non-rack-aware deployment: nothing to verify
        return;
    }
    PodSpec podSpec = deployment.getSpec().getTemplate().getSpec();
    // The pod spec must contain the Kafka init container
    List<Container> initContainers = podSpec.getInitContainers();
    assertThat(initContainers, is(notNullValue()));
    assertThat(initContainers.isEmpty(), is(false));
    boolean hasInitKafkaConnect = initContainers.stream()
            .map(Container::getName)
            .anyMatch(name -> name.equals(KafkaConnectCluster.INIT_NAME));
    assertThat(hasInitKafkaConnect, is(true));
}
Example use of io.strimzi.api.kafka.model.Rack in the strimzi project (by strimzi).
From the class KafkaBrokerConfigurationBuilderTest, method testRackAndBrokerId:
@ParallelTest
public void testRackAndBrokerId() {
    // broker.id/node.id come from withBrokerId(); broker.rack from withRackId()
    Rack rack = new Rack("failure-domain.kubernetes.io/zone");
    String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION)
            .withBrokerId()
            .withRackId(rack)
            .build();
    String expected = "broker.id=${STRIMZI_BROKER_ID}\n"
            + "node.id=${STRIMZI_BROKER_ID}\n"
            + "broker.rack=${STRIMZI_RACK_ID}";
    assertThat(configuration, isEquivalent(expected));
}
Example use of io.strimzi.api.kafka.model.Rack in the strimzi project (by strimzi).
From the class KafkaClusterTest, method checkStatefulSet:
/**
 * Validates the generated Kafka StatefulSet: metadata, replica count, the
 * single Kafka container (probes, env, volume mounts, ports), the strimzi-tmp
 * volume size limit and — when a rack is configured — the pod anti-affinity
 * rules and the rack init container.
 *
 * @param sts         the StatefulSet produced by the model
 * @param cm          the Kafka custom resource it was generated from
 * @param isOpenShift whether the cluster is OpenShift (currently unused here)
 */
private void checkStatefulSet(StatefulSet sts, Kafka cm, boolean isOpenShift) {
    // Metadata: name, namespace, labels and selector
    assertThat(sts.getMetadata().getName(), is(KafkaCluster.kafkaClusterName(cluster)));
    assertThat(sts.getMetadata().getNamespace(), is(namespace));
    assertThat(sts.getMetadata().getLabels(), is(expectedLabels()));
    assertThat(sts.getSpec().getSelector().getMatchLabels(), is(expectedSelectorLabels()));
    assertThat(sts.getSpec().getTemplate().getSpec().getSchedulerName(), is("default-scheduler"));

    List<Container> containers = sts.getSpec().getTemplate().getSpec().getContainers();
    assertThat(containers.size(), is(1));

    assertThat(sts.getSpec().getReplicas(), is(Integer.valueOf(replicas)));
    assertThat(sts.getSpec().getPodManagementPolicy(), is(PodManagementPolicy.PARALLEL.toValue()));

    // Checks on the single (main) Kafka container
    Container kafkaContainer = containers.get(0);
    assertThat(kafkaContainer.getImage(), is(image));
    assertThat(kafkaContainer.getLivenessProbe().getTimeoutSeconds(), is(Integer.valueOf(healthTimeout)));
    assertThat(kafkaContainer.getLivenessProbe().getInitialDelaySeconds(), is(Integer.valueOf(healthDelay)));
    assertThat(kafkaContainer.getLivenessProbe().getFailureThreshold(), is(Integer.valueOf(10)));
    assertThat(kafkaContainer.getLivenessProbe().getSuccessThreshold(), is(Integer.valueOf(4)));
    assertThat(kafkaContainer.getLivenessProbe().getPeriodSeconds(), is(Integer.valueOf(33)));
    assertThat(kafkaContainer.getReadinessProbe().getTimeoutSeconds(), is(Integer.valueOf(healthTimeout)));
    assertThat(kafkaContainer.getReadinessProbe().getInitialDelaySeconds(), is(Integer.valueOf(healthDelay)));
    assertThat(kafkaContainer.getReadinessProbe().getFailureThreshold(), is(Integer.valueOf(10)));
    assertThat(kafkaContainer.getReadinessProbe().getSuccessThreshold(), is(Integer.valueOf(4)));
    assertThat(kafkaContainer.getReadinessProbe().getPeriodSeconds(), is(Integer.valueOf(33)));
    assertThat(AbstractModel.containerEnvVars(kafkaContainer).get(KafkaCluster.ENV_VAR_STRIMZI_KAFKA_GC_LOG_ENABLED), is(Boolean.toString(AbstractModel.DEFAULT_JVM_GC_LOGGING_ENABLED)));

    // Volume mounts: indices follow the order in which the model adds them
    assertThat(kafkaContainer.getVolumeMounts().get(1).getName(), is(AbstractModel.STRIMZI_TMP_DIRECTORY_DEFAULT_VOLUME_NAME));
    assertThat(kafkaContainer.getVolumeMounts().get(1).getMountPath(), is(AbstractModel.STRIMZI_TMP_DIRECTORY_DEFAULT_MOUNT_PATH));
    assertThat(kafkaContainer.getVolumeMounts().get(3).getName(), is(KafkaCluster.BROKER_CERTS_VOLUME));
    assertThat(kafkaContainer.getVolumeMounts().get(3).getMountPath(), is(KafkaCluster.BROKER_CERTS_VOLUME_MOUNT));
    assertThat(kafkaContainer.getVolumeMounts().get(2).getName(), is(KafkaCluster.CLUSTER_CA_CERTS_VOLUME));
    assertThat(kafkaContainer.getVolumeMounts().get(2).getMountPath(), is(KafkaCluster.CLUSTER_CA_CERTS_VOLUME_MOUNT));
    assertThat(kafkaContainer.getVolumeMounts().get(4).getName(), is(KafkaCluster.CLIENT_CA_CERTS_VOLUME));
    assertThat(kafkaContainer.getVolumeMounts().get(4).getMountPath(), is(KafkaCluster.CLIENT_CA_CERTS_VOLUME_MOUNT));

    // Ports: control plane first, then replication
    assertThat(kafkaContainer.getPorts().get(0).getName(), is(KafkaCluster.CONTROLPLANE_PORT_NAME));
    assertThat(kafkaContainer.getPorts().get(0).getContainerPort(), is(KafkaCluster.CONTROLPLANE_PORT));
    assertThat(kafkaContainer.getPorts().get(0).getProtocol(), is("TCP"));
    assertThat(kafkaContainer.getPorts().get(1).getName(), is(KafkaCluster.REPLICATION_PORT_NAME));
    assertThat(kafkaContainer.getPorts().get(1).getContainerPort(), is(KafkaCluster.REPLICATION_PORT));
    assertThat(kafkaContainer.getPorts().get(1).getProtocol(), is("TCP"));

    // The strimzi-tmp emptyDir volume must carry the default size limit
    assertThat(sts.getSpec().getTemplate().getSpec().getVolumes().stream()
            .filter(vol -> vol.getName().equalsIgnoreCase("strimzi-tmp"))
            .findFirst().get().getEmptyDir().getSizeLimit(),
            is(new Quantity(AbstractModel.STRIMZI_TMP_DIRECTORY_DEFAULT_SIZE)));

    Rack rack = cm.getSpec().getKafka().getRack();
    if (rack != null) {
        // Rack-aware clusters must get anti-affinity rules with the configured topology key
        PodSpec podSpec = sts.getSpec().getTemplate().getSpec();
        assertThat(podSpec.getAffinity(), is(notNullValue()));
        assertThat(podSpec.getAffinity().getPodAntiAffinity(), is(notNullValue()));
        assertThat(podSpec.getAffinity().getPodAntiAffinity().getPreferredDuringSchedulingIgnoredDuringExecution(), is(notNullValue()));
        List<WeightedPodAffinityTerm> affinityTerms = podSpec.getAffinity().getPodAntiAffinity().getPreferredDuringSchedulingIgnoredDuringExecution();
        assertThat(affinityTerms, is(notNullValue()));
        assertThat(affinityTerms.isEmpty(), is(false));
        boolean hasTopologyKey = affinityTerms.stream()
                .anyMatch(term -> term.getPodAffinityTerm().getTopologyKey().equals(rack.getTopologyKey()));
        assertThat(hasTopologyKey, is(true));

        // ... and the Kafka init container that resolves the rack label
        List<Container> initContainers = podSpec.getInitContainers();
        assertThat(initContainers, is(notNullValue()));
        assertThat(initContainers.isEmpty(), is(false));
        boolean hasInitKafka = initContainers.stream()
                .map(Container::getName)
                .anyMatch(name -> name.equals(KafkaCluster.INIT_NAME));
        assertThat(hasInitKafka, is(true));
    }
}
Example use of io.strimzi.api.kafka.model.Rack in the strimzi project (by strimzi).
From the class KafkaBrokerConfigurationBuilderTest, method testRackId:
@ParallelTest
public void testRackId() {
    // Only broker.rack is expected when just the rack is configured
    Rack rack = new Rack("failure-domain.kubernetes.io/zone");
    String configuration = new KafkaBrokerConfigurationBuilder(Reconciliation.DUMMY_RECONCILIATION)
            .withRackId(rack)
            .build();
    assertThat(configuration, isEquivalent("broker.rack=${STRIMZI_RACK_ID}"));
}
Example use of io.strimzi.api.kafka.model.Rack in the strimzi project (by strimzi).
From the class SpecificIsolatedST, method testRackAwareConnectWrongDeployment:
/**
 * Verifies that a KafkaConnect cluster configured with a non-existing rack
 * topology key stays unschedulable and the resource becomes NotReady with a
 * timeout, and that fixing the rack key afterwards lets the cluster become
 * ready. Also verifies that ClusterRoleBinding template labels/annotations
 * from the KafkaConnect spec are applied.
 *
 * The shared Cluster Operator is re-installed with a shorter operation
 * timeout first, because a timeout is the expected outcome of the initial
 * (wrong) deployment; the original CO configuration is restored at the end.
 *
 * Fix: the two PodStatus assertions previously passed the expected literal as
 * the ACTUAL argument of assertThat(actual, matcher); arguments are now in
 * the conventional order so failure messages report the real observed value.
 */
@IsolatedTest("Modification of shared Cluster Operator configuration")
@Tag(CONNECT)
@Tag(CONNECT_COMPONENTS)
@Tag(REGRESSION)
@Tag(INTERNAL_CLIENTS_USED)
void testRackAwareConnectWrongDeployment(ExtensionContext extensionContext) {
    assumeFalse(Environment.isNamespaceRbacScope());
    String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    String kafkaClientsName = mapWithKafkaClientNames.get(extensionContext.getDisplayName());
    Map<String, String> label = Collections.singletonMap("my-label", "value");
    Map<String, String> anno = Collections.singletonMap("my-annotation", "value");
    // We need to update CO configuration to set OPERATION_TIMEOUT to a shorter value, because we expect a timeout in this test
    Map<String, String> coSnapshot = DeploymentUtils.depSnapshot(INFRA_NAMESPACE, ResourceManager.getCoDeploymentName());
    // We have to install CO in the class stack, otherwise it will be deleted at the end of the test case and all following tests will fail
    clusterOperator.unInstall();
    clusterOperator = new SetupClusterOperator.SetupClusterOperatorBuilder()
        .withExtensionContext(BeforeAllOnce.getSharedExtensionContext())
        .withNamespace(INFRA_NAMESPACE)
        .withOperationTimeout(CO_OPERATION_TIMEOUT_SHORT)
        .withReconciliationInterval(Constants.RECONCILIATION_INTERVAL)
        .createInstallation()
        .runBundleInstallation();
    coSnapshot = DeploymentUtils.waitTillDepHasRolled(INFRA_NAMESPACE, ResourceManager.getCoDeploymentName(), 1, coSnapshot);
    String wrongRackKey = "wrong-key";
    String rackKey = "rack-key";
    // Kafka itself gets a valid rack key and the rack-aware replica selector
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3)
        .editMetadata()
            .withNamespace(INFRA_NAMESPACE)
        .endMetadata()
        .editSpec()
            .editKafka()
                .withNewRack()
                    .withTopologyKey(rackKey)
                .endRack()
                .addToConfig("replica.selector.class", "org.apache.kafka.common.replica.RackAwareReplicaSelector")
            .endKafka()
        .endSpec()
        .build());
    resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(INFRA_NAMESPACE, false, kafkaClientsName).build());
    String kafkaClientsPodName = kubeClient(INFRA_NAMESPACE).listPodsByPrefixInName(INFRA_NAMESPACE, kafkaClientsName).get(0).getMetadata().getName();
    LOGGER.info("Deploy KafkaConnect with wrong rack-aware topology key: {}", wrongRackKey);
    KafkaConnect kc = KafkaConnectTemplates.kafkaConnect(extensionContext, clusterName, clusterName, 1)
        .editMetadata()
            .withNamespace(INFRA_NAMESPACE)
        .endMetadata()
        .editSpec()
            .withNewRack()
                .withTopologyKey(wrongRackKey)
            .endRack()
            .addToConfig("key.converter.schemas.enable", false)
            .addToConfig("value.converter.schemas.enable", false)
            .addToConfig("key.converter", "org.apache.kafka.connect.storage.StringConverter")
            .addToConfig("value.converter", "org.apache.kafka.connect.storage.StringConverter")
            .editOrNewTemplate()
                .withNewClusterRoleBinding()
                    .withNewMetadata()
                        .withAnnotations(anno)
                        .withLabels(label)
                    .endMetadata()
                .endClusterRoleBinding()
            .endTemplate()
        .endSpec()
        .build();
    // Created without waiting for readiness: the pod is expected to stay Pending
    resourceManager.createResource(extensionContext, false, kc);
    NetworkPolicyResource.deployNetworkPolicyForResource(extensionContext, kc, KafkaConnectResources.deploymentName(clusterName));
    PodUtils.waitForPendingPod(INFRA_NAMESPACE, clusterName + "-connect");
    List<String> connectWrongPods = kubeClient(INFRA_NAMESPACE).listPodNames(INFRA_NAMESPACE, clusterName, Labels.STRIMZI_KIND_LABEL, KafkaConnect.RESOURCE_KIND);
    String connectWrongPodName = connectWrongPods.get(0);
    LOGGER.info("Waiting for ClusterOperator to get timeout operation of incorrectly set up KafkaConnect");
    KafkaConnectUtils.waitForKafkaConnectCondition("TimeoutException", "NotReady", INFRA_NAMESPACE, clusterName);
    PodStatus kcWrongStatus = kubeClient().getPod(INFRA_NAMESPACE, connectWrongPodName).getStatus();
    // assertThat(actual, is(expected)) — actual observed value first so a failure reports it
    assertThat(kcWrongStatus.getConditions().get(0).getReason(), is("Unschedulable"));
    assertThat(kcWrongStatus.getConditions().get(0).getType(), is("PodScheduled"));
    // Fix the rack key and wait until the Connect cluster recovers
    KafkaConnectResource.replaceKafkaConnectResourceInSpecificNamespace(clusterName, kafkaConnect -> {
        kafkaConnect.getSpec().setRack(new Rack(rackKey));
    }, INFRA_NAMESPACE);
    KafkaConnectUtils.waitForConnectReady(INFRA_NAMESPACE, clusterName);
    LOGGER.info("KafkaConnect is ready with changed rack key: '{}'.", rackKey);
    LOGGER.info("Verify KafkaConnect rack key update");
    kc = KafkaConnectResource.kafkaConnectClient().inNamespace(INFRA_NAMESPACE).withName(clusterName).get();
    assertThat(kc.getSpec().getRack().getTopologyKey(), is(rackKey));
    List<String> kcPods = kubeClient(INFRA_NAMESPACE).listPodNames(INFRA_NAMESPACE, clusterName, Labels.STRIMZI_KIND_LABEL, KafkaConnect.RESOURCE_KIND);
    KafkaConnectUtils.sendReceiveMessagesThroughConnect(kcPods.get(0), TOPIC_NAME, kafkaClientsPodName, INFRA_NAMESPACE, clusterName);
    // Check the ClusterRoleBinding annotations and labels in the Kafka cluster
    Map<String, String> actualLabel = KafkaConnectResource.kafkaConnectClient().inNamespace(INFRA_NAMESPACE).withName(clusterName).get().getSpec().getTemplate().getClusterRoleBinding().getMetadata().getLabels();
    Map<String, String> actualAnno = KafkaConnectResource.kafkaConnectClient().inNamespace(INFRA_NAMESPACE).withName(clusterName).get().getSpec().getTemplate().getClusterRoleBinding().getMetadata().getAnnotations();
    assertThat(actualLabel, is(label));
    assertThat(actualAnno, is(anno));
    // Revert changes for CO deployment
    clusterOperator.unInstall();
    clusterOperator = new SetupClusterOperator.SetupClusterOperatorBuilder()
        .withExtensionContext(BeforeAllOnce.getSharedExtensionContext())
        .withNamespace(INFRA_NAMESPACE)
        .createInstallation()
        .runBundleInstallation();
    DeploymentUtils.waitTillDepHasRolled(INFRA_NAMESPACE, ResourceManager.getCoDeploymentName(), 1, coSnapshot);
}
Aggregations