use of io.fabric8.kubernetes.api.model.networking.v1.NetworkPolicyPeerBuilder in project strimzi by strimzi.
the class NetworkPoliciesIsolatedST method testNetworkPoliciesWithPlainListener.
@IsolatedTest("Specific cluster operator for test case")
@Tag(INTERNAL_CLIENTS_USED)
void testNetworkPoliciesWithPlainListener(ExtensionContext extensionContext) {
String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
clusterOperator.unInstall();
clusterOperator = new SetupClusterOperator.SetupClusterOperatorBuilder()
    .withExtensionContext(BeforeAllOnce.getSharedExtensionContext())
    .withNamespace(namespace)
    .createInstallation()
    .runInstallation();
String allowedKafkaClientsName = clusterName + "-" + Constants.KAFKA_CLIENTS + "-allow";
String deniedKafkaClientsName = clusterName + "-" + Constants.KAFKA_CLIENTS + "-deny";
Map<String, String> matchLabelForPlain = new HashMap<>();
matchLabelForPlain.put("app", allowedKafkaClientsName);
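// Deploy a single-broker Kafka cluster whose plain listener (port 9092, SCRAM-SHA-512 auth)
// only admits traffic from pods carrying the "app: <allowed clients name>" label.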
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 1, 1)
    .editSpec()
        .editKafka()
            .withListeners(new GenericKafkaListenerBuilder()
                .withName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
                .withPort(9092)
                .withType(KafkaListenerType.INTERNAL)
                .withTls(false)
                .withNewKafkaListenerAuthenticationScramSha512Auth()
                .endKafkaListenerAuthenticationScramSha512Auth()
                .withNetworkPolicyPeers(new NetworkPolicyPeerBuilder()
                    .withNewPodSelector()
                        .withMatchLabels(matchLabelForPlain)
                    .endPodSelector()
                    .build())
                .build())
        .endKafka()
        .withNewKafkaExporter()
        .endKafkaExporter()
    .endSpec()
    .build());
NetworkPolicyResource.allowNetworkPolicySettingsForKafkaExporter(extensionContext, clusterName);
String topic0 = "topic-example-0";
String topic1 = "topic-example-1";
String userName = "user-example";
KafkaUser kafkaUser = KafkaUserTemplates.scramShaUser(clusterName, userName).build();
resourceManager.createResource(extensionContext, kafkaUser);
resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topic0).build());
resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topic1).build());
resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(false, allowedKafkaClientsName, kafkaUser).build());
String allowedKafkaClientsPodName = kubeClient().listPodsByPrefixInName(allowedKafkaClientsName).get(0).getMetadata().getName();
LOGGER.info("Verifying that {} pod is able to exchange messages", allowedKafkaClientsPodName);
InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder()
    .withUsingPodName(allowedKafkaClientsPodName)
    .withTopicName(topic0)
    .withNamespaceName(namespace)
    .withClusterName(clusterName)
    .withMessageCount(MESSAGE_COUNT)
    .withKafkaUsername(userName)
    .withSecurityProtocol(SecurityProtocol.PLAINTEXT)
    .withListenerName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
    .build();
internalKafkaClient.checkProducedAndConsumedMessages(internalKafkaClient.sendMessagesPlain(), internalKafkaClient.receiveMessagesPlain());
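// Deploy a second client deployment whose pods do not match the allowed "app" label; the network policy should block it.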
resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(false, deniedKafkaClientsName, kafkaUser).build());
String deniedKafkaClientsPodName = kubeClient().listPodsByPrefixInName(deniedKafkaClientsName).get(0).getMetadata().getName();
InternalKafkaClient newInternalKafkaClient = internalKafkaClient.toBuilder()
    .withUsingPodName(deniedKafkaClientsPodName)
    .withTopicName(topic1)
    .build();
LOGGER.info("Verifying that {} pod is not able to exchange messages", deniedKafkaClientsPodName);
assertThrows(AssertionError.class, () -> {
newInternalKafkaClient.checkProducedAndConsumedMessages(newInternalKafkaClient.sendMessagesPlain(), newInternalKafkaClient.receiveMessagesPlain());
});
LOGGER.info("Check metrics exported by Kafka Exporter");
MetricsCollector metricsCollector = new MetricsCollector.Builder()
    .withScraperPodName(allowedKafkaClientsPodName)
    .withComponentName(clusterName)
    .withComponentType(ComponentType.KafkaExporter)
    .build();
Map<String, String> kafkaExporterMetricsData = metricsCollector.collectMetricsFromPods();
assertThat("Kafka Exporter metrics should be non-empty", kafkaExporterMetricsData.size() > 0);
for (Map.Entry<String, String> entry : kafkaExporterMetricsData.entrySet()) {
assertThat("Value from collected metric should be non-empty", !entry.getValue().isEmpty());
assertThat("Metrics doesn't contain specific values", entry.getValue().contains("kafka_consumergroup_current_offset"));
assertThat("Metrics doesn't contain specific values", entry.getValue().contains("kafka_topic_partitions{topic=\"" + topic0 + "\"} 1"));
assertThat("Metrics doesn't contain specific values", entry.getValue().contains("kafka_topic_partitions{topic=\"" + topic1 + "\"} 1"));
}
}
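The core pattern both network-policy tests rely on is a NetworkPolicyPeer that selects client pods by label. A minimal sketch, using only calls shown above (the label value is illustrative):
import java.util.Map;
import io.fabric8.kubernetes.api.model.networking.v1.NetworkPolicyPeer;
import io.fabric8.kubernetes.api.model.networking.v1.NetworkPolicyPeerBuilder;

// Peer matching pods labelled app=my-cluster-kafka-clients-allow (hypothetical value);
// passed to a listener's networkPolicyPeers so only those pods may reach the listener.
NetworkPolicyPeer allowedClientsPeer = new NetworkPolicyPeerBuilder()
    .withNewPodSelector()
        .withMatchLabels(Map.of("app", "my-cluster-kafka-clients-allow"))
    .endPodSelector()
    .build();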
use of io.fabric8.kubernetes.api.model.networking.v1.NetworkPolicyPeerBuilder in project strimzi by strimzi.
the class NetworkPoliciesIsolatedST method testNetworkPoliciesWithTlsListener.
@IsolatedTest("Specific cluster operator for test case")
@Tag(INTERNAL_CLIENTS_USED)
void testNetworkPoliciesWithTlsListener(ExtensionContext extensionContext) {
String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
clusterOperator.unInstall();
clusterOperator = new SetupClusterOperator.SetupClusterOperatorBuilder()
    .withExtensionContext(BeforeAllOnce.getSharedExtensionContext())
    .withNamespace(namespace)
    .createInstallation()
    .runInstallation();
String allowedKafkaClientsName = clusterName + "-" + Constants.KAFKA_CLIENTS + "-allow";
String deniedKafkaClientsName = clusterName + "-" + Constants.KAFKA_CLIENTS + "-deny";
Map<String, String> matchLabelsForTls = new HashMap<>();
matchLabelsForTls.put("app", allowedKafkaClientsName);
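// Deploy a single-broker Kafka cluster whose TLS listener (port 9093, SCRAM-SHA-512 auth)
// only admits traffic from pods carrying the "app: <allowed clients name>" label.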
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 1, 1)
    .editSpec()
        .editKafka()
            .withListeners(new GenericKafkaListenerBuilder()
                .withName(Constants.TLS_LISTENER_DEFAULT_NAME)
                .withPort(9093)
                .withType(KafkaListenerType.INTERNAL)
                .withTls(true)
                .withNewKafkaListenerAuthenticationScramSha512Auth()
                .endKafkaListenerAuthenticationScramSha512Auth()
                .withNetworkPolicyPeers(new NetworkPolicyPeerBuilder()
                    .withNewPodSelector()
                        .withMatchLabels(matchLabelsForTls)
                    .endPodSelector()
                    .build())
                .build())
        .endKafka()
    .endSpec()
    .build());
String topic0 = "topic-example-0";
String topic1 = "topic-example-1";
resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topic0).build());
resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topic1).build());
String userName = "user-example";
KafkaUser kafkaUser = KafkaUserTemplates.scramShaUser(clusterName, userName).build();
resourceManager.createResource(extensionContext, kafkaUser);
resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(true, allowedKafkaClientsName, kafkaUser).build());
String allowedKafkaClientsPodName = kubeClient().listPodsByPrefixInName(allowedKafkaClientsName).get(0).getMetadata().getName();
LOGGER.info("Verifying that {} pod is able to exchange messages", allowedKafkaClientsPodName);
InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder()
    .withUsingPodName(allowedKafkaClientsPodName)
    .withTopicName(topic0)
    .withNamespaceName(namespace)
    .withClusterName(clusterName)
    .withMessageCount(MESSAGE_COUNT)
    .withKafkaUsername(userName)
    .withListenerName(Constants.TLS_LISTENER_DEFAULT_NAME)
    .build();
internalKafkaClient.checkProducedAndConsumedMessages(internalKafkaClient.sendMessagesTls(), internalKafkaClient.receiveMessagesTls());
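// Deploy a second client deployment whose pods do not match the allowed "app" label; the network policy should block it.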
resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(true, deniedKafkaClientsName, kafkaUser).build());
String deniedKafkaClientsPodName = kubeClient().listPodsByPrefixInName(deniedKafkaClientsName).get(0).getMetadata().getName();
InternalKafkaClient newInternalKafkaClient = internalKafkaClient.toBuilder()
    .withUsingPodName(deniedKafkaClientsPodName)
    .withTopicName(topic1)
    .withConsumerGroupName(ClientUtils.generateRandomConsumerGroup())
    .build();
LOGGER.info("Verifying that {} pod is not able to exchange messages", deniedKafkaClientsPodName);
assertThrows(AssertionError.class, () -> {
newInternalKafkaClient.checkProducedAndConsumedMessages(newInternalKafkaClient.sendMessagesTls(), newInternalKafkaClient.receiveMessagesTls());
});
}
use of io.fabric8.kubernetes.api.model.networking.v1.NetworkPolicyPeerBuilder in project strimzi by strimzi.
the class KafkaConnectCluster method generateNetworkPolicy.
/**
* Generates the NetworkPolicies relevant for Kafka Connect nodes
*
* @param connectorOperatorEnabled Whether the ConnectorOperator is enabled or not
* @param operatorNamespace Namespace where the Strimzi Cluster Operator runs. Null if not configured.
* @param operatorNamespaceLabels Labels of the namespace where the Strimzi Cluster Operator runs. Null if not configured.
*
* @return The network policy.
*/
public NetworkPolicy generateNetworkPolicy(boolean connectorOperatorEnabled, String operatorNamespace, Labels operatorNamespaceLabels) {
if (connectorOperatorEnabled) {
List<NetworkPolicyIngressRule> rules = new ArrayList<>(2);
// Give CO access to the REST API
NetworkPolicyIngressRule restApiRule = new NetworkPolicyIngressRuleBuilder()
    .addNewPort()
        .withNewPort(REST_API_PORT)
        .withProtocol("TCP")
    .endPort()
    .build();
// OCP 3.11 doesn't support network policies whose `from` section contains a namespace selector.
// Since the CO can run in a different namespace, the policy has to stay wide open on OCP 3.11.
// These peers are therefore set only on platforms other than OCP 3.11; on 3.11 the `from`
// section is left empty.
List<NetworkPolicyPeer> peers = new ArrayList<>(2);
// Other connect pods in the same cluster need to talk with each other over the REST API
NetworkPolicyPeer connectPeer = new NetworkPolicyPeerBuilder()
    .withNewPodSelector()
        .addToMatchLabels(getSelectorLabels().toMap())
    .endPodSelector()
    .build();
peers.add(connectPeer);
// CO needs to talk with the Connect pods to manage connectors
NetworkPolicyPeer clusterOperatorPeer = new NetworkPolicyPeerBuilder()
    .withNewPodSelector()
        .addToMatchLabels(Labels.STRIMZI_KIND_LABEL, "cluster-operator")
    .endPodSelector()
    .build();
ModelUtils.setClusterOperatorNetworkPolicyNamespaceSelector(clusterOperatorPeer, namespace, operatorNamespace, operatorNamespaceLabels);
peers.add(clusterOperatorPeer);
restApiRule.setFrom(peers);
rules.add(restApiRule);
// If metrics are enabled, we have to open them as well. Otherwise they will be blocked.
if (isMetricsEnabled) {
NetworkPolicyIngressRule metricsRule = new NetworkPolicyIngressRuleBuilder()
    .addNewPort()
        .withNewPort(METRICS_PORT)
        .withProtocol("TCP")
    .endPort()
    .withFrom()
    .build();
rules.add(metricsRule);
}
NetworkPolicy networkPolicy = new NetworkPolicyBuilder()
    .withNewMetadata()
        .withName(name)
        .withNamespace(namespace)
        .withLabels(labels.toMap())
        .withOwnerReferences(createOwnerReference())
    .endMetadata()
    .withNewSpec()
        .withNewPodSelector()
            .addToMatchLabels(getSelectorLabels().toMap())
        .endPodSelector()
        .withIngress(rules)
    .endSpec()
    .build();
LOGGER.traceCr(reconciliation, "Created network policy {}", networkPolicy);
return networkPolicy;
} else {
return null;
}
}
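A hedged usage sketch of this method (the variable name, namespace, and label values are illustrative, and the Labels import path may differ between Strimzi versions):
import java.util.Map;
import io.fabric8.kubernetes.api.model.networking.v1.NetworkPolicy;
import io.strimzi.operator.common.model.Labels;

// "connect" is assumed to be an already-built KafkaConnectCluster instance.
NetworkPolicy policy = connect.generateNetworkPolicy(
        true,                                     // connector operator enabled, so a policy is generated
        "strimzi-operator-namespace",             // hypothetical namespace where the Cluster Operator runs
        Labels.fromMap(Map.of("team", "ops")));   // hypothetical labels of that namespace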
use of io.fabric8.kubernetes.api.model.networking.v1.NetworkPolicyPeerBuilder in project strimzi by strimzi.
the class CruiseControlTest method testRestApiPortNetworkPolicyWithNamespaceLabels.
@ParallelTest
public void testRestApiPortNetworkPolicyWithNamespaceLabels() {
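// Expected `from` peer: Cluster Operator pods selected by the strimzi.io/kind=cluster-operator label,
// restricted to a namespace carrying the given labels.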
NetworkPolicyPeer clusterOperatorPeer = new NetworkPolicyPeerBuilder()
    .withNewPodSelector()
        .withMatchLabels(Collections.singletonMap(Labels.STRIMZI_KIND_LABEL, "cluster-operator"))
    .endPodSelector()
    .withNewNamespaceSelector()
        .withMatchLabels(Collections.singletonMap("nsLabelKey", "nsLabelValue"))
    .endNamespaceSelector()
    .build();
NetworkPolicy np = cc.generateNetworkPolicy(null, Labels.fromMap(Collections.singletonMap("nsLabelKey", "nsLabelValue")));
assertThat(np.getSpec().getIngress().stream()
        .filter(ing -> ing.getPorts().get(0).getPort().equals(new IntOrString(CruiseControl.REST_API_PORT)))
        .findFirst()
        .orElse(null), is(notNullValue()));
List<NetworkPolicyPeer> rules = np.getSpec().getIngress().stream()
        .filter(ing -> ing.getPorts().get(0).getPort().equals(new IntOrString(CruiseControl.REST_API_PORT)))
        .map(NetworkPolicyIngressRule::getFrom)
        .findFirst()
        .orElseThrow();
assertThat(rules.size(), is(1));
assertThat(rules.contains(clusterOperatorPeer), is(true));
}
use of io.fabric8.kubernetes.api.model.networking.v1.NetworkPolicyPeerBuilder in project strimzi by strimzi.
the class KafkaClusterTest method testReplicationPortNetworkPolicy.
@ParallelTest
public void testReplicationPortNetworkPolicy() {
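// Expected `from` peers on the replication port: the Kafka brokers themselves, the Entity Operator,
// Kafka Exporter, Cruise Control and the Cluster Operator (with a different namespace selector per scenario).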
NetworkPolicyPeer kafkaBrokersPeer = new NetworkPolicyPeerBuilder()
    .withNewPodSelector()
        .withMatchLabels(Collections.singletonMap(Labels.STRIMZI_NAME_LABEL, KafkaCluster.kafkaClusterName(cluster)))
    .endPodSelector()
    .build();
NetworkPolicyPeer eoPeer = new NetworkPolicyPeerBuilder()
    .withNewPodSelector()
        .withMatchLabels(Collections.singletonMap(Labels.STRIMZI_NAME_LABEL, EntityOperator.entityOperatorName(cluster)))
    .endPodSelector()
    .build();
NetworkPolicyPeer kafkaExporterPeer = new NetworkPolicyPeerBuilder()
    .withNewPodSelector()
        .withMatchLabels(Collections.singletonMap(Labels.STRIMZI_NAME_LABEL, KafkaExporter.kafkaExporterName(cluster)))
    .endPodSelector()
    .build();
NetworkPolicyPeer cruiseControlPeer = new NetworkPolicyPeerBuilder()
    .withNewPodSelector()
        .withMatchLabels(Collections.singletonMap(Labels.STRIMZI_NAME_LABEL, CruiseControl.cruiseControlName(cluster)))
    .endPodSelector()
    .build();
NetworkPolicyPeer clusterOperatorPeer = new NetworkPolicyPeerBuilder()
    .withNewPodSelector()
        .withMatchLabels(Collections.singletonMap(Labels.STRIMZI_KIND_LABEL, "cluster-operator"))
    .endPodSelector()
    .withNewNamespaceSelector()
    .endNamespaceSelector()
    .build();
NetworkPolicyPeer clusterOperatorPeerSameNamespace = new NetworkPolicyPeerBuilder()
    .withNewPodSelector()
        .withMatchLabels(Collections.singletonMap(Labels.STRIMZI_KIND_LABEL, "cluster-operator"))
    .endPodSelector()
    .build();
NetworkPolicyPeer clusterOperatorPeerNamespaceWithLabels = new NetworkPolicyPeerBuilder()
    .withNewPodSelector()
        .withMatchLabels(Collections.singletonMap(Labels.STRIMZI_KIND_LABEL, "cluster-operator"))
    .endPodSelector()
    .withNewNamespaceSelector()
        .withMatchLabels(Collections.singletonMap("nsLabelKey", "nsLabelValue"))
    .endNamespaceSelector()
    .build();
Kafka kafkaAssembly = ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout, jmxMetricsConfig, configuration, emptyMap());
KafkaCluster k = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS);
// Check Network Policies => Different namespace
NetworkPolicy np = k.generateNetworkPolicy("operator-namespace", null);
assertThat(np.getSpec().getIngress().stream()
        .filter(ing -> ing.getPorts().get(0).getPort().equals(new IntOrString(KafkaCluster.REPLICATION_PORT)))
        .findFirst()
        .orElse(null), is(notNullValue()));
List<NetworkPolicyPeer> rules = np.getSpec().getIngress().stream()
        .filter(ing -> ing.getPorts().get(0).getPort().equals(new IntOrString(KafkaCluster.REPLICATION_PORT)))
        .map(NetworkPolicyIngressRule::getFrom)
        .findFirst()
        .orElseThrow();
assertThat(rules.size(), is(5));
assertThat(rules.contains(kafkaBrokersPeer), is(true));
assertThat(rules.contains(eoPeer), is(true));
assertThat(rules.contains(kafkaExporterPeer), is(true));
assertThat(rules.contains(cruiseControlPeer), is(true));
assertThat(rules.contains(clusterOperatorPeer), is(true));
// Check Network Policies => Same namespace
np = k.generateNetworkPolicy(namespace, null);
assertThat(np.getSpec().getIngress().stream()
        .filter(ing -> ing.getPorts().get(0).getPort().equals(new IntOrString(KafkaCluster.REPLICATION_PORT)))
        .findFirst()
        .orElse(null), is(notNullValue()));
rules = np.getSpec().getIngress().stream()
        .filter(ing -> ing.getPorts().get(0).getPort().equals(new IntOrString(KafkaCluster.REPLICATION_PORT)))
        .map(NetworkPolicyIngressRule::getFrom)
        .findFirst()
        .orElseThrow();
assertThat(rules.size(), is(5));
assertThat(rules.contains(kafkaBrokersPeer), is(true));
assertThat(rules.contains(eoPeer), is(true));
assertThat(rules.contains(kafkaExporterPeer), is(true));
assertThat(rules.contains(cruiseControlPeer), is(true));
assertThat(rules.contains(clusterOperatorPeerSameNamespace), is(true));
// Check Network Policies => Namespace with Labels
np = k.generateNetworkPolicy("operator-namespace", Labels.fromMap(Collections.singletonMap("nsLabelKey", "nsLabelValue")));
assertThat(np.getSpec().getIngress().stream()
        .filter(ing -> ing.getPorts().get(0).getPort().equals(new IntOrString(KafkaCluster.REPLICATION_PORT)))
        .findFirst()
        .orElse(null), is(notNullValue()));
rules = np.getSpec().getIngress().stream()
        .filter(ing -> ing.getPorts().get(0).getPort().equals(new IntOrString(KafkaCluster.REPLICATION_PORT)))
        .map(NetworkPolicyIngressRule::getFrom)
        .findFirst()
        .orElseThrow();
assertThat(rules.size(), is(5));
assertThat(rules.contains(kafkaBrokersPeer), is(true));
assertThat(rules.contains(eoPeer), is(true));
assertThat(rules.contains(kafkaExporterPeer), is(true));
assertThat(rules.contains(cruiseControlPeer), is(true));
assertThat(rules.contains(clusterOperatorPeerNamespaceWithLabels), is(true));
}