use of org.bf2.cos.fleetshard.support.resources.Namespaces in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
the class SuiteUnitTest method setupMockServer.
@BeforeAll
void setupMockServer() throws Exception {
    // Seed the mock cluster with two pods in the test namespace.
    PodList expectedPodList = new PodListBuilder()
        .withItems(
            new PodBuilder().withNewMetadata().withName("pod1").withNamespace(TEST_NS).endMetadata().build(),
            new PodBuilder().withNewMetadata().withName("pod2").withNamespace(TEST_NS).endMetadata().build())
        .build();
    for (Pod p : expectedPodList.getItems()) {
        mockServer.getClient().pods().inNamespace(TEST_NS).create(p);
    }

    // Fake "keycloak" namespace with a single pod, mimicking an SSO deployment.
    mockServer.getClient().namespaces().createOrReplace(
        new NamespaceBuilder().withNewMetadata().withName("keycloak").endMetadata().build());
    mockServer.getClient().pods().inNamespace("keycloak").createOrReplace(
        new PodBuilder().withNewMetadata().withName("keycloak-0").withNamespace("keycloak").endMetadata().build());

    // Self-signed TLS material for the mock keycloak service, published as a kubernetes.io/tls secret.
    // Base64.getEncoder().encodeToString(...) replaces the verbose encode(...) + new String(...) two-step.
    SecurityUtils.TlsConfig tls = SecurityUtils.getTLSConfig("keycloak.svc");
    Secret keycloakCert = new SecretBuilder()
        .withNewMetadata().withName("sso-x509-https-secret").withNamespace("keycloak").endMetadata()
        .withType("kubernetes.io/tls")
        .withData(Map.of(
            "tls.crt", Base64.getEncoder().encodeToString(tls.getCert().getBytes(StandardCharsets.UTF_8)),
            "tls.key", Base64.getEncoder().encodeToString(tls.getKey().getBytes(StandardCharsets.UTF_8))))
        .build();
    mockServer.getClient().secrets().inNamespace("keycloak").createOrReplace(keycloakCert);

    // "YWRtaW4=" is base64 for "admin" — the admin credentials the tests expect.
    mockServer.getClient().secrets().inNamespace("keycloak").createOrReplace(
        new SecretBuilder().withNewMetadata().withName("credential-example-keycloak").endMetadata()
            .withData(Map.of("ADMIN_USERNAME", "YWRtaW4=", "ADMIN_PASSWORD", "YWRtaW4="))
            .build());
}
use of org.bf2.cos.fleetshard.support.resources.Namespaces in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
the class ManagedKafkaProvisioner method install.
/**
 * Install this Kafka provisioner: tears down any prior operator installs, recreates the Kafka
 * namespace, deploys the Strimzi and fleetshard operators, and registers a ManagedKafkaAgent.
 * This can be called once per test class or per test method.
 */
public void install() throws Exception {
// delete/create the namespaces to be used
Map<String, String> nsAnnotations = new HashMap<>();
if (PerformanceEnvironment.KAFKA_COLLECT_LOG) {
// annotate the namespace so pod logs get collected during performance runs
nsAnnotations.put(Constants.ORG_BF2_KAFKA_PERFORMANCE_COLLECTPODLOG, "true");
}
// ensure previous operator installs are fully gone before recreating anything
cluster.waitForDeleteNamespace(StrimziOperatorManager.OPERATOR_NS);
FleetShardOperatorManager.deleteFleetShard(cluster.kubeClient()).get(2, TimeUnit.MINUTES);
cluster.createNamespace(Constants.KAFKA_NAMESPACE, nsAnnotations, Map.of());
List<Node> workers = cluster.getWorkerNodes();
// "small" = any worker with less than 3 cpu cores of allocatable capacity
boolean smallNodes = workers.stream().anyMatch(n -> TestUtils.getMaxAvailableResources(n).cpuMillis < 3000);
if (smallNodes) {
// on small clusters, watch all deployments and clamp operator cpu requests so pods can schedule
MixedOperation<Deployment, DeploymentList, RollableScalableResource<Deployment>> deployments = cluster.kubeClient().client().apps().deployments();
this.informer = deployments.inAnyNamespace().inform(new ResourceEventHandler<Deployment>() {
@Override
public void onUpdate(Deployment oldObj, Deployment newObj) {
// treat updates the same as adds — re-check the new spec
onAdd(newObj);
}
@Override
public void onDelete(Deployment obj, boolean deletedFinalStateUnknown) {
// nothing to clean up for deleted deployments
}
@Override
public void onAdd(Deployment obj) {
// only touch deployments owned by the strimzi or fleetshard operator namespaces
if (!obj.getMetadata().getNamespace().equals(StrimziOperatorManager.OPERATOR_NS) && !obj.getMetadata().getNamespace().equals(FleetShardOperatorManager.OPERATOR_NS)) {
return;
}
// patch any deployment that requests a lot of cpu, and make sure it's on the perf infra
deployments.inNamespace(obj.getMetadata().getNamespace()).withName(obj.getMetadata().getName()).edit(new TypedVisitor<ResourceRequirementsBuilder>() {
@Override
public void visit(ResourceRequirementsBuilder element) {
// prefer the explicit request; fall back to the limit when no request is set
Quantity cpu = null;
if (element.getRequests() != null) {
cpu = element.getRequests().get("cpu");
}
if (cpu == null && element.getLimits() != null) {
cpu = element.getLimits().get("cpu");
}
// anything over 1 cpu gets its request clamped down to 1
if (cpu != null && Quantity.getAmountInBytes(cpu).compareTo(BigDecimal.valueOf(1)) > 0) {
element.addToRequests("cpu", Quantity.parse("1"));
}
}
});
}
});
}
// installs the Strimzi Operator using the OLM bundle
CompletableFuture<Void> strimziFuture = strimziManager.deployStrimziOperator();
cluster.connectNamespaceToMonitoringStack(StrimziOperatorManager.OPERATOR_NS);
// installs a cluster wide fleetshard operator
// not looking at the returned futures - it's assumed that we'll eventually wait on the managed kafka deployment
CompletableFuture<Void> future = FleetShardOperatorManager.deployFleetShardOperator(cluster.kubeClient());
CompletableFuture.allOf(future, strimziFuture).get(2, TimeUnit.MINUTES);
// register an agent with a placeholder (empty) observability config
var agentResource = this.cluster.kubeClient().client().resource(new ManagedKafkaAgentBuilder().withNewMetadata().withName(ManagedKafkaAgentResourceClient.RESOURCE_NAME).withNamespace(FleetShardOperatorManager.OPERATOR_NS).endMetadata().withSpec(new ManagedKafkaAgentSpecBuilder().withNewObservability().withAccessToken("").withChannel("").withRepository("").withTag("").endObservability().build()).build());
agentResource.createOrReplace();
// FleetShardOperatorManager.deployFleetShardSync(cluster.kubeClient());
cluster.connectNamespaceToMonitoringStack(FleetShardOperatorManager.OPERATOR_NS);
// cache the sorted strimzi versions reported on the agent status for later use
strimziVersions = SyncApiClient.getSortedAvailableStrimziVersions(() -> agentResource.fromServer().get().getStatus()).collect(Collectors.toList());
}
use of org.bf2.cos.fleetshard.support.resources.Namespaces in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
the class TestExceptionCallbackListener method afterTestExecution.
@Override
public void afterTestExecution(ExtensionContext context) throws Exception {
    // After each test, scan every annotated namespace for containers that restarted;
    // a restart on an otherwise-passing test is turned into a failure.
    for (KubeClusterResource kubeClusterResource : KubeClusterResource.getCurrentConnectedClusters()) {
        KubeClient kubeClient = kubeClusterResource.kubeClient();
        kubeClient.client().namespaces().list().getItems().stream()
            .filter(ns -> checkAnnotation(ns, Constants.ORG_BF2_PERFORMANCE_CHECKRESTARTEDCONTAINERS))
            .forEach(ns -> {
                List<Pod> podsWithRestartedContainers = kubeClient.client().pods()
                    .inNamespace(ns.getMetadata().getName()).list().getItems().stream()
                    .filter(p -> p.getStatus().getContainerStatuses().stream().anyMatch(cs -> cs.getRestartCount() > 0))
                    .collect(Collectors.toList());
                if (!podsWithRestartedContainers.isEmpty()) {
                    LOGGER.error("Found {} pod(s) with containers that had restarted at least once", podsWithRestartedContainers.size());
                    podsWithRestartedContainers.forEach(p -> {
                        p.getStatus().getContainerStatuses().stream().filter(cs -> cs.getRestartCount() > 0).forEach(cs -> {
                            LOGGER.error("Pod {} container {} restart count {}", p.getMetadata().getName(), cs.getName(), cs.getRestartCount());
                        });
                    });
                    if (context.getExecutionException().isEmpty()) {
                        // Fail the test. The original message contained a raw SLF4J "{}" placeholder
                        // that was never formatted; include the actual count instead.
                        throw new RuntimeException("Found " + podsWithRestartedContainers.size()
                            + " pod(s) with containers that had restarted at least once");
                    }
                }
            });
    }
}
use of org.bf2.cos.fleetshard.support.resources.Namespaces in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
the class LogCollector method saveClusterState.
/**
 * Dumps a broad snapshot of cluster state (nodes, events, PVs, operator resources,
 * managed kafkas and relevant pod logs) into individual files under {@code logpath}.
 *
 * @param logpath directory that receives one file per collected artifact
 * @throws IOException if writing any snapshot file fails
 */
private static void saveClusterState(Path logpath) throws IOException {
    KubeClient kube = KubeClient.getInstance();
    writeCmdOutput(kube, logpath.resolve("describe-cluster-nodes.log"), "describe", "nodes");
    writeCmdOutput(kube, logpath.resolve("all-events.log"), "get", "events", "--all-namespaces");
    writeCmdOutput(kube, logpath.resolve("pvs.log"), "describe", "pv");
    writeCmdOutput(kube, logpath.resolve("operator-routes.yml"), "get", "routes", "-n", FleetShardOperatorManager.OPERATOR_NS, "-o", "yaml");
    writeCmdOutput(kube, logpath.resolve("operator-services.yml"), "get", "service", "-n", FleetShardOperatorManager.OPERATOR_NS, "-o", "yaml");
    writeCmdOutput(kube, logpath.resolve("kas-fleetshard-operator-pods.yml"), "get", "pod", "-l", "app=" + FleetShardOperatorManager.OPERATOR_NAME, "--all-namespaces", "-o", "yaml");
    writeCmdOutput(kube, logpath.resolve("strimzi-kafka-pods.yml"), "get", "pod", "-l", "app.kubernetes.io/managed-by=strimzi-cluster-operator", "--all-namespaces", "-o", "yaml");
    writeCmdOutput(kube, logpath.resolve("managedkafkas.yml"), "get", "managedkafka", "--all-namespaces", "-o", "yaml");
    writeCmdOutput(kube, logpath.resolve("kafkas.yml"), "get", "kafka", "-l", "app.kubernetes.io/managed-by=" + FleetShardOperatorManager.OPERATOR_NAME, "--all-namespaces", "-o", "yaml");
    writeCmdOutput(kube, logpath.resolve("pods-managed-by-operator.yml"), "get", "pods", "-l", "app.kubernetes.io/managed-by=" + FleetShardOperatorManager.OPERATOR_NAME, "--all-namespaces", "-o", "yaml");
    writeCmdOutput(kube, logpath.resolve("operator-namespace-events.yml"), "get", "events", "-n", FleetShardOperatorManager.OPERATOR_NS);
    writeCmdOutput(kube, logpath.resolve("operator.log"), "logs", "deployment/" + FleetShardOperatorManager.OPERATOR_NAME, "-n", FleetShardOperatorManager.OPERATOR_NS);
    writeCmdOutput(kube, logpath.resolve("sync.log"), "logs", "deployment/" + FleetShardOperatorManager.SYNC_NAME, "-n", FleetShardOperatorManager.OPERATOR_NS);
    // Strimzi pod logs are collected best-effort; a failure on one pod must not abort the rest.
    StrimziOperatorManager.getStrimziOperatorPods().forEach(pod -> {
        try {
            writeCmdOutput(kube, logpath.resolve(pod.getMetadata().getName() + ".log"),
                "logs", pod.getMetadata().getName(), "--tail", "-1", "-n", pod.getMetadata().getNamespace());
        } catch (Exception e) {
            // include the exception so the cause of the collection failure is visible
            LOGGER.warn("Cannot get logs from pod {} in namespace {}", pod.getMetadata().getName(), pod.getMetadata().getNamespace(), e);
        }
    });
}

/** Runs a kubectl/oc command via the cmd client and writes its stdout to {@code file}. */
private static void writeCmdOutput(KubeClient kube, Path file, String... args) throws IOException {
    Files.writeString(file, kube.cmdClient().exec(false, false, args).out());
}
use of org.bf2.cos.fleetshard.support.resources.Namespaces in project cos-fleetshard by bf2fc6cc711aee1a0c2a.
the class ConnectorNamespaceProvisioner method provisionNamespaces.
/**
 * Provisions the given connector namespaces, reporting a DISCONNECTED status back to the
 * fleet manager for any namespace that fails. When {@code sync} is true, any local namespace
 * not present in {@code namespaces} is labeled for forced deletion.
 *
 * @param namespaces namespaces requested by the control plane
 * @param sync       whether to reconcile (mark for deletion) namespaces no longer known upstream
 */
private void provisionNamespaces(Collection<ConnectorNamespace> namespaces, boolean sync) {
    for (ConnectorNamespace namespace : namespaces) {
        this.recorder.record(() -> provision(namespace), Tags.of(TAG_NAMESPACE_ID, namespace.getId()), e -> {
            LOGGER.error("Failure while trying to provision connector namespace: id={}, revision={}", namespace.getId(), namespace.getResourceVersion(), e);
            try {
                // Report the failure to the control plane as a not-ready, disconnected namespace.
                MetaV1Condition condition = new MetaV1Condition();
                condition.setType(Conditions.TYPE_READY);
                condition.setStatus(Conditions.STATUS_FALSE);
                condition.setReason(Conditions.FAILED_TO_CREATE_OR_UPDATE_RESOURCE_REASON);
                condition.setMessage(e.getMessage());
                ConnectorNamespaceStatus status = new ConnectorNamespaceStatus().id(namespace.getId()).version("" + namespace.getResourceVersion()).phase(ConnectorNamespaceState.DISCONNECTED).conditions(List.of(condition));
                fleetManager.updateNamespaceStatus(fleetShard.getClusterId(), namespace.getId(), status);
            } catch (Exception ex) {
                // Log the reporting failure (ex) itself — the original provisioning failure (e)
                // was already logged above.
                LOGGER.warn("Error while reporting failure to the control plane", ex);
            }
            fleetShard.getConnectorCluster().ifPresent(cc -> {
                fleetShard.broadcast("Warning", "FailedToCreateOrUpdateResource", String.format("Unable to create or update namespace %s, revision: %s, reason: %s", namespace.getId(), namespace.getResourceVersion(), e.getMessage()), cc);
            });
        });
    }
    if (sync) {
        // Any locally-known namespace whose id is not in the requested set is stale:
        // label it deleted/forced so the deletion path picks it up.
        Set<String> knownIds = namespaces.stream().map(ConnectorNamespace::getId).collect(Collectors.toSet());
        for (Namespace namespace : fleetShard.getNamespaces()) {
            String nsId = Resources.getLabel(namespace, Resources.LABEL_NAMESPACE_ID);
            if (nsId == null || knownIds.contains(nsId)) {
                continue;
            }
            try {
                Resources.setLabels(namespace, Resources.LABEL_NAMESPACE_STATE, Namespaces.PHASE_DELETED);
                Resources.setLabels(namespace, Resources.LABEL_NAMESPACE_STATE_FORCED, "true");
                fleetShard.getKubernetesClient().namespaces().withName(namespace.getMetadata().getName()).replace(namespace);
            } catch (Exception e) {
                LOGGER.warn("Error marking namespace {} for deletion (sync)", namespace.getMetadata().getName(), e);
            }
        }
    }
}
Aggregations