Use of io.fabric8.kubernetes.examples.crds.Dummy in project strimzi (by strimzi), from the class ConnectorMockTest, method testChangeStrimziClusterLabel:
/**
 * Changes the Strimzi cluster label on a KafkaConnector from one KafkaConnect
 * cluster to another and checks that:
 * - the connector is (on the next reconciliation) deleted from the old cluster
 * - the connector is added to the new cluster
 */
@Test
public void testChangeStrimziClusterLabel(VertxTestContext context) throws InterruptedException {
String oldConnectClusterName = "cluster1";
String newConnectClusterName = "cluster2";
String connectorName = "connector";
// Create two connect clusters, both opted in to connector-resource management
// via the strimzi.io/use-connector-resources annotation
KafkaConnect connect = new KafkaConnectBuilder().withNewMetadata().withNamespace(NAMESPACE).withName(oldConnectClusterName).addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true").endMetadata().withNewSpec().withReplicas(1).endSpec().build();
Crds.kafkaConnectOperation(client).inNamespace(NAMESPACE).create(connect);
waitForConnectReady(oldConnectClusterName);
KafkaConnect connect2 = new KafkaConnectBuilder().withNewMetadata().withNamespace(NAMESPACE).withName(newConnectClusterName).addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true").endMetadata().withNewSpec().withReplicas(1).endSpec().build();
Crds.kafkaConnectOperation(client).inNamespace(NAMESPACE).create(connect2);
waitForConnectReady(newConnectClusterName);
// Create KafkaConnector associated with the first cluster using the Strimzi Cluster label and wait till it's ready
KafkaConnector connector = new KafkaConnectorBuilder().withNewMetadata().withName(connectorName).withNamespace(NAMESPACE).addToLabels(Labels.STRIMZI_CLUSTER_LABEL, oldConnectClusterName).endMetadata().withNewSpec().withTasksMax(1).withClassName("Dummy").endSpec().build();
Crds.kafkaConnectorOperation(client).inNamespace(NAMESPACE).create(connector);
waitForConnectorReady(connectorName);
// Exactly one PUT is expected so far for the first cluster.
// NOTE(review): an earlier comment claimed this is "triggered twice", but the
// assertion is times(1) — confirm the intended invocation count.
verify(api, times(1)).createOrUpdatePutRequest(any(), eq(KafkaConnectResources.qualifiedServiceName(oldConnectClusterName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any());
// never triggered for the second cluster as connector's Strimzi cluster label does not match cluster 2
verify(api, never()).createOrUpdatePutRequest(any(), eq(KafkaConnectResources.qualifiedServiceName(newConnectClusterName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any());
// patch connector with new Strimzi cluster label associated with cluster 2
Crds.kafkaConnectorOperation(client).inNamespace(NAMESPACE).withName(connectorName).patch(new KafkaConnectorBuilder().withNewMetadata().withName(connectorName).withNamespace(NAMESPACE).addToLabels(Labels.STRIMZI_CLUSTER_LABEL, newConnectClusterName).endMetadata().withNewSpec().withTasksMax(1).withClassName("Dummy").endSpec().build());
waitForConnectorReady(connectorName);
// Note: The connector does not get deleted immediately from the first cluster, only on the next timed reconciliation
verify(api, never()).delete(any(), eq(KafkaConnectResources.qualifiedServiceName(oldConnectClusterName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName));
verify(api, times(1)).createOrUpdatePutRequest(any(), eq(KafkaConnectResources.qualifiedServiceName(newConnectClusterName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any());
// Force reconciliation to assert connector deletion request occurs for first cluster;
// the Vertx checkpoint flags completion so the async test can finish
Checkpoint async = context.checkpoint();
kafkaConnectOperator.reconcile(new Reconciliation("test", "KafkaConnect", NAMESPACE, oldConnectClusterName)).onComplete(context.succeeding(v -> context.verify(() -> {
verify(api, times(1)).delete(any(), eq(KafkaConnectResources.qualifiedServiceName(oldConnectClusterName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName));
async.flag();
})));
}
Use of io.fabric8.kubernetes.examples.crds.Dummy in project strimzi-kafka-operator (by strimzi), from the class KafkaUserModelTest, method testMissingOrWrongCaSecrets:
@Test
public void testMissingOrWrongCaSecrets() {
    // Model built from a TLS-enabled KafkaUser custom resource
    KafkaUserModel userModel = KafkaUserModel.fromCrd(tlsUser, UserOperatorConfig.DEFAULT_SECRET_PREFIX, UserOperatorConfig.DEFAULT_STRIMZI_ACLS_ADMIN_API_SUPPORTED, false);
    // A CA Secret that exists but carries no data entries at all
    Secret secretWithNoData = new SecretBuilder().withNewMetadata().withName("dummy-ca-secret").endMetadata().withData(Map.of()).build();

    // Clients CA cert Secret absent entirely
    InvalidCertificateException thrown = assertThrows(InvalidCertificateException.class, () -> {
        userModel.maybeGenerateCertificates(Reconciliation.DUMMY_RECONCILIATION, mockCertManager, passwordGenerator, null, clientsCaKey, null, 365, 30, null, Date::new);
    });
    assertThat(thrown.getMessage(), is("The Clients CA Cert Secret is missing"));

    // Clients CA key Secret absent entirely
    thrown = assertThrows(InvalidCertificateException.class, () -> {
        userModel.maybeGenerateCertificates(Reconciliation.DUMMY_RECONCILIATION, mockCertManager, passwordGenerator, clientsCaCert, null, null, 365, 30, null, Date::new);
    });
    assertThat(thrown.getMessage(), is("The Clients CA Key Secret is missing"));

    // Cert Secret present but lacking the ca.crt entry
    thrown = assertThrows(InvalidCertificateException.class, () -> {
        userModel.maybeGenerateCertificates(Reconciliation.DUMMY_RECONCILIATION, mockCertManager, passwordGenerator, secretWithNoData, clientsCaKey, null, 365, 30, null, Date::new);
    });
    assertThat(thrown.getMessage(), is("The Clients CA Cert Secret is missing the ca.crt file"));

    // Key Secret present but lacking the ca.key entry
    thrown = assertThrows(InvalidCertificateException.class, () -> {
        userModel.maybeGenerateCertificates(Reconciliation.DUMMY_RECONCILIATION, mockCertManager, passwordGenerator, clientsCaCert, secretWithNoData, null, 365, 30, null, Date::new);
    });
    assertThat(thrown.getMessage(), is("The Clients CA Key Secret is missing the ca.key file"));
}
Use of io.fabric8.kubernetes.examples.crds.Dummy in project strimzi-kafka-operator (by strimzi), from the class ClusterCaTest, method testNotRemoveOldCertificateWithCustomCa:
/**
 * Verifies that with a custom (user-managed) CA, {@code maybeDeleteOldCerts}
 * leaves the CA cert Secret untouched: after a simulated key renewal that
 * stashed the old certificate under a dated key, all four entries — including
 * the old certificate — must still be present and unchanged.
 */
@ParallelTest
public void testNotRemoveOldCertificateWithCustomCa() {
    Map<String, String> clusterCaCertData = new HashMap<>();
    clusterCaCertData.put(Ca.CA_CRT, Base64.getEncoder().encodeToString("dummy-crt".getBytes()));
    clusterCaCertData.put(Ca.CA_STORE, Base64.getEncoder().encodeToString("dummy-p12".getBytes()));
    clusterCaCertData.put(Ca.CA_STORE_PASSWORD, Base64.getEncoder().encodeToString("dummy-password".getBytes()));
    Secret clusterCaCert = new SecretBuilder().withNewMetadata().withName("my-cluster-cluster-ca-cert").endMetadata().withData(clusterCaCertData).build();
    Map<String, String> clusterCaKeyData = new HashMap<>();
    clusterCaKeyData.put(Ca.CA_KEY, Base64.getEncoder().encodeToString("dummy-key".getBytes()));
    Secret clusterCaKey = new SecretBuilder().withNewMetadata().withName("my-cluster-cluster-ca").endMetadata().withData(clusterCaKeyData).build();
    // generateCa=false: the CA is custom/user-managed, so the operator must not prune it
    ClusterCa clusterCa = new ClusterCa(Reconciliation.DUMMY_RECONCILIATION, new OpenSslCertManager(), new PasswordGenerator(10, "a", "a"), cluster, clusterCaCert, clusterCaKey, 0, 0, false, CertificateExpirationPolicy.RENEW_CERTIFICATE);
    // simulate a renewal with new private key ...
    clusterCaKeyData.put(Ca.CA_KEY, Base64.getEncoder().encodeToString("new-dummy-key".getBytes()));
    clusterCaKey.setData(clusterCaKeyData);
    // ... also saving the old certificate under a dated key, as renewal would
    clusterCaCertData.put("ca-2023-03-23T09-00-00Z.crt", clusterCaCertData.get(Ca.CA_CRT));
    clusterCaCertData.put(Ca.CA_CRT, Base64.getEncoder().encodeToString("new-dummy-crt".getBytes()));
    clusterCaCertData.put(Ca.CA_STORE, Base64.getEncoder().encodeToString("updated-dummy-p12".getBytes()));
    clusterCaCert.setData(clusterCaCertData);
    clusterCa.maybeDeleteOldCerts();
    // checking that the cluster CA related Secret was not touched by the operator;
    // assert on the decoded strings directly so a failure reports the actual value
    // instead of just "expected true but was false"
    Map<String, String> clusterCaCertDataInSecret = clusterCa.caCertSecret().getData();
    assertThat(clusterCaCertDataInSecret.size(), is(4));
    assertThat(new String(Base64.getDecoder().decode(clusterCaCertDataInSecret.get(Ca.CA_CRT))), is("new-dummy-crt"));
    assertThat(new String(Base64.getDecoder().decode(clusterCaCertDataInSecret.get(Ca.CA_STORE))), is("updated-dummy-p12"));
    assertThat(new String(Base64.getDecoder().decode(clusterCaCertDataInSecret.get(Ca.CA_STORE_PASSWORD))), is("dummy-password"));
    assertThat(new String(Base64.getDecoder().decode(clusterCaCertDataInSecret.get("ca-2023-03-23T09-00-00Z.crt"))), is("dummy-crt"));
}
Use of io.fabric8.kubernetes.examples.crds.Dummy in project fabric8 (by fabric8io), from the class DevOpsConnectors, method createDevOpsConnector:
/**
 * Returns a DevOpsConnector for the given project repository.
 * <p>
 * For GitHub projects, also derives issue-tracker/team/releases links from the
 * repository URL and loads the project's fabric8.yml config, falling back to a
 * default config with a dummy pipeline when none can be loaded.
 *
 * @param project the repository to build a connector for
 * @return a configured, never-null DevOpsConnector
 */
public static DevOpsConnector createDevOpsConnector(ProjectRepository project) {
    DevOpsConnector connector = new DevOpsConnector();
    connector.setGitUrl(project.getGitUrl());
    String repoName = project.getRepoName();
    connector.setRepoName(repoName);
    String username = project.getUser();
    connector.setUsername(username);
    String buildName = ProjectRepositories.createBuildName(username, repoName);
    if (project.isGitHubProject()) {
        String url = project.getUrl();
        ProjectConfig config = null;
        if (Strings.isNotBlank(url)) {
            // lets default the issue tracker and related links from the repo URL
            connector.setIssueTrackerUrl(URLUtils.pathJoin(url, "issues"));
            connector.setTeamUrl(URLUtils.pathJoin(url, "graphs/contributors"));
            connector.setReleasesUrl(URLUtils.pathJoin(url, "tags"));
            connector.setRepositoryBrowseLink(url);
            // Only attempt to load fabric8.yml when we have a usable base URL.
            // Previously this ran unconditionally, so a blank/null URL produced a
            // bogus relative path like "blob/master/fabric8.yml".
            config = ProjectConfigs.loadFromUrl(URLUtils.pathJoin(url, "blob/master/fabric8.yml"));
        }
        if (config == null) {
            config = new ProjectConfig();
            // lets add a dummy build so we can at least build snapshots on demand in OpenShift
            config.setPipeline("maven/Deploy.groovy");
        }
        config.setBuildName(buildName);
        connector.setProjectConfig(config);
        connector.setRegisterWebHooks(false);
        System.out.println("Created config " + config.getBuildName() + " with flow " + config.getPipeline());
    }
    return connector;
}
Use of io.fabric8.kubernetes.examples.crds.Dummy in project fabric8 (by jboss-fuse), from the class CreateProfileZipMojo, method generateZip:
/**
 * Builds the profile directory layout for this project (requirements, config
 * files, optional README/summary, agent properties) and packages it into the
 * profile zip, attaching it as a build artifact.
 *
 * @throws IOException if writing the profile files or the zip fails
 */
protected void generateZip() throws DependencyTreeBuilderException, MojoExecutionException, IOException, MojoFailureException {
    ProjectRequirements requirements = new ProjectRequirements();
    DependencyDTO rootDependency = null;
    if (isIncludeArtifact()) {
        rootDependency = loadRootDependency();
        requirements.setRootDependency(rootDependency);
    }
    configureRequirements(requirements);
    if (isIncludeArtifact()) {
        addProjectArtifactBundle(requirements);
    }
    File profileBuildDir = createProfileBuildDir(requirements.getProfileId());
    boolean hasConfigDir = profileConfigDir.isDirectory();
    if (hasConfigDir) {
        copyProfileConfigFiles(profileBuildDir, profileConfigDir);
    } else {
        getLog().info("The profile configuration files directory " + profileConfigDir + " doesn't exist, so not copying any additional project documentation or configuration files");
    }
    // to avoid generating dummy profiles for parent poms
    if (hasConfigDir || rootDependency != null || notEmpty(requirements.getBundles()) || notEmpty(requirements.getFeatures()) || notEmpty(requirements.getFeatureRepositories())) {
        if (includeReadMe) {
            copyReadMe(project.getFile().getParentFile(), profileBuildDir);
        }
        if (generateSummaryFile) {
            String description = project.getDescription();
            if (Strings.isNotBlank(description)) {
                File summaryMd = new File(profileBuildDir, "Summary.md");
                summaryMd.getParentFile().mkdirs();
                if (!summaryMd.exists()) {
                    byte[] bytes = description.getBytes();
                    // try-with-resources: the FileOutputStream was previously never
                    // closed, leaking a file handle (presumably Files.copy here is a
                    // project helper — confirm it does not close the streams itself;
                    // closing twice is harmless either way)
                    try (ByteArrayInputStream in = new ByteArrayInputStream(bytes);
                         FileOutputStream out = new FileOutputStream(summaryMd)) {
                        Files.copy(in, out);
                    }
                }
            }
        }
        if (isIncludeArtifact()) {
            writeProfileRequirements(requirements, profileBuildDir);
        }
        generateFabricAgentProperties(requirements, new File(profileBuildDir, "io.fabric8.agent.properties"));
        // only generate if its a WAR project
        if ("war".equals(project.getPackaging())) {
            generateFabricContextPathProperties(requirements, new File(profileBuildDir, Constants.WEB_CONTEXT_PATHS_PID + ".properties"));
        }
        Zips.createZipFile(getLog(), buildDir, outputFile);
        projectHelper.attachArtifact(project, artifactType, artifactClassifier, outputFile);
        getLog().info("Created profile zip file: " + outputFile);
    }
}
Aggregations