Use of io.strimzi.api.kafka.model.KafkaConnectBuilder in project strimzi by strimzi.
The example below is from the class KafkaConnectBuildTest, method testValidKanikoOptions.
@ParallelTest
public void testValidKanikoOptions() {
List<String> expectedArgs = new ArrayList<>(defaultArgs);
expectedArgs.add("--reproducible");
expectedArgs.add("--single-snapshot");
expectedArgs.add("--log-format=json");
KafkaConnect kc = new KafkaConnectBuilder()
        .withNewMetadata()
            .withName(cluster)
            .withNamespace(namespace)
        .endMetadata()
        .withNewSpec()
            .withBootstrapServers("my-kafka:9092")
            .withNewBuild()
                .withNewDockerOutput()
                    .withImage("my-image:latest")
                    .withPushSecret("my-docker-credentials")
                    .withAdditionalKanikoOptions("--reproducible", "--single-snapshot", "--log-format=json")
                .endDockerOutput()
                .withPlugins(new PluginBuilder().withName("my-connector").withArtifacts(jarArtifactWithChecksum).build(),
                        new PluginBuilder().withName("my-connector2").withArtifacts(jarArtifactNoChecksum).build())
            .endBuild()
        .endSpec()
        .build();
KafkaConnectBuild build = KafkaConnectBuild.fromCrd(new Reconciliation("test", kc.getKind(), kc.getMetadata().getNamespace(), kc.getMetadata().getName()), kc, VERSIONS);
Pod pod = build.generateBuilderPod(true, ImagePullPolicy.IFNOTPRESENT, null, null);
assertThat(pod.getSpec().getContainers().get(0).getArgs(), is(expectedArgs));
}
Use of io.strimzi.api.kafka.model.KafkaConnectBuilder in project strimzi by strimzi.
The example below is from the class KafkaConnectBuildTest, method testInvalidKanikoOptions.
@ParallelTest
public void testInvalidKanikoOptions() {
KafkaConnect kc = new KafkaConnectBuilder()
        .withNewMetadata()
            .withName(cluster)
            .withNamespace(namespace)
        .endMetadata()
        .withNewSpec()
            .withBootstrapServers("my-kafka:9092")
            .withNewBuild()
                .withNewDockerOutput()
                    .withImage("my-image:latest")
                    .withPushSecret("my-docker-credentials")
                    .withAdditionalKanikoOptions("--reproducible", "--reproducible-something", "--build-arg", "--single-snapshot", "--digest-file=/dev/null", "--log-format=json")
                .endDockerOutput()
                .withPlugins(new PluginBuilder().withName("my-connector").withArtifacts(jarArtifactWithChecksum).build(),
                        new PluginBuilder().withName("my-connector2").withArtifacts(jarArtifactNoChecksum).build())
            .endBuild()
        .endSpec()
        .build();
InvalidResourceException e = assertThrows(InvalidResourceException.class, () -> KafkaConnectBuild.fromCrd(new Reconciliation("test", kc.getKind(), kc.getMetadata().getNamespace(), kc.getMetadata().getName()), kc, VERSIONS));
assertThat(e.getMessage(), containsString(".spec.build.additionalKanikoOptions contains forbidden options: [--reproducible-something, --build-arg, --digest-file]"));
}
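The error message in this test suggests that the additional Kaniko options are validated against an allow-list of supported flags, comparing only the part of each option before any "=" value. The following is a minimal, hypothetical sketch of that style of check; it is not Strimzi's actual implementation, and the ALLOWED set below contains only the options exercised by these two tests.
// Hypothetical sketch, NOT Strimzi's actual validation code.
// The operator throws InvalidResourceException; IllegalArgumentException is used here to keep the sketch self-contained.
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

class KanikoOptionsCheck {
    // Illustrative subset of allowed options, inferred from the tests above
    private static final Set<String> ALLOWED = Set.of("--reproducible", "--single-snapshot", "--log-format");

    static void validate(List<String> options) {
        List<String> forbidden = options.stream()
                .map(opt -> opt.contains("=") ? opt.substring(0, opt.indexOf('=')) : opt)  // compare option names only, ignore values
                .filter(opt -> !ALLOWED.contains(opt))
                .collect(Collectors.toList());
        if (!forbidden.isEmpty()) {
            throw new IllegalArgumentException(
                    ".spec.build.additionalKanikoOptions contains forbidden options: " + forbidden);
        }
    }
}
Run against the invalid options above, this sketch would reject exactly [--reproducible-something, --build-arg, --digest-file], matching the asserted message.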
Use of io.strimzi.api.kafka.model.KafkaConnectBuilder in project strimzi by strimzi.
The example below is from the class ConnectorMockTest, method testConnectorRestartFail.
/**
* Create a Connect cluster and a connector, add the restart annotation, make the
* connector restart fail, and check for the resulting condition
*/
@Test
public void testConnectorRestartFail() {
String connectName = "cluster";
String connectorName = "connector";
when(api.restart(anyString(), anyInt(), anyString()))
        .thenAnswer(invocation -> Future.failedFuture(
                new ConnectRestException("GET", "/foo", 500, "Internal server error", "Bad stuff happened")));
// Create KafkaConnect cluster and wait till it's ready
Crds.kafkaConnectOperation(client).inNamespace(NAMESPACE).create(new KafkaConnectBuilder()
        .withNewMetadata()
            .withNamespace(NAMESPACE)
            .withName(connectName)
            .addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true")
        .endMetadata()
        .withNewSpec()
            .withReplicas(1)
        .endSpec()
        .build());
waitForConnectReady(connectName);
// Could be triggered twice (creation followed by a status update), but waitForConnectReady could be satisfied by a single call
verify(api, atLeastOnce()).list(eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT));
verify(api, never()).createOrUpdatePutRequest(any(), eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any());
// Create KafkaConnector and wait till it's ready
Crds.kafkaConnectorOperation(client).inNamespace(NAMESPACE).create(new KafkaConnectorBuilder()
        .withNewMetadata()
            .withName(connectorName)
            .withNamespace(NAMESPACE)
            .addToLabels(Labels.STRIMZI_CLUSTER_LABEL, connectName)
        .endMetadata()
        .withNewSpec()
            .withTasksMax(1)
            .withClassName("Dummy")
        .endSpec()
        .build());
waitForConnectorReady(connectorName);
waitForConnectorState(connectorName, "RUNNING");
verify(api, times(2)).list(eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT));
verify(api, times(1)).createOrUpdatePutRequest(any(), eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any());
assertThat(runningConnectors.keySet(), is(Collections.singleton(key("cluster-connect-api.ns.svc", connectorName))));
verify(api, never()).restart(eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName));
verify(api, never()).restartTask(eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), eq(0));
Crds.kafkaConnectorOperation(client).inNamespace(NAMESPACE).withName(connectorName)
        .edit(connector -> new KafkaConnectorBuilder(connector)
                .editMetadata()
                    .addToAnnotations(Annotations.ANNO_STRIMZI_IO_RESTART, "true")
                .endMetadata()
                .build());
waitForConnectorReady(connectorName);
waitForConnectorState(connectorName, "RUNNING");
waitForConnectorCondition(connectorName, "Warning", "RestartConnector");
// Could be triggered twice (restart handling followed by a status update), but the waits above could be satisfied by a single call
verify(api, atLeastOnce()).restart(eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName));
verify(api, never()).restartTask(eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), eq(0));
}
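The waitForConnectReady, waitForConnectorReady, waitForConnectorState and waitForConnectorCondition helpers used above are not part of this excerpt. A rough, hypothetical sketch of such a polling helper is shown below; the timeout, poll interval and matching on the condition reason are illustrative assumptions, not the actual ConnectorMockTest code.
// Hypothetical polling helper, NOT the actual ConnectorMockTest implementation.
// Polls the KafkaConnector status until a condition with the given reason appears,
// or fails after a fixed timeout (values below are assumptions).
private void waitForCondition(String connectorName, String reason, long timeoutMs) throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
        KafkaConnector current = Crds.kafkaConnectorOperation(client)
                .inNamespace(NAMESPACE)
                .withName(connectorName)
                .get();
        if (current != null && current.getStatus() != null && current.getStatus().getConditions() != null
                && current.getStatus().getConditions().stream().anyMatch(c -> reason.equals(c.getReason()))) {
            return;
        }
        Thread.sleep(100);  // poll interval chosen for illustration
    }
    throw new AssertionError("Timed out waiting for condition " + reason + " on connector " + connectorName);
}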
Use of io.strimzi.api.kafka.model.KafkaConnectBuilder in project strimzi by strimzi.
The example below is from the class ConnectorMockTest, method testChangeStrimziClusterLabel.
/**
* Change the cluster label from one cluster to another,
* check that the connector is deleted from the old cluster,
* and check that the connector is added to the new cluster
*/
@Test
public void testChangeStrimziClusterLabel(VertxTestContext context) throws InterruptedException {
String oldConnectClusterName = "cluster1";
String newConnectClusterName = "cluster2";
String connectorName = "connector";
// Create two connect clusters
KafkaConnect connect = new KafkaConnectBuilder()
        .withNewMetadata()
            .withNamespace(NAMESPACE)
            .withName(oldConnectClusterName)
            .addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true")
        .endMetadata()
        .withNewSpec()
            .withReplicas(1)
        .endSpec()
        .build();
Crds.kafkaConnectOperation(client).inNamespace(NAMESPACE).create(connect);
waitForConnectReady(oldConnectClusterName);
KafkaConnect connect2 = new KafkaConnectBuilder()
        .withNewMetadata()
            .withNamespace(NAMESPACE)
            .withName(newConnectClusterName)
            .addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true")
        .endMetadata()
        .withNewSpec()
            .withReplicas(1)
        .endSpec()
        .build();
Crds.kafkaConnectOperation(client).inNamespace(NAMESPACE).create(connect2);
waitForConnectReady(newConnectClusterName);
// Create KafkaConnector associated with the first cluster using the Strimzi Cluster label and wait till it's ready
KafkaConnector connector = new KafkaConnectorBuilder()
        .withNewMetadata()
            .withName(connectorName)
            .withNamespace(NAMESPACE)
            .addToLabels(Labels.STRIMZI_CLUSTER_LABEL, oldConnectClusterName)
        .endMetadata()
        .withNewSpec()
            .withTasksMax(1)
            .withClassName("Dummy")
        .endSpec()
        .build();
Crds.kafkaConnectorOperation(client).inNamespace(NAMESPACE).create(connector);
waitForConnectorReady(connectorName);
// createOrUpdate is invoked for the first cluster, which matches the connector's Strimzi cluster label
verify(api, times(1)).createOrUpdatePutRequest(any(), eq(KafkaConnectResources.qualifiedServiceName(oldConnectClusterName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any());
// never triggered for the second cluster as connector's Strimzi cluster label does not match cluster 2
verify(api, never()).createOrUpdatePutRequest(any(), eq(KafkaConnectResources.qualifiedServiceName(newConnectClusterName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any());
// patch connector with new Strimzi cluster label associated with cluster 2
Crds.kafkaConnectorOperation(client).inNamespace(NAMESPACE).withName(connectorName)
        .patch(new KafkaConnectorBuilder()
                .withNewMetadata()
                    .withName(connectorName)
                    .withNamespace(NAMESPACE)
                    .addToLabels(Labels.STRIMZI_CLUSTER_LABEL, newConnectClusterName)
                .endMetadata()
                .withNewSpec()
                    .withTasksMax(1)
                    .withClassName("Dummy")
                .endSpec()
                .build());
waitForConnectorReady(connectorName);
// Note: The connector does not get deleted immediately from the first cluster, only on the next timed reconciliation
verify(api, never()).delete(any(), eq(KafkaConnectResources.qualifiedServiceName(oldConnectClusterName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName));
verify(api, times(1)).createOrUpdatePutRequest(any(), eq(KafkaConnectResources.qualifiedServiceName(newConnectClusterName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any());
// Force reconciliation to assert connector deletion request occurs for first cluster
Checkpoint async = context.checkpoint();
kafkaConnectOperator.reconcile(new Reconciliation("test", "KafkaConnect", NAMESPACE, oldConnectClusterName)).onComplete(context.succeeding(v -> context.verify(() -> {
verify(api, times(1)).delete(any(), eq(KafkaConnectResources.qualifiedServiceName(oldConnectClusterName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName));
async.flag();
})));
}
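The re-homing in this test relies on the operator associating a KafkaConnector with a Connect cluster through its strimzi.io/cluster label (Labels.STRIMZI_CLUSTER_LABEL). As a rough, hypothetical sketch of that association (not the operator's actual code), the target Connect REST endpoint could be derived from the label like this:
// Hypothetical sketch: resolve the Connect REST host from the connector's cluster label.
// This is not Strimzi's actual operator code; variable names are illustrative.
String connectClusterName = connector.getMetadata().getLabels().get(Labels.STRIMZI_CLUSTER_LABEL);
String connectHost = KafkaConnectResources.qualifiedServiceName(connectClusterName, connector.getMetadata().getNamespace());
// e.g. "cluster2-connect-api.<namespace>.svc" once the label has been patched to the second cluster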
Use of io.strimzi.api.kafka.model.KafkaConnectBuilder in project strimzi by strimzi.
The example below is from the class ConnectorMockTest, method testConnectorReconciliationPausedUnpaused.
@Test
public void testConnectorReconciliationPausedUnpaused() {
String connectName = "cluster";
String connectorName = "connector";
// Create KafkaConnect cluster and wait till it's ready
Crds.kafkaConnectOperation(client).inNamespace(NAMESPACE).create(new KafkaConnectBuilder()
        .withNewMetadata()
            .withNamespace(NAMESPACE)
            .withName(connectName)
            .addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true")
        .endMetadata()
        .withNewSpec()
            .withReplicas(1)
        .endSpec()
        .build());
waitForConnectReady(connectName);
// Create the connector with reconciliation paused
KafkaConnector connector = new KafkaConnectorBuilder()
        .withNewMetadata()
            .withName(connectorName)
            .withNamespace(NAMESPACE)
            .addToLabels(Labels.STRIMZI_CLUSTER_LABEL, connectName)
            .addToAnnotations("strimzi.io/pause-reconciliation", "true")
        .endMetadata()
        .withNewSpec()
            .withTasksMax(1)
            .withClassName("Dummy")
        .endSpec()
        .build();
Crds.kafkaConnectorOperation(client).inNamespace(NAMESPACE).create(connector);
waitForConnectorCondition(connectorName, "ReconciliationPaused", null);
// Unpause reconciliation by setting the annotation to false
Crds.kafkaConnectorOperation(client).inNamespace(NAMESPACE).withName(connectorName)
        .edit(cntctr -> new KafkaConnectorBuilder(cntctr)
                .editMetadata()
                    .addToAnnotations("strimzi.io/pause-reconciliation", "false")
                .endMetadata()
                .build());
waitForConnectorReady(connectorName);
waitForConnectorState(connectorName, "RUNNING");
}
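This test exercises the strimzi.io/pause-reconciliation annotation. A minimal sketch of how an operator might honour such an annotation is shown below; it illustrates the general pattern only and is not Strimzi's actual reconciliation code (updateStatusWithPausedCondition is a made-up helper, and java.util.Map is assumed to be imported).
// Hypothetical sketch of pause-annotation handling, NOT Strimzi's actual code.
Map<String, String> annotations = connector.getMetadata().getAnnotations();
boolean paused = annotations != null && "true".equals(annotations.get("strimzi.io/pause-reconciliation"));
if (paused) {
    // Skip the actual work and only report a ReconciliationPaused condition on the status
    updateStatusWithPausedCondition(connector);  // made-up helper for illustration
    return;
}
// ... otherwise proceed with the normal createOrUpdate of the connector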