Use of io.strimzi.api.kafka.model.KafkaConnector in project strimzi by strimzi.
From the class ConnectorMockTest, method testConnectConnectorConnectConnector.
/**
* Create connect, create connector, delete connect, delete connector
*/
@Test
public void testConnectConnectorConnectConnector() {
String connectName = "cluster";
String connectorName = "connector";
// Create KafkaConnect cluster and wait till it's ready
KafkaConnect connect = new KafkaConnectBuilder()
    .withNewMetadata()
        .withNamespace(NAMESPACE)
        .withName(connectName)
        .addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true")
    .endMetadata()
    .withNewSpec()
        .withReplicas(1)
    .endSpec()
    .build();
Crds.kafkaConnectOperation(client).inNamespace(NAMESPACE).create(connect);
waitForConnectReady(connectName);
// could be triggered twice (creation followed by status update), but waitForConnectReady can be satisfied by a single call
verify(api, atLeastOnce()).list(eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT));
verify(api, never()).createOrUpdatePutRequest(any(), eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any());
// Create KafkaConnector and wait till it's ready
KafkaConnector connector = new KafkaConnectorBuilder()
    .withNewMetadata()
        .withName(connectorName)
        .withNamespace(NAMESPACE)
        .addToLabels(Labels.STRIMZI_CLUSTER_LABEL, connectName)
    .endMetadata()
    .withNewSpec()
        .withTasksMax(1)
        .withClassName("Dummy")
    .endSpec()
    .build();
Crds.kafkaConnectorOperation(client).inNamespace(NAMESPACE).create(connector);
waitForConnectorReady(connectorName);
// could be triggered twice (creation followed by status update), but the wait can be satisfied by a single call
verify(api, atLeastOnce()).list(eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT));
// the PUT request to the Connect REST API happens exactly once, when the connector is created
verify(api, times(1)).createOrUpdatePutRequest(any(), eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any());
assertThat(runningConnectors.keySet(), is(Collections.singleton(key("cluster-connect-api.ns.svc", connectorName))));
boolean connectDeleted = Crds.kafkaConnectOperation(client).inNamespace(NAMESPACE).withName(connectName).delete();
assertThat(connectDeleted, is(true));
waitForConnectorNotReady(connectorName, "NoSuchResourceException", "KafkaConnect resource 'cluster' identified by label '" + Labels.STRIMZI_CLUSTER_LABEL + "' does not exist in namespace ns.");
boolean connectorDeleted = Crds.kafkaConnectorOperation(client).inNamespace(NAMESPACE).withName(connectorName).delete();
assertThat(connectorDeleted, is(true));
verify(api, never()).delete(any(), eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName));
}
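The wait helpers used above (waitForConnectReady, waitForConnectorReady, waitForConnectorNotReady) are defined elsewhere in ConnectorMockTest and are not shown on this page. Below is a minimal sketch of what a condition-polling helper of this kind could look like, assuming the test keeps a Fabric8 client in a client field and uses io.strimzi.test.TestUtils.waitFor for polling; the method name, timeouts, and exact checks are illustrative, not the actual fixture.
// Illustrative sketch only -- the real helpers in ConnectorMockTest may poll differently.
private void waitForConnectorCondition(String connectorName, String conditionType, String reason) {
    TestUtils.waitFor("KafkaConnector " + connectorName + " to have condition " + conditionType, 1_000, 30_000, () -> {
        KafkaConnector kc = Crds.kafkaConnectorOperation(client).inNamespace(NAMESPACE).withName(connectorName).get();
        if (kc == null || kc.getStatus() == null || kc.getStatus().getConditions() == null) {
            return false;
        }
        // NotReady conditions typically carry the exception class name in the reason field
        return kc.getStatus().getConditions().stream()
                .anyMatch(c -> conditionType.equals(c.getType()) && (reason == null || reason.equals(c.getReason())));
    });
}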
Use of io.strimzi.api.kafka.model.KafkaConnector in project strimzi by strimzi.
From the class ConnectorMockTest, method testConnectorPauseResume.
/**
* Create connect, create connector, pause connector, resume connector
*/
@Test
public void testConnectorPauseResume() {
String connectName = "cluster";
String connectorName = "connector";
// Create KafkaConnect cluster and wait till it's ready
KafkaConnect connect = new KafkaConnectBuilder()
    .withNewMetadata()
        .withNamespace(NAMESPACE)
        .withName(connectName)
        .addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true")
    .endMetadata()
    .withNewSpec()
        .withReplicas(1)
    .endSpec()
    .build();
Crds.kafkaConnectOperation(client).inNamespace(NAMESPACE).create(connect);
waitForConnectReady(connectName);
// could be triggered twice (creation followed by status update), but waitForConnectReady can be satisfied by a single call
verify(api, atLeastOnce()).list(eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT));
verify(api, never()).createOrUpdatePutRequest(any(), eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any());
// Create KafkaConnector and wait till it's ready
KafkaConnector connector = new KafkaConnectorBuilder()
    .withNewMetadata()
        .withName(connectorName)
        .withNamespace(NAMESPACE)
        .addToLabels(Labels.STRIMZI_CLUSTER_LABEL, connectName)
    .endMetadata()
    .withNewSpec()
        .withTasksMax(1)
        .withClassName("Dummy")
    .endSpec()
    .build();
Crds.kafkaConnectorOperation(client).inNamespace(NAMESPACE).create(connector);
waitForConnectorReady(connectorName);
waitForConnectorState(connectorName, "RUNNING");
verify(api, times(2)).list(eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT));
verify(api, times(1)).createOrUpdatePutRequest(any(), eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any());
assertThat(runningConnectors.keySet(), is(Collections.singleton(key("cluster-connect-api.ns.svc", connectorName))));
verify(api, never()).pause(eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName));
verify(api, never()).resume(eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName));
Crds.kafkaConnectorOperation(client).inNamespace(NAMESPACE).withName(connectorName).edit(spec -> new KafkaConnectorBuilder(spec).editSpec().withPause(true).endSpec().build());
waitForConnectorState(connectorName, "PAUSED");
verify(api, times(1)).pause(eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName));
verify(api, never()).resume(eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName));
Crds.kafkaConnectorOperation(client).inNamespace(NAMESPACE).withName(connectorName).edit(sp -> new KafkaConnectorBuilder(sp).editSpec().withPause(false).endSpec().build());
waitForConnectorState(connectorName, "RUNNING");
verify(api, times(1)).pause(eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName));
verify(api, times(1)).resume(eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName));
}
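waitForConnectorState polls the connector state that the operator mirrors into the KafkaConnector status. Here is an illustrative sketch of such a check, assuming the status exposes the Connect REST API view as a Map under connectorStatus and that java.util.Map and io.strimzi.test.TestUtils are imported; the helper body and timeouts are assumptions, not the actual test code.
// Illustrative sketch only -- assumes status.connectorStatus mirrors the Connect REST API response.
private void waitForConnectorState(String connectorName, String state) {
    TestUtils.waitFor("KafkaConnector " + connectorName + " to be in state " + state, 1_000, 30_000, () -> {
        KafkaConnector kc = Crds.kafkaConnectorOperation(client).inNamespace(NAMESPACE).withName(connectorName).get();
        if (kc == null || kc.getStatus() == null || kc.getStatus().getConnectorStatus() == null) {
            return false;
        }
        Object connector = kc.getStatus().getConnectorStatus().get("connector");
        return connector instanceof Map && state.equals(((Map<?, ?>) connector).get("state"));
    });
}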
Use of io.strimzi.api.kafka.model.KafkaConnector in project strimzi by strimzi.
From the class ConnectorMockTest, method testConnectorNotReadyWhenConnectNotConfiguredForConnectors.
@Test
public void testConnectorNotReadyWhenConnectNotConfiguredForConnectors() {
String connectName = "cluster";
String connectorName = "connector";
KafkaConnect connect = new KafkaConnectBuilder()
    .withNewMetadata()
        .withNamespace(NAMESPACE)
        .withName(connectName)
    .endMetadata()
    .withNewSpec()
        .withReplicas(1)
    .endSpec()
    .build();
Crds.kafkaConnectOperation(client).inNamespace(NAMESPACE).create(connect);
waitForConnectReady(connectName);
KafkaConnector connector = new KafkaConnectorBuilder()
    .withNewMetadata()
        .withName(connectorName)
        .withNamespace(NAMESPACE)
        .addToLabels(Labels.STRIMZI_CLUSTER_LABEL, connectName)
    .endMetadata()
    .withNewSpec()
    .endSpec()
    .build();
Crds.kafkaConnectorOperation(client).inNamespace(NAMESPACE).create(connector);
waitForConnectorNotReady(connectorName, "NoSuchResourceException", "KafkaConnect cluster is not configured with annotation " + Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES);
}
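The connector only becomes reconcilable once the KafkaConnect resource opts in to connector management via the strimzi.io/use-connector-resources annotation. A hedged sketch of how the already-created cluster could be patched to add that annotation, reusing the same edit(...) pattern shown in testConnectorPauseResume above (the follow-up wait reuses the helper referenced in the earlier tests):
// Sketch: enable connector management on the existing KafkaConnect so the connector can become Ready
Crds.kafkaConnectOperation(client).inNamespace(NAMESPACE).withName(connectName).edit(c -> new KafkaConnectBuilder(c)
        .editMetadata()
            .addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true")
        .endMetadata()
        .build());
waitForConnectorReady(connectorName);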
Use of io.strimzi.api.kafka.model.KafkaConnector in project strimzi by strimzi.
From the class KafkaConnectAssemblyOperatorMockTest, method setConnectResource.
private void setConnectResource(KafkaConnect connectResource) {
mockKube = new MockKube();
mockClient = mockKube
    .withCustomResourceDefinition(Crds.kafkaConnect(), KafkaConnect.class, KafkaConnectList.class, KafkaConnect::getStatus, KafkaConnect::setStatus)
        .withInitialInstances(Collections.singleton(connectResource))
    .end()
    .withCustomResourceDefinition(Crds.kafkaConnector(), KafkaConnector.class, KafkaConnectorList.class, KafkaConnector::getStatus, KafkaConnector::setStatus)
    .end()
    .build();
}
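A typical caller constructs the KafkaConnect resource first and passes it in before the mock server is used, for example from a test setup method. The constant names and spec values below are illustrative, not taken from the actual test class:
// Illustrative usage: seed the mock Kubernetes cluster with one KafkaConnect before each test
KafkaConnect connect = new KafkaConnectBuilder()
    .withNewMetadata()
        .withName(CLUSTER_NAME)
        .withNamespace(NAMESPACE)
    .endMetadata()
    .withNewSpec()
        .withReplicas(3)
        .withBootstrapServers("my-cluster-kafka-bootstrap:9092")
    .endSpec()
    .build();
setConnectResource(connect);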
Use of io.strimzi.api.kafka.model.KafkaConnector in project strimzi by strimzi.
From the class KafkaConnectBuildAssemblyOperatorKubeTest, method testContinueWithPreviousBuildOnKube.
@SuppressWarnings({ "checkstyle:MethodLength" })
@Test
public void testContinueWithPreviousBuildOnKube(VertxTestContext context) {
Plugin plugin1 = new PluginBuilder().withName("plugin1").withArtifacts(new JarArtifactBuilder().withUrl("https://my-domain.tld/my.jar").build()).build();
Plugin plugin2 = new PluginBuilder().withName("plugin2").withArtifacts(new JarArtifactBuilder().withUrl("https://my-domain.tld/my2.jar").build()).build();
KafkaConnect oldKc = new KafkaConnectBuilder()
    .withNewMetadata()
        .withName(NAME)
        .withNamespace(NAMESPACE)
    .endMetadata()
    .withNewSpec()
        .withReplicas(1)
        .withBootstrapServers("my-cluster-kafka-bootstrap:9092")
        .withNewBuild()
            .withNewDockerOutput()
                .withImage(OUTPUT_IMAGE)
                .withPushSecret("my-docker-credentials")
            .endDockerOutput()
            .withPlugins(plugin1, plugin2)
        .endBuild()
    .endSpec()
    .build();
KafkaConnectCluster oldConnect = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, oldKc, VERSIONS);
KafkaConnectBuild oldBuild = KafkaConnectBuild.fromCrd(Reconciliation.DUMMY_RECONCILIATION, oldKc, VERSIONS);
KafkaConnect kc = new KafkaConnectBuilder(oldKc)
    .editSpec()
        .editBuild()
            .withPlugins(plugin1, plugin2)
        .endBuild()
    .endSpec()
    .build();
KafkaConnectBuild build = KafkaConnectBuild.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kc, VERSIONS);
// Prepare and get mocks
ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(true);
CrdOperator mockConnectOps = supplier.connectOperator;
DeploymentOperator mockDepOps = supplier.deploymentOperations;
PodDisruptionBudgetOperator mockPdbOps = supplier.podDisruptionBudgetOperator;
PodDisruptionBudgetV1Beta1Operator mockPdbOpsV1Beta1 = supplier.podDisruptionBudgetV1Beta1Operator;
ConfigMapOperator mockCmOps = supplier.configMapOperations;
ServiceOperator mockServiceOps = supplier.serviceOperations;
NetworkPolicyOperator mockNetPolOps = supplier.networkPolicyOperator;
PodOperator mockPodOps = supplier.podOperations;
BuildConfigOperator mockBcOps = supplier.buildConfigOperations;
SecretOperator mockSecretOps = supplier.secretOperations;
CrdOperator<KubernetesClient, KafkaConnector, KafkaConnectorList> mockConnectorOps = supplier.kafkaConnectorOperator;
// Mock KafkaConnector ops
when(mockConnectorOps.listAsync(anyString(), any(Optional.class))).thenReturn(Future.succeededFuture(emptyList()));
// Mock KafkaConnect ops
when(mockConnectOps.get(NAMESPACE, NAME)).thenReturn(kc);
when(mockConnectOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(kc));
// Mock and capture service ops
ArgumentCaptor<Service> serviceCaptor = ArgumentCaptor.forClass(Service.class);
when(mockServiceOps.reconcile(any(), anyString(), anyString(), serviceCaptor.capture())).thenReturn(Future.succeededFuture());
// Mock and capture deployment ops
ArgumentCaptor<Deployment> depCaptor = ArgumentCaptor.forClass(Deployment.class);
when(mockDepOps.reconcile(any(), anyString(), anyString(), depCaptor.capture())).thenReturn(Future.succeededFuture());
when(mockDepOps.getAsync(eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)))).thenAnswer(inv -> {
Deployment dep = oldConnect.generateDeployment(emptyMap(), false, null, null);
dep.getSpec().getTemplate().getMetadata().getAnnotations().put(Annotations.STRIMZI_IO_CONNECT_BUILD_REVISION, "oldhashstub");
dep.getSpec().getTemplate().getSpec().getContainers().get(0).setImage("my-connect-build@sha256:olddigest");
return Future.succeededFuture(dep);
});
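// The stubbed Deployment above represents the previous reconciliation: it still carries the old build
// revision annotation ("oldhashstub") and the old image digest, so the operator detects that a rebuild is
// needed and then looks for an already-running build pod instead of reusing the deployed image as-is.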
when(mockDepOps.scaleUp(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42));
when(mockDepOps.scaleDown(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42));
when(mockDepOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
when(mockDepOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
when(mockSecretOps.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture());
// Mock and capture CM ops
when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap())));
ArgumentCaptor<ConfigMap> dockerfileCaptor = ArgumentCaptor.forClass(ConfigMap.class);
when(mockCmOps.reconcile(any(), anyString(), eq(KafkaConnectResources.dockerFileConfigMapName(NAME)), dockerfileCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap())));
// Mock and capture Pod ops
ArgumentCaptor<Pod> builderPodCaptor = ArgumentCaptor.forClass(Pod.class);
when(mockPodOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.buildPodName(NAME)), builderPodCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.noop(null)));
Pod runningBuild = new PodBuilder()
    .withNewMetadata()
        .withName(KafkaConnectResources.buildPodName(NAME))
        .withNamespace(NAMESPACE)
        .withAnnotations(singletonMap(Annotations.STRIMZI_IO_CONNECT_BUILD_REVISION, oldBuild.generateDockerfile().hashStub() + OUTPUT_IMAGE_HASH_STUB))
    .endMetadata()
    .withNewSpec()
    .endSpec()
    .build();
Pod terminatedPod = new PodBuilder()
    .withNewMetadata()
        .withName(KafkaConnectResources.buildPodName(NAME))
        .withNamespace(NAMESPACE)
        .withAnnotations(singletonMap(Annotations.STRIMZI_IO_CONNECT_BUILD_REVISION, oldBuild.generateDockerfile().hashStub()))
    .endMetadata()
    .withNewSpec()
    .endSpec()
    .withNewStatus()
        .withContainerStatuses(new ContainerStatusBuilder()
            .withNewState()
                .withNewTerminated()
                    .withExitCode(0)
                    .withMessage("my-connect-build@sha256:blablabla")
                .endTerminated()
            .endState()
            .build())
    .endStatus()
    .build();
when(mockPodOps.waitFor(any(), eq(NAMESPACE), eq(KafkaConnectResources.buildPodName(NAME)), anyString(), anyLong(), anyLong(), any(BiPredicate.class))).thenReturn(Future.succeededFuture((Void) null));
when(mockPodOps.getAsync(eq(NAMESPACE), eq(KafkaConnectResources.buildPodName(NAME)))).thenReturn(Future.succeededFuture(runningBuild), Future.succeededFuture(terminatedPod));
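// Mockito returns the two futures above consecutively: the first getAsync call sees the build pod still
// running (already annotated with the desired revision), the second sees it terminated with exit code 0 and
// the new image digest in the termination message -- i.e. the previously started build finishes during this
// reconciliation, which is exactly the "continue with previous build" case this test exercises.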
// Mock and capture BuildConfig ops
when(mockBcOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.buildConfigName(NAME)), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null)));
// Mock and capture NP ops
when(mockNetPolOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy())));
// Mock and capture PDB ops
when(mockPdbOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture());
when(mockPdbOpsV1Beta1.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture());
// Mock and capture KafkaConnect ops for status update
ArgumentCaptor<KafkaConnect> connectCaptor = ArgumentCaptor.forClass(KafkaConnect.class);
when(mockConnectOps.updateStatusAsync(any(), connectCaptor.capture())).thenReturn(Future.succeededFuture());
// Mock KafkaConnect API client
KafkaConnectApi mockConnectClient = mock(KafkaConnectApi.class);
// Prepare and run reconciliation
KafkaConnectAssemblyOperator ops = new KafkaConnectAssemblyOperator(
    vertx,
    new PlatformFeaturesAvailability(false, kubernetesVersion),
    supplier,
    ResourceUtils.dummyClusterOperatorConfig(VERSIONS),
    x -> mockConnectClient);
KafkaConnectCluster connect = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kc, VERSIONS);
Checkpoint async = context.checkpoint();
ops.reconcile(new Reconciliation("test-trigger", KafkaConnect.RESOURCE_KIND, NAMESPACE, NAME)).onComplete(context.succeeding(v -> context.verify(() -> {
// Verify Deployment
List<Deployment> capturedDeps = depCaptor.getAllValues();
assertThat(capturedDeps, hasSize(1));
Deployment dep = capturedDeps.get(0);
assertThat(dep.getMetadata().getName(), is(connect.getName()));
assertThat(dep.getSpec().getTemplate().getSpec().getContainers().get(0).getImage(), is("my-connect-build@sha256:blablabla"));
assertThat(Annotations.stringAnnotation(dep.getSpec().getTemplate(), Annotations.STRIMZI_IO_CONNECT_BUILD_REVISION, null), is(build.generateDockerfile().hashStub() + OUTPUT_IMAGE_HASH_STUB));
// Verify ConfigMap
List<ConfigMap> capturedCms = dockerfileCaptor.getAllValues();
assertThat(capturedCms, hasSize(0));
// Verify builder Pod
List<Pod> capturedBuilderPods = builderPodCaptor.getAllValues();
assertThat(capturedBuilderPods, hasSize(1));
assertThat(capturedBuilderPods.stream().filter(pod -> pod != null).collect(Collectors.toList()), hasSize(0));
// Verify status
List<KafkaConnect> capturedConnects = connectCaptor.getAllValues();
assertThat(capturedConnects, hasSize(1));
KafkaConnectStatus connectStatus = capturedConnects.get(0).getStatus();
assertThat(connectStatus.getConditions().get(0).getStatus(), is("True"));
assertThat(connectStatus.getConditions().get(0).getType(), is("Ready"));
async.flag();
})));
}