use of io.strimzi.api.kafka.model.KafkaConnector in project strimzi by strimzi.
the class KafkaConnectAssemblyOperatorTest method testDeleteClusterRoleBindings.
@Test
public void testDeleteClusterRoleBindings(VertxTestContext context) {
String kcName = "foo";
String kcNamespace = "test";
ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false);
ClusterRoleBindingOperator mockCrbOps = supplier.clusterRoleBindingOperator;
ArgumentCaptor<ClusterRoleBinding> desiredCrb = ArgumentCaptor.forClass(ClusterRoleBinding.class);
when(mockCrbOps.reconcile(any(), eq(KafkaConnectResources.initContainerClusterRoleBindingName(kcName, kcNamespace)), desiredCrb.capture())).thenReturn(Future.succeededFuture());
CrdOperator<KubernetesClient, KafkaConnector, KafkaConnectorList> mockCntrOps = supplier.kafkaConnectorOperator;
when(mockCntrOps.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(emptyList()));
KafkaConnectAssemblyOperator op = new KafkaConnectAssemblyOperator(vertx, new PlatformFeaturesAvailability(true, kubernetesVersion), supplier, ResourceUtils.dummyClusterOperatorConfig(VERSIONS));
Reconciliation reconciliation = new Reconciliation("test-trigger", KafkaConnect.RESOURCE_KIND, kcNamespace, kcName);
Checkpoint async = context.checkpoint();
op.delete(reconciliation).onComplete(context.succeeding(c -> context.verify(() -> {
assertThat(desiredCrb.getValue(), is(nullValue()));
Mockito.verify(mockCrbOps, times(1)).reconcile(any(), any(), any());
async.flag();
})));
}
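The null value captured for the desired ClusterRoleBinding reflects the usual operator pattern: reconciling a resource with a null desired state removes it. A minimal, hypothetical sketch of the call the delete path is expected to make, reusing only names that appear in the test above:
// Hypothetical sketch: a null desired state asks reconcile() to delete the init-container
// ClusterRoleBinding of this Connect cluster, if it still exists.
clusterRoleBindingOperator.reconcile(
        reconciliation,
        KafkaConnectResources.initContainerClusterRoleBindingName(kcName, kcNamespace),
        null);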
use of io.strimzi.api.kafka.model.KafkaConnector in project strimzi by strimzi.
the class KafkaConnectBuildAssemblyOperatorOpenShiftTest method testRestartPreviousBuildOnOpenShift.
@SuppressWarnings({ "checkstyle:MethodLength" })
@Test
public void testRestartPreviousBuildOnOpenShift(VertxTestContext context) {
Plugin plugin1 = new PluginBuilder().withName("plugin1").withArtifacts(new JarArtifactBuilder().withUrl("https://my-domain.tld/my.jar").build()).build();
Plugin plugin2 = new PluginBuilder().withName("plugin2").withArtifacts(new JarArtifactBuilder().withUrl("https://my-domain.tld/my2.jar").build()).build();
KafkaConnect oldKc = new KafkaConnectBuilder()
        .withNewMetadata()
            .withName(NAME)
            .withNamespace(NAMESPACE)
        .endMetadata()
        .withNewSpec()
            .withReplicas(1)
            .withBootstrapServers("my-cluster-kafka-bootstrap:9092")
            .withNewBuild()
                .withNewDockerOutput()
                    .withImage(OUTPUT_IMAGE)
                    .withPushSecret("my-docker-credentials")
                .endDockerOutput()
                .withPlugins(plugin1)
            .endBuild()
        .endSpec()
        .build();
KafkaConnectCluster oldConnect = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, oldKc, VERSIONS);
KafkaConnectBuild oldBuild = KafkaConnectBuild.fromCrd(Reconciliation.DUMMY_RECONCILIATION, oldKc, VERSIONS);
KafkaConnect kc = new KafkaConnectBuilder(oldKc).editSpec().editBuild().withPlugins(plugin1, plugin2).endBuild().endSpec().build();
KafkaConnectBuild build = KafkaConnectBuild.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kc, VERSIONS);
// Prepare and get mocks
ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(true);
CrdOperator mockConnectOps = supplier.connectOperator;
DeploymentOperator mockDepOps = supplier.deploymentOperations;
PodDisruptionBudgetOperator mockPdbOps = supplier.podDisruptionBudgetOperator;
PodDisruptionBudgetV1Beta1Operator mockPdbOpsV1Beta1 = supplier.podDisruptionBudgetV1Beta1Operator;
ConfigMapOperator mockCmOps = supplier.configMapOperations;
ServiceOperator mockServiceOps = supplier.serviceOperations;
NetworkPolicyOperator mockNetPolOps = supplier.networkPolicyOperator;
PodOperator mockPodOps = supplier.podOperations;
BuildConfigOperator mockBcOps = supplier.buildConfigOperations;
BuildOperator mockBuildOps = supplier.buildOperations;
SecretOperator mockSecretOps = supplier.secretOperations;
CrdOperator<KubernetesClient, KafkaConnector, KafkaConnectorList> mockConnectorOps = supplier.kafkaConnectorOperator;
// Mock KafkaConnector ops
when(mockConnectorOps.listAsync(anyString(), any(Optional.class))).thenReturn(Future.succeededFuture(emptyList()));
// Mock KafkaConnect ops
when(mockConnectOps.get(NAMESPACE, NAME)).thenReturn(kc);
when(mockConnectOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(kc));
// Mock and capture service ops
ArgumentCaptor<Service> serviceCaptor = ArgumentCaptor.forClass(Service.class);
when(mockServiceOps.reconcile(any(), anyString(), anyString(), serviceCaptor.capture())).thenReturn(Future.succeededFuture());
// Mock and capture deployment ops
ArgumentCaptor<Deployment> depCaptor = ArgumentCaptor.forClass(Deployment.class);
when(mockDepOps.reconcile(any(), anyString(), anyString(), depCaptor.capture())).thenReturn(Future.succeededFuture());
when(mockDepOps.getAsync(eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)))).thenAnswer(inv -> {
Deployment dep = oldConnect.generateDeployment(emptyMap(), false, null, null);
dep.getSpec().getTemplate().getMetadata().getAnnotations().put(Annotations.STRIMZI_IO_CONNECT_BUILD_REVISION, oldBuild.generateDockerfile().hashStub());
dep.getSpec().getTemplate().getSpec().getContainers().get(0).setImage("my-connect-build@sha256:olddigest");
return Future.succeededFuture(dep);
});
when(mockDepOps.scaleUp(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42));
when(mockDepOps.scaleDown(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42));
when(mockDepOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
when(mockDepOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
when(mockSecretOps.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture());
// Mock and capture CM ops
when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap())));
// Mock and capture Pod ops
when(mockPodOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.buildPodName(NAME)), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null)));
// Mock and capture Build ops
Build oldBuilder = new BuildBuilder()
        .withNewMetadata()
            .withNamespace(NAMESPACE)
            .withName(KafkaConnectResources.buildName(NAME, 1L))
            .withAnnotations(singletonMap(Annotations.STRIMZI_IO_CONNECT_BUILD_REVISION, oldBuild.generateDockerfile().hashStub()))
        .endMetadata()
        .withNewSpec().endSpec()
        .withNewStatus().withPhase("Running").endStatus()
        .build();
Build newBuilder = new BuildBuilder()
        .withNewMetadata()
            .withNamespace(NAMESPACE)
            .withName(KafkaConnectResources.buildName(NAME, 2L))
            .withAnnotations(singletonMap(Annotations.STRIMZI_IO_CONNECT_BUILD_REVISION, build.generateDockerfile().hashStub()))
        .endMetadata()
        .withNewSpec().endSpec()
        .withNewStatus()
            .withPhase("Complete")
            .withNewOutputDockerImageReference(OUTPUT_IMAGE)
            .withNewOutput().withNewTo().withImageDigest("sha256:blablabla").endTo().endOutput()
        .endStatus()
        .build();
when(mockBuildOps.getAsync(eq(NAMESPACE), eq(KafkaConnectResources.buildName(NAME, 1L)))).thenReturn(Future.succeededFuture(oldBuilder));
when(mockBuildOps.waitFor(any(), eq(NAMESPACE), eq(KafkaConnectResources.buildName(NAME, 2L)), anyString(), anyLong(), anyLong(), any(BiPredicate.class))).thenReturn(Future.succeededFuture());
when(mockBuildOps.getAsync(eq(NAMESPACE), eq(KafkaConnectResources.buildName(NAME, 2L)))).thenReturn(Future.succeededFuture(newBuilder));
// Mock and capture BuildConfig ops
ArgumentCaptor<BuildConfig> buildConfigCaptor = ArgumentCaptor.forClass(BuildConfig.class);
when(mockBcOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.buildConfigName(NAME)), buildConfigCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.noop(null)));
BuildConfig oldBuildConfig = new BuildConfigBuilder(oldBuild.generateBuildConfig(oldBuild.generateDockerfile())).withNewStatus().withLastVersion(1L).endStatus().build();
when(mockBcOps.getAsync(eq(NAMESPACE), eq(KafkaConnectResources.buildConfigName(NAME)))).thenReturn(Future.succeededFuture(oldBuildConfig));
ArgumentCaptor<BuildRequest> buildRequestCaptor = ArgumentCaptor.forClass(BuildRequest.class);
when(mockBcOps.startBuild(eq(NAMESPACE), eq(KafkaConnectResources.buildConfigName(NAME)), buildRequestCaptor.capture())).thenReturn(Future.succeededFuture(newBuilder));
// Mock and capture NP ops
when(mockNetPolOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy())));
// Mock and capture PDB ops
when(mockPdbOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture());
when(mockPdbOpsV1Beta1.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture());
// Mock and capture KafkaConnect ops for status update
ArgumentCaptor<KafkaConnect> connectCaptor = ArgumentCaptor.forClass(KafkaConnect.class);
when(mockConnectOps.updateStatusAsync(any(), connectCaptor.capture())).thenReturn(Future.succeededFuture());
// Mock KafkaConnect API client
KafkaConnectApi mockConnectClient = mock(KafkaConnectApi.class);
// Prepare and run reconciliation
KafkaConnectAssemblyOperator ops = new KafkaConnectAssemblyOperator(vertx, new PlatformFeaturesAvailability(true, kubernetesVersion), supplier, ResourceUtils.dummyClusterOperatorConfig(VERSIONS), x -> mockConnectClient);
KafkaConnectCluster connect = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kc, VERSIONS);
Checkpoint async = context.checkpoint();
ops.reconcile(new Reconciliation("test-trigger", KafkaConnect.RESOURCE_KIND, NAMESPACE, NAME)).onComplete(context.succeeding(v -> context.verify(() -> {
// Verify Deployment
List<Deployment> capturedDeps = depCaptor.getAllValues();
assertThat(capturedDeps, hasSize(1));
Deployment dep = capturedDeps.get(0);
assertThat(dep.getMetadata().getName(), is(connect.getName()));
assertThat(dep.getSpec().getTemplate().getSpec().getContainers().get(0).getImage(), is("my-connect-build@sha256:blablabla"));
assertThat(Annotations.stringAnnotation(dep.getSpec().getTemplate(), Annotations.STRIMZI_IO_CONNECT_BUILD_REVISION, null), is(build.generateDockerfile().hashStub() + OUTPUT_IMAGE_HASH_STUB));
// Verify BuildConfig
List<BuildConfig> capturedBcs = buildConfigCaptor.getAllValues();
assertThat(capturedBcs, hasSize(1));
BuildConfig buildConfig = capturedBcs.get(0);
assertThat(buildConfig.getSpec().getSource().getDockerfile(), is(build.generateDockerfile().getDockerfile()));
// Verify status
List<KafkaConnect> capturedConnects = connectCaptor.getAllValues();
assertThat(capturedConnects, hasSize(1));
KafkaConnectStatus connectStatus = capturedConnects.get(0).getStatus();
assertThat(connectStatus.getConditions().get(0).getStatus(), is("True"));
assertThat(connectStatus.getConditions().get(0).getType(), is("Ready"));
async.flag();
})));
}
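Note how the mocks encode OpenShift's build numbering: the stale Build corresponds to the BuildConfig status lastVersion (1), while the freshly started Build is expected under the next version (2). A hypothetical sketch of that expectation, reusing only identifiers from the test above:
// Hypothetical: a Build started while the stale build sits at lastVersion N is expected as N + 1.
long lastVersion = oldBuildConfig.getStatus().getLastVersion(); // 1 in this test
String expectedNewBuildName = KafkaConnectResources.buildName(NAME, lastVersion + 1); // matches buildName(NAME, 2L)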
use of io.strimzi.api.kafka.model.KafkaConnector in project strimzi by strimzi.
the class KafkaConnectBuildAssemblyOperatorOpenShiftTest method testUpdateWithoutRebuildOnOpenShift.
@Test
public void testUpdateWithoutRebuildOnOpenShift(VertxTestContext context) {
Plugin plugin1 = new PluginBuilder().withName("plugin1").withArtifacts(new JarArtifactBuilder().withUrl("https://my-domain.tld/my.jar").build()).build();
KafkaConnect kc = new KafkaConnectBuilder()
        .withNewMetadata()
            .withName(NAME)
            .withNamespace(NAMESPACE)
        .endMetadata()
        .withNewSpec()
            .withReplicas(1)
            .withBootstrapServers("my-cluster-kafka-bootstrap:9092")
            .withNewBuild()
                .withNewDockerOutput()
                    .withImage(OUTPUT_IMAGE)
                    .withPushSecret("my-docker-credentials")
                .endDockerOutput()
                .withPlugins(plugin1)
            .endBuild()
        .endSpec()
        .build();
KafkaConnectCluster connect = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kc, VERSIONS);
KafkaConnectBuild build = KafkaConnectBuild.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kc, VERSIONS);
// Prepare and get mocks
ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(true);
CrdOperator mockConnectOps = supplier.connectOperator;
DeploymentOperator mockDepOps = supplier.deploymentOperations;
PodDisruptionBudgetOperator mockPdbOps = supplier.podDisruptionBudgetOperator;
PodDisruptionBudgetV1Beta1Operator mockPdbOpsV1Beta1 = supplier.podDisruptionBudgetV1Beta1Operator;
ConfigMapOperator mockCmOps = supplier.configMapOperations;
ServiceOperator mockServiceOps = supplier.serviceOperations;
NetworkPolicyOperator mockNetPolOps = supplier.networkPolicyOperator;
PodOperator mockPodOps = supplier.podOperations;
BuildConfigOperator mockBcOps = supplier.buildConfigOperations;
BuildOperator mockBuildOps = supplier.buildOperations;
SecretOperator mockSecretOps = supplier.secretOperations;
CrdOperator<KubernetesClient, KafkaConnector, KafkaConnectorList> mockConnectorOps = supplier.kafkaConnectorOperator;
// Mock KafkaConnector ops
when(mockConnectorOps.listAsync(anyString(), any(Optional.class))).thenReturn(Future.succeededFuture(emptyList()));
// Mock KafkaConnect ops
when(mockConnectOps.get(NAMESPACE, NAME)).thenReturn(kc);
when(mockConnectOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(kc));
// Mock and capture service ops
ArgumentCaptor<Service> serviceCaptor = ArgumentCaptor.forClass(Service.class);
when(mockServiceOps.reconcile(any(), anyString(), anyString(), serviceCaptor.capture())).thenReturn(Future.succeededFuture());
// Mock and capture deployment ops
ArgumentCaptor<Deployment> depCaptor = ArgumentCaptor.forClass(Deployment.class);
when(mockDepOps.reconcile(any(), anyString(), anyString(), depCaptor.capture())).thenReturn(Future.succeededFuture());
when(mockDepOps.getAsync(eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)))).thenAnswer(inv -> {
Deployment dep = connect.generateDeployment(emptyMap(), false, null, null);
dep.getSpec().getTemplate().getMetadata().getAnnotations().put(Annotations.STRIMZI_IO_CONNECT_BUILD_REVISION, build.generateDockerfile().hashStub() + OUTPUT_IMAGE_HASH_STUB);
dep.getSpec().getTemplate().getSpec().getContainers().get(0).setImage("my-connect-build@sha256:blablabla");
return Future.succeededFuture(dep);
});
when(mockDepOps.scaleUp(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42));
when(mockDepOps.scaleDown(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42));
when(mockDepOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
when(mockDepOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
when(mockSecretOps.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture());
// Mock and capture CM ops
when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap())));
// Mock and capture Pod ops
when(mockPodOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.buildPodName(NAME)), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null)));
// Mock and capture BuildConfig ops
ArgumentCaptor<BuildConfig> buildConfigCaptor = ArgumentCaptor.forClass(BuildConfig.class);
when(mockBcOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.buildConfigName(NAME)), buildConfigCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.noop(null)));
// Mock and capture NP ops
when(mockNetPolOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy())));
// Mock and capture PDB ops
when(mockPdbOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture());
when(mockPdbOpsV1Beta1.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture());
// Mock and capture KafkaConnect ops for status update
ArgumentCaptor<KafkaConnect> connectCaptor = ArgumentCaptor.forClass(KafkaConnect.class);
when(mockConnectOps.updateStatusAsync(any(), connectCaptor.capture())).thenReturn(Future.succeededFuture());
// Mock KafkaConnect API client
KafkaConnectApi mockConnectClient = mock(KafkaConnectApi.class);
// Prepare and run reconciliation
KafkaConnectAssemblyOperator ops = new KafkaConnectAssemblyOperator(vertx, new PlatformFeaturesAvailability(true, kubernetesVersion), supplier, ResourceUtils.dummyClusterOperatorConfig(VERSIONS), x -> mockConnectClient);
Checkpoint async = context.checkpoint();
ops.reconcile(new Reconciliation("test-trigger", KafkaConnect.RESOURCE_KIND, NAMESPACE, NAME)).onComplete(context.succeeding(v -> context.verify(() -> {
// Verify Deployment
List<Deployment> capturedDeps = depCaptor.getAllValues();
assertThat(capturedDeps, hasSize(1));
Deployment dep = capturedDeps.get(0);
assertThat(dep.getMetadata().getName(), is(connect.getName()));
assertThat(dep.getSpec().getTemplate().getSpec().getContainers().get(0).getImage(), is("my-connect-build@sha256:blablabla"));
assertThat(Annotations.stringAnnotation(dep.getSpec().getTemplate(), Annotations.STRIMZI_IO_CONNECT_BUILD_REVISION, null), is(build.generateDockerfile().hashStub() + OUTPUT_IMAGE_HASH_STUB));
// Verify BuildConfig
List<BuildConfig> capturedBcs = buildConfigCaptor.getAllValues();
assertThat(capturedBcs, hasSize(0));
// Verify status
List<KafkaConnect> capturedConnects = connectCaptor.getAllValues();
assertThat(capturedConnects, hasSize(1));
KafkaConnectStatus connectStatus = capturedConnects.get(0).getStatus();
assertThat(connectStatus.getConditions().get(0).getStatus(), is("True"));
assertThat(connectStatus.getConditions().get(0).getType(), is("Ready"));
async.flag();
})));
}
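Taken together, the two build tests above hinge on the Annotations.STRIMZI_IO_CONNECT_BUILD_REVISION annotation: when the Dockerfile hash recorded on the Deployment no longer matches the desired build, a new Build is started; when it matches (including the output image hash stub), no BuildConfig is reconciled at all. A simplified, hypothetical version of that comparison, not the operator's actual code, which also folds the output image hash into the revision:
// Hypothetical helper illustrating the decision both tests exercise.
static boolean connectBuildNeedsRebuild(Deployment currentDeployment, KafkaConnectBuild desiredBuild) {
    String currentRevision = Annotations.stringAnnotation(
            currentDeployment.getSpec().getTemplate(),
            Annotations.STRIMZI_IO_CONNECT_BUILD_REVISION,
            null);
    String desiredRevision = desiredBuild.generateDockerfile().hashStub();
    return !desiredRevision.equals(currentRevision);
}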
use of io.strimzi.api.kafka.model.KafkaConnector in project strimzi by strimzi.
the class AbstractConnectOperator method reconcileConnectors.
/**
* Reconcile all the connectors selected by the given Connect instance, updating each connector's status with the result.
* @param reconciliation The reconciliation
* @param connect The Connect custom resource
* @param connectStatus Status of the KafkaConnect resource (will be used to set the available connector plugins)
* @param scaledToZero Indicates whether the related Connect cluster is currently scaled to 0 replicas
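* @param desiredLogging The desired logging configuration for the Connect loggers
* @param defaultLogging The default logging configuration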
* @return A future, failed if any of the connectors' statuses could not be updated.
*/
protected Future<Void> reconcileConnectors(Reconciliation reconciliation, T connect, S connectStatus, boolean scaledToZero, String desiredLogging, OrderedProperties defaultLogging) {
String connectName = connect.getMetadata().getName();
String namespace = connect.getMetadata().getNamespace();
String host = KafkaConnectResources.qualifiedServiceName(connectName, namespace);
if (!isUseResources(connect)) {
return Future.succeededFuture();
}
if (scaledToZero) {
return connectorOperator.listAsync(namespace, Optional.of(new LabelSelectorBuilder().addToMatchLabels(Labels.STRIMZI_CLUSTER_LABEL, connectName).build()))
        .compose(connectors -> CompositeFuture.join(connectors.stream()
                .map(connector -> maybeUpdateConnectorStatus(reconciliation, connector, null, zeroReplicas(namespace, connectName)))
                .collect(Collectors.toList())))
        .map((Void) null);
}
KafkaConnectApi apiClient = connectClientProvider.apply(vertx);
return CompositeFuture.join(
                apiClient.list(host, port),
                connectorOperator.listAsync(namespace, Optional.of(new LabelSelectorBuilder().addToMatchLabels(Labels.STRIMZI_CLUSTER_LABEL, connectName).build())),
                apiClient.listConnectorPlugins(reconciliation, host, port),
                apiClient.updateConnectLoggers(reconciliation, host, port, desiredLogging, defaultLogging))
        .compose(cf -> {
List<String> runningConnectorNames = cf.resultAt(0);
List<KafkaConnector> desiredConnectors = cf.resultAt(1);
List<ConnectorPlugin> connectorPlugins = cf.resultAt(2);
LOGGER.debugCr(reconciliation, "Setting list of connector plugins in Kafka Connect status");
connectStatus.setConnectorPlugins(connectorPlugins);
connectorsResourceCounter(namespace).set(desiredConnectors.size());
Set<String> deleteConnectorNames = new HashSet<>(runningConnectorNames);
deleteConnectorNames.removeAll(desiredConnectors.stream().map(c -> c.getMetadata().getName()).collect(Collectors.toSet()));
LOGGER.debugCr(reconciliation, "{} cluster: delete connectors: {}", kind(), deleteConnectorNames);
Stream<Future<Void>> deletionFutures = deleteConnectorNames.stream().map(connectorName -> reconcileConnectorAndHandleResult(reconciliation, host, apiClient, true, connectorName, null));
LOGGER.debugCr(reconciliation, "{} cluster: required connectors: {}", kind(), desiredConnectors);
Stream<Future<Void>> createUpdateFutures = desiredConnectors.stream().map(connector -> reconcileConnectorAndHandleResult(reconciliation, host, apiClient, true, connector.getMetadata().getName(), connector));
return CompositeFuture.join(Stream.concat(deletionFutures, createUpdateFutures).collect(Collectors.toList())).map((Void) null);
}).recover(error -> {
if (error instanceof ConnectTimeoutException) {
Promise<Void> connectorStatuses = Promise.promise();
LOGGER.warnCr(reconciliation, "Failed to connect to the REST API => trying to update the connector status");
connectorOperator.listAsync(namespace, Optional.of(new LabelSelectorBuilder().addToMatchLabels(Labels.STRIMZI_CLUSTER_LABEL, connectName).build()))
        .compose(connectors -> CompositeFuture.join(connectors.stream()
                .map(connector -> maybeUpdateConnectorStatus(reconciliation, connector, null, error))
                .collect(Collectors.toList())))
        .onComplete(ignore -> connectorStatuses.fail(error));
return connectorStatuses.future();
} else {
return Future.failedFuture(error);
}
});
}
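The label selector used above means that only KafkaConnector resources labelled with Labels.STRIMZI_CLUSTER_LABEL (strimzi.io/cluster) set to the name of the KafkaConnect resource are reconciled. A minimal, hypothetical example of building such a connector; all names and config values are placeholders:
// Hypothetical KafkaConnector that the selector in reconcileConnectors() would match;
// the label value must equal the metadata.name of the owning KafkaConnect resource.
KafkaConnector connector = new KafkaConnectorBuilder()
        .withNewMetadata()
            .withName("my-source-connector")
            .withNamespace("my-namespace")
            .addToLabels(Labels.STRIMZI_CLUSTER_LABEL, "my-connect-cluster")
        .endMetadata()
        .withNewSpec()
            .withClassName("org.apache.kafka.connect.file.FileStreamSourceConnector")
            .withTasksMax(1)
            .addToConfig("topic", "my-topic")
            .addToConfig("file", "/tmp/source.txt")
        .endSpec()
        .build();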
use of io.strimzi.api.kafka.model.KafkaConnector in project cos-fleetshard by bf2fc6cc711aee1a0c2a.
the class KafkaConnectorSteps method kc_config_contains.
@And("the kctr has config containing:")
public void kc_config_contains(DataTable table) {
KafkaConnector res = kctr();
assertThat(res).isNotNull();
assertThatDataTable(table, ctx::resolvePlaceholders).matches(res.getSpec().getConfig());
}