Use of io.strimzi.api.kafka.model.connect.ConnectorPlugin in project strimzi by strimzi.
In the class KafkaConnectApiTest, the method test:
@IsolatedTest
@SuppressWarnings({ "unchecked", "checkstyle:MethodLength", "checkstyle:NPathComplexity" })
public void test(VertxTestContext context) {
    KafkaConnectApi client = new KafkaConnectApiImpl(vertx);
    Checkpoint async = context.checkpoint();
    client.listConnectorPlugins(Reconciliation.DUMMY_RECONCILIATION, "localhost", PORT)
        .onComplete(context.succeeding(connectorPlugins -> context.verify(() -> {
            assertThat(connectorPlugins.size(), greaterThanOrEqualTo(2));
            ConnectorPlugin fileSink = connectorPlugins.stream()
                    .filter(connector -> "org.apache.kafka.connect.file.FileStreamSinkConnector".equals(connector.getConnectorClass()))
                    .findFirst().orElse(null);
            assertNotNull(fileSink);
            assertThat(fileSink.getType(), is("sink"));
            assertThat(fileSink.getVersion(), is(not(emptyString())));
            ConnectorPlugin fileSource = connectorPlugins.stream()
                    .filter(connector -> "org.apache.kafka.connect.file.FileStreamSourceConnector".equals(connector.getConnectorClass()))
                    .findFirst().orElse(null);
            assertNotNull(fileSource);
            assertThat(fileSource.getType(), is("source"));
            assertThat(fileSource.getVersion(), is(not(emptyString())));
        })))
        .compose(connectorPlugins -> client.list("localhost", PORT))
        .onComplete(context.succeeding(connectorNames -> context.verify(() -> assertThat(connectorNames, is(empty())))))
        .compose(connectorNames -> {
            JsonObject o = new JsonObject().put("connector.class", "FileStreamSource").put("tasks.max", "1").put("file", "/dev/null").put("topic", "my-topic");
            return client.createOrUpdatePutRequest(Reconciliation.DUMMY_RECONCILIATION, "localhost", PORT, "test", o);
        })
        .onComplete(context.succeeding())
        .compose(created -> {
            Promise<Map<String, Object>> promise = Promise.promise();
            Handler<Long> handler = new Handler<Long>() {
                @Override
                public void handle(Long timerId) {
                    client.status(Reconciliation.DUMMY_RECONCILIATION, "localhost", PORT, "test").onComplete(result -> {
                        if (result.succeeded()) {
                            Map<String, Object> status = result.result();
                            if ("RUNNING".equals(((Map) status.getOrDefault("connector", emptyMap())).get("state"))) {
                                promise.complete(status);
                                return;
                            } else {
                                System.err.println(status);
                            }
                        } else {
                            result.cause().printStackTrace();
                        }
                        vertx.setTimer(1000, this);
                    });
                }
            };
            vertx.setTimer(1000, handler);
            return promise.future();
        })
        .onComplete(context.succeeding(status -> context.verify(() -> {
            assertThat(status.get("name"), is("test"));
            Map<String, Object> connectorStatus = (Map<String, Object>) status.getOrDefault("connector", emptyMap());
            assertThat(connectorStatus.get("state"), is("RUNNING"));
            assertThat(connectorStatus.get("worker_id"), is("localhost:18083"));
            System.out.println("help " + connectorStatus);
            List<Map> tasks = (List<Map>) status.get("tasks");
            for (Map an : tasks) {
                assertThat(an.get("state"), is("RUNNING"));
                assertThat(an.get("worker_id"), is("localhost:18083"));
            }
        })))
        .compose(status -> client.getConnectorConfig(Reconciliation.DUMMY_RECONCILIATION, new BackOff(10), "localhost", PORT, "test"))
        .onComplete(context.succeeding(config -> context.verify(() -> {
            assertThat(config, is(TestUtils.map("connector.class", "FileStreamSource", "file", "/dev/null", "tasks.max", "1", "name", "test", "topic", "my-topic")));
        })))
        .compose(config -> client.getConnectorConfig(Reconciliation.DUMMY_RECONCILIATION, new BackOff(10), "localhost", PORT, "does-not-exist"))
        .onComplete(context.failing(error -> context.verify(() -> {
            assertThat(error, instanceOf(ConnectRestException.class));
            assertThat(((ConnectRestException) error).getStatusCode(), is(404));
        })))
        .recover(error -> Future.succeededFuture())
        .compose(ignored -> client.pause("localhost", PORT, "test"))
        .onComplete(context.succeeding())
        .compose(ignored -> client.resume("localhost", PORT, "test"))
        .onComplete(context.succeeding())
        .compose(ignored -> client.restart("localhost", PORT, "test"))
        .onComplete(context.succeeding())
        .compose(ignored -> client.restartTask("localhost", PORT, "test", 0))
        .onComplete(context.succeeding())
        .compose(ignored -> {
            JsonObject o = new JsonObject().put("connector.class", "ThisConnectorDoesNotExist").put("tasks.max", "1").put("file", "/dev/null").put("topic", "my-topic");
            return client.createOrUpdatePutRequest(Reconciliation.DUMMY_RECONCILIATION, "localhost", PORT, "broken", o);
        })
        .onComplete(context.failing(error -> context.verify(() -> {
            assertThat(error, instanceOf(ConnectRestException.class));
            assertThat(error.getMessage(), containsString("Failed to find any class that implements Connector and which name matches ThisConnectorDoesNotExist"));
        })))
        .recover(e -> Future.succeededFuture())
        .compose(ignored -> {
            JsonObject o = new JsonObject().put("connector.class", "FileStreamSource").put("tasks.max", "dog").put("file", "/dev/null").put("topic", "my-topic");
            return client.createOrUpdatePutRequest(Reconciliation.DUMMY_RECONCILIATION, "localhost", PORT, "broken2", o);
        })
        .onComplete(context.failing(error -> context.verify(() -> {
            assertThat(error, instanceOf(ConnectRestException.class));
            assertThat(error.getMessage(), containsString("Invalid value dog for configuration tasks.max: Not a number of type INT"));
        })))
        .recover(e -> Future.succeededFuture())
        .compose(createResponse -> client.list("localhost", PORT))
        .onComplete(context.succeeding(connectorNames -> context.verify(() -> assertThat(connectorNames, is(singletonList("test"))))))
        .compose(connectorNames -> client.delete(Reconciliation.DUMMY_RECONCILIATION, "localhost", PORT, "test"))
        .onComplete(context.succeeding())
        .compose(deletedConnector -> client.list("localhost", PORT))
        .onComplete(context.succeeding(connectorNames -> assertThat(connectorNames, is(empty()))))
        .compose(connectorNames -> client.delete(Reconciliation.DUMMY_RECONCILIATION, "localhost", PORT, "never-existed"))
        .onComplete(context.failing(error -> {
            assertThat(error, instanceOf(ConnectRestException.class));
            assertThat(error.getMessage(), containsString("Connector never-existed not found"));
            async.flag();
        }));
}
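For reference, the status assertions above expect the Kafka Connect GET /connectors/{name}/status response to have roughly the following shape. This is a hand-written illustration of the payload the test parses, not code from the test itself; the field values are examples only and JsonArray is assumed to be the Vert.x io.vertx.core.json.JsonArray.

// Illustration only: the status document the assertions above expect.
JsonObject expectedShape = new JsonObject()
        .put("name", "test")
        .put("connector", new JsonObject()
                .put("state", "RUNNING")
                .put("worker_id", "localhost:18083"))
        .put("tasks", new JsonArray()
                .add(new JsonObject()
                        .put("id", 0)
                        .put("state", "RUNNING")
                        .put("worker_id", "localhost:18083")));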
Use of io.strimzi.api.kafka.model.connect.ConnectorPlugin in project strimzi by strimzi.
In the class AbstractConnectOperator, the method reconcileConnectors:
/**
 * Reconciles all the connectors selected by the given Connect instance, updating each connector's status with the result.
 *
 * @param reconciliation The reconciliation
 * @param connect        The Connect custom resource
 * @param connectStatus  Status of the KafkaConnect resource (will be used to set the available
 *                       connector plugins)
 * @param scaledToZero   Indicates whether the related Connect cluster is currently scaled to 0 replicas
 * @param desiredLogging The desired logging configuration
 * @param defaultLogging The default logging properties
 * @return A future, failed if any of the connectors' statuses could not be updated.
 */
protected Future<Void> reconcileConnectors(Reconciliation reconciliation, T connect, S connectStatus,
                                           boolean scaledToZero, String desiredLogging, OrderedProperties defaultLogging) {
    String connectName = connect.getMetadata().getName();
    String namespace = connect.getMetadata().getNamespace();
    String host = KafkaConnectResources.qualifiedServiceName(connectName, namespace);
    if (!isUseResources(connect)) {
        return Future.succeededFuture();
    }
    if (scaledToZero) {
        return connectorOperator.listAsync(namespace, Optional.of(new LabelSelectorBuilder().addToMatchLabels(Labels.STRIMZI_CLUSTER_LABEL, connectName).build()))
                .compose(connectors -> CompositeFuture.join(connectors.stream()
                        .map(connector -> maybeUpdateConnectorStatus(reconciliation, connector, null, zeroReplicas(namespace, connectName)))
                        .collect(Collectors.toList())))
                .map((Void) null);
    }
    KafkaConnectApi apiClient = connectClientProvider.apply(vertx);
    return CompositeFuture.join(
            apiClient.list(host, port),
            connectorOperator.listAsync(namespace, Optional.of(new LabelSelectorBuilder().addToMatchLabels(Labels.STRIMZI_CLUSTER_LABEL, connectName).build())),
            apiClient.listConnectorPlugins(reconciliation, host, port),
            apiClient.updateConnectLoggers(reconciliation, host, port, desiredLogging, defaultLogging)
    ).compose(cf -> {
        List<String> runningConnectorNames = cf.resultAt(0);
        List<KafkaConnector> desiredConnectors = cf.resultAt(1);
        List<ConnectorPlugin> connectorPlugins = cf.resultAt(2);
        LOGGER.debugCr(reconciliation, "Setting list of connector plugins in Kafka Connect status");
        connectStatus.setConnectorPlugins(connectorPlugins);
        connectorsResourceCounter(namespace).set(desiredConnectors.size());
        Set<String> deleteConnectorNames = new HashSet<>(runningConnectorNames);
        deleteConnectorNames.removeAll(desiredConnectors.stream().map(c -> c.getMetadata().getName()).collect(Collectors.toSet()));
        LOGGER.debugCr(reconciliation, "{} cluster: delete connectors: {}", kind(), deleteConnectorNames);
        Stream<Future<Void>> deletionFutures = deleteConnectorNames.stream()
                .map(connectorName -> reconcileConnectorAndHandleResult(reconciliation, host, apiClient, true, connectorName, null));
        LOGGER.debugCr(reconciliation, "{} cluster: required connectors: {}", kind(), desiredConnectors);
        Stream<Future<Void>> createUpdateFutures = desiredConnectors.stream()
                .map(connector -> reconcileConnectorAndHandleResult(reconciliation, host, apiClient, true, connector.getMetadata().getName(), connector));
        return CompositeFuture.join(Stream.concat(deletionFutures, createUpdateFutures).collect(Collectors.toList())).map((Void) null);
    }).recover(error -> {
        if (error instanceof ConnectTimeoutException) {
            Promise<Void> connectorStatuses = Promise.promise();
            LOGGER.warnCr(reconciliation, "Failed to connect to the REST API => trying to update the connector status");
            connectorOperator.listAsync(namespace, Optional.of(new LabelSelectorBuilder().addToMatchLabels(Labels.STRIMZI_CLUSTER_LABEL, connectName).build()))
                    .compose(connectors -> CompositeFuture.join(connectors.stream()
                            .map(connector -> maybeUpdateConnectorStatus(reconciliation, connector, null, error))
                            .collect(Collectors.toList())))
                    .onComplete(ignore -> connectorStatuses.fail(error));
            return connectorStatuses.future();
        } else {
            return Future.failedFuture(error);
        }
    });
}
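A minimal sketch of what this wiring amounts to for the plugin list: the ConnectorPlugin objects returned by the REST API simply become part of the custom resource status. The ConnectorPluginBuilder calls mirror the tests further below; the concrete status type KafkaConnectStatus (what S resolves to for the KafkaConnect operator) and the example version string are assumptions of this sketch, not taken from the source.

// Sketch only: sets a plugin list on a status object, mirroring
// connectStatus.setConnectorPlugins(connectorPlugins) above.
ConnectorPlugin filePlugin = new ConnectorPluginBuilder()
        .withConnectorClass("org.apache.kafka.connect.file.FileStreamSinkConnector")
        .withType("sink")
        .withVersion("3.0.0")   // example value only
        .build();
KafkaConnectStatus connectStatus = new KafkaConnectStatus();
connectStatus.setConnectorPlugins(List.of(filePlugin));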
Use of io.strimzi.api.kafka.model.connect.ConnectorPlugin in project strimzi-kafka-operator by strimzi.
In the class ConnectorMockTest, the method setupMockConnectAPI:
private void setupMockConnectAPI() {
    api = mock(KafkaConnectApi.class);
    runningConnectors = new HashMap<>();

    when(api.list(any(), anyInt())).thenAnswer(i -> {
        String host = i.getArgument(0);
        String matchingKeyPrefix = host + "##";
        return Future.succeededFuture(runningConnectors.keySet().stream()
                .filter(s -> s.startsWith(matchingKeyPrefix))
                .map(s -> s.substring(matchingKeyPrefix.length()))
                .collect(Collectors.toList()));
    });

    when(api.listConnectorPlugins(any(), any(), anyInt())).thenAnswer(i -> {
        ConnectorPlugin connectorPlugin = new ConnectorPluginBuilder()
                .withConnectorClass("io.strimzi.MyClass")
                .withType("sink")
                .withVersion("1.0.0")
                .build();
        return Future.succeededFuture(Collections.singletonList(connectorPlugin));
    });

    when(api.updateConnectLoggers(any(), anyString(), anyInt(), anyString(), any(OrderedProperties.class))).thenReturn(Future.succeededFuture());

    when(api.getConnectorConfig(any(), any(), any(), anyInt(), any())).thenAnswer(invocation -> {
        String host = invocation.getArgument(2);
        String connectorName = invocation.getArgument(4);
        ConnectorState connectorState = runningConnectors.get(key(host, connectorName));
        if (connectorState != null) {
            Map<String, String> map = new HashMap<>();
            map.put("name", connectorName);
            for (Map.Entry<String, Object> entry : connectorState.config) {
                if (entry.getValue() != null) {
                    map.put(entry.getKey(), entry.getValue().toString());
                }
            }
            return Future.succeededFuture(map);
        } else {
            return Future.failedFuture(new ConnectRestException("GET", String.format("/connectors/%s/config", connectorName), 404, "Not Found", ""));
        }
    });

    when(api.getConnector(any(), any(), anyInt(), any())).thenAnswer(invocation -> {
        String host = invocation.getArgument(1);
        String connectorName = invocation.getArgument(3);
        ConnectorState connectorState = runningConnectors.get(key(host, connectorName));
        if (connectorState == null) {
            return Future.failedFuture(new ConnectRestException("GET", String.format("/connectors/%s", connectorName), 404, "Not Found", ""));
        }
        return Future.succeededFuture(TestUtils.map("name", connectorName, "config", connectorState.config, "tasks", emptyMap()));
    });

    when(api.createOrUpdatePutRequest(any(), any(), anyInt(), anyString(), any())).thenAnswer(invocation -> {
        LOGGER.info((String) invocation.getArgument(1) + invocation.getArgument(2) + invocation.getArgument(3) + invocation.getArgument(4));
        String host = invocation.getArgument(1);
        LOGGER.info("###### create " + host);
        String connectorName = invocation.getArgument(3);
        JsonObject connectorConfig = invocation.getArgument(4);
        runningConnectors.putIfAbsent(key(host, connectorName), new ConnectorState(false, connectorConfig));
        return Future.succeededFuture();
    });

    when(api.delete(any(), any(), anyInt(), anyString())).thenAnswer(invocation -> {
        String host = invocation.getArgument(1);
        LOGGER.info("###### delete " + host);
        String connectorName = invocation.getArgument(3);
        ConnectorState remove = runningConnectors.remove(key(host, connectorName));
        return remove != null ? Future.succeededFuture() : Future.failedFuture("No such connector " + connectorName);
    });

    when(api.statusWithBackOff(any(), any(), any(), anyInt(), anyString())).thenAnswer(invocation -> {
        String host = invocation.getArgument(2);
        LOGGER.info("###### status " + host);
        String connectorName = invocation.getArgument(4);
        return kafkaConnectApiStatusMock(host, connectorName);
    });

    when(api.status(any(), any(), anyInt(), anyString())).thenAnswer(invocation -> {
        String host = invocation.getArgument(1);
        LOGGER.info("###### status " + host);
        String connectorName = invocation.getArgument(3);
        return kafkaConnectApiStatusMock(host, connectorName);
    });

    when(api.pause(any(), anyInt(), anyString())).thenAnswer(invocation -> {
        String host = invocation.getArgument(0);
        String connectorName = invocation.getArgument(2);
        ConnectorState connectorState = runningConnectors.get(key(host, connectorName));
        if (connectorState == null) {
            return Future.failedFuture(new ConnectRestException("PUT", "", 404, "Not found", "Connector name " + connectorName));
        }
        if (!connectorState.paused) {
            runningConnectors.put(key(host, connectorName), new ConnectorState(true, connectorState.config));
        }
        return Future.succeededFuture();
    });

    when(api.resume(any(), anyInt(), anyString())).thenAnswer(invocation -> {
        String host = invocation.getArgument(0);
        String connectorName = invocation.getArgument(2);
        ConnectorState connectorState = runningConnectors.get(key(host, connectorName));
        if (connectorState == null) {
            return Future.failedFuture(new ConnectRestException("PUT", "", 404, "Not found", "Connector name " + connectorName));
        }
        if (connectorState.paused) {
            runningConnectors.put(key(host, connectorName), new ConnectorState(false, connectorState.config));
        }
        return Future.succeededFuture();
    });

    when(api.restart(any(), anyInt(), anyString())).thenAnswer(invocation -> {
        String host = invocation.getArgument(0);
        String connectorName = invocation.getArgument(2);
        ConnectorState connectorState = runningConnectors.get(key(host, connectorName));
        if (connectorState == null) {
            return Future.failedFuture(new ConnectRestException("PUT", "", 404, "Not found", "Connector name " + connectorName));
        }
        return Future.succeededFuture();
    });

    when(api.restartTask(any(), anyInt(), anyString(), anyInt())).thenAnswer(invocation -> {
        String host = invocation.getArgument(0);
        String connectorName = invocation.getArgument(2);
        ConnectorState connectorState = runningConnectors.get(key(host, connectorName));
        if (connectorState == null) {
            return Future.failedFuture(new ConnectRestException("PUT", "", 404, "Not found", "Connector name " + connectorName));
        }
        return Future.succeededFuture();
    });

    when(api.getConnectorTopics(any(), any(), anyInt(), anyString())).thenAnswer(invocation -> {
        String host = invocation.getArgument(1);
        String connectorName = invocation.getArgument(3);
        ConnectorState connectorState = runningConnectors.get(key(host, connectorName));
        if (connectorState == null) {
            return Future.failedFuture(new ConnectRestException("GET", String.format("/connectors/%s/topics", connectorName), 404, "Not Found", ""));
        }
        return Future.succeededFuture(List.of("my-topic"));
    });
}
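The mock keys its runningConnectors map by host and connector name. The key helper is not part of this excerpt, but from the host + "##" prefix handling in the list stub above it is presumably equivalent to the following sketch:

// Inferred from the "##" prefix handling above; not shown in the excerpt itself.
private static String key(String host, String connectorName) {
    return host + "##" + connectorName;
}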
Use of io.strimzi.api.kafka.model.connect.ConnectorPlugin in project strimzi-kafka-operator by strimzi.
In the class KafkaConnectAssemblyOperatorTest, the method testCreateOrUpdateDoesNotUpdateWithNoDiff:
@Test
public void testCreateOrUpdateDoesNotUpdateWithNoDiff(VertxTestContext context) {
ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(true);
var mockConnectOps = supplier.connectOperator;
DeploymentOperator mockDcOps = supplier.deploymentOperations;
PodDisruptionBudgetOperator mockPdbOps = supplier.podDisruptionBudgetOperator;
ConfigMapOperator mockCmOps = supplier.configMapOperations;
ServiceOperator mockServiceOps = supplier.serviceOperations;
NetworkPolicyOperator mockNetPolOps = supplier.networkPolicyOperator;
PodOperator mockPodOps = supplier.podOperations;
BuildConfigOperator mockBcOps = supplier.buildConfigOperations;
SecretOperator mockSecretOps = supplier.secretOperations;
CrdOperator<KubernetesClient, KafkaConnector, KafkaConnectorList> mockConnectorOps = supplier.kafkaConnectorOperator;
String kcName = "foo";
String kcNamespace = "test";
KafkaConnect kc = ResourceUtils.createEmptyKafkaConnect(kcNamespace, kcName);
KafkaConnectCluster connect = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kc, VERSIONS);
when(mockConnectorOps.listAsync(anyString(), any(Optional.class))).thenReturn(Future.succeededFuture(emptyList()));
when(mockConnectOps.get(kcNamespace, kcName)).thenReturn(kc);
when(mockConnectOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(kc));
when(mockConnectOps.updateStatusAsync(any(), any(KafkaConnect.class))).thenReturn(Future.succeededFuture());
when(mockServiceOps.get(kcNamespace, connect.getName())).thenReturn(connect.generateService());
when(mockDcOps.getAsync(kcNamespace, connect.getName())).thenReturn(Future.succeededFuture(connect.generateDeployment(Map.of(), true, null, null)));
when(mockDcOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
when(mockDcOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
when(mockSecretOps.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture());
ArgumentCaptor<String> serviceNameCaptor = ArgumentCaptor.forClass(String.class);
ArgumentCaptor<Service> serviceCaptor = ArgumentCaptor.forClass(Service.class);
when(mockServiceOps.reconcile(any(), eq(kcNamespace), serviceNameCaptor.capture(), serviceCaptor.capture())).thenReturn(Future.succeededFuture());
ArgumentCaptor<String> dcNameCaptor = ArgumentCaptor.forClass(String.class);
ArgumentCaptor<Deployment> dcCaptor = ArgumentCaptor.forClass(Deployment.class);
when(mockDcOps.reconcile(any(), eq(kcNamespace), dcNameCaptor.capture(), dcCaptor.capture())).thenReturn(Future.succeededFuture());
ArgumentCaptor<String> dcScaleUpNameCaptor = ArgumentCaptor.forClass(String.class);
ArgumentCaptor<Integer> dcScaleUpReplicasCaptor = ArgumentCaptor.forClass(Integer.class);
when(mockDcOps.scaleUp(any(), eq(kcNamespace), dcScaleUpNameCaptor.capture(), dcScaleUpReplicasCaptor.capture())).thenReturn(Future.succeededFuture());
ArgumentCaptor<String> dcScaleDownNameCaptor = ArgumentCaptor.forClass(String.class);
ArgumentCaptor<Integer> dcScaleDownReplicasCaptor = ArgumentCaptor.forClass(Integer.class);
when(mockDcOps.scaleDown(any(), eq(kcNamespace), dcScaleDownNameCaptor.capture(), dcScaleDownReplicasCaptor.capture())).thenReturn(Future.succeededFuture());
when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap())));
when(mockNetPolOps.reconcile(any(), eq(kc.getMetadata().getNamespace()), eq(KafkaConnectResources.deploymentName(kc.getMetadata().getName())), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy())));
when(mockPodOps.reconcile(any(), eq(kc.getMetadata().getNamespace()), eq(KafkaConnectResources.buildPodName(kc.getMetadata().getName())), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null)));
when(mockBcOps.reconcile(any(), eq(kc.getMetadata().getNamespace()), eq(KafkaConnectResources.buildConfigName(kc.getMetadata().getName())), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null)));
ArgumentCaptor<PodDisruptionBudget> pdbCaptor = ArgumentCaptor.forClass(PodDisruptionBudget.class);
when(mockPdbOps.reconcile(any(), anyString(), any(), pdbCaptor.capture())).thenReturn(Future.succeededFuture());
KafkaConnectApi mockConnectClient = mock(KafkaConnectApi.class);
when(mockConnectClient.list(anyString(), anyInt())).thenReturn(Future.succeededFuture(emptyList()));
ConnectorPlugin plugin1 = new ConnectorPluginBuilder().withConnectorClass("io.strimzi.MyClass").withType("sink").withVersion("1.0.0").build();
when(mockConnectClient.listConnectorPlugins(any(), anyString(), anyInt())).thenReturn(Future.succeededFuture(singletonList(plugin1)));
when(mockConnectClient.updateConnectLoggers(any(), anyString(), anyInt(), anyString(), any(OrderedProperties.class))).thenReturn(Future.succeededFuture());
KafkaConnectAssemblyOperator ops = new KafkaConnectAssemblyOperator(vertx, new PlatformFeaturesAvailability(true, kubernetesVersion), supplier, ResourceUtils.dummyClusterOperatorConfig(VERSIONS), x -> mockConnectClient);
Checkpoint async = context.checkpoint();
ops.createOrUpdate(new Reconciliation("test-trigger", KafkaConnect.RESOURCE_KIND, kcNamespace, kcName), kc)
    .onComplete(context.succeeding(v -> context.verify(() -> {
        // Verify service
        List<Service> capturedServices = serviceCaptor.getAllValues();
        assertThat(capturedServices, hasSize(1));
        // Verify Deployment Config
        List<Deployment> capturedDc = dcCaptor.getAllValues();
        assertThat(capturedDc, hasSize(1));
        // Verify scaleDown / scaleUp captors each recorded exactly one invocation
        assertThat(dcScaleDownNameCaptor.getAllValues(), hasSize(1));
        assertThat(dcScaleUpNameCaptor.getAllValues(), hasSize(1));
        // Verify PodDisruptionBudget
        List<PodDisruptionBudget> capturedPdb = pdbCaptor.getAllValues();
        assertThat(capturedPdb, hasSize(1));
        PodDisruptionBudget pdb = capturedPdb.get(0);
        assertThat(pdb.getMetadata().getName(), is(connect.getName()));
        assertThat(pdb, is(connect.generatePodDisruptionBudget()));
        async.flag();
    })));
}
Use of io.strimzi.api.kafka.model.connect.ConnectorPlugin in project strimzi-kafka-operator by strimzi.
In the class KafkaConnectAssemblyOperatorTest, the method testUpdateClusterScaleUp:
@Test
public void testUpdateClusterScaleUp(VertxTestContext context) {
final int scaleTo = 4;
ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(true);
var mockConnectOps = supplier.connectOperator;
DeploymentOperator mockDcOps = supplier.deploymentOperations;
PodDisruptionBudgetOperator mockPdbOps = supplier.podDisruptionBudgetOperator;
ConfigMapOperator mockCmOps = supplier.configMapOperations;
ServiceOperator mockServiceOps = supplier.serviceOperations;
NetworkPolicyOperator mockNetPolOps = supplier.networkPolicyOperator;
PodOperator mockPodOps = supplier.podOperations;
BuildConfigOperator mockBcOps = supplier.buildConfigOperations;
SecretOperator mockSecretOps = supplier.secretOperations;
CrdOperator<KubernetesClient, KafkaConnector, KafkaConnectorList> mockConnectorOps = supplier.kafkaConnectorOperator;
String kcName = "foo";
String kcNamespace = "test";
KafkaConnect kc = ResourceUtils.createEmptyKafkaConnect(kcNamespace, kcName);
KafkaConnectCluster connect = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kc, VERSIONS);
// Change replicas to create ScaleUp
kc.getSpec().setReplicas(scaleTo);
when(mockConnectorOps.listAsync(anyString(), any(Optional.class))).thenReturn(Future.succeededFuture(emptyList()));
when(mockConnectOps.get(kcNamespace, kcName)).thenReturn(kc);
when(mockConnectOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(kc));
when(mockConnectOps.updateStatusAsync(any(), any(KafkaConnect.class))).thenReturn(Future.succeededFuture());
when(mockServiceOps.get(kcNamespace, connect.getName())).thenReturn(connect.generateService());
when(mockDcOps.getAsync(kcNamespace, connect.getName())).thenReturn(Future.succeededFuture(connect.generateDeployment(Map.of(), true, null, null)));
when(mockDcOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
when(mockDcOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
when(mockSecretOps.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture());
when(mockServiceOps.reconcile(any(), eq(kcNamespace), any(), any())).thenReturn(Future.succeededFuture());
when(mockDcOps.reconcile(any(), eq(kcNamespace), any(), any())).thenReturn(Future.succeededFuture());
doAnswer(i -> Future.succeededFuture(scaleTo)).when(mockDcOps).scaleUp(any(), eq(kcNamespace), eq(connect.getName()), eq(scaleTo));
doAnswer(i -> Future.succeededFuture(scaleTo)).when(mockDcOps).scaleDown(any(), eq(kcNamespace), eq(connect.getName()), eq(scaleTo));
when(mockNetPolOps.reconcile(any(), eq(kc.getMetadata().getNamespace()), eq(KafkaConnectResources.deploymentName(kc.getMetadata().getName())), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy())));
when(mockPodOps.reconcile(any(), eq(kc.getMetadata().getNamespace()), eq(KafkaConnectResources.buildPodName(kc.getMetadata().getName())), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null)));
when(mockBcOps.reconcile(any(), eq(kc.getMetadata().getNamespace()), eq(KafkaConnectResources.buildConfigName(kc.getMetadata().getName())), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null)));
when(mockConnectOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new KafkaConnect())));
when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap())));
when(mockPdbOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new PodDisruptionBudget())));
KafkaConnectApi mockConnectClient = mock(KafkaConnectApi.class);
when(mockConnectClient.list(anyString(), anyInt())).thenReturn(Future.succeededFuture(emptyList()));
ConnectorPlugin plugin1 = new ConnectorPluginBuilder().withConnectorClass("io.strimzi.MyClass").withType("sink").withVersion("1.0.0").build();
when(mockConnectClient.listConnectorPlugins(any(), anyString(), anyInt())).thenReturn(Future.succeededFuture(singletonList(plugin1)));
when(mockConnectClient.updateConnectLoggers(any(), anyString(), anyInt(), anyString(), any(OrderedProperties.class))).thenReturn(Future.succeededFuture());
KafkaConnectAssemblyOperator ops = new KafkaConnectAssemblyOperator(vertx, new PlatformFeaturesAvailability(true, kubernetesVersion), supplier, ResourceUtils.dummyClusterOperatorConfig(VERSIONS), x -> mockConnectClient);
Checkpoint async = context.checkpoint();
ops.createOrUpdate(new Reconciliation("test-trigger", KafkaConnect.RESOURCE_KIND, kcNamespace, kcName), kc)
    .onComplete(context.succeeding(v -> context.verify(() -> {
        verify(mockDcOps).scaleUp(any(), eq(kcNamespace), eq(connect.getName()), eq(scaleTo));
        async.flag();
    })));
}