use of io.strimzi.api.kafka.model.KafkaConnect in project strimzi-kafka-operator by strimzi.
the class ConnectorMockTest method testConnectorNotReadyWhenExceptionFromConnectRestApi.
/**
 * Create the Connect cluster, then create a connector and verify that the connector
 * does not become Ready when the Connect REST API returns an error.
 */
@Test
public void testConnectorNotReadyWhenExceptionFromConnectRestApi() {
String connectName = "cluster";
String connectorName = "connector";
when(api.createOrUpdatePutRequest(any(), any(), anyInt(), anyString(), any()))
    .thenAnswer(invocation -> Future.failedFuture(
        new ConnectRestException("GET", "/foo", 500, "Internal server error", "Bad stuff happened")));
// NOTE: Clear runningConnectors as re-mocking it causes an entry to be added
runningConnectors.clear();
// Create KafkaConnect cluster and wait till it's ready
KafkaConnect connect = new KafkaConnectBuilder()
    .withNewMetadata()
        .withNamespace(NAMESPACE)
        .withName(connectName)
        .addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true")
    .endMetadata()
    .withNewSpec()
        .withReplicas(1)
    .endSpec()
    .build();
Crds.kafkaConnectOperation(client).inNamespace(NAMESPACE).create(connect);
waitForConnectReady(connectName);
// triggered at least once (Connect creation)
verify(api, atLeastOnce()).list(eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)),
        eq(KafkaConnectCluster.REST_API_PORT));
verify(api, never()).createOrUpdatePutRequest(any(),
        eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)),
        eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any());
// Create KafkaConnector, should not go ready
KafkaConnector connector = new KafkaConnectorBuilder()
    .withNewMetadata()
        .withName(connectorName)
        .withNamespace(NAMESPACE)
        .addToLabels(Labels.STRIMZI_CLUSTER_LABEL, connectName)
    .endMetadata()
    .withNewSpec()
    .endSpec()
    .build();
Crds.kafkaConnectorOperation(client).inNamespace(NAMESPACE).create(connector);
waitForConnectorNotReady(connectorName, "ConnectRestException", "GET /foo returned 500 (Internal server error): Bad stuff happened");
verify(api, times(2)).list(eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)),
        eq(KafkaConnectCluster.REST_API_PORT));
verify(api, times(2)).createOrUpdatePutRequest(any(),
        eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)),
        eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any());
assertThat(runningConnectors.keySet(), is(empty()));
}
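waitForConnectorNotReady is a helper defined elsewhere in ConnectorMockTest. As a rough illustration only, a status-polling helper of that shape could look like the sketch below; it assumes the KafkaConnector status carries a NotReady condition exposing a reason and a message, it reuses the test's client and NAMESPACE fields, and the timings are arbitrary.

// Hypothetical sketch of a status-polling helper (not the project's actual implementation).
// Assumes Condition exposes getType(), getReason() and getMessage().
private void waitForConnectorNotReady(String connectorName, String reason, String message) {
    long deadline = System.currentTimeMillis() + 30_000;
    while (System.currentTimeMillis() < deadline) {
        KafkaConnector current = Crds.kafkaConnectorOperation(client)
                .inNamespace(NAMESPACE).withName(connectorName).get();
        if (current != null && current.getStatus() != null && current.getStatus().getConditions() != null
                && current.getStatus().getConditions().stream()
                    .anyMatch(c -> "NotReady".equals(c.getType())
                            && reason.equals(c.getReason())
                            && message.equals(c.getMessage()))) {
            return;
        }
        try {
            Thread.sleep(100);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new RuntimeException(e);
        }
    }
    throw new AssertionError("Timed out waiting for connector " + connectorName + " to become NotReady");
}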
use of io.strimzi.api.kafka.model.KafkaConnect in project strimzi-kafka-operator by strimzi.
the class ConnectorMockTest method testConnectConnectorConnectorConnect.
/**
* Create connect, create connector, delete connector, delete connect
*/
@Test
public void testConnectConnectorConnectorConnect() {
String connectName = "cluster";
String connectorName = "connector";
// Create KafkaConnect cluster and wait till it's ready
KafkaConnect connect = new KafkaConnectBuilder()
    .withNewMetadata()
        .withNamespace(NAMESPACE)
        .withName(connectName)
        .addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true")
    .endMetadata()
    .withNewSpec()
        .withReplicas(1)
    .endSpec()
    .build();
Crds.kafkaConnectOperation(client).inNamespace(NAMESPACE).create(connect);
waitForConnectReady(connectName);
// list() could be triggered twice (creation followed by a status update), but waitForConnectReady may already be satisfied by a single call
verify(api, atLeastOnce()).list(eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)),
        eq(KafkaConnectCluster.REST_API_PORT));
verify(api, never()).createOrUpdatePutRequest(any(),
        eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)),
        eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any());
// Create KafkaConnector and wait till it's ready
KafkaConnector connector = new KafkaConnectorBuilder()
    .withNewMetadata()
        .withName(connectorName)
        .withNamespace(NAMESPACE)
        .addToLabels(Labels.STRIMZI_CLUSTER_LABEL, connectName)
    .endMetadata()
    .withNewSpec()
        .withTasksMax(1)
        .withClassName("Dummy")
    .endSpec()
    .build();
Crds.kafkaConnectorOperation(client).inNamespace(NAMESPACE).create(connector);
waitForConnectorReady(connectorName);
verify(api, times(2)).list(eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)),
        eq(KafkaConnectCluster.REST_API_PORT));
verify(api, times(1)).createOrUpdatePutRequest(any(),
        eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)),
        eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any());
assertThat(runningConnectors.keySet(), is(Collections.singleton(key("cluster-connect-api.ns.svc", connectorName))));
boolean connectorDeleted = Crds.kafkaConnectorOperation(client).inNamespace(NAMESPACE).withName(connectorName).delete();
assertThat(connectorDeleted, is(true));
waitFor("delete call on connect REST api", 1_000, 30_000, () -> runningConnectors.isEmpty());
// Verify connector is deleted from the connect via REST api
verify(api).delete(any(), eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)),
        eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName));
boolean connectDeleted = Crds.kafkaConnectOperation(client).inNamespace(NAMESPACE).withName(connectName).delete();
assertThat(connectDeleted, is(true));
}
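The waitFor call above comes from the test support code. Purely as a sketch, and assuming its arguments are a description, a poll interval in milliseconds, a timeout in milliseconds and a boolean condition, it could be implemented along these lines:

// Minimal sketch of a generic polling utility; the real Strimzi helper may differ
// (for example it may take a Supplier<Boolean> rather than a BooleanSupplier).
static void waitFor(String description, long pollIntervalMs, long timeoutMs, java.util.function.BooleanSupplier ready) {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (!ready.getAsBoolean()) {
        if (System.currentTimeMillis() > deadline) {
            throw new AssertionError("Timed out waiting for " + description);
        }
        try {
            Thread.sleep(pollIntervalMs);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new RuntimeException("Interrupted while waiting for " + description, e);
        }
    }
}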
use of io.strimzi.api.kafka.model.KafkaConnect in project strimzi-kafka-operator by strimzi.
the class KafkaConnectAssemblyOperatorMockTest method setConnectResource.
private void setConnectResource(KafkaConnect connectResource) {
mockKube = new MockKube();
mockClient = mockKube
    .withCustomResourceDefinition(Crds.kafkaConnect(), KafkaConnect.class, KafkaConnectList.class,
            KafkaConnect::getStatus, KafkaConnect::setStatus)
        .withInitialInstances(Collections.singleton(connectResource))
    .end()
    .withCustomResourceDefinition(Crds.kafkaConnector(), KafkaConnector.class, KafkaConnectorList.class,
            KafkaConnector::getStatus, KafkaConnector::setStatus)
    .end()
    .build();
}
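For context, a test would typically build the initial KafkaConnect resource first and hand it to setConnectResource before reconciling. A minimal usage sketch, reusing the CLUSTER_NAME, NAMESPACE and replicas fields of this test class:

// Usage sketch: seed the mocked Kubernetes client with an initial KafkaConnect resource.
setConnectResource(new KafkaConnectBuilder()
    .withNewMetadata()
        .withName(CLUSTER_NAME)
        .withNamespace(NAMESPACE)
    .endMetadata()
    .withNewSpec()
        .withReplicas(replicas)
    .endSpec()
    .build());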
use of io.strimzi.api.kafka.model.KafkaConnect in project strimzi-kafka-operator by strimzi.
the class KafkaConnectAssemblyOperatorMockTest method testPauseReconcileUnpause.
@Test
public void testPauseReconcileUnpause(VertxTestContext context) {
setConnectResource(new KafkaConnectBuilder()
    .withMetadata(new ObjectMetaBuilder()
            .withName(CLUSTER_NAME)
            .withNamespace(NAMESPACE)
            .withLabels(TestUtils.map("foo", "bar"))
            .withAnnotations(singletonMap("strimzi.io/pause-reconciliation", "true"))
            .build())
    .withNewSpec()
        .withReplicas(replicas)
    .endSpec()
    .build());
KafkaConnectApi mock = mock(KafkaConnectApi.class);
when(mock.list(anyString(), anyInt())).thenReturn(Future.succeededFuture(emptyList()));
when(mock.listConnectorPlugins(any(), anyString(), anyInt())).thenReturn(Future.succeededFuture(emptyList()));
Checkpoint async = context.checkpoint();
createConnectCluster(context, mock, true).onComplete(context.succeeding()).compose(v -> {
LOGGER.info("Reconciling again -> update");
return kco.reconcile(new Reconciliation("test-trigger", KafkaConnect.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME));
}).onComplete(context.succeeding(v -> context.verify(() -> {
Resource<KafkaConnect> resource = Crds.kafkaConnectOperation(mockClient).inNamespace(NAMESPACE).withName(CLUSTER_NAME);
if (resource.get().getStatus() == null) {
fail();
}
List<Condition> conditions = resource.get().getStatus().getConditions();
boolean conditionFound = false;
if (conditions != null && !conditions.isEmpty()) {
for (Condition condition : conditions) {
if ("ReconciliationPaused".equals(condition.getType())) {
conditionFound = true;
break;
}
}
}
assertTrue(conditionFound);
async.flag();
}))).compose(v -> {
setConnectResource(new KafkaConnectBuilder()
    .withMetadata(new ObjectMetaBuilder()
            .withName(CLUSTER_NAME)
            .withNamespace(NAMESPACE)
            .withLabels(TestUtils.map("foo", "bar"))
            .withAnnotations(singletonMap("strimzi.io/pause-reconciliation", "false"))
            .build())
    .withNewSpec()
        .withReplicas(replicas)
    .endSpec()
    .build());
LOGGER.info("Reconciling again -> update");
return kco.reconcile(new Reconciliation("test-trigger", KafkaConnect.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME));
}).onComplete(context.succeeding(v -> context.verify(() -> {
Resource<KafkaConnect> resource = Crds.kafkaConnectOperation(mockClient).inNamespace(NAMESPACE).withName(CLUSTER_NAME);
if (resource.get().getStatus() == null) {
fail();
}
List<Condition> conditions = resource.get().getStatus().getConditions();
boolean conditionFound = false;
if (conditions != null && !conditions.isEmpty()) {
for (Condition condition : conditions) {
if ("ReconciliationPaused".equals(condition.getType())) {
conditionFound = true;
break;
}
}
}
assertFalse(conditionFound);
async.flag();
})));
}
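The ReconciliationPaused check above is written twice as an explicit loop over the status conditions. Purely as a stylistic alternative (not the project's actual code), the same check could be expressed with the Streams API inside either verify block:

// Equivalent condition check using streams; getType() is the same accessor used in the loops above.
List<Condition> conditions = resource.get().getStatus().getConditions();
boolean paused = conditions != null && conditions.stream()
        .anyMatch(c -> "ReconciliationPaused".equals(c.getType()));
assertTrue(paused);   // or assertFalse(paused) after the annotation is set back to "false"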
use of io.strimzi.api.kafka.model.KafkaConnect in project strimzi-kafka-operator by strimzi.
the class KafkaConnectAssemblyOperatorTest method testCreateOrUpdateDoesNotUpdateWithNoDiff.
@Test
public void testCreateOrUpdateDoesNotUpdateWithNoDiff(VertxTestContext context) {
ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(true);
var mockConnectOps = supplier.connectOperator;
DeploymentOperator mockDcOps = supplier.deploymentOperations;
PodDisruptionBudgetOperator mockPdbOps = supplier.podDisruptionBudgetOperator;
ConfigMapOperator mockCmOps = supplier.configMapOperations;
ServiceOperator mockServiceOps = supplier.serviceOperations;
NetworkPolicyOperator mockNetPolOps = supplier.networkPolicyOperator;
PodOperator mockPodOps = supplier.podOperations;
BuildConfigOperator mockBcOps = supplier.buildConfigOperations;
SecretOperator mockSecretOps = supplier.secretOperations;
CrdOperator<KubernetesClient, KafkaConnector, KafkaConnectorList> mockConnectorOps = supplier.kafkaConnectorOperator;
String kcName = "foo";
String kcNamespace = "test";
KafkaConnect kc = ResourceUtils.createEmptyKafkaConnect(kcNamespace, kcName);
KafkaConnectCluster connect = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kc, VERSIONS);
when(mockConnectorOps.listAsync(anyString(), any(Optional.class))).thenReturn(Future.succeededFuture(emptyList()));
when(mockConnectOps.get(kcNamespace, kcName)).thenReturn(kc);
when(mockConnectOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(kc));
when(mockConnectOps.updateStatusAsync(any(), any(KafkaConnect.class))).thenReturn(Future.succeededFuture());
when(mockServiceOps.get(kcNamespace, connect.getName())).thenReturn(connect.generateService());
when(mockDcOps.getAsync(kcNamespace, connect.getName())).thenReturn(Future.succeededFuture(connect.generateDeployment(Map.of(), true, null, null)));
when(mockDcOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
when(mockDcOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
when(mockSecretOps.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture());
ArgumentCaptor<String> serviceNameCaptor = ArgumentCaptor.forClass(String.class);
ArgumentCaptor<Service> serviceCaptor = ArgumentCaptor.forClass(Service.class);
when(mockServiceOps.reconcile(any(), eq(kcNamespace), serviceNameCaptor.capture(), serviceCaptor.capture())).thenReturn(Future.succeededFuture());
ArgumentCaptor<String> dcNameCaptor = ArgumentCaptor.forClass(String.class);
ArgumentCaptor<Deployment> dcCaptor = ArgumentCaptor.forClass(Deployment.class);
when(mockDcOps.reconcile(any(), eq(kcNamespace), dcNameCaptor.capture(), dcCaptor.capture())).thenReturn(Future.succeededFuture());
ArgumentCaptor<String> dcScaleUpNameCaptor = ArgumentCaptor.forClass(String.class);
ArgumentCaptor<Integer> dcScaleUpReplicasCaptor = ArgumentCaptor.forClass(Integer.class);
when(mockDcOps.scaleUp(any(), eq(kcNamespace), dcScaleUpNameCaptor.capture(), dcScaleUpReplicasCaptor.capture())).thenReturn(Future.succeededFuture());
ArgumentCaptor<String> dcScaleDownNameCaptor = ArgumentCaptor.forClass(String.class);
ArgumentCaptor<Integer> dcScaleDownReplicasCaptor = ArgumentCaptor.forClass(Integer.class);
when(mockDcOps.scaleDown(any(), eq(kcNamespace), dcScaleDownNameCaptor.capture(), dcScaleDownReplicasCaptor.capture())).thenReturn(Future.succeededFuture());
when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap())));
when(mockNetPolOps.reconcile(any(), eq(kc.getMetadata().getNamespace()),
        eq(KafkaConnectResources.deploymentName(kc.getMetadata().getName())), any()))
    .thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy())));
when(mockPodOps.reconcile(any(), eq(kc.getMetadata().getNamespace()),
        eq(KafkaConnectResources.buildPodName(kc.getMetadata().getName())), eq(null)))
    .thenReturn(Future.succeededFuture(ReconcileResult.noop(null)));
when(mockBcOps.reconcile(any(), eq(kc.getMetadata().getNamespace()),
        eq(KafkaConnectResources.buildConfigName(kc.getMetadata().getName())), eq(null)))
    .thenReturn(Future.succeededFuture(ReconcileResult.noop(null)));
ArgumentCaptor<PodDisruptionBudget> pdbCaptor = ArgumentCaptor.forClass(PodDisruptionBudget.class);
when(mockPdbOps.reconcile(any(), anyString(), any(), pdbCaptor.capture())).thenReturn(Future.succeededFuture());
KafkaConnectApi mockConnectClient = mock(KafkaConnectApi.class);
when(mockConnectClient.list(anyString(), anyInt())).thenReturn(Future.succeededFuture(emptyList()));
ConnectorPlugin plugin1 = new ConnectorPluginBuilder().withConnectorClass("io.strimzi.MyClass").withType("sink").withVersion("1.0.0").build();
when(mockConnectClient.listConnectorPlugins(any(), anyString(), anyInt())).thenReturn(Future.succeededFuture(singletonList(plugin1)));
when(mockConnectClient.updateConnectLoggers(any(), anyString(), anyInt(), anyString(), any(OrderedProperties.class))).thenReturn(Future.succeededFuture());
KafkaConnectAssemblyOperator ops = new KafkaConnectAssemblyOperator(vertx,
        new PlatformFeaturesAvailability(true, kubernetesVersion), supplier,
        ResourceUtils.dummyClusterOperatorConfig(VERSIONS), x -> mockConnectClient);
Checkpoint async = context.checkpoint();
ops.createOrUpdate(new Reconciliation("test-trigger", KafkaConnect.RESOURCE_KIND, kcNamespace, kcName), kc)
    .onComplete(context.succeeding(v -> context.verify(() -> {
// Verify service
List<Service> capturedServices = serviceCaptor.getAllValues();
assertThat(capturedServices, hasSize(1));
// Verify Deployment
List<Deployment> capturedDc = dcCaptor.getAllValues();
assertThat(capturedDc, hasSize(1));
// Verify scaleDown / scaleUp were each invoked only once (the replica count is unchanged, so no further scaling calls are expected)
assertThat(dcScaleDownNameCaptor.getAllValues(), hasSize(1));
assertThat(dcScaleUpNameCaptor.getAllValues(), hasSize(1));
// Verify PodDisruptionBudget
List<PodDisruptionBudget> capturedPdb = pdbCaptor.getAllValues();
assertThat(capturedPdb, hasSize(1));
PodDisruptionBudget pdb = capturedPdb.get(0);
assertThat(pdb.getMetadata().getName(), is(connect.getName()));
assertThat(pdb, is(connect.generatePodDisruptionBudget()));
async.flag();
})));
}
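To make the "no diff" intent more explicit, a follow-up assertion could compare the captured Deployment against the one generated directly from the custom resource. This is a sketch only, not part of the original test; the generateDeployment arguments simply mirror the getAsync stubbing earlier in the test:

// Sketch: with no diff, the reconciled Deployment should match the one generated from the CR.
Deployment expected = connect.generateDeployment(Map.of(), true, null, null);
assertThat(capturedDc.get(0).getMetadata().getName(), is(expected.getMetadata().getName()));
assertThat(capturedDc.get(0).getSpec().getReplicas(), is(expected.getSpec().getReplicas()));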