Use of io.strimzi.api.kafka.model.KafkaConnectorBuilder in project strimzi-kafka-operator by strimzi.
From the class ConnectorMockTest, method testConnectorNotReadyWhenConnectDoesNotExist.
@Test
public void testConnectorNotReadyWhenConnectDoesNotExist() {
    String connectorName = "connector";
    KafkaConnector connector = new KafkaConnectorBuilder()
            .withNewMetadata()
                .withNamespace(NAMESPACE)
                .withName(connectorName)
                .addToLabels(Labels.STRIMZI_CLUSTER_LABEL, "cluster")
            .endMetadata()
            .build();
    Crds.kafkaConnectorOperation(client).inNamespace(NAMESPACE).create(connector);
    waitForConnectorNotReady(connectorName, "NoSuchResourceException",
            "KafkaConnect resource 'cluster' identified by label '" + Labels.STRIMZI_CLUSTER_LABEL + "' does not exist in namespace ns.");
}
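The waitForConnectorNotReady helper is not part of this excerpt. A minimal sketch of how such a wait could be written, assuming the operator reports the failure as a NotReady condition whose reason is the exception class name, and reusing the waitFor helper that appears later in this class:

// Hypothetical sketch of the wait helper used above; the real ConnectorMockTest
// implementation may differ. Assumes a "NotReady" condition carrying the
// exception class name as its reason and the error text as its message.
private void waitForConnectorNotReady(String connectorName, String reason, String message) {
    waitFor("connector " + connectorName + " to become NotReady", 1_000, 30_000, () -> {
        KafkaConnector kc = Crds.kafkaConnectorOperation(client).inNamespace(NAMESPACE).withName(connectorName).get();
        return kc != null && kc.getStatus() != null && kc.getStatus().getConditions().stream()
                .anyMatch(c -> "NotReady".equals(c.getType())
                        && reason.equals(c.getReason())
                        && message.equals(c.getMessage()));
    });
}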
Use of io.strimzi.api.kafka.model.KafkaConnectorBuilder in project strimzi-kafka-operator by strimzi.
From the class ConnectorMockTest, method testConnectorNotReadyWhenExceptionFromConnectRestApi.
/**
 * Create connect, then create a connector that never becomes ready because the Connect REST API call fails
 */
@Test
public void testConnectorNotReadyWhenExceptionFromConnectRestApi() {
    String connectName = "cluster";
    String connectorName = "connector";
    when(api.createOrUpdatePutRequest(any(), any(), anyInt(), anyString(), any()))
            .thenAnswer(invocation -> Future.failedFuture(
                    new ConnectRestException("GET", "/foo", 500, "Internal server error", "Bad stuff happened")));
    // NOTE: Clear runningConnectors as re-mocking it causes an entry to be added
    runningConnectors.clear();
    // Create KafkaConnect cluster and wait till it's ready
    KafkaConnect connect = new KafkaConnectBuilder()
            .withNewMetadata().withNamespace(NAMESPACE).withName(connectName)
                .addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true").endMetadata()
            .withNewSpec().withReplicas(1).endSpec()
            .build();
    Crds.kafkaConnectOperation(client).inNamespace(NAMESPACE).create(connect);
    waitForConnectReady(connectName);
    // triggered at least once (Connect creation)
    verify(api, atLeastOnce()).list(eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT));
    verify(api, never()).createOrUpdatePutRequest(any(), eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any());
    // Create KafkaConnector, should not go ready
    KafkaConnector connector = new KafkaConnectorBuilder()
            .withNewMetadata().withName(connectorName).withNamespace(NAMESPACE)
                .addToLabels(Labels.STRIMZI_CLUSTER_LABEL, connectName).endMetadata()
            .withNewSpec().endSpec()
            .build();
    Crds.kafkaConnectorOperation(client).inNamespace(NAMESPACE).create(connector);
    waitForConnectorNotReady(connectorName, "ConnectRestException",
            "GET /foo returned 500 (Internal server error): Bad stuff happened");
    verify(api, times(2)).list(eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT));
    verify(api, times(2)).createOrUpdatePutRequest(any(), eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any());
    assertThat(runningConnectors.keySet(), is(empty()));
}
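The failure surfaces on the KafkaConnector's status conditions. A hedged follow-up snippet, not in the original test, that dumps those conditions might look like this, assuming the operator records the exception class name as the condition reason:

// Hypothetical debugging aid (not part of the original test): print the
// conditions the operator set on the failed connector.
KafkaConnector failed = Crds.kafkaConnectorOperation(client).inNamespace(NAMESPACE).withName(connectorName).get();
failed.getStatus().getConditions().forEach(condition ->
        System.out.println(condition.getType() + " / " + condition.getReason() + ": " + condition.getMessage()));
// Expected to include a line roughly like:
// NotReady / ConnectRestException: GET /foo returned 500 (Internal server error): Bad stuff happened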
Use of io.strimzi.api.kafka.model.KafkaConnectorBuilder in project strimzi-kafka-operator by strimzi.
From the class ConnectorMockTest, method testConnectConnectorConnectorConnect.
/**
* Create connect, create connector, delete connector, delete connect
*/
@Test
public void testConnectConnectorConnectorConnect() {
    String connectName = "cluster";
    String connectorName = "connector";
    // Create KafkaConnect cluster and wait till it's ready
    KafkaConnect connect = new KafkaConnectBuilder()
            .withNewMetadata().withNamespace(NAMESPACE).withName(connectName)
                .addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true").endMetadata()
            .withNewSpec().withReplicas(1).endSpec()
            .build();
    Crds.kafkaConnectOperation(client).inNamespace(NAMESPACE).create(connect);
    waitForConnectReady(connectName);
    // could be triggered twice (creation followed by status update) but waitForConnectReady could be satisfied with single
    verify(api, atLeastOnce()).list(eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT));
    verify(api, never()).createOrUpdatePutRequest(any(), eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any());
    // Create KafkaConnector and wait till it's ready
    KafkaConnector connector = new KafkaConnectorBuilder()
            .withNewMetadata().withName(connectorName).withNamespace(NAMESPACE)
                .addToLabels(Labels.STRIMZI_CLUSTER_LABEL, connectName).endMetadata()
            .withNewSpec().withTasksMax(1).withClassName("Dummy").endSpec()
            .build();
    Crds.kafkaConnectorOperation(client).inNamespace(NAMESPACE).create(connector);
    waitForConnectorReady(connectorName);
    verify(api, times(2)).list(eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT));
    verify(api, times(1)).createOrUpdatePutRequest(any(), eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any());
    assertThat(runningConnectors.keySet(), is(Collections.singleton(key("cluster-connect-api.ns.svc", connectorName))));
    boolean connectorDeleted = Crds.kafkaConnectorOperation(client).inNamespace(NAMESPACE).withName(connectorName).delete();
    assertThat(connectorDeleted, is(true));
    waitFor("delete call on connect REST api", 1_000, 30_000, () -> runningConnectors.isEmpty());
    // Verify connector is deleted from the connect via REST api
    verify(api).delete(any(), eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName));
    boolean connectDeleted = Crds.kafkaConnectOperation(client).inNamespace(NAMESPACE).withName(connectName).delete();
    assertThat(connectDeleted, is(true));
}
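The literal host "cluster-connect-api.ns.svc" in the assertion above is simply the qualified Connect REST API service name for a cluster named "cluster" in namespace "ns". As a small sanity check that is not in the original test, and assuming NAMESPACE is "ns", the same helper used in the verify(...) calls resolves to that string:

// Hypothetical sanity check: the hard-coded host in the key(...) assertion matches
// the qualified service name helper, assuming NAMESPACE == "ns".
assertThat(KafkaConnectResources.qualifiedServiceName("cluster", NAMESPACE), is("cluster-connect-api.ns.svc"));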
Use of io.strimzi.api.kafka.model.KafkaConnectorBuilder in project cos-fleetshard by bf2fc6cc711aee1a0c2a.
From the class DebeziumOperandControllerTest, method computeStatus.
@ParameterizedTest
@MethodSource
void computeStatus(String connectorState, String conditionType, String conditionReason, String expectedConnectorState) {
    ConnectorStatusSpec status = new ConnectorStatusSpec();
    DebeziumOperandSupport.computeStatus(status, new KafkaConnectorBuilder()
            .withStatus(new KafkaConnectorStatusBuilder()
                    .addToConditions(new ConditionBuilder().withType(conditionType).withReason(conditionReason).build())
                    .addToConnectorStatus("connector",
                            new org.bf2.cos.fleetshard.operator.debezium.model.KafkaConnectorStatusBuilder().withState(connectorState).build())
                    .build())
            .build());
    assertThat(status.getPhase()).isEqualTo(expectedConnectorState);
    assertThat(status.getConditions()).anySatisfy(condition -> {
        assertThat(condition).hasFieldOrPropertyWithValue("type", conditionType).hasFieldOrPropertyWithValue("reason", conditionReason);
    });
}
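@MethodSource with no value makes JUnit 5 look for a static factory method with the same name as the test, here computeStatus(), that supplies the argument tuples. That provider is not part of this excerpt; a hedged sketch with made-up example rows, assuming imports of java.util.stream.Stream and org.junit.jupiter.params.provider.Arguments:

// Hypothetical argument provider for the parameterized test above; the rows in the
// real DebeziumOperandControllerTest may differ. Each row is
// (connectorState, conditionType, conditionReason, expectedConnectorState).
static Stream<Arguments> computeStatus() {
    return Stream.of(
            Arguments.of("RUNNING", "Ready", "ConnectorReady", "ready"),
            Arguments.of("FAILED", "NotReady", "ConnectorFailed", "failed"));
}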
Use of io.strimzi.api.kafka.model.KafkaConnectorBuilder in project strimzi by strimzi.
From the class ConnectorMockTest, method testConnectorRestart.
/**
* Create connect, create connector, restart connector
*/
@Test
public void testConnectorRestart() {
    String connectName = "cluster";
    String connectorName = "connector";
    // Create KafkaConnect cluster and wait till it's ready
    Crds.kafkaConnectOperation(client).inNamespace(NAMESPACE).create(new KafkaConnectBuilder()
            .withNewMetadata().withNamespace(NAMESPACE).withName(connectName)
                .addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true").endMetadata()
            .withNewSpec().withReplicas(1).endSpec()
            .build());
    waitForConnectReady(connectName);
    // could be triggered twice (creation followed by status update) but waitForConnectReady could be satisfied with single
    verify(api, atLeastOnce()).list(eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT));
    verify(api, never()).createOrUpdatePutRequest(any(), eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any());
    // Create KafkaConnector and wait till it's ready
    Crds.kafkaConnectorOperation(client).inNamespace(NAMESPACE).create(new KafkaConnectorBuilder()
            .withNewMetadata().withName(connectorName).withNamespace(NAMESPACE)
                .addToLabels(Labels.STRIMZI_CLUSTER_LABEL, connectName).endMetadata()
            .withNewSpec().withTasksMax(1).withClassName("Dummy").endSpec()
            .build());
    waitForConnectorReady(connectorName);
    waitForConnectorState(connectorName, "RUNNING");
    verify(api, times(2)).list(eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT));
    verify(api, times(1)).createOrUpdatePutRequest(any(), eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any());
    assertThat(runningConnectors.keySet(), is(Collections.singleton(key("cluster-connect-api.ns.svc", connectorName))));
    verify(api, never()).restart(eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName));
    verify(api, never()).restartTask(eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), eq(0));
    // Annotate the connector to request a restart, then wait for the operator to act and remove the annotation
    Crds.kafkaConnectorOperation(client).inNamespace(NAMESPACE).withName(connectorName).edit(connector ->
            new KafkaConnectorBuilder(connector)
                    .editMetadata().addToAnnotations(Annotations.ANNO_STRIMZI_IO_RESTART, "true").endMetadata()
                    .build());
    waitForConnectorReady(connectorName);
    waitForConnectorState(connectorName, "RUNNING");
    waitForRemovedAnnotation(connectorName, Annotations.ANNO_STRIMZI_IO_RESTART);
    verify(api, times(1)).restart(eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName));
    verify(api, never()).restartTask(eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), eq(0));
}
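Strimzi can also restart a single task via an annotation, and the api mock in this test already exposes restartTask(...). A hedged sketch of how the test body above could continue for a restart of task 0, assuming Annotations.ANNO_STRIMZI_IO_RESTART_TASK holds the strimzi.io/restart-task annotation key and takes the task id as its value:

// Hypothetical continuation (not in the original test): request a restart of task 0 only.
Crds.kafkaConnectorOperation(client).inNamespace(NAMESPACE).withName(connectorName).edit(ctr ->
        new KafkaConnectorBuilder(ctr)
                .editMetadata().addToAnnotations(Annotations.ANNO_STRIMZI_IO_RESTART_TASK, "0").endMetadata()
                .build());
waitForConnectorReady(connectorName);
waitForRemovedAnnotation(connectorName, Annotations.ANNO_STRIMZI_IO_RESTART_TASK);
verify(api, times(1)).restartTask(eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), eq(0));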