Use of io.strimzi.api.kafka.model.KafkaConnector in project cos-fleetshard by bf2fc6cc711aee1a0c2a.
The class KafkaConnectorSteps, method kctr_has_a_path_containing_object.
@And("the kctr has an array at path {string} containing:")
public void kctr_has_a_path_containing_object(String path, DataTable elements) {
KafkaConnector res = kctr();
assertThat(res).isNotNull();
assertThatJson(JacksonUtil.asJsonNode(res)).inPath(path).isArray().containsAll(elements.asList().stream().map(e -> ctx.resolvePlaceholders(e)).map(e -> Serialization.unmarshal(e, JsonNode.class)).collect(Collectors.toList()));
}
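The assertion chain comes from JsonUnit's AssertJ integration. A minimal, self-contained sketch of the same chain outside Cucumber, assuming json-unit-assertj and Jackson are on the classpath; the JSON document and path below are illustrative, not taken from cos-fleetshard:

import static net.javacrumbs.jsonunit.assertj.JsonAssertions.assertThatJson;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.util.List;

public class ArrayPathAssertionSketch {
    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        // Hypothetical resource snapshot; in the step above this is JacksonUtil.asJsonNode(kctr()).
        JsonNode resource = mapper.readTree(
            "{\"spec\":{\"config\":[{\"key\":\"topics\",\"value\":\"timer-topic\"}]}}");
        List<JsonNode> expected = List.of(
            mapper.readTree("{\"key\":\"topics\",\"value\":\"timer-topic\"}"));

        // Navigate to the JSON path, assert it is an array, and check it contains all expected elements.
        assertThatJson(resource).inPath("$.spec.config").isArray().containsAll(expected);
    }
}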
Use of io.strimzi.api.kafka.model.KafkaConnector in project cos-fleetshard by bf2fc6cc711aee1a0c2a.
The class KafkaConnectorSteps, method kctr_label_contains.
@And("the kctr has labels containing:")
public void kctr_label_contains(DataTable table) {
KafkaConnector res = kctr();
assertThat(res).isNotNull();
assertThatDataTable(table, ctx::resolvePlaceholders).matches(res.getMetadata().getLabels());
}
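assertThatDataTable is a cos-fleetshard test helper; an equivalent check on the resource labels can also be expressed with plain AssertJ map assertions. A small fragment reusing the res variable from the step above; the label key and value are purely illustrative:

// Hypothetical expected labels; in the step above the values come from the Cucumber data table.
Map<String, String> expectedLabels = Map.of("example.org/cluster", "my-cluster");
assertThat(res.getMetadata().getLabels()).containsAllEntriesOf(expectedLabels);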
Use of io.strimzi.api.kafka.model.KafkaConnector in project strimzi-kafka-operator by strimzi.
The class AbstractConnectOperator, method reconcileConnectors.
/**
 * Reconciles all the connectors selected by the given Connect instance, updating each connector's status with the result.
 *
 * @param reconciliation The reconciliation
 * @param connect        The Connect resource
 * @param connectStatus  Status of the KafkaConnect resource (will be used to set the available
 *                       connector plugins)
 * @param scaledToZero   Indicates whether the related Connect cluster is currently scaled to 0 replicas
 * @param desiredLogging The desired logging configuration
 * @param defaultLogging The default logging configuration
 *
 * @return A future, failed if any of the connectors' statuses could not be updated.
 */
protected Future<Void> reconcileConnectors(Reconciliation reconciliation, T connect, S connectStatus, boolean scaledToZero, String desiredLogging, OrderedProperties defaultLogging) {
    String connectName = connect.getMetadata().getName();
    String namespace = connect.getMetadata().getNamespace();
    String host = KafkaConnectResources.qualifiedServiceName(connectName, namespace);

    if (!isUseResources(connect)) {
        return Future.succeededFuture();
    }

    if (scaledToZero) {
        return connectorOperator.listAsync(namespace, Optional.of(new LabelSelectorBuilder().addToMatchLabels(Labels.STRIMZI_CLUSTER_LABEL, connectName).build()))
                .compose(connectors -> CompositeFuture.join(connectors.stream()
                        .map(connector -> maybeUpdateConnectorStatus(reconciliation, connector, null, zeroReplicas(namespace, connectName)))
                        .collect(Collectors.toList())))
                .map((Void) null);
    }

    KafkaConnectApi apiClient = connectClientProvider.apply(vertx);

    return CompositeFuture.join(
            apiClient.list(host, port),
            connectorOperator.listAsync(namespace, Optional.of(new LabelSelectorBuilder().addToMatchLabels(Labels.STRIMZI_CLUSTER_LABEL, connectName).build())),
            apiClient.listConnectorPlugins(reconciliation, host, port),
            apiClient.updateConnectLoggers(reconciliation, host, port, desiredLogging, defaultLogging)
    ).compose(cf -> {
        List<String> runningConnectorNames = cf.resultAt(0);
        List<KafkaConnector> desiredConnectors = cf.resultAt(1);
        List<ConnectorPlugin> connectorPlugins = cf.resultAt(2);

        LOGGER.debugCr(reconciliation, "Setting list of connector plugins in Kafka Connect status");
        connectStatus.setConnectorPlugins(connectorPlugins);

        connectorsResourceCounter(namespace).set(desiredConnectors.size());

        Set<String> deleteConnectorNames = new HashSet<>(runningConnectorNames);
        deleteConnectorNames.removeAll(desiredConnectors.stream().map(c -> c.getMetadata().getName()).collect(Collectors.toSet()));
        LOGGER.debugCr(reconciliation, "{} cluster: delete connectors: {}", kind(), deleteConnectorNames);
        Stream<Future<Void>> deletionFutures = deleteConnectorNames.stream()
                .map(connectorName -> reconcileConnectorAndHandleResult(reconciliation, host, apiClient, true, connectorName, null));

        LOGGER.debugCr(reconciliation, "{} cluster: required connectors: {}", kind(), desiredConnectors);
        Stream<Future<Void>> createUpdateFutures = desiredConnectors.stream()
                .map(connector -> reconcileConnectorAndHandleResult(reconciliation, host, apiClient, true, connector.getMetadata().getName(), connector));

        return CompositeFuture.join(Stream.concat(deletionFutures, createUpdateFutures).collect(Collectors.toList())).map((Void) null);
    }).recover(error -> {
        if (error instanceof ConnectTimeoutException) {
            Promise<Void> connectorStatuses = Promise.promise();
            LOGGER.warnCr(reconciliation, "Failed to connect to the REST API => trying to update the connector status");

            connectorOperator.listAsync(namespace, Optional.of(new LabelSelectorBuilder().addToMatchLabels(Labels.STRIMZI_CLUSTER_LABEL, connectName).build()))
                    .compose(connectors -> CompositeFuture.join(connectors.stream()
                            .map(connector -> maybeUpdateConnectorStatus(reconciliation, connector, null, error))
                            .collect(Collectors.toList())))
                    .onComplete(ignore -> connectorStatuses.fail(error));

            return connectorStatuses.future();
        } else {
            return Future.failedFuture(error);
        }
    });
}
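The heart of the reconciliation above is a set difference: connectors reported as running by the Connect REST API but no longer backed by a KafkaConnector resource are deleted, while every desired resource is created or updated. A standalone sketch of that diff on plain collections; the connector names and values are illustrative only:

import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class ConnectorDiffSketch {
    public static void main(String[] args) {
        // Hypothetical state: what the Connect REST API reports vs. what KafkaConnector resources request.
        List<String> runningConnectorNames = List.of("old-sink", "file-source");
        Set<String> desiredConnectorNames = Set.of("file-source", "new-sink");

        // Connectors to delete: running in the cluster but no longer desired.
        Set<String> deleteConnectorNames = new HashSet<>(runningConnectorNames);
        deleteConnectorNames.removeAll(desiredConnectorNames);

        System.out.println("delete connectors: " + deleteConnectorNames);          // [old-sink]
        System.out.println("create/update connectors: " + desiredConnectorNames);  // every desired connector
    }
}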
Use of io.strimzi.api.kafka.model.KafkaConnector in project strimzi-kafka-operator by strimzi.
The class ConnectorMockTest, method testConnectorUnknownField.
@Test
public void testConnectorUnknownField() {
    String connectName = "cluster";
    String connectorName = "connector";

    // Create KafkaConnect cluster and wait till it's ready
    Crds.kafkaConnectOperation(client).inNamespace(NAMESPACE).create(new KafkaConnectBuilder()
            .withNewMetadata()
                .withNamespace(NAMESPACE).withName(connectName)
                .addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true")
            .endMetadata()
            .withNewSpec().withReplicas(1).endSpec()
            .build());
    waitForConnectReady(connectName);

    String yaml = "apiVersion: kafka.strimzi.io/v1alpha1\n" +
            "kind: KafkaConnector\n" +
            "metadata:\n" +
            "  name: " + connectorName + "\n" +
            "  namespace: " + NAMESPACE + "\n" +
            "  labels:\n" +
            "    strimzi.io/cluster: " + connectName + "\n" +
            "spec:\n" +
            "  class: EchoSink\n" +
            "  tasksMax: 1\n" +
            "  unknownField: \"value\"\n" +
            "  config:\n" +
            "    level: INFO\n" +
            "    topics: timer-topic";
    KafkaConnector kcr = TestUtils.fromYamlString(yaml, KafkaConnector.class);
    Crds.kafkaConnectorOperation(client).inNamespace(NAMESPACE).create(kcr);

    waitForConnectorReady(connectorName);
    waitForConnectorState(connectorName, "RUNNING");
    waitForConnectorCondition(connectorName, "Warning", "UnknownFields");
}
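waitForConnectorCondition polls until the expected status condition appears. A hedged fragment showing how the same Warning/UnknownFields condition could be read directly from the resource, reusing client, NAMESPACE and connectorName from the test above and only standard fabric8 client and Strimzi status model accessors (no polling):

KafkaConnector current = Crds.kafkaConnectorOperation(client)
        .inNamespace(NAMESPACE)
        .withName(connectorName)
        .get();

// True once the operator has flagged the unrecognised spec field on the connector status.
boolean hasUnknownFieldWarning = current.getStatus() != null
        && current.getStatus().getConditions().stream()
                .anyMatch(c -> "Warning".equals(c.getType()) && "UnknownFields".equals(c.getReason()));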
Use of io.strimzi.api.kafka.model.KafkaConnector in project strimzi-kafka-operator by strimzi.
The class ConnectorMockTest, method testChangeStrimziClusterLabel.
/**
 * Changes the cluster label from one cluster to another,
 * checks that the connector is deleted from the old cluster,
 * and checks that the connector is added to the new cluster.
 */
@Test
public void testChangeStrimziClusterLabel(VertxTestContext context) throws InterruptedException {
    String oldConnectClusterName = "cluster1";
    String newConnectClusterName = "cluster2";
    String connectorName = "connector";

    // Create two connect clusters
    KafkaConnect connect = new KafkaConnectBuilder()
            .withNewMetadata()
                .withNamespace(NAMESPACE).withName(oldConnectClusterName)
                .addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true")
            .endMetadata()
            .withNewSpec().withReplicas(1).endSpec()
            .build();
    Crds.kafkaConnectOperation(client).inNamespace(NAMESPACE).create(connect);
    waitForConnectReady(oldConnectClusterName);

    KafkaConnect connect2 = new KafkaConnectBuilder()
            .withNewMetadata()
                .withNamespace(NAMESPACE).withName(newConnectClusterName)
                .addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true")
            .endMetadata()
            .withNewSpec().withReplicas(1).endSpec()
            .build();
    Crds.kafkaConnectOperation(client).inNamespace(NAMESPACE).create(connect2);
    waitForConnectReady(newConnectClusterName);

    // Create KafkaConnector associated with the first cluster using the Strimzi cluster label and wait till it's ready
    KafkaConnector connector = new KafkaConnectorBuilder()
            .withNewMetadata()
                .withName(connectorName).withNamespace(NAMESPACE)
                .addToLabels(Labels.STRIMZI_CLUSTER_LABEL, oldConnectClusterName)
            .endMetadata()
            .withNewSpec().withTasksMax(1).withClassName("Dummy").endSpec()
            .build();
    Crds.kafkaConnectorOperation(client).inNamespace(NAMESPACE).create(connector);
    waitForConnectorReady(connectorName);
    // the connector was created or updated once on the first cluster
    verify(api, times(1)).createOrUpdatePutRequest(any(),
            eq(KafkaConnectResources.qualifiedServiceName(oldConnectClusterName, NAMESPACE)),
            eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any());
    // never triggered for the second cluster, as the connector's Strimzi cluster label does not match cluster 2
    verify(api, never()).createOrUpdatePutRequest(any(),
            eq(KafkaConnectResources.qualifiedServiceName(newConnectClusterName, NAMESPACE)),
            eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any());
    // patch connector with new Strimzi cluster label associated with cluster 2
    Crds.kafkaConnectorOperation(client).inNamespace(NAMESPACE).withName(connectorName).patch(new KafkaConnectorBuilder()
            .withNewMetadata()
                .withName(connectorName).withNamespace(NAMESPACE)
                .addToLabels(Labels.STRIMZI_CLUSTER_LABEL, newConnectClusterName)
            .endMetadata()
            .withNewSpec().withTasksMax(1).withClassName("Dummy").endSpec()
            .build());
    waitForConnectorReady(connectorName);

    // Note: The connector does not get deleted immediately from the first cluster, only on the next timed reconciliation
    verify(api, never()).delete(any(),
            eq(KafkaConnectResources.qualifiedServiceName(oldConnectClusterName, NAMESPACE)),
            eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName));
    verify(api, times(1)).createOrUpdatePutRequest(any(),
            eq(KafkaConnectResources.qualifiedServiceName(newConnectClusterName, NAMESPACE)),
            eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any());

    // Force reconciliation to assert connector deletion request occurs for first cluster
    Checkpoint async = context.checkpoint();
    kafkaConnectOperator.reconcile(new Reconciliation("test", "KafkaConnect", NAMESPACE, oldConnectClusterName))
            .onComplete(context.succeeding(v -> context.verify(() -> {
                verify(api, times(1)).delete(any(),
                        eq(KafkaConnectResources.qualifiedServiceName(oldConnectClusterName, NAMESPACE)),
                        eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName));
                async.flag();
            })));
}