use of io.strimzi.api.kafka.model.KafkaConnector in project strimzi by strimzi.
From class ConnectorMockTest, method testConnectorUnknownField.
@Test
public void testConnectorUnknownField() {
String connectName = "cluster";
String connectorName = "connector";
// Create KafkaConnect cluster and wait till it's ready
Crds.kafkaConnectOperation(client).inNamespace(NAMESPACE).create(new KafkaConnectBuilder()
    .withNewMetadata()
        .withNamespace(NAMESPACE)
        .withName(connectName)
        .addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true")
    .endMetadata()
    .withNewSpec()
        .withReplicas(1)
    .endSpec()
    .build());
waitForConnectReady(connectName);
String yaml = "apiVersion: kafka.strimzi.io/v1beta2\n" +
    "kind: KafkaConnector\n" +
    "metadata:\n" +
    "  name: " + connectorName + "\n" +
    "  namespace: " + NAMESPACE + "\n" +
    "  labels:\n" +
    "    strimzi.io/cluster: " + connectName + "\n" +
    "spec:\n" +
    "  class: EchoSink\n" +
    "  tasksMax: 1\n" +
    "  unknownField: \"value\"\n" +
    "  config:\n" +
    "    level: INFO\n" +
    "    topics: timer-topic";
KafkaConnector kcr = TestUtils.fromYamlString(yaml, KafkaConnector.class);
Crds.kafkaConnectorOperation(client).inNamespace(NAMESPACE).create(kcr);
waitForConnectorReady(connectorName);
waitForConnectorState(connectorName, "RUNNING");
waitForConnectorCondition(connectorName, "Warning", "UnknownFields");
}
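As a follow-up sketch (not part of the original test), the warning condition that waitForConnectorCondition polls for could also be inspected directly on the connector status. getStatus() and getConditions() are part of the Strimzi status API; the surrounding fetch and assertion are illustrative only.
// Illustrative only: fetch the connector and look for the "UnknownFields" warning
// condition that the operator adds when the spec contains unrecognised fields.
KafkaConnector fetched = Crds.kafkaConnectorOperation(client)
    .inNamespace(NAMESPACE)
    .withName(connectorName)
    .get();
boolean hasUnknownFieldsWarning = fetched.getStatus().getConditions().stream()
    .anyMatch(c -> "Warning".equals(c.getType()) && "UnknownFields".equals(c.getReason()));
assertThat(hasUnknownFieldsWarning, is(true));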
use of io.strimzi.api.kafka.model.KafkaConnector in project strimzi by strimzi.
From class ConnectorMockTest, method testConnectorConnectConnectConnector.
/**
* Create connector, create connect, delete connect, delete connector
*/
@Test
public void testConnectorConnectConnectConnector() {
String connectName = "cluster";
String connectorName = "connector";
// Create KafkaConnector; it cannot become ready yet because the KafkaConnect cluster does not exist
KafkaConnector connector = new KafkaConnectorBuilder()
    .withNewMetadata()
        .withName(connectorName)
        .withNamespace(NAMESPACE)
        .addToLabels(Labels.STRIMZI_CLUSTER_LABEL, connectName)
    .endMetadata()
    .withNewSpec()
    .endSpec()
    .build();
Crds.kafkaConnectorOperation(client).inNamespace(NAMESPACE).create(connector);
waitForConnectorNotReady(connectorName, "NoSuchResourceException", "KafkaConnect resource 'cluster' identified by label '" + Labels.STRIMZI_CLUSTER_LABEL + "' does not exist in namespace ns.");
verify(api, never()).list(eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT));
verify(api, never()).createOrUpdatePutRequest(any(), eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any());
assertThat(runningConnectors.keySet(), is(empty()));
// Create KafkaConnect cluster and wait till it's ready
KafkaConnect connect = new KafkaConnectBuilder()
    .withNewMetadata()
        .withNamespace(NAMESPACE)
        .withName(connectName)
        .addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true")
    .endMetadata()
    .withNewSpec()
        .withReplicas(1)
    .endSpec()
    .build();
Crds.kafkaConnectOperation(client).inNamespace(NAMESPACE).create(connect);
waitForConnectReady(connectName);
// Could be triggered twice (creation followed by status update), but waitForConnectReady may already be satisfied after a single call
verify(api, atLeastOnce()).list(eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT));
// Triggered once or twice (Connect creation, Connector Status update), depending on the timing
verify(api, atLeastOnce()).createOrUpdatePutRequest(any(), eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any());
assertThat(runningConnectors.keySet(), is(Collections.singleton(key("cluster-connect-api.ns.svc", connectorName))));
boolean connectDeleted = Crds.kafkaConnectOperation(client).inNamespace(NAMESPACE).withName(connectName).delete();
assertThat(connectDeleted, is(true));
waitForConnectorNotReady(connectorName, "NoSuchResourceException", "KafkaConnect resource 'cluster' identified by label '" + Labels.STRIMZI_CLUSTER_LABEL + "' does not exist in namespace ns.");
boolean connectorDeleted = Crds.kafkaConnectorOperation(client).inNamespace(NAMESPACE).withName(connectorName).delete();
assertThat(connectorDeleted, is(true));
// Verify the connector was never deleted from connect as the cluster was deleted first
verify(api, never()).delete(any(), eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName));
}
use of io.strimzi.api.kafka.model.KafkaConnector in project strimzi by strimzi.
From class ConnectorMockTest, method testConnectorResourceMetricsConnectDeletion.
// MockKube2 does not support "In" selector => https://github.com/strimzi/strimzi-kafka-operator/issues/6740
@Disabled
@Test
void testConnectorResourceMetricsConnectDeletion(VertxTestContext context) {
String connectName = "cluster";
String connectorName1 = "connector1";
String connectorName2 = "connector2";
when(kafkaConnectOperator.selector()).thenReturn(Optional.of(new LabelSelector(null, Map.of("foo", "bar"))));
KafkaConnect kafkaConnect = new KafkaConnectBuilder()
    .withNewMetadata()
        .withNamespace(NAMESPACE)
        .withName(connectName)
        .addToLabels("foo", "bar")
        .addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true")
    .endMetadata()
    .withNewSpec()
        .withReplicas(1)
    .endSpec()
    .build();
Crds.kafkaConnectOperation(client).inNamespace(NAMESPACE).create(kafkaConnect);
waitForConnectReady(connectName);
KafkaConnector connector1 = defaultKafkaConnectorBuilder()
    .editMetadata()
        .withName(connectorName1)
        .addToLabels(Labels.STRIMZI_CLUSTER_LABEL, connectName)
        .addToAnnotations(Annotations.ANNO_STRIMZI_IO_PAUSE_RECONCILIATION, "true")
    .endMetadata()
    .build();
KafkaConnector connector2 = new KafkaConnectorBuilder(connector1).editMetadata().withName(connectorName2).endMetadata().build();
Crds.kafkaConnectorOperation(client).inNamespace(NAMESPACE).create(connector1);
Crds.kafkaConnectorOperation(client).inNamespace(NAMESPACE).create(connector2);
waitForConnectorPaused(connectorName1);
waitForConnectorPaused(connectorName2);
MeterRegistry meterRegistry = metricsProvider.meterRegistry();
Tags tags = Tags.of("kind", KafkaConnector.RESOURCE_KIND, "namespace", NAMESPACE);
Promise<Void> reconciled1 = Promise.promise();
Promise<Void> reconciled2 = Promise.promise();
kafkaConnectOperator.reconcileAll("test", NAMESPACE, ignored -> reconciled1.complete());
Checkpoint async = context.checkpoint();
reconciled1.future().onComplete(context.succeeding(v -> context.verify(() -> {
    Gauge resources = meterRegistry.get("strimzi.resources").tags(tags).gauge();
    assertThat(resources.value(), is(2.0));
    Gauge resourcesPaused = meterRegistry.get("strimzi.resources.paused").tags(tags).gauge();
    assertThat(resourcesPaused.value(), is(2.0));
    Crds.kafkaConnectOperation(client).inNamespace(NAMESPACE).delete(kafkaConnect);
    waitForConnectDeleted(connectName);
    kafkaConnectOperator.reconcileAll("test", NAMESPACE, ignored -> reconciled2.complete());
    reconciled2.future().onComplete(context.succeeding(v1 -> context.verify(() -> {
        assertThat(resources.value(), is(0.0));
        assertThat(resourcesPaused.value(), is(0.0));
        async.flag();
    })));
})));
}
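For context on the disabled test above: the linked issue concerns set-based ("In") label selectors, whereas the test itself only uses an equality selector (new LabelSelector(null, Map.of("foo", "bar"))). A hypothetical example of such a set-based selector, built with the Fabric8 LabelSelectorBuilder; the key and values here are illustrative, not taken from the test.
// Hypothetical set-based ("In") selector of the kind MockKube2 could not evaluate at the time.
LabelSelector inSelector = new LabelSelectorBuilder()
    .addNewMatchExpression()
        .withKey("foo")
        .withOperator("In")
        .withValues("bar", "baz")
    .endMatchExpression()
    .build();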
use of io.strimzi.api.kafka.model.KafkaConnector in project strimzi by strimzi.
From class ConnectorMockTest, method testConnectConnectorConnectorConnect.
/**
* Create connect, create connector, delete connector, delete connect
*/
@Test
public void testConnectConnectorConnectorConnect() {
String connectName = "cluster";
String connectorName = "connector";
// Create KafkaConnect cluster and wait till it's ready
KafkaConnect connect = new KafkaConnectBuilder()
    .withNewMetadata()
        .withNamespace(NAMESPACE)
        .withName(connectName)
        .addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true")
    .endMetadata()
    .withNewSpec()
        .withReplicas(1)
    .endSpec()
    .build();
Crds.kafkaConnectOperation(client).inNamespace(NAMESPACE).create(connect);
waitForConnectReady(connectName);
// Could be triggered twice (creation followed by status update), but waitForConnectReady may already be satisfied after a single call
verify(api, atLeastOnce()).list(eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT));
verify(api, never()).createOrUpdatePutRequest(any(), eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any());
// Create KafkaConnector and wait till it's ready
KafkaConnector connector = new KafkaConnectorBuilder()
    .withNewMetadata()
        .withName(connectorName)
        .withNamespace(NAMESPACE)
        .addToLabels(Labels.STRIMZI_CLUSTER_LABEL, connectName)
    .endMetadata()
    .withNewSpec()
        .withTasksMax(1)
        .withClassName("Dummy")
    .endSpec()
    .build();
Crds.kafkaConnectorOperation(client).inNamespace(NAMESPACE).create(connector);
waitForConnectorReady(connectorName);
verify(api, times(2)).list(eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT));
verify(api, times(1)).createOrUpdatePutRequest(any(), eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any());
assertThat(runningConnectors.keySet(), is(Collections.singleton(key("cluster-connect-api.ns.svc", connectorName))));
boolean connectorDeleted = Crds.kafkaConnectorOperation(client).inNamespace(NAMESPACE).withName(connectorName).delete();
assertThat(connectorDeleted, is(true));
waitFor("delete call on connect REST api", 1_000, 30_000, () -> runningConnectors.isEmpty());
// Verify connector is deleted from the connect via REST api
verify(api).delete(any(), eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName));
boolean connectDeleted = Crds.kafkaConnectOperation(client).inNamespace(NAMESPACE).withName(connectName).delete();
assertThat(connectDeleted, is(true));
}
use of io.strimzi.api.kafka.model.KafkaConnector in project strimzi-kafka-operator by strimzi.
From class AbstractConnectOperator, method maybeUpdateStatusCommon.
/**
* Updates the Status field of the KafkaConnect or KafkaConnector CR. It diffs the desired status against the current status and calls
* the update only when there is any difference in non-timestamp fields.
*
* @param resourceOperator The resource operator used to get the latest version of the resource and to update its status
* @param resource The CR of KafkaConnect or KafkaConnector
* @param reconciliation Reconciliation information
* @param desiredStatus The KafkaConnectStatus or KafkaConnectorStatus which should be set
* @param copyWithStatus Function which returns a copy of the resource with the desired status set
*
* @return Future which completes when the status has been updated, or completes without an update when the status did not change
*/
protected <T extends CustomResource<?, S>, S extends Status, L extends CustomResourceList<T>> Future<Void> maybeUpdateStatusCommon(
        CrdOperator<KubernetesClient, T, L> resourceOperator, T resource, Reconciliation reconciliation,
        S desiredStatus, BiFunction<T, S, T> copyWithStatus) {
Promise<Void> updateStatusPromise = Promise.promise();
resourceOperator.getAsync(resource.getMetadata().getNamespace(), resource.getMetadata().getName()).onComplete(getRes -> {
    if (getRes.succeeded()) {
        T fetchedResource = getRes.result();
        if (fetchedResource != null) {
            if ((!(fetchedResource instanceof KafkaConnector)) && (!(fetchedResource instanceof KafkaMirrorMaker2))) {
                LOGGER.warnCr(reconciliation, "{} {} needs to be upgraded from version {} to 'v1beta1' to use the status field", fetchedResource.getKind(), fetchedResource.getMetadata().getName(), fetchedResource.getApiVersion());
                updateStatusPromise.complete();
            } else {
                S currentStatus = fetchedResource.getStatus();
                StatusDiff ksDiff = new StatusDiff(currentStatus, desiredStatus);
                if (!ksDiff.isEmpty()) {
                    T resourceWithNewStatus = copyWithStatus.apply(fetchedResource, desiredStatus);
                    resourceOperator.updateStatusAsync(reconciliation, resourceWithNewStatus).onComplete(updateRes -> {
                        if (updateRes.succeeded()) {
                            LOGGER.debugCr(reconciliation, "Completed status update");
                            updateStatusPromise.complete();
                        } else {
                            LOGGER.errorCr(reconciliation, "Failed to update status", updateRes.cause());
                            updateStatusPromise.fail(updateRes.cause());
                        }
                    });
                } else {
                    LOGGER.debugCr(reconciliation, "Status did not change");
                    updateStatusPromise.complete();
                }
            }
        } else {
            LOGGER.errorCr(reconciliation, "Current {} resource not found", resource.getKind());
            updateStatusPromise.fail("Current " + resource.getKind() + " resource not found");
        }
    } else {
        LOGGER.errorCr(reconciliation, "Failed to get the current {} resource and its status", resource.getKind(), getRes.cause());
        updateStatusPromise.fail(getRes.cause());
    }
});
return updateStatusPromise.future();
}
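A minimal usage sketch of the helper, assuming it is called from a subclass of AbstractConnectOperator: the connectorOperations field name and the in-scope connector, reconciliation, and desiredStatus variables are assumptions for illustration, and the copy-with-status lambda uses the generated KafkaConnectorBuilder.
// Sketch only: update a KafkaConnector status through the common helper.
// "connectorOperations" is an assumed field of type CrdOperator<KubernetesClient, KafkaConnector, KafkaConnectorList>.
Future<Void> statusUpdate = maybeUpdateStatusCommon(
    connectorOperations,
    connector,
    reconciliation,
    desiredStatus,
    (kafkaConnector, status) -> new KafkaConnectorBuilder(kafkaConnector).withStatus(status).build());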