
Example 31 with KafkaConnector

Use of io.strimzi.api.kafka.model.KafkaConnector in project strimzi by strimzi.

The class ConnectorMockTest, method testConnectorUnknownField.

@Test
public void testConnectorUnknownField() {
    String connectName = "cluster";
    String connectorName = "connector";
    // Create KafkaConnect cluster and wait till it's ready
    Crds.kafkaConnectOperation(client).inNamespace(NAMESPACE).create(new KafkaConnectBuilder().withNewMetadata().withNamespace(NAMESPACE).withName(connectName).addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true").endMetadata().withNewSpec().withReplicas(1).endSpec().build());
    waitForConnectReady(connectName);
    String yaml = "apiVersion: kafka.strimzi.io/v1beta2\n" + "kind: KafkaConnector\n" + "metadata:\n" + "  name: " + connectorName + "\n" + "  namespace: " + NAMESPACE + "\n" + "  labels:\n" + "    strimzi.io/cluster: " + connectName + "\n" + "spec:\n" + "  class: EchoSink\n" + "  tasksMax: 1\n" + "  unknownField: \"value\"\n" + "  config:\n" + "    level: INFO\n" + "    topics: timer-topic";
    KafkaConnector kcr = TestUtils.fromYamlString(yaml, KafkaConnector.class);
    Crds.kafkaConnectorOperation(client).inNamespace(NAMESPACE).create(kcr);
    waitForConnectorReady(connectorName);
    waitForConnectorState(connectorName, "RUNNING");
    waitForConnectorCondition(connectorName, "Warning", "UnknownFields");
}
Also used : KafkaConnectBuilder(io.strimzi.api.kafka.model.KafkaConnectBuilder) KafkaConnector(io.strimzi.api.kafka.model.KafkaConnector) ArgumentMatchers.anyString(org.mockito.ArgumentMatchers.anyString) Test(org.junit.jupiter.api.Test)
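
The assertion above relies on a Warning condition with reason UnknownFields being added to the connector status. A minimal sketch of reading that condition back directly, assuming the Fabric8 get() accessor and the getType()/getReason() getters on io.strimzi.api.kafka.model.status.Condition (client, NAMESPACE and connectorName are reused from the test; this snippet is not part of the test itself):

    // Sketch only: fetch the KafkaConnector and check for the "Warning"/"UnknownFields" condition.
    KafkaConnector created = Crds.kafkaConnectorOperation(client)
            .inNamespace(NAMESPACE)
            .withName(connectorName)
            .get();
    boolean hasUnknownFieldsWarning = created != null
            && created.getStatus() != null
            && created.getStatus().getConditions().stream()
                    .anyMatch(c -> "Warning".equals(c.getType()) && "UnknownFields".equals(c.getReason()));
    assertThat(hasUnknownFieldsWarning, is(true));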

Example 32 with KafkaConnector

Use of io.strimzi.api.kafka.model.KafkaConnector in project strimzi by strimzi.

The class ConnectorMockTest, method testConnectorConnectConnectConnector.

/**
 * Create connector, create connect, delete connect, delete connector
 */
@Test
public void testConnectorConnectConnectConnector() {
    String connectName = "cluster";
    String connectorName = "connector";
    // Create KafkaConnector and wait till it's ready
    KafkaConnector connector = new KafkaConnectorBuilder().withNewMetadata().withName(connectorName).withNamespace(NAMESPACE).addToLabels(Labels.STRIMZI_CLUSTER_LABEL, connectName).endMetadata().withNewSpec().endSpec().build();
    Crds.kafkaConnectorOperation(client).inNamespace(NAMESPACE).create(connector);
    waitForConnectorNotReady(connectorName, "NoSuchResourceException", "KafkaConnect resource 'cluster' identified by label '" + Labels.STRIMZI_CLUSTER_LABEL + "' does not exist in namespace ns.");
    verify(api, never()).list(eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT));
    verify(api, never()).createOrUpdatePutRequest(any(), eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any());
    assertThat(runningConnectors.keySet(), is(empty()));
    // Create KafkaConnect cluster and wait till it's ready
    KafkaConnect connect = new KafkaConnectBuilder().withNewMetadata().withNamespace(NAMESPACE).withName(connectName).addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true").endMetadata().withNewSpec().withReplicas(1).endSpec().build();
    Crds.kafkaConnectOperation(client).inNamespace(NAMESPACE).create(connect);
    waitForConnectReady(connectName);
    // Could be triggered twice (creation followed by a status update), but waitForConnectReady could be satisfied by a single call
    verify(api, atLeastOnce()).list(eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT));
    // Triggered once or twice (Connect creation, Connector Status update), depending on the timing
    verify(api, atLeastOnce()).createOrUpdatePutRequest(any(), eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any());
    assertThat(runningConnectors.keySet(), is(Collections.singleton(key("cluster-connect-api.ns.svc", connectorName))));
    boolean connectDeleted = Crds.kafkaConnectOperation(client).inNamespace(NAMESPACE).withName(connectName).delete();
    assertThat(connectDeleted, is(true));
    waitForConnectorNotReady(connectorName, "NoSuchResourceException", "KafkaConnect resource 'cluster' identified by label '" + Labels.STRIMZI_CLUSTER_LABEL + "' does not exist in namespace ns.");
    boolean connectorDeleted = Crds.kafkaConnectorOperation(client).inNamespace(NAMESPACE).withName(connectorName).delete();
    assertThat(connectorDeleted, is(true));
    // Verify the connector was never deleted from connect as the cluster was deleted first
    verify(api, never()).delete(any(), eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName));
}
Also used : KafkaConnectBuilder(io.strimzi.api.kafka.model.KafkaConnectBuilder) KafkaConnectorBuilder(io.strimzi.api.kafka.model.KafkaConnectorBuilder) KafkaConnector(io.strimzi.api.kafka.model.KafkaConnector) ArgumentMatchers.anyString(org.mockito.ArgumentMatchers.anyString) KafkaConnect(io.strimzi.api.kafka.model.KafkaConnect) Test(org.junit.jupiter.api.Test)
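
The helpers waitForConnectorReady and waitForConnectorNotReady are not shown in this listing. A hedged sketch of what such a poll could look like, built on the TestUtils.waitFor(description, pollIntervalMs, timeoutMs, check) helper used elsewhere in this class; the method name, predicate and timings here are illustrative assumptions, not the test class's actual implementation:

    // Illustrative sketch only: poll until the KafkaConnector reports a Ready=True condition.
    private void waitForConnectorReadySketch(String connectorName) {
        waitFor("KafkaConnector " + connectorName + " to become Ready", 1_000, 30_000, () -> {
            KafkaConnector kc = Crds.kafkaConnectorOperation(client)
                    .inNamespace(NAMESPACE)
                    .withName(connectorName)
                    .get();
            return kc != null
                    && kc.getStatus() != null
                    && kc.getStatus().getConditions().stream()
                            .anyMatch(c -> "Ready".equals(c.getType()) && "True".equals(c.getStatus()));
        });
    }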

Example 33 with KafkaConnector

Use of io.strimzi.api.kafka.model.KafkaConnector in project strimzi by strimzi.

The class ConnectorMockTest, method testConnectorResourceMetricsConnectDeletion.

// MockKube2 does not support "In" selector => https://github.com/strimzi/strimzi-kafka-operator/issues/6740
@Disabled
@Test
void testConnectorResourceMetricsConnectDeletion(VertxTestContext context) {
    String connectName = "cluster";
    String connectorName1 = "connector1";
    String connectorName2 = "connector2";
    when(kafkaConnectOperator.selector()).thenReturn(Optional.of(new LabelSelector(null, Map.of("foo", "bar"))));
    KafkaConnect kafkaConnect = new KafkaConnectBuilder().withNewMetadata().withNamespace(NAMESPACE).withName(connectName).addToLabels("foo", "bar").addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true").endMetadata().withNewSpec().withReplicas(1).endSpec().build();
    Crds.kafkaConnectOperation(client).inNamespace(NAMESPACE).create(kafkaConnect);
    waitForConnectReady(connectName);
    KafkaConnector connector1 = defaultKafkaConnectorBuilder().editMetadata().withName(connectorName1).addToLabels(Labels.STRIMZI_CLUSTER_LABEL, connectName).addToAnnotations(Annotations.ANNO_STRIMZI_IO_PAUSE_RECONCILIATION, "true").endMetadata().build();
    KafkaConnector connector2 = new KafkaConnectorBuilder(connector1).editMetadata().withName(connectorName2).endMetadata().build();
    Crds.kafkaConnectorOperation(client).inNamespace(NAMESPACE).create(connector1);
    Crds.kafkaConnectorOperation(client).inNamespace(NAMESPACE).create(connector2);
    waitForConnectorPaused(connectorName1);
    waitForConnectorPaused(connectorName2);
    MeterRegistry meterRegistry = metricsProvider.meterRegistry();
    Tags tags = Tags.of("kind", KafkaConnector.RESOURCE_KIND, "namespace", NAMESPACE);
    Promise<Void> reconciled1 = Promise.promise();
    Promise<Void> reconciled2 = Promise.promise();
    kafkaConnectOperator.reconcileAll("test", NAMESPACE, ignored -> reconciled1.complete());
    Checkpoint async = context.checkpoint();
    reconciled1.future().onComplete(context.succeeding(v -> context.verify(() -> {
        Gauge resources = meterRegistry.get("strimzi.resources").tags(tags).gauge();
        assertThat(resources.value(), is(2.0));
        Gauge resourcesPaused = meterRegistry.get("strimzi.resources.paused").tags(tags).gauge();
        assertThat(resourcesPaused.value(), is(2.0));
        Crds.kafkaConnectOperation(client).inNamespace(NAMESPACE).delete(kafkaConnect);
        waitForConnectDeleted(connectName);
        kafkaConnectOperator.reconcileAll("test", NAMESPACE, ignored -> reconciled2.complete());
        reconciled2.future().onComplete(context.succeeding(v1 -> context.verify(() -> {
            assertThat(resources.value(), is(0.0));
            assertThat(resourcesPaused.value(), is(0.0));
            async.flag();
        })));
    })));
}
Also used : LabelSelector(io.fabric8.kubernetes.api.model.LabelSelector) ArgumentMatchers.anyString(org.mockito.ArgumentMatchers.anyString) KafkaConnect(io.strimzi.api.kafka.model.KafkaConnect) MeterRegistry(io.micrometer.core.instrument.MeterRegistry) Gauge(io.micrometer.core.instrument.Gauge) KafkaConnectBuilder(io.strimzi.api.kafka.model.KafkaConnectBuilder) Checkpoint(io.vertx.junit5.Checkpoint) KafkaConnectorBuilder(io.strimzi.api.kafka.model.KafkaConnectorBuilder) KafkaConnector(io.strimzi.api.kafka.model.KafkaConnector) Tags(io.micrometer.core.instrument.Tags) Test(org.junit.jupiter.api.Test) Disabled(org.junit.jupiter.api.Disabled)

Example 34 with KafkaConnector

Use of io.strimzi.api.kafka.model.KafkaConnector in project strimzi by strimzi.

The class ConnectorMockTest, method testConnectConnectorConnectorConnect.

/**
 * Create connect, create connector, delete connector, delete connect
 */
@Test
public void testConnectConnectorConnectorConnect() {
    String connectName = "cluster";
    String connectorName = "connector";
    // Create KafkaConnect cluster and wait till it's ready
    KafkaConnect connect = new KafkaConnectBuilder().withNewMetadata().withNamespace(NAMESPACE).withName(connectName).addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true").endMetadata().withNewSpec().withReplicas(1).endSpec().build();
    Crds.kafkaConnectOperation(client).inNamespace(NAMESPACE).create(connect);
    waitForConnectReady(connectName);
    // Could be triggered twice (creation followed by a status update), but waitForConnectReady could be satisfied by a single call
    verify(api, atLeastOnce()).list(eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT));
    verify(api, never()).createOrUpdatePutRequest(any(), eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any());
    // Create KafkaConnector and wait till it's ready
    KafkaConnector connector = new KafkaConnectorBuilder().withNewMetadata().withName(connectorName).withNamespace(NAMESPACE).addToLabels(Labels.STRIMZI_CLUSTER_LABEL, connectName).endMetadata().withNewSpec().withTasksMax(1).withClassName("Dummy").endSpec().build();
    Crds.kafkaConnectorOperation(client).inNamespace(NAMESPACE).create(connector);
    waitForConnectorReady(connectorName);
    verify(api, times(2)).list(eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT));
    verify(api, times(1)).createOrUpdatePutRequest(any(), eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any());
    assertThat(runningConnectors.keySet(), is(Collections.singleton(key("cluster-connect-api.ns.svc", connectorName))));
    boolean connectorDeleted = Crds.kafkaConnectorOperation(client).inNamespace(NAMESPACE).withName(connectorName).delete();
    assertThat(connectorDeleted, is(true));
    waitFor("delete call on connect REST api", 1_000, 30_000, () -> runningConnectors.isEmpty());
    // Verify the connector is deleted from Connect via the REST API
    verify(api).delete(any(), eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName));
    boolean connectDeleted = Crds.kafkaConnectOperation(client).inNamespace(NAMESPACE).withName(connectName).delete();
    assertThat(connectDeleted, is(true));
}
Also used : KafkaConnectBuilder(io.strimzi.api.kafka.model.KafkaConnectBuilder) KafkaConnectorBuilder(io.strimzi.api.kafka.model.KafkaConnectorBuilder) KafkaConnector(io.strimzi.api.kafka.model.KafkaConnector) ArgumentMatchers.anyString(org.mockito.ArgumentMatchers.anyString) KafkaConnect(io.strimzi.api.kafka.model.KafkaConnect) Test(org.junit.jupiter.api.Test)
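
The delete check above first polls runningConnectors with waitFor and then verifies the REST call. As an alternative, Mockito's timeout-based verification can wait for the call itself; a sketch mirroring the matchers used above (this is not how the test is written, just an equivalent verification style, and it assumes a static import of org.mockito.Mockito.timeout):

    // Sketch only: wait up to 30 seconds for the delete call to the Connect REST API
    // instead of polling runningConnectors before verifying.
    verify(api, timeout(30_000)).delete(any(),
            eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)),
            eq(KafkaConnectCluster.REST_API_PORT),
            eq(connectorName));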

Example 35 with KafkaConnector

Use of io.strimzi.api.kafka.model.KafkaConnector in project strimzi-kafka-operator by strimzi.

The class AbstractConnectOperator, method maybeUpdateStatusCommon.

/**
 * Updates the Status field of the KafkaConnect or KafkaConnector CR. It diffs the desired status against the current status and calls
 * the update only when there is a difference in the non-timestamp fields.
 *
 * @param resourceOperator The CRD operator used to fetch the resource and update its status
 * @param resource The CR of KafkaConnect or KafkaConnector
 * @param reconciliation Reconciliation information
 * @param desiredStatus The KafkaConnectStatus or KafkaConnectorStatus which should be set
 * @param copyWithStatus Function which returns a copy of the resource with the desired status applied
 *
 * @return A future which completes when the status has been updated, or immediately when no update was needed
 */
protected <T extends CustomResource<?, S>, S extends Status, L extends CustomResourceList<T>> Future<Void> maybeUpdateStatusCommon(CrdOperator<KubernetesClient, T, L> resourceOperator, T resource, Reconciliation reconciliation, S desiredStatus, BiFunction<T, S, T> copyWithStatus) {
    Promise<Void> updateStatusPromise = Promise.promise();
    resourceOperator.getAsync(resource.getMetadata().getNamespace(), resource.getMetadata().getName()).onComplete(getRes -> {
        if (getRes.succeeded()) {
            T fetchedResource = getRes.result();
            if (fetchedResource != null) {
                if ((!(fetchedResource instanceof KafkaConnector)) && (!(fetchedResource instanceof KafkaMirrorMaker2))) {
                    LOGGER.warnCr(reconciliation, "{} {} needs to be upgraded from version {} to 'v1beta1' to use the status field", fetchedResource.getKind(), fetchedResource.getMetadata().getName(), fetchedResource.getApiVersion());
                    updateStatusPromise.complete();
                } else {
                    S currentStatus = fetchedResource.getStatus();
                    StatusDiff ksDiff = new StatusDiff(currentStatus, desiredStatus);
                    if (!ksDiff.isEmpty()) {
                        T resourceWithNewStatus = copyWithStatus.apply(fetchedResource, desiredStatus);
                        resourceOperator.updateStatusAsync(reconciliation, resourceWithNewStatus).onComplete(updateRes -> {
                            if (updateRes.succeeded()) {
                                LOGGER.debugCr(reconciliation, "Completed status update");
                                updateStatusPromise.complete();
                            } else {
                                LOGGER.errorCr(reconciliation, "Failed to update status", updateRes.cause());
                                updateStatusPromise.fail(updateRes.cause());
                            }
                        });
                    } else {
                        LOGGER.debugCr(reconciliation, "Status did not change");
                        updateStatusPromise.complete();
                    }
                }
            } else {
                LOGGER.errorCr(reconciliation, "Current {} resource not found", resource.getKind());
                updateStatusPromise.fail("Current " + resource.getKind() + " resource not found");
            }
        } else {
            LOGGER.errorCr(reconciliation, "Failed to get the current {} resource and its status", resource.getKind(), getRes.cause());
            updateStatusPromise.fail(getRes.cause());
        }
    });
    return updateStatusPromise.future();
}
Also used : ANNO_STRIMZI_IO_RESTART(io.strimzi.operator.common.Annotations.ANNO_STRIMZI_IO_RESTART) StatusDiff(io.strimzi.operator.cluster.model.StatusDiff) KafkaConnector(io.strimzi.api.kafka.model.KafkaConnector) KafkaMirrorMaker2(io.strimzi.api.kafka.model.KafkaMirrorMaker2)
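
The copyWithStatus argument is the piece that applies the desired status to the freshly fetched resource. A minimal sketch of a KafkaConnector call site, assuming the sundrio-generated KafkaConnectorBuilder exposes withStatus(...); connectorOperations, connector, reconciliation and desiredStatus are placeholders for whatever the concrete operator holds, not code taken from AbstractConnectOperator:

    // Illustrative call site only (placeholder variables).
    maybeUpdateStatusCommon(
            connectorOperations,
            connector,
            reconciliation,
            desiredStatus,
            (kafkaConnector, status) -> new KafkaConnectorBuilder(kafkaConnector)
                    .withStatus(status)
                    .build());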

Aggregations

KafkaConnector (io.strimzi.api.kafka.model.KafkaConnector): 125
Test (org.junit.jupiter.api.Test): 92
ArgumentMatchers.anyString (org.mockito.ArgumentMatchers.anyString): 92
KafkaConnect (io.strimzi.api.kafka.model.KafkaConnect): 91
KafkaConnectBuilder (io.strimzi.api.kafka.model.KafkaConnectBuilder): 77
KubernetesClient (io.fabric8.kubernetes.client.KubernetesClient): 69
PlatformFeaturesAvailability (io.strimzi.operator.PlatformFeaturesAvailability): 68
ResourceOperatorSupplier (io.strimzi.operator.cluster.operator.resource.ResourceOperatorSupplier): 68
Reconciliation (io.strimzi.operator.common.Reconciliation): 68
Vertx (io.vertx.core.Vertx): 68
List (java.util.List): 68
KafkaConnectResources (io.strimzi.api.kafka.model.KafkaConnectResources): 66
KafkaConnectCluster (io.strimzi.operator.cluster.model.KafkaConnectCluster): 66
Annotations (io.strimzi.operator.common.Annotations): 66
Future (io.vertx.core.Future): 66
Optional (java.util.Optional): 66
KubernetesVersion (io.strimzi.operator.KubernetesVersion): 64
KafkaVersionTestUtils (io.strimzi.operator.cluster.KafkaVersionTestUtils): 64
Checkpoint (io.vertx.junit5.Checkpoint): 64
VertxExtension (io.vertx.junit5.VertxExtension): 64