
Example 1 with IsolatedTest

Use of io.strimzi.test.annotations.IsolatedTest in project strimzi by strimzi.
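
For context, @IsolatedTest marks a test that the parallel test engine must run on its own rather than alongside other tests. Below is a minimal sketch of how such a composed annotation can be declared with JUnit 5 primitives; the real io.strimzi.test.annotations.IsolatedTest may differ in its lock key and details:

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;

import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.parallel.ResourceAccessMode;
import org.junit.jupiter.api.parallel.ResourceLock;

// Hypothetical sketch: a composed annotation that is a JUnit 5 @Test holding a
// READ_WRITE lock on a shared resource key, so the engine schedules it alone.
@Target(ElementType.METHOD)
@Retention(RetentionPolicy.RUNTIME)
@Test
@ResourceLock(value = "isolated-tests", mode = ResourceAccessMode.READ_WRITE)
public @interface IsolatedTest {
}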

From class KafkaConnectApiTest, method testChangeLoggers:

@IsolatedTest
public void testChangeLoggers(VertxTestContext context) throws InterruptedException {
    String desired = "log4j.rootLogger=TRACE, CONSOLE\n" + "log4j.logger.org.apache.zookeeper=WARN\n" + "log4j.logger.org.I0Itec.zkclient=INFO\n" + "log4j.logger.org.reflections.Reflection=INFO\n" + "log4j.logger.org.reflections=FATAL\n" + "log4j.logger.foo=WARN\n" + "log4j.logger.foo.bar=TRACE\n" + "log4j.logger.foo.bar.quux=DEBUG";
    KafkaConnectApi client = new KafkaConnectApiImpl(vertx);
    Checkpoint async = context.checkpoint();
    OrderedProperties ops = new OrderedProperties();
    ops.addStringPairs(desired);
    client.updateConnectLoggers(Reconciliation.DUMMY_RECONCILIATION, "localhost", PORT, desired, ops).onComplete(context.succeeding(wasChanged -> context.verify(() -> assertEquals(true, wasChanged)))).compose(a -> client.listConnectLoggers(Reconciliation.DUMMY_RECONCILIATION, "localhost", PORT).onComplete(context.succeeding(map -> context.verify(() -> {
        assertThat(map.get("root"), is("TRACE"));
        assertThat(map.get("org.apache.zookeeper"), is("WARN"));
        assertThat(map.get("org.I0Itec.zkclient"), is("INFO"));
        assertThat(map.get("org.reflections"), is("FATAL"));
        assertThat(map.get("org.reflections.Reflection"), is("INFO"));
        assertThat(map.get("org.reflections.Reflection"), is("INFO"));
        assertThat(map.get("foo"), is("WARN"));
        assertThat(map.get("foo.bar"), is("TRACE"));
        assertThat(map.get("foo.bar.quux"), is("DEBUG"));
    })))).compose(a -> client.updateConnectLoggers(Reconciliation.DUMMY_RECONCILIATION, "localhost", PORT, desired, ops).onComplete(context.succeeding(wasChanged -> context.verify(() -> {
        assertEquals(false, wasChanged);
        async.flag();
    }))));
}
Also used: VertxTestContext (io.vertx.junit5.VertxTestContext), BeforeEach (org.junit.jupiter.api.BeforeEach), Assertions.assertNotNull (org.junit.jupiter.api.Assertions.assertNotNull), OrderedProperties (io.strimzi.operator.common.model.OrderedProperties), BackOff (io.strimzi.operator.common.BackOff),
Matchers.emptyString (org.hamcrest.Matchers.emptyString), Matchers.not (org.hamcrest.Matchers.not), HashMap (java.util.HashMap), Collections.singletonList (java.util.Collections.singletonList), AfterAll (org.junit.jupiter.api.AfterAll), ExtendWith (org.junit.jupiter.api.extension.ExtendWith), BeforeAll (org.junit.jupiter.api.BeforeAll),
IsolatedTest (io.strimzi.test.annotations.IsolatedTest), Map (java.util.Map), TestUtils (io.strimzi.test.TestUtils), JsonObject (io.vertx.core.json.JsonObject), MatcherAssert.assertThat (org.hamcrest.MatcherAssert.assertThat), Assertions.assertEquals (org.junit.jupiter.api.Assertions.assertEquals), StrimziKafkaCluster (io.strimzi.test.container.StrimziKafkaCluster),
Collections.emptyMap (java.util.Collections.emptyMap), Matchers.empty (org.hamcrest.Matchers.empty), Matchers.greaterThanOrEqualTo (org.hamcrest.Matchers.greaterThanOrEqualTo), Files (java.nio.file.Files), Promise (io.vertx.core.Promise), Vertx (io.vertx.core.Vertx), IOException (java.io.IOException), ConnectorPlugin (io.strimzi.api.kafka.model.connect.ConnectorPlugin),
VertxExtension (io.vertx.junit5.VertxExtension), Future (io.vertx.core.Future), File (java.io.File), Matchers.instanceOf (org.hamcrest.Matchers.instanceOf), CountDownLatch (java.util.concurrent.CountDownLatch), Reconciliation (io.strimzi.operator.common.Reconciliation), List (java.util.List), AfterEach (org.junit.jupiter.api.AfterEach),
Checkpoint (io.vertx.junit5.Checkpoint), Connect (org.apache.kafka.connect.runtime.Connect), Matchers.is (org.hamcrest.Matchers.is), Handler (io.vertx.core.Handler), Matchers.containsString (org.hamcrest.Matchers.containsString), ConnectDistributed (org.apache.kafka.connect.cli.ConnectDistributed)
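
For orientation, the loggers this client reads and writes are exposed by the Connect worker's dynamic logging REST API (GET /admin/loggers and PUT /admin/loggers/{name}, introduced by KIP-495), which listConnectLoggers and updateConnectLoggers presumably wrap. A minimal standalone sketch of those two calls with java.net.http, assuming a worker on localhost:18083 as elsewhere in this test class:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class ConnectLoggersSketch {
    public static void main(String[] args) throws Exception {
        HttpClient http = HttpClient.newHttpClient();
        String base = "http://localhost:18083"; // worker address assumed from the tests

        // GET /admin/loggers returns a JSON map of logger name -> {"level": ...}
        HttpRequest list = HttpRequest.newBuilder(URI.create(base + "/admin/loggers")).GET().build();
        System.out.println(http.send(list, HttpResponse.BodyHandlers.ofString()).body());

        // PUT /admin/loggers/{name} sets a single logger's level
        HttpRequest set = HttpRequest.newBuilder(URI.create(base + "/admin/loggers/root"))
                .header("Content-Type", "application/json")
                .PUT(HttpRequest.BodyPublishers.ofString("{\"level\":\"TRACE\"}"))
                .build();
        System.out.println(http.send(set, HttpResponse.BodyHandlers.ofString()).statusCode());
    }
}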

Example 2 with IsolatedTest

Use of io.strimzi.test.annotations.IsolatedTest in project strimzi by strimzi.

From class KafkaConnectApiTest, method testHierarchy:

@IsolatedTest
public void testHierarchy() {
    String rootLevel = "TRACE";
    String desired = "log4j.rootLogger=" + rootLevel + ", CONSOLE\n" + "log4j.logger.oorg.apache.zookeeper=WARN\n" + "log4j.logger.oorg.I0Itec.zkclient=INFO\n" + "log4j.logger.oorg.reflections.Reflection=INFO\n" + "log4j.logger.oorg.reflections=FATAL\n" + "log4j.logger.foo=WARN\n" + "log4j.logger.foo.bar=TRACE\n" + "log4j.logger.oorg.eclipse.jetty.util=DEBUG\n" + "log4j.logger.foo.bar.quux=DEBUG";
    KafkaConnectApiImpl client = new KafkaConnectApiImpl(vertx);
    OrderedProperties ops = new OrderedProperties();
    ops.addStringPairs(desired);
    assertEquals("TRACE", client.getEffectiveLevel("foo.bar", ops.asMap()));
    assertEquals("WARN", client.getEffectiveLevel("foo.lala", ops.asMap()));
    assertEquals(rootLevel, client.getEffectiveLevel("bar.faa", ops.asMap()));
    assertEquals("TRACE", client.getEffectiveLevel("org", ops.asMap()));
    assertEquals("DEBUG", client.getEffectiveLevel("oorg.eclipse.jetty.util.thread.strategy.EatWhatYouKill", ops.asMap()));
    assertEquals(rootLevel, client.getEffectiveLevel("oorg.eclipse.group.art", ops.asMap()));
}
Also used: OrderedProperties (io.strimzi.operator.common.model.OrderedProperties), Matchers.emptyString (org.hamcrest.Matchers.emptyString), Matchers.containsString (org.hamcrest.Matchers.containsString), IsolatedTest (io.strimzi.test.annotations.IsolatedTest)
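
The assertions above pin down the lookup contract: resolve a logger by walking up its dotted name to the nearest configured ancestor, falling back to the root level. A sketch of that contract in isolation (not Strimzi's actual getEffectiveLevel, which also works from the raw log4j.logger.*-keyed map):

import java.util.Map;

public class EffectiveLevelSketch {
    // Walk up the dotted logger name until a configured ancestor is found;
    // fall back to the root level when no ancestor is configured.
    static String effectiveLevel(String logger, Map<String, String> levels, String rootLevel) {
        for (String name = logger; !name.isEmpty(); ) {
            String level = levels.get(name);
            if (level != null) {
                return level;
            }
            int dot = name.lastIndexOf('.');
            name = dot < 0 ? "" : name.substring(0, dot);
        }
        return rootLevel;
    }

    public static void main(String[] args) {
        Map<String, String> levels = Map.of("foo", "WARN", "foo.bar", "TRACE");
        System.out.println(effectiveLevel("foo.bar.quux", levels, "INFO")); // TRACE
        System.out.println(effectiveLevel("foo.lala", levels, "INFO"));     // WARN
        System.out.println(effectiveLevel("bar.faa", levels, "INFO"));      // INFO (root)
    }
}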

Example 3 with IsolatedTest

Use of io.strimzi.test.annotations.IsolatedTest in project strimzi by strimzi.

From class KafkaConnectApiTest, method test:

@IsolatedTest
@SuppressWarnings({ "unchecked", "checkstyle:MethodLength", "checkstyle:NPathComplexity" })
public void test(VertxTestContext context) {
    KafkaConnectApi client = new KafkaConnectApiImpl(vertx);
    Checkpoint async = context.checkpoint();
    client.listConnectorPlugins(Reconciliation.DUMMY_RECONCILIATION, "localhost", PORT)
        .onComplete(context.succeeding(connectorPlugins -> context.verify(() -> {
        assertThat(connectorPlugins.size(), greaterThanOrEqualTo(2));
        ConnectorPlugin fileSink = connectorPlugins.stream()
                .filter(connector -> "org.apache.kafka.connect.file.FileStreamSinkConnector".equals(connector.getConnectorClass()))
                .findFirst().orElse(null);
        assertNotNull(fileSink);
        assertThat(fileSink.getType(), is("sink"));
        assertThat(fileSink.getVersion(), is(not(emptyString())));
        ConnectorPlugin fileSource = connectorPlugins.stream()
                .filter(connector -> "org.apache.kafka.connect.file.FileStreamSourceConnector".equals(connector.getConnectorClass()))
                .findFirst().orElse(null);
        assertNotNull(fileSource);
        assertThat(fileSource.getType(), is("source"));
        assertThat(fileSource.getVersion(), is(not(emptyString())));
    }))).compose(connectorPlugins -> client.list("localhost", PORT))
        .onComplete(context.succeeding(connectorNames -> context.verify(() -> assertThat(connectorNames, is(empty())))))
        .compose(connectorNames -> {
        JsonObject o = new JsonObject().put("connector.class", "FileStreamSource").put("tasks.max", "1").put("file", "/dev/null").put("topic", "my-topic");
        return client.createOrUpdatePutRequest(Reconciliation.DUMMY_RECONCILIATION, "localhost", PORT, "test", o);
    }).onComplete(context.succeeding()).compose(created -> {
        Promise<Map<String, Object>> promise = Promise.promise();
        Handler<Long> handler = new Handler<Long>() {

            @Override
            public void handle(Long timerId) {
                client.status(Reconciliation.DUMMY_RECONCILIATION, "localhost", PORT, "test").onComplete(result -> {
                    if (result.succeeded()) {
                        Map<String, Object> status = result.result();
                        if ("RUNNING".equals(((Map) status.getOrDefault("connector", emptyMap())).get("state"))) {
                            promise.complete(status);
                            return;
                        } else {
                            System.err.println(status);
                        }
                    } else {
                        result.cause().printStackTrace();
                    }
                    vertx.setTimer(1000, this);
                });
            }
        };
        vertx.setTimer(1000, handler);
        return promise.future();
    }).onComplete(context.succeeding(status -> context.verify(() -> {
        assertThat(status.get("name"), is("test"));
        Map<String, Object> connectorStatus = (Map<String, Object>) status.getOrDefault("connector", emptyMap());
        assertThat(connectorStatus.get("state"), is("RUNNING"));
        assertThat(connectorStatus.get("worker_id"), is("localhost:18083"));
        System.out.println("help " + connectorStatus);
        List<Map> tasks = (List<Map>) status.get("tasks");
        for (Map an : tasks) {
            assertThat(an.get("state"), is("RUNNING"));
            assertThat(an.get("worker_id"), is("localhost:18083"));
        }
    }))).compose(status -> client.getConnectorConfig(Reconciliation.DUMMY_RECONCILIATION, new BackOff(10), "localhost", PORT, "test"))
        .onComplete(context.succeeding(config -> context.verify(() -> {
        assertThat(config, is(TestUtils.map("connector.class", "FileStreamSource", "file", "/dev/null", "tasks.max", "1", "name", "test", "topic", "my-topic")));
    }))).compose(config -> client.getConnectorConfig(Reconciliation.DUMMY_RECONCILIATION, new BackOff(10), "localhost", PORT, "does-not-exist"))
        .onComplete(context.failing(error -> context.verify(() -> {
        assertThat(error, instanceOf(ConnectRestException.class));
        assertThat(((ConnectRestException) error).getStatusCode(), is(404));
    }))).recover(error -> Future.succeededFuture())
        // exercise the connector lifecycle endpoints in sequence: pause, resume, restart, restart task 0
        .compose(ignored -> client.pause("localhost", PORT, "test"))
        .onComplete(context.succeeding())
        .compose(ignored -> client.resume("localhost", PORT, "test"))
        .onComplete(context.succeeding())
        .compose(ignored -> client.restart("localhost", PORT, "test"))
        .onComplete(context.succeeding())
        .compose(ignored -> client.restartTask("localhost", PORT, "test", 0))
        .onComplete(context.succeeding())
        .compose(ignored -> {
        JsonObject o = new JsonObject().put("connector.class", "ThisConnectorDoesNotExist").put("tasks.max", "1").put("file", "/dev/null").put("topic", "my-topic");
        return client.createOrUpdatePutRequest(Reconciliation.DUMMY_RECONCILIATION, "localhost", PORT, "broken", o);
    }).onComplete(context.failing(error -> context.verify(() -> {
        assertThat(error, instanceOf(ConnectRestException.class));
        assertThat(error.getMessage(), containsString("Failed to find any class that implements Connector and which name matches ThisConnectorDoesNotExist"));
    }))).recover(e -> Future.succeededFuture())
        .compose(ignored -> {
        JsonObject o = new JsonObject().put("connector.class", "FileStreamSource").put("tasks.max", "dog").put("file", "/dev/null").put("topic", "my-topic");
        return client.createOrUpdatePutRequest(Reconciliation.DUMMY_RECONCILIATION, "localhost", PORT, "broken2", o);
    }).onComplete(context.failing(error -> context.verify(() -> {
        assertThat(error, instanceOf(ConnectRestException.class));
        assertThat(error.getMessage(), containsString("Invalid value dog for configuration tasks.max: Not a number of type INT"));
    }))).recover(e -> Future.succeededFuture())
        .compose(createResponse -> client.list("localhost", PORT))
        .onComplete(context.succeeding(connectorNames -> context.verify(() -> assertThat(connectorNames, is(singletonList("test"))))))
        // delete the connector, confirm the list is empty again, then check that deleting an unknown connector fails
        .compose(connectorNames -> client.delete(Reconciliation.DUMMY_RECONCILIATION, "localhost", PORT, "test"))
        .onComplete(context.succeeding())
        .compose(deletedConnector -> client.list("localhost", PORT))
        .onComplete(context.succeeding(connectorNames -> assertThat(connectorNames, is(empty()))))
        .compose(connectorNames -> client.delete(Reconciliation.DUMMY_RECONCILIATION, "localhost", PORT, "never-existed"))
        .onComplete(context.failing(error -> {
        assertThat(error, instanceOf(ConnectRestException.class));
        assertThat(error.getMessage(), containsString("Connector never-existed not found"));
        async.flag();
    }));
}
Also used: the same imports as in Example 1 (this method is defined in the same class, KafkaConnectApiTest).
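
The anonymous Handler<Long> in the middle of this test is a reschedule-until-done loop: it polls the connector status once a second until the connector reports RUNNING. The same pattern, extracted into a reusable helper as a sketch (pollUntil is not part of the Strimzi codebase):

import io.vertx.core.Future;
import io.vertx.core.Handler;
import io.vertx.core.Promise;
import io.vertx.core.Vertx;

import java.util.function.Predicate;
import java.util.function.Supplier;

public class PollUntilSketch {
    // Re-run an asynchronous check every intervalMs until its result satisfies
    // the predicate, then complete the returned future with that result.
    static <T> Future<T> pollUntil(Vertx vertx, Supplier<Future<T>> check,
                                   Predicate<T> done, long intervalMs) {
        Promise<T> promise = Promise.promise();
        vertx.setTimer(intervalMs, new Handler<Long>() {
            @Override
            public void handle(Long timerId) {
                check.get().onComplete(result -> {
                    if (result.succeeded() && done.test(result.result())) {
                        promise.complete(result.result());
                    } else {
                        vertx.setTimer(intervalMs, this); // try again later
                    }
                });
            }
        });
        return promise.future();
    }
}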

Example 4 with IsolatedTest

Use of io.strimzi.test.annotations.IsolatedTest in project strimzi by strimzi.

From class ColdBackupScriptIsolatedST, method backupAndRestore:

@IsolatedTest
void backupAndRestore(ExtensionContext context) {
    String clusterName = mapWithClusterNames.get(context.getDisplayName());
    String groupId = "my-group", newGroupId = "new-group";
    int firstBatchSize = 100, secondBatchSize = 10;
    String backupFilePath = USER_PATH + "/target/" + clusterName + ".zip";
    resourceManager.createResource(context, KafkaTemplates.kafkaPersistent(clusterName, 1, 1).editMetadata().withNamespace(INFRA_NAMESPACE).endMetadata().build());
    String clientsPodName = deployAndGetInternalClientsPodName(context);
    InternalKafkaClient clients = buildInternalClients(context, clientsPodName, groupId, firstBatchSize);
    // send messages and consume them
    clients.sendMessagesPlain();
    clients.receiveMessagesPlain();
    // save consumer group offsets
    Map<String, String> offsetsBeforeBackup = clients.getCurrentOffsets();
    assertThat("No offsets map before backup", offsetsBeforeBackup != null && offsetsBeforeBackup.size() > 0);
    // send additional messages
    clients.setMessageCount(secondBatchSize);
    clients.sendMessagesPlain();
    // backup command
    LOGGER.info("Running backup procedure for {}/{}", INFRA_NAMESPACE, clusterName);
    String[] backupCommand = new String[] { USER_PATH + "/../tools/cold-backup/run.sh", "backup", "-n", INFRA_NAMESPACE, "-c", clusterName, "-t", backupFilePath, "-y" };
    Exec.exec(Level.INFO, backupCommand);
    clusterOperator.unInstall();
    clusterOperator = clusterOperator.defaultInstallation().createInstallation().runInstallation();
    // restore command
    LOGGER.info("Running restore procedure for {}/{}", INFRA_NAMESPACE, clusterName);
    String[] restoreCommand = new String[] { USER_PATH + "/../tools/cold-backup/run.sh", "restore", "-n", INFRA_NAMESPACE, "-c", clusterName, "-s", backupFilePath, "-y" };
    Exec.exec(Level.INFO, restoreCommand);
    // check consumer group offsets
    KafkaUtils.waitForKafkaReady(clusterName);
    clientsPodName = deployAndGetInternalClientsPodName(context);
    clients = buildInternalClients(context, clientsPodName, groupId, secondBatchSize);
    Map<String, String> offsetsAfterRestore = clients.getCurrentOffsets();
    assertThat("Current consumer group offsets are not the same as before the backup", offsetsAfterRestore, is(offsetsBeforeBackup));
    // check consumer group recovery
    assertThat("Consumer group is not able to recover after restore", clients.receiveMessagesPlain(), is(secondBatchSize));
    // check total number of messages
    int batchSize = firstBatchSize + secondBatchSize;
    clients = clients.toBuilder().withConsumerGroupName(newGroupId).withMessageCount(batchSize).build();
    assertThat("A new consumer group is not able to get all messages", clients.receiveMessagesPlain(), is(batchSize));
}
Also used: InternalKafkaClient (io.strimzi.systemtest.kafkaclients.clients.InternalKafkaClient), IsolatedTest (io.strimzi.test.annotations.IsolatedTest)
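
The backup and restore steps reduce to invoking the cold-backup script with a namespace, cluster name, and archive path. A standalone sketch of the same two invocations using plain ProcessBuilder instead of the test's Exec helper; the script path, namespace, and archive location below are placeholder assumptions:

import java.io.IOException;
import java.nio.file.Paths;

public class ColdBackupSketch {
    // Run a command, streaming its output to this process's console,
    // and fail fast on a non-zero exit code.
    static void run(String... cmd) throws IOException, InterruptedException {
        Process p = new ProcessBuilder(cmd).inheritIO().start();
        if (p.waitFor() != 0) {
            throw new IOException("command failed: " + String.join(" ", cmd));
        }
    }

    public static void main(String[] args) throws Exception {
        String script = Paths.get("tools/cold-backup/run.sh").toAbsolutePath().toString(); // assumed checkout layout
        String archive = "/tmp/my-cluster.zip"; // assumed archive path
        // -n namespace, -c cluster, -t/-s target/source archive, -y skip confirmation prompts (flags as used in the test)
        run(script, "backup", "-n", "my-namespace", "-c", "my-cluster", "-t", archive, "-y");
        run(script, "restore", "-n", "my-namespace", "-c", "my-cluster", "-s", archive, "-y");
    }
}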

Example 5 with IsolatedTest

Use of io.strimzi.test.annotations.IsolatedTest in project strimzi by strimzi.

From class LogDumpScriptIsolatedST, method dumpPartitions:

@IsolatedTest
void dumpPartitions(ExtensionContext context) {
    String clusterName = mapWithClusterNames.get(context.getDisplayName());
    String groupId = "my-group";
    String partitionNumber = "0";
    String outPath = USER_PATH + "/target/" + clusterName;
    resourceManager.createResource(context, KafkaTemplates.kafkaPersistent(clusterName, 1, 1).editMetadata().withNamespace(INFRA_NAMESPACE).endMetadata().build());
    String clientsPodName = deployAndGetInternalClientsPodName(context);
    InternalKafkaClient clients = buildInternalClients(context, clientsPodName, groupId, 10);
    String topicName = mapWithTestTopics.get(context.getDisplayName());
    // send messages and consume them
    clients.sendMessagesPlain();
    clients.receiveMessagesPlain();
    // dry run
    LOGGER.info("Print partition segments from cluster {}/{}", INFRA_NAMESPACE, clusterName);
    String[] printCmd = new String[] { USER_PATH + "/../tools/log-dump/run.sh", "partition", "--namespace", INFRA_NAMESPACE, "--cluster", clusterName, "--topic", topicName, "--partition", partitionNumber, "--dry-run" };
    Exec.exec(Level.INFO, printCmd);
    assertThat("Output directory created in dry mode", Files.notExists(Paths.get(outPath)));
    // partition dump
    LOGGER.info("Dump topic partition from cluster {}/{}", INFRA_NAMESPACE, clusterName);
    String[] dumpPartCmd = new String[] { USER_PATH + "/../tools/log-dump/run.sh", "partition", "--namespace", INFRA_NAMESPACE, "--cluster", clusterName, "--topic", topicName, "--partition", partitionNumber, "--out-path", outPath };
    Exec.exec(Level.INFO, dumpPartCmd);
    assertThat("No output directory created", Files.exists(Paths.get(outPath)));
    String dumpPartFilePath = outPath + "/" + topicName + "/kafka-0-" + topicName + "-" + partitionNumber + "/00000000000000000000.log";
    assertThat("No partition file created", Files.exists(Paths.get(dumpPartFilePath)));
    assertThat("Empty partition file", new File(dumpPartFilePath).length() > 0);
    // __consumer_offsets dump
    LOGGER.info("Dump consumer offsets partition from cluster {}/{}", INFRA_NAMESPACE, clusterName);
    String[] dumpCgCmd = new String[] { USER_PATH + "/../tools/log-dump/run.sh", "cg_offsets", "--namespace", INFRA_NAMESPACE, "--cluster", clusterName, "--group-id", groupId, "--out-path", outPath };
    Exec.exec(Level.INFO, dumpCgCmd);
    assertThat("No output directory created", Files.exists(Paths.get(outPath)));
    String dumpCgFilePath = outPath + "/__consumer_offsets/kafka-0-__consumer_offsets-12/00000000000000000000.log";
    assertThat("No partition file created", Files.exists(Paths.get(dumpCgFilePath)));
    assertThat("Empty partition file", new File(dumpCgFilePath).length() > 0);
}
Also used: InternalKafkaClient (io.strimzi.systemtest.kafkaclients.clients.InternalKafkaClient), File (java.io.File), IsolatedTest (io.strimzi.test.annotations.IsolatedTest)

Aggregations

IsolatedTest (io.strimzi.test.annotations.IsolatedTest): 16 uses
EnvVar (io.fabric8.kubernetes.api.model.EnvVar): 6 uses
LabelSelector (io.fabric8.kubernetes.api.model.LabelSelector): 6 uses
Pod (io.fabric8.kubernetes.api.model.Pod): 6 uses
PodBuilder (io.fabric8.kubernetes.api.model.PodBuilder): 6 uses
KafkaTopic (io.strimzi.api.kafka.model.KafkaTopic): 6 uses
OrderedProperties (io.strimzi.operator.common.model.OrderedProperties): 6 uses
KafkaClients (io.strimzi.systemtest.kafkaclients.internalClients.KafkaClients): 6 uses
KafkaClientsBuilder (io.strimzi.systemtest.kafkaclients.internalClients.KafkaClientsBuilder): 6 uses
SetupClusterOperator (io.strimzi.systemtest.resources.operator.SetupClusterOperator): 6 uses
File (java.io.File): 6 uses
ArrayList (java.util.ArrayList): 6 uses
List (java.util.List): 6 uses
Map (java.util.Map): 6 uses
Random (java.util.Random): 6 uses
Tag (org.junit.jupiter.api.Tag): 6 uses
Matchers.containsString (org.hamcrest.Matchers.containsString): 5 uses
Matchers.emptyString (org.hamcrest.Matchers.emptyString): 5 uses
ConnectorPlugin (io.strimzi.api.kafka.model.connect.ConnectorPlugin): 4 uses
BackOff (io.strimzi.operator.common.BackOff): 4 uses