Use of io.strimzi.test.annotations.IsolatedTest in project strimzi by strimzi.
The class KafkaConnectApiTest, method testChangeLoggers.
@IsolatedTest
public void testChangeLoggers(VertxTestContext context) throws InterruptedException {
String desired = "log4j.rootLogger=TRACE, CONSOLE\n" + "log4j.logger.org.apache.zookeeper=WARN\n" + "log4j.logger.org.I0Itec.zkclient=INFO\n" + "log4j.logger.org.reflections.Reflection=INFO\n" + "log4j.logger.org.reflections=FATAL\n" + "log4j.logger.foo=WARN\n" + "log4j.logger.foo.bar=TRACE\n" + "log4j.logger.foo.bar.quux=DEBUG";
KafkaConnectApi client = new KafkaConnectApiImpl(vertx);
Checkpoint async = context.checkpoint();
OrderedProperties ops = new OrderedProperties();
ops.addStringPairs(desired);
client.updateConnectLoggers(Reconciliation.DUMMY_RECONCILIATION, "localhost", PORT, desired, ops)
    .onComplete(context.succeeding(wasChanged -> context.verify(() -> assertEquals(true, wasChanged))))
    .compose(a -> client.listConnectLoggers(Reconciliation.DUMMY_RECONCILIATION, "localhost", PORT)
        .onComplete(context.succeeding(map -> context.verify(() -> {
assertThat(map.get("root"), is("TRACE"));
assertThat(map.get("org.apache.zookeeper"), is("WARN"));
assertThat(map.get("org.I0Itec.zkclient"), is("INFO"));
assertThat(map.get("org.reflections"), is("FATAL"));
assertThat(map.get("org.reflections.Reflection"), is("INFO"));
assertThat(map.get("org.reflections.Reflection"), is("INFO"));
assertThat(map.get("foo"), is("WARN"));
assertThat(map.get("foo.bar"), is("TRACE"));
assertThat(map.get("foo.bar.quux"), is("DEBUG"));
}))))
    .compose(a -> client.updateConnectLoggers(Reconciliation.DUMMY_RECONCILIATION, "localhost", PORT, desired, ops)
        .onComplete(context.succeeding(wasChanged -> context.verify(() -> {
assertEquals(false, wasChanged);
async.flag();
}))));
}
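The test feeds a log4j.properties-style string to updateConnectLoggers and then expects listConnectLoggers to return a map from logger name to level, with the root logger reported under the key "root". The standalone sketch below illustrates that mapping; it is not the Strimzi implementation, and the class and method names are made up for the example.
import java.util.LinkedHashMap;
import java.util.Map;

// Illustrative only: parses a log4j.properties-style string into the
// logger -> level map shape the test asserts on.
public class Log4jLoggerParser {
    static Map<String, String> toLoggerLevels(String log4jConfig) {
        Map<String, String> levels = new LinkedHashMap<>();
        for (String line : log4jConfig.split("\n")) {
            String[] pair = line.split("=", 2);
            if (pair.length != 2) {
                continue;
            }
            String key = pair[0].trim();
            // "TRACE, CONSOLE" -> keep only the level, drop appender references
            String level = pair[1].split(",")[0].trim();
            if (key.equals("log4j.rootLogger")) {
                levels.put("root", level);
            } else if (key.startsWith("log4j.logger.")) {
                levels.put(key.substring("log4j.logger.".length()), level);
            }
        }
        return levels;
    }

    public static void main(String[] args) {
        String desired = "log4j.rootLogger=TRACE, CONSOLE\n"
                + "log4j.logger.foo.bar=DEBUG";
        System.out.println(toLoggerLevels(desired)); // {root=TRACE, foo.bar=DEBUG}
    }
}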
Use of io.strimzi.test.annotations.IsolatedTest in project strimzi by strimzi.
The class KafkaConnectApiTest, method testHierarchy.
@IsolatedTest
public void testHierarchy() {
String rootLevel = "TRACE";
String desired = "log4j.rootLogger=" + rootLevel + ", CONSOLE\n" + "log4j.logger.oorg.apache.zookeeper=WARN\n" + "log4j.logger.oorg.I0Itec.zkclient=INFO\n" + "log4j.logger.oorg.reflections.Reflection=INFO\n" + "log4j.logger.oorg.reflections=FATAL\n" + "log4j.logger.foo=WARN\n" + "log4j.logger.foo.bar=TRACE\n" + "log4j.logger.oorg.eclipse.jetty.util=DEBUG\n" + "log4j.logger.foo.bar.quux=DEBUG";
KafkaConnectApiImpl client = new KafkaConnectApiImpl(vertx);
OrderedProperties ops = new OrderedProperties();
ops.addStringPairs(desired);
assertEquals("TRACE", client.getEffectiveLevel("foo.bar", ops.asMap()));
assertEquals("WARN", client.getEffectiveLevel("foo.lala", ops.asMap()));
assertEquals(rootLevel, client.getEffectiveLevel("bar.faa", ops.asMap()));
assertEquals("TRACE", client.getEffectiveLevel("org", ops.asMap()));
assertEquals("DEBUG", client.getEffectiveLevel("oorg.eclipse.jetty.util.thread.strategy.EatWhatYouKill", ops.asMap()));
assertEquals(rootLevel, client.getEffectiveLevel("oorg.eclipse.group.art", ops.asMap()));
}
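getEffectiveLevel applies the usual log4j hierarchy rule: a logger without an explicit level inherits from its nearest dot-separated ancestor, and ultimately from the root logger. A minimal, self-contained sketch of that rule follows, assuming a map keyed by plain logger names plus "root"; it is not the Strimzi implementation, which also has to deal with the "log4j.logger." key prefixes.
import java.util.Map;

// Sketch of log4j-style effective-level resolution: walk up the dotted
// logger name until a configured ancestor is found, else use the root level.
public class EffectiveLevel {
    static String effectiveLevel(String logger, Map<String, String> levels) {
        String name = logger;
        while (!name.isEmpty()) {
            String level = levels.get(name);
            if (level != null) {
                return level;
            }
            int lastDot = name.lastIndexOf('.');
            name = lastDot < 0 ? "" : name.substring(0, lastDot);
        }
        return levels.getOrDefault("root", "INFO");
    }

    public static void main(String[] args) {
        Map<String, String> levels = Map.of("root", "TRACE", "foo", "WARN", "foo.bar", "TRACE");
        System.out.println(effectiveLevel("foo.lala", levels)); // WARN, inherited from "foo"
        System.out.println(effectiveLevel("bar.faa", levels));  // TRACE, falls back to root
    }
}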
Use of io.strimzi.test.annotations.IsolatedTest in project strimzi by strimzi.
The class KafkaConnectApiTest, method test.
@IsolatedTest
@SuppressWarnings({ "unchecked", "checkstyle:MethodLength", "checkstyle:NPathComplexity" })
public void test(VertxTestContext context) {
KafkaConnectApi client = new KafkaConnectApiImpl(vertx);
Checkpoint async = context.checkpoint();
client.listConnectorPlugins(Reconciliation.DUMMY_RECONCILIATION, "localhost", PORT)
    .onComplete(context.succeeding(connectorPlugins -> context.verify(() -> {
assertThat(connectorPlugins.size(), greaterThanOrEqualTo(2));
ConnectorPlugin fileSink = connectorPlugins.stream()
        .filter(connector -> "org.apache.kafka.connect.file.FileStreamSinkConnector".equals(connector.getConnectorClass()))
        .findFirst()
        .orElse(null);
assertNotNull(fileSink);
assertThat(fileSink.getType(), is("sink"));
assertThat(fileSink.getVersion(), is(not(emptyString())));
ConnectorPlugin fileSource = connectorPlugins.stream()
        .filter(connector -> "org.apache.kafka.connect.file.FileStreamSourceConnector".equals(connector.getConnectorClass()))
        .findFirst()
        .orElse(null);
assertNotNull(fileSource);
assertThat(fileSource.getType(), is("source"));
assertThat(fileSource.getVersion(), is(not(emptyString())));
})))
    .compose(connectorPlugins -> client.list("localhost", PORT))
    .onComplete(context.succeeding(connectorNames -> context.verify(() -> assertThat(connectorNames, is(empty())))))
    .compose(connectorNames -> {
JsonObject o = new JsonObject().put("connector.class", "FileStreamSource").put("tasks.max", "1").put("file", "/dev/null").put("topic", "my-topic");
return client.createOrUpdatePutRequest(Reconciliation.DUMMY_RECONCILIATION, "localhost", PORT, "test", o);
}).onComplete(context.succeeding()).compose(created -> {
Promise<Map<String, Object>> promise = Promise.promise();
Handler<Long> handler = new Handler<Long>() {
@Override
public void handle(Long timerId) {
client.status(Reconciliation.DUMMY_RECONCILIATION, "localhost", PORT, "test").onComplete(result -> {
if (result.succeeded()) {
Map<String, Object> status = result.result();
if ("RUNNING".equals(((Map) status.getOrDefault("connector", emptyMap())).get("state"))) {
promise.complete(status);
return;
} else {
System.err.println(status);
}
} else {
result.cause().printStackTrace();
}
vertx.setTimer(1000, this);
});
}
};
vertx.setTimer(1000, handler);
return promise.future();
}).onComplete(context.succeeding(status -> context.verify(() -> {
assertThat(status.get("name"), is("test"));
Map<String, Object> connectorStatus = (Map<String, Object>) status.getOrDefault("connector", emptyMap());
assertThat(connectorStatus.get("state"), is("RUNNING"));
assertThat(connectorStatus.get("worker_id"), is("localhost:18083"));
System.out.println("help " + connectorStatus);
List<Map> tasks = (List<Map>) status.get("tasks");
for (Map an : tasks) {
assertThat(an.get("state"), is("RUNNING"));
assertThat(an.get("worker_id"), is("localhost:18083"));
}
})))
    .compose(status -> client.getConnectorConfig(Reconciliation.DUMMY_RECONCILIATION, new BackOff(10), "localhost", PORT, "test"))
    .onComplete(context.succeeding(config -> context.verify(() -> {
assertThat(config, is(TestUtils.map("connector.class", "FileStreamSource", "file", "/dev/null", "tasks.max", "1", "name", "test", "topic", "my-topic")));
})))
    .compose(config -> client.getConnectorConfig(Reconciliation.DUMMY_RECONCILIATION, new BackOff(10), "localhost", PORT, "does-not-exist"))
    .onComplete(context.failing(error -> context.verify(() -> {
assertThat(error, instanceOf(ConnectRestException.class));
assertThat(((ConnectRestException) error).getStatusCode(), is(404));
})))
    .recover(error -> Future.succeededFuture())
    .compose(ignored -> client.pause("localhost", PORT, "test"))
    .onComplete(context.succeeding())
    .compose(ignored -> client.resume("localhost", PORT, "test"))
    .onComplete(context.succeeding())
    .compose(ignored -> client.restart("localhost", PORT, "test"))
    .onComplete(context.succeeding())
    .compose(ignored -> client.restartTask("localhost", PORT, "test", 0))
    .onComplete(context.succeeding())
    .compose(ignored -> {
JsonObject o = new JsonObject().put("connector.class", "ThisConnectorDoesNotExist").put("tasks.max", "1").put("file", "/dev/null").put("topic", "my-topic");
return client.createOrUpdatePutRequest(Reconciliation.DUMMY_RECONCILIATION, "localhost", PORT, "broken", o);
}).onComplete(context.failing(error -> context.verify(() -> {
assertThat(error, instanceOf(ConnectRestException.class));
assertThat(error.getMessage(), containsString("Failed to find any class that implements Connector and which name matches ThisConnectorDoesNotExist"));
}))).recover(e -> Future.succeededFuture()).compose(ignored -> {
JsonObject o = new JsonObject().put("connector.class", "FileStreamSource").put("tasks.max", "dog").put("file", "/dev/null").put("topic", "my-topic");
return client.createOrUpdatePutRequest(Reconciliation.DUMMY_RECONCILIATION, "localhost", PORT, "broken2", o);
}).onComplete(context.failing(error -> context.verify(() -> {
assertThat(error, instanceOf(ConnectRestException.class));
assertThat(error.getMessage(), containsString("Invalid value dog for configuration tasks.max: Not a number of type INT"));
})))
    .recover(e -> Future.succeededFuture())
    .compose(createResponse -> client.list("localhost", PORT))
    .onComplete(context.succeeding(connectorNames -> context.verify(() -> assertThat(connectorNames, is(singletonList("test"))))))
    .compose(connectorNames -> client.delete(Reconciliation.DUMMY_RECONCILIATION, "localhost", PORT, "test"))
    .onComplete(context.succeeding())
    .compose(deletedConnector -> client.list("localhost", PORT))
    .onComplete(context.succeeding(connectorNames -> assertThat(connectorNames, is(empty()))))
    .compose(connectorNames -> client.delete(Reconciliation.DUMMY_RECONCILIATION, "localhost", PORT, "never-existed"))
    .onComplete(context.failing(error -> {
assertThat(error, instanceOf(ConnectRestException.class));
assertThat(error.getMessage(), containsString("Connector never-existed not found"));
async.flag();
}));
}
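The KafkaConnectApi client exercised here is a thin wrapper around the Kafka Connect REST API: createOrUpdatePutRequest corresponds to PUT /connectors/{name}/config, list to GET /connectors, status to GET /connectors/{name}/status, and so on. The sketch below shows the raw HTTP call behind the create-or-update step using java.net.http; the host, port and connector name are placeholders taken from this test, not part of the Strimzi code.
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

// Rough sketch of the REST call wrapped by createOrUpdatePutRequest:
// PUT /connectors/{name}/config with the connector configuration as JSON.
public class ConnectRestSketch {
    public static void main(String[] args) throws Exception {
        String config = "{\"connector.class\":\"FileStreamSource\","
                + "\"tasks.max\":\"1\",\"file\":\"/dev/null\",\"topic\":\"my-topic\"}";
        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("http://localhost:18083/connectors/test/config"))
                .header("Content-Type", "application/json")
                .PUT(HttpRequest.BodyPublishers.ofString(config))
                .build();
        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        // Typically 201 when the connector is created, 200 when an existing one is updated
        System.out.println(response.statusCode() + " " + response.body());
    }
}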
Use of io.strimzi.test.annotations.IsolatedTest in project strimzi by strimzi.
The class ColdBackupScriptIsolatedST, method backupAndRestore.
@IsolatedTest
void backupAndRestore(ExtensionContext context) {
String clusterName = mapWithClusterNames.get(context.getDisplayName());
String groupId = "my-group", newGroupId = "new-group";
int firstBatchSize = 100, secondBatchSize = 10;
String backupFilePath = USER_PATH + "/target/" + clusterName + ".zip";
resourceManager.createResource(context, KafkaTemplates.kafkaPersistent(clusterName, 1, 1).editMetadata().withNamespace(INFRA_NAMESPACE).endMetadata().build());
String clientsPodName = deployAndGetInternalClientsPodName(context);
InternalKafkaClient clients = buildInternalClients(context, clientsPodName, groupId, firstBatchSize);
// send messages and consume them
clients.sendMessagesPlain();
clients.receiveMessagesPlain();
// save consumer group offsets
Map<String, String> offsetsBeforeBackup = clients.getCurrentOffsets();
assertThat("No offsets map before backup", offsetsBeforeBackup != null && offsetsBeforeBackup.size() > 0);
// send additional messages
clients.setMessageCount(secondBatchSize);
clients.sendMessagesPlain();
// backup command
LOGGER.info("Running backup procedure for {}/{}", INFRA_NAMESPACE, clusterName);
String[] backupCommand = new String[] { USER_PATH + "/../tools/cold-backup/run.sh", "backup", "-n", INFRA_NAMESPACE, "-c", clusterName, "-t", backupFilePath, "-y" };
Exec.exec(Level.INFO, backupCommand);
clusterOperator.unInstall();
clusterOperator = clusterOperator.defaultInstallation().createInstallation().runInstallation();
// restore command
LOGGER.info("Running restore procedure for {}/{}", INFRA_NAMESPACE, clusterName);
String[] restoreCommand = new String[] { USER_PATH + "/../tools/cold-backup/run.sh", "restore", "-n", INFRA_NAMESPACE, "-c", clusterName, "-s", backupFilePath, "-y" };
Exec.exec(Level.INFO, restoreCommand);
// check consumer group offsets
KafkaUtils.waitForKafkaReady(clusterName);
clientsPodName = deployAndGetInternalClientsPodName(context);
clients = buildInternalClients(context, clientsPodName, groupId, secondBatchSize);
Map<String, String> offsetsAfterRestore = clients.getCurrentOffsets();
assertThat("Current consumer group offsets are not the same as before the backup", offsetsAfterRestore, is(offsetsBeforeBackup));
// check consumer group recovery
assertThat("Consumer group is not able to recover after restore", clients.receiveMessagesPlain(), is(secondBatchSize));
// check total number of messages
int batchSize = firstBatchSize + secondBatchSize;
clients = clients.toBuilder().withConsumerGroupName(newGroupId).withMessageCount(batchSize).build();
assertThat("A new consumer group is not able to get all messages", clients.receiveMessagesPlain(), is(batchSize));
}
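Exec.exec is a Strimzi test helper that runs the given command array and fails the test on a non-zero exit code. A rough equivalent with plain ProcessBuilder is sketched below; the script path, namespace, cluster name and target path are placeholders mirroring the backup command in the test above.
import java.io.IOException;
import java.nio.file.Path;

// Minimal sketch of running the cold-backup script without the Strimzi Exec helper.
public class ColdBackupRunner {
    public static void main(String[] args) throws IOException, InterruptedException {
        Path script = Path.of("tools/cold-backup/run.sh");
        ProcessBuilder pb = new ProcessBuilder(
                script.toString(), "backup",
                "-n", "infra-namespace",
                "-c", "my-cluster",
                "-t", "target/my-cluster.zip",
                "-y");
        pb.inheritIO(); // stream the script output to the console
        int exitCode = pb.start().waitFor();
        if (exitCode != 0) {
            throw new IllegalStateException("Backup script failed with exit code " + exitCode);
        }
    }
}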
Use of io.strimzi.test.annotations.IsolatedTest in project strimzi by strimzi.
The class LogDumpScriptIsolatedST, method dumpPartitions.
@IsolatedTest
void dumpPartitions(ExtensionContext context) {
String clusterName = mapWithClusterNames.get(context.getDisplayName());
String groupId = "my-group";
String partitionNumber = "0";
String outPath = USER_PATH + "/target/" + clusterName;
resourceManager.createResource(context, KafkaTemplates.kafkaPersistent(clusterName, 1, 1).editMetadata().withNamespace(INFRA_NAMESPACE).endMetadata().build());
String clientsPodName = deployAndGetInternalClientsPodName(context);
InternalKafkaClient clients = buildInternalClients(context, clientsPodName, groupId, 10);
String topicName = mapWithTestTopics.get(context.getDisplayName());
// send messages and consume them
clients.sendMessagesPlain();
clients.receiveMessagesPlain();
// dry run
LOGGER.info("Print partition segments from cluster {}/{}", INFRA_NAMESPACE, clusterName);
String[] printCmd = new String[] { USER_PATH + "/../tools/log-dump/run.sh", "partition", "--namespace", INFRA_NAMESPACE, "--cluster", clusterName, "--topic", topicName, "--partition", partitionNumber, "--dry-run" };
Exec.exec(Level.INFO, printCmd);
assertThat("Output directory created in dry mode", Files.notExists(Paths.get(outPath)));
// partition dump
LOGGER.info("Dump topic partition from cluster {}/{}", INFRA_NAMESPACE, clusterName);
String[] dumpPartCmd = new String[] { USER_PATH + "/../tools/log-dump/run.sh", "partition", "--namespace", INFRA_NAMESPACE, "--cluster", clusterName, "--topic", topicName, "--partition", partitionNumber, "--out-path", outPath };
Exec.exec(Level.INFO, dumpPartCmd);
assertThat("No output directory created", Files.exists(Paths.get(outPath)));
String dumpPartFilePath = outPath + "/" + topicName + "/kafka-0-" + topicName + "-" + partitionNumber + "/00000000000000000000.log";
assertThat("No partition file created", Files.exists(Paths.get(dumpPartFilePath)));
assertThat("Empty partition file", new File(dumpPartFilePath).length() > 0);
// __consumer_offsets dump
LOGGER.info("Dump consumer offsets partition from cluster {}/{}", INFRA_NAMESPACE, clusterName);
String[] dumpCgCmd = new String[] { USER_PATH + "/../tools/log-dump/run.sh", "cg_offsets", "--namespace", INFRA_NAMESPACE, "--cluster", clusterName, "--group-id", groupId, "--out-path", outPath };
Exec.exec(Level.INFO, dumpCgCmd);
assertThat("No output directory created", Files.exists(Paths.get(outPath)));
String dumpCgFilePath = outPath + "/__consumer_offsets/kafka-0-__consumer_offsets-12/00000000000000000000.log";
assertThat("No partition file created", Files.exists(Paths.get(dumpCgFilePath)));
assertThat("Empty partition file", new File(dumpCgFilePath).length() > 0);
}
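The hardcoded __consumer_offsets-12 path is not arbitrary: Kafka stores a group's offsets in partition abs(groupId.hashCode()) % offsets.topic.num.partitions, and with the broker default of 50 partitions the group "my-group" maps to partition 12. The small sketch below reproduces that calculation; it is illustrative only (Kafka's own Utils.abs additionally guards against Integer.MIN_VALUE).
// Why the test expects __consumer_offsets partition 12 for group "my-group".
public class ConsumerOffsetsPartition {
    static int partitionFor(String groupId, int offsetsTopicPartitions) {
        return Math.abs(groupId.hashCode()) % offsetsTopicPartitions;
    }

    public static void main(String[] args) {
        System.out.println(partitionFor("my-group", 50)); // 12
    }
}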