Use of io.strimzi.operator.common.BackOff in project strimzi by strimzi.
In class KafkaConnectAssemblyOperatorMockTest, the method createConnectCluster:
private Future<Void> createConnectCluster(VertxTestContext context, KafkaConnectApi kafkaConnectApi, boolean reconciliationPaused) {
    PlatformFeaturesAvailability pfa = new PlatformFeaturesAvailability(true, KubernetesVersion.V1_21);
    ResourceOperatorSupplier supplier = new ResourceOperatorSupplier(vertx, this.mockClient,
            new ZookeeperLeaderFinder(vertx,
                // Retry up to 3 times (4 attempts), with overall max delay of 35000ms
                () -> new BackOff(5_000, 2, 4)),
            new DefaultAdminClientProvider(), new DefaultZookeeperScalerProvider(),
            ResourceUtils.metricsProvider(), pfa, FeatureGates.NONE, 60_000L);
    ClusterOperatorConfig config = ResourceUtils.dummyClusterOperatorConfig(VERSIONS);
    this.kco = new KafkaConnectAssemblyOperator(vertx, pfa, supplier, config, foo -> kafkaConnectApi);
    Promise<Void> created = Promise.promise();
    LOGGER.info("Reconciling initially -> create");
    kco.reconcile(new Reconciliation("test-trigger", KafkaConnect.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME))
        .onComplete(context.succeeding(v -> context.verify(() -> {
            if (!reconciliationPaused) {
                assertThat(mockClient.apps().deployments().inNamespace(NAMESPACE).withName(KafkaConnectResources.deploymentName(CLUSTER_NAME)).get(), is(notNullValue()));
                assertThat(mockClient.configMaps().inNamespace(NAMESPACE).withName(KafkaConnectResources.metricsAndLogConfigMapName(CLUSTER_NAME)).get(), is(notNullValue()));
                assertThat(mockClient.services().inNamespace(NAMESPACE).withName(KafkaConnectResources.serviceName(CLUSTER_NAME)).get(), is(notNullValue()));
                assertThat(mockClient.policy().v1().podDisruptionBudget().inNamespace(NAMESPACE).withName(KafkaConnectResources.deploymentName(CLUSTER_NAME)).get(), is(notNullValue()));
            } else {
                assertThat(mockClient.apps().deployments().inNamespace(NAMESPACE).withName(KafkaConnectResources.deploymentName(CLUSTER_NAME)).get(), is(nullValue()));
                verify(mockClient, never()).resources(KafkaConnect.class);
            }
            created.complete();
        })));
    return created.future();
}
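For reference, the delay arithmetic behind new BackOff(5_000, 2, 4) matches the inline comment above: with a first delay of 5000 ms, a factor of 2, and 4 attempts, the waits are 0, 5000, 10000 and 20000 ms, 35000 ms in total. A minimal sketch of that calculation follows; the meaning of the constructor arguments is inferred from the comment, not taken from the BackOff source.

public class BackOffDelayDemo {
    public static void main(String[] args) {
        // Assumed meaning of the constructor arguments: (firstDelayMs, factor, maxAttempts)
        long firstDelayMs = 5_000;
        int factor = 2;
        int maxAttempts = 4;

        long totalDelayMs = 0;
        long delayMs = 0; // the first attempt happens immediately
        for (int attempt = 1; attempt <= maxAttempts; attempt++) {
            System.out.printf("attempt %d: wait %d ms%n", attempt, delayMs);
            totalDelayMs += delayMs;
            delayMs = delayMs == 0 ? firstDelayMs : delayMs * factor;
        }
        System.out.println("maximum total delay: " + totalDelayMs + " ms"); // prints 35000 ms
    }
}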
Use of io.strimzi.operator.common.BackOff in project strimzi by strimzi.
In class KafkaConnectApiTest, the method test:
@IsolatedTest
@SuppressWarnings({ "unchecked", "checkstyle:MethodLength", "checkstyle:NPathComplexity" })
public void test(VertxTestContext context) {
    KafkaConnectApi client = new KafkaConnectApiImpl(vertx);
    Checkpoint async = context.checkpoint();
    // The Connect REST API should expose at least the file source and sink plugins
    client.listConnectorPlugins(Reconciliation.DUMMY_RECONCILIATION, "localhost", PORT)
        .onComplete(context.succeeding(connectorPlugins -> context.verify(() -> {
            assertThat(connectorPlugins.size(), greaterThanOrEqualTo(2));
            ConnectorPlugin fileSink = connectorPlugins.stream()
                    .filter(connector -> "org.apache.kafka.connect.file.FileStreamSinkConnector".equals(connector.getConnectorClass()))
                    .findFirst().orElse(null);
            assertNotNull(fileSink);
            assertThat(fileSink.getType(), is("sink"));
            assertThat(fileSink.getVersion(), is(not(emptyString())));
            ConnectorPlugin fileSource = connectorPlugins.stream()
                    .filter(connector -> "org.apache.kafka.connect.file.FileStreamSourceConnector".equals(connector.getConnectorClass()))
                    .findFirst().orElse(null);
            assertNotNull(fileSource);
            assertThat(fileSource.getType(), is("source"));
            assertThat(fileSource.getVersion(), is(not(emptyString())));
        })))
        // No connectors exist yet
        .compose(connectorPlugins -> client.list("localhost", PORT))
        .onComplete(context.succeeding(connectorNames -> context.verify(() -> assertThat(connectorNames, is(empty())))))
        // Create a FileStreamSource connector named "test"
        .compose(connectorNames -> {
            JsonObject o = new JsonObject()
                    .put("connector.class", "FileStreamSource")
                    .put("tasks.max", "1")
                    .put("file", "/dev/null")
                    .put("topic", "my-topic");
            return client.createOrUpdatePutRequest(Reconciliation.DUMMY_RECONCILIATION, "localhost", PORT, "test", o);
        })
        .onComplete(context.succeeding())
        // Poll the connector status every second until it reports RUNNING
        .compose(created -> {
            Promise<Map<String, Object>> promise = Promise.promise();
            Handler<Long> handler = new Handler<Long>() {
                @Override
                public void handle(Long timerId) {
                    client.status(Reconciliation.DUMMY_RECONCILIATION, "localhost", PORT, "test").onComplete(result -> {
                        if (result.succeeded()) {
                            Map<String, Object> status = result.result();
                            if ("RUNNING".equals(((Map) status.getOrDefault("connector", emptyMap())).get("state"))) {
                                promise.complete(status);
                                return;
                            } else {
                                System.err.println(status);
                            }
                        } else {
                            result.cause().printStackTrace();
                        }
                        vertx.setTimer(1000, this);
                    });
                }
            };
            vertx.setTimer(1000, handler);
            return promise.future();
        })
        .onComplete(context.succeeding(status -> context.verify(() -> {
            assertThat(status.get("name"), is("test"));
            Map<String, Object> connectorStatus = (Map<String, Object>) status.getOrDefault("connector", emptyMap());
            assertThat(connectorStatus.get("state"), is("RUNNING"));
            assertThat(connectorStatus.get("worker_id"), is("localhost:18083"));
            System.out.println("help " + connectorStatus);
            List<Map> tasks = (List<Map>) status.get("tasks");
            for (Map an : tasks) {
                assertThat(an.get("state"), is("RUNNING"));
                assertThat(an.get("worker_id"), is("localhost:18083"));
            }
        })))
        // Fetch the connector configuration (the BackOff governs retries); an unknown connector gives a 404
        .compose(status -> client.getConnectorConfig(Reconciliation.DUMMY_RECONCILIATION, new BackOff(10), "localhost", PORT, "test"))
        .onComplete(context.succeeding(config -> context.verify(() -> {
            assertThat(config, is(TestUtils.map("connector.class", "FileStreamSource", "file", "/dev/null", "tasks.max", "1", "name", "test", "topic", "my-topic")));
        })))
        .compose(config -> client.getConnectorConfig(Reconciliation.DUMMY_RECONCILIATION, new BackOff(10), "localhost", PORT, "does-not-exist"))
        .onComplete(context.failing(error -> context.verify(() -> {
            assertThat(error, instanceOf(ConnectRestException.class));
            assertThat(((ConnectRestException) error).getStatusCode(), is(404));
        })))
        .recover(error -> Future.succeededFuture())
        // Lifecycle operations on the connector and its task
        .compose(ignored -> client.pause("localhost", PORT, "test"))
        .onComplete(context.succeeding())
        .compose(ignored -> client.resume("localhost", PORT, "test"))
        .onComplete(context.succeeding())
        .compose(ignored -> client.restart("localhost", PORT, "test"))
        .onComplete(context.succeeding())
        .compose(ignored -> client.restartTask("localhost", PORT, "test", 0))
        .onComplete(context.succeeding())
        // Creating a connector with an unknown class should fail
        .compose(ignored -> {
            JsonObject o = new JsonObject()
                    .put("connector.class", "ThisConnectorDoesNotExist")
                    .put("tasks.max", "1")
                    .put("file", "/dev/null")
                    .put("topic", "my-topic");
            return client.createOrUpdatePutRequest(Reconciliation.DUMMY_RECONCILIATION, "localhost", PORT, "broken", o);
        })
        .onComplete(context.failing(error -> context.verify(() -> {
            assertThat(error, instanceOf(ConnectRestException.class));
            assertThat(error.getMessage(), containsString("Failed to find any class that implements Connector and which name matches ThisConnectorDoesNotExist"));
        })))
        .recover(e -> Future.succeededFuture())
        // An invalid tasks.max value should be rejected
        .compose(ignored -> {
            JsonObject o = new JsonObject()
                    .put("connector.class", "FileStreamSource")
                    .put("tasks.max", "dog")
                    .put("file", "/dev/null")
                    .put("topic", "my-topic");
            return client.createOrUpdatePutRequest(Reconciliation.DUMMY_RECONCILIATION, "localhost", PORT, "broken2", o);
        })
        .onComplete(context.failing(error -> context.verify(() -> {
            assertThat(error, instanceOf(ConnectRestException.class));
            assertThat(error.getMessage(), containsString("Invalid value dog for configuration tasks.max: Not a number of type INT"));
        })))
        .recover(e -> Future.succeededFuture())
        // Only the "test" connector should exist; delete it, then check that deleting an unknown connector fails
        .compose(createResponse -> client.list("localhost", PORT))
        .onComplete(context.succeeding(connectorNames -> context.verify(() -> assertThat(connectorNames, is(singletonList("test"))))))
        .compose(connectorNames -> client.delete(Reconciliation.DUMMY_RECONCILIATION, "localhost", PORT, "test"))
        .onComplete(context.succeeding())
        .compose(deletedConnector -> client.list("localhost", PORT))
        .onComplete(context.succeeding(connectorNames -> assertThat(connectorNames, is(empty()))))
        .compose(connectorNames -> client.delete(Reconciliation.DUMMY_RECONCILIATION, "localhost", PORT, "never-existed"))
        .onComplete(context.failing(error -> {
            assertThat(error, instanceOf(ConnectRestException.class));
            assertThat(error.getMessage(), containsString("Connector never-existed not found"));
            async.flag();
        }));
}
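The status check above re-arms a one-second Vert.x timer until the connector reports RUNNING. The same polling pattern can be pulled out into a helper; the sketch below is illustrative only, and the helper name, the Optional-based contract and the fixed retry budget are assumptions, not part of KafkaConnectApi.

import io.vertx.core.Future;
import io.vertx.core.Handler;
import io.vertx.core.Promise;
import io.vertx.core.Vertx;

import java.util.Optional;
import java.util.function.Supplier;

// Hypothetical helper, not part of Strimzi: poll an asynchronous check on a timer until it
// yields a value, the way the hand-rolled Handler<Long> above polls the connector status.
public class PollUntil {
    public static <T> Future<T> pollUntil(Vertx vertx, long intervalMs, int maxPolls,
                                          Supplier<Future<Optional<T>>> check) {
        Promise<T> promise = Promise.promise();
        vertx.setTimer(intervalMs, new Handler<Long>() {
            private int polls = 0;

            @Override
            public void handle(Long timerId) {
                polls++;
                check.get().onComplete(result -> {
                    if (result.succeeded() && result.result().isPresent()) {
                        promise.complete(result.result().get()); // condition met
                    } else if (polls >= maxPolls) {
                        promise.fail("condition not met after " + maxPolls + " polls");
                    } else {
                        vertx.setTimer(intervalMs, this); // try again later
                    }
                });
            }
        });
        return promise.future();
    }
}

With such a helper the test's status loop would become a single pollUntil(vertx, 1000, maxPolls, ...) call, with the caller deciding when the returned status counts as ready.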
Use of io.strimzi.operator.common.BackOff in project strimzi by strimzi.
In class ZookeeperLeaderFinderTest, the method test1PodClusterReturnsOnlyPodAsLeader:
@Test
public void test1PodClusterReturnsOnlyPodAsLeader(VertxTestContext context) {
    ZookeeperLeaderFinder finder = new ZookeeperLeaderFinder(vertx, this::backoff);
    Checkpoint a = context.checkpoint();
    int firstPodIndex = 0;
    finder.findZookeeperLeader(Reconciliation.DUMMY_RECONCILIATION, Set.of(createPodWithId(firstPodIndex)), dummySecret(), dummySecret())
        .onComplete(context.succeeding(leader -> {
            context.verify(() -> assertThat(leader, is("my-cluster-kafka-0")));
            a.flag();
        }));
}
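The backoff() method referenced via this::backoff is not shown on this page; ZookeeperLeaderFinder takes a supplier so that each leader-finding run gets a fresh BackOff. A plausible shape for the supplier, with illustrative constructor values, would be:

// Assumed shape of the supplier passed as this::backoff above; the first-delay and factor
// values are illustrative, not taken from the test, only MAX_ATTEMPTS appears on this page.
private BackOff backoff() {
    return new BackOff(50, 2, MAX_ATTEMPTS);
}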
Use of io.strimzi.operator.common.BackOff in project strimzi by strimzi.
In class ZookeeperLeaderFinderTest, the method testFinderHandlesFailureByLeaderFoundOnThirdAttempt:
@Test
public void testFinderHandlesFailureByLeaderFoundOnThirdAttempt(VertxTestContext context) throws InterruptedException {
    int desiredLeaderId = 1;
    String leaderPod = "my-cluster-kafka-1";
    int succeedOnAttempt = 2;
    int[] ports = startMockZks(context, 2, (id, attempt) -> attempt == succeedOnAttempt && id == desiredLeaderId);
    TestingZookeeperLeaderFinder finder = new TestingZookeeperLeaderFinder(this::backoff, ports);
    Checkpoint a = context.checkpoint();
    finder.findZookeeperLeader(Reconciliation.DUMMY_RECONCILIATION, treeSet(createPodWithId(0), createPodWithId(1)), dummySecret(), dummySecret())
        .onComplete(context.succeeding(leader -> context.verify(() -> {
            assertThat(leader, is(leaderPod));
            for (FakeZk zk : zks) {
                assertThat("Unexpected number of attempts for node " + zk.id, zk.attempts.get(), is(succeedOnAttempt + 1));
            }
            a.flag();
        })));
}
Use of io.strimzi.operator.common.BackOff in project strimzi by strimzi.
In class ZookeeperLeaderFinderTest, the method testReturnUnknownLeaderWhenMaxAttemptsExceeded:
@Test
public void testReturnUnknownLeaderWhenMaxAttemptsExceeded(VertxTestContext context) throws InterruptedException {
    int[] ports = startMockZks(context, 2, (id, attempt) -> false);
    ZookeeperLeaderFinder finder = new TestingZookeeperLeaderFinder(this::backoff, ports);
    Checkpoint a = context.checkpoint();
    finder.findZookeeperLeader(Reconciliation.DUMMY_RECONCILIATION, treeSet(createPodWithId(0), createPodWithId(1)), dummySecret(), dummySecret())
        .onComplete(context.succeeding(leader -> context.verify(() -> {
            assertThat(leader, is(ZookeeperLeaderFinder.UNKNOWN_LEADER));
            for (FakeZk zk : zks) {
                assertThat("Unexpected number of attempts for node " + zk.id, zk.attempts.get(), is(MAX_ATTEMPTS + 1));
            }
            a.flag();
        })));
}
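As the test shows, exhausting the BackOff does not fail the future: the finder completes successfully with ZookeeperLeaderFinder.UNKNOWN_LEADER, so a caller has to check the returned value. A hypothetical caller-side sketch, reusing the call shape from the tests above (the variable names and the rollLeaderLast follow-up are illustrative, not Strimzi code):

// Hypothetical handling of the UNKNOWN_LEADER sentinel by a caller of the finder
finder.findZookeeperLeader(Reconciliation.DUMMY_RECONCILIATION, pods, coKeySecret, clusterCaCertSecret)
    .compose(leader -> {
        if (ZookeeperLeaderFinder.UNKNOWN_LEADER.equals(leader)) {
            // No leader identified before the BackOff was exhausted:
            // e.g. treat every pod the same, or retry on the next reconciliation
            return Future.succeededFuture();
        }
        return rollLeaderLast(leader); // illustrative follow-up step
    });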