Use of org.bf2.operator.operands.KafkaInstanceConfiguration.Kafka in the kas-fleetshard project (bf2fc6cc711aee1a0c2a): method createOrUpdate of class AbstractKafkaCluster.
@Override
public void createOrUpdate(ManagedKafka managedKafka) {
    // Derive the desired Kafka resource from the ManagedKafka spec, using the
    // currently cached Kafka (if any) as the baseline, then apply it.
    Kafka existing = cachedKafka(managedKafka);
    createOrUpdate(kafkaFrom(managedKafka, existing));
}
Use of org.bf2.operator.operands.KafkaInstanceConfiguration.Kafka in the kas-fleetshard project (bf2fc6cc711aee1a0c2a): method isReconciliationPaused of class AbstractKafkaCluster.
/**
 * Reports whether Strimzi reconciliation is currently paused for the Kafka
 * backing the given ManagedKafka, i.e. the cached Kafka resource has a status
 * condition of type "ReconciliationPaused" whose status is "True".
 */
public boolean isReconciliationPaused(ManagedKafka managedKafka) {
    Kafka kafka = cachedKafka(managedKafka);
    boolean paused = false;
    if (kafka != null && kafka.getStatus() != null) {
        // "literal".equals(x) is null-safe, so no separate null check on getType() is needed
        paused = hasKafkaCondition(kafka,
                c -> "ReconciliationPaused".equals(c.getType()) && "True".equals(c.getStatus()));
    }
    log.tracef("KafkaCluster isReconciliationPaused = %s", paused);
    return paused;
}
Use of org.bf2.operator.operands.KafkaInstanceConfiguration.Kafka in the kas-fleetshard project (bf2fc6cc711aee1a0c2a): method isStrimziUpdating of class AbstractKafkaCluster.
/**
 * Reports whether the Kafka backing the given ManagedKafka is paused because of
 * a Strimzi update: the pause-reason annotation recognized by StrimziManager is
 * present AND reconciliation is actually paused.
 */
public boolean isStrimziUpdating(ManagedKafka managedKafka) {
    Kafka kafka = cachedKafka(managedKafka);
    if (kafka == null) {
        return false;
    }
    // getAnnotations() may be null on a freshly-built metadata object
    Map<String, String> annotations = kafka.getMetadata().getAnnotations();
    if (annotations == null) {
        annotations = Collections.emptyMap();
    }
    return StrimziManager.isPauseReasonStrimziUpdate(annotations) && isReconciliationPaused(managedKafka);
}
Use of org.bf2.operator.operands.KafkaInstanceConfiguration.Kafka in the kas-fleetshard project (bf2fc6cc711aee1a0c2a): method getPreviousUpstreamStrimziVersion of class StrimziOperatorManager.
/**
 * Resolves the upstream Strimzi release that immediately precedes the given version.
 *
 * Fetches all non-prerelease, non-draft releases of strimzi/strimzi-kafka-operator
 * from the GitHub API, sorts them by version, and returns the entry just before
 * {@code actualVersion}.
 *
 * @param actualVersion the release name whose predecessor is wanted
 * @return the previous release name, or "" when a test-suite-managed Strimzi
 *         operator is installed, when {@code actualVersion} is not present in the
 *         release list, or when it is the oldest release
 */
public static String getPreviousUpstreamStrimziVersion(String actualVersion) throws InterruptedException, ExecutionException {
    if (!isNotTestSuiteStrimziOperatorInstalled(KubeClient.getInstance())) {
        List<String> sortedReleases = Arrays.stream(GithubApiClient.getReleases("strimzi", "strimzi-kafka-operator"))
                .filter(a -> !(a.prerelease || a.draft))
                .sorted((a, b) -> new ComparableVersion(a.name).compareTo(new ComparableVersion(b.name)))
                .map(a -> a.name)
                .collect(Collectors.toList());
        int index = sortedReleases.indexOf(actualVersion);
        // Bug fix: indexOf returns -1 for an unknown version, and index 0 has no
        // predecessor; the previous get(index - 1) threw IndexOutOfBoundsException
        // in both cases. Return "" instead, matching the other fallback path.
        if (index <= 0) {
            return "";
        }
        return sortedReleases.get(index - 1);
    }
    return "";
}
Use of org.bf2.operator.operands.KafkaInstanceConfiguration.Kafka in the kas-fleetshard project (bf2fc6cc711aee1a0c2a): test method testCreateManagedKafkaRestartKubeApi of class ManagedKafkaST.
// Integration test: create and delete a ManagedKafka through the sync API while
// the Kubernetes API server is being restarted in the background, verifying the
// fleetshard components tolerate temporary kube-api unavailability.
@SequentialTest
void testCreateManagedKafkaRestartKubeApi(ExtensionContext extensionContext) throws Exception {
// single background worker used to restart the kube-api during the create phase
ExecutorService executor = Executors.newFixedThreadPool(1);
try {
String mkAppName = "mk-test-restart-kubeapi";
ManagedKafka mk = ManagedKafkaResourceType.getDefault(mkAppName, mkAppName, keycloak, latestStrimziVersion, latestKafkaVersion);
// start restarting kubeapi in the background
executor.execute(TestUtils::restartKubeApi);
// presumably gives the kube-api restart time to take effect before creating — TODO confirm
Thread.sleep(5_000);
// Create mk using api while the kube-api is unstable
resourceManager.addResource(extensionContext, new NamespaceBuilder().withNewMetadata().withName(mkAppName).endMetadata().build());
resourceManager.addResource(extensionContext, mk);
HttpResponse<String> res = SyncApiClient.createManagedKafka(mk, syncEndpoint);
assertEquals(HttpURLConnection.HTTP_NO_CONTENT, res.statusCode());
// stop restarting kubeapi
executor.shutdownNow();
// wait up to 15 minutes for the instance to reach the Ready condition
resourceManager.waitResourceCondition(mk, m -> ManagedKafkaResourceType.hasConditionStatus(m, ManagedKafkaCondition.Type.Ready, ManagedKafkaCondition.Status.True), TimeUnit.MINUTES.toMillis(15));
LOGGER.info("ManagedKafka {} created", mkAppName);
// wait for the sync to be up-to-date (status present and Ready via the sync API)
TestUtils.waitFor("Managed kafka status sync", 1_000, 60_000, () -> {
try {
String statusBody = SyncApiClient.getManagedKafkaStatus(mk.getId(), syncEndpoint).body();
if (statusBody.isEmpty()) {
// status not published yet — keep polling
return false;
}
ManagedKafkaStatus apiStatus = Serialization.jsonMapper().readValue(statusBody, ManagedKafkaStatus.class);
return ManagedKafkaResourceType.hasConditionStatus(apiStatus, ManagedKafkaCondition.Type.Ready, ManagedKafkaCondition.Status.True);
} catch (Exception e) {
// surface JSON/transport failures as test errors instead of silently retrying
throw new AssertionError(e);
}
});
// Get status from the sync API and compare it with the in-cluster CR status
ManagedKafkaStatus apiStatus = Serialization.jsonMapper().readValue(SyncApiClient.getManagedKafkaStatus(mk.getId(), syncEndpoint).body(), ManagedKafkaStatus.class);
ManagedKafka managedKafka = ManagedKafkaResourceType.getOperation().inNamespace(mkAppName).withName(mkAppName).get();
AssertUtils.assertManagedKafkaStatus(managedKafka, apiStatus);
// Get agent status and sanity-check it
ManagedKafkaAgentStatus agentStatus = Serialization.jsonMapper().readValue(SyncApiClient.getManagedKafkaAgentStatus(syncEndpoint).body(), ManagedKafkaAgentStatus.class);
AssertUtils.assertManagedKafkaAgentStatus(agentStatus);
// Check if managed kafka deployed all components
AssertUtils.assertManagedKafka(mk);
// start restarting kubeapi again for the deletion phase (previous pool was shut down above)
executor = Executors.newFixedThreadPool(1);
executor.execute(TestUtils::restartKubeApi);
Thread.sleep(5_000);
// delete mk using api while the kube-api is unstable
res = SyncApiClient.deleteManagedKafka(mk.getId(), syncEndpoint);
assertEquals(HttpURLConnection.HTTP_NO_CONTENT, res.statusCode());
// stop restarting kubeapi
executor.shutdownNow();
// NOTE(review): the result of isDeleted is ignored — looks like this was meant to
// wait for / assert deletion; confirm against ManagedKafkaResourceType.isDeleted.
ManagedKafkaResourceType.isDeleted(mk);
LOGGER.info("ManagedKafka {} deleted", mkAppName);
} finally {
// safety net: make sure the restart worker is stopped even if the test fails
executor.shutdownNow();
}
}
Aggregations