Use of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
The class AbstractKafkaCluster, method isStrimziUpdating:
public boolean isStrimziUpdating(ManagedKafka managedKafka) {
    Kafka kafka = cachedKafka(managedKafka);
    if (kafka == null) {
        return false;
    }
    Map<String, String> annotations = Objects.requireNonNullElse(kafka.getMetadata().getAnnotations(), Collections.emptyMap());
    return StrimziManager.isPauseReasonStrimziUpdate(annotations) && isReconciliationPaused(managedKafka);
}
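A minimal sketch of how a reconciler might consume this check; the surrounding reconcile step, the kafkaCluster field, and the log call are assumptions for illustration, not project code:

// Hypothetical guard in a reconcile loop: while the Strimzi operator is
// rolling the cluster, skip operand updates rather than fighting the roll.
if (kafkaCluster.isStrimziUpdating(managedKafka)) {
    log.infof("Strimzi update in progress for %s, skipping reconcile", managedKafka.getId());
    return;
}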
Use of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
The class SecuritySecretManager, method buildSecretFrom:
private static Secret buildSecretFrom(String name, String type, ManagedKafka managedKafka, Secret current, Map<String, String> dataSource) {
    SecretBuilder builder = current != null ? new SecretBuilder(current) : new SecretBuilder();
    Map<String, String> data = dataSource.entrySet().stream()
            .map(entry -> Map.entry(entry.getKey(), encode(entry.getValue())))
            .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
    Secret secret = builder.editOrNewMetadata()
                .withNamespace(kafkaClusterNamespace(managedKafka))
                .withName(name)
                .withLabels(OperandUtils.getDefaultLabels())
            .endMetadata()
            .withType(type)
            .withData(data)
            .build();
    // setting the ManagedKafka as owner of the Secret resource is needed
    // by the operator SDK to handle events on the Secret resource properly
    OperandUtils.setAsOwner(managedKafka, secret);
    return secret;
}
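The encode helper called above is not shown in this snippet. Kubernetes stores Secret data Base64-encoded, so a plausible implementation (an assumption, not the project source) is:

// Hypothetical helper: Secret data values must be Base64-encoded strings.
// Assumes java.util.Base64 and java.nio.charset.StandardCharsets are imported.
private static String encode(String value) {
    return Base64.getEncoder().encodeToString(value.getBytes(StandardCharsets.UTF_8));
}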
Use of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
The class SecuritySecretManager, method digestSecretsVersions:
public String digestSecretsVersions(ManagedKafka managedKafka, List<String> secretNames) {
    final MessageDigest secretsDigest;
    try {
        secretsDigest = MessageDigest.getInstance("SHA-1");
    } catch (NoSuchAlgorithmException e) {
        throw new RuntimeException(e);
    }
    secretNames.stream()
            .map(name -> cachedOrRemoteSecret(managedKafka, name))
            .filter(Objects::nonNull)
            .map(Secret::getMetadata)
            .forEach(secretMetadata -> {
                secretsDigest.update(secretMetadata.getUid().getBytes(StandardCharsets.UTF_8));
                secretsDigest.update(secretMetadata.getResourceVersion().getBytes(StandardCharsets.UTF_8));
            });
    return String.format("%040x", new BigInteger(1, secretsDigest.digest()));
}
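A digest like this is typically stamped onto a pod template annotation so workloads restart whenever any referenced Secret changes. A hedged sketch of such a caller (the annotation key, secretManager, and podTemplateAnnotations are hypothetical):

// Hypothetical caller: a change to any listed Secret changes the digest,
// which changes the pod template and triggers a rolling restart.
String digest = secretManager.digestSecretsVersions(managedKafka, secretNames);
podTemplateAnnotations.put("managedkafka.bf2.org/secret-dependency-digest", digest);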
Use of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
The class MockManagedKafkaFactory, method loop:
@Scheduled(every = "{mock.factory.interval}")
void loop() {
    Random random = new Random(System.currentTimeMillis());
    log.info("Mock ManagedKafka Factory:: Running Simulation");

    // seed the initial set of clusters
    if (this.kafkas.isEmpty()) {
        int max = Math.abs(random.nextInt(maxKafkas));
        for (int i = 0; i < max; i++) {
            ManagedKafka k = ManagedKafka.getDummyInstance(this.clusterIdGenerator.getAndIncrement());
            log.infof("Mock ManagedKafka Factory:: marking %s for addition", k.getId());
            this.kafkas.put(k.getId(), k);
            mkClient.create(k);
        }
    }

    // delete an instance at random
    if (this.kafkas.size() > 1 && random.nextBoolean()) {
        int idx = Math.abs(random.nextInt(this.kafkas.size()));
        int i = 0;
        for (ManagedKafka k : kafkas.values()) {
            if (i++ < idx) {
                continue;
            }
            markForDeletion(k.getId());
            break;
        }
    }

    // selectively add a new instance
    if (this.kafkas.size() < maxKafkas && random.nextBoolean()) {
        ManagedKafka k = ManagedKafka.getDummyInstance(this.clusterIdGenerator.getAndIncrement());
        log.infof("Mock ManagedKafka Factory:: creating a new cluster %s", k.getId());
        this.kafkas.put(k.getId(), k);
        mkClient.create(k);
    }

    log.info("--------------------------------------------------");
    for (ManagedKafka mk : this.kafkas.values()) {
        log.infof("ManagedKafka: %s, delete requested: %s", mk.getId(), mk.getSpec().isDeleted());
    }
    log.info("--------------------------------------------------");
}
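The markForDeletion helper invoked above is not part of this snippet. Given the fields the class uses and the isDeleted accessor seen in the status loop, a plausible sketch (an assumption about its shape, including the setDeleted setter and createOrReplace call) is:

// Hypothetical sketch: flag the spec as deleted and push the update so the
// operator observes a delete request, mimicking the control plane's behavior.
private void markForDeletion(String id) {
    ManagedKafka mk = this.kafkas.get(id);
    if (mk != null) {
        log.infof("Mock ManagedKafka Factory:: marking %s for deletion", id);
        mk.getSpec().setDeleted(true);
        mkClient.createOrReplace(mk);
    }
}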
Use of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
The class ManagedKafkaST, method testCreateManagedKafkaRestartKubeApi:
@SequentialTest
void testCreateManagedKafkaRestartKubeApi(ExtensionContext extensionContext) throws Exception {
    ExecutorService executor = Executors.newFixedThreadPool(1);
    try {
        String mkAppName = "mk-test-restart-kubeapi";
        ManagedKafka mk = ManagedKafkaResourceType.getDefault(mkAppName, mkAppName, keycloak, latestStrimziVersion, latestKafkaVersion);

        // start restarting the kube API
        executor.execute(TestUtils::restartKubeApi);
        Thread.sleep(5_000);

        // create the ManagedKafka using the sync API
        resourceManager.addResource(extensionContext, new NamespaceBuilder().withNewMetadata().withName(mkAppName).endMetadata().build());
        resourceManager.addResource(extensionContext, mk);
        HttpResponse<String> res = SyncApiClient.createManagedKafka(mk, syncEndpoint);
        assertEquals(HttpURLConnection.HTTP_NO_CONTENT, res.statusCode());

        // stop restarting the kube API
        executor.shutdownNow();
        resourceManager.waitResourceCondition(mk,
                m -> ManagedKafkaResourceType.hasConditionStatus(m, ManagedKafkaCondition.Type.Ready, ManagedKafkaCondition.Status.True),
                TimeUnit.MINUTES.toMillis(15));
        LOGGER.info("ManagedKafka {} created", mkAppName);

        // wait for the sync to be up-to-date
        TestUtils.waitFor("Managed kafka status sync", 1_000, 60_000, () -> {
            try {
                String statusBody = SyncApiClient.getManagedKafkaStatus(mk.getId(), syncEndpoint).body();
                if (statusBody.isEmpty()) {
                    return false;
                }
                ManagedKafkaStatus apiStatus = Serialization.jsonMapper().readValue(statusBody, ManagedKafkaStatus.class);
                return ManagedKafkaResourceType.hasConditionStatus(apiStatus, ManagedKafkaCondition.Type.Ready, ManagedKafkaCondition.Status.True);
            } catch (Exception e) {
                throw new AssertionError(e);
            }
        });

        // get the status from the sync API and compare it with the CR status
        ManagedKafkaStatus apiStatus = Serialization.jsonMapper().readValue(SyncApiClient.getManagedKafkaStatus(mk.getId(), syncEndpoint).body(), ManagedKafkaStatus.class);
        ManagedKafka managedKafka = ManagedKafkaResourceType.getOperation().inNamespace(mkAppName).withName(mkAppName).get();
        AssertUtils.assertManagedKafkaStatus(managedKafka, apiStatus);

        // get the agent status
        ManagedKafkaAgentStatus agentStatus = Serialization.jsonMapper().readValue(SyncApiClient.getManagedKafkaAgentStatus(syncEndpoint).body(), ManagedKafkaAgentStatus.class);
        AssertUtils.assertManagedKafkaAgentStatus(agentStatus);

        // check that the ManagedKafka deployed all components
        AssertUtils.assertManagedKafka(mk);

        // start restarting the kube API again
        executor = Executors.newFixedThreadPool(1);
        executor.execute(TestUtils::restartKubeApi);
        Thread.sleep(5_000);

        // delete the ManagedKafka using the sync API
        res = SyncApiClient.deleteManagedKafka(mk.getId(), syncEndpoint);
        assertEquals(HttpURLConnection.HTTP_NO_CONTENT, res.statusCode());

        // stop restarting the kube API
        executor.shutdownNow();
        ManagedKafkaResourceType.isDeleted(mk);
        LOGGER.info("ManagedKafka {} deleted", mkAppName);
    } finally {
        executor.shutdownNow();
    }
}
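The TestUtils.waitFor helper used in the test follows a common poll-until-true pattern; a minimal self-contained sketch (an assumption about its shape, not the project source):

// Hypothetical poller: evaluate `ready` every pollMs until it returns true
// or timeoutMs elapses, then fail with the supplied description.
static void waitFor(String description, long pollMs, long timeoutMs, java.util.function.BooleanSupplier ready) throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
        if (ready.getAsBoolean()) {
            return;
        }
        Thread.sleep(pollMs);
    }
    throw new AssertionError("Timed out waiting for " + description);
}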