Usage of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard (by bf2fc6cc711aee1a0c2a): class KafkaClusterTest, method testStorageCalculations.
@Test
void testStorageCalculations() throws IOException {
    // A 40Gi storage request gets padded by the cluster; pin the padded
    // per-broker byte count and check that unpadding inverts the padding.
    ManagedKafka managedKafka = exampleManagedKafka("40Gi");
    Kafka kafka = kafkaCluster.kafkaFrom(managedKafka, null);

    long paddedPerBrokerBytes = getBrokerStorageBytes(kafka);
    assertEquals(25095918893L, paddedPerBrokerBytes);

    // Unpadding the per-broker value and summing over the 3 brokers should
    // recover the requested 40Gi total; the expected value is one byte short
    // of exactly 40Gi, presumably integer-division rounding in the padding
    // math (mirrors the original expectation).
    long requestedTotalBytes = (40L << 30) - 1;
    assertEquals(requestedTotalBytes, 3 * kafkaCluster.unpadBrokerStorage(managedKafka, null, 25095918893L));
}
Usage of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard (by bf2fc6cc711aee1a0c2a): class KafkaClusterTest, method storageCalculation.
@Test
void storageCalculation() throws InterruptedException {
    // Retention size is derived from the PVCs observed in the namespace, so
    // mock the InformerManager to control which PVCs are visible.
    ManagedKafka mk = ManagedKafka.getDummyInstance(1);
    mk.getMetadata().setUid(UUID.randomUUID().toString());
    // Fix: use an uppercase 'L' long-literal suffix; lowercase 'l' is easily
    // misread as the digit 1 and is flagged by linters.
    mk.getMetadata().setGeneration(1L);
    mk.getMetadata().setResourceVersion("1");

    InformerManager informerManager = Mockito.mock(InformerManager.class);
    QuarkusMock.installMockForType(informerManager, InformerManager.class);

    // With no PVCs in the namespace, the retention size should be 0.
    Mockito.when(informerManager.getPvcsInNamespace(Mockito.anyString())).thenReturn(List.of());
    assertEquals("0", kafkaCluster.calculateRetentionSize(mk).getAmount());

    // With three 344Gi PVCs, retention should be the sum in Gi, less the padding.
    PersistentVolumeClaim pvc = new PersistentVolumeClaimBuilder()
            .withNewStatus()
                .addToCapacity("storage", Quantity.parse("344Gi"))
            .endStatus()
            .build();
    Mockito.when(informerManager.getPvcsInNamespace(Mockito.anyString())).thenReturn(List.of(pvc, pvc, pvc));
    assertEquals("1000", kafkaCluster.calculateRetentionSize(mk).getAmount());
}
Usage of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard (by bf2fc6cc711aee1a0c2a): class KafkaClusterTest, method testDrainCleanerWebhookNotFound.
@Test
void testDrainCleanerWebhookNotFound() throws IOException {
    // Simulate a cluster where the drain cleaner webhook is absent.
    DrainCleanerManager drainCleanerManager = Mockito.mock(DrainCleanerManager.class);
    Mockito.when(drainCleanerManager.isDrainCleanerWebhookFound()).thenReturn(false);
    QuarkusMock.installMockForType(drainCleanerManager, DrainCleanerManager.class);

    ManagedKafka managedKafka = exampleManagedKafka("40Gi");
    Kafka kafka = kafkaCluster.kafkaFrom(managedKafka, null);

    // Without the webhook, no PodDisruptionBudget template should be set
    // on either the Kafka or the ZooKeeper spec.
    assertNull(kafka.getSpec().getKafka().getTemplate().getPodDisruptionBudget());
    assertNull(kafka.getSpec().getZookeeper().getTemplate().getPodDisruptionBudget());
}
Usage of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard (by bf2fc6cc711aee1a0c2a): class DevelopmentKafkaCluster, method kafkaFrom.
/* test */
@Override
public Kafka kafkaFrom(ManagedKafka managedKafka, Kafka current) {
    // Edit the existing resource when updating; otherwise build from scratch.
    KafkaBuilder builder = current != null ? new KafkaBuilder(current) : new KafkaBuilder();
    int replicas = getReplicas(managedKafka);

    Kafka kafka = builder
            .editOrNewMetadata()
                .withName(kafkaClusterName(managedKafka))
                .withNamespace(kafkaClusterNamespace(managedKafka))
                .withLabels(buildLabels(managedKafka))
            .endMetadata()
            .editOrNewSpec()
                .editOrNewKafka()
                    .withVersion(managedKafka.getSpec().getVersions().getKafka())
                    .withReplicas(replicas)
                    .withListeners(buildListeners(managedKafka, replicas))
                    .withStorage(buildStorage())
                    .withConfig(buildKafkaConfig(managedKafka))
                    .withTemplate(getKafkaTemplate(managedKafka))
                    .withImage(kafkaImage.orElse(null))
                .endKafka()
                .editOrNewZookeeper()
                    .withReplicas(this.config.getZookeeper().getReplicas())
                    .withStorage((SingleVolumeStorage) buildStorage())
                    .withTemplate(getZookeeperTemplate(managedKafka))
                    .withImage(zookeeperImage.orElse(null))
                .endZookeeper()
            .endSpec()
            .build();

    // Setting the ManagedKafka as owner of the Kafka resource is needed
    // by the operator sdk to handle events on the Kafka resource properly.
    OperandUtils.setAsOwner(managedKafka, kafka);
    return kafka;
}
Usage of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard (by bf2fc6cc711aee1a0c2a): class ManagedKafkaResourceType, method getDefault.
/**
 * Builds the common default {@link ManagedKafka} instance used by the system tests.
 *
 * @param namespace namespace to create the ManagedKafka in
 * @param appName name of the ManagedKafka application
 * @param keycloak Keycloak instance supplying the OAuth configuration; may be
 *        {@code null}, in which case OAuth falls back to environment values or
 *        is left unset when the dummy JWKS placeholder is still in effect
 * @param strimziVersion Strimzi version for the instance
 * @param kafkaVersion Kafka version for the instance
 * @return the default ManagedKafka instance
 * @throws Exception if the self-signed TLS configuration for the host domain
 *         cannot be generated
 */
public static ManagedKafka getDefault(String namespace, String appName, KeycloakInstance keycloak, String strimziVersion, String kafkaVersion) throws Exception {
    String hostDomain = resolveHostDomain();

    // Endpoint TLS: generate a self-signed cert for the host domain unless
    // real cert/key material was supplied through the environment.
    final String tlsCert;
    final String tlsKey;
    if (SystemTestEnvironment.DUMMY_CERT.equals(SystemTestEnvironment.ENDPOINT_TLS_CERT)) {
        SecurityUtils.TlsConfig tlsConfig = SecurityUtils.getTLSConfig(hostDomain);
        tlsCert = tlsConfig.getCert();
        tlsKey = tlsConfig.getKey();
    } else {
        tlsCert = SystemTestEnvironment.ENDPOINT_TLS_CERT;
        tlsKey = SystemTestEnvironment.ENDPOINT_TLS_KEY;
    }

    // OAuth: prefer an explicitly provided Keycloak instance; otherwise use
    // env-var values, or leave everything unset when the JWKS endpoint is
    // still the dummy placeholder (OAuth effectively disabled).
    final String oauthClientId;
    final String oauthTlsCert;
    final String oauthClientSecret;
    final String oauthUserClaim;
    final String oauthFallbackUserClaim;
    final String oauthJwksEndpoint;
    final String oauthTokenEndpoint;
    final String oauthIssuerEndpoint;
    if (keycloak != null) {
        oauthClientId = "kafka";
        oauthTlsCert = keycloak.getKeycloakCert();
        oauthClientSecret = "kafka";
        oauthUserClaim = keycloak.getUserNameClaim();
        oauthFallbackUserClaim = keycloak.getFallbackUserNameClaim();
        oauthJwksEndpoint = keycloak.getJwksEndpointUri();
        oauthTokenEndpoint = keycloak.getOauthTokenEndpointUri();
        oauthIssuerEndpoint = keycloak.getValidIssuerUri();
    } else if (SystemTestEnvironment.DUMMY_OAUTH_JWKS_URI.equals(SystemTestEnvironment.OAUTH_JWKS_ENDPOINT)) {
        oauthClientId = null;
        oauthTlsCert = null;
        oauthClientSecret = null;
        oauthUserClaim = null;
        oauthFallbackUserClaim = null;
        oauthJwksEndpoint = null;
        oauthTokenEndpoint = null;
        oauthIssuerEndpoint = null;
    } else {
        // use values defined by env vars for oauth
        oauthClientId = SystemTestEnvironment.OAUTH_CLIENT_ID;
        oauthTlsCert = SystemTestEnvironment.DUMMY_CERT.equals(SystemTestEnvironment.OAUTH_TLS_CERT) ? null : SystemTestEnvironment.OAUTH_TLS_CERT;
        oauthClientSecret = SystemTestEnvironment.OAUTH_CLIENT_SECRET;
        oauthUserClaim = SystemTestEnvironment.OAUTH_USER_CLAIM;
        oauthFallbackUserClaim = SystemTestEnvironment.OAUTH_FALLBACK_USER_CLAIM;
        oauthJwksEndpoint = SystemTestEnvironment.OAUTH_JWKS_ENDPOINT;
        oauthTokenEndpoint = SystemTestEnvironment.OAUTH_TOKEN_ENDPOINT;
        oauthIssuerEndpoint = SystemTestEnvironment.OAUTH_ISSUER_ENDPOINT;
    }
    return ManagedKafka.getDefault(appName, namespace, hostDomain, tlsCert, tlsKey, oauthClientId, oauthTlsCert, oauthClientSecret, oauthUserClaim, oauthFallbackUserClaim, oauthJwksEndpoint, oauthTokenEndpoint, oauthIssuerEndpoint, strimziVersion, kafkaVersion);
}

/**
 * Resolves the ingress host domain: the configured bootstrap domain on plain
 * Kubernetes, or the OpenShift ingress controller's domain on OpenShift,
 * preferring the "sharded" controller and falling back to "default".
 */
private static String resolveHostDomain() {
    if (KubeClient.getInstance().isGenericKubernetes()) {
        return SystemTestEnvironment.BOOTSTRAP_HOST_DOMAIN;
    }
    OpenShiftClient cli = KubeClient.getInstance().client().adapt(OpenShiftClient.class);
    // Use orElseGet so the "default" controller is only fetched when the
    // "sharded" one is absent (the original orElse always performed both API calls).
    return Optional.ofNullable(
                cli.operator().ingressControllers().inNamespace("openshift-ingress-operator").withName("sharded").get())
            .orElseGet(() -> cli.operator().ingressControllers().inNamespace("openshift-ingress-operator").withName("default").get())
            .getStatus()
            .getDomain();
}
Aggregations