Use of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
The class KafkaClusterTest, method pausedUnknownStatus:
@Test
void pausedUnknownStatus() throws InterruptedException {
    ManagedKafka mk = ManagedKafka.getDummyInstance(1);
    InformerManager informer = Mockito.mock(InformerManager.class);
    // Kafka CR paused with a reason the operator does not recognize
    Kafka kafka = new KafkaBuilder(this.kafkaCluster.kafkaFrom(mk, null))
            .editMetadata()
                .withAnnotations(Map.of(StrimziManager.STRIMZI_PAUSE_REASON_ANNOTATION, "custom"))
            .endMetadata()
            .withNewStatus()
                .withConditions(new ConditionBuilder()
                        .withType("ReconciliationPaused")
                        .withStatus("True")
                        .build())
            .endStatus()
            .build();
    Mockito.when(informer.getLocalKafka(Mockito.anyString(), Mockito.anyString())).thenReturn(kafka);
    QuarkusMock.installMockForType(informer, InformerManager.class);
    OperandReadiness readiness = this.kafkaCluster.getReadiness(mk);
    assertEquals(Status.Unknown, readiness.getStatus());
    assertEquals(Reason.Paused, readiness.getReason());
    assertEquals("Kafka mk-1 is paused for an unknown reason", readiness.getMessage());
}
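The stubbed InformerManager installed through QuarkusMock is what lets getReadiness see a canned Kafka instead of querying a cluster, and the same wiring recurs in the other readiness tests. A minimal sketch of that wiring as a hypothetical helper (the helper name is illustrative; it assumes the same InformerManager#getLocalKafka(namespace, name) lookup and imports used in the test above):

// Hypothetical helper: have the CDI-managed InformerManager hand back a canned Kafka
// for any namespace/name, so readiness can be computed without a running cluster.
static InformerManager installLocalKafka(Kafka kafka) {
    InformerManager informer = Mockito.mock(InformerManager.class);
    Mockito.when(informer.getLocalKafka(Mockito.anyString(), Mockito.anyString())).thenReturn(kafka);
    // Replace the real bean for the remainder of the current test.
    QuarkusMock.installMockForType(informer, InformerManager.class);
    return informer;
}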
Use of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
The class KafkaClusterTest, method testScalingAndReplicationFactor:
@Test
void testScalingAndReplicationFactor() throws IOException {
    KafkaInstanceConfiguration config = kafkaCluster.getKafkaConfiguration();
    try {
        KafkaInstanceConfiguration clone = Serialization.clone(config);
        clone.getKafka().setScalingAndReplicationFactor(1);
        kafkaCluster.setKafkaConfiguration(clone);

        ManagedKafka mk = exampleManagedKafka("60Gi");
        Kafka kafka = kafkaCluster.kafkaFrom(mk, null);

        // broker config should reflect a scaling and replication factor of one
        diffToExpected(kafka.getSpec().getKafka().getConfig(), "/expected/scaling-one.yml");
    } finally {
        // restore the shared configuration for the other tests
        kafkaCluster.setKafkaConfiguration(config);
    }
}
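diffToExpected is a helper on the test class and its body is not shown here. A minimal sketch of what such a comparison can look like, assuming Fabric8's Serialization utility for YAML output and a plain string assertion; the method name and resource handling are illustrative, and the real helper may well diff structurally instead:

// Hypothetical stand-in for diffToExpected: serialize the live object to YAML and
// compare it with the expected classpath resource (e.g. /expected/scaling-one.yml).
static void assertMatchesExpectedYaml(Object actual, String expectedResource) throws IOException {
    String expected;
    try (InputStream is = KafkaClusterTest.class.getResourceAsStream(expectedResource)) {
        expected = new String(is.readAllBytes(), StandardCharsets.UTF_8);
    }
    // io.fabric8.kubernetes.client.utils.Serialization
    assertEquals(expected.trim(), Serialization.asYaml(actual).trim());
}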
Use of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
The class KafkaClusterTest, method testManagedKafkaToKafkaWithCustomConfiguration:
@Test
void testManagedKafkaToKafkaWithCustomConfiguration() throws IOException {
    KafkaInstanceConfiguration config = kafkaCluster.getKafkaConfiguration();
    try {
        // customize broker, ZooKeeper, and exporter settings on a deep copy
        KafkaInstanceConfiguration clone = Serialization.clone(config);
        clone.getKafka().setConnectionAttemptsPerSec(300);
        clone.getKafka().setContainerMemory("2Gi");
        clone.getKafka().setJvmXx("foo bar, foo2 bar2");
        clone.getZookeeper().setReplicas(5);
        clone.getZookeeper().setContainerMemory("11Gi");
        clone.getZookeeper().setJvmXx("zkfoo zkbar, zkfoo2 zkbar2");
        clone.getKafka().setOneInstancePerNode(false);
        clone.getKafka().setColocateWithZookeeper(false);
        clone.getExporter().setColocateWithZookeeper(false);
        kafkaCluster.setKafkaConfiguration(clone);

        ManagedKafka mk = exampleManagedKafka("60Gi");
        mk.getSpec().getCapacity().setMaxPartitions(2 * clone.getKafka().getPartitionCapacity());

        Kafka kafka = kafkaCluster.kafkaFrom(mk, null);
        diffToExpected(kafka, "/expected/custom-config-strimzi.yml");
    } finally {
        kafkaCluster.setKafkaConfiguration(config);
    }
}
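The expected-file diff checks the whole custom configuration at once; individual knobs can also be spot-checked directly on the generated Strimzi model. A minimal sketch, assuming only the getters of the built Kafka spec (where containerMemory or the JVM options land in the pod template is not shown above, so only the ZooKeeper replica count is asserted here):

// Spot-check one effect of the custom configuration on the generated Kafka resource.
Kafka kafka = kafkaCluster.kafkaFrom(mk, null);
assertEquals(5, kafka.getSpec().getZookeeper().getReplicas());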
Use of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
The class KafkaClusterTest, method testDrainCleanerWebhookFound:
@Test
void testDrainCleanerWebhookFound() throws IOException {
    DrainCleanerManager mock = Mockito.mock(DrainCleanerManager.class);
    Mockito.when(mock.isDrainCleanerWebhookFound()).thenReturn(true);
    QuarkusMock.installMockForType(mock, DrainCleanerManager.class);

    ManagedKafka mk = exampleManagedKafka("40Gi");
    Kafka kafka = kafkaCluster.kafkaFrom(mk, null);

    // maxUnavailable of 0 blocks voluntary eviction so node drains are left to the drain cleaner
    assertEquals(0, kafka.getSpec().getKafka().getTemplate().getPodDisruptionBudget().getMaxUnavailable());
    assertEquals(0, kafka.getSpec().getZookeeper().getTemplate().getPodDisruptionBudget().getMaxUnavailable());
}
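Because the only stubbed call is isDrainCleanerWebhookFound, the assertions above already imply that kafkaFrom consulted the manager; if that interaction should be explicit, Mockito can verify it. A short follow-up sketch using the same mock from the test above:

// Optionally make the interaction with the drain cleaner manager explicit.
Mockito.verify(mock, Mockito.atLeastOnce()).isDrainCleanerWebhookFound();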
Use of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
The class KafkaClusterTest, method testManagedKafkaToKafka:
@Test
void testManagedKafkaToKafka() throws IOException {
    KafkaInstanceConfiguration config = kafkaCluster.getKafkaConfiguration();
    try {
        // deep-copy the configuration via a Jackson round trip before mutating it
        ObjectMapper objectMapper = new ObjectMapper();
        KafkaInstanceConfiguration clone = objectMapper.readValue(
                objectMapper.writeValueAsString(config), KafkaInstanceConfiguration.class);
        clone.getKafka().setOneInstancePerNode(false);
        clone.getKafka().setColocateWithZookeeper(false);
        clone.getExporter().setColocateWithZookeeper(false);
        kafkaCluster.setKafkaConfiguration(clone);

        ManagedKafka mk = exampleManagedKafka("60Gi");
        Kafka kafka = kafkaCluster.kafkaFrom(mk, null);

        diffToExpected(kafka, "/expected/strimzi.yml");
    } finally {
        kafkaCluster.setKafkaConfiguration(config);
    }
}
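This test deep-copies the configuration with a Jackson round trip, while the other tests use Fabric8's Serialization.clone; both yield a copy that can be mutated without touching the configuration shared by the rest of the suite. A minimal sketch of the two equivalent idioms, assuming Jackson and the Fabric8 kubernetes-client utilities already used above:

// Two equivalent ways to deep-copy the configuration before mutating it in a test.
ObjectMapper mapper = new ObjectMapper();
KafkaInstanceConfiguration viaJackson =
        mapper.readValue(mapper.writeValueAsString(config), KafkaInstanceConfiguration.class);
// io.fabric8.kubernetes.client.utils.Serialization
KafkaInstanceConfiguration viaFabric8 = Serialization.clone(config);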