Use of io.strimzi.test.mockkube.MockKube in project strimzi by strimzi.
The class KafkaAssemblyOperatorCustomCertTest, method setup:
@BeforeEach
public void setup() {
    kafka = createKafka();
    client = new MockKube()
            .withCustomResourceDefinition(Crds.kafka(), Kafka.class, KafkaList.class).end()
            .withCustomResourceDefinition(Crds.strimziPodSet(), StrimziPodSet.class, StrimziPodSetList.class).end()
            .build();
    Crds.kafkaOperation(client).inNamespace(namespace).create(kafka);
    client.secrets().inNamespace(namespace).create(getTlsSecret());
    client.secrets().inNamespace(namespace).create(getExternalSecret());
    Secret secret = new SecretBuilder()
            .withNewMetadata().withNamespace(namespace).withName("testkafka-cluster-operator-certs").endMetadata()
            .addToData("foo", "bar")
            .build();
    client.secrets().inNamespace(namespace).create(secret);
    ResourceOperatorSupplier supplier = new ResourceOperatorSupplier(vertx, client,
            mock(ZookeeperLeaderFinder.class), mock(AdminClientProvider.class), mock(ZookeeperScalerProvider.class),
            mock(MetricsProvider.class), new PlatformFeaturesAvailability(false, KubernetesVersion.V1_20),
            FeatureGates.NONE, 10000);
    operator = new MockKafkaAssemblyOperator(vertx, new PlatformFeaturesAvailability(false, kubernetesVersion),
            certManager, passwordGenerator, supplier, config);
}
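As a quick sanity check, the mock client built above behaves like a regular Fabric8 client, so the fixtures can be read back before the test body runs. The snippet below is a minimal sketch and is not part of the original test; it only assumes the Hamcrest CoreMatchers already used elsewhere in these examples.

    // Hypothetical follow-up check against the MockKube-backed client (not in the original setup method)
    Secret created = client.secrets().inNamespace(namespace).withName("testkafka-cluster-operator-certs").get();
    assertThat(created, CoreMatchers.notNullValue());
    assertThat(created.getData().containsKey("foo"), CoreMatchers.is(true));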
Use of io.strimzi.test.mockkube.MockKube in project strimzi by strimzi.
The class JbodStorageTest, method init:
@BeforeEach
public void init() {
    this.volumes = new ArrayList<>(2);
    volumes.add(new PersistentClaimStorageBuilder().withId(0).withDeleteClaim(true).withSize("100Gi").build());
    volumes.add(new PersistentClaimStorageBuilder().withId(1).withDeleteClaim(false).withSize("100Gi").build());
    this.kafka = new KafkaBuilder()
            .withNewMetadata().withNamespace(NAMESPACE).withName(NAME).endMetadata()
            .withNewSpec()
                .withNewKafka()
                    .withReplicas(3)
                    .withListeners(new GenericKafkaListenerBuilder()
                            .withName("plain").withPort(9092).withType(KafkaListenerType.INTERNAL).withTls(false).build())
                    .withNewJbodStorage().withVolumes(volumes).endJbodStorage()
                .endKafka()
                .withNewZookeeper().withReplicas(1).endZookeeper()
            .endSpec()
            .build();
    // setting up the Kafka CRD
    CustomResourceDefinition kafkaAssemblyCrd = Crds.kafka();
    // setting up a mock Kubernetes client
    this.mockClient = new MockKube()
            .withCustomResourceDefinition(kafkaAssemblyCrd, Kafka.class, KafkaList.class).end()
            .withCustomResourceDefinition(Crds.strimziPodSet(), StrimziPodSet.class, StrimziPodSetList.class).end()
            .build();
    // initialize a Kafka in MockKube
    Crds.kafkaOperation(this.mockClient).inNamespace(NAMESPACE).withName(NAME).create(this.kafka);
    PlatformFeaturesAvailability pfa = new PlatformFeaturesAvailability(false, KubernetesVersion.V1_16);
    // creating the Kafka operator
    ResourceOperatorSupplier ros = new ResourceOperatorSupplier(this.vertx, this.mockClient,
            ResourceUtils.zookeeperLeaderFinder(this.vertx, this.mockClient), ResourceUtils.adminClientProvider(),
            ResourceUtils.zookeeperScalerProvider(), ResourceUtils.metricsProvider(), pfa, FeatureGates.NONE, 60_000L);
    this.operator = new KafkaAssemblyOperator(this.vertx, pfa, new MockCertManager(),
            new PasswordGenerator(10, "a", "a"), ros, ResourceUtils.dummyClusterOperatorConfig(VERSIONS, 2_000));
}
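A test built on this setup would typically trigger a reconciliation and then inspect the resources that MockKube now holds. The sketch below is illustrative only: it assumes a VertxTestContext parameter as in the other examples on this page, and the expected PVC count (two JBOD volumes for each of the three Kafka replicas configured above) is an assumption, not taken from the original test.

    // Hypothetical reconciliation check; reconcile(...) and Reconciliation(...) follow the pattern used later on this page
    Checkpoint async = context.checkpoint();
    operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, NAME))
            .onComplete(context.succeeding(v -> context.verify(() -> {
                List<PersistentVolumeClaim> pvcs = mockClient.persistentVolumeClaims().inNamespace(NAMESPACE).list().getItems();
                assertThat(pvcs.size(), CoreMatchers.is(6)); // 2 JBOD volumes x 3 replicas (assumed)
                async.flag();
            })));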
Use of io.strimzi.test.mockkube.MockKube in project strimzi by strimzi.
The class KafkaConnectAssemblyOperatorMockTest, method setConnectResource:
private void setConnectResource(KafkaConnect connectResource) {
    mockKube = new MockKube();
    mockClient = mockKube
            .withCustomResourceDefinition(Crds.kafkaConnect(), KafkaConnect.class, KafkaConnectList.class, KafkaConnect::getStatus, KafkaConnect::setStatus)
                .withInitialInstances(Collections.singleton(connectResource)).end()
            .withCustomResourceDefinition(Crds.kafkaConnector(), KafkaConnector.class, KafkaConnectorList.class, KafkaConnector::getStatus, KafkaConnector::setStatus).end()
            .build();
}
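The method expects a fully built KafkaConnect resource, which MockKube then serves as an initial instance. A minimal caller might look like the sketch below; the resource name, namespace, and bootstrap address are illustrative values, not taken from the original test.

    // Hypothetical caller using the standard Strimzi KafkaConnectBuilder
    KafkaConnect connect = new KafkaConnectBuilder()
            .withNewMetadata()
                .withName("my-connect")
                .withNamespace("ns")
            .endMetadata()
            .withNewSpec()
                .withReplicas(1)
                .withBootstrapServers("my-cluster-kafka-bootstrap:9092")
            .endSpec()
            .build();
    setConnectResource(connect);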
Use of io.strimzi.test.mockkube.MockKube in project strimzi by strimzi.
The class KafkaConnectorIT, method test:
@Test
public void test(VertxTestContext context) {
    KafkaConnectApiImpl connectClient = new KafkaConnectApiImpl(vertx);
    KubernetesClient client = new MockKube()
            .withCustomResourceDefinition(Crds.kafkaConnector(), KafkaConnector.class, KafkaConnectorList.class).end()
            .build();
    PlatformFeaturesAvailability pfa = new PlatformFeaturesAvailability(false, KubernetesVersion.V1_20);
    String namespace = "ns";
    String connectorName = "my-connector";

    LinkedHashMap<String, Object> config = new LinkedHashMap<>();
    config.put(TestingConnector.START_TIME_MS, 1_000);
    config.put(TestingConnector.STOP_TIME_MS, 0);
    config.put(TestingConnector.TASK_START_TIME_MS, 1_000);
    config.put(TestingConnector.TASK_STOP_TIME_MS, 0);
    config.put(TestingConnector.TASK_POLL_TIME_MS, 1_000);
    config.put(TestingConnector.TASK_POLL_RECORDS, 100);
    config.put(TestingConnector.NUM_PARTITIONS, 1);
    config.put(TestingConnector.TOPIC_NAME, "my-topic");

    KafkaConnector connector = createKafkaConnector(namespace, connectorName, config);
    Crds.kafkaConnectorOperation(client).inNamespace(namespace).create(connector);

    // Intercept status updates at CrdOperator level
    // This is to bridge limitations between MockKube and the CrdOperator, as there are currently no Fabric8 APIs for status update
    CrdOperator connectCrdOperator = mock(CrdOperator.class);
    when(connectCrdOperator.updateStatusAsync(any(), any())).thenAnswer(invocation -> {
        try {
            return Future.succeededFuture(Crds.kafkaConnectorOperation(client)
                    .inNamespace(namespace)
                    .withName(connectorName)
                    .patch((KafkaConnector) invocation.getArgument(1)));
        } catch (Exception e) {
            return Future.failedFuture(e);
        }
    });
    when(connectCrdOperator.getAsync(any(), any())).thenAnswer(invocationOnMock -> {
        try {
            return Future.succeededFuture(Crds.kafkaConnectorOperation(client)
                    .inNamespace(namespace)
                    .withName(connectorName)
                    .get());
        } catch (Exception e) {
            return Future.failedFuture(e);
        }
    });

    MetricsProvider metrics = new MicrometerMetricsProvider();
    KafkaConnectAssemblyOperator operator = new KafkaConnectAssemblyOperator(vertx, pfa,
            new ResourceOperatorSupplier(null, null, null, null, null, null, null, null, null, null, null, null,
                    null, null, null, null, null, null, null, null, null, null, null, connectCrdOperator,
                    null, null, null, null, null, null, metrics, null, null),
            ClusterOperatorConfig.fromMap(Collections.emptyMap(), KafkaVersionTestUtils.getKafkaVersionLookup()),
            connect -> new KafkaConnectApiImpl(vertx),
            connectCluster.getPort(2)) {
    };

    Checkpoint async = context.checkpoint();
    operator.reconcileConnectorAndHandleResult(new Reconciliation("test", "KafkaConnect", namespace, "bogus"),
                    "localhost", connectClient, true, connectorName, connector)
            .onComplete(context.succeeding(v -> assertConnectorIsRunning(context, client, namespace, connectorName)))
            .compose(v -> {
                config.remove(TestingConnector.START_TIME_MS, 1_000);
                config.put(TestingConnector.START_TIME_MS, 1_000);
                Crds.kafkaConnectorOperation(client).inNamespace(namespace).withName(connectorName)
                        .patch(createKafkaConnector(namespace, connectorName, config));
                return operator.reconcileConnectorAndHandleResult(new Reconciliation("test", "KafkaConnect", namespace, "bogus"),
                        "localhost", connectClient, true, connectorName, connector);
            })
            .onComplete(context.succeeding(v -> context.verify(() -> {
                assertConnectorIsRunning(context, client, namespace, connectorName);

                // Assert metrics from Connector Operator
                MeterRegistry registry = metrics.meterRegistry();
                assertThat(registry.get(AbstractOperator.METRICS_PREFIX + "reconciliations")
                        .tag("kind", KafkaConnector.RESOURCE_KIND).counter().count(), CoreMatchers.is(2.0));
                assertThat(registry.get(AbstractOperator.METRICS_PREFIX + "reconciliations.successful")
                        .tag("kind", KafkaConnector.RESOURCE_KIND).counter().count(), CoreMatchers.is(2.0));
                assertThat(registry.get(AbstractOperator.METRICS_PREFIX + "reconciliations.duration")
                        .tag("kind", KafkaConnector.RESOURCE_KIND).timer().count(), CoreMatchers.is(2L));
                assertThat(registry.get(AbstractOperator.METRICS_PREFIX + "reconciliations.duration")
                        .tag("kind", KafkaConnector.RESOURCE_KIND).timer().totalTime(TimeUnit.MILLISECONDS), greaterThan(0.0));

                async.flag();
            })));
}
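The test relies on a createKafkaConnector(namespace, name, config) helper whose body is not shown here. A plausible shape for it, assuming the standard Strimzi KafkaConnectorBuilder API, is sketched below; this is a guess at the helper, not the project's actual implementation.

    // Assumed shape of the helper referenced above (illustrative only)
    private KafkaConnector createKafkaConnector(String namespace, String name, LinkedHashMap<String, Object> config) {
        return new KafkaConnectorBuilder()
                .withNewMetadata()
                    .withName(name)
                    .withNamespace(namespace)
                .endMetadata()
                .withNewSpec()
                    .withClassName(TestingConnector.class.getName())
                    .withTasksMax(1)
                    .withConfig(config)
                .endSpec()
                .build();
    }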
Use of io.strimzi.test.mockkube.MockKube in project strimzi by strimzi.
The class KafkaUpgradeDowngradeMockTest, method initialize:
private Future<Void> initialize(VertxTestContext context, Kafka initialKafka) {
    CustomResourceDefinition kafkaAssemblyCrd = Crds.kafka();
    client = new MockKube()
            .withCustomResourceDefinition(kafkaAssemblyCrd, Kafka.class, KafkaList.class)
                .withInitialInstances(Collections.singleton(initialKafka)).end()
            .withCustomResourceDefinition(Crds.strimziPodSet(), StrimziPodSet.class, StrimziPodSetList.class).end()
            .build();
    ResourceOperatorSupplier supplier = new ResourceOperatorSupplier(vertx, client,
            ResourceUtils.zookeeperLeaderFinder(vertx, client), ResourceUtils.adminClientProvider(),
            ResourceUtils.zookeeperScalerProvider(), ResourceUtils.metricsProvider(), pfa, FeatureGates.NONE, 2_000);
    ClusterOperatorConfig config = ResourceUtils.dummyClusterOperatorConfig(VERSIONS);
    operator = new KafkaAssemblyOperator(vertx, pfa, new MockCertManager(),
            new PasswordGenerator(10, "a", "a"), supplier, config);
    LOGGER.info("Reconciling initially -> create");
    return operator.reconcile(new Reconciliation("initial-reconciliation", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME));
}
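A test would typically compose this initialize(...) future with further reconciliations and assertions. The sketch below shows one possible caller; createInitialKafka() is an assumed fixture helper and the assertion is only a placeholder for the real upgrade or downgrade checks.

    // Hypothetical caller of initialize(...); createInitialKafka() is an assumed helper, not part of the original test
    @Test
    public void exampleUpgradeFlow(VertxTestContext context) {
        Checkpoint async = context.checkpoint();
        initialize(context, createInitialKafka())
                .onComplete(context.succeeding(v -> context.verify(() -> {
                    Kafka current = Crds.kafkaOperation(client).inNamespace(NAMESPACE).withName(CLUSTER_NAME).get();
                    assertThat(current, CoreMatchers.notNullValue());
                    async.flag();
                })));
    }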