Usage example of io.strimzi.test.mockkube.MockKube from the strimzi-kafka-operator project (by strimzi):
class ConfigTest, method testInvalidKeystoreConfig.
@Test
public void testInvalidKeystoreConfig() {
    // Enable TLS but supply only a truststore password — presumably invalid because
    // no truststore location is configured; confirm against Session.adminClientProperties.
    Map<String, String> envVars = new HashMap<>(MANDATORY);
    envVars.put(Config.TLS_ENABLED.key, "true");
    envVars.put(Config.TLS_TRUSTSTORE_PASSWORD.key, "password");

    KubernetesClient kubeClient = new MockKube().build();
    Session session = new Session(kubeClient, new Config(envVars));

    // Building the admin client properties from this config must be rejected
    assertThrows(InvalidConfigurationException.class, session::adminClientProperties);
}
Usage example of io.strimzi.test.mockkube.MockKube from the strimzi-kafka-operator project (by strimzi):
class ConfigTest, method testInvalidSaslConfig.
@Test
public void testInvalidSaslConfig() {
    // SASL enabled with no credentials at all must be rejected
    Map<String, String> envVars = new HashMap<>(MANDATORY);
    envVars.put(Config.SASL_ENABLED.key, "true");

    KubernetesClient kubeClient = new MockKube().build();
    Session session = new Session(kubeClient, new Config(envVars));
    assertThrows(InvalidConfigurationException.class, session::adminClientProperties);

    // Even with a username and password the config is still rejected.
    // NOTE(review): this branch expects IllegalArgumentException rather than
    // InvalidConfigurationException — presumably another mandatory SASL setting
    // (e.g. the mechanism) is still missing; confirm against Session.
    envVars.put(Config.SASL_USERNAME.key, "admin");
    envVars.put(Config.SASL_PASSWORD.key, "password");
    Session sessionWithCredentials = new Session(kubeClient, new Config(envVars));
    assertThrows(IllegalArgumentException.class, sessionWithCredentials::adminClientProperties);
}
Usage example of io.strimzi.test.mockkube.MockKube from the strimzi-kafka-operator project (by strimzi):
class JbodStorageTest, method init.
@BeforeEach
// FIX: JUnit Jupiter lifecycle methods must NOT be private — a private
// @BeforeEach method is rejected at discovery time with a JUnitException.
// Package-private visibility is the conventional choice for JUnit 5.
void init() {
    // Two JBOD volumes: one with delete-claim enabled, one without, so the
    // test can observe both PVC retention behaviors.
    this.volumes = new ArrayList<>(2);
    volumes.add(new PersistentClaimStorageBuilder().withId(0).withDeleteClaim(true).withSize("100Gi").build());
    volumes.add(new PersistentClaimStorageBuilder().withId(1).withDeleteClaim(false).withSize("100Gi").build());

    // A minimal Kafka custom resource: 3 brokers with a plain internal
    // listener and the JBOD storage declared above, plus a 1-node ZooKeeper.
    this.kafka = new KafkaBuilder()
            .withNewMetadata()
                .withNamespace(NAMESPACE)
                .withName(NAME)
            .endMetadata()
            .withNewSpec()
                .withNewKafka()
                    .withReplicas(3)
                    .withListeners(new GenericKafkaListenerBuilder()
                            .withName("plain")
                            .withPort(9092)
                            .withType(KafkaListenerType.INTERNAL)
                            .withTls(false)
                            .build())
                    .withNewJbodStorage()
                        .withVolumes(volumes)
                    .endJbodStorage()
                .endKafka()
                .withNewZookeeper()
                    .withReplicas(1)
                .endZookeeper()
            .endSpec()
            .build();

    // setting up the Kafka CRD
    CustomResourceDefinition kafkaAssemblyCrd = Crds.kafka();

    // setting up a mock Kubernetes client with the Kafka and StrimziPodSet CRDs registered
    this.mockClient = new MockKube()
            .withCustomResourceDefinition(kafkaAssemblyCrd, Kafka.class, KafkaList.class).end()
            .withCustomResourceDefinition(Crds.strimziPodSet(), StrimziPodSet.class, StrimziPodSetList.class).end()
            .build();

    // initialize a Kafka in MockKube
    Crds.kafkaOperation(this.mockClient).inNamespace(NAMESPACE).withName(NAME).create(this.kafka);

    PlatformFeaturesAvailability pfa = new PlatformFeaturesAvailability(false, KubernetesVersion.V1_16);

    // creating the Kafka operator wired against the mock client and stubbed suppliers
    ResourceOperatorSupplier ros = new ResourceOperatorSupplier(this.vertx, this.mockClient,
            ResourceUtils.zookeeperLeaderFinder(this.vertx, this.mockClient),
            ResourceUtils.adminClientProvider(), ResourceUtils.zookeeperScalerProvider(),
            ResourceUtils.metricsProvider(), pfa, FeatureGates.NONE, 60_000L);
    this.operator = new KafkaAssemblyOperator(this.vertx, pfa, new MockCertManager(),
            new PasswordGenerator(10, "a", "a"), ros,
            ResourceUtils.dummyClusterOperatorConfig(VERSIONS, 2_000));
}
Usage example of io.strimzi.test.mockkube.MockKube from the strimzi-kafka-operator project (by strimzi):
class KafkaConnectorIT, method test.
// End-to-end connector reconciliation test: creates a KafkaConnector resource in a
// mock Kubernetes cluster, reconciles it twice against a real Connect REST endpoint
// (connectCluster), and verifies the connector runs and the operator metrics count
// both reconciliations.
@Test
public void test(VertxTestContext context) {
KafkaConnectApiImpl connectClient = new KafkaConnectApiImpl(vertx);
// Mock Kubernetes client with only the KafkaConnector CRD registered
KubernetesClient client = new MockKube().withCustomResourceDefinition(Crds.kafkaConnector(), KafkaConnector.class, KafkaConnectorList.class).end().build();
PlatformFeaturesAvailability pfa = new PlatformFeaturesAvailability(false, KubernetesVersion.V1_20);
String namespace = "ns";
String connectorName = "my-connector";
// Connector config for the TestingConnector stub: fast start/stop, one task
// polling 100 records per second into a single-partition topic
LinkedHashMap<String, Object> config = new LinkedHashMap<>();
config.put(TestingConnector.START_TIME_MS, 1_000);
config.put(TestingConnector.STOP_TIME_MS, 0);
config.put(TestingConnector.TASK_START_TIME_MS, 1_000);
config.put(TestingConnector.TASK_STOP_TIME_MS, 0);
config.put(TestingConnector.TASK_POLL_TIME_MS, 1_000);
config.put(TestingConnector.TASK_POLL_RECORDS, 100);
config.put(TestingConnector.NUM_PARTITIONS, 1);
config.put(TestingConnector.TOPIC_NAME, "my-topic");
KafkaConnector connector = createKafkaConnector(namespace, connectorName, config);
Crds.kafkaConnectorOperation(client).inNamespace(namespace).create(connector);
// Intercept status updates at CrdOperator level
// This is to bridge limitations between MockKube and the CrdOperator, as there are currently no Fabric8 APIs for status update
// NOTE(review): raw CrdOperator type — generic parameters are omitted to keep the mock simple
CrdOperator connectCrdOperator = mock(CrdOperator.class);
when(connectCrdOperator.updateStatusAsync(any(), any())).thenAnswer(invocation -> {
try {
// Route status updates through a plain patch on the mock client
return Future.succeededFuture(Crds.kafkaConnectorOperation(client).inNamespace(namespace).withName(connectorName).patch((KafkaConnector) invocation.getArgument(1)));
} catch (Exception e) {
return Future.failedFuture(e);
}
});
when(connectCrdOperator.getAsync(any(), any())).thenAnswer(invocationOnMock -> {
try {
return Future.succeededFuture(Crds.kafkaConnectorOperation(client).inNamespace(namespace).withName(connectorName).get());
} catch (Exception e) {
return Future.failedFuture(e);
}
});
MetricsProvider metrics = new MicrometerMetricsProvider();
// ResourceOperatorSupplier built positionally with nulls for everything except the
// mocked CrdOperator and the metrics provider; only those two are exercised here
KafkaConnectAssemblyOperator operator = new KafkaConnectAssemblyOperator(vertx, pfa, new ResourceOperatorSupplier(null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, connectCrdOperator, null, null, null, null, null, null, metrics, null, null), ClusterOperatorConfig.fromMap(Collections.emptyMap(), KafkaVersionTestUtils.getKafkaVersionLookup()), connect -> new KafkaConnectApiImpl(vertx), connectCluster.getPort(2)) {
};
Checkpoint async = context.checkpoint();
// First reconcile: connector must come up RUNNING
operator.reconcileConnectorAndHandleResult(new Reconciliation("test", "KafkaConnect", namespace, "bogus"), "localhost", connectClient, true, connectorName, connector).onComplete(context.succeeding(v -> assertConnectorIsRunning(context, client, namespace, connectorName))).compose(v -> {
// NOTE(review): remove(k, v) followed by put(k, v) with the same value is a no-op —
// presumably this was meant to change the config so the second reconcile sees an
// update; confirm the intended value before relying on this
config.remove(TestingConnector.START_TIME_MS, 1_000);
config.put(TestingConnector.START_TIME_MS, 1_000);
Crds.kafkaConnectorOperation(client).inNamespace(namespace).withName(connectorName).patch(createKafkaConnector(namespace, connectorName, config));
// Second reconcile against the patched resource
return operator.reconcileConnectorAndHandleResult(new Reconciliation("test", "KafkaConnect", namespace, "bogus"), "localhost", connectClient, true, connectorName, connector);
}).onComplete(context.succeeding(v -> context.verify(() -> {
assertConnectorIsRunning(context, client, namespace, connectorName);
// Assert metrics from Connector Operator
// Two reconciliations total, both successful, with a positive total duration
MeterRegistry registry = metrics.meterRegistry();
assertThat(registry.get(AbstractOperator.METRICS_PREFIX + "reconciliations").tag("kind", KafkaConnector.RESOURCE_KIND).counter().count(), CoreMatchers.is(2.0));
assertThat(registry.get(AbstractOperator.METRICS_PREFIX + "reconciliations.successful").tag("kind", KafkaConnector.RESOURCE_KIND).counter().count(), CoreMatchers.is(2.0));
assertThat(registry.get(AbstractOperator.METRICS_PREFIX + "reconciliations.duration").tag("kind", KafkaConnector.RESOURCE_KIND).timer().count(), CoreMatchers.is(2L));
assertThat(registry.get(AbstractOperator.METRICS_PREFIX + "reconciliations.duration").tag("kind", KafkaConnector.RESOURCE_KIND).timer().totalTime(TimeUnit.MILLISECONDS), greaterThan(0.0));
async.flag();
})));
}
Usage example of io.strimzi.test.mockkube.MockKube from the strimzi-kafka-operator project (by strimzi):
class KafkaConnectAssemblyOperatorMockTest, method setConnectResource.
/**
 * Rebuilds the mock Kubernetes environment from scratch: registers the
 * KafkaConnect CRD with the given resource as its initial instance, plus
 * the KafkaConnector CRD, wiring status getters/setters for both.
 */
private void setConnectResource(KafkaConnect connectResource) {
    mockKube = new MockKube();
    mockClient = mockKube
            .withCustomResourceDefinition(Crds.kafkaConnect(), KafkaConnect.class, KafkaConnectList.class,
                    KafkaConnect::getStatus, KafkaConnect::setStatus)
                .withInitialInstances(Collections.singleton(connectResource))
                .end()
            .withCustomResourceDefinition(Crds.kafkaConnector(), KafkaConnector.class, KafkaConnectorList.class,
                    KafkaConnector::getStatus, KafkaConnector::setStatus)
                .end()
            .build();
}
Aggregations