use of io.strimzi.operator.common.MicrometerMetricsProvider in projects strimzi and strimzi-kafka-operator by strimzi.
the class KafkaConnectorIT method test.
@Test
public void test(VertxTestContext context) {
    KafkaConnectApiImpl connectClient = new KafkaConnectApiImpl(vertx);
    KubernetesClient client = new MockKube()
            .withCustomResourceDefinition(Crds.kafkaConnector(), KafkaConnector.class, KafkaConnectorList.class)
            .end()
            .build();
    PlatformFeaturesAvailability pfa = new PlatformFeaturesAvailability(false, KubernetesVersion.V1_20);

    String namespace = "ns";
    String connectorName = "my-connector";

    LinkedHashMap<String, Object> config = new LinkedHashMap<>();
    config.put(TestingConnector.START_TIME_MS, 1_000);
    config.put(TestingConnector.STOP_TIME_MS, 0);
    config.put(TestingConnector.TASK_START_TIME_MS, 1_000);
    config.put(TestingConnector.TASK_STOP_TIME_MS, 0);
    config.put(TestingConnector.TASK_POLL_TIME_MS, 1_000);
    config.put(TestingConnector.TASK_POLL_RECORDS, 100);
    config.put(TestingConnector.NUM_PARTITIONS, 1);
    config.put(TestingConnector.TOPIC_NAME, "my-topic");

    KafkaConnector connector = createKafkaConnector(namespace, connectorName, config);
    Crds.kafkaConnectorOperation(client).inNamespace(namespace).create(connector);

    // Intercept status updates at the CrdOperator level.
    // This bridges limitations between MockKube and the CrdOperator, as there are currently no Fabric8 APIs for status update.
    CrdOperator connectCrdOperator = mock(CrdOperator.class);
    when(connectCrdOperator.updateStatusAsync(any(), any())).thenAnswer(invocation -> {
        try {
            return Future.succeededFuture(Crds.kafkaConnectorOperation(client)
                    .inNamespace(namespace)
                    .withName(connectorName)
                    .patch((KafkaConnector) invocation.getArgument(1)));
        } catch (Exception e) {
            return Future.failedFuture(e);
        }
    });
    when(connectCrdOperator.getAsync(any(), any())).thenAnswer(invocationOnMock -> {
        try {
            return Future.succeededFuture(Crds.kafkaConnectorOperation(client)
                    .inNamespace(namespace)
                    .withName(connectorName)
                    .get());
        } catch (Exception e) {
            return Future.failedFuture(e);
        }
    });

    MetricsProvider metrics = new MicrometerMetricsProvider();

    KafkaConnectAssemblyOperator operator = new KafkaConnectAssemblyOperator(
            vertx,
            pfa,
            new ResourceOperatorSupplier(null, null, null, null, null, null, null, null, null, null, null, null,
                    null, null, null, null, null, null, null, null, null, null, null, connectCrdOperator, null,
                    null, null, null, null, null, metrics, null, null),
            ClusterOperatorConfig.fromMap(Collections.emptyMap(), KafkaVersionTestUtils.getKafkaVersionLookup()),
            connect -> new KafkaConnectApiImpl(vertx),
            connectCluster.getPort(2)) {
    };

    Checkpoint async = context.checkpoint();

    operator.reconcileConnectorAndHandleResult(new Reconciliation("test", "KafkaConnect", namespace, "bogus"),
                    "localhost", connectClient, true, connectorName, connector)
            .onComplete(context.succeeding(v -> assertConnectorIsRunning(context, client, namespace, connectorName)))
            .compose(v -> {
                // Patch the connector and trigger a second reconciliation
                config.remove(TestingConnector.START_TIME_MS, 1_000);
                config.put(TestingConnector.START_TIME_MS, 1_000);
                Crds.kafkaConnectorOperation(client)
                        .inNamespace(namespace)
                        .withName(connectorName)
                        .patch(createKafkaConnector(namespace, connectorName, config));
                return operator.reconcileConnectorAndHandleResult(new Reconciliation("test", "KafkaConnect", namespace, "bogus"),
                        "localhost", connectClient, true, connectorName, connector);
            })
            .onComplete(context.succeeding(v -> context.verify(() -> {
                assertConnectorIsRunning(context, client, namespace, connectorName);

                // Assert metrics from the connector operator
                MeterRegistry registry = metrics.meterRegistry();
                assertThat(registry.get(AbstractOperator.METRICS_PREFIX + "reconciliations").tag("kind", KafkaConnector.RESOURCE_KIND).counter().count(), CoreMatchers.is(2.0));
                assertThat(registry.get(AbstractOperator.METRICS_PREFIX + "reconciliations.successful").tag("kind", KafkaConnector.RESOURCE_KIND).counter().count(), CoreMatchers.is(2.0));
                assertThat(registry.get(AbstractOperator.METRICS_PREFIX + "reconciliations.duration").tag("kind", KafkaConnector.RESOURCE_KIND).timer().count(), CoreMatchers.is(2L));
                assertThat(registry.get(AbstractOperator.METRICS_PREFIX + "reconciliations.duration").tag("kind", KafkaConnector.RESOURCE_KIND).timer().totalTime(TimeUnit.MILLISECONDS), greaterThan(0.0));

                async.flag();
            })));
}
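For readers unfamiliar with the Micrometer search API used in the assertions above, here is a minimal, self-contained sketch of the same lookup-by-name-and-tag pattern. It is not the operator's code: it substitutes a plain SimpleMeterRegistry for the registry returned by MicrometerMetricsProvider.meterRegistry(), and the metric names and class name are hypothetical stand-ins for the AbstractOperator.METRICS_PREFIX constants.

import io.micrometer.core.instrument.Counter;
import io.micrometer.core.instrument.MeterRegistry;
import io.micrometer.core.instrument.Timer;
import io.micrometer.core.instrument.simple.SimpleMeterRegistry;

import java.util.concurrent.TimeUnit;

public class MicrometerLookupSketch {

    public static void main(String[] args) {
        // Stand-alone registry; the test above obtains one via metrics.meterRegistry() instead
        MeterRegistry registry = new SimpleMeterRegistry();

        // Hypothetical metric names mirroring the "reconciliations" counter and "reconciliations.duration" timer
        Counter reconciliations = Counter.builder("example.reconciliations")
                .tag("kind", "KafkaConnector")
                .register(registry);
        Timer duration = Timer.builder("example.reconciliations.duration")
                .tag("kind", "KafkaConnector")
                .register(registry);

        // Simulate two reconciliations
        for (int i = 0; i < 2; i++) {
            reconciliations.increment();
            duration.record(5, TimeUnit.MILLISECONDS);
        }

        // The same search pattern used in the assertions: registry.get(name).tag(key, value).counter()/timer()
        double count = registry.get("example.reconciliations").tag("kind", "KafkaConnector").counter().count();
        long timed = registry.get("example.reconciliations.duration").tag("kind", "KafkaConnector").timer().count();
        double totalMs = registry.get("example.reconciliations.duration").tag("kind", "KafkaConnector").timer().totalTime(TimeUnit.MILLISECONDS);

        System.out.printf("count=%.1f timed=%d totalMs=%.3f%n", count, timed, totalMs);
    }
}

The key point is that registry.get(...) searches by meter name and tags, so the assertions can count reconciliations per resource kind without holding a reference to the counter or timer objects themselves.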
use of io.strimzi.operator.common.MicrometerMetricsProvider in projects strimzi and strimzi-kafka-operator by strimzi.
the class TopicOperatorTest method createCleanMetricsProvider.
/**
 * Creates a new MetricsProvider and makes sure it doesn't contain any metrics from previous tests.
 *
 * @return  Clean MetricsProvider
 */
public MetricsProvider createCleanMetricsProvider() {
    MetricsProvider metrics = new MicrometerMetricsProvider();
    MeterRegistry registry = metrics.meterRegistry();

    // Drop any meters left over from previous tests
    registry.forEachMeter(registry::remove);

    return metrics;
}
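To illustrate the clearing idiom above outside of the operator tests, here is a minimal sketch against a plain SimpleMeterRegistry; the class name and metric name are made up for the example, but the forEachMeter/remove calls are the same Micrometer API used in createCleanMetricsProvider().

import io.micrometer.core.instrument.Counter;
import io.micrometer.core.instrument.MeterRegistry;
import io.micrometer.core.instrument.simple.SimpleMeterRegistry;

public class CleanRegistrySketch {

    public static void main(String[] args) {
        MeterRegistry registry = new SimpleMeterRegistry();

        // Register and bump a meter, as a previous test might have done
        Counter.builder("example.reconciliations").tag("kind", "KafkaTopic").register(registry).increment();
        System.out.println("meters before: " + registry.getMeters().size());   // 1

        // Same clearing idiom: remove every meter currently held by the registry
        registry.forEachMeter(registry::remove);
        System.out.println("meters after: " + registry.getMeters().size());    // 0
    }
}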