Use of io.strimzi.api.kafka.model.status.Status in project strimzi by strimzi.
The class ConnectorMockTest, method testChangeStrimziClusterLabel.
/**
 * Change the cluster label from one cluster to another;
 * check that the connector is deleted from the old cluster
 * and added to the new cluster.
 */
@Test
public void testChangeStrimziClusterLabel(VertxTestContext context) throws InterruptedException {
String oldConnectClusterName = "cluster1";
String newConnectClusterName = "cluster2";
String connectorName = "connector";
// Create two connect clusters
KafkaConnect connect = new KafkaConnectBuilder()
        .withNewMetadata()
            .withNamespace(NAMESPACE)
            .withName(oldConnectClusterName)
            .addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true")
        .endMetadata()
        .withNewSpec()
            .withReplicas(1)
        .endSpec()
        .build();
Crds.kafkaConnectOperation(client).inNamespace(NAMESPACE).create(connect);
waitForConnectReady(oldConnectClusterName);
KafkaConnect connect2 = new KafkaConnectBuilder()
        .withNewMetadata()
            .withNamespace(NAMESPACE)
            .withName(newConnectClusterName)
            .addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true")
        .endMetadata()
        .withNewSpec()
            .withReplicas(1)
        .endSpec()
        .build();
Crds.kafkaConnectOperation(client).inNamespace(NAMESPACE).create(connect2);
waitForConnectReady(newConnectClusterName);
// Create KafkaConnector associated with the first cluster using the Strimzi Cluster label and wait till it's ready
KafkaConnector connector = new KafkaConnectorBuilder()
        .withNewMetadata()
            .withName(connectorName)
            .withNamespace(NAMESPACE)
            .addToLabels(Labels.STRIMZI_CLUSTER_LABEL, oldConnectClusterName)
        .endMetadata()
        .withNewSpec()
            .withTasksMax(1)
            .withClassName("Dummy")
        .endSpec()
        .build();
Crds.kafkaConnectorOperation(client).inNamespace(NAMESPACE).create(connector);
waitForConnectorReady(connectorName);
// triggered once for the first cluster, when the connector was created
verify(api, times(1)).createOrUpdatePutRequest(any(), eq(KafkaConnectResources.qualifiedServiceName(oldConnectClusterName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any());
// never triggered for the second cluster, as the connector's Strimzi cluster label does not match cluster 2
verify(api, never()).createOrUpdatePutRequest(any(), eq(KafkaConnectResources.qualifiedServiceName(newConnectClusterName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any());
// patch connector with new Strimzi cluster label associated with cluster 2
Crds.kafkaConnectorOperation(client).inNamespace(NAMESPACE).withName(connectorName).patch(
        new KafkaConnectorBuilder()
                .withNewMetadata()
                    .withName(connectorName)
                    .withNamespace(NAMESPACE)
                    .addToLabels(Labels.STRIMZI_CLUSTER_LABEL, newConnectClusterName)
                .endMetadata()
                .withNewSpec()
                    .withTasksMax(1)
                    .withClassName("Dummy")
                .endSpec()
                .build());
waitForConnectorReady(connectorName);
// Note: The connector does not get deleted immediately from the first cluster, only on the next timed reconciliation
verify(api, never()).delete(any(), eq(KafkaConnectResources.qualifiedServiceName(oldConnectClusterName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName));
verify(api, times(1)).createOrUpdatePutRequest(any(), eq(KafkaConnectResources.qualifiedServiceName(newConnectClusterName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any());
// Force reconciliation to assert connector deletion request occurs for first cluster
Checkpoint async = context.checkpoint();
kafkaConnectOperator.reconcile(new Reconciliation("test", "KafkaConnect", NAMESPACE, oldConnectClusterName)).onComplete(context.succeeding(v -> context.verify(() -> {
verify(api, times(1)).delete(any(), eq(KafkaConnectResources.qualifiedServiceName(oldConnectClusterName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName));
async.flag();
})));
}
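The test polls with waitForConnectReady and waitForConnectorReady, which are defined elsewhere in ConnectorMockTest and not shown in this excerpt. A minimal sketch of what such a helper could look like, assuming the io.strimzi.test.TestUtils.waitFor polling utility and a check for a Ready condition on the KafkaConnector status (the helper name comes from the test above; the body is an assumption, not the Strimzi implementation):

// Hypothetical sketch: poll the KafkaConnector custom resource until its status carries a Ready=True condition.
// Assumes io.strimzi.test.TestUtils.waitFor(description, pollIntervalMs, timeoutMs, condition).
private void waitForConnectorReady(String connectorName) {
    TestUtils.waitFor("KafkaConnector " + connectorName + " to become Ready", 1_000, 60_000, () -> {
        KafkaConnector kc = Crds.kafkaConnectorOperation(client).inNamespace(NAMESPACE).withName(connectorName).get();
        return kc != null
                && kc.getStatus() != null
                && kc.getStatus().getConditions() != null
                && kc.getStatus().getConditions().stream()
                       .anyMatch(c -> "Ready".equals(c.getType()) && "True".equals(c.getStatus()));
    });
}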
Use of io.strimzi.api.kafka.model.status.Status in project strimzi by strimzi.
The class ConnectorMockTest, method setupMockConnectAPI.
private void setupMockConnectAPI() {
api = mock(KafkaConnectApi.class);
runningConnectors = new HashMap<>();
when(api.list(any(), anyInt())).thenAnswer(i -> {
String host = i.getArgument(0);
String matchingKeyPrefix = host + "##";
return Future.succeededFuture(runningConnectors.keySet().stream().filter(s -> s.startsWith(matchingKeyPrefix)).map(s -> s.substring(matchingKeyPrefix.length())).collect(Collectors.toList()));
});
when(api.listConnectorPlugins(any(), any(), anyInt())).thenAnswer(i -> {
ConnectorPlugin connectorPlugin = new ConnectorPluginBuilder().withConnectorClass("io.strimzi.MyClass").withType("sink").withVersion("1.0.0").build();
return Future.succeededFuture(Collections.singletonList(connectorPlugin));
});
when(api.updateConnectLoggers(any(), anyString(), anyInt(), anyString(), any(OrderedProperties.class))).thenReturn(Future.succeededFuture());
when(api.getConnectorConfig(any(), any(), any(), anyInt(), any())).thenAnswer(invocation -> {
String host = invocation.getArgument(2);
String connectorName = invocation.getArgument(4);
ConnectorState connectorState = runningConnectors.get(key(host, connectorName));
if (connectorState != null) {
Map<String, String> map = new HashMap<>();
map.put("name", connectorName);
for (Map.Entry<String, Object> entry : connectorState.config) {
if (entry.getValue() != null) {
map.put(entry.getKey(), entry.getValue().toString());
}
}
return Future.succeededFuture(map);
} else {
return Future.failedFuture(new ConnectRestException("GET", String.format("/connectors/%s/config", connectorName), 404, "Not Found", ""));
}
});
when(api.getConnector(any(), any(), anyInt(), any())).thenAnswer(invocation -> {
String host = invocation.getArgument(1);
String connectorName = invocation.getArgument(3);
ConnectorState connectorState = runningConnectors.get(key(host, connectorName));
if (connectorState == null) {
return Future.failedFuture(new ConnectRestException("GET", String.format("/connectors/%s", connectorName), 404, "Not Found", ""));
}
return Future.succeededFuture(TestUtils.map("name", connectorName, "config", connectorState.config, "tasks", emptyMap()));
});
when(api.createOrUpdatePutRequest(any(), any(), anyInt(), anyString(), any())).thenAnswer(invocation -> {
LOGGER.info((String) invocation.getArgument(1) + invocation.getArgument(2) + invocation.getArgument(3) + invocation.getArgument(4));
String host = invocation.getArgument(1);
LOGGER.info("###### create " + host);
String connectorName = invocation.getArgument(3);
JsonObject connectorConfig = invocation.getArgument(4);
runningConnectors.putIfAbsent(key(host, connectorName), new ConnectorState(false, connectorConfig));
return Future.succeededFuture();
});
when(api.delete(any(), any(), anyInt(), anyString())).thenAnswer(invocation -> {
String host = invocation.getArgument(1);
LOGGER.info("###### delete " + host);
String connectorName = invocation.getArgument(3);
ConnectorState remove = runningConnectors.remove(key(host, connectorName));
return remove != null ? Future.succeededFuture() : Future.failedFuture("No such connector " + connectorName);
});
when(api.statusWithBackOff(any(), any(), any(), anyInt(), anyString())).thenAnswer(invocation -> {
String host = invocation.getArgument(2);
LOGGER.info("###### status " + host);
String connectorName = invocation.getArgument(4);
return kafkaConnectApiStatusMock(host, connectorName);
});
when(api.status(any(), any(), anyInt(), anyString())).thenAnswer(invocation -> {
String host = invocation.getArgument(1);
LOGGER.info("###### status " + host);
String connectorName = invocation.getArgument(3);
return kafkaConnectApiStatusMock(host, connectorName);
});
when(api.pause(any(), anyInt(), anyString())).thenAnswer(invocation -> {
String host = invocation.getArgument(0);
String connectorName = invocation.getArgument(2);
ConnectorState connectorState = runningConnectors.get(key(host, connectorName));
if (connectorState == null) {
return Future.failedFuture(new ConnectRestException("PUT", "", 404, "Not found", "Connector name " + connectorName));
}
if (!connectorState.paused) {
runningConnectors.put(key(host, connectorName), new ConnectorState(true, connectorState.config));
}
return Future.succeededFuture();
});
when(api.resume(any(), anyInt(), anyString())).thenAnswer(invocation -> {
String host = invocation.getArgument(0);
String connectorName = invocation.getArgument(2);
ConnectorState connectorState = runningConnectors.get(key(host, connectorName));
if (connectorState == null) {
return Future.failedFuture(new ConnectRestException("PUT", "", 404, "Not found", "Connector name " + connectorName));
}
if (connectorState.paused) {
runningConnectors.put(key(host, connectorName), new ConnectorState(false, connectorState.config));
}
return Future.succeededFuture();
});
when(api.restart(any(), anyInt(), anyString())).thenAnswer(invocation -> {
String host = invocation.getArgument(0);
String connectorName = invocation.getArgument(2);
ConnectorState connectorState = runningConnectors.get(key(host, connectorName));
if (connectorState == null) {
return Future.failedFuture(new ConnectRestException("PUT", "", 404, "Not found", "Connector name " + connectorName));
}
return Future.succeededFuture();
});
when(api.restartTask(any(), anyInt(), anyString(), anyInt())).thenAnswer(invocation -> {
String host = invocation.getArgument(0);
String connectorName = invocation.getArgument(2);
ConnectorState connectorState = runningConnectors.get(key(host, connectorName));
if (connectorState == null) {
return Future.failedFuture(new ConnectRestException("PUT", "", 404, "Not found", "Connector name " + connectorName));
}
return Future.succeededFuture();
});
when(api.getConnectorTopics(any(), any(), anyInt(), anyString())).thenAnswer(invocation -> {
String host = invocation.getArgument(1);
String connectorName = invocation.getArgument(3);
ConnectorState connectorState = runningConnectors.get(key(host, connectorName));
if (connectorState == null) {
return Future.failedFuture(new ConnectRestException("GET", String.format("/connectors/%s/topics", connectorName), 404, "Not Found", ""));
}
return Future.succeededFuture(List.of("my-topic"));
});
}
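The mock above keeps per-connector state in the runningConnectors map and relies on a key(host, connectorName) helper, a ConnectorState holder, and a kafkaConnectApiStatusMock method that are not shown in this excerpt. The following sketch is inferred from how they are used above (the list() stub strips a host + "##" prefix, and ConnectorState is constructed from a paused flag and a JsonObject config); names and shapes are assumptions rather than copies of the Strimzi sources:

// Inferred: the map key is "<host>##<connectorName>".
private static String key(String host, String connectorName) {
    return host + "##" + connectorName;
}

// Inferred: pairs the paused flag with the connector's JSON configuration.
static class ConnectorState {
    final boolean paused;
    final JsonObject config;

    ConnectorState(boolean paused, JsonObject config) {
        this.paused = paused;
        this.config = config;
    }
}

// Hypothetical status payload, mirroring the Connect REST /status response: RUNNING or PAUSED
// depending on the stored state. The real helper may report more detail (tasks, trace, etc.).
private Future<Map<String, Object>> kafkaConnectApiStatusMock(String host, String connectorName) {
    ConnectorState connectorState = runningConnectors.get(key(host, connectorName));
    if (connectorState == null) {
        return Future.failedFuture(new ConnectRestException("GET", String.format("/connectors/%s/status", connectorName), 404, "Not Found", ""));
    }
    Map<String, Object> status = TestUtils.map(
            "name", connectorName,
            "connector", TestUtils.map("state", connectorState.paused ? "PAUSED" : "RUNNING", "worker_id", host),
            "tasks", Collections.emptyList(),
            "type", "sink");
    return Future.succeededFuture(status);
}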
Use of io.strimzi.api.kafka.model.status.Status in project strimzi by strimzi.
The class AbstractOperator, method reconcile.
/**
* Reconcile assembly resources in the given namespace having the given {@code name}.
* Reconciliation works by getting the assembly resource (e.g. {@code KafkaUser})
* in the given namespace with the given name and
* comparing with the corresponding resource.
* @param reconciliation The reconciliation.
* @return A Future which is completed with the result of the reconciliation.
*/
@Override
@SuppressWarnings("unchecked")
public final Future<Void> reconcile(Reconciliation reconciliation) {
String namespace = reconciliation.namespace();
String name = reconciliation.name();
reconciliationsCounter(reconciliation.namespace()).increment();
Timer.Sample reconciliationTimerSample = Timer.start(metrics.meterRegistry());
Future<Void> handler = withLock(reconciliation, LOCK_TIMEOUT_MS, () -> {
T cr = resourceOperator.get(namespace, name);
if (cr != null) {
if (!Util.matchesSelector(selector(), cr)) {
// When the labels matching the selector are removed from the custom resource, a DELETE event is
// triggered by the watch even though the custom resource might not match the watch labels anymore
// and might not really be deleted. We have to filter these situations out and ignore the
// reconciliation, because such a resource might already be operated by another instance (where the
// same change triggered an ADDED event).
LOGGER.debugCr(reconciliation, "{} {} in namespace {} does not match label selector {} and will be ignored", kind(), name, namespace, selector().get().getMatchLabels());
return Future.succeededFuture();
}
Promise<Void> createOrUpdate = Promise.promise();
if (Annotations.isReconciliationPausedWithAnnotation(cr)) {
S status = createStatus();
Set<Condition> conditions = validate(reconciliation, cr);
conditions.add(StatusUtils.getPausedCondition());
status.setConditions(new ArrayList<>(conditions));
status.setObservedGeneration(cr.getStatus() != null ? cr.getStatus().getObservedGeneration() : 0);
updateStatus(reconciliation, status).onComplete(statusResult -> {
if (statusResult.succeeded()) {
createOrUpdate.complete();
} else {
createOrUpdate.fail(statusResult.cause());
}
});
pausedResourceCounter(namespace).getAndIncrement();
LOGGER.debugCr(reconciliation, "Reconciliation of {} {} is paused", kind, name);
return createOrUpdate.future();
} else if (cr.getSpec() == null) {
InvalidResourceException exception = new InvalidResourceException("Spec cannot be null");
S status = createStatus();
Condition errorCondition = new ConditionBuilder()
        .withLastTransitionTime(StatusUtils.iso8601Now())
        .withType("NotReady")
        .withStatus("True")
        .withReason(exception.getClass().getSimpleName())
        .withMessage(exception.getMessage())
        .build();
status.setObservedGeneration(cr.getMetadata().getGeneration());
status.addCondition(errorCondition);
LOGGER.errorCr(reconciliation, "{} spec cannot be null", cr.getMetadata().getName());
updateStatus(reconciliation, status).onComplete(notUsed -> {
createOrUpdate.fail(exception);
});
return createOrUpdate.future();
}
Set<Condition> unknownAndDeprecatedConditions = validate(reconciliation, cr);
LOGGER.infoCr(reconciliation, "{} {} will be checked for creation or modification", kind, name);
createOrUpdate(reconciliation, cr).onComplete(res -> {
if (res.succeeded()) {
S status = res.result();
addWarningsToStatus(status, unknownAndDeprecatedConditions);
updateStatus(reconciliation, status).onComplete(statusResult -> {
if (statusResult.succeeded()) {
createOrUpdate.complete();
} else {
createOrUpdate.fail(statusResult.cause());
}
});
} else {
if (res.cause() instanceof ReconciliationException) {
ReconciliationException e = (ReconciliationException) res.cause();
Status status = e.getStatus();
addWarningsToStatus(status, unknownAndDeprecatedConditions);
LOGGER.errorCr(reconciliation, "createOrUpdate failed", e.getCause());
updateStatus(reconciliation, (S) status).onComplete(statusResult -> {
createOrUpdate.fail(e.getCause());
});
} else {
LOGGER.errorCr(reconciliation, "createOrUpdate failed", res.cause());
createOrUpdate.fail(res.cause());
}
}
});
return createOrUpdate.future();
} else {
LOGGER.infoCr(reconciliation, "{} {} should be deleted", kind, name);
return delete(reconciliation).map(deleteResult -> {
if (deleteResult) {
LOGGER.infoCr(reconciliation, "{} {} deleted", kind, name);
} else {
LOGGER.infoCr(reconciliation, "Assembly {} or some parts of it will be deleted by garbage collection", name);
}
return (Void) null;
}).recover(deleteResult -> {
LOGGER.errorCr(reconciliation, "Deletion of {} {} failed", kind, name, deleteResult);
return Future.failedFuture(deleteResult);
});
}
});
Promise<Void> result = Promise.promise();
handler.onComplete(reconcileResult -> {
try {
handleResult(reconciliation, reconcileResult, reconciliationTimerSample);
} finally {
result.handle(reconcileResult);
}
});
return result.future();
}
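The createOrUpdate result above is merged with the validation warnings through addWarningsToStatus, which is not shown in this excerpt. A hedged sketch of what it plausibly does, using only the Status.addCondition call that already appears in the null-spec branch above (the real implementation may differ):

// Sketch: copy the unknown/deprecated-field warnings produced by validate() into the status
// that will be written back to the custom resource.
protected void addWarningsToStatus(Status status, Set<Condition> unknownAndDeprecatedConditions) {
    if (status != null) {
        unknownAndDeprecatedConditions.forEach(status::addCondition);
    }
}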
Use of io.strimzi.api.kafka.model.status.Status in project strimzi by strimzi.
The class OperatorMetricsTest, method testDeleteCountsReconcile.
@Test
public void testDeleteCountsReconcile(VertxTestContext context) {
MetricsProvider metrics = createCleanMetricsProvider();
AbstractWatchableStatusedResourceOperator resourceOperator = new AbstractWatchableStatusedResourceOperator(vertx, null, "TestResource") {
@Override
protected MixedOperation operation() {
return null;
}
@Override
public HasMetadata get(String namespace, String name) {
return null;
}
@Override
public Future updateStatusAsync(Reconciliation reconciliation, HasMetadata resource) {
return null;
}
};
AbstractOperator operator = new AbstractOperator(vertx, "TestResource", resourceOperator, metrics, null) {
@Override
protected Future createOrUpdate(Reconciliation reconciliation, CustomResource resource) {
return null;
}
@Override
public Set<Condition> validate(Reconciliation reconciliation, CustomResource resource) {
// Do nothing
return emptySet();
}
@Override
protected Future<Boolean> delete(Reconciliation reconciliation) {
return Future.succeededFuture(Boolean.TRUE);
}
@Override
protected Status createStatus() {
return new Status() {
};
}
};
Checkpoint async = context.checkpoint();
operator.reconcile(new Reconciliation("test", "TestResource", "my-namespace", "my-resource")).onComplete(context.succeeding(v -> context.verify(() -> {
MeterRegistry registry = metrics.meterRegistry();
Tag selectorTag = Tag.of("selector", "");
assertThat(registry.get(AbstractOperator.METRICS_PREFIX + "reconciliations").meter().getId().getTags().get(2), is(selectorTag));
assertThat(registry.get(AbstractOperator.METRICS_PREFIX + "reconciliations").tag("kind", "TestResource").counter().count(), is(1.0));
assertThat(registry.get(AbstractOperator.METRICS_PREFIX + "reconciliations.successful").meter().getId().getTags().get(2), is(selectorTag));
assertThat(registry.get(AbstractOperator.METRICS_PREFIX + "reconciliations.successful").tag("kind", "TestResource").counter().count(), is(1.0));
assertThat(registry.get(AbstractOperator.METRICS_PREFIX + "reconciliations.duration").meter().getId().getTags().get(2), is(selectorTag));
assertThat(registry.get(AbstractOperator.METRICS_PREFIX + "reconciliations.duration").tag("kind", "TestResource").timer().count(), is(1L));
assertThat(registry.get(AbstractOperator.METRICS_PREFIX + "reconciliations.duration").tag("kind", "TestResource").timer().totalTime(TimeUnit.MILLISECONDS), greaterThan(0.0));
assertThrows(MeterNotFoundException.class, () -> {
registry.get(AbstractOperator.METRICS_PREFIX + "resource.state").tag("kind", "TestResource").tag("name", "my-resource").tag("resource-namespace", "my-namespace").gauge();
});
async.flag();
})));
}
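Both metrics tests start from createCleanMetricsProvider(), which is not shown here. A minimal sketch, assuming Strimzi's MicrometerMetricsProvider and an emptied Micrometer registry so that the counter and timer assertions start from zero (the constructor and the exact clean-up are assumptions):

// Sketch: a metrics provider whose registry contains no meters left over from previous tests.
private MetricsProvider createCleanMetricsProvider() {
    MetricsProvider metrics = new MicrometerMetricsProvider();   // assumed no-arg constructor
    MeterRegistry registry = metrics.meterRegistry();
    registry.forEachMeter(registry::remove);                     // drop any pre-registered meters
    return metrics;
}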
Use of io.strimzi.api.kafka.model.status.Status in project strimzi by strimzi.
The class OperatorMetricsTest, method testPauseReconcile.
@Test
public void testPauseReconcile(VertxTestContext context) {
MetricsProvider metrics = createCleanMetricsProvider();
AbstractWatchableStatusedResourceOperator resourceOperator = resourceOperatorWithExistingPausedResource();
AbstractOperator operator = new AbstractOperator(vertx, "TestResource", resourceOperator, metrics, null) {
@Override
protected Future createOrUpdate(Reconciliation reconciliation, CustomResource resource) {
return Future.succeededFuture();
}
@Override
public Set<Condition> validate(Reconciliation reconciliation, CustomResource resource) {
return new HashSet<>();
}
@Override
protected Future<Boolean> delete(Reconciliation reconciliation) {
return null;
}
@Override
protected Status createStatus() {
return new Status() {
};
}
};
Checkpoint async = context.checkpoint();
operator.reconcile(new Reconciliation("test", "TestResource", "my-namespace", "my-resource")).onComplete(context.succeeding(v -> context.verify(() -> {
MeterRegistry registry = metrics.meterRegistry();
Tag selectorTag = Tag.of("selector", "");
assertThat(registry.get(AbstractOperator.METRICS_PREFIX + "reconciliations").meter().getId().getTags().get(2), is(selectorTag));
assertThat(registry.get(AbstractOperator.METRICS_PREFIX + "reconciliations").tag("kind", "TestResource").counter().count(), is(1.0));
assertThat(registry.get(AbstractOperator.METRICS_PREFIX + "reconciliations.successful").meter().getId().getTags().get(2), is(selectorTag));
assertThat(registry.get(AbstractOperator.METRICS_PREFIX + "reconciliations.successful").tag("kind", "TestResource").counter().count(), is(1.0));
assertThat(registry.get(AbstractOperator.METRICS_PREFIX + "resources.paused").meter().getId().getTags().get(2), is(selectorTag));
assertThat(registry.get(AbstractOperator.METRICS_PREFIX + "resources.paused").tag("kind", "TestResource").gauge().value(), is(1.0));
assertThat(registry.get(AbstractOperator.METRICS_PREFIX + "reconciliations.duration").meter().getId().getTags().get(2), is(selectorTag));
assertThat(registry.get(AbstractOperator.METRICS_PREFIX + "reconciliations.duration").tag("kind", "TestResource").timer().count(), is(1L));
assertThat(registry.get(AbstractOperator.METRICS_PREFIX + "reconciliations.duration").tag("kind", "TestResource").timer().totalTime(TimeUnit.MILLISECONDS), greaterThan(0.0));
assertThat(registry.get(AbstractOperator.METRICS_PREFIX + "resource.state").tag("kind", "TestResource").tag("name", "my-resource").tag("resource-namespace", "my-namespace").gauge().value(), is(1.0));
async.flag();
})));
}
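For context on what resourceOperatorWithExistingPausedResource() has to return: the paused branch in AbstractOperator.reconcile above is taken when Annotations.isReconciliationPausedWithAnnotation(cr) finds the pause annotation on the fetched resource. A sketch of such a paused resource, reusing the KafkaConnector builder pattern from the first test; the literal annotation key is the documented strimzi.io/pause-reconciliation, used here in place of the Annotations constant:

// Sketch: a resource that reconcile() would treat as paused. reconcile() then only writes a
// ReconciliationPaused condition to the status and increments the resources.paused gauge asserted above.
KafkaConnector paused = new KafkaConnectorBuilder()
        .withNewMetadata()
            .withName("connector")
            .withNamespace(NAMESPACE)
            .addToAnnotations("strimzi.io/pause-reconciliation", "true")
        .endMetadata()
        .withNewSpec()
            .withTasksMax(1)
            .withClassName("Dummy")
        .endSpec()
        .build();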