use of io.strimzi.api.kafka.model.status.KafkaConnectorStatus in project strimzi by strimzi.
the class CustomResourceStatusIsolatedST method assertKafkaConnectorStatus.
@SuppressWarnings("unchecked")
void assertKafkaConnectorStatus(long expectedObservedGeneration, String connectorStates, String type, List<String> topics) {
    KafkaConnectorStatus kafkaConnectorStatus = KafkaConnectorResource.kafkaConnectorClient()
        .inNamespace(Constants.INFRA_NAMESPACE)
        .withName(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME)
        .get()
        .getStatus();
    assertThat(kafkaConnectorStatus.getObservedGeneration(), is(expectedObservedGeneration));

    Map<String, Object> connectorStatus = kafkaConnectorStatus.getConnectorStatus();
    String currentState = ((LinkedHashMap<String, String>) connectorStatus.get("connector")).get("state");
    assertThat(connectorStates, containsString(currentState));
    assertThat(connectorStatus.get("name"), is(CUSTOM_RESOURCE_STATUS_CLUSTER_NAME));
    assertThat(connectorStatus.get("type"), is(type));
    assertThat(connectorStatus.get("tasks"), notNullValue());
    assertThat(kafkaConnectorStatus.getTopics(), is(topics));
}
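An illustrative call to this helper, with hypothetical values not taken from the source; note that connectorStates is a comma-separated list of acceptable states, because the helper asserts that it contains the connector's current state.

// Hypothetical invocation; the generation, states, type and topic are made-up values.
assertKafkaConnectorStatus(1L, "RUNNING,PAUSED", "sink", List.of("custom-resource-status-topic"));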
use of io.strimzi.api.kafka.model.status.KafkaConnectorStatus in project strimzi by strimzi.
the class AbstractConnectOperator method maybeUpdateConnectorStatus.
Future<Void> maybeUpdateConnectorStatus(Reconciliation reconciliation, KafkaConnector connector, ConnectorStatusAndConditions connectorStatus, Throwable error) {
    KafkaConnectorStatus status = new KafkaConnectorStatus();
    if (error != null) {
        LOGGER.warnCr(reconciliation, "Error reconciling connector {}", connector.getMetadata().getName(), error);
    }

    Map<String, Object> statusResult = null;
    List<String> topics = new ArrayList<>();
    List<Condition> conditions = new ArrayList<>();

    if (connectorStatus != null) {
        statusResult = connectorStatus.statusResult;
        topics = connectorStatus.topics.stream().sorted().collect(Collectors.toList());
        connectorStatus.conditions.forEach(condition -> conditions.add(condition));
    }

    Set<Condition> unknownAndDeprecatedConditions = validate(reconciliation, connector);
    unknownAndDeprecatedConditions.forEach(condition -> conditions.add(condition));

    if (!Annotations.isReconciliationPausedWithAnnotation(connector)) {
        StatusUtils.setStatusConditionAndObservedGeneration(connector, status, error != null ? Future.failedFuture(error) : Future.succeededFuture());
        status.setConnectorStatus(statusResult);
        status.setTasksMax(getActualTaskCount(connector, statusResult));
        status.setTopics(topics);
    } else {
        status.setObservedGeneration(connector.getStatus() != null ? connector.getStatus().getObservedGeneration() : 0);
        conditions.add(StatusUtils.getPausedCondition());
    }
    status.addConditions(conditions);

    return maybeUpdateStatusCommon(connectorOperator, connector, reconciliation, status,
        (connector1, status1) -> new KafkaConnectorBuilder(connector1).withStatus(status1).build());
}
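A minimal sketch, not operator code, of a KafkaConnectorStatus built by hand with the same setters the method uses above; all values are hypothetical and ConditionBuilder is assumed to come from the same io.strimzi.api.kafka.model.status package.

// Hand-built status for illustration only; every value here is made up.
KafkaConnectorStatus status = new KafkaConnectorStatus();
status.setObservedGeneration(1L);
status.setConnectorStatus(Map.of("name", "my-connector", "type", "sink"));  // stand-in for the Connect REST API payload
status.setTasksMax(2);
status.setTopics(List.of("my-topic"));
status.addConditions(List.of(new ConditionBuilder().withType("Ready").withStatus("True").build()));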
use of io.strimzi.api.kafka.model.status.KafkaConnectorStatus in project strimzi-kafka-operator by strimzi.
the class AbstractConnectOperator method updateStatus.
public static void updateStatus(Reconciliation reconciliation, Throwable error, KafkaConnector kafkaConnector2, CrdOperator<?, KafkaConnector, ?> connectorOperations) {
    KafkaConnectorStatus status = new KafkaConnectorStatus();
    StatusUtils.setStatusConditionAndObservedGeneration(kafkaConnector2, status, error);

    StatusDiff diff = new StatusDiff(kafkaConnector2.getStatus(), status);
    if (!diff.isEmpty()) {
        KafkaConnector copy = new KafkaConnectorBuilder(kafkaConnector2).build();
        copy.setStatus(status);
        connectorOperations.updateStatusAsync(reconciliation, copy);
    }
}
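A hypothetical call site for this helper, as it might appear in a failure handler; reconciliation, connector and connectorOperations are assumed to already be in scope.

// Illustrative usage only: report a failed reconciliation in the connector's status.
Throwable error = new RuntimeException("Kafka Connect REST API is not available");  // made-up failure
updateStatus(reconciliation, error, connector, connectorOperations);

Because the method diffs the new status against the existing one, repeated calls with the same outcome do not issue redundant status updates.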
use of io.strimzi.api.kafka.model.status.KafkaConnectorStatus in project strimzi-kafka-operator by strimzi.
the class ConnectIsolatedST method testScaleConnectWithConnectorToZero.
@ParallelNamespaceTest
@Tag(SCALABILITY)
@Tag(CONNECTOR_OPERATOR)
void testScaleConnectWithConnectorToZero(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(INFRA_NAMESPACE, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
    final String kafkaClientsName = mapWithKafkaClientNames.get(extensionContext.getDisplayName());

    resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(false, kafkaClientsName).build());
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3).build());
    resourceManager.createResource(extensionContext, KafkaConnectTemplates.kafkaConnect(extensionContext, clusterName, 2)
        .editMetadata()
            .addToAnnotations(Annotations.STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true")
        .endMetadata()
        .build());
    resourceManager.createResource(extensionContext, KafkaConnectorTemplates.kafkaConnector(clusterName)
        .editSpec()
            .withClassName("org.apache.kafka.connect.file.FileStreamSinkConnector")
            .addToConfig("file", Constants.DEFAULT_SINK_FILE_PATH)
            .addToConfig("key.converter", "org.apache.kafka.connect.storage.StringConverter")
            .addToConfig("value.converter", "org.apache.kafka.connect.storage.StringConverter")
            .addToConfig("topics", topicName)
        .endSpec()
        .build());

    String connectDeploymentName = KafkaConnectResources.deploymentName(clusterName);
    List<Pod> connectPods = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, connectDeploymentName);
    assertThat(connectPods.size(), is(2));

    // scale down
    LOGGER.info("Scaling KafkaConnect down to zero");
    KafkaConnectResource.replaceKafkaConnectResourceInSpecificNamespace(clusterName, kafkaConnect -> kafkaConnect.getSpec().setReplicas(0), namespaceName);

    KafkaConnectUtils.waitForConnectReady(namespaceName, clusterName);
    PodUtils.waitForPodsReady(namespaceName, kubeClient(namespaceName).getDeploymentSelectors(namespaceName, connectDeploymentName), 0, true);

    connectPods = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, connectDeploymentName);
    KafkaConnectStatus connectStatus = KafkaConnectResource.kafkaConnectClient().inNamespace(namespaceName).withName(clusterName).get().getStatus();
    KafkaConnectorStatus connectorStatus = KafkaConnectorResource.kafkaConnectorClient().inNamespace(namespaceName).withName(clusterName).get().getStatus();

    assertThat(connectPods.size(), is(0));
    assertThat(connectStatus.getConditions().get(0).getType(), is(Ready.toString()));
    assertThat(connectorStatus.getConditions().stream().anyMatch(condition -> condition.getType().equals(NotReady.toString())), is(true));
    assertThat(connectorStatus.getConditions().stream().anyMatch(condition -> condition.getMessage().contains("has 0 replicas")), is(true));
}
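The last two assertions scan the connector's conditions; a hedged sketch of inspecting them directly, using the same client calls as the test but logging instead of asserting:

// Illustrative only: dump the connector's conditions after the scale-down.
KafkaConnectorStatus connectorStatus = KafkaConnectorResource.kafkaConnectorClient()
        .inNamespace(namespaceName)
        .withName(clusterName)
        .get()
        .getStatus();
connectorStatus.getConditions().forEach(condition ->
        LOGGER.info("type={}, status={}, message={}", condition.getType(), condition.getStatus(), condition.getMessage()));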