Use of com.evolveum.midpoint.xml.ns._public.common.common_3.NodeType in the midPoint project by Evolveum.
From the class ClusterManager, method determineClusterState.
@NotNull
public ClusterStateType determineClusterState(OperationResult result) throws SchemaException {
    // Deliberately avoids querying the cluster nodes themselves; repository data is authoritative here.
    // TODO use query after making operationalState indexed
    SearchResultList<PrismObject<NodeType>> nodeObjects =
            getRepositoryService().searchObjects(NodeType.class, null, null, result);
    ClusterStateType state = new ClusterStateType(PrismContext.get());
    for (PrismObject<NodeType> nodeObject : nodeObjects) {
        NodeType node = nodeObject.asObjectable();
        String identifier = node.getNodeIdentifier();
        // A node can appear in both lists: "up" per its recorded operational state,
        // and "up and alive" per the task manager's liveness check.
        if (node.getOperationalState() == NodeOperationalStateType.UP) {
            state.getNodeUp().add(identifier);
        }
        if (taskManager.isUpAndAlive(node)) {
            state.getNodeUpAndAlive().add(identifier);
        }
    }
    return state;
}
Use of com.evolveum.midpoint.xml.ns._public.common.common_3.NodeType in the midPoint project by Evolveum.
From the class ClusterManager, method checkClusterConfiguration.
/**
 * Verifies cluster consistency (currently checks whether there is no other node with the same ID,
 * and whether clustered/non-clustered nodes are OK).
 *
 * @return Current node record from repository, if everything is OK. Otherwise returns null.
 */
@Nullable
public NodeType checkClusterConfiguration(OperationResult result) {
    // On error the registrar sets the error state and stops the scheduler.
    NodeType currentNodeObject = nodeRegistrar.verifyNodeObject(result);
    // Same error handling applies to this check as well.
    nodeRegistrar.checkNonClusteredNodes(result);
    return currentNodeObject;
}
Use of com.evolveum.midpoint.xml.ns._public.common.common_3.NodeType in the midPoint project by Evolveum.
From the class ClusterManager, method checkNodeAliveness.
// Scans all node records and marks dead remote nodes as down; tasks that were
// "running" on newly-downed nodes are then marked as not running.
private void checkNodeAliveness(OperationResult result) throws SchemaException {
    SearchResultList<PrismObject<NodeType>> allNodes =
            getRepositoryService().searchObjects(NodeType.class, null, null, result);
    Set<String> newlyDownNodes = new HashSet<>();
    for (PrismObject<NodeType> object : allNodes) {
        NodeType node = object.asObjectable();
        if (!isRemoteNode(node)) {
            continue; // the local node is handled elsewhere
        }
        if (shouldBeMarkedAsDown(node)) {
            if (markNodeAsDown(node, result)) {
                LOGGER.warn("Node {} is down, marked it as such", node);
                newlyDownNodes.add(node.getNodeIdentifier());
            }
        } else if (isStartingForTooLong(node)) {
            LOGGER.warn("Node {} is starting for too long. Last check-in time = {}", node, node.getLastCheckInTime());
            // TODO should we mark this node as down?
        }
    }
    taskStateManager.markTasksAsNotRunning(newlyDownNodes, result);
}
Use of com.evolveum.midpoint.xml.ns._public.common.common_3.NodeType in the midPoint project by Evolveum.
From the class RemoteSchedulers, method stopRemoteScheduler.
// Stops the Quartz scheduler on the node identified by nodeIdentifier, via REST.
public void stopRemoteScheduler(String nodeIdentifier, OperationResult result) throws SchemaException {
    NodeType node = getNode(nodeIdentifier, result);
    if (node != null) {
        restConnector.stopRemoteScheduler(node, result);
    }
    // When the node cannot be resolved, getNode has already recorded that in the result.
}
Use of com.evolveum.midpoint.xml.ns._public.common.common_3.NodeType in the midPoint project by Evolveum.
From the class RestConnector, method addNodeStatus.
/**
 * Queries the remote node's local scheduler information over REST and, on success,
 * merges it into the given {@link ClusterStatusInformation}.
 *
 * Fix: the {@link Response} is now closed in a {@code finally} block, so the underlying
 * connection is released even when an unexpected (runtime) exception escapes from
 * result extraction or logging — previously {@code response.close()} was skipped in that case.
 */
public void addNodeStatus(ClusterStatusInformation info, NodeType nodeInfo, OperationResult result) throws SchemaException {
    clusterExecutionHelper.execute(nodeInfo, (client, actualNode, result1) -> {
        client.path(TaskConstants.GET_LOCAL_SCHEDULER_INFORMATION_REST_PATH);
        Response response = client.get();
        try {
            Response.StatusType statusInfo = response.getStatusInfo();
            LOGGER.debug("Querying remote scheduler information on {} finished with status {}: {}", nodeInfo.getNodeIdentifier(), statusInfo.getStatusCode(), statusInfo.getReasonPhrase());
            if (statusInfo.getFamily() == Response.Status.Family.SUCCESSFUL) {
                try {
                    SchedulerInformationType schedulerInfo = clusterExecutionHelper.extractResult(response, SchedulerInformationType.class);
                    if (LOGGER.isDebugEnabled()) {
                        LOGGER.debug("Received from {}:\n{}", nodeInfo.getNodeIdentifier(), prismContext.xmlSerializer().serializeRealValue(schedulerInfo));
                    }
                    info.addNodeAndTaskInfo(schedulerInfo);
                } catch (SchemaException e) {
                    // Best-effort: a malformed payload from one node must not abort the whole status collection.
                    LoggingUtils.logUnexpectedException(LOGGER, "Couldn't parse scheduler information from remote node {}", e, nodeInfo.getNodeIdentifier());
                }
            } else {
                LOGGER.warn("Querying remote scheduler information on {} finished with status {}: {}", nodeInfo.getNodeIdentifier(), statusInfo.getStatusCode(), statusInfo.getReasonPhrase());
            }
        } finally {
            // Always release the connection, even if an exception escapes above.
            response.close();
        }
    }, null, "get scheduler information", result);
}
Aggregations