Use of com.evolveum.midpoint.xml.ns._public.common.common_3.NodeType in project midpoint by Evolveum.
The class UpAndDown, method initInternal:
private void initInternal(OperationResult result) throws TaskManagerInitializationException {
    LOGGER.info("Task Manager initialization.");

    // get the configuration (general section + JDBC section as well)
    configuration.checkAllowedKeys(midpointConfiguration);
    configuration.setBasicInformation(midpointConfiguration, result);
    configuration.validateBasicInformation();

    LOGGER.info("Task Manager: Quartz Job Store: "
            + (configuration.isJdbcJobStore() ? "JDBC" : "in-memory") + ", "
            + (configuration.isClustered() ? "" : "NOT ") + "clustered. Threads: "
            + configuration.getThreads());

    if (configuration.isJdbcJobStore()) {
        // Let's find Quartz JDBC setup fallback (which will be used very likely)
        JdbcRepositoryConfiguration jdbcConfig = null;
        try {
            jdbcConfig = beanFactory.getBean(JdbcRepositoryConfiguration.class);
        } catch (NoSuchBeanDefinitionException e) {
            LOGGER.info("JdbcRepositoryConfiguration is not available, JDBC Job Store"
                    + " configuration will be taken from taskManager section only.");
            LOGGER.trace("Reason is", e);
        }
        configuration.setJdbcJobStoreInformation(midpointConfiguration, jdbcConfig);
        configuration.validateJdbcJobStoreInformation();
    }

    // register node; may throw initialization exception
    NodeType node = clusterManager.createOrUpdateNodeInRepo(result);

    if (!configuration.isTestMode()) {
        // in test mode do not start cluster manager thread nor verify cluster config
        // Does not throw exceptions. Sets the ERROR state if necessary, however.
        clusterManager.checkClusterConfiguration(result);
    }

    // unfortunately, there seems to be no clean way of letting jobs know the taskManager
    JobExecutor.setTaskManagerQuartzImpl(taskManager);
    // the same here
    JobStarter.setTaskManagerQuartzImpl(taskManager);

    localScheduler.initializeScheduler();
    if (localNodeState.getErrorState() == NodeErrorStateType.OK) {
        localScheduler.setLocalExecutionLimitations(node.getTaskExecutionLimitations());
    } else {
        localScheduler.shutdownScheduler();
    }

    // populate the scheduler with jobs (if RAM-based), or synchronize with midPoint repo
    if (!taskSynchronizer.synchronizeJobStores(result)) {
        if (!configuration.isJdbcJobStore()) {
            LOGGER.error("Some or all tasks could not be imported from midPoint repository to Quartz job store."
                    + " They will therefore not be executed.");
        } else {
            LOGGER.warn("Some or all tasks could not be synchronized between midPoint repository and Quartz job store."
                    + " They may not function correctly.");
        }
    }

    LOGGER.trace("Quartz scheduler initialized (not yet started, however)");
    LOGGER.info("Task Manager initialized");

    // if running in test mode, the postInit will not be executed... so we have to start scheduler here
    if (configuration.isTestMode()) {
        startSchedulerIfNeeded(result);
    }
}
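The NodeType obtained from clusterManager.createOrUpdateNodeInRepo() is the same object whose getTaskExecutionLimitations() value is later handed to localScheduler.setLocalExecutionLimitations(). The following is a minimal sketch, not midPoint code, relying only on the generated NodeType accessors visible above; the helper name is made up.

    // Hypothetical helper, for illustration only: checks whether a node carries
    // explicit task execution limitations before they are applied to the scheduler.
    static boolean hasExplicitLimitations(NodeType node) {
        // getTaskExecutionLimitations() is the same accessor initInternal() reads.
        return node != null && node.getTaskExecutionLimitations() != null;
    }

A caller could, for example, log a warning when hasExplicitLimitations(node) is false before the local scheduler is configured.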
Use of com.evolveum.midpoint.xml.ns._public.common.common_3.NodeType in project midpoint by Evolveum.
The class ConcurrentExecutionChecker, method check:
public void check(OperationResult result) throws ObjectNotFoundException, SchemaException, StopJobException {
    task.refresh(result);
    String executingAtNode = task.getNode();
    if (executingAtNode == null) {
        LOGGER.trace("Current node is null, we assume no concurrent execution");
        return;
    }
    LOGGER.debug("Task {} seems to be executing on node {}", task, executingAtNode);
    if (executingAtNode.equals(beans.configuration.getNodeId())) {
        RunningTaskQuartzImpl locallyRunningTask =
                beans.localNodeState.getLocallyRunningTaskByIdentifier(task.getTaskIdentifier());
        if (locallyRunningTask != null) {
            throw new StopJobException(ERROR, "Current task %s seems to be already running in thread %s on the"
                    + " local node. We will NOT start it here.", null, task, locallyRunningTask.getExecutingThread());
        } else {
            LOGGER.warn("Current task {} seemed to be already running on the local node but it cannot be found"
                    + " there now. Therefore we continue with the Quartz job execution.", task);
        }
    } else {
        ObjectQuery query = beans.prismContext.queryFor(NodeType.class)
                .item(NodeType.F_NODE_IDENTIFIER).eq(executingAtNode)
                .build();
        SearchResultList<PrismObject<NodeType>> nodes = beans.nodeRetriever.searchNodes(query, null, result);
        if (nodes.size() > 1) {
            throw new IllegalStateException("More than one node with identifier " + executingAtNode + ": " + nodes);
        } else if (nodes.size() == 1) {
            NodeType remoteNode = nodes.get(0).asObjectable();
            if (beans.clusterManager.isCheckingIn(remoteNode)) {
                // But let's keep things simple for the time being.
                throw new StopJobException(ERROR, "Current task %s seems to be already running at node %s that is alive or starting. "
                        + "We will NOT start it here.", null, task, remoteNode.getNodeIdentifier());
            } else {
                LOGGER.warn("Current task {} seems to be already running at node {} but this node is not currently "
                        + "checking in (last: {}). So we will start the task here.",
                        task, remoteNode.getNodeIdentifier(), remoteNode.getLastCheckInTime());
            }
        } else {
            LOGGER.warn("Current task {} seems to be already running at node {} but this node cannot be found"
                    + " in the repository. So we will start the task here.", task, executingAtNode);
        }
    }
}
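The lookup in the else branch is a plain prism query by node identifier. As a small sketch of the same pattern in isolation (assuming a PrismContext named prismContext is available; the identifier "NodeA" is made up):

    // Sketch only, not midPoint code: builds the same kind of query check() uses
    // to find the node that claims to be executing the task.
    ObjectQuery byIdentifier = prismContext.queryFor(NodeType.class)
            .item(NodeType.F_NODE_IDENTIFIER).eq("NodeA")
            .build();

The resulting query would then be passed to a search method such as the nodeRetriever.searchNodes() call shown above.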
Use of com.evolveum.midpoint.xml.ns._public.common.common_3.NodeType in project midpoint by Evolveum.
The class LocalExecutionManager, method getLocalSchedulerInformation:
public SchedulerInformationType getLocalSchedulerInformation(OperationResult result) {
    SchedulerInformationType info = new SchedulerInformationType();
    NodeType node = localNodeState.getLocalNode();
    if (node != null) {
        // clear data that should not be returned to the caller
        node.setSecret(null);
        node.setSecretUpdateTimestamp(null);
        node.setTaskExecutionLimitations(null);
    }
    info.setNode(node);
    for (String oid : localScheduler.getLocallyRunningTasksOids(result)) {
        TaskType task = new TaskType(taskManager.getPrismContext()).oid(oid);
        info.getExecutingTask().add(task);
    }
    result.computeStatus();
    return info;
}
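A caller of getLocalSchedulerInformation() typically only reads the returned bean. The following is an illustration, not midPoint code, using just the accessors that mirror the setters above (getNode(), getExecutingTask(), getNodeIdentifier(), getOid()).

    // Illustration only: consume the SchedulerInformationType assembled above.
    SchedulerInformationType info = getLocalSchedulerInformation(result);
    if (info.getNode() != null) {
        System.out.println("Scheduler node: " + info.getNode().getNodeIdentifier());
    }
    for (TaskType running : info.getExecutingTask()) {
        System.out.println("Locally running task OID: " + running.getOid());
    }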
Use of com.evolveum.midpoint.xml.ns._public.common.common_3.NodeType in project midpoint by Evolveum.
The class RemoteSchedulers, method startRemoteScheduler:
public void startRemoteScheduler(String nodeIdentifier, OperationResult parentResult) {
    OperationResult result = parentResult.createSubresult(CLASS_DOT + ".startRemoteScheduler");
    result.addParam("node", nodeIdentifier);
    try {
        NodeType node = getNode(nodeIdentifier, result);
        if (node == null) {
            return; // result is already updated
        }
        restConnector.startRemoteScheduler(node, result);
    } catch (Throwable t) {
        result.recordFatalError("Couldn't start scheduler on remote node", t);
        LoggingUtils.logUnexpectedException(LOGGER, "Couldn't start scheduler on remote node: {}", t, nodeIdentifier);
        // TODO throw the exception?
    } finally {
        result.computeStatusIfUnknown();
    }
}
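startRemoteScheduler() follows the usual OperationResult bookkeeping pattern: create a subresult, add parameters, record a fatal error on failure, and compute the status in a finally block. Below is a minimal sketch of the same pattern around an arbitrary action; doSomething() and the subresult name are placeholders, not midPoint APIs.

    // Sketch of the OperationResult pattern used above; doSomething() is made up.
    OperationResult result = parentResult.createSubresult("ExampleClass.doSomething");
    result.addParam("node", nodeIdentifier);
    try {
        doSomething(nodeIdentifier, result);
    } catch (Throwable t) {
        result.recordFatalError("Couldn't do something on remote node", t);
    } finally {
        result.computeStatusIfUnknown();
    }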
Use of com.evolveum.midpoint.xml.ns._public.common.common_3.NodeType in project midpoint by Evolveum.
The class NodeAuthenticationEvaluatorImpl, method getMatchingNodes:
private List<PrismObject<NodeType>> getMatchingNodes(List<PrismObject<NodeType>> knownNodes,
        String remoteName, String remoteAddress) {
    LOGGER.trace("Selecting matching node(s) for remote name '{}' and remote address '{}'", remoteName, remoteAddress);
    List<PrismObject<NodeType>> matchingNodes = new ArrayList<>();
    for (PrismObject<NodeType> node : knownNodes) {
        NodeType actualNode = node.asObjectable();
        if (actualNode.getOperationalState() == NodeOperationalStateType.DOWN) {
            // Note that we consider nodes that are STARTING as eligible for authentication (they can issue REST calls)
            LOGGER.trace("Skipping {} because it has operationalState=DOWN", actualNode);
        } else if (remoteName != null && remoteName.equalsIgnoreCase(actualNode.getHostname())) {
            LOGGER.trace("The node {} was recognized as a known node (remote host name {} matched).",
                    actualNode.getName(), actualNode.getHostname());
            matchingNodes.add(node);
        } else if (actualNode.getIpAddress().contains(remoteAddress)) {
            LOGGER.trace("The node {} was recognized as a known node (remote host address {} matched).",
                    actualNode.getName(), remoteAddress);
            matchingNodes.add(node);
        }
    }
    // We should eliminate "not checking in" nodes if there are more possibilities
    if (matchingNodes.size() > 1) {
        List<PrismObject<NodeType>> up = matchingNodes.stream()
                .filter(node -> taskManager.isCheckingIn(node.asObjectable()))
                .collect(Collectors.toList());
        LOGGER.trace("Tried to eliminate nodes that are not checking in; found {} node(s) that are up: {}", up.size(), up);
        if (up.size() == 1) {
            return up;
        }
        // Nothing reasonable can be done here. Let's return all the nodes.
    }
    return matchingNodes;
}
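The matching logic reduces to a per-node predicate: skip nodes whose operational state is DOWN, then compare the reverse-resolved host name and the registered IP addresses. A hedged sketch of that predicate as a standalone helper (hypothetical, not part of NodeAuthenticationEvaluatorImpl), using only the accessors shown above:

    // Hypothetical helper mirroring the per-node checks in getMatchingNodes().
    static boolean matchesNode(NodeType node, String remoteName, String remoteAddress) {
        if (node.getOperationalState() == NodeOperationalStateType.DOWN) {
            return false; // DOWN nodes are never eligible for authentication
        }
        return (remoteName != null && remoteName.equalsIgnoreCase(node.getHostname()))
                || node.getIpAddress().contains(remoteAddress);
    }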