use of com.evolveum.midpoint.task.api.RunningTask in project midpoint by Evolveum.
the class StatisticsLogger method logBucketCompletion.
void logBucketCompletion(boolean complete) {
    TransientActivityRunStatistics current = activityRun.getTransientRunStatistics();
    long end = System.currentTimeMillis();
    ActivityPerformanceInformation overall = getOverallStatistics();
    RunningTask task = activityRun.getRunningTask();

    String mainMessage = String.format("%s bucket #%d for %s (%s in %s).%s",
            complete ? "Completed" : "Partially processed",
            activityRun.getBucket().getSequentialNumber(),
            activityRun.getShortNameUncapitalized(),
            activityRun.getActivityPath().toDebugName(),
            task,
            complete ? "" : " Bucket processing was interrupted.");

    String currentBrief = String.format(Locale.US,
            "Current run: processed %,d objects in %.1f seconds, got %,d errors.",
            current.getItemsProcessed(), current.getWallClockTime(end) / 1000.0, current.getErrors());
    if (current.getItemsProcessed() > 0) {
        currentBrief += String.format(Locale.US,
                " Average processing time for one object: %,.1f milliseconds. "
                        + "Wall clock average: %,.1f milliseconds, throughput: %,.1f items per minute.",
                current.getAverageTime(), current.getAverageWallClockTime(end), current.getThroughput(end));
    }

    Long wallClockTime = overall.getWallClockTime();
    // Wall-clock time information is not available e.g. for activities with persistent state (like LiveSync)
    boolean hasWallClockTime = wallClockTime != null && wallClockTime > 0;
    String wallClockTimeString;
    if (hasWallClockTime) {
        wallClockTimeString = String.format(Locale.US, " in %.1f seconds",
                ActivityItemProcessingStatisticsUtil.toSeconds(wallClockTime));
    } else {
        wallClockTimeString = "";
    }

    String overallBrief = String.format(Locale.US,
            "Overall: processed %,d objects%s, got %,d errors. Real progress: %,d.",
            overall.getItemsProcessed(), wallClockTimeString, overall.getErrors(), overall.getProgress());
    if (overall.getItemsProcessed() > 0) {
        overallBrief += String.format(Locale.US,
                " Average processing time for one object: %,.1f milliseconds.", overall.getAverageTime());
        if (hasWallClockTime && overall.getAverageWallClockTime() != null) {
            overallBrief += String.format(Locale.US,
                    " Wall clock average: %,.1f milliseconds, throughput: %,.1f items per minute.",
                    overall.getAverageWallClockTime(), overall.getThroughput());
        }
    }

    String mainMessageAddition = "\n" + currentBrief + "\n" + overallBrief;
    String fullStats = getFullStatMessage(overall, end);
    log(ActivityReportingDefinition::getBucketCompletionLogging, mainMessage, mainMessageAddition, fullStats);
}
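For illustration, the Locale.US format specifiers used above (%,d for grouped integers, %.1f and %,.1f for one-decimal numbers) produce messages like the following. The numbers are made up and the snippet is a standalone sketch, not part of midPoint:

import java.util.Locale;

public class BucketLogFormatDemo {
    public static void main(String[] args) {
        // Hypothetical values standing in for the real statistics objects.
        int itemsProcessed = 12345;
        long wallClockMillis = 67890;
        int errors = 7;

        String currentBrief = String.format(Locale.US,
                "Current run: processed %,d objects in %.1f seconds, got %,d errors.",
                itemsProcessed, wallClockMillis / 1000.0, errors);

        // Prints: Current run: processed 12,345 objects in 67.9 seconds, got 7 errors.
        System.out.println(currentBrief);
    }
}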
use of com.evolveum.midpoint.task.api.RunningTask in project midpoint by Evolveum.
the class IterativeActivityRun method doRun.
/**
 * Bucketed version of the run.
 */
private void doRun(OperationResult result) throws ActivityRunException, CommonException {
    RunningTask task = taskRun.getRunningTask();
    boolean initialRun = true;

    bucketingSituation = determineBucketingSituation();
    setExpectedTotal(result);

    for (; task.canRun(); initialRun = false) {
        bucket = getWorkBucket(initialRun, result);
        if (bucket == null) {
            LOGGER.trace("No (next) work bucket within {}, exiting", task);
            break;
        }

        boolean complete = false;
        try {
            if (!task.canRun()) {
                break;
            }
            complete = processOrAnalyzeOrSkipSingleBucket(result);
            if (!complete) {
                break;
            }
        } finally {
            if (!complete) {
                // This happens either when the task was stopped (canRun is false or there is a stopping exception)
                // or when an unhandled exception occurred.
                //
                // This most probably means that the task is going to be suspended. So let us release the buckets
                // to allow their processing by other workers.
                releaseAllBucketsWhenWorker(result);
            }
        }
    }
}
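The try/finally arrangement above guarantees that an unfinished bucket is handed back whether the loop exits because the task can no longer run or because an exception propagates. A minimal standalone sketch of the same pattern, using hypothetical WorkQueue and Bucket types (this is not the midPoint bucketing API):

interface Bucket { }

interface WorkQueue {
    Bucket claim();                 // returns null when no more buckets are available
    void release(Bucket bucket);    // makes the bucket available to other workers again
}

class Worker {
    private final WorkQueue queue;
    private volatile boolean canRun = true;

    Worker(WorkQueue queue) {
        this.queue = queue;
    }

    void run() {
        while (canRun) {
            Bucket bucket = queue.claim();
            if (bucket == null) {
                break; // no (next) work bucket
            }
            boolean complete = false;
            try {
                complete = process(bucket);
                if (!complete) {
                    break; // stopped before the bucket was finished
                }
            } finally {
                if (!complete) {
                    // Stopped or failed: give the bucket back so another worker can take it.
                    queue.release(bucket);
                }
            }
        }
    }

    private boolean process(Bucket bucket) {
        // Hypothetical processing; returns false if interrupted before the bucket was finished.
        return canRun;
    }

    void stop() {
        canRun = false;
    }
}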
use of com.evolveum.midpoint.task.api.RunningTask in project midpoint by Evolveum.
the class IterativeActivityRun method updateStatistics.
/**
 * Updates statistics in the coordinator task (including thread-local statistics, if it is safe to do so).
 *
 * If needed, also updates the statistics in the repository.
 *
 * Statistics updated in the task:
 *
 * - task.operationStats,
 * - progress (both activity-based and legacy),
 * - activity statistics: items, synchronization, actions executed, bucketing operations.
 *
 * Note that using modifyObjectDynamically would perhaps be better, but the current use of the last update timestamp
 * ensures that there will not be concurrent updates of the coordinator coming from its worker threads.
 */
public void updateStatistics(boolean updateThreadLocalStatistics, OperationResult result)
        throws SchemaException, ObjectNotFoundException {
    RunningTask coordinatorTask = getRunningTask();
    coordinatorTask.updateOperationStatsInTaskPrism(updateThreadLocalStatistics);
    coordinatorTask.storeStatisticsIntoRepositoryIfTimePassed(getActivityStatUpdater(), result);
}
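The "if time passed" behaviour mentioned in the javadoc can be pictured as a simple timestamp check: statistics are written to the repository only when a minimum interval has elapsed since the previous write, so worker threads that call this method frequently do not flood the repository. A standalone sketch of that idea follows; it is only an illustration, not midPoint's actual RunningTask implementation:

import java.util.concurrent.atomic.AtomicLong;

class ThrottledStore {
    private final long minIntervalMillis;
    private final AtomicLong lastStore = new AtomicLong(0);

    ThrottledStore(long minIntervalMillis) {
        this.minIntervalMillis = minIntervalMillis;
    }

    /** Runs the store action only if enough time has passed; safe to call from many threads. */
    boolean storeIfTimePassed(Runnable storeAction) {
        long now = System.currentTimeMillis();
        long last = lastStore.get();
        if (now - last < minIntervalMillis) {
            return false; // too early, skip this update
        }
        if (!lastStore.compareAndSet(last, now)) {
            return false; // another thread is doing (or just did) the update
        }
        storeAction.run();
        return true;
    }
}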
use of com.evolveum.midpoint.task.api.RunningTask in project midpoint by Evolveum.
the class LocalActivityRun method setTaskObjectRef.
/**
 * Updates task.objectRef. This allows, for example, displaying all tasks related to a given resource.
 *
 * The reference is set only if both conditions hold:
 *
 * 1. task.objectRef has no value yet,
 * 2. the value provided by {@link #getDesiredTaskObjectRef()} is non-null.
 *
 * The method does this recursively towards the root of the task tree.
 */
@Experimental
final void setTaskObjectRef(OperationResult result) throws CommonException {
    RunningTask task = getRunningTask();
    if (task.getObjectOid() != null) {
        LOGGER.trace("Task.objectRef is already set for the current task. We assume it is also set for parent tasks.");
        return;
    }

    ObjectReferenceType desiredObjectRef = getDesiredTaskObjectRef();
    LOGGER.trace("Desired task object ref: {}", desiredObjectRef);
    if (desiredObjectRef == null) {
        return;
    }

    setObjectRefRecursivelyUpwards(task, desiredObjectRef, result);
}
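The recursion towards the root can be pictured as: set the reference on the current task, then repeat for its parent until either the root is reached or a task that already has the reference is found. A standalone sketch with a hypothetical TaskNode type (the real setObjectRefRecursivelyUpwards works against the midPoint task API and repository, which is not shown here):

class TaskNode {
    String objectRef;       // null = not set yet
    TaskNode parent;        // null = root of the task tree
}

class ObjectRefPropagator {
    /** Sets the reference on the given task and on all ancestors that do not have one yet. */
    static void setRecursivelyUpwards(TaskNode task, String desiredRef) {
        for (TaskNode current = task; current != null; current = current.parent) {
            if (current.objectRef != null) {
                // Already set here; we assume it is also set further up the tree.
                break;
            }
            current.objectRef = desiredRef;
        }
    }
}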
use of com.evolveum.midpoint.task.api.RunningTask in project midpoint by Evolveum.
the class TaskCleaner method cleanupTasks.
public void cleanupTasks(@NotNull CleanupPolicyType policy, @NotNull Predicate<TaskType> selector,
        @NotNull RunningTask executionTask, @NotNull OperationResult result)
        throws SchemaException, ObjectNotFoundException {
    if (policy.getMaxAge() == null) {
        return;
    }

    TimeBoundary timeBoundary = TimeBoundary.compute(policy.getMaxAge());
    XMLGregorianCalendar deleteTasksClosedUpTo = timeBoundary.getBoundary();

    LOGGER.info("Starting cleanup for closed tasks deleting up to {} (duration '{}').",
            deleteTasksClosedUpTo, timeBoundary.getPositiveDuration());

    ObjectQuery obsoleteTasksQuery = prismContext.queryFor(TaskType.class)
            .item(TaskType.F_EXECUTION_STATE).eq(TaskExecutionStateType.CLOSED)
            .and().item(TaskType.F_COMPLETION_TIMESTAMP).le(deleteTasksClosedUpTo)
            .and().item(TaskType.F_PARENT).isNull()
            .build();
    List<PrismObject<TaskType>> obsoleteTasks =
            repositoryService.searchObjects(TaskType.class, obsoleteTasksQuery, null, result);

    LOGGER.debug("Found {} task tree(s) to be cleaned up", obsoleteTasks.size());

    boolean interrupted = false;
    int deleted = 0;
    int problems = 0;
    int subtasksProblems = 0;

    root:
    for (PrismObject<TaskType> rootTaskPrism : obsoleteTasks) {
        if (!executionTask.canRun()) {
            result.recordWarning("Interrupted");
            LOGGER.warn("Task cleanup was interrupted.");
            interrupted = true;
            break;
        }

        IterativeOperationStartInfo iterativeOperationStartInfo =
                new IterativeOperationStartInfo(new IterationItemInformation(rootTaskPrism));
        iterativeOperationStartInfo.setSimpleCaller(true);
        Operation op = executionTask.recordIterativeOperationStart(iterativeOperationStartInfo);
        try {
            // get whole tree
            TaskQuartzImpl rootTask = taskInstantiator.createTaskInstance(rootTaskPrism, result);
            if (rootTask.isIndestructible()) {
                LOGGER.trace("Not deleting {} as it is indestructible", rootTaskPrism);
                op.skipped();
                continue;
            }
            if (!selector.test(rootTaskPrism.asObjectable())) {
                LOGGER.debug("Not deleting {} because it was rejected by the selector", rootTaskPrism);
                op.skipped();
                continue;
            }

            List<TaskQuartzImpl> taskTreeMembers = rootTask.listSubtasksDeeply(true, result);
            for (TaskQuartzImpl child : taskTreeMembers) {
                if (child.isIndestructible()) {
                    LOGGER.trace("Not deleting {} as it has an indestructible child: {}", rootTask, child);
                    op.skipped();
                    continue root;
                }
                if (!selector.test(child.getRawTaskObject().asObjectable())) {
                    LOGGER.debug("Not deleting {} because the user has no authorization to delete one of the children: {}",
                            rootTask, child);
                    op.skipped();
                    continue root;
                }
            }
            taskTreeMembers.add(rootTask);

            LOGGER.trace("Removing task {} along with its {} children.", rootTask, taskTreeMembers.size() - 1);

            Throwable lastProblem = null;
            for (Task task : taskTreeMembers) {
                try {
                    // TODO use repository service only - the task should be closed now
                    taskStateManager.deleteTask(task.getOid(), result);
                    deleted++;
                } catch (SchemaException | ObjectNotFoundException | RuntimeException e) {
                    LoggingUtils.logUnexpectedException(LOGGER, "Couldn't delete obsolete task {}", e, task);
                    lastProblem = e;
                    problems++;
                    if (!task.getTaskIdentifier().equals(rootTask.getTaskIdentifier())) {
                        subtasksProblems++;
                    }
                }
            }
            // approximate solution (as the problem might be connected to a subtask)
            if (lastProblem != null) {
                op.failed(lastProblem);
            } else {
                op.succeeded();
            }
        } catch (Throwable t) {
            op.failed(t);
            throw t;
        }
        // structured progress is incremented with iterative operation reporting
        executionTask.incrementLegacyProgressAndStoreStatisticsIfTimePassed(result);
    }

    LOGGER.info("Task cleanup procedure " + (interrupted ? "was interrupted" : "finished")
            + ". Successfully deleted {} tasks; there were problems with deleting {} tasks.", deleted, problems);
    if (subtasksProblems > 0) {
        LOGGER.error("{} subtask(s) couldn't be deleted. Inspect that manually, otherwise they might reside in repo forever.",
                subtasksProblems);
    }

    String suffix = interrupted ? " Interrupted." : "";
    if (problems == 0) {
        result.createSubresult(OP_STATISTICS)
                .recordStatus(SUCCESS, "Successfully deleted " + deleted + " task(s)." + suffix);
    } else {
        result.createSubresult(OP_STATISTICS)
                .recordPartialError("Successfully deleted " + deleted + " task(s), "
                        + "there was problems with deleting " + problems + " tasks." + suffix
                        + (subtasksProblems > 0
                                ? (" " + subtasksProblems + " subtask(s) couldn't be deleted, please see the log.")
                                : ""));
    }
}
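TimeBoundary.compute(policy.getMaxAge()) turns the configured maximum age (an XML duration such as P30D) into the latest completion timestamp that is still eligible for deletion. One way to compute such a boundary with the standard javax.xml.datatype API, shown here only as an illustration of the idea and not as midPoint's actual TimeBoundary class:

import javax.xml.datatype.DatatypeFactory;
import javax.xml.datatype.Duration;
import javax.xml.datatype.XMLGregorianCalendar;
import java.util.GregorianCalendar;

public class TimeBoundaryDemo {
    public static void main(String[] args) throws Exception {
        DatatypeFactory factory = DatatypeFactory.newInstance();

        // Hypothetical cleanup policy: delete task trees closed more than 30 days ago.
        Duration maxAge = factory.newDuration("P30D");

        // Boundary = now - maxAge; tasks with completionTimestamp <= boundary are candidates.
        XMLGregorianCalendar boundary = factory.newXMLGregorianCalendar(new GregorianCalendar());
        boundary.add(maxAge.negate());

        System.out.println("Deleting closed tasks completed up to " + boundary);
    }
}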