Use of com.evolveum.midpoint.schema.statistics.IterationItemInformation in the midPoint project by Evolveum — class SimpleMockActivityRun, method runLocally:
@Override
@NotNull
protected ActivityRunResult runLocally(OperationResult result)
        throws SchemaException, ObjectNotFoundException, ObjectAlreadyExistsException {

    String message = activity.getWorkDefinition().getMessage();

    // Register the start of the single simulated item-processing operation.
    Operation operation = activityState.getLiveItemProcessingStatistics()
            .recordOperationStart(
                    new IterativeOperationStartInfo(
                            new IterationItemInformation(message, null, null, null)));

    // Sleep briefly so the wall-clock time is never zero (tests that derive throughput would fail).
    MiscUtil.sleepCatchingInterruptedException(10);

    LOGGER.info("Message: {}", message);
    getRecorder().recordExecution(message);

    CommonMockActivityHelper mockHelper = getActivityHandler().getMockHelper();
    mockHelper.increaseExecutionCount(activityState, result);

    try {
        mockHelper.failIfNeeded(this, activity.getWorkDefinition().getInitialFailures());
    } catch (Exception e) {
        // Record the failure outcome, then propagate the original exception to the caller.
        closeOperation(operation, ItemProcessingOutcomeType.FAILURE, e);
        throw e;
    }
    closeOperation(operation, ItemProcessingOutcomeType.SUCCESS, null);

    return standardRunResult();
}

/** Finishes the given operation with the specified outcome and updates the activity progress. */
private void closeOperation(Operation operation, ItemProcessingOutcomeType outcome, Exception exception) {
    QualifiedItemProcessingOutcomeType qualifiedOutcome =
            new QualifiedItemProcessingOutcomeType(getPrismContext()).outcome(outcome);
    operation.done(qualifiedOutcome, exception);
    incrementProgress(qualifiedOutcome);
}
Use of com.evolveum.midpoint.schema.statistics.IterationItemInformation in the midPoint project by Evolveum — class NodeCleaner, method cleanupNodes:
/**
 * Cleans up dead nodes older than specified age.
 *
 * A node is deleted only when all of the following hold: it is not the current node,
 * it is not checking in, and its last check-in time is at or before the computed time
 * boundary (per the inline note below, a null last check-in time also qualifies).
 * Indestructible nodes and nodes rejected by the selector are recorded as skipped.
 *
 * @param policy Cleanup policy; when it carries no max age, the method does nothing.
 * @param selector If returns false, the respective node will not be removed.
 * @param task Running task used for interruption checks, per-item statistics, and progress.
 * @param result Operation result receiving warnings and repository sub-results.
 * @throws SchemaException on schema problems during node retrieval or deletion
 * @throws ObjectNotFoundException when a node object disappears mid-cleanup
 */
public void cleanupNodes(@NotNull DeadNodeCleanupPolicyType policy, @NotNull Predicate<NodeType> selector, @NotNull RunningTask task, @NotNull OperationResult result) throws SchemaException, ObjectNotFoundException {
// No age limit configured -> nothing to clean up.
if (policy.getMaxAge() == null) {
return;
}
// Nodes not checked in after this boundary are candidates for deletion.
TimeBoundary timeBoundary = TimeBoundary.compute(policy.getMaxAge());
XMLGregorianCalendar deleteNodesNotCheckedInAfter = timeBoundary.getBoundary();
LOGGER.info("Starting cleanup for stopped nodes not checked in after {} (duration '{}').", deleteNodesNotCheckedInAfter, timeBoundary.getPositiveDuration());
for (PrismObject<NodeType> node : clusterManager.getAllNodes(result)) {
// Honor task suspension requests between nodes.
if (!task.canRun()) {
result.recordWarning("Interrupted");
LOGGER.warn("Node cleanup was interrupted.");
break;
}
if (!clusterManager.isCurrentNode(node) && !clusterManager.isCheckingIn(node.asObjectable()) && XmlTypeConverter.compareMillis(node.asObjectable().getLastCheckInTime(), deleteNodesNotCheckedInAfter) <= 0) {
// This includes last check in time == null
LOGGER.info("Deleting dead node {}; last check in time = {}", node, node.asObjectable().getLastCheckInTime());
// Record this node as one iterative operation in the task's statistics.
IterativeOperationStartInfo iterativeOperationStartInfo = new IterativeOperationStartInfo(new IterationItemInformation(node));
iterativeOperationStartInfo.setSimpleCaller(true);
Operation op = task.recordIterativeOperationStart(iterativeOperationStartInfo);
if (ObjectTypeUtil.isIndestructible(node)) {
LOGGER.debug("Not deleting dead but indestructible node {}", node);
op.skipped();
// NOTE(review): this continue also skips the progress increment below — presumably intentional.
continue;
}
try {
// Selector testing is in try-catch because of possible exceptions during autz evaluation
if (!selector.test(node.asObjectable())) {
LOGGER.debug("Not deleting node {} because it was rejected by the selector", node);
op.skipped();
continue;
}
repositoryService.deleteObject(NodeType.class, node.getOid(), result);
op.succeeded();
} catch (Throwable t) {
// Deletion failure is logged and recorded but does not abort the rest of the cleanup.
op.failed(t);
LoggingUtils.logUnexpectedException(LOGGER, "Couldn't delete dead node {}", t, node);
}
// Progress is incremented only for nodes that reached the try block (deleted or failed).
task.incrementLegacyProgressAndStoreStatisticsIfTimePassed(result);
}
}
}
}
Use of com.evolveum.midpoint.schema.statistics.IterationItemInformation in the midPoint project by Evolveum — class ActivityItemProcessingStatistics, method recordOperationStart:
/**
 * Records an operation that has been just started, adding it to the list of operations
 * currently in progress.
 *
 * @param startInfo Information about the started operation, including the item being processed.
 * @return An object that should receive the status of the operation, so its end can be recorded.
 */
public synchronized Operation recordOperationStart(IterativeOperationStartInfo startInfo) {
    assertInitialized();

    IterationItemInformation itemInfo = startInfo.getItem();

    // Snapshot of the item under processing, kept in the "current operations" list.
    ProcessedItemType record = new ProcessedItemType(PrismContext.get())
            .name(itemInfo.getObjectName())
            .displayName(itemInfo.getObjectDisplayName())
            .type(itemInfo.getObjectType())
            .oid(itemInfo.getObjectOid())
            .startTimestamp(XmlTypeConverter.createXMLGregorianCalendar(startInfo.getStartTimeMillis()))
            .operationId(getNextOperationId());

    List<ProcessedItemType> currentOperations = value.getCurrent();
    currentOperations.add(record);
    LOGGER.trace("Recorded current operation. Current list size: {}. Operation: {}", currentOperations.size(), startInfo);

    return new OperationImpl(startInfo, record);
}
Use of com.evolveum.midpoint.schema.statistics.IterationItemInformation in the midPoint project by Evolveum — class ActivityReportUtil, method addItemInformation:
/**
 * Adds item-related information to a record that is related to processing of items:
 * the item's sequential number, name, and OID (when the request is known), and the
 * bucket's sequential number (when the bucket is known).
 *
 * @param record Record being enriched (mutated in place).
 * @param request Processing request describing the item; may be null.
 * @param bucket Work bucket in which the item is processed; may be null.
 */
public static void addItemInformation(@NotNull ItemRelatedRecordType record, @Nullable ItemProcessingRequest<?> request, @Nullable WorkBucketType bucket) {
    if (request != null) {
        IterationItemInformation itemInfo = request.getIterationItemInformation();
        record.itemSequentialNumber(request.getSequentialNumber())
                .itemName(itemInfo.getObjectName())
                .itemOid(itemInfo.getObjectOid());
    }
    if (bucket != null) {
        record.bucketSequentialNumber(bucket.getSequentialNumber());
    }
}
Use of com.evolveum.midpoint.schema.statistics.IterationItemInformation in the midPoint project by Evolveum — class CaseCleaner, method cleanupCases:
/**
 * Deletes closed case trees whose close timestamp is older than the age limit from the policy.
 *
 * Only root cases (no parent) are searched; each matching root is deleted together with its
 * children via {@code deleteCaseWithChildren}. Indestructible roots are skipped. Failures on
 * individual trees are recorded and logged but do not abort the remaining cleanup.
 *
 * @param policy Cleanup policy providing the maximum age of closed cases to keep.
 * @param executionTask Running task used for interruption checks, statistics, and progress.
 * @param result Operation result receiving warnings and the final statistics sub-result.
 * @throws CommonException on search failures (deletion failures are handled per case tree)
 */
public void cleanupCases(@NotNull CleanupPolicyType policy, @NotNull RunningTask executionTask, @NotNull OperationResult result) throws CommonException {
    DeletionCounters counters = new DeletionCounters();
    TimeBoundary timeBoundary = TimeBoundary.compute(policy.getMaxAge());
    XMLGregorianCalendar deleteCasesClosedUpTo = timeBoundary.boundary;
    LOGGER.debug("Starting cleanup for closed cases deleting up to {} (duration '{}').",
            deleteCasesClosedUpTo, timeBoundary.positiveDuration);

    // Search only root cases; children are removed together with their root.
    ObjectQuery obsoleteCasesQuery = prismContext.queryFor(CaseType.class)
            .item(CaseType.F_STATE).eq(SchemaConstants.CASE_STATE_CLOSED)
            .and().item(CaseType.F_CLOSE_TIMESTAMP).le(deleteCasesClosedUpTo)
            .and().item(CaseType.F_PARENT_REF).isNull()
            .build();
    List<PrismObject<CaseType>> rootObsoleteCases =
            modelService.searchObjects(CaseType.class, obsoleteCasesQuery, null, executionTask, result);
    LOGGER.debug("Found {} case tree(s) to be cleaned up", rootObsoleteCases.size());

    boolean interrupted = false;
    for (PrismObject<CaseType> rootObsoleteCase : rootObsoleteCases) {
        // Honor task suspension requests between case trees.
        if (!executionTask.canRun()) {
            result.recordWarning("Interrupted");
            LOGGER.warn("Task cleanup was interrupted.");
            interrupted = true;
            break;
        }

        // Record this case tree as one iterative operation in the task's statistics.
        IterativeOperationStartInfo startInfo =
                new IterativeOperationStartInfo(new IterationItemInformation(rootObsoleteCase));
        startInfo.setSimpleCaller(true);
        Operation op = executionTask.recordIterativeOperationStart(startInfo);
        try {
            if (ObjectTypeUtil.isIndestructible(rootObsoleteCase)) {
                LOGGER.trace("Not deleting root case {} because it's marked as indestructible", rootObsoleteCase);
                op.skipped();
            } else {
                deleteCaseWithChildren(rootObsoleteCase, counters, executionTask, result);
                op.succeeded();
            }
        } catch (Throwable t) {
            // One failed tree must not stop the cleanup of the remaining ones.
            op.failed(t);
            // Fixed message: the failure concerns the whole tree, not only the children.
            LoggingUtils.logException(LOGGER, "Couldn't delete case tree rooted at {}", t, rootObsoleteCase);
        }
        executionTask.incrementLegacyProgressAndStoreStatisticsIfTimePassed(result);
    }

    // Fully parameterized logging (the original concatenated the interruption part into the pattern).
    LOGGER.info("Case cleanup procedure {}. Successfully deleted {} cases; there were problems with deleting {} cases.",
            interrupted ? "was interrupted" : "finished", counters.deleted, counters.problems);
    String suffix = interrupted ? " Interrupted." : "";
    if (counters.problems == 0) {
        result.createSubresult(OP_STATISTICS)
                .recordStatus(SUCCESS, "Successfully deleted " + counters.deleted + " case(s)." + suffix);
    } else {
        // Grammar fixed in the user-facing message ("there was problems" -> "there were problems").
        result.createSubresult(OP_STATISTICS)
                .recordPartialError("Successfully deleted " + counters.deleted + " case(s), "
                        + "there were problems with deleting " + counters.problems + " cases." + suffix);
    }
}
Aggregations