Use of com.emc.storageos.db.client.model.Task in the CoprHD project coprhd-controller — class TaskScrubberExecutor, method deleteOldCompletedTasksForTenant.
/**
 * Deletes old completed (non-pending) tasks for a tenant, batch by batch, stopping once
 * either all eligible tasks are removed or {@code MAXIMUM_TASK_TO_DELETE} deletions have
 * been performed. Tasks created after {@code startTimeMarker} are considered too recent
 * and are skipped. Failures are logged per batch so one bad batch does not abort the rest.
 *
 * @param tenantId tenant to delete tasks for
 * @param startTimeMarker tasks with a creation time after this marker are kept
 * @return the number of tasks deleted
 */
private int deleteOldCompletedTasksForTenant(URI tenantId, Calendar startTimeMarker) {
    log.debug("deleting completed tasks for tenant {}", tenantId);
    int tasksDeleted = 0;
    Map<String, List<URI>> batchedIds = findTasksForTenantNotPending(tenantId);
    for (Entry<String, List<URI>> entry : batchedIds.entrySet()) {
        // BUG FIX: previously the MAXIMUM_TASK_TO_DELETE limit only broke out of the inner
        // while loop, so every remaining batch was still fully processed and the cap could
        // be exceeded by up to the batch size for each remaining batch. Stop the outer
        // loop as well once the cap is reached.
        if (tasksDeleted >= MAXIMUM_TASK_TO_DELETE) {
            break;
        }
        String batch = entry.getKey();
        List<URI> ids = entry.getValue();
        log.debug("processing batch {} with {} completed tasks for tenant {}", batch, ids.size(), tenantId);
        try {
            Iterator<Task> tasks = dbClient.queryIterativeObjects(Task.class, ids, true);
            List<Task> toBeDeleted = Lists.newArrayList();
            while (tasks.hasNext()) {
                Task task = tasks.next();
                // Skip missing rows and tasks newer than the time marker.
                if (task == null || (task.getCreationTime() != null && task.getCreationTime().after(startTimeMarker))) {
                    continue;
                }
                if (!task.isPending()) {
                    tasksDeleted++;
                    toBeDeleted.add(task);
                }
                // Flush deletions in small batches to bound memory and mutation size.
                if (toBeDeleted.size() >= DELETE_BATCH_SIZE) {
                    removeTasks(toBeDeleted, tenantId);
                    toBeDeleted.clear();
                }
                // Stop once we have deleted as many tasks as allowed in one pass.
                if (tasksDeleted >= MAXIMUM_TASK_TO_DELETE) {
                    break;
                }
            }
            // Flush any remainder smaller than DELETE_BATCH_SIZE.
            if (!toBeDeleted.isEmpty()) {
                removeTasks(toBeDeleted, tenantId);
            }
        } catch (Exception e) {
            // Best-effort scrubbing: log and move on to the next batch.
            log.error(e.getMessage(), e);
        }
    }
    log.debug("done deleting completed tasks for tenant {}", tenantId);
    return tasksDeleted;
}
Use of com.emc.storageos.db.client.model.Task in the CoprHD project coprhd-controller — class DbClientTest, method testTaskCleanup.
/**
 * Verifies {@code TaskUtils.cleanupPendingTasks}:
 * <ul>
 * <li>a time marker older than the tasks' start time removes nothing;</li>
 * <li>a time marker newer than the tasks' start time removes all matching pending tasks;</li>
 * <li>the marker-less overload removes all matching pending tasks unconditionally.</li>
 * </ul>
 */
@Test
public void testTaskCleanup() throws Exception {
    _logger.info("Starting testTaskCleanup");
    int taskCount = 100;

    TenantOrg tenant = new TenantOrg();
    tenant.setId(URIUtil.createId(TenantOrg.class));
    tenant.setLabel("tenant-unit-test");
    _dbClient.createObject(tenant);

    List<Project> projects = createProjects(1, tenant);
    List<Volume> volumes = createVolumes(1, "foobar", projects.get(0));
    Volume volume = volumes.get(0);
    volume.setTenant(new NamedURI(tenant.getId(), tenant.getLabel()));
    DbClient dbClient = _dbClient;

    Calendar twoWeeksAgo = toCalendar(LocalDateTime.now().minusWeeks(2));
    Calendar sixWeeksAgo = toCalendar(LocalDateTime.now().minusWeeks(6));
    Calendar sixMonthsAgo = toCalendar(LocalDateTime.now().minusMonths(6));

    createPendingTestTasks(dbClient, volume, sixWeeksAgo, taskCount);
    Iterator<Task> pendingTaskItr = TaskUtils.findPendingTasksForResource(dbClient, volume.getId(), volume.getTenant().getURI());
    // JUnit assertEquals takes (expected, actual) — previously reversed, which made
    // failure messages misleading.
    Assert.assertEquals(taskCount, iteratorCount(pendingTaskItr));

    // Marker older than the tasks' six-week-old start time: nothing should be cleaned up.
    TaskUtils.cleanupPendingTasks(dbClient, volume.getId(), "TEST TASK", volume.getTenant().getURI(), sixMonthsAgo);
    pendingTaskItr = TaskUtils.findPendingTasksForResource(dbClient, volume.getId(), volume.getTenant().getURI());
    Assert.assertEquals(taskCount, iteratorCount(pendingTaskItr));

    // Marker newer than the tasks' start time: all pending tasks should be cleaned up.
    TaskUtils.cleanupPendingTasks(dbClient, volume.getId(), "TEST TASK", volume.getTenant().getURI(), twoWeeksAgo);
    pendingTaskItr = TaskUtils.findPendingTasksForResource(dbClient, volume.getId(), volume.getTenant().getURI());
    Assert.assertEquals(0, iteratorCount(pendingTaskItr));

    // Marker-less overload: all pending tasks are cleaned up regardless of age.
    createPendingTestTasks(dbClient, volume, sixWeeksAgo, taskCount);
    pendingTaskItr = TaskUtils.findPendingTasksForResource(dbClient, volume.getId(), volume.getTenant().getURI());
    Assert.assertEquals(taskCount, iteratorCount(pendingTaskItr));
    TaskUtils.cleanupPendingTasks(dbClient, volume.getId(), "TEST TASK", volume.getTenant().getURI());
    pendingTaskItr = TaskUtils.findPendingTasksForResource(dbClient, volume.getId(), volume.getTenant().getURI());
    Assert.assertEquals(0, iteratorCount(pendingTaskItr));
}

/** Converts a LocalDateTime (system default zone) to the Calendar the Task API expects. */
private Calendar toCalendar(LocalDateTime dateTime) {
    Calendar calendar = Calendar.getInstance();
    calendar.setTime(Date.from(dateTime.atZone(ZoneId.systemDefault()).toInstant()));
    return calendar;
}

/** Creates {@code count} pending "TEST TASK" tasks on the volume, all with the given start time. */
private void createPendingTestTasks(DbClient dbClient, Volume volume, Calendar startTime, int count) {
    for (int i = 0; i < count; i++) {
        Task task = new Task();
        task.setId(URIUtil.createId(Task.class));
        task.setCompletedFlag(false);
        task.setDescription("test task");
        task.setLabel("TEST TASK");
        task.setMessage("test task");
        task.setProgress(0);
        task.setRequestId(UUID.randomUUID().toString());
        task.setResource(new NamedURI(volume.getId(), volume.getLabel()));
        task.setStartTime(startTime);
        task.setStatus(Task.Status.pending.toString());
        task.setTenant(volume.getTenant().getURI());
        dbClient.createObject(task);
    }
}
Use of com.emc.storageos.db.client.model.Task in the CoprHD project coprhd-controller — class DbClientImpl, method serializeTasks.
/**
 * Serializes a Task row for every changed operation on the object's status map:
 * creates a new Task when none exists for the request id, otherwise updates the
 * existing one from the Operation. Serialized task ids are collected for index cleanup.
 *
 * @param dataObject object whose OpStatusMap changes are being persisted
 * @param mutator row mutator the task rows are written through
 * @param objectsToCleanup out-param: ids of tasks whose serialization requires cleanup
 */
private void serializeTasks(DataObject dataObject, RowMutator mutator, List<URI> objectsToCleanup) {
    OpStatusMap statusMap = dataObject.getOpStatus();
    // Nothing changed — nothing to serialize. (A previous version re-fetched the changed
    // key set below and null-checked it again; that check was redundant given this guard.)
    if (statusMap == null || statusMap.getChangedKeySet() == null || statusMap.getChangedKeySet().isEmpty()) {
        return;
    }
    DataObjectType taskDoType = TypeMap.getDoType(Task.class);
    for (String requestId : statusMap.getChangedKeySet()) {
        Operation operation = statusMap.get(requestId);
        Task task = TaskUtils.findTaskForRequestId(this, dataObject.getId(), requestId);
        if (task == null) {
            // Task doesn't currently exist for this id, so create it
            task = new Task();
            task.setId(URIUtil.createId(Task.class));
            task.setRequestId(requestId);
            task.setInactive(false);
            task.setServiceCode(operation.getServiceCode());
            task.setLabel(operation.getName());
            task.setStatus(operation.getStatus());
            task.setDescription(operation.getDescription());
            Integer progress = operation.getProgress();
            task.setProgress(progress != null ? progress : 0);
            task.setMessage(operation.getMessage());
            task.setAssociatedResources(operation.rawAssociatedResources());
            task.setCreationTime(Calendar.getInstance());
            task.setInactive(false);
            task.setStartTime(operation.getStartTime());
            task.setEndTime(getEndTime(operation));
            // Often dummy objects are used that just contain an ID, for some things we need access to the entire object
            DataObject loadedObject = dataObject;
            if (StringUtils.isBlank(dataObject.getLabel())) {
                loadedObject = this.queryObject(URIUtil.getModelClass(dataObject.getId()), dataObject.getId());
            }
            if (loadedObject == null) {
                throw new RuntimeException("Task created on a resource which doesn't exist " + dataObject.getId());
            }
            task.setResource(new NamedURI(loadedObject.getId(), loadedObject.getLabel()));
            // Tasks without a resolvable tenant are filed under the system tenant.
            URI tenantId = getTenantURI(loadedObject);
            if (tenantId == null) {
                task.setTenant(TenantOrg.SYSTEM_TENANT);
            } else {
                task.setTenant(tenantId);
            }
            _log.info("Created task {}, {}", task.getId() + " (" + task.getRequestId() + ")", task.getLabel());
        } else {
            // Task exists so update it
            task.setServiceCode(operation.getServiceCode());
            task.setStatus(operation.getStatus());
            task.setMessage(operation.getMessage());
            // Some code isn't updating progress to 100 when completed, so fix this here
            if (Objects.equal(task.getStatus(), "pending") || Objects.equal(task.getStatus(), "suspended_no_error") || Objects.equal(task.getStatus(), "suspended_error")) {
                task.setProgress(operation.getProgress());
            } else {
                task.setProgress(COMPLETED_PROGRESS);
            }
            task.setStartTime(operation.getStartTime());
            task.setEndTime(getEndTime(operation));
            task.setAssociatedResources(operation.rawAssociatedResources());
            if (!Objects.equal(task.getStatus(), "pending")) {
                _log.info("Completed task {}, {}", task.getId() + " (" + task.getRequestId() + ")", task.getStatus());
            }
        }
        if (taskDoType.serialize(mutator, task)) {
            objectsToCleanup.add(task.getId());
        }
        operation.addTask(dataObject.getId(), task);
    }
}
Use of com.emc.storageos.db.client.model.Task in the CoprHD project coprhd-controller — class DbClientImpl, method updateTaskStatus.
/**
 * Updates the status of the operation identified by {@code opId} on the object {@code id}
 * and persists the object. If the in-memory OpStatusMap entry for the operation has
 * expired (TTL), the entry is rebuilt from the persisted Task row before applying the update.
 *
 * @param clazz data-object type of the target object
 * @param id id of the target object
 * @param opId request/operation id whose status is updated
 * @param updateOperation operation carrying the new status/progress/message
 * @param resetStartTime whether the operation's start time should be reset on update
 * @return the updated Operation, or null if the object or its task entry cannot be found/updated
 */
private Operation updateTaskStatus(Class<? extends DataObject> clazz, URI id, String opId, Operation updateOperation, boolean resetStartTime) {
List<URI> ids = new ArrayList<URI>(Arrays.asList(id));
// First try a field-level query that loads only the "status" map.
List<? extends DataObject> objs = queryObjectField(clazz, "status", ids);
if (objs == null || objs.isEmpty()) {
// When "status" map is null (empty) we do not get object when query by the map field name in CF.
// Try to get object by id.
objs = queryObject(clazz, ids);
if (objs == null || objs.isEmpty()) {
_log.error("Cannot find object {} in {}", id, clazz.getSimpleName());
return null;
}
_log.info("Object {} has empty status map", id);
}
DataObject doobj = objs.get(0);
_log.info(String.format("Updating operation %s for object %s with status %s", opId, doobj.getId(), updateOperation.getStatus()));
Operation op = doobj.getOpStatus().updateTaskStatus(opId, updateOperation, resetStartTime);
if (op == null) {
// OpStatusMap does not have entry for a given opId. The entry already expired based on ttl.
// Recreate the entry for this opId from the task object and proceed with update
_log.info("Operation map for object {} does not have entry for operation id {}", doobj.getId(), opId);
Task task = TaskUtils.findTaskForRequestId(this, doobj.getId(), opId);
if (task != null) {
_log.info(String.format("Creating operation %s for object %s from task instance %s", opId, doobj.getId(), task.getId()));
// Create operation instance for the task
Operation operation = TaskUtils.createOperation(task);
doobj.getOpStatus().createTaskStatus(opId, operation);
// NOTE(review): resetStartTime is hard-coded to false on this retry path, dropping the
// caller's flag — presumably because the operation was just rebuilt from the persisted
// task and its start time should be kept; confirm this is deliberate.
op = doobj.getOpStatus().updateTaskStatus(opId, updateOperation, false);
if (op == null) {
_log.error(String.format("Failed to update operation %s for object %s ", opId, doobj.getId()));
return null;
}
} else {
// No persisted Task either — nothing to rebuild the entry from.
_log.warn(String.format("Task for operation %s and object %s does not exist.", opId, doobj.getId()));
return null;
}
}
persistObject(doobj);
return op;
}
Use of com.emc.storageos.db.client.model.Task in the CoprHD project coprhd-controller — class WorkflowService, method logWorkflow.
/**
 * Persist the Cassandra logging record for the Workflow. Creates the record on first call
 * (assigning the workflow URI if needed) and updates it thereafter; when the workflow is
 * completed, also records its final state and message, and links any Tasks that share the
 * workflow's orchestration task id back to this workflow.
 *
 * @param workflow the in-memory workflow being logged
 * @param completed - If true, assumes the Workflow has been completed
 *            (reached a terminal state).
 */
void logWorkflow(Workflow workflow, boolean completed) {
    try {
        boolean created = false;
        com.emc.storageos.db.client.model.Workflow logWorkflow = null;
        if (workflow._workflowURI != null) {
            logWorkflow = _dbClient.queryObject(com.emc.storageos.db.client.model.Workflow.class, workflow._workflowURI);
        } else {
            // First time this workflow is logged — mint its persistent id.
            workflow._workflowURI = URIUtil.createId(com.emc.storageos.db.client.model.Workflow.class);
        }
        // Are we updating or adding?
        if (logWorkflow == null) {
            created = true;
            logWorkflow = new com.emc.storageos.db.client.model.Workflow();
            logWorkflow.setId(workflow._workflowURI);
            logWorkflow.setCreationTime(Calendar.getInstance());
            logWorkflow.setCompleted(false);
        }
        logWorkflow.setOrchControllerName(workflow._orchControllerName);
        logWorkflow.setOrchMethod(workflow._orchMethod);
        logWorkflow.setOrchTaskId(workflow._orchTaskId);
        logWorkflow.setCompleted(completed);
        if (completed) {
            // If completed, log the final state and error message.
            try {
                Map<String, StepStatus> statusMap = workflow.getAllStepStatus();
                String[] errorMessage = new String[] { workflow._successMessage };
                // getOverallState fills errorMessage[0] as a side effect.
                Workflow.getOverallState(statusMap, errorMessage);
                WorkflowState state = workflow.getWorkflowState();
                logWorkflow.setCompletionState(state.name());
                logWorkflow.setCompletionMessage(errorMessage[0]);
            } catch (WorkflowException ex) {
                _log.error(ex.getMessage(), ex);
            }
        }
        if (created) {
            _dbClient.createObject(logWorkflow);
        } else {
            _dbClient.updateObject(logWorkflow);
        }
        if (workflow.getOrchTaskId() != null) {
            List<Task> tasks = new ArrayList<>();
            if (workflow._taskCompleter != null && workflow._taskCompleter.getId() != null) {
                // De-duplicate: the same Task may be found for multiple resource ids
                // (e.g. presumably when a completer spans several resources, such as
                // migrating a non-CG virtual volume — TODO confirm).
                Set<URI> taskIds = new HashSet<>();
                for (URI resourceId : workflow._taskCompleter.getIds()) {
                    Task task = TaskUtils.findTaskForRequestId(_dbClient, resourceId, workflow.getOrchTaskId());
                    if (task != null && !taskIds.contains(task.getId())) {
                        tasks.add(task);
                        taskIds.add(task.getId());
                    }
                }
                // Also pick up tasks where the resource appears as an associated resource
                // rather than the primary one.
                for (URI resourceId : workflow._taskCompleter.getIds()) {
                    Task task = TaskUtils.findTaskForRequestIdAssociatedResource(_dbClient, resourceId, workflow.getOrchTaskId());
                    if (task != null && !taskIds.contains(task.getId())) {
                        tasks.add(task);
                        taskIds.add(task.getId());
                    }
                }
            } else {
                // No completer — fall back to all tasks sharing the orchestration task id.
                List<Task> foundTasks = TaskUtils.findTasksForRequestId(_dbClient, workflow.getOrchTaskId());
                if (foundTasks != null && !foundTasks.isEmpty()) {
                    tasks.addAll(foundTasks);
                }
            }
            // (tasks is never null here; the previous redundant null check was removed.)
            if (!tasks.isEmpty()) {
                for (Task task : tasks) {
                    task.setWorkflow(workflow.getWorkflowURI());
                }
                _dbClient.updateObject(tasks);
            }
        }
    } catch (DatabaseException ex) {
        _log.error("Cannot persist Cassandra Workflow record " + workflow.getWorkflowURI().toString(), ex);
    }
}
Aggregations