Use of com.netflix.conductor.common.metadata.tasks.Task.Status.CANCELED in project conductor by Netflix.
From the class WorkflowExecutor, method retry.
/**
* Gets the last instance of each failed task and reschedules it.
* Gets all cancelled tasks and reschedules all of them, except JOIN and DO_WHILE (their status is switched back to IN_PROGRESS).
* Switches the workflow back to RUNNING status and calls the decider.
*
* @param workflowId the id of the workflow to be retried
* @param resumeSubworkflowTasks if true, the retry is applied to the innermost failed sub-workflow and the parent workflows are updated recursively
*/
public void retry(String workflowId, boolean resumeSubworkflowTasks) {
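// Fetch the workflow along with its tasks (the boolean flag requests that tasks be included).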
Workflow workflow = executionDAOFacade.getWorkflowById(workflowId, true);
if (!workflow.getStatus().isTerminal()) {
throw new ApplicationException(CONFLICT, "Workflow is still running. status=" + workflow.getStatus());
}
if (workflow.getTasks().isEmpty()) {
throw new ApplicationException(CONFLICT, "Workflow has not started yet");
}
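// When resuming sub-workflow tasks, find the last failed or timed-out task and drill down into the innermost failed sub-workflow it spawned.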
if (resumeSubworkflowTasks) {
Optional<Task> lTask = workflow.getTasks().stream().filter(this::findLastFailedOrTimeOutTask).findFirst();
if (lTask.isPresent()) {
workflow = findLastFailedSubWorkflow(lTask.get(), workflow);
}
}
// Get all FAILED, TIMED_OUT or CANCELED tasks that did not reach COMPLETED (or another terminal state) on a later execution.
// E.g., for the sequence of executions task1.CANCELED, task1.COMPLETED, task1 shouldn't be retried.
// Throw an exception if there are no retriable tasks.
// Handle CANCELED status on JOIN and DO_WHILE tasks as a special case.
Map<String, Task> retriableMap = new HashMap<>();
for (Task task : workflow.getTasks()) {
switch(task.getStatus()) {
case FAILED:
case FAILED_WITH_TERMINAL_ERROR:
case TIMED_OUT:
retriableMap.put(task.getReferenceTaskName(), task);
break;
case CANCELED:
if (task.getTaskType().equalsIgnoreCase(TaskType.JOIN.toString()) || task.getTaskType().equalsIgnoreCase(TaskType.DO_WHILE.toString())) {
task.setStatus(IN_PROGRESS);
// Task doesn't have to be updated yet. Will be updated along with other Workflow tasks downstream.
} else {
retriableMap.put(task.getReferenceTaskName(), task);
}
break;
default:
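// A later execution that reached a non-retriable status supersedes an earlier failure of the same task reference.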
retriableMap.remove(task.getReferenceTaskName());
break;
}
}
if (retriableMap.values().size() == 0) {
throw new ApplicationException(CONFLICT, "There are no retriable tasks! Use restart if you want to attempt entire workflow execution again.");
}
// Update Workflow with new status.
// This should load Workflow from archive, if archived.
workflow.setStatus(WorkflowStatus.RUNNING);
workflow.setLastRetriedTime(System.currentTimeMillis());
// Add to decider queue
queueDAO.push(DECIDER_QUEUE, workflow.getWorkflowId(), workflow.getPriority(), config.getSweepFrequency());
executionDAOFacade.updateWorkflow(workflow);
// taskToBeRescheduled marks the original task as `retried`, so updateTasks must only be called after the task copies have been obtained from taskToBeRescheduled.
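// Effectively-final reference needed by the lambda in the stream below.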
final Workflow finalWorkflow = workflow;
List<Task> retriableTasks = retriableMap.values().stream()
        .sorted(Comparator.comparingInt(Task::getSeq))
        .map(task -> taskToBeRescheduled(finalWorkflow, task))
        .collect(Collectors.toList());
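// Add the rescheduled task copies to the workflow's task list, deduplicating against tasks already present.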
dedupAndAddTasks(workflow, retriableTasks);
// Note: updateTasks before updateWorkflow might fail when Workflow is archived and doesn't exist in primary store.
executionDAOFacade.updateTasks(workflow.getTasks());
scheduleTask(workflow, retriableTasks);
decide(workflowId);
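// Propagate the retry to the parent: recursively through all ancestor workflows when sub-workflow tasks were resumed, otherwise just to the immediate parent.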
if (resumeSubworkflowTasks) {
updateParentWorkflowRecursively(workflow);
} else if (StringUtils.isNotEmpty(workflow.getParentWorkflowId())) {
updateParentWorkflow(workflow);
decide(workflow.getParentWorkflowId());
}
}
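A minimal caller sketch (not part of the Conductor source shown above; the workflowExecutor and workflowId variables and the logging are illustrative assumptions):

try {
    // Retry only the failed/cancelled tasks; pass true to also resume tasks inside failed sub-workflows.
    workflowExecutor.retry(workflowId, false);
} catch (ApplicationException e) {
    // CONFLICT is raised when the workflow is still running, has not started, or has no retriable tasks.
    LOGGER.warn("Retry rejected for workflow {}: {}", workflowId, e.getMessage());
}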