Use of org.apache.airavata.model.task.TaskTypes in project airavata by apache.
The executeTaskListFrom method of the GFacEngineImpl class walks the process's task execution order and dispatches each task on its TaskTypes value:
private void executeTaskListFrom(ProcessContext processContext, String startingTaskId) throws GFacException {
    // checkpoint
    if (processContext.isInterrupted() && processContext.getProcessState() != ProcessState.MONITORING) {
        GFacUtils.handleProcessInterrupt(processContext);
        return;
    }
    List<TaskModel> taskList = processContext.getTaskList();
    Map<String, TaskModel> taskMap = processContext.getTaskMap();
    boolean fastForward = true;
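    // Fast-forward through the execution order until startingTaskId is reached, so a recovered
    // process resumes at the task it previously stopped on instead of re-running earlier tasks.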
    for (String taskId : processContext.getTaskExecutionOrder()) {
        if (fastForward) {
            if (taskId.equalsIgnoreCase(startingTaskId)) {
                fastForward = false;
            } else {
                continue;
            }
        }
        TaskModel taskModel = taskMap.get(taskId);
        processContext.setCurrentExecutingTaskModel(taskModel);
        TaskTypes taskType = taskModel.getTaskType();
        TaskContext taskContext = getTaskContext(processContext);
        taskContext.setTaskModel(taskModel);
        ProcessStatus status = null;
        switch (taskType) {
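            // ENV_SETUP: publish the CONFIGURING_WORKSPACE state and configure the workspace on the compute resource.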
            case ENV_SETUP:
                status = new ProcessStatus(ProcessState.CONFIGURING_WORKSPACE);
                status.setTimeOfStateChange(AiravataUtils.getCurrentTimestamp().getTime());
                processContext.setProcessStatus(status);
                GFacUtils.saveAndPublishProcessStatus(processContext);
                // checkpoint
                if (processContext.isInterrupted()) {
                    GFacUtils.handleProcessInterrupt(processContext);
                    return;
                }
                configureWorkspace(taskContext, processContext.isRecovery());
                // checkpoint
                if (processContext.isInterrupted()) {
                    GFacUtils.handleProcessInterrupt(processContext);
                    return;
                }
                break;
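            // DATA_STAGING: stage data to or from the compute resource, driven by the sub-task's DataStageType.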
            case DATA_STAGING:
                try {
                    // checkpoint
                    if (processContext.isInterrupted()) {
                        GFacUtils.handleProcessInterrupt(processContext);
                        return;
                    }
                    DataStagingTaskModel subTaskModel = (DataStagingTaskModel) taskContext.getSubTaskModel();
                    DataStageType type = subTaskModel.getType();
                    switch (type) {
                        case INPUT:
                            status = new ProcessStatus(ProcessState.INPUT_DATA_STAGING);
                            status.setTimeOfStateChange(AiravataUtils.getCurrentTimestamp().getTime());
                            processContext.setProcessStatus(status);
                            GFacUtils.saveAndPublishProcessStatus(processContext);
                            taskContext.setProcessInput(subTaskModel.getProcessInput());
                            inputDataStaging(taskContext, processContext.isRecovery());
                            break;
                        case OUPUT:
                            status = new ProcessStatus(ProcessState.OUTPUT_DATA_STAGING);
                            status.setTimeOfStateChange(AiravataUtils.getCurrentTimestamp().getTime());
                            processContext.setProcessStatus(status);
                            GFacUtils.saveAndPublishProcessStatus(processContext);
                            taskContext.setProcessOutput(subTaskModel.getProcessOutput());
                            outputDataStaging(taskContext, processContext.isRecovery(), false);
                            break;
                        case ARCHIVE_OUTPUT:
                            status = new ProcessStatus(ProcessState.OUTPUT_DATA_STAGING);
                            status.setTimeOfStateChange(AiravataUtils.getCurrentTimestamp().getTime());
                            processContext.setProcessStatus(status);
                            GFacUtils.saveAndPublishProcessStatus(processContext);
                            outputDataStaging(taskContext, processContext.isRecovery(), true);
                            break;
                    }
                    // checkpoint
                    if (processContext.isInterrupted()) {
                        GFacUtils.handleProcessInterrupt(processContext);
                        return;
                    }
                } catch (TException e) {
                    throw new GFacException(e);
                }
                break;
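            // JOB_SUBMISSION: publish the EXECUTING state and submit the job to the compute resource.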
            case JOB_SUBMISSION:
                // checkpoint
                if (processContext.isInterrupted()) {
                    GFacUtils.handleProcessInterrupt(processContext);
                    return;
                }
                status = new ProcessStatus(ProcessState.EXECUTING);
                status.setTimeOfStateChange(AiravataUtils.getCurrentTimestamp().getTime());
                processContext.setProcessStatus(status);
                GFacUtils.saveAndPublishProcessStatus(processContext);
                executeJobSubmission(taskContext, processContext.isRecovery());
                // Don't put any checkpoint in between JobSubmission and Monitoring tasks
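                // If the job reached SUBMITTED, QUEUED, or ACTIVE, create an ad-hoc OUTPUT_FETCHING task for
                // every process output flagged as streaming and start staging it while the job is still running.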
                JobStatus jobStatus = processContext.getJobModel().getJobStatuses().get(0);
                if (jobStatus != null && (jobStatus.getJobState() == JobState.SUBMITTED
                        || jobStatus.getJobState() == JobState.QUEUED
                        || jobStatus.getJobState() == JobState.ACTIVE)) {
                    List<OutputDataObjectType> processOutputs = processContext.getProcessModel().getProcessOutputs();
                    if (processOutputs != null && !processOutputs.isEmpty()) {
                        for (OutputDataObjectType output : processOutputs) {
                            try {
                                if (output.isOutputStreaming()) {
                                    TaskModel streamingTaskModel = new TaskModel();
                                    streamingTaskModel.setTaskType(TaskTypes.OUTPUT_FETCHING);
                                    streamingTaskModel.setTaskStatuses(Arrays.asList(new TaskStatus(TaskState.CREATED)));
                                    streamingTaskModel.setCreationTime(AiravataUtils.getCurrentTimestamp().getTime());
                                    streamingTaskModel.setParentProcessId(processContext.getProcessId());
                                    TaskContext streamingTaskContext = getTaskContext(processContext);
                                    DataStagingTaskModel submodel = new DataStagingTaskModel();
                                    submodel.setType(DataStageType.OUPUT);
                                    submodel.setProcessOutput(output);
                                    URI source = new URI(processContext.getDataMovementProtocol().name(),
                                            processContext.getComputeResourceLoginUserName(),
                                            processContext.getComputeResourceDescription().getHostName(), 22,
                                            processContext.getWorkingDir() + output.getValue(), null, null);
                                    submodel.setSource(source.getPath());
                                    submodel.setDestination("dummy://temp/file/location");
                                    streamingTaskModel.setSubTaskModel(ThriftUtils.serializeThriftObject(submodel));
                                    String streamTaskId = (String) processContext.getExperimentCatalog()
                                            .add(ExpCatChildDataType.TASK, streamingTaskModel, processContext.getProcessId());
                                    streamingTaskModel.setTaskId(streamTaskId);
                                    streamingTaskContext.setTaskModel(streamingTaskModel);
                                    executeDataStreaming(streamingTaskContext, processContext.isRecovery());
                                }
                            } catch (URISyntaxException | TException | RegistryException e) {
                                log.error("Error while streaming output " + output.getValue());
                            }
                        }
                    }
                }
                break;
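            // MONITORING: publish the MONITORING state and start monitoring the submitted job.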
            case MONITORING:
                status = new ProcessStatus(ProcessState.MONITORING);
                status.setTimeOfStateChange(AiravataUtils.getCurrentTimestamp().getTime());
                processContext.setProcessStatus(status);
                GFacUtils.saveAndPublishProcessStatus(processContext);
                executeJobMonitoring(taskContext, processContext.isRecovery());
                break;
            case ENV_CLEANUP:
                // TODO implement environment clean up task logic
                break;
            default:
                throw new GFacException("Unsupported Task type");
        }
        if (processContext.isPauseTaskExecution()) {
            // If any task puts the processContext into a wait state, that same task must resume its execution.
            return;
        }
    }
    processContext.setComplete(true);
}
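For reference, the dispatch above boils down to a TaskTypes-driven switch that publishes a ProcessState before running each task. Below is a minimal, self-contained sketch of that pattern; the local enums are stand-ins listing only the constants referenced above, not the full generated org.apache.airavata.model.task.TaskTypes and ProcessState definitions.

// Minimal sketch of the TaskTypes -> ProcessState dispatch used in executeTaskListFrom.
// The enums below are local stand-ins for the generated Airavata data-model types.
public class TaskDispatchSketch {

    enum TaskTypes { ENV_SETUP, DATA_STAGING, JOB_SUBMISSION, MONITORING, ENV_CLEANUP, OUTPUT_FETCHING }

    enum ProcessState { CONFIGURING_WORKSPACE, INPUT_DATA_STAGING, OUTPUT_DATA_STAGING, EXECUTING, MONITORING }

    // Returns the process state published before a task of the given type runs. DATA_STAGING is
    // resolved further by its DataStageType sub-task, so INPUT_DATA_STAGING is only a placeholder here.
    static ProcessState stateFor(TaskTypes taskType) {
        switch (taskType) {
            case ENV_SETUP:
                return ProcessState.CONFIGURING_WORKSPACE;
            case DATA_STAGING:
                return ProcessState.INPUT_DATA_STAGING; // or OUTPUT_DATA_STAGING, depending on DataStageType
            case JOB_SUBMISSION:
                return ProcessState.EXECUTING;
            case MONITORING:
                return ProcessState.MONITORING;
            default:
                throw new IllegalArgumentException("Unsupported task type: " + taskType);
        }
    }

    public static void main(String[] args) {
        for (TaskTypes type : new TaskTypes[]{TaskTypes.ENV_SETUP, TaskTypes.JOB_SUBMISSION, TaskTypes.MONITORING}) {
            System.out.println(type + " -> " + stateFor(type));
        }
    }
}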