Use of org.apache.airavata.model.parallelism.ApplicationParallelismType in the apache/airavata project.
Class ApplicationProcessor, method isParallelJob.
/**
 * Determines whether the process described by the given context runs a parallel
 * application (MPI, hybrid OpenMP+MPI, or pure OpenMP).
 *
 * @param context process context carrying the application deployment description
 * @return {@code true} for MPI, OPENMP_MPI, or OPENMP parallelism; {@code false}
 *         otherwise (including when no parallelism type is set)
 */
public static boolean isParallelJob(ProcessContext context) {
    ApplicationDeploymentDescription appDep = context.getApplicationDeploymentDescription();
    ApplicationParallelismType parallelism = appDep.getParallelism();
    // Enum identity comparison is null-safe; the original parallelism.equals(...)
    // would throw NPE when the deployment has no parallelism configured.
    return parallelism == ApplicationParallelismType.MPI
            || parallelism == ApplicationParallelismType.OPENMP_MPI
            || parallelism == ApplicationParallelismType.OPENMP;
}
Use of org.apache.airavata.model.parallelism.ApplicationParallelismType in the apache/airavata project.
Class ApplicationProcessor, method generateJobSpecificAppElements.
/**
 * Populates the JSDL job definition with application-specific elements taken from
 * the process context: executable path, stdout/stderr file names, user name and —
 * for parallel (SPMD) applications — the SPMD variation, process/thread counts.
 *
 * @param value   JSDL job definition to populate (mutated in place)
 * @param context process context providing deployment and interface descriptions
 */
public static void generateJobSpecificAppElements(JobDefinitionType value, ProcessContext context) {
    String userName = getUserNameFromContext(context);
    // if (userName.equalsIgnoreCase("admin")){
    // userName = "CN=zdv575, O=Ultrascan Gateway, C=DE";
    // }
    ApplicationDeploymentDescription appDep = context.getApplicationDeploymentDescription();
    String appname = context.getApplicationInterfaceDescription().getApplicationName();
    ApplicationParallelismType parallelism = appDep.getParallelism();
    ApplicationType appType = JSDLUtils.getOrCreateApplication(value);
    appType.setApplicationName(appname);
    // if (appDep.getSetEnvironment().size() > 0) {
    // createApplicationEnvironment(value, appDep.getSetEnvironment(), parallelism);
    // }
    //
    // Reduce the stdout/stderr locations to bare file names (strip any directory part).
    String stdout = context.getStdoutLocation();
    String stderr = context.getStderrLocation();
    if (stdout != null) {
        stdout = stdout.substring(stdout.lastIndexOf('/') + 1);
    }
    if (stderr != null) {
        stderr = stderr.substring(stderr.lastIndexOf('/') + 1);
    }
    // Default to "stdout"/"stderr" when no location was provided.
    stdout = (stdout == null || stdout.equals("")) ? "stdout" : stdout;
    // BUGFIX: the original tested `stdout == null` here, which NPE'd on
    // `stderr.equals("")` whenever stderr was null but stdout was set.
    stderr = (stderr == null || stderr.equals("")) ? "stderr" : stderr;
    if (appDep.getExecutablePath() != null) {
        FileNameType fNameType = FileNameType.Factory.newInstance();
        fNameType.setStringValue(appDep.getExecutablePath());
        if (isParallelJob(context)) {
            // Parallel application: describe it as a JSDL SPMD application.
            JSDLUtils.getOrCreateSPMDApplication(value).setExecutable(fNameType);
            if (parallelism.equals(ApplicationParallelismType.OPENMP_MPI)) {
                JSDLUtils.getSPMDApplication(value).setSPMDVariation(SPMDVariations.OpenMPI.value());
            } else if (parallelism.equals(ApplicationParallelismType.MPI)) {
                JSDLUtils.getSPMDApplication(value).setSPMDVariation(SPMDVariations.MPI.value());
            }
            // setting number of processes; a missing or non-numeric input is ignored.
            try {
                String np = getInputAsString(context, BESConstants.NUMBER_OF_PROCESSES);
                if ((np != null) && (Integer.parseInt(np) > 0)) {
                    NumberOfProcessesType num = NumberOfProcessesType.Factory.newInstance();
                    num.setStringValue(np);
                    JSDLUtils.getSPMDApplication(value).setNumberOfProcesses(num);
                }
            } catch (RuntimeException ignored) {
                // best-effort: leave the number of processes unset
            }
            try {
                // setting processes per host; a missing or non-numeric input is ignored.
                String pphost = getInputAsString(context, BESConstants.PROCESSES_PER_HOST);
                if ((pphost != null) && (Integer.parseInt(pphost) > 0)) {
                    ProcessesPerHostType pph = ProcessesPerHostType.Factory.newInstance();
                    pph.setStringValue(String.valueOf(pphost));
                    JSDLUtils.getSPMDApplication(value).setProcessesPerHost(pph);
                }
            } catch (RuntimeException ignored) {
                // best-effort: leave processes-per-host unset
            }
            int totalThreadCount = context.getProcessModel().getProcessResourceSchedule().getNumberOfThreads();
            // we take it as threads per processes
            if (totalThreadCount > 0) {
                ThreadsPerProcessType tpp = ThreadsPerProcessType.Factory.newInstance();
                tpp.setStringValue(String.valueOf(totalThreadCount));
                JSDLUtils.getSPMDApplication(value).setThreadsPerProcess(tpp);
            }
            if (userName != null) {
                UserNameType userNameType = UserNameType.Factory.newInstance();
                userNameType.setStringValue(userName);
                JSDLUtils.getSPMDApplication(value).setUserName(userNameType);
            }
            if (stdout != null) {
                FileNameType fName = FileNameType.Factory.newInstance();
                fName.setStringValue(stdout);
                JSDLUtils.getOrCreateSPMDApplication(value).setOutput(fName);
            }
            if (stderr != null) {
                FileNameType fName = FileNameType.Factory.newInstance();
                fName.setStringValue(stderr);
                JSDLUtils.getOrCreateSPMDApplication(value).setError(fName);
            }
        } else {
            // Serial application: describe it as a JSDL POSIX application.
            JSDLUtils.getOrCreatePOSIXApplication(value).setExecutable(fNameType);
            if (userName != null) {
                UserNameType userNameType = UserNameType.Factory.newInstance();
                userNameType.setStringValue(userName);
                JSDLUtils.getOrCreatePOSIXApplication(value).setUserName(userNameType);
            }
            if (stdout != null) {
                FileNameType fName = FileNameType.Factory.newInstance();
                fName.setStringValue(stdout);
                JSDLUtils.getOrCreatePOSIXApplication(value).setOutput(fName);
            }
            if (stderr != null) {
                FileNameType fName = FileNameType.Factory.newInstance();
                fName.setStringValue(stderr);
                JSDLUtils.getOrCreatePOSIXApplication(value).setError(fName);
            }
        }
    }
}
Use of org.apache.airavata.model.parallelism.ApplicationParallelismType in the apache/airavata project.
Class GFacUtils, method createGroovyMap.
/**
 * Builds the Groovy template binding map used to render job submission scripts:
 * directories, executable, stdout/stderr, scheduling (queue, nodes, CPUs, walltime,
 * memory), module/pre/post job commands, and the parallelism launcher prefix.
 *
 * @param processContext process-level context (never null)
 * @param taskContext    task-level context; may be null, in which case the
 *                       task-specific walltime is skipped
 * @return populated {@link GroovyMap}
 * @throws GFacException when a non-SERIAL parallelism has no prefix configured
 *                       in the resource job manager
 */
public static GroovyMap createGroovyMap(ProcessContext processContext, TaskContext taskContext) throws GFacException, AppCatalogException, ApplicationSettingsException {
    GroovyMap groovyMap = new GroovyMap();
    ProcessModel processModel = processContext.getProcessModel();
    ResourceJobManager resourceJobManager = getResourceJobManager(processContext);
    // set email options and addresses
    setMailAddresses(processContext, groovyMap);
    groovyMap.add(Script.INPUT_DIR, processContext.getInputDir());
    groovyMap.add(Script.OUTPUT_DIR, processContext.getOutputDir());
    groovyMap.add(Script.EXECUTABLE_PATH, processContext.getApplicationDeploymentDescription().getExecutablePath());
    groovyMap.add(Script.STANDARD_OUT_FILE, processContext.getStdoutLocation());
    groovyMap.add(Script.STANDARD_ERROR_FILE, processContext.getStderrLocation());
    groovyMap.add(Script.SCRATCH_LOCATION, processContext.getScratchLocation());
    groovyMap.add(Script.GATEWAY_ID, processContext.getGatewayId());
    groovyMap.add(Script.GATEWAY_USER_NAME, processContext.getProcessModel().getUserName());
    groovyMap.add(Script.APPLICATION_NAME, processContext.getApplicationInterfaceDescription().getApplicationName());
    groovyMap.add(Script.QUEUE_SPECIFIC_MACROS, processContext.getQueueSpecificMacros());
    groovyMap.add(Script.ACCOUNT_STRING, processContext.getAllocationProjectNumber());
    groovyMap.add(Script.RESERVATION, processContext.getReservation());
    // To make job name alpha numeric
    groovyMap.add(Script.JOB_NAME, "A" + String.valueOf(generateJobName()));
    groovyMap.add(Script.WORKING_DIR, processContext.getWorkingDir());
    List<String> inputValues = getProcessInputValues(processModel.getProcessInputs());
    inputValues.addAll(getProcessOutputValues(processModel.getProcessOutputs()));
    groovyMap.add(Script.INPUTS, inputValues);
    groovyMap.add(Script.USER_NAME, processContext.getJobSubmissionRemoteCluster().getServerInfo().getUserName());
    groovyMap.add(Script.SHELL_NAME, "/bin/bash");
    // get walltime from the job submission sub-task, if one is available
    if (taskContext != null) {
        try {
            JobSubmissionTaskModel jobSubmissionTaskModel = ((JobSubmissionTaskModel) taskContext.getSubTaskModel());
            if (jobSubmissionTaskModel.getWallTime() > 0) {
                groovyMap.add(Script.MAX_WALL_TIME, GFacUtils.maxWallTimeCalculator(jobSubmissionTaskModel.getWallTime()));
                if (resourceJobManager != null) {
                    // LSF expects its walltime in a different format.
                    if (resourceJobManager.getResourceJobManagerType().equals(ResourceJobManagerType.LSF)) {
                        groovyMap.add(Script.MAX_WALL_TIME, GFacUtils.maxWallTimeCalculatorForLSF(jobSubmissionTaskModel.getWallTime()));
                    }
                }
            }
        } catch (TException e) {
            log.error("Error while getting job submission sub task model", e);
        }
    }
    // NOTE: Give precedence to data comes with experiment
    // qos per queue
    String qoS = getQoS(processContext.getQualityOfService(), processContext.getQueueName());
    if (qoS != null) {
        groovyMap.add(Script.QUALITY_OF_SERVICE, qoS);
    }
    ComputationalResourceSchedulingModel scheduling = processModel.getProcessResourceSchedule();
    if (scheduling != null) {
        int totalNodeCount = scheduling.getNodeCount();
        int totalCPUCount = scheduling.getTotalCPUCount();
        if (isValid(scheduling.getQueueName())) {
            groovyMap.add(Script.QUEUE_NAME, scheduling.getQueueName());
        }
        if (totalNodeCount > 0) {
            groovyMap.add(Script.NODES, totalNodeCount);
        }
        if (totalCPUCount > 0) {
            // BUGFIX: guard against ArithmeticException when the node count is
            // unset (<= 0); fall back to all CPUs on a single node.
            int ppn = totalNodeCount > 0 ? totalCPUCount / totalNodeCount : totalCPUCount;
            groovyMap.add(Script.PROCESS_PER_NODE, ppn);
            groovyMap.add(Script.CPU_COUNT, totalCPUCount);
        }
        // Only use the scheduling walltime when the task did not already set one;
        // if so we ignore scheduling configuration.
        if (scheduling.getWallTimeLimit() > 0 && groovyMap.get(Script.MAX_WALL_TIME) == null) {
            groovyMap.add(Script.MAX_WALL_TIME, GFacUtils.maxWallTimeCalculator(scheduling.getWallTimeLimit()));
            if (resourceJobManager != null) {
                if (resourceJobManager.getResourceJobManagerType().equals(ResourceJobManagerType.LSF)) {
                    groovyMap.add(Script.MAX_WALL_TIME, GFacUtils.maxWallTimeCalculatorForLSF(scheduling.getWallTimeLimit()));
                }
            }
        }
        if (scheduling.getTotalPhysicalMemory() > 0) {
            groovyMap.add(Script.USED_MEM, scheduling.getTotalPhysicalMemory());
        }
        // Per-experiment overrides take precedence over the process-level values set above.
        if (isValid(scheduling.getOverrideLoginUserName())) {
            groovyMap.add(Script.USER_NAME, scheduling.getOverrideLoginUserName());
        }
        if (isValid(scheduling.getOverrideAllocationProjectNumber())) {
            groovyMap.add(Script.ACCOUNT_STRING, scheduling.getOverrideAllocationProjectNumber());
        }
        if (isValid(scheduling.getStaticWorkingDir())) {
            groovyMap.add(Script.WORKING_DIR, scheduling.getStaticWorkingDir());
        }
    } else {
        log.error("Task scheduling cannot be null at this point..");
    }
    ApplicationDeploymentDescription appDepDescription = processContext.getApplicationDeploymentDescription();
    // Commands are sorted by their configured order before rendering.
    List<CommandObject> moduleCmds = appDepDescription.getModuleLoadCmds();
    if (moduleCmds != null) {
        List<String> modulesCmdCollect = moduleCmds.stream().sorted((e1, e2) -> e1.getCommandOrder() - e2.getCommandOrder()).map(map -> map.getCommand()).collect(Collectors.toList());
        groovyMap.add(Script.MODULE_COMMANDS, modulesCmdCollect);
    }
    List<CommandObject> preJobCommands = appDepDescription.getPreJobCommands();
    if (preJobCommands != null) {
        List<String> preJobCmdCollect = preJobCommands.stream().sorted((e1, e2) -> e1.getCommandOrder() - e2.getCommandOrder()).map(map -> parseCommands(map.getCommand(), groovyMap)).collect(Collectors.toList());
        groovyMap.add(Script.PRE_JOB_COMMANDS, preJobCmdCollect);
    }
    List<CommandObject> postJobCommands = appDepDescription.getPostJobCommands();
    if (postJobCommands != null) {
        List<String> postJobCmdCollect = postJobCommands.stream().sorted((e1, e2) -> e1.getCommandOrder() - e2.getCommandOrder()).map(map -> parseCommands(map.getCommand(), groovyMap)).collect(Collectors.toList());
        groovyMap.add(Script.POST_JOB_COMMANDS, postJobCmdCollect);
    }
    // Resolve the launcher command (e.g. mpirun) for non-serial applications.
    ApplicationParallelismType parallelism = appDepDescription.getParallelism();
    if (parallelism != null) {
        if (parallelism != ApplicationParallelismType.SERIAL) {
            Map<ApplicationParallelismType, String> parallelismPrefix = processContext.getResourceJobManager().getParallelismPrefix();
            if (parallelismPrefix != null) {
                String parallelismCommand = parallelismPrefix.get(parallelism);
                if (parallelismCommand != null) {
                    groovyMap.add(Script.JOB_SUBMITTER_COMMAND, parallelismCommand);
                } else {
                    throw new GFacException("Parallelism prefix is not defined for given parallelism type " + parallelism + ".. Please define the parallelism prefix at App Catalog");
                }
            }
        }
    }
    return groovyMap;
}
Use of org.apache.airavata.model.parallelism.ApplicationParallelismType in the apache/airavata project.
Class ComputeResourceImpl, method updateResourceJobManager.
@Override
public void updateResourceJobManager(String resourceJobManagerId, ResourceJobManager updatedResourceJobManager) throws AppCatalogException {
try {
ResourceJobManagerResource resource = AppCatalogThriftConversion.getResourceJobManager(updatedResourceJobManager);
resource.setResourceJobManagerId(resourceJobManagerId);
resource.save();
Map<JobManagerCommand, String> jobManagerCommands = updatedResourceJobManager.getJobManagerCommands();
if (jobManagerCommands != null && jobManagerCommands.size() != 0) {
for (JobManagerCommand commandType : jobManagerCommands.keySet()) {
JobManagerCommandResource r = new JobManagerCommandResource();
Map<String, String> ids = new HashMap<String, String>();
ids.put(AppCatAbstractResource.JobManagerCommandConstants.RESOURCE_JOB_MANAGER_ID, resourceJobManagerId);
ids.put(AppCatAbstractResource.JobManagerCommandConstants.COMMAND_TYPE, commandType.toString());
JobManagerCommandResource existingCommand;
if (r.isExists(ids)) {
existingCommand = (JobManagerCommandResource) r.get(ids);
} else {
existingCommand = new JobManagerCommandResource();
}
if (jobManagerCommands.get(commandType) != null && !jobManagerCommands.get(commandType).isEmpty()) {
existingCommand.setCommandType(commandType.toString());
existingCommand.setCommand(jobManagerCommands.get(commandType));
existingCommand.setResourceJobManagerId(resource.getResourceJobManagerId());
existingCommand.save();
}
}
}
Map<ApplicationParallelismType, String> parallelismPrefix = updatedResourceJobManager.getParallelismPrefix();
if (parallelismPrefix != null && parallelismPrefix.size() != 0) {
for (ApplicationParallelismType commandType : parallelismPrefix.keySet()) {
ParallelismPrefixCommandResource r = new ParallelismPrefixCommandResource();
Map<String, String> ids = new HashMap<String, String>();
ids.put(AppCatAbstractResource.ParallelismCommandConstants.RESOURCE_JOB_MANAGER_ID, resourceJobManagerId);
ids.put(AppCatAbstractResource.ParallelismCommandConstants.COMMAND_TYPE, commandType.toString());
ParallelismPrefixCommandResource existingCommand;
if (r.isExists(ids)) {
existingCommand = (ParallelismPrefixCommandResource) r.get(ids);
} else {
existingCommand = new ParallelismPrefixCommandResource();
}
if (parallelismPrefix.get(commandType) != null && !parallelismPrefix.get(commandType).isEmpty()) {
existingCommand.setCommandType(commandType.toString());
existingCommand.setCommand(parallelismPrefix.get(commandType));
existingCommand.setResourceJobManagerId(resource.getResourceJobManagerId());
existingCommand.save();
}
}
}
} catch (Exception e) {
logger.error("Error while updating resource job manager..", e);
throw new AppCatalogException(e);
}
}
Use of org.apache.airavata.model.parallelism.ApplicationParallelismType in the apache/airavata project.
Class ComputeResourceImpl, method addResourceJobManager.
@Override
public String addResourceJobManager(ResourceJobManager resourceJobManager) throws AppCatalogException {
resourceJobManager.setResourceJobManagerId(AppCatalogUtils.getID("RJM"));
ResourceJobManagerResource resource = AppCatalogThriftConversion.getResourceJobManager(resourceJobManager);
resource.save();
Map<JobManagerCommand, String> jobManagerCommands = resourceJobManager.getJobManagerCommands();
if (jobManagerCommands != null && jobManagerCommands.size() != 0) {
for (JobManagerCommand commandType : jobManagerCommands.keySet()) {
if (jobManagerCommands.get(commandType) != null && !jobManagerCommands.get(commandType).isEmpty()) {
JobManagerCommandResource r = new JobManagerCommandResource();
r.setCommandType(commandType.toString());
r.setCommand(jobManagerCommands.get(commandType));
r.setResourceJobManagerId(resource.getResourceJobManagerId());
r.save();
}
}
}
Map<ApplicationParallelismType, String> parallelismPrefix = resourceJobManager.getParallelismPrefix();
if (parallelismPrefix != null && parallelismPrefix.size() != 0) {
for (ApplicationParallelismType commandType : parallelismPrefix.keySet()) {
if (parallelismPrefix.get(commandType) != null && !parallelismPrefix.get(commandType).isEmpty()) {
ParallelismPrefixCommandResource r = new ParallelismPrefixCommandResource();
r.setCommandType(commandType.toString());
r.setCommand(parallelismPrefix.get(commandType));
r.setResourceJobManagerId(resource.getResourceJobManagerId());
r.save();
}
}
}
return resource.getResourceJobManagerId();
}
Aggregations