use of org.ow2.proactive.scheduler.common.Scheduler in project scheduling by ow2-proactive.
the class ThirdPartyCredentialKeySetCommand method execute.
@Override
public void execute(ApplicationContext currentContext) throws CLIException {
    SchedulerRestInterface scheduler = currentContext.getRestClient().getScheduler();
    try {
        Set<String> credentialsKeySet = scheduler.thirdPartyCredentialsKeySet(currentContext.getSessionId());
        resultStack(currentContext).push(credentialsKeySet);
        if (!currentContext.isSilent()) {
            writeLine(currentContext, "%s", StringUtility.credentialsKeysAsString(credentialsKeySet));
        }
    } catch (Exception e) {
        handleError("An error occurred while listing third-party credentials:", e, currentContext);
    }
}
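The command pushes its result onto the CLI's result stack so that follow-up commands can reuse it, and only prints when the context is not silent. Below is a minimal standalone sketch of that result-stack idea using a plain Deque; the real ApplicationContext, session handling and StringUtility formatting are not reproduced here.

// Standalone sketch of the result-stack pattern, not the project's ApplicationContext:
// each command pushes its result so that later commands (or a script driving the CLI)
// can pop and reuse it; printing is skipped when the context is silent.
import java.util.ArrayDeque;
import java.util.Arrays;
import java.util.Deque;
import java.util.Set;
import java.util.TreeSet;

public class ResultStackSketch {

    public static void main(String[] args) {
        Deque<Object> resultStack = new ArrayDeque<>();
        boolean silent = false;

        // stand-in for scheduler.thirdPartyCredentialsKeySet(sessionId)
        Set<String> credentialKeys = new TreeSet<>(Arrays.asList("DB_PASSWORD", "SSH_KEY"));

        resultStack.push(credentialKeys);
        if (!silent) {
            System.out.println(String.join(", ", credentialKeys));
        }

        // a later command can pick the previous result up again
        @SuppressWarnings("unchecked")
        Set<String> lastResult = (Set<String>) resultStack.peek();
        System.out.println("Previous result holds " + lastResult.size() + " keys");
    }
}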
use of org.ow2.proactive.scheduler.common.Scheduler in project scheduling by ow2-proactive.
the class AbstractFunctCmdTest method submitJob.
protected JobId submitJob(String filename, JobStatus waitForStatus) throws Exception {
    File jobFile = new File(this.getClass().getResource("config/" + filename).toURI());
    WorkflowSubmitter submitter = new WorkflowSubmitter(scheduler);
    JobId id = submitter.submit(jobFile, new HashMap<String, String>());
    waitJobState(id, waitForStatus, 500000);
    return id;
}
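A hypothetical use of this helper in a functional test is sketched below. The test class name, the workflow file name and the assertion are illustrative assumptions; only the submitJob(...) helper comes from the snippet above, and it blocks until the job reaches the requested status or the timeout expires.

// Hypothetical usage of submitJob(...) in a functional test. The class name,
// the workflow file name ("simple_job.xml") and the assertion are illustrative
// assumptions, not part of the project.
import static org.junit.Assert.assertNotNull;

import org.junit.Test;
import org.ow2.proactive.scheduler.common.job.JobId;
import org.ow2.proactive.scheduler.common.job.JobStatus;

public class SubmitAndWaitFunctTest extends AbstractFunctCmdTest {

    @Test
    public void jobReachesFinishedState() throws Exception {
        // workflow expected under the test resources' "config/" folder
        JobId id = submitJob("simple_job.xml", JobStatus.FINISHED);
        assertNotNull(id);
    }
}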
use of org.ow2.proactive.scheduler.common.Scheduler in project scheduling by ow2-proactive.
the class AbstractFunctCmdTest method cleanScheduler.
protected void cleanScheduler() throws NotConnectedException, PermissionException, UnknownJobException {
    scheduler = RestFuncTHelper.getScheduler();
    SchedulerState state = scheduler.getState();
    System.out.println("Cleaning scheduler.");
    List<JobState> aliveJobsStates = new ArrayList<>(state.getPendingJobs().size() + state.getRunningJobs().size());
    aliveJobsStates.addAll(state.getPendingJobs());
    aliveJobsStates.addAll(state.getRunningJobs());
    List<JobState> finishedJobsStates = new ArrayList<>(state.getFinishedJobs().size());
    finishedJobsStates.addAll(state.getFinishedJobs());
    for (JobState jobState : aliveJobsStates) {
        JobId jobId = jobState.getId();
        try {
            System.out.println("Killing job " + jobId);
            scheduler.killJob(jobId);
        } catch (Exception ignored) {
            // the job may already have terminated between getState() and killJob()
        }
        System.out.println("Removing killed job " + jobId);
        scheduler.removeJob(jobId);
    }
    for (JobState jobState : finishedJobsStates) {
        JobId jobId = jobState.getId();
        System.out.println("Removing finished job " + jobId);
        scheduler.removeJob(jobId);
    }
}
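A hypothetical sketch of how this cleanup helper might be wired into a test fixture, assuming a JUnit 4 style @Before hook; the concrete test class below is invented for illustration.

// Hypothetical wiring of cleanScheduler() into a test fixture; the class name
// is an assumption, only cleanScheduler() comes from the snippet above.
import org.junit.Before;

public class CleanStartFunctTest extends AbstractFunctCmdTest {

    @Before
    public void emptyScheduler() throws Exception {
        // kill pending/running jobs and remove everything before each test
        cleanScheduler();
    }
}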
use of org.ow2.proactive.scheduler.common.Scheduler in project scheduling by ow2-proactive.
the class SchedulerStateListener method getSchedulerStatus.
public SchedulerStatus getSchedulerStatus(Scheduler scheduler) throws PermissionException, NotConnectedException {
    SchedulerStatus status = state.getStatus();
    if (status == null) {
        status = scheduler.getStatus();
        state.setStatus(status);
    }
    return status;
}
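The method lazily caches the remote status: the first call hits the scheduler, later calls reuse the stored value. A standalone sketch of the same caching idea, using a generic Supplier-based wrapper that is not part of the project:

// Standalone sketch of the lazy-caching idea (illustrative only): the value is
// fetched from the source exactly once and reused on later calls.
import java.util.function.Supplier;

public final class CachedValue<T> {

    private final Supplier<T> source; // e.g. a wrapper around scheduler.getStatus()
    private T cached;

    public CachedValue(Supplier<T> source) {
        this.source = source;
    }

    public synchronized T get() {
        if (cached == null) {
            // the potentially remote call happens only on the first access
            cached = source.get();
        }
        return cached;
    }
}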
use of org.ow2.proactive.scheduler.common.Scheduler in project scheduling by ow2-proactive.
the class TaskLauncher method doTask.
public void doTask(ExecutableContainer executableContainer, TaskResult[] previousTasksResults,
        TaskTerminateNotification terminateNotification, String terminateNotificationNodeURL,
        boolean taskRecoverable) {
    TaskResultImpl taskResult;
    WallTimer wallTimer = null;
    TaskContext context = null;
    Stopwatch taskStopwatchForFailures = null;
    TaskDataspaces dataspaces = null;
    try {
        taskStarted.set(true);
        logger.info("Task started " + taskId.getJobId().getReadableName() + " : " + taskId.getReadableName());
        this.taskKiller = this.replaceTaskKillerWithDoubleTimeoutValueIfRunAsMe(executableContainer.isRunAsUser());
        wallTimer = new WallTimer(initializer.getWalltime(), taskKiller);
        taskStopwatchForFailures = Stopwatch.createUnstarted();
        taskLauncherRebinder = new TaskLauncherRebinder(taskId, terminateNotificationNodeURL, taskRecoverable);
        addShutdownHook();
        // lock the cache space cleaning mechanism
        DataSpaceNodeConfigurationAgent.lockCacheSpaceCleaning();
        dataspaces = factory.createTaskDataspaces(taskId, initializer.getNamingService(), executableContainer.isRunAsUser());
        File taskLogFile = taskLogger.createFileAppender(dataspaces.getScratchFolder());
        progressFileReader.start(dataspaces.getScratchFolder(), taskId);
        context = new TaskContext(executableContainer,
                                  initializer,
                                  previousTasksResults,
                                  new NodeDataSpacesURIs(dataspaces.getScratchURI(),
                                                         dataspaces.getCacheURI(),
                                                         dataspaces.getInputURI(),
                                                         dataspaces.getOutputURI(),
                                                         dataspaces.getUserURI(),
                                                         dataspaces.getGlobalURI()),
                                  progressFileReader.getProgressFile().toString(),
                                  getHostname(),
                                  decrypter);
        File workingDir = getTaskWorkingDir(context, dataspaces);
        logger.info("Task working dir: " + workingDir);
        logger.info("Cache space: " + context.getNodeDataSpaceURIs().getCacheURI());
        logger.info("Input space: " + context.getNodeDataSpaceURIs().getInputURI());
        logger.info("Output space: " + context.getNodeDataSpaceURIs().getOutputURI());
        logger.info("User space: " + context.getNodeDataSpaceURIs().getUserURI());
        logger.info("Global space: " + context.getNodeDataSpaceURIs().getGlobalURI());
        logger.info("Scheduler rest url: " + context.getSchedulerRestUrl());
        wallTimer.start();
        // should handle interrupt
        dataspaces.copyInputDataToScratch(initializer.getFilteredInputFiles(fileSelectorsFilters(context)));
        if (decrypter != null) {
            decrypter.setCredentials(executableContainer.getCredentials());
        }
        TaskExecutor taskExecutor = factory.createTaskExecutor(workingDir);
        taskStopwatchForFailures.start();
        taskResult = taskExecutor.execute(context, taskLogger.getOutputSink(), taskLogger.getErrorSink());
        taskStopwatchForFailures.stop();
        // by the time the task finishes, the scheduler might have had a
        // transient failure, so we need to make sure that the placeholder
        // for the task's result still exists, or get the new place for
        // the result if it does not exist anymore.
        TaskTerminateNotification rebindedTerminateNotification = taskLauncherRebinder.makeSureSchedulerIsConnected(terminateNotification);
        switch (taskKiller.getStatus()) {
            case WALLTIME_REACHED:
                taskResult = getWalltimedTaskResult(context, taskStopwatchForFailures);
                sendResultToScheduler(rebindedTerminateNotification, taskResult);
                return;
            case KILLED_MANUALLY:
                // killed by Scheduler, no need to send results back
                return;
        }
        dataspaces.copyScratchDataToOutput(initializer.getFilteredOutputFiles(fileSelectorsFilters(context, taskResult)));
        wallTimer.stop();
        copyTaskLogsToUserSpace(taskLogFile, dataspaces);
        taskResult.setLogs(taskLogger.getLogs());
        sendResultToScheduler(rebindedTerminateNotification, taskResult);
    } catch (Throwable taskFailure) {
        if (wallTimer != null) {
            wallTimer.stop();
        }
        switch (taskKiller.getStatus()) {
            case WALLTIME_REACHED:
                taskResult = getWalltimedTaskResult(context, taskStopwatchForFailures);
                sendResultToScheduler(terminateNotification, taskResult);
                break;
            case KILLED_MANUALLY:
                // killed by Scheduler, no need to send results back
                return;
            default:
                logger.info("Failed to execute task", taskFailure);
                long elapsedTime = 0;
                if (taskStopwatchForFailures != null) {
                    elapsedTime = taskStopwatchForFailures.elapsed(TimeUnit.MILLISECONDS);
                }
                taskFailure.printStackTrace(taskLogger.getErrorSink());
                Map<String, byte[]> serializedVariables = extractVariablesFromContext(context);
                taskResult = new TaskResultImpl(taskId, taskFailure, taskLogger.getLogs(), elapsedTime);
                taskResult.setPropagatedVariables(serializedVariables);
                sendResultToScheduler(terminateNotification, taskResult);
        }
    } finally {
        try {
            progressFileReader.stop();
            taskLogger.close();
            if (dataspaces != null) {
                dataspaces.close();
            }
            // unlocks the cache space cleaning thread
            DataSpaceNodeConfigurationAgent.unlockCacheSpaceCleaning();
            removeShutdownHook();
        } finally {
            terminate();
        }
    }
}
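doTask() times the execution with a Guava Stopwatch so that, when the task fails, the elapsed time can still be attached to the failure result. A small standalone sketch of that timing pattern, with runTask() standing in for the real taskExecutor.execute(...) call:

// Standalone sketch of the failure-timing pattern used in doTask(): a Guava
// Stopwatch is started right before execution and, on failure, the elapsed
// time is read so it can be reported alongside the error. runTask() is a
// placeholder, not a project method.
import java.util.concurrent.TimeUnit;

import com.google.common.base.Stopwatch;

public class FailureTimingSketch {

    public static void main(String[] args) {
        Stopwatch stopwatch = Stopwatch.createUnstarted();
        try {
            stopwatch.start();
            runTask();
            stopwatch.stop();
        } catch (Exception failure) {
            // elapsed() can be read even while the stopwatch is still running
            long elapsedMillis = stopwatch.elapsed(TimeUnit.MILLISECONDS);
            System.err.println("Task failed after " + elapsedMillis + " ms: " + failure.getMessage());
        }
    }

    private static void runTask() throws Exception {
        throw new Exception("simulated task failure");
    }
}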