Use of com.hashicorp.nomad.apimodel.Job in the project twister2 by DSC-SPIDAL.
From the class NomadController, method getJob.
/**
 * Translates a Twister2 {@code JobAPI.Job} into a Nomad batch-job specification.
 *
 * @param job the Twister2 job definition to submit
 * @return a populated Nomad {@link Job} ready to be registered
 */
private Job getJob(JobAPI.Job job) {
  String twister2JobId = job.getJobId();
  Job spec = new Job();
  spec.setId(twister2JobId);
  // The Nomad job name mirrors the Twister2 job id.
  spec.setName(twister2JobId);
  // "batch" = run-to-completion scheduling in Nomad.
  spec.setType("batch");
  spec.setDatacenters(Arrays.asList(NomadContext.NOMAD_DEFAULT_DATACENTER));
  spec.setMeta(getMetaData(job));
  spec.addTaskGroups(getTaskGroup(job));
  return spec;
}
Use of com.hashicorp.nomad.apimodel.Job in the project incubator-heron by apache.
From the class NomadScheduler, method onKill.
@Override
public boolean onKill(Scheduler.KillTopologyRequest request) {
String topologyName = request.getTopologyName();
LOG.fine("Killing Topology " + topologyName);
NomadApiClient apiClient = getApiClient(NomadContext.getSchedulerURI(this.localConfig));
try {
List<Job> jobs = getTopologyJobs(apiClient, topologyName);
killJobs(apiClient, jobs.toArray(new Job[jobs.size()]));
} catch (RuntimeException e) {
LOG.log(Level.SEVERE, "Failed to kill topology " + topologyName + " with error: " + e.getMessage(), e);
return false;
} finally {
closeClient(apiClient);
}
return true;
}
Use of com.hashicorp.nomad.apimodel.Job in the project incubator-heron by apache.
From the class NomadScheduler, method getTopologyJobs.
/**
 * Fetches full job details for every Nomad job whose metadata marks it as part of
 * the given topology.
 *
 * @param apiClient    client used to query the Nomad API
 * @param topologyName name of the topology whose jobs should be returned
 * @return the list of matching jobs (possibly empty)
 * @throws RuntimeException if job details cannot be retrieved from Nomad
 */
static List<Job> getTopologyJobs(NomadApiClient apiClient, String topologyName) {
  List<JobListStub> jobs = getJobList(apiClient);
  List<Job> ret = new LinkedList<>();
  for (JobListStub job : jobs) {
    Job jobActual;
    try {
      jobActual = apiClient.getJobsApi().info(job.getId()).getValue();
    } catch (IOException | NomadException e) {
      String msg = "Failed to retrieve job info for job " + job.getId()
          + " part of topology " + topologyName;
      LOG.log(Level.SEVERE, msg, e);
      throw new RuntimeException(msg, e);
    }
    Map<String, String> metaData = jobActual.getMeta();
    // Compare with the constant on the left-hand side: the previous form called
    // equals() on the stored meta value, which NPEs if that value is null.
    if (metaData != null
        && topologyName.equals(metaData.get(NomadConstants.NOMAD_TOPOLOGY_NAME))) {
      ret.add(jobActual);
    }
  }
  return ret;
}
Use of com.hashicorp.nomad.apimodel.Job in the project incubator-heron by apache.
From the class NomadScheduler, method onRestart.
/**
 * Restarts a topology, either in its entirety (container index -1) or a single
 * container's job.
 *
 * @param request the restart request carrying the topology name and container index
 * @return {@code true} if the restart succeeded, {@code false} on failure
 */
@Override
public boolean onRestart(Scheduler.RestartTopologyRequest request) {
  String topologyName = request.getTopologyName();
  int containerIndex = request.getContainerIndex();
  LOG.fine("Restarting Topology " + topologyName + " container " + containerIndex);
  NomadApiClient apiClient = getApiClient(NomadContext.getSchedulerURI(this.localConfig));
  try {
    if (containerIndex != -1) {
      // restarting single container
      Job containerJob = getTopologyContainerJob(apiClient, topologyName, containerIndex);
      restartJobs(apiClient, containerJob);
    } else {
      // restarting whole topology
      List<Job> topologyJobs = getTopologyJobs(apiClient, topologyName);
      restartJobs(apiClient, topologyJobs.toArray(new Job[topologyJobs.size()]));
    }
    return true;
  } catch (RuntimeException e) {
    LOG.log(Level.SEVERE,
        "Failed to restart topology " + topologyName + " with error: " + e.getMessage(), e);
    return false;
  } finally {
    // Always release the HTTP client, success or failure.
    closeClient(apiClient);
  }
}
Use of com.hashicorp.nomad.apimodel.Job in the project incubator-heron by apache.
From the class NomadScheduler, method getJob.
/**
 * Builds the Nomad job specification for one Heron container.
 *
 * @param containerIndex    index of the container this job will run
 * @param containerPlan     optional packing plan for the container
 * @param containerResource resources to allocate to the container
 * @return a populated Nomad {@link Job} for the container
 */
Job getJob(int containerIndex, Optional<PackingPlan.ContainerPlan> containerPlan, Resource containerResource) {
  String topologyName = Runtime.topologyName(this.runtimeConfig);
  String topologyId = Runtime.topologyId(this.runtimeConfig);
  // The per-container job name is derived from the topology name; compute it once.
  String jobName = getJobId(topologyName, containerIndex);
  Job containerJob = new Job();
  // Id is keyed on the unique topology id, name on the human-readable topology name.
  containerJob.setId(getJobId(topologyId, containerIndex));
  containerJob.setName(jobName);
  containerJob.setDatacenters(Arrays.asList(NomadConstants.NOMAD_DEFAULT_DATACENTER));
  containerJob.setMeta(getMetaData(this.runtimeConfig, containerPlan));
  containerJob.addTaskGroups(getTaskGroup(jobName, containerIndex, containerResource));
  return containerJob;
}
Aggregations