Use of edu.iu.dsc.tws.api.driver.NullScaler in project twister2 by DSC-SPIDAL: class MPILauncher, method launch().
/**
 * Launch the given job on an MPI cluster.
 *
 * <p>Distributes the job files when the cluster has no shared file system
 * (otherwise just sets up the working directory), optionally starts the
 * JobMaster in this client process, then runs the MPIController and waits
 * for it to finish.
 *
 * @param job the job definition to launch
 * @return the final job state; {@code requestGranted} reflects whether the
 *         controller started successfully, and the job state/messages are
 *         filled in when the JobMaster ran in this client
 */
@Override
public Twister2JobState launch(JobAPI.Job job) {
  LOG.log(Level.INFO, "Launching job for cluster {0}", MPIContext.clusterType(config));
  Twister2JobState state = new Twister2JobState(false);
  if (!configsOK()) {
    return state;
  }
  // distributing bundle if not running in shared file system
  if (!MPIContext.isSharedFs(config)) {
    LOG.info("Configured as NON SHARED file system. " + "Running bootstrap procedure to distribute files...");
    try {
      this.distributeJobFiles(job);
    } catch (IOException e) {
      LOG.log(Level.SEVERE, "Error in distributing job files", e);
      // preserve the cause so the stack trace shows the underlying I/O failure
      throw new RuntimeException("Error in distributing job files", e);
    }
  } else {
    LOG.info("Configured as SHARED file system. " + "Skipping bootstrap procedure & setting up working directory");
    if (!setupWorkingDirectory(job.getJobId())) {
      throw new RuntimeException("Failed to setup the directory");
    }
  }
  config = Config.newBuilder().putAll(config).put(SchedulerContext.WORKING_DIRECTORY, jobWorkingDirectory).build();
  JobMaster jobMaster = null;
  Thread jmThread = null;
  if (JobMasterContext.isJobMasterUsed(config) && JobMasterContext.jobMasterRunsInClient(config)) {
    // Since the job master is running on client we can collect job information
    state.setDetached(false);
    try {
      int port = NetworkUtils.getFreePort();
      String hostAddress = JobMasterContext.jobMasterIP(config);
      if (hostAddress == null) {
        hostAddress = ResourceSchedulerUtils.getHostIP(config);
      }
      // add the port and ip to config
      config = Config.newBuilder().putAll(config).put("__job_master_port__", port).put("__job_master_ip__", hostAddress).build();
      LOG.log(Level.INFO, String.format("Starting the job master: %s:%d", hostAddress, port));
      JobMasterAPI.NodeInfo jobMasterNodeInfo = NodeInfoUtils.createNodeInfo(hostAddress, "default", "default");
      IScalerPerCluster nullScaler = new NullScaler();
      JobMasterAPI.JobMasterState initialState = JobMasterAPI.JobMasterState.JM_STARTED;
      NullTerminator nt = new NullTerminator();
      jobMaster = new JobMaster(config, "0.0.0.0", port, nt, job, jobMasterNodeInfo, nullScaler, initialState);
      jobMaster.addShutdownHook(true);
      jmThread = jobMaster.startJobMasterThreaded();
    } catch (Twister2Exception e) {
      LOG.log(Level.SEVERE, "Exception when starting Job master: ", e);
      throw new RuntimeException(e);
    }
  }
  final boolean[] start = { false };
  // now start the controller, which will get the resources and start
  Thread controllerThread = new Thread(() -> {
    IController controller = new MPIController(true);
    controller.initialize(config);
    start[0] = controller.start(job);
  });
  controllerThread.setName("MPIController");
  controllerThread.start();
  // wait until the controller finishes
  try {
    controllerThread.join();
  } catch (InterruptedException ignored) {
    // restore the interrupt flag so callers can observe the interruption
    Thread.currentThread().interrupt();
  }
  // now lets wait on client
  if (jmThread != null && JobMasterContext.isJobMasterUsed(config) && JobMasterContext.jobMasterRunsInClient(config)) {
    try {
      jmThread.join();
    } catch (InterruptedException ignored) {
      // restore the interrupt flag so callers can observe the interruption
      Thread.currentThread().interrupt();
    }
  }
  if (jobMaster != null && jobMaster.getDriver() != null) {
    if (jobMaster.getDriver().getState() != DriverJobState.FAILED) {
      state.setJobstate(DriverJobState.COMPLETED);
    } else {
      state.setJobstate(jobMaster.getDriver().getState());
    }
    state.setFinalMessages(jobMaster.getDriver().getMessages());
  }
  state.setRequestGranted(start[0]);
  return state;
}
Use of edu.iu.dsc.tws.api.driver.NullScaler in project twister2 by DSC-SPIDAL: class MPIWorkerStarter, method startMaster().
/**
 * Bring up the JobMaster on this node and block until it finishes.
 * Releases the worker ports first so the JobMaster can bind its own port.
 */
private void startMaster() {
  try {
    // init the logger
    initJMLogger(config);
    // release the port for JM
    NetworkUtils.releaseWorkerPorts();
    int jmPort = JobMasterContext.jobMasterPort(config);
    String jmHost = ResourceSchedulerUtils.getHostIP(config);
    LOG.log(Level.INFO, String.format("Starting the job master: %s:%d", jmHost, jmPort));
    // no node info is available in this context
    JobMasterAPI.NodeInfo nodeInfo = null;
    IScalerPerCluster scaler = new NullScaler();
    NullTerminator terminator = new NullTerminator();
    jobMaster = new JobMaster(config, "0.0.0.0", jmPort, terminator, job, nodeInfo,
        scaler, JobMasterAPI.JobMasterState.JM_STARTED);
    jobMaster.startJobMasterBlocking();
    LOG.log(Level.INFO, "JobMaster done... ");
  } catch (Twister2Exception e) {
    LOG.log(Level.SEVERE, "Exception when starting Job master: ", e);
    throw new RuntimeException(e);
  }
}
Use of edu.iu.dsc.tws.api.driver.NullScaler in project twister2 by DSC-SPIDAL: class MesosJobMasterStarter, method main().
/**
 * Entry point for the Mesos JobMaster container.
 *
 * <p>Loads the job configuration from the environment, registers the
 * JobMaster address in ZooKeeper, optionally starts the JobMaster in this
 * process, and cleans up the ZooKeeper node on exit.
 *
 * @param args unused command-line arguments
 */
public static void main(String[] args) {
  // we can not initialize the logger fully yet,
  // but we need to set the format as the first thing
  // NOTE(review): homeDir and workerId are currently unused, but parsing
  // WORKER_ID also validates that the environment is set — kept on purpose
  String homeDir = System.getenv("HOME");
  int workerId = Integer.parseInt(System.getenv("WORKER_ID"));
  String jobName = System.getenv("JOB_NAME");
  String jobId = System.getenv("JOB_ID");
  String twister2Home = Paths.get("").toAbsolutePath().toString();
  String configDir = "twister2-job";
  Config config = ConfigLoader.loadConfig(twister2Home, configDir, "mesos");
  Config.Builder builder = Config.newBuilder().putAll(config);
  builder.put(Context.JOB_ID, jobId);
  config = builder.build();
  JobTerminator terminator = new JobTerminator(config, System.getenv("FRAMEWORK_ID"));
  MesosWorkerLogger logger = new MesosWorkerLogger(config, "/persistent-volume/logs", "master");
  logger.initLogging();
  edu.iu.dsc.tws.rsched.schedulers.mesos.MesosController controller;
  controller = new edu.iu.dsc.tws.rsched.schedulers.mesos.MesosController(config);
  JobAPI.Job job = JobUtils.readJobFile("twister2-job/" + jobName + ".job");
  // this block is for ZKjobmaster register
  ZKJobMasterRegistrar registrar;
  try {
    registrar = new ZKJobMasterRegistrar(config, Inet4Address.getLocalHost().getHostAddress(), 11011, job.getJobId());
    LOG.info("JobMaster REGISTERED..:" + Inet4Address.getLocalHost().getHostAddress());
  } catch (UnknownHostException e) {
    LOG.info("JobMaster CAN NOT BE REGISTERED:");
    // without a registrar every later step would NPE; fail fast with the cause
    LOG.log(Level.SEVERE, "Could not resolve local host address", e);
    throw new RuntimeException("JobMaster can not be registered", e);
  }
  boolean initialized = registrar.initialize();
  if (!initialized) {
    LOG.info("CAN NOT INITIALIZE");
  }
  // a stale znode from a previous run blocks registration; remove and retry
  if (!initialized && registrar.sameZNodeExist()) {
    registrar.deleteJobMasterZNode();
    registrar.initialize();
  }
  if (!JobMasterContext.jobMasterRunsInClient(config)) {
    JobMaster jobMaster;
    try {
      String workerIp = Inet4Address.getLocalHost().getHostAddress();
      JobMasterAPI.NodeInfo jobMasterNodeInfo = MesosContext.getNodeInfo(config, workerIp);
      IScalerPerCluster clusterScaler = new NullScaler();
      MesosScaler mesosScaler = new MesosScaler(config, job, controller);
      mesosScaler.setFrameWorkId(System.getenv("FRAMEWORK_ID"));
      JobMasterAPI.JobMasterState initialState = JobMasterAPI.JobMasterState.JM_STARTED;
      jobMaster = new JobMaster(config, InetAddress.getLocalHost().getHostAddress(), terminator, job, jobMasterNodeInfo, clusterScaler, initialState);
      LOG.info("JobMaster host address...:" + InetAddress.getLocalHost().getHostAddress());
      jobMaster.startJobMasterBlocking();
    } catch (Exception e) {
      LOG.log(Level.SEVERE, "Exception when getting local host address: ", e);
    }
  }
  waitIndefinitely();
  registrar.deleteJobMasterZNode();
  registrar.close();
}
Use of edu.iu.dsc.tws.api.driver.NullScaler in project twister2 by DSC-SPIDAL: class NomadJobMasterStarter, method launch().
/**
 * Launch the JobMaster for a Nomad-scheduled job.
 *
 * <p>Registers the JobMaster address in ZooKeeper, runs the JobMaster
 * blocking until it completes, removes the ZooKeeper node, and finally
 * starts the controller.
 *
 * @return false if setup fails
 */
public boolean launch() {
  String indexEnv = System.getenv("NOMAD_ALLOC_INDEX");
  String idEnv = System.getenv("NOMAD_ALLOC_ID");
  // parseInt avoids the needless Integer boxing of Integer.valueOf
  int workerID = Integer.parseInt(indexEnv);
  MPIWorkerStarter.initJMLogger(config);
  LOG.log(Level.INFO, String.format("Worker id = %s and index = %d", idEnv, workerID));
  int port = JobMasterContext.jobMasterPort(config);
  String hostAddress;
  try {
    hostAddress = Inet4Address.getLocalHost().getHostAddress();
  } catch (UnknownHostException e) {
    // no usable address means registration and the JobMaster bind would fail
    LOG.log(Level.SEVERE, "Could not resolve local host address", e);
    throw new RuntimeException("Could not resolve local host address", e);
  }
  ZKJobMasterRegistrar registrar;
  try {
    registrar = new ZKJobMasterRegistrar(config, hostAddress, port, job.getJobId());
    LOG.info("JobMaster REGISTERED..:" + hostAddress);
  } catch (Exception e) {
    LOG.info("JobMaster CAN NOT BE REGISTERED:");
    // without a registrar every later step would NPE; fail fast with the cause
    LOG.log(Level.SEVERE, "JobMaster registration failed", e);
    throw new RuntimeException("JobMaster can not be registered", e);
  }
  boolean initialized = registrar.initialize();
  if (!initialized) {
    LOG.info("CAN NOT INITIALIZE");
  }
  // a stale znode from a previous run blocks registration; remove and retry
  if (!initialized && registrar.sameZNodeExist()) {
    registrar.deleteJobMasterZNode();
    registrar.initialize();
  }
  // start the Job Master locally
  JobMaster jobMaster = null;
  JobMasterAPI.NodeInfo jobMasterNodeInfo = NomadContext.getNodeInfo(config, hostAddress);
  IScalerPerCluster clusterScaler = new NullScaler();
  int workerCount = job.getNumberOfWorkers();
  LOG.info("Worker Count..: " + workerCount);
  LOG.log(Level.INFO, String.format("Starting the Job Master: %s:%d", hostAddress, port));
  JobMasterAPI.JobMasterState initialState = JobMasterAPI.JobMasterState.JM_STARTED;
  NullTerminator nt = new NullTerminator();
  jobMaster = new JobMaster(config, hostAddress, nt, job, jobMasterNodeInfo, clusterScaler, initialState);
  jobMaster.addShutdownHook(true);
  try {
    jobMaster.startJobMasterBlocking();
  } catch (Twister2Exception e) {
    LOG.log(Level.SEVERE, e.getMessage(), e);
  }
  waitIndefinitely();
  registrar.deleteJobMasterZNode();
  registrar.close();
  boolean start = controller.start(job);
  return start;
}
Use of edu.iu.dsc.tws.api.driver.NullScaler in project twister2 by DSC-SPIDAL: class NomadMasterStarter, method launch().
/**
 * Launch the job through the Nomad controller, optionally running the
 * JobMaster inside this client process.
 *
 * @return false if setup fails
 */
public boolean launch() {
  // get the job working directory
  String jobWorkingDirectory = NomadContext.workingDirectory(config);
  LOG.log(Level.INFO, "job working directory ....." + jobWorkingDirectory);
  if (NomadContext.sharedFileSystem(config)) {
    if (!setupWorkingDirectory(job, jobWorkingDirectory)) {
      throw new RuntimeException("Failed to setup the directory");
    }
  }
  Config newConfig = Config.newBuilder().putAll(config).put(SchedulerContext.WORKING_DIRECTORY, jobWorkingDirectory).build();
  // now start the controller, which will get the resources from
  // slurm and start the job
  IController controller = new NomadController(true);
  controller.initialize(newConfig);
  // start the Job Master locally
  JobMaster jobMaster = null;
  Thread jmThread = null;
  if (JobMasterContext.jobMasterRunsInClient(config)) {
    try {
      int port = JobMasterContext.jobMasterPort(config);
      String hostAddress = JobMasterContext.jobMasterIP(config);
      if (hostAddress == null) {
        hostAddress = InetAddress.getLocalHost().getHostAddress();
      }
      LOG.log(Level.INFO, String.format("Starting the job manager: %s:%d", hostAddress, port));
      JobMasterAPI.NodeInfo jobMasterNodeInfo = NomadContext.getNodeInfo(config, hostAddress);
      IScalerPerCluster clusterScaler = new NullScaler();
      JobMasterAPI.JobMasterState initialState = JobMasterAPI.JobMasterState.JM_STARTED;
      NullTerminator nt = new NullTerminator();
      jobMaster = new JobMaster(config, hostAddress, nt, job, jobMasterNodeInfo, clusterScaler, initialState);
      jobMaster.addShutdownHook(true);
      jmThread = jobMaster.startJobMasterThreaded();
    } catch (UnknownHostException e) {
      LOG.log(Level.SEVERE, "Exception when getting local host address: ", e);
      throw new RuntimeException(e);
    } catch (Twister2Exception e) {
      LOG.log(Level.SEVERE, "Exception when starting Job master: ", e);
      throw new RuntimeException(e);
    }
  }
  boolean start = controller.start(job);
  // now lets wait on client
  if (JobMasterContext.jobMasterRunsInClient(config)) {
    try {
      if (jmThread != null) {
        jmThread.join();
      }
    } catch (InterruptedException ignored) {
      // restore the interrupt flag so callers can observe the interruption
      Thread.currentThread().interrupt();
    }
  }
  return start;
}
Aggregations