use of edu.iu.dsc.tws.rsched.schedulers.NullTerminator in project twister2 by DSC-SPIDAL.
the class MPILauncher method launch.
@Override
public Twister2JobState launch(JobAPI.Job job) {
  LOG.log(Level.INFO, "Launching job for cluster {0}", MPIContext.clusterType(config));
  Twister2JobState state = new Twister2JobState(false);
  if (!configsOK()) {
    return state;
  }
  // distribute the bundle if not running on a shared file system
  if (!MPIContext.isSharedFs(config)) {
    LOG.info("Configured as NON SHARED file system. "
        + "Running bootstrap procedure to distribute files...");
    try {
      this.distributeJobFiles(job);
    } catch (IOException e) {
      LOG.log(Level.SEVERE, "Error in distributing job files", e);
      throw new RuntimeException("Error in distributing job files");
    }
  } else {
    LOG.info("Configured as SHARED file system. "
        + "Skipping bootstrap procedure & setting up working directory");
    if (!setupWorkingDirectory(job.getJobId())) {
      throw new RuntimeException("Failed to setup the directory");
    }
  }
  config = Config.newBuilder().putAll(config)
      .put(SchedulerContext.WORKING_DIRECTORY, jobWorkingDirectory).build();
  JobMaster jobMaster = null;
  Thread jmThread = null;
  if (JobMasterContext.isJobMasterUsed(config)
      && JobMasterContext.jobMasterRunsInClient(config)) {
    // since the job master is running in the client, we can collect job information
    state.setDetached(false);
    try {
      int port = NetworkUtils.getFreePort();
      String hostAddress = JobMasterContext.jobMasterIP(config);
      if (hostAddress == null) {
        hostAddress = ResourceSchedulerUtils.getHostIP(config);
      }
      // add the port and IP to the config
      config = Config.newBuilder().putAll(config)
          .put("__job_master_port__", port)
          .put("__job_master_ip__", hostAddress).build();
      LOG.log(Level.INFO, String.format("Starting the job master: %s:%d", hostAddress, port));
      JobMasterAPI.NodeInfo jobMasterNodeInfo =
          NodeInfoUtils.createNodeInfo(hostAddress, "default", "default");
      IScalerPerCluster nullScaler = new NullScaler();
      JobMasterAPI.JobMasterState initialState = JobMasterAPI.JobMasterState.JM_STARTED;
      NullTerminator nt = new NullTerminator();
      jobMaster = new JobMaster(config, "0.0.0.0", port, nt, job,
          jobMasterNodeInfo, nullScaler, initialState);
      jobMaster.addShutdownHook(true);
      jmThread = jobMaster.startJobMasterThreaded();
    } catch (Twister2Exception e) {
      LOG.log(Level.SEVERE, "Exception when starting Job master: ", e);
      throw new RuntimeException(e);
    }
  }
  final boolean[] start = {false};
  // now start the controller, which will acquire the resources and start the job
  Thread controllerThread = new Thread(() -> {
    IController controller = new MPIController(true);
    controller.initialize(config);
    start[0] = controller.start(job);
  });
  controllerThread.setName("MPIController");
  controllerThread.start();
  // wait until the controller finishes
  try {
    controllerThread.join();
  } catch (InterruptedException ignore) {
  }
  // now let's wait on the client
  if (jmThread != null && JobMasterContext.isJobMasterUsed(config)
      && JobMasterContext.jobMasterRunsInClient(config)) {
    try {
      jmThread.join();
    } catch (InterruptedException ignore) {
    }
  }
  if (jobMaster != null && jobMaster.getDriver() != null) {
    if (jobMaster.getDriver().getState() != DriverJobState.FAILED) {
      state.setJobstate(DriverJobState.COMPLETED);
    } else {
      state.setJobstate(jobMaster.getDriver().getState());
    }
    state.setFinalMessages(jobMaster.getDriver().getMessages());
  }
  state.setRequestGranted(start[0]);
  return state;
}
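Every snippet on this page passes a NullTerminator into the JobMaster constructor. For reference, a do-nothing terminator boils down to the sketch below; the exact IJobTerminator signature is an assumption here, so check the twister2 sources for the real interface.

// Hedged sketch of a no-op IJobTerminator; the terminateJob signature
// is assumed, not taken from the twister2 sources.
public class NullTerminator implements IJobTerminator {
  @Override
  public boolean terminateJob(String jobID) {
    // nothing to terminate: the launcher owns the worker processes and
    // cleans them up itself, so reporting success is safe
    return true;
  }
}

This is why launchers such as MPILauncher, which manage worker processes themselves, can hand the job master a terminator that does nothing.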
use of edu.iu.dsc.tws.rsched.schedulers.NullTerminator in project twister2 by DSC-SPIDAL.
the class MPIWorkerStarter method startMaster.
/**
* Start the JobMaster
*/
private void startMaster() {
  try {
    // init the logger
    initJMLogger(config);
    // release the worker ports before the job master binds its own port
    NetworkUtils.releaseWorkerPorts();
    int port = JobMasterContext.jobMasterPort(config);
    String hostAddress = ResourceSchedulerUtils.getHostIP(config);
    LOG.log(Level.INFO, String.format("Starting the job master: %s:%d", hostAddress, port));
    JobMasterAPI.NodeInfo jobMasterNodeInfo = null;
    IScalerPerCluster clusterScaler = new NullScaler();
    JobMasterAPI.JobMasterState initialState = JobMasterAPI.JobMasterState.JM_STARTED;
    NullTerminator nt = new NullTerminator();
    jobMaster = new JobMaster(config, "0.0.0.0", port, nt, job,
        jobMasterNodeInfo, clusterScaler, initialState);
    jobMaster.startJobMasterBlocking();
    LOG.log(Level.INFO, "JobMaster done... ");
  } catch (Twister2Exception e) {
    LOG.log(Level.SEVERE, "Exception when starting Job master: ", e);
    throw new RuntimeException(e);
  }
}
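Two start styles appear across these snippets: MPIWorkerStarter and NomadJobMasterStarter block the calling thread with startJobMasterBlocking, while MPILauncher and NomadMasterStarter use startJobMasterThreaded and join the returned thread later. A minimal sketch of the threaded pattern, using only calls that appear on this page:

// assumed context: a fully constructed JobMaster, as in the snippets above
try {
  Thread jmThread = jobMaster.startJobMasterThreaded();
  // ... launch and monitor workers while the job master runs ...
  jmThread.join();  // wait for the job master to finish
} catch (Twister2Exception | InterruptedException e) {
  LOG.log(Level.SEVERE, "Job master failed: ", e);
}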
use of edu.iu.dsc.tws.rsched.schedulers.NullTerminator in project twister2 by DSC-SPIDAL.
the class JobMasterExample method main.
/**
 * This main method is for local testing only.
 * A JobMaster instance is started locally on the default port:
 * edu.iu.dsc.tws.master.JobMasterContext.JOB_MASTER_PORT_DEFAULT = 11011
 * <p>
 * numberOfWorkers to join is expected as a parameter
 * <p>
 * When all workers have joined and all have sent completed messages,
 * this server also completes and exits
 * <p>
 * An example usage of JobMaster can be seen in:
 * edu.iu.dsc.tws.rsched.schedulers.k8s.master.JobMasterStarter
 */
public static void main(String[] args) {
  if (args.length != 1) {
    LOG.info("usage: java JobMasterExample numberOfWorkers");
    return;
  }
  int numberOfWorkers = Integer.parseInt(args[0]);
  String host = "0.0.0.0";
  // we assume that the twister2Home is the current directory
  // String configDir = "../twister2/config/src/yaml/";
  String configDir = "";
  String twister2Home = Paths.get(configDir).toAbsolutePath().toString();
  Config config = ConfigLoader.loadConfig(twister2Home, "conf", "kubernetes");
  config = JobMasterClientExample.updateConfig(config, config, host);
  LOG.info("Loaded: " + config.size() + " configuration parameters.");
  // Twister2Job twister2Job = Twister2Job.loadTwister2Job(config, null);
  Twister2Job twister2Job = Twister2Job.newBuilder()
      .setJobName("hello-world-job")
      .setWorkerClass(HelloWorld.class)
      .addComputeResource(.2, 128, numberOfWorkers)
      .build();
  twister2Job.setUserName(System.getProperty("user.name"));
  JobAPI.Job job = twister2Job.serialize();
  LOG.info("JobID: " + job.getJobId());
  JobMasterAPI.JobMasterState initialState = JobMasterAPI.JobMasterState.JM_STARTED;
  JobMasterStarter.job = job;
  if (ZKContext.isZooKeeperServerUsed(config)) {
    // note: in this branch args[0] is interpreted as a start/restart flag
    if ("start".equalsIgnoreCase(args[0])) {
      JobMasterStarter.initializeZooKeeper(config, job.getJobId(), host, initialState);
    } else if ("restart".equalsIgnoreCase(args[0])) {
      initialState = JobMasterAPI.JobMasterState.JM_RESTARTED;
      JobMasterStarter.initializeZooKeeper(config, job.getJobId(), host, initialState);
      job = JobMasterStarter.job;
    } else {
      LOG.info("usage: java JobMasterExample start/restart");
      return;
    }
  }
  // write jobID to file
  String dir = System.getProperty("user.home") + "/.twister2";
  if (!FileUtils.isDirectoryExists(dir)) {
    FileUtils.createDirectory(dir);
  }
  String filename = dir + "/last-job-id.txt";
  FileUtils.writeToFile(filename, (job.getJobId() + "").getBytes(), true);
  LOG.info("Written jobID to file: " + job.getJobId());
  String ip = null;
  try {
    ip = Inet4Address.getLocalHost().getHostAddress();
  } catch (UnknownHostException e) {
    LOG.log(Level.SEVERE, e.getMessage(), e);
    return;
  }
  JobMasterAPI.NodeInfo jobMasterNode = NodeInfoUtils.createNodeInfo(ip, null, null);
  KubernetesController controller = KubernetesController.init("default");
  K8sScaler k8sScaler = new K8sScaler(config, job, controller);
  IJobTerminator jobTerminator = new NullTerminator();
  JobMaster jobMaster = new JobMaster(config, host, jobTerminator, job,
      jobMasterNode, k8sScaler, initialState);
  try {
    // jobMaster.startJobMasterThreaded();
    jobMaster.startJobMasterBlocking();
  } catch (Twister2Exception e) {
    LOG.log(Level.SEVERE, "Exception when starting Job master: ", e);
    throw new RuntimeException(e);
  }
  LOG.info("Threaded Job Master started:"
      + "\nnumberOfWorkers: " + job.getNumberOfWorkers()
      + "\njobID: " + job.getJobId());
}
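The example persists the job id so that later client commands can find the running job. A hypothetical companion snippet for reading it back; the class name LastJobId and its read method are illustrative, not part of twister2:

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

// Hypothetical helper, not part of twister2: reads back the job id that
// JobMasterExample wrote to ~/.twister2/last-job-id.txt.
public final class LastJobId {
  public static String read() throws IOException {
    Path p = Paths.get(System.getProperty("user.home"), ".twister2", "last-job-id.txt");
    return new String(Files.readAllBytes(p), StandardCharsets.UTF_8).trim();
  }
}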
use of edu.iu.dsc.tws.rsched.schedulers.NullTerminator in project twister2 by DSC-SPIDAL.
the class NomadJobMasterStarter method launch.
/**
* launch the job master
*
* @return false if setup fails
*/
public boolean launch() {
  // get the job working directory
  /* String jobWorkingDirectory = NomadContext.workingDirectory(config);
  LOG.log(Level.INFO, "job working directory ....." + jobWorkingDirectory);
  if (NomadContext.sharedFileSystem(config)) {
    if (!setupWorkingDirectory(job, jobWorkingDirectory)) {
      throw new RuntimeException("Failed to setup the directory");
    }
  }
  Config newConfig = Config.newBuilder().putAll(config).put(
      SchedulerContext.WORKING_DIRECTORY, jobWorkingDirectory).build();
  // now start the controller, which will get the resources and start the job
  // IController controller = new NomadController(true);
  controller.initialize(newConfig); */
  String indexEnv = System.getenv("NOMAD_ALLOC_INDEX");
  String idEnv = System.getenv("NOMAD_ALLOC_ID");
  int workerID = Integer.valueOf(indexEnv);
  MPIWorkerStarter.initJMLogger(config);
  LOG.log(Level.INFO, String.format("Worker id = %s and index = %d", idEnv, workerID));
  ZKJobMasterRegistrar registrar = null;
  int port = JobMasterContext.jobMasterPort(config);
  String hostAddress = null;
  try {
    hostAddress = Inet4Address.getLocalHost().getHostAddress();
  } catch (UnknownHostException e) {
    e.printStackTrace();
  }
  try {
    registrar = new ZKJobMasterRegistrar(config, hostAddress, port, job.getJobId());
    LOG.info("JobMaster REGISTERED..:" + hostAddress);
  } catch (Exception e) {
    LOG.info("JobMaster CAN NOT BE REGISTERED:");
    e.printStackTrace();
  }
  boolean initialized = registrar.initialize();
  if (!initialized) {
    LOG.info("CAN NOT INITIALIZE");
  }
  if (!initialized && registrar.sameZNodeExist()) {
    registrar.deleteJobMasterZNode();
    registrar.initialize();
  }
  // start the Job Master locally
  JobMaster jobMaster = null;
  JobMasterAPI.NodeInfo jobMasterNodeInfo = NomadContext.getNodeInfo(config, hostAddress);
  IScalerPerCluster clusterScaler = new NullScaler();
  Thread jmThread = null;
  int workerCount = job.getNumberOfWorkers();
  LOG.info("Worker Count..: " + workerCount);
  // if you want to set it manually
  // if (JobMasterContext.jobMasterIP(config) != null) {
  //   hostAddress = JobMasterContext.jobMasterIP(config);
  // }
  LOG.log(Level.INFO, String.format("Starting the Job Master: %s:%d", hostAddress, port));
  JobMasterAPI.JobMasterState initialState = JobMasterAPI.JobMasterState.JM_STARTED;
  NullTerminator nt = new NullTerminator();
  jobMaster = new JobMaster(config, hostAddress, nt, job,
      jobMasterNodeInfo, clusterScaler, initialState);
  jobMaster.addShutdownHook(true);
  try {
    jobMaster.startJobMasterBlocking();
  } catch (Twister2Exception e) {
    LOG.log(Level.SEVERE, e.getMessage(), e);
  }
  // jmThread = jobMaster.startJobMasterThreaded();
  waitIndefinitely();
  registrar.deleteJobMasterZNode();
  registrar.close();
  boolean start = controller.start(job);
  // }
  return start;
}
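One hazard worth noting in the snippet above: if the ZKJobMasterRegistrar constructor throws, registrar stays null and the registrar.initialize() call that follows fails with a NullPointerException. A defensive variant, using only the calls already shown, might look like this sketch:

ZKJobMasterRegistrar registrar = null;
try {
  registrar = new ZKJobMasterRegistrar(config, hostAddress, port, job.getJobId());
  LOG.info("JobMaster REGISTERED..:" + hostAddress);
} catch (Exception e) {
  LOG.log(Level.SEVERE, "JobMaster CAN NOT BE REGISTERED:", e);
}
if (registrar != null) {
  boolean initialized = registrar.initialize();
  if (!initialized && registrar.sameZNodeExist()) {
    // clear a stale znode left by a previous run and retry once
    registrar.deleteJobMasterZNode();
    registrar.initialize();
  }
}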
use of edu.iu.dsc.tws.rsched.schedulers.NullTerminator in project twister2 by DSC-SPIDAL.
the class NomadMasterStarter method launch.
/**
* launch the job master
*
* @return false if setup fails
*/
public boolean launch() {
  // get the job working directory
  String jobWorkingDirectory = NomadContext.workingDirectory(config);
  LOG.log(Level.INFO, "job working directory ....." + jobWorkingDirectory);
  if (NomadContext.sharedFileSystem(config)) {
    if (!setupWorkingDirectory(job, jobWorkingDirectory)) {
      throw new RuntimeException("Failed to setup the directory");
    }
  }
  Config newConfig = Config.newBuilder().putAll(config)
      .put(SchedulerContext.WORKING_DIRECTORY, jobWorkingDirectory).build();
  // now start the controller, which will get the resources and start the job
  IController controller = new NomadController(true);
  controller.initialize(newConfig);
  // start the Job Master locally
  JobMaster jobMaster = null;
  Thread jmThread = null;
  if (JobMasterContext.jobMasterRunsInClient(config)) {
    try {
      int port = JobMasterContext.jobMasterPort(config);
      String hostAddress = JobMasterContext.jobMasterIP(config);
      if (hostAddress == null) {
        hostAddress = InetAddress.getLocalHost().getHostAddress();
      }
      LOG.log(Level.INFO, String.format("Starting the job manager: %s:%d", hostAddress, port));
      JobMasterAPI.NodeInfo jobMasterNodeInfo = NomadContext.getNodeInfo(config, hostAddress);
      IScalerPerCluster clusterScaler = new NullScaler();
      JobMasterAPI.JobMasterState initialState = JobMasterAPI.JobMasterState.JM_STARTED;
      NullTerminator nt = new NullTerminator();
      jobMaster = new JobMaster(config, hostAddress, nt, job,
          jobMasterNodeInfo, clusterScaler, initialState);
      jobMaster.addShutdownHook(true);
      jmThread = jobMaster.startJobMasterThreaded();
    } catch (UnknownHostException e) {
      LOG.log(Level.SEVERE, "Exception when getting local host address: ", e);
      throw new RuntimeException(e);
    } catch (Twister2Exception e) {
      LOG.log(Level.SEVERE, "Exception when starting Job master: ", e);
      throw new RuntimeException(e);
    }
  }
  boolean start = controller.start(job);
  // now let's wait on the client
  if (JobMasterContext.jobMasterRunsInClient(config)) {
    try {
      if (jmThread != null) {
        jmThread.join();
      }
    } catch (InterruptedException ignore) {
    }
  }
  return start;
}
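Taken together, the snippets show two JobMaster constructor shapes: one that takes an explicit port (MPILauncher, MPIWorkerStarter) and one without it (JobMasterExample, NomadJobMasterStarter, NomadMasterStarter). A side-by-side sketch follows; the idea that the port-less overload falls back to JobMasterContext.jobMasterPort(config) is our assumption, not stated in these snippets.

// with an explicit, freshly allocated port (see MPILauncher.launch)
jobMaster = new JobMaster(config, "0.0.0.0", port, terminator, job,
    nodeInfo, scaler, initialState);

// without a port, presumably resolved from JobMasterContext.jobMasterPort(config)
// (an assumption), as in NomadMasterStarter.launch
jobMaster = new JobMaster(config, hostAddress, terminator, job,
    nodeInfo, scaler, initialState);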