Example usage of edu.iu.dsc.tws.api.resource.IPersistentVolume in the twister2 project by DSC-SPIDAL.
From the class HadoopTSet, method execute:
@Override
public void execute(Config config, JobAPI.Job job, IWorkerController workerController,
                    IPersistentVolume persistentVolume, IVolatileVolume volatileVolume) {
  // Set up the worker environment and a batch TSet environment on top of it.
  WorkerEnvironment workerEnv = WorkerEnvironment.init(config, job, workerController,
      persistentVolume, volatileVolume);
  BatchEnvironment tSetEnv = TSetEnvironment.initBatch(workerEnv);

  // Hadoop configuration: load the HDFS config directory and point the
  // TextInputFormat at the (example, hard-coded) input path.
  Configuration configuration = new Configuration();
  configuration.addResource(new Path(HdfsDataContext.getHdfsConfigDirectory(config)));
  configuration.set(TextInputFormat.INPUT_DIR, "/input4");

  // Source with parallelism 4: each (byte offset, line) record from the text
  // input is mapped to the string "<offset> : <line>".
  SourceTSet<String> source = tSetEnv.createHadoopSource(configuration, TextInputFormat.class, 4,
      new MapFunc<Tuple<LongWritable, Text>, String>() {
        @Override
        public String map(Tuple<LongWritable, Text> input) {
          return input.getKey().toString() + " : " + input.getValue().toString();
        }
      });

  // Sink: directly receive the mapped strings and log each one.
  SinkTSet<Iterator<String>> sink = source.direct().sink((SinkFunc<Iterator<String>>) value -> {
    while (value.hasNext()) {
      String next = value.next();
      LOG.info("Received value: " + next);
    }
    return true;
  });

  // Execute the source -> direct -> sink graph.
  tSetEnv.run(sink);
}
Example usage of edu.iu.dsc.tws.api.resource.IPersistentVolume in the twister2 project by DSC-SPIDAL.
From the class MPIWorkerStarter, method startWorker:
/**
 * Start the worker: initialize per-rank logging, obtain the worker controller
 * and persistent volume, then run the job's {@code IWorker} through an
 * {@code MPIWorkerManager}.
 *
 * @param intracomm MPI communicator for this worker's group
 */
private void startWorker(Intracomm intracomm) {
  try {
    // initialize the logger for this MPI rank
    initWorkerLogger(config, intracomm.getRank());

    // now create the worker
    IWorkerController wc = WorkerRuntime.getWorkerController();
    IPersistentVolume persistentVolume = initPersistenceVolume(config, globalRank);

    // expose the communicator to the runtime before the worker starts
    MPIContext.addRuntimeObject("comm", intracomm);
    IWorker worker = JobUtils.initializeIWorker(job);
    MPIWorkerManager workerManager = new MPIWorkerManager();
    // no volatile volume is supplied here, hence the null argument
    workerManager.execute(config, job, wc, persistentVolume, null, worker);
  } catch (MPIException e) {
    // pass the exception to the logger so the stack trace is not lost
    LOG.log(Level.SEVERE, "Failed to synchronize the workers at the start", e);
    throw new RuntimeException(e);
  }
}
Aggregations