Use of edu.iu.dsc.tws.tset.sets.batch.SinkTSet in project twister2 by DSC-SPIDAL.
From the class ArrowTSetSourceExample, method execute.
@Override
public void execute(WorkerEnvironment workerEnv) {
  BatchEnvironment env = TSetEnvironment.initBatch(workerEnv);
  Config config = env.getConfig();

  // Read the input/output locations and the job parameters from the configuration.
  String csvInputDirectory = config.getStringValue(DataObjectConstants.DINPUT_DIRECTORY);
  String arrowInputDirectory = config.getStringValue(DataObjectConstants.ARROW_DIRECTORY);
  String arrowFileName = config.getStringValue(DataObjectConstants.FILE_NAME);
  int workers = config.getIntegerValue(DataObjectConstants.WORKERS);
  int parallel = config.getIntegerValue(DataObjectConstants.PARALLELISM_VALUE);
  int dsize = config.getIntegerValue(DataObjectConstants.DSIZE);
  LOG.info("arrow input file:" + arrowFileName + "\t" + arrowInputDirectory + "\t"
      + csvInputDirectory + "\t" + workers + "\t" + parallel);

  Schema schema = makeSchema();

  // Read the CSV input, parse the first column into integers, and write the
  // result to an Arrow file through an ArrowBasedSinkFunction.
  SourceTSet<String[]> csvSource = env.createCSVSource(csvInputDirectory, dsize, parallel, "split");
  SinkTSet<Iterator<Integer>> sinkTSet = csvSource
      .direct()
      .map((MapFunc<String[], Integer>) input -> Integer.parseInt(input[0]))
      .direct()
      .sink(new ArrowBasedSinkFunction<>(arrowInputDirectory, arrowFileName, schema.toJson()));
  env.run(sinkTSet);

  // Source function call: read the Arrow file back and collect its integers.
  env.createArrowSource(arrowInputDirectory, arrowFileName, parallel, schema.toJson())
      .direct()
      .compute((ComputeFunc<Iterator<Object>, List<Integer>>) input -> {
        List<Integer> integers = new ArrayList<>();
        input.forEachRemaining(i -> integers.add((Integer) i));
        return integers;
      })
      .direct()
      .forEach(s -> LOG.info("Integer Array Size:" + s.size() + "\tvalues:" + s));
}
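The makeSchema() helper is not shown on this page. A minimal sketch of what such a helper might look like, assuming the Arrow file stores a single nullable 32-bit integer column; the field name "first_column" and the exact type are illustrative assumptions, not taken from the example:

import java.util.Collections;
import org.apache.arrow.vector.types.pojo.ArrowType;
import org.apache.arrow.vector.types.pojo.Field;
import org.apache.arrow.vector.types.pojo.FieldType;
import org.apache.arrow.vector.types.pojo.Schema;

// Hypothetical helper: one nullable int32 column, matching the Integer
// values the pipeline above writes. The real schema may differ.
private Schema makeSchema() {
  Field intField = new Field("first_column",
      FieldType.nullable(new ArrowType.Int(32, true)), null);
  return new Schema(Collections.singletonList(intField));
}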
Use of edu.iu.dsc.tws.tset.sets.batch.SinkTSet in project twister2 by DSC-SPIDAL.
From the class ReduceExample, method execute.
@Override
public void execute(WorkerEnvironment workerEnv) {
  BatchEnvironment env = TSetEnvironment.initBatch(workerEnv);

  // Each worker contributes a disjoint range of integers; reduce sums them all.
  int start = env.getWorkerID() * 100;
  SourceTSet<Integer> src = dummySource(env, start, COUNT, PARALLELISM);
  ReduceTLink<Integer> reduce = src.reduce(Integer::sum);

  LOG.info("test foreach");
  reduce.forEach(i -> LOG.info("foreach: " + i));

  LOG.info("test map");
  reduce.map(i -> i.toString() + "$$")
      .withSchema(PrimitiveSchemas.STRING)
      .direct()
      .forEach(s -> LOG.info("map: " + s));

  LOG.info("test flat map");
  reduce.flatmap((i, c) -> c.collect(i.toString() + "##"))
      .withSchema(PrimitiveSchemas.STRING)
      .direct()
      .forEach(s -> LOG.info("flat:" + s));

  LOG.info("test compute");
  reduce.compute((ComputeFunc<Integer, String>) input -> "sum=" + input)
      .withSchema(PrimitiveSchemas.STRING)
      .direct()
      .forEach(s -> LOG.info("compute: " + s));

  LOG.info("test computec");
  reduce.compute((ComputeCollectorFunc<Integer, String>) (input, output) -> output.collect("sum=" + input))
      .withSchema(PrimitiveSchemas.STRING)
      .direct()
      .forEach(s -> LOG.info("computec: " + s));

  LOG.info("test map2tup");
  reduce.mapToTuple(i -> new Tuple<>(i, i.toString()))
      .keyedDirect()
      .forEach(i -> LOG.info("mapToTuple: " + i.toString()));

  LOG.info("test sink");
  SinkTSet<Integer> sink = reduce.sink((SinkFunc<Integer>) value -> {
    LOG.info("val =" + value);
    return true;
  });
  env.run(sink);
}
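dummySource is a helper defined elsewhere in the examples. A minimal sketch under the assumption that it emits count consecutive integers starting at start with the given parallelism; the signature and body are illustrative:

import edu.iu.dsc.tws.api.tset.fn.SourceFunc;

// Hypothetical stand-in for the dummySource helper: a SourceTSet that emits
// `count` consecutive integers beginning at `start`, per source instance.
private SourceTSet<Integer> dummySource(BatchEnvironment env, int start, int count, int parallel) {
  return env.createSource(new SourceFunc<Integer>() {
    private int current = start;

    @Override
    public boolean hasNext() {
      return current < start + count;
    }

    @Override
    public Integer next() {
      return current++;
    }
  }, parallel);
}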
Use of edu.iu.dsc.tws.tset.sets.batch.SinkTSet in project twister2 by DSC-SPIDAL.
From the class TSetGatherExample, method execute.
@Override
public void execute(WorkerEnvironment workerEnv) {
  super.execute(workerEnv);
  BatchEnvironment env = TSetEnvironment.initBatch(workerEnv);

  // Set the parallelism of the source to task stage 0 and the sink to task stage 1.
  int srcPara = jobParameters.getTaskStages().get(0);
  int sinkPara = jobParameters.getTaskStages().get(1);

  SourceTSet<int[]> source = env.createSource(new TestBaseSource(), srcPara).setName("Source");
  GatherTLink<int[]> gather = source.gather();

  // The sink receives (task-index, data) tuples; keep the last gathered value,
  // record it, and verify it against the expected gather result.
  SinkTSet<Iterator<Tuple<Integer, int[]>>> sink =
      gather.sink((SinkFunc<Iterator<Tuple<Integer, int[]>>>) val -> {
        int[] value = null;
        while (val.hasNext()) {
          value = val.next().getValue();
        }
        experimentData.setOutput(value);
        LOG.info("Results " + Arrays.toString(value));
        try {
          verify(OperationNames.GATHER);
        } catch (VerificationException e) {
          LOG.info("Exception Message : " + e.getMessage());
        }
        return true;
      });
  env.run(sink);
}
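These example classes are Twister2 workers, so they are launched through the job submission API rather than run directly. A minimal sketch of a main method submitting TSetGatherExample, assuming the standard Twister2 submission pattern and import locations; the worker count and resources are illustrative:

import java.util.HashMap;

import edu.iu.dsc.tws.api.JobConfig;
import edu.iu.dsc.tws.api.Twister2Job;
import edu.iu.dsc.tws.api.config.Config;
import edu.iu.dsc.tws.rsched.core.ResourceAllocator;
import edu.iu.dsc.tws.rsched.job.Twister2Submitter;

public static void main(String[] args) {
  // Load the cluster configuration and build an (illustrative) job:
  // 2 workers, each with 1 CPU and 512 MB of memory.
  Config config = ResourceAllocator.loadConfig(new HashMap<>());
  Twister2Job job = Twister2Job.newBuilder()
      .setJobName(TSetGatherExample.class.getName())
      .setWorkerClass(TSetGatherExample.class)
      .addComputeResource(1, 512, 2)
      .setConfig(new JobConfig())
      .build();
  Twister2Submitter.submitJob(job, config);
}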
Use of edu.iu.dsc.tws.tset.sets.batch.SinkTSet in project twister2 by DSC-SPIDAL.
From the class Twister2TranslationContext, method execute.
public void execute() {
  // Cache each side-input data set so it can be attached to every sink.
  Map<String, CachedTSet> sideInputTSets = new HashMap<>();
  for (Map.Entry<String, BatchTSet<?>> sides : sideInputDataSets.entrySet()) {
    CachedTSet tempCache = (CachedTSet) sides.getValue().cache();
    sideInputTSets.put(sides.getKey(), tempCache);
  }
  // Terminate every leaf of the TSet graph with a sink, attach the side
  // inputs, and evaluate it.
  for (TSet leaf : leaves) {
    SinkTSet sinkTSet = (SinkTSet) leaf.direct().sink(new Twister2SinkFunction());
    addInputs(sinkTSet, sideInputTSets);
    eval(sinkTSet);
  }
}
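The addInputs and eval helpers are not shown here. A plausible sketch, assuming addInputs registers each cached side input on the sink under its tag and eval simply runs the sink through an environment field (both the bodies and the field name "environment" are hypothetical):

// Hypothetical sketch: attach each cached side input to the sink TSet so the
// sink function can look it up by key at runtime, then evaluate the sink.
private void addInputs(SinkTSet sinkTSet, Map<String, CachedTSet> sideInputTSets) {
  for (Map.Entry<String, CachedTSet> entry : sideInputTSets.entrySet()) {
    sinkTSet.addInput(entry.getKey(), entry.getValue());
  }
}

private void eval(SinkTSet sinkTSet) {
  environment.run(sinkTSet);
}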
Use of edu.iu.dsc.tws.tset.sets.batch.SinkTSet in project twister2 by DSC-SPIDAL.
From the class HadoopTSet, method execute.
@Override
public void execute(Config config, JobAPI.Job job, IWorkerController workerController,
                    IPersistentVolume persistentVolume, IVolatileVolume volatileVolume) {
  int workerId = workerController.getWorkerInfo().getWorkerID();
  WorkerEnvironment workerEnv =
      WorkerEnvironment.init(config, job, workerController, persistentVolume, volatileVolume);
  BatchEnvironment tSetEnv = TSetEnvironment.initBatch(workerEnv);

  // Point Hadoop's TextInputFormat at the HDFS configuration and input directory.
  Configuration configuration = new Configuration();
  configuration.addResource(new Path(HdfsDataContext.getHdfsConfigDirectory(config)));
  configuration.set(TextInputFormat.INPUT_DIR, "/input4");

  // Each (offset, line) record from the input is mapped to an "offset : line" string.
  SourceTSet<String> source = tSetEnv.createHadoopSource(configuration, TextInputFormat.class, 4,
      new MapFunc<Tuple<LongWritable, Text>, String>() {
        @Override
        public String map(Tuple<LongWritable, Text> input) {
          return input.getKey().toString() + " : " + input.getValue().toString();
        }
      });

  SinkTSet<Iterator<String>> sink = source.direct().sink((SinkFunc<Iterator<String>>) value -> {
    while (value.hasNext()) {
      String next = value.next();
      LOG.info("Received value: " + next);
    }
    return true;
  });
  tSetEnv.run(sink);
}
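The anonymous MapFunc above can equivalently be written as a lambda; the behavior is unchanged:

SourceTSet<String> source = tSetEnv.createHadoopSource(
    configuration, TextInputFormat.class, 4,
    (MapFunc<Tuple<LongWritable, Text>, String>) input ->
        input.getKey().toString() + " : " + input.getValue().toString());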