Use of edu.iu.dsc.tws.api.compute.graph.ComputeGraph in project twister2 by DSC-SPIDAL.
The class StormBenchmark, method execute:
@Override
public void execute() {
  Integer parallelSources = this.config.getIntegerValue("parallel-sources", 256);
  ComputeGraphBuilder computeGraphBuilder =
      ComputeGraphBuilder.newBuilder(Config.newBuilder().build());
  Generator generator = new Generator();
  DataSink dataSink = new DataSink();
  computeGraphBuilder.addSource("generator", generator, parallelSources);
  if ("reduce".equals(config.get(PARAM_OPERATION))) {
    computeGraphBuilder.addCompute("sink", dataSink)
        .reduce("generator")
        .viaEdge("edge")
        .withOperation(Op.SUM, MessageTypes.DOUBLE_ARRAY);
  } else {
    computeGraphBuilder.addCompute("sink", dataSink)
        .gather("generator")
        .viaEdge("edge");
  }
  computeGraphBuilder.setMode(OperationMode.STREAMING);
  ComputeGraph build = computeGraphBuilder.build();
  this.taskExecutor.execute(build, taskExecutor.plan(build));
}
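The Generator and DataSink task classes are not shown on this page. A minimal sketch of their likely shape, assuming twister2's BaseSource/BaseCompute task API (the real benchmark classes will differ):

import edu.iu.dsc.tws.api.compute.IMessage;
import edu.iu.dsc.tws.api.compute.nodes.BaseCompute;
import edu.iu.dsc.tws.api.compute.nodes.BaseSource;

// Hypothetical sketch: emits one random double[] per execute() call on the "edge" stream.
public class Generator extends BaseSource {
  @Override
  public void execute() {
    context.write("edge", new double[]{Math.random()});
  }
}

// Hypothetical sketch: consumes the reduced/gathered result and asks for more input.
public class DataSink extends BaseCompute {
  @Override
  public boolean execute(IMessage message) {
    return true;
  }
}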
Use of edu.iu.dsc.tws.api.compute.graph.ComputeGraph in project twister2 by DSC-SPIDAL.
The class TeraSort, method execute:
@Override
public void execute(WorkerEnvironment workerEnv) {
  int workerID = workerEnv.getWorkerId();
  ComputeEnvironment cEnv = ComputeEnvironment.init(workerEnv);
  Config config = workerEnv.getConfig();
  resultsRecorder = new BenchmarkResultsRecorder(config, workerID == 0);
  Timing.setDefaultTimingUnit(TimingUnit.MILLI_SECONDS);
  final String filePath = config.getStringValue(ARG_INPUT_FILE, null);
  final int keySize = config.getIntegerValue(ARG_KEY_SIZE, 10);
  final int valueSize = config.getIntegerValue(ARG_VALUE_SIZE, 90);

  // Sampling graph: only if file based
  TaskPartitioner taskPartitioner;
  if (filePath != null) {
    ComputeGraphBuilder samplingGraph = ComputeGraphBuilder.newBuilder(config);
    samplingGraph.setMode(OperationMode.BATCH);
    Sampler samplerTask = new Sampler();
    samplingGraph.addSource(TASK_SAMPLER, samplerTask,
        config.getIntegerValue(ARG_TASKS_SOURCES, 4));
    SamplerReduce samplerReduce = new SamplerReduce();
    samplingGraph.addCompute(TASK_SAMPLER_REDUCE, samplerReduce,
        config.getIntegerValue(ARG_RESOURCE_INSTANCES, 4))
        .allreduce(TASK_SAMPLER)
        .viaEdge(EDGE)
        .withReductionFunction(byte[].class, (minMax1, minMax2) -> {
          byte[] min1 = Arrays.copyOfRange(minMax1, 0, keySize);
          byte[] max1 = Arrays.copyOfRange(minMax1, keySize, minMax1.length);
          byte[] min2 = Arrays.copyOfRange(minMax2, 0, keySize);
          byte[] max2 = Arrays.copyOfRange(minMax2, keySize, minMax2.length);
          byte[] newMinMax = new byte[keySize * 2];
          byte[] min = min1;
          byte[] max = max1;
          if (ByteArrayComparator.getInstance().compare(min1, min2) > 0) {
            min = min2;
          }
          if (ByteArrayComparator.getInstance().compare(max1, max2) < 0) {
            max = max2;
          }
          System.arraycopy(min, 0, newMinMax, 0, keySize);
          System.arraycopy(max, 0, newMinMax, keySize, keySize);
          return newMinMax;
        });
    ComputeGraph sampleGraphBuild = samplingGraph.build();
    ExecutionPlan sampleTaskPlan = cEnv.getTaskExecutor().plan(sampleGraphBuild);
    cEnv.getTaskExecutor().execute(sampleGraphBuild, sampleTaskPlan);
    DataObject<byte[]> output = cEnv.getTaskExecutor().getOutput("sample-reduce");
    LOG.info("Sample output received");
    taskPartitioner = new TaskPartitionerForSampledData(
        output.getPartitions()[0].getConsumer().next(), keySize);
  } else {
    taskPartitioner = new TaskPartitionerForRandom();
  }

  // Sort graph
  ComputeGraphBuilder teraSortTaskGraph = ComputeGraphBuilder.newBuilder(config);
  teraSortTaskGraph.setMode(OperationMode.BATCH);
  BaseSource dataSource;
  if (filePath == null) {
    dataSource = new RandomDataSource();
  } else {
    dataSource = new FileDataSource();
  }
  teraSortTaskGraph.addSource(TASK_SOURCE, dataSource,
      config.getIntegerValue(ARG_TASKS_SOURCES, 4));
  Receiver receiver = new Receiver();
  KeyedGatherConfig keyedGatherConfig = teraSortTaskGraph
      .addCompute(TASK_RECV, receiver, config.getIntegerValue(ARG_TASKS_SINKS, 4))
      .keyedGather(TASK_SOURCE)
      .viaEdge(EDGE)
      .withDataType(MessageTypes.BYTE_ARRAY)
      .withKeyType(MessageTypes.BYTE_ARRAY)
      .withTaskPartitioner(taskPartitioner)
      .useDisk(true)
      .sortBatchByKey(ByteArrayComparator.getInstance())
      .groupBatchByKey(false);
  if (config.getBooleanValue(ARG_FIXED_SCHEMA, false)) {
    LOG.info("Using fixed schema feature with message size : "
        + (keySize + valueSize) + " and key size : " + keySize);
    keyedGatherConfig.withMessageSchema(
        MessageSchema.ofSize(keySize + valueSize, keySize));
  }
  ComputeGraph computeGraph = teraSortTaskGraph.build();
  ExecutionPlan executionPlan = cEnv.getTaskExecutor().plan(computeGraph);
  cEnv.getTaskExecutor().execute(computeGraph, executionPlan);
  cEnv.close();
  LOG.info("Finished Sorting...");
}
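The task partitioner decides which receiver task each key is routed to: TaskPartitionerForSampledData derives range boundaries from the sampled min/max keys, while TaskPartitionerForRandom spreads keys without sampling. As an illustration of the contract only (assuming twister2's TaskPartitioner interface exposes prepare/partition/commit), a naive round-robin partitioner could look like:

import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import edu.iu.dsc.tws.api.compute.TaskPartitioner;

// Illustrative only; not one of the twister2 partitioner classes used above.
public class RoundRobinPartitioner implements TaskPartitioner<byte[]> {
  private List<Integer> targets;
  private int next = 0;

  @Override
  public void prepare(Set<Integer> sources, Set<Integer> destinations) {
    this.targets = new ArrayList<>(destinations);
  }

  @Override
  public int partition(int source, byte[] key) {
    // cycle through the destination tasks, ignoring the key content
    int target = targets.get(next);
    next = (next + 1) % targets.size();
    return target;
  }

  @Override
  public void commit(int source, int partition) {
    // nothing to release per record
  }
}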
Use of edu.iu.dsc.tws.api.compute.graph.ComputeGraph in project twister2 by DSC-SPIDAL.
The class BatchTaskSchedulerExample, method execute:
@Override
public void execute(WorkerEnvironment workerEnv) {
  int workerId = workerEnv.getWorkerId();
  Config config = workerEnv.getConfig();
  long startTime = System.currentTimeMillis();
  LOG.log(Level.FINE, "Task worker starting: " + workerId);
  ComputeEnvironment cEnv = ComputeEnvironment.init(workerEnv);
  TaskExecutor taskExecutor = cEnv.getTaskExecutor();

  // Independent graph; it has a collector
  ComputeGraph firstGraph = buildFirstGraph(2, config);
  // Dependent graph; it has a collector
  ComputeGraph secondGraph = buildSecondGraph(4, config);
  // Dependent graph; it has a receptor to receive input from the first or second graph
  ComputeGraph thirdGraph = buildThirdGraph(4, config);
  ComputeGraph[] computeGraphs = new ComputeGraph[]{firstGraph, secondGraph, thirdGraph};

  // Get the execution plan for each task graph, then execute them in order
  ExecutionPlan firstGraphExecutionPlan = taskExecutor.plan(firstGraph);
  ExecutionPlan secondGraphExecutionPlan = taskExecutor.plan(secondGraph);
  ExecutionPlan thirdGraphExecutionPlan = taskExecutor.plan(thirdGraph);
  taskExecutor.execute(firstGraph, firstGraphExecutionPlan);
  taskExecutor.execute(secondGraph, secondGraphExecutionPlan);
  taskExecutor.execute(thirdGraph, thirdGraphExecutionPlan);

  // This is to test all three graphs as dependent
  /*
  Map<String, ExecutionPlan> taskExecutionPlan = taskExecutor.plan(computeGraphs);
  for (Map.Entry<String, ExecutionPlan> planEntry : taskExecutionPlan.entrySet()) {
    String graphName = planEntry.getKey();
    if (graphName.equals(computeGraphs[0].getGraphName())) {
      taskExecutor.execute(computeGraphs[0], planEntry.getValue());
    } else if (graphName.equals(computeGraphs[1].getGraphName())) {
      taskExecutor.execute(computeGraphs[1], planEntry.getValue());
    } else {
      taskExecutor.execute(computeGraphs[2], planEntry.getValue());
    }
  }
  */
  cEnv.close();
  long endTime = System.currentTimeMillis();
  LOG.info("Total Execution Time: " + (endTime - startTime));
}
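The buildFirstGraph, buildSecondGraph, and buildThirdGraph helpers are not shown on this page. A plausible shape for buildFirstGraph, reusing the builder API from the snippets above with hypothetical FirstSourceTask/FirstSinkTask classes and a direct edge (the real example's task classes and edge names differ):

// Sketch under assumptions: task classes and edge names are placeholders.
private static ComputeGraph buildFirstGraph(int parallelism, Config conf) {
  ComputeGraphBuilder builder = ComputeGraphBuilder.newBuilder(conf);
  builder.setMode(OperationMode.BATCH);
  builder.addSource("firstsource", new FirstSourceTask(), parallelism);
  builder.addCompute("firstsink", new FirstSinkTask(), parallelism)
      .direct("firstsource")
      .viaEdge("direct")
      .withDataType(MessageTypes.OBJECT);
  return builder.build();
}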
Use of edu.iu.dsc.tws.api.compute.graph.ComputeGraph in project twister2 by DSC-SPIDAL.
The class ParallelDataFlowsExample, method generateSecondJob:
private static DataFlowGraph generateSecondJob(Config config, int parallelismValue,
                                               int workers, DataFlowJobConfig jobConfig) {
  ConnectedSource connectedSource = new ConnectedSource("reduce", "first_out");
  ConnectedSink connectedSink = new ConnectedSink();
  ComputeGraphBuilder graphBuilderX = ComputeGraphBuilder.newBuilder(config);
  graphBuilderX.addSource("source1", connectedSource, parallelismValue);
  ComputeConnection reduceConn = graphBuilderX.addCompute("sink1", connectedSink, 1);
  reduceConn.reduce("source1")
      .viaEdge("reduce")
      .withReductionFunction(new Aggregator())
      .withDataType(MessageTypes.OBJECT);
  graphBuilderX.setMode(OperationMode.BATCH);
  ComputeGraph batchGraph = graphBuilderX.build();
  DataFlowGraph job = DataFlowGraph.newSubGraphJob("second_graph", batchGraph)
      .setWorkers(workers)
      .addDataFlowJobConfig(jobConfig)
      .setGraphType("non-iterative");
  return job;
}
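The Aggregator passed to withReductionFunction merges two partial reduce results into a single value. Assuming it implements twister2's IFunction contract, a minimal placeholder would be (a real aggregator would combine the two values, e.g. element-wise, rather than pick one):

import edu.iu.dsc.tws.api.compute.IFunction;

// Hypothetical placeholder; the actual Aggregator in this example is not shown.
public class Aggregator implements IFunction<Object> {
  @Override
  public Object onMessage(Object object1, Object object2) {
    return object1 != null ? object1 : object2;
  }
}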
Use of edu.iu.dsc.tws.api.compute.graph.ComputeGraph in project twister2 by DSC-SPIDAL.
The class ParallelDataFlowsExample, method generateFirstJob:
private static DataFlowGraph generateFirstJob(Config config, int parallelismValue,
                                              int workers, DataFlowJobConfig jobConfig) {
  FirstSourceTask firstSourceTask = new FirstSourceTask();
  ConnectedSink connectedSink = new ConnectedSink("first_out");
  ComputeGraphBuilder graphBuilderX = ComputeGraphBuilder.newBuilder(config);
  graphBuilderX.addSource("source1", firstSourceTask, parallelismValue);
  ComputeConnection partitionConnection =
      graphBuilderX.addCompute("sink1", connectedSink, parallelismValue);
  partitionConnection.partition("source1")
      .viaEdge("partition")
      .withDataType(MessageTypes.OBJECT);
  graphBuilderX.setMode(OperationMode.BATCH);
  ComputeGraph batchGraph = graphBuilderX.build();
  DataFlowGraph job = DataFlowGraph.newSubGraphJob("first_graph", batchGraph)
      .setWorkers(workers)
      .addDataFlowJobConfig(jobConfig)
      .setGraphType("non-iterative");
  return job;
}
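Both helpers follow the same pattern: build a batch ComputeGraph, then wrap it with DataFlowGraph.newSubGraphJob so it can run as one stage of the larger data flow. To launch a worker class such as this example, a driver typically builds and submits a Twister2Job; a minimal sketch with placeholder names and resource values, following the pattern of twister2's HelloWorld example:

import java.util.HashMap;
import edu.iu.dsc.tws.api.JobConfig;
import edu.iu.dsc.tws.api.Twister2Job;
import edu.iu.dsc.tws.api.config.Config;
import edu.iu.dsc.tws.rsched.core.ResourceAllocator;
import edu.iu.dsc.tws.rsched.job.Twister2Submitter;

public static void main(String[] args) {
  Config config = ResourceAllocator.loadConfig(new HashMap<>());
  JobConfig jobConfig = new JobConfig();
  Twister2Job job = Twister2Job.newBuilder()
      .setJobName("parallel-dataflows")               // placeholder job name
      .setWorkerClass(ParallelDataFlowsExample.class)
      .addComputeResource(1, 512, 4)                  // 1 CPU, 512 MB, 4 workers
      .setConfig(jobConfig)
      .build();
  Twister2Submitter.submitJob(job, config);
}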