Search in sources :

Example 6 with Task

use of org.apache.sysml.runtime.controlprogram.parfor.Task in project incubator-systemml by apache.

In class ParForProgramBlock, the method executeRemoteMRParFor:

/**
 * Executes this parfor program block as a single remote MapReduce job:
 * serializes the child program blocks, materializes the parfor tasks into an
 * HDFS task file, submits the MR job, and consolidates the worker results
 * back into the global symbol table.
 *
 * @param ec execution context providing the variable symbol table
 * @param itervar iteration variable (not referenced in this method body;
 *        presumably assigned per task by the remote workers — TODO confirm)
 * @param from loop lower bound
 * @param to loop upper bound (also determines the fixed digit width of task file lines)
 * @param incr loop increment
 * @throws IOException if task file creation or job submission fails
 */
private void executeRemoteMRParFor(ExecutionContext ec, IntObject itervar, IntObject from, IntObject to, IntObject incr) throws IOException {
    /* Step 0) check and recompile MR inst
     * Step 1) serialize child PB and inst
     * Step 2) create and serialize tasks
     * Step 3) submit MR Jobs and wait for results
     * Step 4) collect results from each parallel worker
     */
    // timing is only collected when monitoring is enabled
    Timing time = (_monitor ? new Timing(true) : null);
    // Step 0) check and compile to CP (if forced remote parfor)
    boolean flagForced = false;
    if (FORCE_CP_ON_REMOTE_MR && (_optMode == POptMode.NONE || (_optMode == POptMode.CONSTRAINED && _execMode == PExecMode.REMOTE_MR))) {
        // tid = 0  because replaced in remote parworker
        flagForced = checkMRAndRecompileToCP(0);
    }
    // Step 1) init parallel workers (serialize PBs)
    // NOTES: each mapper changes filenames with regard to its ID; as we submit a single
    // job, the serialized string cannot be reused, since variables are serialized as well.
    ParForBody body = new ParForBody(_childBlocks, _resultVars, ec);
    String program = ProgramConverter.serializeParForBody(body);
    if (_monitor)
        StatisticMonitor.putPFStat(_ID, Stat.PARFOR_INIT_PARWRK_T, time.stop());
    // Step 2) create tasks
    TaskPartitioner partitioner = createTaskPartitioner(from, to, incr);
    String taskFile = constructTaskFileName();
    String resultFile = constructResultFileName();
    long numIterations = partitioner.getNumIterations();
    // decimal digit count of the upper bound, used for fixed-width task lines
    // NOTE(review): assumes to >= 1 — log10(0) would yield -Infinity; confirm callers guarantee this
    int maxDigits = (int) Math.log10(to.getLongValue()) + 1;
    long numCreatedTasks = -1;
    if (USE_STREAMING_TASK_CREATION) {
        LocalTaskQueue<Task> queue = new LocalTaskQueue<>();
        // put tasks into queue and start writing to taskFile
        numCreatedTasks = partitioner.createTasks(queue);
        taskFile = writeTasksToFile(taskFile, queue, maxDigits);
    } else {
        // sequentially create tasks and write to disk
        List<Task> tasks = partitioner.createTasks();
        numCreatedTasks = tasks.size();
        taskFile = writeTasksToFile(taskFile, tasks, maxDigits);
    }
    if (_monitor)
        StatisticMonitor.putPFStat(_ID, Stat.PARFOR_INIT_TASKS_T, time.stop());
    // write matrices to HDFS (presumably so remote workers read consistent inputs — confirm)
    exportMatricesToHDFS(ec);
    // Step 3) submit MR job (wait for finished work)
    MatrixObject colocatedDPMatrixObj = (_colocatedDPMatrix != null) ? ec.getMatrixObject(_colocatedDPMatrix) : null;
    RemoteParForJobReturn ret = RemoteParForMR.runJob(_ID, program, taskFile, resultFile, colocatedDPMatrixObj, _enableCPCaching, _numThreads, WRITE_REPLICATION_FACTOR, MAX_RETRYS_ON_ERROR, getMinMemory(ec), (ALLOW_REUSE_MR_JVMS & _jvmReuse));
    if (_monitor)
        StatisticMonitor.putPFStat(_ID, Stat.PARFOR_WAIT_EXEC_T, time.stop());
    // Step 4) collecting results from each parallel worker
    int numExecutedTasks = ret.getNumExecutedTasks();
    int numExecutedIterations = ret.getNumExecutedIterations();
    // consolidate results into global symbol table
    consolidateAndCheckResults(ec, numIterations, numCreatedTasks, numExecutedIterations, numExecutedTasks, ret.getVariables());
    // release the forced CP recompilation, see step 0
    if (flagForced)
        releaseForcedRecompile(0);
    if (_monitor) {
        StatisticMonitor.putPFStat(_ID, Stat.PARFOR_WAIT_RESULTS_T, time.stop());
        StatisticMonitor.putPFStat(_ID, Stat.PARFOR_NUMTASKS, numExecutedTasks);
        StatisticMonitor.putPFStat(_ID, Stat.PARFOR_NUMITERS, numExecutedIterations);
    }
}
Also used : ParForBody(org.apache.sysml.runtime.controlprogram.parfor.ParForBody) Task(org.apache.sysml.runtime.controlprogram.parfor.Task) MatrixObject(org.apache.sysml.runtime.controlprogram.caching.MatrixObject) RemoteParForJobReturn(org.apache.sysml.runtime.controlprogram.parfor.RemoteParForJobReturn) LocalTaskQueue(org.apache.sysml.runtime.controlprogram.parfor.LocalTaskQueue) Timing(org.apache.sysml.runtime.controlprogram.parfor.stat.Timing) TaskPartitioner(org.apache.sysml.runtime.controlprogram.parfor.TaskPartitioner)

Example 7 with Task

use of org.apache.sysml.runtime.controlprogram.parfor.Task in project incubator-systemml by apache.

In class ParForProgramBlock, the method writeTasksToFile:

/**
 * Serializes the given tasks, one formatted line each, into the file at fname
 * (overwriting any existing file). Any failure is wrapped into a runtime exception.
 *
 * @param fname destination file name
 * @param tasks tasks to serialize, written in list order
 * @param maxDigits fixed digit width passed through to the line formatter
 * @return the file name written to (same as fname)
 * @throws IOException declared for caller compatibility; failures surface as DMLRuntimeException
 */
private static String writeTasksToFile(String fname, List<Task> tasks, int maxDigits) throws IOException {
    BufferedWriter writer = null;
    try {
        Path target = new Path(fname);
        FileSystem fs = IOUtilFunctions.getFileSystem(target);
        writer = new BufferedWriter(new OutputStreamWriter(fs.create(target, true)));
        // only the first line is flagged (workaround for keeping gen order)
        boolean firstLine = true;
        for (Task task : tasks) {
            writer.write(createTaskFileLine(task, maxDigits, firstLine));
            firstLine = false;
        }
    } catch (Exception ex) {
        throw new DMLRuntimeException("Error writing tasks to taskfile " + fname, ex);
    } finally {
        // close without propagating secondary errors
        IOUtilFunctions.closeSilently(writer);
    }
    return fname;
}
Also used : Path(org.apache.hadoop.fs.Path) Task(org.apache.sysml.runtime.controlprogram.parfor.Task) FileSystem(org.apache.hadoop.fs.FileSystem) OutputStreamWriter(java.io.OutputStreamWriter) DMLRuntimeException(org.apache.sysml.runtime.DMLRuntimeException) IOException(java.io.IOException) BufferedWriter(java.io.BufferedWriter) DMLRuntimeException(org.apache.sysml.runtime.DMLRuntimeException)

Example 8 with Task

use of org.apache.sysml.runtime.controlprogram.parfor.Task in project systemml by apache.

In class ParForProgramBlock, the method executeRemoteMRParFor:

/**
 * Executes this parfor program block as a single remote MapReduce job:
 * serializes the child program blocks, materializes the parfor tasks into an
 * HDFS task file, submits the MR job, and consolidates the worker results
 * back into the global symbol table.
 *
 * @param ec execution context providing the variable symbol table
 * @param itervar iteration variable (not referenced in this method body;
 *        presumably assigned per task by the remote workers — TODO confirm)
 * @param from loop lower bound
 * @param to loop upper bound (also determines the fixed digit width of task file lines)
 * @param incr loop increment
 * @throws IOException if task file creation or job submission fails
 */
private void executeRemoteMRParFor(ExecutionContext ec, IntObject itervar, IntObject from, IntObject to, IntObject incr) throws IOException {
    /* Step 0) check and recompile MR inst
     * Step 1) serialize child PB and inst
     * Step 2) create and serialize tasks
     * Step 3) submit MR Jobs and wait for results
     * Step 4) collect results from each parallel worker
     */
    // timing is only collected when monitoring is enabled
    Timing time = (_monitor ? new Timing(true) : null);
    // Step 0) check and compile to CP (if forced remote parfor)
    boolean flagForced = false;
    if (FORCE_CP_ON_REMOTE_MR && (_optMode == POptMode.NONE || (_optMode == POptMode.CONSTRAINED && _execMode == PExecMode.REMOTE_MR))) {
        // tid = 0  because replaced in remote parworker
        flagForced = checkMRAndRecompileToCP(0);
    }
    // Step 1) init parallel workers (serialize PBs)
    // NOTES: each mapper changes filenames with regard to its ID; as we submit a single
    // job, the serialized string cannot be reused, since variables are serialized as well.
    ParForBody body = new ParForBody(_childBlocks, _resultVars, ec);
    String program = ProgramConverter.serializeParForBody(body);
    if (_monitor)
        StatisticMonitor.putPFStat(_ID, Stat.PARFOR_INIT_PARWRK_T, time.stop());
    // Step 2) create tasks
    TaskPartitioner partitioner = createTaskPartitioner(from, to, incr);
    String taskFile = constructTaskFileName();
    String resultFile = constructResultFileName();
    long numIterations = partitioner.getNumIterations();
    // decimal digit count of the upper bound, used for fixed-width task lines
    // NOTE(review): assumes to >= 1 — log10(0) would yield -Infinity; confirm callers guarantee this
    int maxDigits = (int) Math.log10(to.getLongValue()) + 1;
    long numCreatedTasks = -1;
    if (USE_STREAMING_TASK_CREATION) {
        LocalTaskQueue<Task> queue = new LocalTaskQueue<>();
        // put tasks into queue and start writing to taskFile
        numCreatedTasks = partitioner.createTasks(queue);
        taskFile = writeTasksToFile(taskFile, queue, maxDigits);
    } else {
        // sequentially create tasks and write to disk
        List<Task> tasks = partitioner.createTasks();
        numCreatedTasks = tasks.size();
        taskFile = writeTasksToFile(taskFile, tasks, maxDigits);
    }
    if (_monitor)
        StatisticMonitor.putPFStat(_ID, Stat.PARFOR_INIT_TASKS_T, time.stop());
    // write matrices to HDFS (presumably so remote workers read consistent inputs — confirm)
    exportMatricesToHDFS(ec);
    // Step 3) submit MR job (wait for finished work)
    MatrixObject colocatedDPMatrixObj = (_colocatedDPMatrix != null) ? ec.getMatrixObject(_colocatedDPMatrix) : null;
    RemoteParForJobReturn ret = RemoteParForMR.runJob(_ID, program, taskFile, resultFile, colocatedDPMatrixObj, _enableCPCaching, _numThreads, WRITE_REPLICATION_FACTOR, MAX_RETRYS_ON_ERROR, getMinMemory(ec), (ALLOW_REUSE_MR_JVMS & _jvmReuse));
    if (_monitor)
        StatisticMonitor.putPFStat(_ID, Stat.PARFOR_WAIT_EXEC_T, time.stop());
    // Step 4) collecting results from each parallel worker
    int numExecutedTasks = ret.getNumExecutedTasks();
    int numExecutedIterations = ret.getNumExecutedIterations();
    // consolidate results into global symbol table
    consolidateAndCheckResults(ec, numIterations, numCreatedTasks, numExecutedIterations, numExecutedTasks, ret.getVariables());
    // release the forced CP recompilation, see step 0
    if (flagForced)
        releaseForcedRecompile(0);
    if (_monitor) {
        StatisticMonitor.putPFStat(_ID, Stat.PARFOR_WAIT_RESULTS_T, time.stop());
        StatisticMonitor.putPFStat(_ID, Stat.PARFOR_NUMTASKS, numExecutedTasks);
        StatisticMonitor.putPFStat(_ID, Stat.PARFOR_NUMITERS, numExecutedIterations);
    }
}
Also used : ParForBody(org.apache.sysml.runtime.controlprogram.parfor.ParForBody) Task(org.apache.sysml.runtime.controlprogram.parfor.Task) MatrixObject(org.apache.sysml.runtime.controlprogram.caching.MatrixObject) RemoteParForJobReturn(org.apache.sysml.runtime.controlprogram.parfor.RemoteParForJobReturn) LocalTaskQueue(org.apache.sysml.runtime.controlprogram.parfor.LocalTaskQueue) Timing(org.apache.sysml.runtime.controlprogram.parfor.stat.Timing) TaskPartitioner(org.apache.sysml.runtime.controlprogram.parfor.TaskPartitioner)

Example 9 with Task

use of org.apache.sysml.runtime.controlprogram.parfor.Task in project systemml by apache.

In class ParForProgramBlock, the method executeLocalParFor:

/**
 * Executes the parfor locally, i.e., the parfor is realized with numThreads local threads that drive execution.
 * This execution mode allows for arbitrary nested local parallelism and nested invocations of MR jobs. See
 * below for details of the realization.
 *
 * @param ec execution context providing the variable symbol table shared with workers
 * @param itervar iteration variable (not referenced in this method body; presumably
 *        bound per task inside the created parallel workers — TODO confirm)
 * @param from loop lower bound, handed to the task partitioner
 * @param to loop upper bound, handed to the task partitioner
 * @param incr loop increment, handed to the task partitioner
 * @throws InterruptedException if interrupted while joining the worker threads
 */
private void executeLocalParFor(ExecutionContext ec, IntObject itervar, IntObject from, IntObject to, IntObject incr) throws InterruptedException {
    LOG.trace("Local Par For (multi-threaded) with degree of parallelism : " + _numThreads);
    /* Step 1) init parallel workers, task queue and threads
     *         start threads (from now on waiting for tasks)
     * Step 2) create tasks
     *         put tasks into queue
     *         mark end of task input stream
     * Step 3) join all threads (wait for finished work)
     * Step 4) collect results from each parallel worker
     */
    Timing time = new Timing(true);
    int numExecutedTasks = 0;
    int numExecutedIterations = 0;
    // restrict recompilation to thread local memory
    setMemoryBudget();
    // enable runtime piggybacking if required
    if (_enableRuntimePiggybacking)
        // default piggybacking worker
        RuntimePiggybacking.start(_numThreads);
    try {
        // Step 1) create task queue and init workers in parallel
        // (including preparation of update-in-place variables)
        LocalTaskQueue<Task> queue = new LocalTaskQueue<>();
        Thread[] threads = new Thread[_numThreads];
        LocalParWorker[] workers = new LocalParWorker[_numThreads];
        // worker creation itself is parallelized; threads are created but not yet started
        IntStream.range(0, _numThreads).parallel().forEach(i -> {
            workers[i] = createParallelWorker(_pwIDs[i], queue, ec, i);
            threads[i] = new Thread(workers[i]);
            threads[i].setPriority(Thread.MAX_PRIORITY);
        });
        // start threads (from now on waiting for tasks)
        for (Thread thread : threads) thread.start();
        // maintain statistics
        long tinit = (long) time.stop();
        if (DMLScript.STATISTICS)
            Statistics.incrementParForInitTime(tinit);
        if (_monitor)
            StatisticMonitor.putPFStat(_ID, Stat.PARFOR_INIT_PARWRK_T, tinit);
        // Step 2) create tasks
        TaskPartitioner partitioner = createTaskPartitioner(from, to, incr);
        long numIterations = partitioner.getNumIterations();
        long numCreatedTasks = -1;
        if (USE_STREAMING_TASK_CREATION) {
            // put tasks into queue (parworker start work on first tasks while creating tasks)
            numCreatedTasks = partitioner.createTasks(queue);
        } else {
            List<Task> tasks = partitioner.createTasks();
            numCreatedTasks = tasks.size();
            // put tasks into queue
            for (Task t : tasks) queue.enqueueTask(t);
            // mark end of task input stream
            queue.closeInput();
        }
        if (_monitor)
            StatisticMonitor.putPFStat(_ID, Stat.PARFOR_INIT_TASKS_T, time.stop());
        // Step 3) join all threads (wait for finished work)
        for (Thread thread : threads) thread.join();
        if (_monitor)
            StatisticMonitor.putPFStat(_ID, Stat.PARFOR_WAIT_EXEC_T, time.stop());
        // Step 4) collecting results from each parallel worker
        // obtain results and cleanup other intermediates before result merge
        // (each worker's variable map is pruned to the declared result variables)
        LocalVariableMap[] localVariables = new LocalVariableMap[_numThreads];
        for (int i = 0; i < _numThreads; i++) {
            localVariables[i] = workers[i].getVariables();
            localVariables[i].removeAllNotIn(_resultVars.stream().map(v -> v._name).collect(Collectors.toSet()));
            numExecutedTasks += workers[i].getExecutedTasks();
            numExecutedIterations += workers[i].getExecutedIterations();
        }
        // consolidate results into global symbol table
        consolidateAndCheckResults(ec, numIterations, numCreatedTasks, numExecutedIterations, numExecutedTasks, localVariables);
        // Step 5) cleanup local parworkers (e.g., remove created functions)
        for (int i = 0; i < _numThreads; i++) {
            Collection<String> fnNames = workers[i].getFunctionNames();
            if (fnNames != null && !fnNames.isEmpty())
                for (String fn : fnNames) {
                    String[] parts = DMLProgram.splitFunctionKey(fn);
                    _prog.removeFunctionProgramBlock(parts[0], parts[1]);
                }
        }
        // presumably re-binds GPUContext 0 to the main thread after workers finish
        // NOTE(review): original comment appears truncated — confirm intent
        if (DMLScript.USE_ACCELERATOR) {
            ec.getGPUContext(0).initializeThread();
        }
    } finally {
        // remove thread-local memory budget (reset to original budget)
        // (in finally to prevent error side effects for multiple scripts in one jvm)
        resetMemoryBudget();
        // disable runtime piggybacking
        if (_enableRuntimePiggybacking)
            RuntimePiggybacking.stop();
        if (_monitor) {
            StatisticMonitor.putPFStat(_ID, Stat.PARFOR_WAIT_RESULTS_T, time.stop());
            StatisticMonitor.putPFStat(_ID, Stat.PARFOR_NUMTASKS, numExecutedTasks);
            StatisticMonitor.putPFStat(_ID, Stat.PARFOR_NUMITERS, numExecutedIterations);
        }
    }
}
Also used : Task(org.apache.sysml.runtime.controlprogram.parfor.Task) LocalTaskQueue(org.apache.sysml.runtime.controlprogram.parfor.LocalTaskQueue) LocalParWorker(org.apache.sysml.runtime.controlprogram.parfor.LocalParWorker) Timing(org.apache.sysml.runtime.controlprogram.parfor.stat.Timing) TaskPartitioner(org.apache.sysml.runtime.controlprogram.parfor.TaskPartitioner)

Example 10 with Task

use of org.apache.sysml.runtime.controlprogram.parfor.Task in project systemml by apache.

In class ParForProgramBlock, the method writeTasksToFile:

/**
 * Drains the given task queue and serializes each task, one formatted line at
 * a time, into the file at fname (overwriting any existing file). Any failure
 * is wrapped into a runtime exception.
 *
 * @param fname destination file name
 * @param queue task queue, consumed until the NO_MORE_TASKS sentinel is dequeued
 * @param maxDigits fixed digit width passed through to the line formatter
 * @return the file name written to (same as fname)
 * @throws IOException declared for caller compatibility; failures surface as DMLRuntimeException
 */
private static String writeTasksToFile(String fname, LocalTaskQueue<Task> queue, int maxDigits) throws IOException {
    BufferedWriter writer = null;
    try {
        Path target = new Path(fname);
        FileSystem fs = IOUtilFunctions.getFileSystem(target);
        writer = new BufferedWriter(new OutputStreamWriter(fs.create(target, true)));
        // only the first line is flagged (workaround for keeping gen order)
        boolean firstLine = true;
        for (Task task = queue.dequeueTask(); task != LocalTaskQueue.NO_MORE_TASKS; task = queue.dequeueTask()) {
            writer.write(createTaskFileLine(task, maxDigits, firstLine));
            firstLine = false;
        }
    } catch (Exception ex) {
        throw new DMLRuntimeException("Error writing tasks to taskfile " + fname, ex);
    } finally {
        // close without propagating secondary errors
        IOUtilFunctions.closeSilently(writer);
    }
    return fname;
}
Also used : Path(org.apache.hadoop.fs.Path) Task(org.apache.sysml.runtime.controlprogram.parfor.Task) FileSystem(org.apache.hadoop.fs.FileSystem) OutputStreamWriter(java.io.OutputStreamWriter) DMLRuntimeException(org.apache.sysml.runtime.DMLRuntimeException) IOException(java.io.IOException) BufferedWriter(java.io.BufferedWriter) DMLRuntimeException(org.apache.sysml.runtime.DMLRuntimeException)

Aggregations

Task (org.apache.sysml.runtime.controlprogram.parfor.Task)10 TaskPartitioner (org.apache.sysml.runtime.controlprogram.parfor.TaskPartitioner)6 Timing (org.apache.sysml.runtime.controlprogram.parfor.stat.Timing)6 BufferedWriter (java.io.BufferedWriter)4 IOException (java.io.IOException)4 OutputStreamWriter (java.io.OutputStreamWriter)4 FileSystem (org.apache.hadoop.fs.FileSystem)4 Path (org.apache.hadoop.fs.Path)4 DMLRuntimeException (org.apache.sysml.runtime.DMLRuntimeException)4 LocalTaskQueue (org.apache.sysml.runtime.controlprogram.parfor.LocalTaskQueue)4 ParForBody (org.apache.sysml.runtime.controlprogram.parfor.ParForBody)4 RemoteParForJobReturn (org.apache.sysml.runtime.controlprogram.parfor.RemoteParForJobReturn)4 HashMap (java.util.HashMap)2 MatrixObject (org.apache.sysml.runtime.controlprogram.caching.MatrixObject)2 LocalParWorker (org.apache.sysml.runtime.controlprogram.parfor.LocalParWorker)2