Use of org.apache.sysml.conf.CompilerConfig in project systemml by apache.
The class ScriptExecutor, method setGlobalFlags.
/**
* Set the global flags (for example: statistics, gpu, etc).
*/
protected void setGlobalFlags() {
    oldStatistics = DMLScript.STATISTICS;
    DMLScript.STATISTICS = statistics;
    oldForceGPU = DMLScript.FORCE_ACCELERATOR;
    DMLScript.FORCE_ACCELERATOR = forceGPU;
    oldGPU = DMLScript.USE_ACCELERATOR;
    DMLScript.USE_ACCELERATOR = gpu;
    DMLScript.STATISTICS_COUNT = statisticsMaxHeavyHitters;
    // set the global compiler configuration
    try {
        OptimizerUtils.resetStaticCompilerFlags();
        CompilerConfig cconf = OptimizerUtils.constructCompilerConfig(ConfigurationManager.getCompilerConfig(), config);
        ConfigurationManager.setGlobalConfig(cconf);
    } catch (DMLRuntimeException ex) {
        throw new RuntimeException(ex);
    }
    // set the GPUs to use for this process (a range, all GPUs, comma-separated list, or a specific GPU)
    GPUContextPool.AVAILABLE_GPUS = config.getTextValue(DMLConfig.AVAILABLE_GPUS);
    String evictionPolicy = config.getTextValue(DMLConfig.GPU_EVICTION_POLICY).toUpperCase();
    try {
        DMLScript.GPU_EVICTION_POLICY = EvictionPolicy.valueOf(evictionPolicy);
    } catch (IllegalArgumentException e) {
        throw new RuntimeException("Unsupported eviction policy: " + evictionPolicy);
    }
}
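For context, these globals are normally driven through the MLContext programmatic API rather than set by hand; ScriptExecutor.setGlobalFlags() then copies the configured values into the DMLScript statics before compilation. The following is only a minimal sketch of such a caller, assuming the MLContext setter names (setStatistics, setStatisticsMaxHeavyHitters, setGPU, setForceGPU) and a local Spark session; adjust to the actual API of your SystemML version.

import org.apache.spark.sql.SparkSession;
import org.apache.sysml.api.mlcontext.MLContext;
import org.apache.sysml.api.mlcontext.Script;
import org.apache.sysml.api.mlcontext.ScriptFactory;

public class SetGlobalFlagsExample {
    public static void main(String[] args) {
        SparkSession spark = SparkSession.builder()
            .appName("systemml-flags-example").master("local[*]").getOrCreate();

        MLContext ml = new MLContext(spark);
        // These setters feed the fields (statistics, gpu, forceGPU, ...) that
        // ScriptExecutor.setGlobalFlags() later pushes into the DMLScript statics.
        ml.setStatistics(true);
        ml.setStatisticsMaxHeavyHitters(10);
        ml.setGPU(false);      // assumed setter name; maps to DMLScript.USE_ACCELERATOR
        ml.setForceGPU(false); // assumed setter name; maps to DMLScript.FORCE_ACCELERATOR

        Script script = ScriptFactory.dml("x = sum(seq(1, 100)); print(x);");
        ml.execute(script);

        spark.stop();
    }
}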
Use of org.apache.sysml.conf.CompilerConfig in project incubator-systemml by apache.
The class ParForProgramBlock, method createParallelWorker.
/**
* Creates a new or partially recycled instance of a parallel worker. To this end, the symbol table and child
* program blocks are deep copied. Note that the entries of the symbol table are not deep copied because they are
* replaced anyway on the next write. When recycling is enabled, the deep copies of program blocks are reused from
* previous executions of this parfor.
*
* @param pwID parworker id
* @param queue task queue
* @param ec execution context
* @return local parworker
* @throws DMLRuntimeException if DMLRuntimeException occurs
*/
private LocalParWorker createParallelWorker(long pwID, LocalTaskQueue<Task> queue, ExecutionContext ec) throws DMLRuntimeException {
    LocalParWorker pw = null;
    try {
        //create deep copies of required elements: child blocks
        ArrayList<ProgramBlock> cpChildBlocks = null;
        HashSet<String> fnNames = new HashSet<String>();
        if (USE_PB_CACHE) {
            if (_pbcache.containsKey(pwID)) {
                cpChildBlocks = _pbcache.get(pwID);
            } else {
                cpChildBlocks = ProgramConverter.rcreateDeepCopyProgramBlocks(_childBlocks, pwID, _IDPrefix, new HashSet<String>(), fnNames, false, false);
                _pbcache.put(pwID, cpChildBlocks);
            }
        } else {
            cpChildBlocks = ProgramConverter.rcreateDeepCopyProgramBlocks(_childBlocks, pwID, _IDPrefix, new HashSet<String>(), fnNames, false, false);
        }
        //deep copy execution context (including prepare parfor update-in-place)
        ExecutionContext cpEc = ProgramConverter.createDeepCopyExecutionContext(ec);
        //if the accelerator is enabled, obtain a GPUContext from the pool and set it in the execution context
        if (DMLScript.USE_ACCELERATOR) {
            cpEc.setGPUContext(GPUContextPool.getFromPool());
        }
        //prepare basic update-in-place variables (vars dropped on result merge)
        prepareUpdateInPlaceVariables(cpEc, pwID);
        //copy compiler configuration (for jmlc w/o global config)
        CompilerConfig cconf = ConfigurationManager.getCompilerConfig();
        //create the actual parallel worker
        ParForBody body = new ParForBody(cpChildBlocks, _resultVars, cpEc);
        pw = new LocalParWorker(pwID, queue, body, cconf, MAX_RETRYS_ON_ERROR, _monitor);
        pw.setFunctionNames(fnNames);
    } catch (Exception ex) {
        throw new DMLRuntimeException(ex);
    }
    return pw;
}
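The USE_PB_CACHE branch is essentially a per-worker memoization of the expensive deep copy: the first request for a worker id creates and stores the copy, and later parfor executions reuse it. A minimal, self-contained sketch of that caching pattern with generic names (not the actual SystemML classes):

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;

/** Per-worker cache of expensive deep copies, keyed by worker id. */
public class WorkerCopyCache<T> {
    private final Map<Long, List<T>> cache = new HashMap<>();
    private final Function<Long, List<T>> deepCopyFactory;
    private final boolean useCache;

    public WorkerCopyCache(boolean useCache, Function<Long, List<T>> deepCopyFactory) {
        this.useCache = useCache;
        this.deepCopyFactory = deepCopyFactory;
    }

    /** Returns a worker-private copy, creating it only on the first request for this id. */
    public List<T> getCopy(long workerId) {
        if (!useCache)
            return deepCopyFactory.apply(workerId);           // always create a fresh copy
        return cache.computeIfAbsent(workerId, deepCopyFactory); // create once, then recycle
    }
}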
Use of org.apache.sysml.conf.CompilerConfig in project incubator-systemml by apache.
The class ScriptExecutor, method setGlobalFlags.
/**
* Set the global flags (for example: statistics, gpu, etc).
*/
protected void setGlobalFlags() {
    oldStatistics = DMLScript.STATISTICS;
    DMLScript.STATISTICS = statistics;
    oldForceGPU = DMLScript.FORCE_ACCELERATOR;
    DMLScript.FORCE_ACCELERATOR = forceGPU;
    oldGPU = DMLScript.USE_ACCELERATOR;
    DMLScript.USE_ACCELERATOR = gpu;
    DMLScript.STATISTICS_COUNT = statisticsMaxHeavyHitters;
    // set the global compiler configuration
    try {
        OptimizerUtils.resetStaticCompilerFlags();
        CompilerConfig cconf = OptimizerUtils.constructCompilerConfig(ConfigurationManager.getCompilerConfig(), config);
        ConfigurationManager.setGlobalConfig(cconf);
    } catch (DMLRuntimeException ex) {
        throw new RuntimeException(ex);
    }
    // set the GPUs to use for this process (a range, all GPUs, comma-separated list, or a specific GPU)
    GPUContextPool.AVAILABLE_GPUS = config.getTextValue(DMLConfig.AVAILABLE_GPUS);
    String evictionPolicy = config.getTextValue(DMLConfig.GPU_EVICTION_POLICY).toUpperCase();
    try {
        DMLScript.GPU_EVICTION_POLICY = EvictionPolicy.valueOf(evictionPolicy);
    } catch (IllegalArgumentException e) {
        throw new RuntimeException("Unsupported eviction policy: " + evictionPolicy);
    }
}
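The old* fields captured at the top of this method exist so the previous values can be restored once script execution finishes. The actual restore method is not shown in this listing, so the following is only a hypothetical sketch of what the counterpart could look like, using the same field names as above:

protected void resetGlobalFlags() {
    // hypothetical counterpart to setGlobalFlags(): restore the captured DMLScript globals
    DMLScript.STATISTICS = oldStatistics;
    DMLScript.FORCE_ACCELERATOR = oldForceGPU;
    DMLScript.USE_ACCELERATOR = oldGPU;
}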
Use of org.apache.sysml.conf.CompilerConfig in project incubator-systemml by apache.
The class DMLScript, method execute.
// /////////////////////////////
// private internal interface
// (core compilation and execute)
// //////
/**
* The running body of DMLScript execution. This method should be called after execution properties have been correctly set,
* and customized parameters have been put into _argVals
*
* @param dmlScriptStr DML script string
* @param fnameOptConfig configuration file
* @param argVals map of argument values
* @param allArgs arguments
* @param scriptType type of script (DML or PyDML)
* @throws IOException if IOException occurs
*/
private static void execute(String dmlScriptStr, String fnameOptConfig, Map<String, String> argVals, String[] allArgs, ScriptType scriptType) throws IOException {
    SCRIPT_TYPE = scriptType;

    // print basic time and environment info
    printStartExecInfo(dmlScriptStr);

    // Step 1: parse configuration files & write any configuration specific global variables
    DMLConfig dmlconf = DMLConfig.readConfigurationFile(fnameOptConfig);
    ConfigurationManager.setGlobalConfig(dmlconf);
    CompilerConfig cconf = OptimizerUtils.constructCompilerConfig(dmlconf);
    ConfigurationManager.setGlobalConfig(cconf);
    LOG.debug("\nDML config: \n" + dmlconf.getConfigInfo());

    // set the GPUs to use for this process (a range, all GPUs, comma-separated list, or a specific GPU)
    GPUContextPool.AVAILABLE_GPUS = dmlconf.getTextValue(DMLConfig.AVAILABLE_GPUS);
    String evictionPolicy = dmlconf.getTextValue(DMLConfig.GPU_EVICTION_POLICY).toUpperCase();
    try {
        DMLScript.GPU_EVICTION_POLICY = EvictionPolicy.valueOf(evictionPolicy);
    } catch (IllegalArgumentException e) {
        throw new RuntimeException("Unsupported eviction policy: " + evictionPolicy);
    }

    // Step 2: set local/remote memory if requested (for compile in AM context)
    if (dmlconf.getBooleanValue(DMLConfig.YARN_APPMASTER)) {
        DMLAppMasterUtils.setupConfigRemoteMaxMemory(dmlconf);
    }

    // Step 3: parse dml script
    Statistics.startCompileTimer();
    ParserWrapper parser = ParserFactory.createParser(scriptType);
    DMLProgram prog = parser.parse(DML_FILE_PATH_ANTLR_PARSER, dmlScriptStr, argVals);

    // Step 4: construct HOP DAGs (incl LVA, validate, and setup)
    DMLTranslator dmlt = new DMLTranslator(prog);
    dmlt.liveVariableAnalysis(prog);
    dmlt.validateParseTree(prog);
    dmlt.constructHops(prog);

    // init working directories (before usage by following compilation steps)
    initHadoopExecution(dmlconf);

    // Step 5: rewrite HOP DAGs (incl IPA and memory estimates)
    dmlt.rewriteHopsDAG(prog);

    // Step 6: construct lops (incl exec type and op selection)
    dmlt.constructLops(prog);
    if (LOG.isDebugEnabled()) {
        LOG.debug("\n********************** LOPS DAG *******************");
        dmlt.printLops(prog);
        dmlt.resetLopsDAGVisitStatus(prog);
    }

    // Step 7: generate runtime program, incl codegen
    Program rtprog = dmlt.getRuntimeProgram(prog, dmlconf);

    // launch SystemML appmaster (if requested and not already in launched AM)
    if (dmlconf.getBooleanValue(DMLConfig.YARN_APPMASTER)) {
        if (!isActiveAM() && DMLYarnClientProxy.launchDMLYarnAppmaster(dmlScriptStr, dmlconf, allArgs, rtprog))
            return; // if AM launch unsuccessful, fall back to normal execute
        if (isActiveAM()) // in AM context (not failed AM launch)
            DMLAppMasterUtils.setupProgramMappingRemoteMaxMemory(rtprog);
    }

    // Step 9: prepare statistics [and optional explain output]
    // count number of compiled MR jobs / SP instructions
    ExplainCounts counts = Explain.countDistributedOperations(rtprog);
    Statistics.resetNoOfCompiledJobs(counts.numJobs);
    // explain plan of program (hops or runtime)
    if (EXPLAIN != ExplainType.NONE)
        LOG.info(Explain.display(prog, rtprog, EXPLAIN, counts));
    Statistics.stopCompileTimer();

    // double costs = CostEstimationWrapper.getTimeEstimate(rtprog, ExecutionContextFactory.createContext());
    // System.out.println("Estimated costs: "+costs);

    // Step 10: execute runtime program
    ExecutionContext ec = null;
    try {
        ec = ExecutionContextFactory.createContext(rtprog);
        ScriptExecutorUtils.executeRuntimeProgram(rtprog, ec, dmlconf, STATISTICS ? STATISTICS_COUNT : 0);
    } finally {
        if (ec != null && ec instanceof SparkExecutionContext)
            ((SparkExecutionContext) ec).close();
        LOG.info("END DML run " + getDateTime());
        // cleanup scratch_space and all working dirs
        cleanupHadoopExecution(dmlconf);
    }
}
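In practice this method is reached from the DMLScript command line (or its main method), which parses the flags and passes the script text, config path, and arguments down to execute(). A small sketch of such an invocation, assuming the standard -s/-f, -config, and -stats flags of the DMLScript CLI and that main declares IOException:

import java.io.IOException;

import org.apache.sysml.api.DMLScript;

public class RunDmlExample {
    public static void main(String[] args) throws IOException {
        // roughly equivalent to: hadoop jar SystemML.jar -s "..." -stats
        // an optional -config <file> is what execute() later reads via
        // DMLConfig.readConfigurationFile(fnameOptConfig)
        DMLScript.main(new String[] {
            "-s", "x = sum(seq(1, 100)); print(x);", // inline DML script (assumed -s flag)
            "-stats"                                 // enable statistics output
        });
    }
}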
Use of org.apache.sysml.conf.CompilerConfig in project incubator-systemml by apache.
The class DMLAppMasterUtils, method setupConfigRemoteMaxMemory.
public static void setupConfigRemoteMaxMemory(DMLConfig conf) {
    // set remote max memory (if in yarn appmaster context)
    if (DMLScript.isActiveAM()) {
        // set optimization level (for awareness of resource optimization)
        CompilerConfig cconf = OptimizerUtils.constructCompilerConfig(conf);
        ConfigurationManager.setGlobalConfig(cconf);
        if (isResourceOptimizerEnabled()) {
            // handle optimized memory (mr memory budget per program block)
            // ensure cluster has been analyzed
            InfrastructureAnalyzer.getRemoteMaxMemoryMap();
            String memStr = conf.getTextValue(DMLConfig.YARN_MAPREDUCEMEM);
            ResourceConfig rc = ResourceConfig.deserialize(memStr);
            // keep resource config for later program mapping
            _rc = rc;
        } else {
            // handle user configuration
            if (conf.getIntValue(DMLConfig.YARN_MAPREDUCEMEM) > 0) {
                // ensure cluster has been analyzed
                InfrastructureAnalyzer.getRemoteMaxMemoryMap();
                // set max map and reduce memory (to be used by the compiler)
                // see GMR and parfor EMR and DPEMR for runtime configuration
                long mem = ((long) conf.getIntValue(DMLConfig.YARN_MAPREDUCEMEM)) * 1024 * 1024;
                InfrastructureAnalyzer.setRemoteMaxMemoryMap(mem);
                InfrastructureAnalyzer.setRemoteMaxMemoryReduce(mem);
            }
        }
    }
}
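One detail worth noting in the user-configuration branch: the configured memory arrives as an int number of megabytes, and the explicit long cast before multiplying by 1024 * 1024 is what prevents 32-bit overflow for budgets above 2047 MB. A small standalone illustration with a hypothetical value (plain Java, no SystemML dependencies):

public class MemoryConversionExample {
    public static void main(String[] args) {
        int configuredMB = 4096; // e.g., a YARN map/reduce memory budget in MB

        // Without the long cast, the multiplication happens in 32-bit int
        // arithmetic and overflows for anything above 2047 MB.
        long wrong = configuredMB * 1024 * 1024;          // 4 GiB wraps around to 0 in int
        long right = ((long) configuredMB) * 1024 * 1024; // 4294967296 bytes

        System.out.println("without cast: " + wrong);
        System.out.println("with cast:    " + right);
    }
}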