Use of org.apache.sysml.runtime.controlprogram.ParForProgramBlock.PDataPartitioner in project incubator-systemml by Apache.
The class OptimizerRuleBased, method rewriteSetExecutionStategy.
///////
//REWRITE set execution strategy
///
protected boolean rewriteSetExecutionStategy(OptNode n, double M0, double M, double M2, double M3, boolean flagLIX)
    throws DMLRuntimeException
{
    boolean isCPOnly = n.isCPOnly();
    boolean isCPOnlyPossible = isCPOnly || isCPOnlyPossible(n, _rm);
    String datapartitioner = n.getParam(ParamType.DATA_PARTITIONER);
    ExecType REMOTE = getRemoteExecType();
    PDataPartitioner REMOTE_DP = OptimizerUtils.isSparkExecutionMode() ?
        PDataPartitioner.REMOTE_SPARK : PDataPartitioner.REMOTE_MR;

    //deciding on the execution strategy
    if( ConfigurationManager.isParallelParFor()      //allowed remote parfor execution
        && ( (isCPOnly && M <= _rm)                  //Required: all inst already in cp and fit in remote mem
           ||(isCPOnly && M3 <= _rm)                 //Required: all inst already in cp and fit partitioned in remote mem
           ||(isCPOnlyPossible && M2 <= _rm)) )      //Required: all inst forced to cp fit in remote mem
    {
        //at this point all required conditions for REMOTE_MR given, now it's an opt decision
        int cpk = (int) Math.min(_lk, Math.floor(_lm / M)); //estimated local exploited par

        //(the factor of 2 is to account for hyper-threading and in order to prevent too eager remote parfor)
        if( 2 * cpk < _lk && 2 * cpk < _N && 2 * cpk < _rk ) //incl conditional partitioning
        {
            n.setExecType(REMOTE); //remote parfor
        }
        //MR if problem is large enough and remote parallelism is larger than local
        else if( _lk < _N && _lk < _rk && M <= _rm && isLargeProblem(n, M0) )
        {
            n.setExecType(REMOTE); //remote parfor
        }
        //MR if MR operations in local, but CP only in remote (less overall MR jobs)
        else if( !isCPOnly && isCPOnlyPossible )
        {
            n.setExecType(REMOTE); //remote parfor
        }
        //MR if necessary for LIX rewrite (LIX true iff cp only and rm valid)
        else if( flagLIX )
        {
            n.setExecType(REMOTE); //remote parfor
        }
        //MR if remote data partitioning, because data will be distributed on all nodes
        else if( datapartitioner != null && datapartitioner.equals(REMOTE_DP.toString())
            && !InfrastructureAnalyzer.isLocalMode() )
        {
            n.setExecType(REMOTE); //remote parfor
        }
        //otherwise CP
        else
        {
            n.setExecType(ExecType.CP); //local parfor
        }
    }
    else //mr instructions in body, or rm too small
    {
        n.setExecType(ExecType.CP); //local parfor
    }

    //actual programblock modification
    long id = n.getID();
    ParForProgramBlock pfpb = (ParForProgramBlock) OptTreeConverter
        .getAbstractPlanMapping().getMappedProg(id)[1];
    PExecMode mode = n.getExecType().toParForExecMode();
    pfpb.setExecMode(mode);

    //decide if recompilation according to remote mem budget necessary
    boolean requiresRecompile = (mode == PExecMode.REMOTE_MR || mode == PExecMode.REMOTE_SPARK) && !isCPOnly;

    _numEvaluatedPlans++;
    LOG.debug(getOptMode() + " OPT: rewrite 'set execution strategy' - result=" + mode
        + " (recompile=" + requiresRecompile + ")");
    return requiresRecompile;
}
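To make the remote-vs-local decision above concrete, the following is a minimal standalone sketch (not SystemML code) that reproduces the arithmetic of the local-parallelism estimate and the factor-of-2 threshold. All values standing in for the optimizer fields (_lk, _lm, _rk, _N, _rm, M) are hypothetical.

// Illustrative sketch only (not SystemML code): reproduces the decision arithmetic above
// with hypothetical values for the optimizer fields _lk, _lm, _rk, _N, _rm and the body estimate M.
public class ExecStrategySketch {
    public static void main(String[] args) {
        int lk = 16;          // hypothetical local parallelism (_lk)
        double lm = 8e9;      // hypothetical local memory budget in bytes (_lm)
        int rk = 400;         // hypothetical remote parallelism (_rk)
        long N = 10000;       // hypothetical number of parfor iterations (_N)
        double rm = 2e9;      // hypothetical remote memory budget per task (_rm)
        double M = 1.5e9;     // hypothetical memory estimate of the parfor body

        // estimated local exploited parallelism, as computed in the rewrite
        int cpk = (int) Math.min(lk, Math.floor(lm / M)); // min(16, 5) = 5

        // body must fit in remote memory; the factor of 2 accounts for hyper-threading
        // and prevents too eager remote parfor
        boolean remoteParFor = (M <= rm) && (2 * cpk < lk) && (2 * cpk < N) && (2 * cpk < rk);
        System.out.println("cpk=" + cpk + " -> remote parfor preferred: " + remoteParFor);
    }
}

With these numbers the local memory budget only supports 5 concurrent workers, so remote execution is preferred; shrinking M below lm/lk would flip the decision back to local parfor.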
Use of org.apache.sysml.runtime.controlprogram.ParForProgramBlock.PDataPartitioner in project incubator-systemml by Apache.
The class OptimizerRuleBased, method rewriteSetDataPartitioner.
///////
//REWRITE set data partitioner
///
protected boolean rewriteSetDataPartitioner(OptNode n, LocalVariableMap vars,
    HashMap<String, PartitionFormat> partitionedMatrices, double thetaM)
    throws DMLRuntimeException
{
    if( n.getNodeType() != NodeType.PARFOR )
        LOG.warn(getOptMode() + " OPT: Data partitioner can only be set for a ParFor node.");

    boolean blockwise = false;

    //preparations
    long id = n.getID();
    Object[] o = OptTreeConverter.getAbstractPlanMapping().getMappedProg(id);
    ParForStatementBlock pfsb = (ParForStatementBlock) o[0];
    ParForProgramBlock pfpb = (ParForProgramBlock) o[1];

    //search for candidates
    boolean apply = false;
    if( OptimizerUtils.isHybridExecutionMode()  //only if we are allowed to recompile
        && (_N >= PROB_SIZE_THRESHOLD_PARTITIONING || _Nmax >= PROB_SIZE_THRESHOLD_PARTITIONING) ) //only if beneficial wrt problem size
    {
        ArrayList<String> cand = pfsb.getReadOnlyParentVars();
        HashMap<String, PartitionFormat> cand2 = new HashMap<String, PartitionFormat>();
        for( String c : cand ) {
            PartitionFormat dpf = pfsb.determineDataPartitionFormat(c);
            if( dpf != PartitionFormat.NONE && dpf._dpf != PDataPartitionFormat.BLOCK_WISE_M_N ) {
                cand2.put(c, dpf);
            }
        }
        apply = rFindDataPartitioningCandidates(n, cand2, vars, thetaM);
        if( apply )
            partitionedMatrices.putAll(cand2);
    }

    PDataPartitioner REMOTE = OptimizerUtils.isSparkExecutionMode() ?
        PDataPartitioner.REMOTE_SPARK : PDataPartitioner.REMOTE_MR;
    PDataPartitioner pdp = (apply) ? REMOTE : PDataPartitioner.NONE;
    //NOTE: since partitioning is only applied in case of MR index access, we assume a large
    //      matrix and hence always apply REMOTE_MR (the benefit for large matrices outweighs
    //      potentially unnecessary MR jobs for smaller matrices)

    //modify rtprog
    pfpb.setDataPartitioner(pdp);
    //modify plan
    n.addParam(ParamType.DATA_PARTITIONER, pdp.toString());

    _numEvaluatedPlans++;
    LOG.debug(getOptMode() + " OPT: rewrite 'set data partitioner' - result=" + pdp.toString()
        + " (" + ProgramConverter.serializeStringCollection(partitionedMatrices.keySet()) + ")");
    return blockwise;
}
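The candidate search above keeps only read-only parent variables whose determined partition format is supported (neither NONE nor BLOCK_WISE_M_N). The following self-contained sketch mimics that filtering with a plain enum and hypothetical variable names; it does not use the SystemML API.

// Self-contained sketch (not the SystemML API): mimics the candidate filtering above.
// Variable names and their determined formats are hypothetical.
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class PartitionCandidateSketch {
    enum Format { NONE, ROW_WISE, COLUMN_WISE, BLOCK_WISE_M_N }

    public static void main(String[] args) {
        List<String> readOnlyVars = List.of("X", "y", "W");   // hypothetical read-only parent vars
        Map<String, Format> determined = Map.of(               // hypothetical per-variable analysis result
            "X", Format.ROW_WISE,
            "y", Format.NONE,
            "W", Format.BLOCK_WISE_M_N);

        Map<String, Format> candidates = new HashMap<>();
        for (String c : readOnlyVars) {
            Format dpf = determined.get(c);
            if (dpf != Format.NONE && dpf != Format.BLOCK_WISE_M_N)
                candidates.put(c, dpf); // only "X" survives the filter
        }
        System.out.println("partitioning candidates: " + candidates.keySet());
    }
}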
Use of org.apache.sysml.runtime.controlprogram.ParForProgramBlock.PDataPartitioner in project incubator-systemml by Apache.
The class OptimizerRuleBased, method rewriteSetFusedDataPartitioningExecution.
///////
//REWRITE set fused data partitioning / execution
///
/**
* This dedicated execution mode can only be applied if all of the
* following conditions are true:
* - Only cp instructions in the parfor body
* - Only one partitioned input
* - number of iterations is equal to number of partitions (nrow/ncol)
* - partitioned matrix access via plain iteration variables (no composed expressions)
* (this ensures that each partition is exactly read once)
* - no left indexing (since by default static task partitioning)
*
* Furthermore, it should only be chosen if we have already decided on remote partitioning
* and would otherwise create a large number of partition files.
*
* NOTE: We already respect the reducer memory budget for plan correctness. However,
* we miss optimization potential if the reducer budget is larger than the mapper budget
* (if we were not able to select REMOTE_MR as execution strategy wrt mapper budget)
* TODO modify 'set exec strategy' and related rewrites for conditional data partitioning.
*
* @param pn internal representation of a plan alternative for program blocks and instructions
* @param M ?
* @param flagLIX ?
* @param partitionedMatrices map of data partition formats
* @param vars local variable map
* @throws DMLRuntimeException if DMLRuntimeException occurs
*/
protected void rewriteSetFusedDataPartitioningExecution(OptNode pn, double M, boolean flagLIX,
    HashMap<String, PartitionFormat> partitionedMatrices, LocalVariableMap vars)
    throws DMLRuntimeException
{
    //assertions (warnings of corrupt optimizer decisions)
    if( pn.getNodeType() != NodeType.PARFOR )
        LOG.warn(getOptMode() + " OPT: Fused data partitioning and execution is only applicable for a ParFor node.");

    boolean apply = false;
    String partitioner = pn.getParam(ParamType.DATA_PARTITIONER);
    PDataPartitioner REMOTE_DP = OptimizerUtils.isSparkExecutionMode() ?
        PDataPartitioner.REMOTE_SPARK : PDataPartitioner.REMOTE_MR;
    PExecMode REMOTE_DPE = OptimizerUtils.isSparkExecutionMode() ?
        PExecMode.REMOTE_SPARK_DP : PExecMode.REMOTE_MR_DP;

    //try to merge MR data partitioning and MR exec
    if( (pn.getExecType() == ExecType.MR && M < _rm2     //fits into remote memory of reducers
        || pn.getExecType() == ExecType.SPARK)           //MR/SP EXEC and CP body
        && partitioner != null && partitioner.equals(REMOTE_DP.toString()) //MR/SP partitioning
        && partitionedMatrices.size() == 1 )              //only one partitioned matrix
    {
        ParForProgramBlock pfpb = (ParForProgramBlock) OptTreeConverter
            .getAbstractPlanMapping().getMappedProg(pn.getID())[1];

        //partitioned matrix
        String moVarname = partitionedMatrices.keySet().iterator().next();
        PartitionFormat moDpf = partitionedMatrices.get(moVarname);
        MatrixObject mo = (MatrixObject) vars.get(moVarname);

        //check if access via iteration variable and sizes match
        String iterVarname = pfpb.getIterablePredicateVars()[0];
        if( rIsAccessByIterationVariable(pn, moVarname, iterVarname)
            && ((moDpf == PartitionFormat.ROW_WISE && mo.getNumRows() == _N)
             || (moDpf == PartitionFormat.COLUMN_WISE && mo.getNumColumns() == _N)
             || (moDpf._dpf == PDataPartitionFormat.ROW_BLOCK_WISE_N && mo.getNumRows() <= _N * moDpf._N)
             || (moDpf._dpf == PDataPartitionFormat.COLUMN_BLOCK_WISE_N && mo.getNumColumns() <= _N * moDpf._N)) )
        {
            int k = (int) Math.min(_N, _rk2);

            pn.addParam(ParamType.DATA_PARTITIONER, REMOTE_DPE.toString() + "(fused)");
            pn.setK(k);

            //set fused exec type
            pfpb.setExecMode(REMOTE_DPE);
            pfpb.setDataPartitioner(PDataPartitioner.NONE);
            pfpb.enableColocatedPartitionedMatrix(moVarname);
            pfpb.setDegreeOfParallelism(k);

            apply = true;
        }
    }

    LOG.debug(getOptMode() + " OPT: rewrite 'set fused data partitioning and execution' - result=" + apply);
}
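As a rough illustration of the eligibility test above, this standalone sketch (hypothetical values, not the SystemML API) checks the row-wise size condition, i.e. the number of parfor iterations must equal the number of rows of the single partitioned input, and derives the fused degree of parallelism as the minimum of the iteration count and the remote reducer parallelism.

// Illustrative sketch with hypothetical values (not the SystemML API): checks the ROW_WISE
// case of the size condition above and derives the fused degree of parallelism.
public class FusedPartitioningSketch {
    public static void main(String[] args) {
        long N = 100000;        // hypothetical number of parfor iterations (_N)
        long numRows = 100000;  // hypothetical row count of the single partitioned matrix
        int rk2 = 200;          // hypothetical remote reducer parallelism (_rk2)

        boolean rowWiseMatch = (numRows == N);  // ROW_WISE branch of the condition above
        int k = (int) Math.min(N, rk2);         // degree of parallelism if the rewrite applies
        System.out.println("fused rewrite applies: " + rowWiseMatch + ", k=" + k);
    }
}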