Use of org.apache.sysml.runtime.matrix.data.InputInfo in project incubator-systemml by apache.
The class ResultMergeLocalMemory, method createNewMatrixObject.
private MatrixObject createNewMatrixObject(MatrixBlock data) {
    ValueType vt = _output.getValueType();
    MetaDataFormat metadata = (MetaDataFormat) _output.getMetaData();
    MatrixObject moNew = new MatrixObject(vt, _outputFName);

    // create deep copy of metadata obj
    MatrixCharacteristics mcOld = metadata.getMatrixCharacteristics();
    OutputInfo oiOld = metadata.getOutputInfo();
    InputInfo iiOld = metadata.getInputInfo();
    MatrixCharacteristics mc = new MatrixCharacteristics(mcOld.getRows(), mcOld.getCols(),
        mcOld.getRowsPerBlock(), mcOld.getColsPerBlock());
    mc.setNonZeros(data.getNonZeros());
    MetaDataFormat meta = new MetaDataFormat(mc, oiOld, iiOld);
    moNew.setMetaData(meta);

    // adjust dense/sparse representation
    data.examSparsity();

    // release new output
    moNew.acquireModify(data);
    moNew.release();
    return moNew;
}
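The pattern above (copy the MatrixCharacteristics, carry over the OutputInfo/InputInfo pair, then hand the block to the buffer pool via acquireModify/release) is how SystemML materializes a new in-memory MatrixObject. A minimal standalone sketch of the same idea; the dimensions, file path, and block sizes are illustrative only:

MatrixBlock blk = new MatrixBlock(1000, 100, false); // 1000 x 100, initially dense
blk.examSparsity(); // let the block choose its dense/sparse representation

MatrixCharacteristics mc = new MatrixCharacteristics(1000, 100, 1000, 1000); // rows, cols, brlen, bclen
mc.setNonZeros(blk.getNonZeros());
MetaDataFormat md = new MetaDataFormat(mc, OutputInfo.BinaryBlockOutputInfo, InputInfo.BinaryBlockInputInfo);

MatrixObject mo = new MatrixObject(ValueType.DOUBLE, "/tmp/out"); // path is a placeholder
mo.setMetaData(md);
mo.acquireModify(blk); // pin the block into the buffer pool and mark it dirty
mo.release(); // unpin; export/eviction is handled by the buffer pool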
Use of org.apache.sysml.runtime.matrix.data.InputInfo in project incubator-systemml by apache.
The class ResultMergeRemoteMR, method executeParallelMerge.
@Override
public MatrixObject executeParallelMerge(int par) {
    // always create new matrix object (required for nested parallelism)
    MatrixObject moNew = null;
    if (LOG.isTraceEnabled())
        LOG.trace("ResultMerge (remote, mr): Execute serial merge for output "
            + _output.hashCode() + " (fname=" + _output.getFileName() + ")");
    try {
        // collect all relevant inputs
        Collection<String> srcFnames = new LinkedList<>();
        ArrayList<MatrixObject> inMO = new ArrayList<>();
        for (MatrixObject in : _inputs) {
            // check for empty inputs (no iterations executed)
            if (in != null && in != _output) {
                // ensure that input file resides on disk
                in.exportData();
                // add to merge list
                srcFnames.add(in.getFileName());
                inMO.add(in);
            }
        }
        if (!srcFnames.isEmpty()) {
            // ensure that output file (for comparison) resides on disk
            _output.exportData();
            // actual merge
            MetaDataFormat metadata = (MetaDataFormat) _output.getMetaData();
            MatrixCharacteristics mcOld = metadata.getMatrixCharacteristics();
            String fnameCompare = _output.getFileName();
            if (mcOld.getNonZeros() == 0)
                // no compare required
                fnameCompare = null;
            executeMerge(fnameCompare, _outputFName, srcFnames.toArray(new String[0]),
                metadata.getInputInfo(), metadata.getOutputInfo(),
                mcOld.getRows(), mcOld.getCols(), mcOld.getRowsPerBlock(), mcOld.getColsPerBlock());
            // create new output matrix (e.g., to prevent potential export<->read file access conflict)
            moNew = new MatrixObject(_output.getValueType(), _outputFName);
            OutputInfo oiOld = metadata.getOutputInfo();
            InputInfo iiOld = metadata.getInputInfo();
            MatrixCharacteristics mc = new MatrixCharacteristics(mcOld);
            mc.setNonZeros(_isAccum ? -1 : computeNonZeros(_output, inMO));
            MetaDataFormat meta = new MetaDataFormat(mc, oiOld, iiOld);
            moNew.setMetaData(meta);
        } else {
            // return old matrix, to prevent copy
            moNew = _output;
        }
    } catch (Exception ex) {
        throw new DMLRuntimeException(ex);
    }
    return moNew;
}
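Note the nonZeros shortcut above: if the original output matrix was entirely zero, every non-zero cell produced by a worker is by definition an update, so the MR job can skip the compare pass altogether. A hedged sketch of that decision in isolation (the helper name pickCompareFile is hypothetical):

// hypothetical helper mirroring the compare-file decision above
private static String pickCompareFile(MatrixObject output) {
    MetaDataFormat md = (MetaDataFormat) output.getMetaData();
    long nnz = md.getMatrixCharacteristics().getNonZeros();
    // an all-zero original output needs no comparison: every non-zero
    // result cell must have been written by some parfor iteration
    return (nnz == 0) ? null : output.getFileName();
}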
Use of org.apache.sysml.runtime.matrix.data.InputInfo in project incubator-systemml by apache.
The class ResultMergeRemoteSpark, method executeMerge.
@SuppressWarnings("unchecked")
protected RDDObject executeMerge(MatrixObject compare, MatrixObject[] inputs, long rlen, long clen, int brlen, int bclen) {
    String jobname = "ParFor-RMSP";
    long t0 = DMLScript.STATISTICS ? System.nanoTime() : 0;
    SparkExecutionContext sec = (SparkExecutionContext) _ec;
    boolean withCompare = (compare != null);
    RDDObject ret = null;

    // determine degree of parallelism
    int numRed = (int) determineNumReducers(rlen, clen, brlen, bclen, _numReducers);

    // sanity check for empty src files
    if (inputs == null || inputs.length == 0)
        throw new DMLRuntimeException("Execute merge should never be called with no inputs.");

    try {
        // note: initial implementation via union over all result rdds discarded due to
        // stack overflow errors with many parfor tasks, and thus many rdds

        // Step 1: construct input rdd from all result files of parfor workers
        // a) construct job conf with all files
        InputInfo ii = InputInfo.BinaryBlockInputInfo;
        JobConf job = new JobConf(ResultMergeRemoteMR.class);
        job.setJobName(jobname);
        job.setInputFormat(ii.inputFormatClass);
        Path[] paths = new Path[inputs.length];
        for (int i = 0; i < paths.length; i++) {
            // ensure input exists on hdfs (e.g., if in-memory or RDD)
            inputs[i].exportData();
            paths[i] = new Path(inputs[i].getFileName());
            // update rdd handle to allow lazy evaluation by guarding
            // against cleanup of temporary result files
            setRDDHandleForMerge(inputs[i], sec);
        }
        FileInputFormat.setInputPaths(job, paths);

        // b) create rdd from input files w/ deep copy of keys and blocks
        JavaPairRDD<MatrixIndexes, MatrixBlock> rdd = sec.getSparkContext()
            .hadoopRDD(job, ii.inputFormatClass, ii.inputKeyClass, ii.inputValueClass)
            .mapPartitionsToPair(new CopyBlockPairFunction(true), true);

        // Step 2a: merge with compare
        JavaPairRDD<MatrixIndexes, MatrixBlock> out = null;
        if (withCompare) {
            JavaPairRDD<MatrixIndexes, MatrixBlock> compareRdd = (JavaPairRDD<MatrixIndexes, MatrixBlock>)
                sec.getRDDHandleForMatrixObject(compare, InputInfo.BinaryBlockInputInfo);
            // merge values which differ from compare values
            ResultMergeRemoteSparkWCompare cfun = new ResultMergeRemoteSparkWCompare(_isAccum);
            out = rdd.groupByKey(numRed)  // group all result blocks per key
                .join(compareRdd)         // join compare block and result blocks
                .mapToPair(cfun);         // merge result blocks w/ compare
        } else { // Step 2b: merge without compare
            // direct merge in any order (disjointness guaranteed)
            out = _isAccum ? RDDAggregateUtils.sumByKeyStable(rdd, false)
                : RDDAggregateUtils.mergeByKey(rdd, false);
        }

        // Step 3: create output rdd handle w/ lineage
        ret = new RDDObject(out);
        for (int i = 0; i < paths.length; i++)
            ret.addLineageChild(inputs[i].getRDDHandle());
        if (withCompare)
            ret.addLineageChild(compare.getRDDHandle());
    } catch (Exception ex) {
        throw new DMLRuntimeException(ex);
    }

    // maintain statistics
    Statistics.incrementNoOfCompiledSPInst();
    Statistics.incrementNoOfExecutedSPInst();
    if (DMLScript.STATISTICS) {
        Statistics.maintainCPHeavyHitters(jobname, System.nanoTime() - t0);
    }
    return ret;
}
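The note about the discarded union-based design deserves emphasis: folding one RDD per parfor result into a chain of union calls builds lineage whose depth grows with the number of tasks, and traversing that DAG can then overflow the stack. Reading all result files through a single hadoopRDD keeps the lineage flat. A sketch of the rejected shape, for contrast only (resultFiles and sc are illustrative placeholders):

// rejected design: one RDD per result file, folded via union.
// each union adds a lineage level, so thousands of parfor tasks
// can overflow the stack when Spark walks the DAG.
JavaPairRDD<MatrixIndexes, MatrixBlock> merged = null;
for (String fname : resultFiles) {
    JavaPairRDD<MatrixIndexes, MatrixBlock> part = sc.hadoopFile(
        fname, SequenceFileInputFormat.class, MatrixIndexes.class, MatrixBlock.class);
    merged = (merged == null) ? part : merged.union(part);
}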
Use of org.apache.sysml.runtime.matrix.data.InputInfo in project incubator-systemml by apache.
The class VariableCPInstruction, method parseInstruction.
public static VariableCPInstruction parseInstruction(String str) {
    String[] parts = InstructionUtils.getInstructionPartsWithValueType(str);
    String opcode = parts[0];
    VariableOperationCode voc = getVariableOperationCode(opcode);
    if (voc == VariableOperationCode.CreateVariable) {
        if (parts.length < 5) // && parts.length != 10
            throw new DMLRuntimeException("Invalid number of operands in createvar instruction: " + str);
    } else if (voc == VariableOperationCode.MoveVariable) {
        // mvvar tempA A; or mvvar mvar5 "data/out.mtx" "binary"
        if (parts.length != 3 && parts.length != 4)
            throw new DMLRuntimeException("Invalid number of operands in mvvar instruction: " + str);
    } else if (voc == VariableOperationCode.Write) {
        // Write instructions for csv files also include three additional parameters (hasHeader, delimiter, sparse)
        if (parts.length != 5 && parts.length != 8)
            throw new DMLRuntimeException("Invalid number of operands in write instruction: " + str);
    } else {
        if (voc != VariableOperationCode.RemoveVariable)
            InstructionUtils.checkNumFields(parts, getArity(voc)); // no output
    }
    CPOperand in1 = null, in2 = null, in3 = null, in4 = null, out = null;
    switch(voc) {
        case CreateVariable:
            // variable name
            DataType dt = DataType.valueOf(parts[4]);
            ValueType vt = dt == DataType.MATRIX ? ValueType.DOUBLE : ValueType.STRING;
            int extSchema = (dt == DataType.FRAME && parts.length >= 13) ? 1 : 0;
            in1 = new CPOperand(parts[1], vt, dt);
            // file name
            in2 = new CPOperand(parts[2], ValueType.STRING, DataType.SCALAR);
            // file name override flag (always literal)
            in3 = new CPOperand(parts[3], ValueType.BOOLEAN, DataType.SCALAR);
            // format
            String fmt = parts[5];
            if (fmt.equalsIgnoreCase("csv")) {
                // 14 inputs: createvar corresponding to READ -- includes properties hasHeader, delim, fill, and fillValue
                if (parts.length < 15 + extSchema || parts.length > 17 + extSchema)
                    throw new DMLRuntimeException("Invalid number of operands in createvar instruction: " + str);
            } else {
                if (parts.length != 6 && parts.length != 12 + extSchema)
                    throw new DMLRuntimeException("Invalid number of operands in createvar instruction: " + str);
            }
            OutputInfo oi = OutputInfo.stringToOutputInfo(fmt);
            InputInfo ii = OutputInfo.getMatchingInputInfo(oi);
            MatrixCharacteristics mc = new MatrixCharacteristics();
            if (parts.length == 6) {
                // do nothing
            } else if (parts.length >= 11) {
                // matrix characteristics
                mc.setDimension(Long.parseLong(parts[6]), Long.parseLong(parts[7]));
                mc.setBlockSize(Integer.parseInt(parts[8]), Integer.parseInt(parts[9]));
                mc.setNonZeros(Long.parseLong(parts[10]));
            } else {
                throw new DMLRuntimeException("Invalid number of operands in createvar instruction: " + str);
            }
            MetaDataFormat iimd = new MetaDataFormat(mc, oi, ii);
            UpdateType updateType = UpdateType.COPY;
            if (parts.length >= 12)
                updateType = UpdateType.valueOf(parts[11].toUpperCase());
            // handle frame schema
            String schema = (dt == DataType.FRAME && parts.length >= 13) ? parts[parts.length - 1] : null;
            if (fmt.equalsIgnoreCase("csv")) {
                // Createvar instructions for CSV format have either 13 or 14 inputs.
                // 13 inputs: createvar corresponding to WRITE -- includes properties hasHeader, delim, and sparse
                // 14 inputs: createvar corresponding to READ -- includes properties hasHeader, delim, fill, and fillValue
                FileFormatProperties fmtProperties = null;
                if (parts.length == 15 + extSchema) {
                    boolean hasHeader = Boolean.parseBoolean(parts[12]);
                    String delim = parts[13];
                    boolean sparse = Boolean.parseBoolean(parts[14]);
                    fmtProperties = new CSVFileFormatProperties(hasHeader, delim, sparse);
                } else {
                    boolean hasHeader = Boolean.parseBoolean(parts[12]);
                    String delim = parts[13];
                    boolean fill = Boolean.parseBoolean(parts[14]);
                    double fillValue = UtilFunctions.parseToDouble(parts[15]);
                    String naStrings = null;
                    if (parts.length == 17 + extSchema)
                        naStrings = parts[16];
                    fmtProperties = new CSVFileFormatProperties(hasHeader, delim, fill, fillValue, naStrings);
                }
                return new VariableCPInstruction(VariableOperationCode.CreateVariable, in1, in2, in3, iimd, updateType, fmtProperties, schema, opcode, str);
            } else {
                return new VariableCPInstruction(VariableOperationCode.CreateVariable, in1, in2, in3, iimd, updateType, schema, opcode, str);
            }
        case AssignVariable:
            in1 = new CPOperand(parts[1]);
            in2 = new CPOperand(parts[2]);
            break;
        case CopyVariable:
            // Value types are not given here
            in1 = new CPOperand(parts[1], ValueType.UNKNOWN, DataType.UNKNOWN);
            in2 = new CPOperand(parts[2], ValueType.UNKNOWN, DataType.UNKNOWN);
            break;
        case MoveVariable:
            in1 = new CPOperand(parts[1], ValueType.UNKNOWN, DataType.UNKNOWN);
            in2 = new CPOperand(parts[2], ValueType.UNKNOWN, DataType.UNKNOWN);
            if (parts.length > 3)
                in3 = new CPOperand(parts[3], ValueType.UNKNOWN, DataType.UNKNOWN);
            break;
        case RemoveVariable:
            VariableCPInstruction rminst = new VariableCPInstruction(getVariableOperationCode(opcode), null, null, null, out, opcode, str);
            for (int i = 1; i < parts.length; i++)
                rminst.addInput(new CPOperand(parts[i], ValueType.UNKNOWN, DataType.SCALAR));
            return rminst;
        case RemoveVariableAndFile:
            in1 = new CPOperand(parts[1]);
            in2 = new CPOperand(parts[2]);
            // second argument must be a boolean
            if (in2.getValueType() != ValueType.BOOLEAN)
                throw new DMLRuntimeException("Unexpected value type for second argument in: " + str);
            break;
        case CastAsScalarVariable:
        case CastAsMatrixVariable:
        case CastAsFrameVariable:
        case CastAsDoubleVariable:
        case CastAsIntegerVariable:
        case CastAsBooleanVariable:
            // first operand is a variable name => string value type
            in1 = new CPOperand(parts[1]);
            // output variable name
            out = new CPOperand(parts[2]);
            break;
        case Write:
            in1 = new CPOperand(parts[1]);
            in2 = new CPOperand(parts[2]);
            in3 = new CPOperand(parts[3]);
            FileFormatProperties fprops = null;
            if (in3.getName().equalsIgnoreCase("csv")) {
                boolean hasHeader = Boolean.parseBoolean(parts[4]);
                String delim = parts[5];
                boolean sparse = Boolean.parseBoolean(parts[6]);
                fprops = new CSVFileFormatProperties(hasHeader, delim, sparse);
                // description
                in4 = new CPOperand(parts[7]);
            } else {
                fprops = new FileFormatProperties();
                // description
                in4 = new CPOperand(parts[4]);
            }
            VariableCPInstruction inst = new VariableCPInstruction(getVariableOperationCode(opcode), in1, in2, in3, out, null, fprops, null, null, opcode, str);
            inst.addInput(in4);
            return inst;
        case Read:
            in1 = new CPOperand(parts[1]);
            in2 = new CPOperand(parts[2]);
            out = null;
            break;
        case SetFileName:
            // variable name
            in1 = new CPOperand(parts[1]);
            // file name
            in2 = new CPOperand(parts[2], ValueType.UNKNOWN, DataType.UNKNOWN);
            // option: remote or local
            in3 = new CPOperand(parts[3], ValueType.UNKNOWN, DataType.UNKNOWN);
            // return new VariableCPInstruction(getVariableOperationCode(opcode), in1, in2, in3, str);
            break;
    }
    return new VariableCPInstruction(getVariableOperationCode(opcode), in1, in2, in3, out, opcode, str);
}
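The format plumbing in the CreateVariable branch is worth isolating: the instruction names the format only once, as a string, and the read-side descriptor is always derived from the write-side one via OutputInfo.getMatchingInputInfo. A small sketch of that pairing (the loop and the printout are illustrative; the format names are assumed to be among those stringToOutputInfo recognizes):

// illustrative: derive the matching read-side InputInfo for each format name
for (String fmt : new String[] { "textcell", "binarycell", "binaryblock", "csv" }) {
    OutputInfo oi = OutputInfo.stringToOutputInfo(fmt);
    InputInfo ii = OutputInfo.getMatchingInputInfo(oi);
    System.out.println(fmt + " -> " + ii.inputFormatClass.getSimpleName());
}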
Use of org.apache.sysml.runtime.matrix.data.InputInfo in project incubator-systemml by apache.
The class CMCOVMR, method runJob.
public static JobReturn runJob(MRJobInstruction inst, String[] inputs, InputInfo[] inputInfos,
        long[] rlens, long[] clens, int[] brlens, int[] bclens, String instructionsInMapper,
        String cmNcomInstructions, int numReducers, int replication, byte[] resultIndexes,
        String[] outputs, OutputInfo[] outputInfos) throws Exception {
    JobConf job = new JobConf(CMCOVMR.class);
    job.setJobName("CM-COV-MR");

    // whether to use block representation or cell representation
    MRJobConfiguration.setMatrixValueClassForCM_N_COM(job, true);

    // added for handling recordreader instruction
    String[] realinputs = inputs;
    InputInfo[] realinputInfos = inputInfos;
    long[] realrlens = rlens;
    long[] realclens = clens;
    int[] realbrlens = brlens;
    int[] realbclens = bclens;
    byte[] realIndexes = new byte[inputs.length];
    for (byte b = 0; b < realIndexes.length; b++)
        realIndexes[b] = b;

    // set up the input files and their format information
    MRJobConfiguration.setUpMultipleInputs(job, realIndexes, realinputs, realinputInfos,
        realbrlens, realbclens, true, ConvertTarget.WEIGHTEDCELL);

    // set up the dimensions of input matrices
    MRJobConfiguration.setMatricesDimensions(job, realIndexes, realrlens, realclens);

    // set up the block size
    MRJobConfiguration.setBlocksSizes(job, realIndexes, realbrlens, realbclens);

    // set up unary instructions that will be performed in the mapper
    MRJobConfiguration.setInstructionsInMapper(job, instructionsInMapper);

    // set up the aggregate instructions that will happen in the combiner and reducer
    MRJobConfiguration.setCM_N_COMInstructions(job, cmNcomInstructions);

    // set up the replication factor for the results
    job.setInt(MRConfigurationNames.DFS_REPLICATION, replication);

    // set up custom map/reduce configurations
    DMLConfig config = ConfigurationManager.getDMLConfig();
    MRJobConfiguration.setupCustomMRConfigurations(job, config);

    // set up which matrices need to pass from the mapper to the reducer
    HashSet<Byte> mapoutputIndexes = MRJobConfiguration.setUpOutputIndexesForMapper(job, realIndexes,
        instructionsInMapper, null, cmNcomInstructions, resultIndexes);

    // set up the multiple output files and their format information
    MRJobConfiguration.setUpMultipleOutputs(job, resultIndexes, new byte[resultIndexes.length],
        outputs, outputInfos, false);

    // configure mapper and the mapper output key value pairs
    job.setMapperClass(CMCOVMRMapper.class);
    job.setMapOutputKeyClass(TaggedFirstSecondIndexes.class);
    job.setMapOutputValueClass(CM_N_COVCell.class);
    job.setOutputKeyComparatorClass(TaggedFirstSecondIndexes.Comparator.class);
    job.setPartitionerClass(TaggedFirstSecondIndexes.TagPartitioner.class);

    // configure reducer
    job.setReducerClass(CMCOVMRReducer.class);
    // job.setReducerClass(PassThroughReducer.class);

    MatrixCharacteristics[] stats = MRJobConfiguration.computeMatrixCharacteristics(job, realIndexes,
        instructionsInMapper, null, null, cmNcomInstructions, resultIndexes, mapoutputIndexes, false).stats;

    // set up the number of reducers (each output tag is a group)
    MRJobConfiguration.setNumReducers(job, mapoutputIndexes.size(), numReducers);

    // print the complete instruction
    if (LOG.isTraceEnabled())
        inst.printCompleteMRJobInstruction(stats);

    // By default, the job executes in "cluster" mode.
    // Determine if we can optimize and run it in "local" mode.
    MatrixCharacteristics[] inputStats = new MatrixCharacteristics[inputs.length];
    for (int i = 0; i < inputs.length; i++) {
        inputStats[i] = new MatrixCharacteristics(rlens[i], clens[i], brlens[i], bclens[i]);
    }

    // set unique working dir
    MRJobConfiguration.setUniqueWorkingDir(job);

    RunningJob runjob = JobClient.runJob(job);
    return new JobReturn(stats, outputInfos, runjob.isSuccessful());
}
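Every input and output of this job is described positionally: one InputInfo, one dimension pair, and one block size per input file, plus one OutputInfo per result index. A hedged sketch of a single-input invocation (paths, sizes, and the instruction arguments are placeholders; real instruction strings come from the SystemML compiler, not from hand-written code):

// illustrative single-input call; mrInst and cmInstr stand in for a
// compiler-generated MRJobInstruction and cm/cov instruction string
String[] inputs = { "hdfs:/tmp/X" };
InputInfo[] iinfos = { InputInfo.BinaryBlockInputInfo };
long[] rlens = { 10000 }, clens = { 1 };
int[] brlens = { 1000 }, bclens = { 1000 };
byte[] resultIndexes = { 1 };
String[] outputs = { "hdfs:/tmp/cmOut" };
OutputInfo[] oinfos = { OutputInfo.BinaryCellOutputInfo };
JobReturn ret = CMCOVMR.runJob(mrInst, inputs, iinfos, rlens, clens, brlens, bclens,
    null /* no mapper instructions */, cmInstr, 1 /* numReducers */, 1 /* replication */,
    resultIndexes, outputs, oinfos);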