Use of org.apache.sysml.runtime.instructions.cp.DoubleObject in project incubator-systemml by apache.
Class ProgramConverter, method parseDataObject.
/**
 * NOTE: MRJobConfiguration cannot be used for the general case because program blocks and
 * related symbol tables can be hierarchically structured.
 *
 * @param in data object as string
 * @return two-element array: the variable name and the parsed Data object
 */
public static Object[] parseDataObject(String in) {
	Object[] ret = new Object[2];
	StringTokenizer st = new StringTokenizer(in, DATA_FIELD_DELIM);
	String name = st.nextToken();
	DataType datatype = DataType.valueOf(st.nextToken());
	ValueType valuetype = ValueType.valueOf(st.nextToken());
	String valString = st.hasMoreTokens() ? st.nextToken() : "";
	Data dat = null;
	switch( datatype ) {
		case SCALAR: {
			switch( valuetype ) {
				case INT:
					dat = new IntObject(Long.parseLong(valString));
					break;
				case DOUBLE:
					dat = new DoubleObject(Double.parseDouble(valString));
					break;
				case BOOLEAN:
					dat = new BooleanObject(Boolean.parseBoolean(valString));
					break;
				case STRING:
					dat = new StringObject(valString);
					break;
				default:
					throw new DMLRuntimeException("Unable to parse valuetype " + valuetype);
			}
			break;
		}
		case MATRIX: {
			MatrixObject mo = new MatrixObject(valuetype, valString);
			long rows = Long.parseLong(st.nextToken());
			long cols = Long.parseLong(st.nextToken());
			int brows = Integer.parseInt(st.nextToken());
			int bcols = Integer.parseInt(st.nextToken());
			long nnz = Long.parseLong(st.nextToken());
			InputInfo iin = InputInfo.stringToInputInfo(st.nextToken());
			OutputInfo oin = OutputInfo.stringToOutputInfo(st.nextToken());
			PartitionFormat partFormat = PartitionFormat.valueOf(st.nextToken());
			UpdateType inplace = UpdateType.valueOf(st.nextToken());
			MatrixCharacteristics mc = new MatrixCharacteristics(rows, cols, brows, bcols, nnz);
			MetaDataFormat md = new MetaDataFormat(mc, oin, iin);
			mo.setMetaData(md);
			if( partFormat._dpf != PDataPartitionFormat.NONE )
				mo.setPartitioned(partFormat._dpf, partFormat._N);
			mo.setUpdateType(inplace);
			dat = mo;
			break;
		}
		default:
			throw new DMLRuntimeException("Unable to parse datatype " + datatype);
	}
	ret[0] = name;
	ret[1] = dat;
	return ret;
}
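The returned two-element array pairs the variable name with its parsed Data object. Below is a minimal, self-contained sketch of the SCALAR/DOUBLE branch and of how a consumer typically unpacks the result; the variable name and value token are hypothetical stand-ins, not output of an actual serializer.

import org.apache.sysml.runtime.instructions.cp.Data;
import org.apache.sysml.runtime.instructions.cp.DoubleObject;

public class ParseDataObjectSketch {
	public static void main(String[] args) {
		// Hypothetical name and value token, standing in for two fields of the
		// delimiter-separated input string consumed by parseDataObject.
		String name = "pi";
		String valString = "3.14159";

		// Mirror of the SCALAR/DOUBLE branch: parse the token into a DoubleObject.
		Data dat = new DoubleObject(Double.parseDouble(valString));
		Object[] ret = new Object[] { name, dat };

		// Consumers unpack the pair: ret[0] is the variable name, ret[1] the Data object.
		String varName = (String) ret[0];
		double value = ((DoubleObject) ret[1]).getDoubleValue();
		System.out.println(varName + " = " + value);
	}
}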
Use of org.apache.sysml.runtime.instructions.cp.DoubleObject in project incubator-systemml by apache.
Class SpoofCellwise, method execute.
@Override
public ScalarObject execute(ArrayList<MatrixBlock> inputs, ArrayList<ScalarObject> scalarObjects, int k) {
	// sanity check
	if( inputs == null || inputs.size() < 1 )
		throw new RuntimeException("Invalid input arguments.");

	// input preparation
	MatrixBlock a = inputs.get(0);
	SideInput[] b = prepInputMatrices(inputs);
	double[] scalars = prepInputScalars(scalarObjects);
	final int m = a.getNumRows();
	final int n = a.getNumColumns();

	// sparse safe check
	boolean sparseSafe = isSparseSafe() || (b.length == 0
		&& genexec(0, b, scalars, m, n, 0, 0) == 0);

	long inputSize = sparseSafe ? getTotalInputNnz(inputs) : getTotalInputSize(inputs);
	if( inputSize < PAR_NUMCELL_THRESHOLD ) {
		k = 1; // serial execution
	}

	double ret = 0;
	if( k <= 1 ) { // SINGLE-THREADED
		if( inputs.get(0) instanceof CompressedMatrixBlock )
			ret = executeCompressedAndAgg((CompressedMatrixBlock) a, b, scalars, m, n, sparseSafe, 0, m);
		else if( !inputs.get(0).isInSparseFormat() )
			ret = executeDenseAndAgg(a.getDenseBlock(), b, scalars, m, n, sparseSafe, 0, m);
		else
			ret = executeSparseAndAgg(a.getSparseBlock(), b, scalars, m, n, sparseSafe, 0, m);
	}
	else { // MULTI-THREADED
		try {
			ExecutorService pool = CommonThreadPool.get(k);
			ArrayList<ParAggTask> tasks = new ArrayList<>();
			int nk = (a instanceof CompressedMatrixBlock) ? k :
				UtilFunctions.roundToNext(Math.min(8 * k, m / 32), k);
			int blklen = (int) (Math.ceil((double) m / nk));
			if( a instanceof CompressedMatrixBlock )
				blklen = BitmapEncoder.getAlignedBlocksize(blklen);
			for( int i = 0; i < nk & i * blklen < m; i++ )
				tasks.add(new ParAggTask(a, b, scalars, m, n, sparseSafe, i * blklen, Math.min((i + 1) * blklen, m)));
			// execute tasks
			List<Future<Double>> taskret = pool.invokeAll(tasks);
			pool.shutdown();
			// aggregate partial results
			ValueFunction vfun = getAggFunction();
			if( vfun instanceof KahanFunction ) {
				KahanObject kbuff = new KahanObject(0, 0);
				KahanPlus kplus = KahanPlus.getKahanPlusFnObject();
				for( Future<Double> task : taskret )
					kplus.execute2(kbuff, task.get());
				ret = kbuff._sum;
			}
			else {
				for( Future<Double> task : taskret )
					ret = vfun.execute(ret, task.get());
			}
		}
		catch(Exception ex) {
			throw new DMLRuntimeException(ex);
		}
	}

	// correction for min/max: an unseen 0 might be the max or min value
	if( (_aggOp == AggOp.MIN || _aggOp == AggOp.MAX) && sparseSafe
		&& a.getNonZeros() < a.getNumRows() * a.getNumColumns() )
		ret = getAggFunction().execute(ret, 0);

	return new DoubleObject(ret);
}
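The multi-threaded path above aggregates per-task partial sums with Kahan-compensated addition before wrapping the final value in a DoubleObject. The following standalone sketch reproduces just that aggregation step on a hypothetical list of partial results (the values are made up for illustration).

import java.util.Arrays;
import java.util.List;
import org.apache.sysml.runtime.functionobjects.KahanPlus;
import org.apache.sysml.runtime.instructions.cp.DoubleObject;
import org.apache.sysml.runtime.instructions.cp.KahanObject;
import org.apache.sysml.runtime.instructions.cp.ScalarObject;

public class PartialAggSketch {
	public static void main(String[] args) {
		// Hypothetical per-task partial sums, standing in for the Future<Double>
		// results collected from the ParAggTask workers above.
		List<Double> partials = Arrays.asList(1e16, 1.0, -1e16, 2.5);

		// Numerically stable aggregation, as in the KahanFunction branch.
		KahanObject kbuff = new KahanObject(0, 0);
		KahanPlus kplus = KahanPlus.getKahanPlusFnObject();
		for( double p : partials )
			kplus.execute2(kbuff, p);

		// The final scalar is wrapped into a DoubleObject, like the method's return value.
		ScalarObject ret = new DoubleObject(kbuff._sum);
		System.out.println("aggregated sum = " + ret.getDoubleValue());
	}
}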
Use of org.apache.sysml.runtime.instructions.cp.DoubleObject in project incubator-systemml by apache.
Class CovarianceSPInstruction, method processInstruction.
@Override
public void processInstruction(ExecutionContext ec) {
	SparkExecutionContext sec = (SparkExecutionContext) ec;
	COVOperator cop = ((COVOperator) _optr);

	// get input
	JavaPairRDD<MatrixIndexes, MatrixBlock> in1 = sec.getBinaryBlockRDDHandleForVariable(input1.getName());
	JavaPairRDD<MatrixIndexes, MatrixBlock> in2 = sec.getBinaryBlockRDDHandleForVariable(input2.getName());

	// process covariance instruction
	CM_COV_Object cmobj = null;
	if( input3 == null ) {
		// w/o weights
		cmobj = in1.join(in2).values()
			.map(new RDDCOVFunction(cop))
			.fold(new CM_COV_Object(), new RDDCOVReduceFunction(cop));
	}
	else {
		// with weights
		JavaPairRDD<MatrixIndexes, MatrixBlock> in3 = sec.getBinaryBlockRDDHandleForVariable(input3.getName());
		cmobj = in1.join(in2).join(in3).values()
			.map(new RDDCOVWeightsFunction(cop))
			.fold(new CM_COV_Object(), new RDDCOVReduceFunction(cop));
	}

	// create scalar output (no lineage information required)
	double val = cmobj.getRequiredResult(_optr);
	ec.setScalarOutput(output.getName(), new DoubleObject(val));
}
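Only the final two lines touch DoubleObject: the covariance scalar extracted from the folded CM_COV_Object is wrapped and registered as the instruction's output. The sketch below mirrors that output pattern with a plain-Java covariance over two small hypothetical vectors; it does not use the RDDCOVFunction pipeline, and the n-1 (sample covariance) denominator is an assumption made for illustration.

import org.apache.sysml.runtime.instructions.cp.DoubleObject;
import org.apache.sysml.runtime.instructions.cp.ScalarObject;

public class CovOutputSketch {
	public static void main(String[] args) {
		// Two hypothetical input vectors (the instruction reads these as block RDDs instead).
		double[] x = { 1, 2, 3, 4, 5 };
		double[] y = { 2, 4, 6, 8, 10 };

		// Plain-Java covariance, standing in for the fold over CM_COV_Object;
		// the n-1 denominator (sample covariance) is an assumption for illustration.
		double mx = 0, my = 0;
		for( int i = 0; i < x.length; i++ ) { mx += x[i]; my += y[i]; }
		mx /= x.length;
		my /= y.length;
		double cov = 0;
		for( int i = 0; i < x.length; i++ )
			cov += (x[i] - mx) * (y[i] - my);
		cov /= (x.length - 1);

		// Same output pattern as the instruction: wrap the scalar into a DoubleObject.
		ScalarObject out = new DoubleObject(cov);
		System.out.println("cov = " + out.getDoubleValue());
	}
}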
Use of org.apache.sysml.runtime.instructions.cp.DoubleObject in project incubator-systemml by apache.
Class QuantilePickSPInstruction, method processInstruction.
@Override
public void processInstruction(ExecutionContext ec) {
	SparkExecutionContext sec = (SparkExecutionContext) ec;

	// get input rdds
	JavaPairRDD<MatrixIndexes, MatrixBlock> in = sec.getBinaryBlockRDDHandleForVariable(input1.getName());
	MatrixCharacteristics mc = sec.getMatrixCharacteristics(input1.getName());

	// (in contrast to cp instructions, w/o weights does not materialize weights of 1)
	switch( _type ) {
		case VALUEPICK: {
			ScalarObject quantile = ec.getScalarInput(input2);
			double[] wt = getWeightedQuantileSummary(in, mc, quantile.getDoubleValue());
			ec.setScalarOutput(output.getName(), new DoubleObject(wt[3]));
			break;
		}
		case MEDIAN: {
			double[] wt = getWeightedQuantileSummary(in, mc, 0.5);
			ec.setScalarOutput(output.getName(), new DoubleObject(wt[3]));
			break;
		}
		case IQM: {
			double[] wt = getWeightedQuantileSummary(in, mc, 0.25, 0.75);
			long key25 = (long) Math.ceil(wt[1]);
			long key75 = (long) Math.ceil(wt[2]);
			JavaPairRDD<MatrixIndexes, MatrixBlock> out = in
				.filter(new FilterFunction(key25 + 1, key75, mc.getRowsPerBlock()))
				.mapToPair(new ExtractAndSumFunction(key25 + 1, key75, mc.getRowsPerBlock()));
			double sum = RDDAggregateUtils.sumStable(out).getValue(0, 0);
			double val = MatrixBlock.computeIQMCorrection(sum, wt[0], wt[3], wt[5], wt[4], wt[6]);
			ec.setScalarOutput(output.getName(), new DoubleObject(val));
			break;
		}
		default:
			throw new DMLRuntimeException("Unsupported qpick operation type: " + _type);
	}
}
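In every case of the switch, the picked or corrected quantile value ends up wrapped in a DoubleObject via ec.setScalarOutput. The following is a plain-Java illustration of the VALUEPICK idea on a sorted, weighted list; pickValue is a hypothetical helper and not the actual getWeightedQuantileSummary implementation.

import org.apache.sysml.runtime.instructions.cp.DoubleObject;
import org.apache.sysml.runtime.instructions.cp.ScalarObject;

public class ValuePickSketch {
	// Hypothetical helper: pick the value at quantile p from sorted values with weights.
	// This is an illustration only, not the actual getWeightedQuantileSummary logic.
	static double pickValue(double[] sortedValues, double[] weights, double p) {
		double total = 0;
		for( double w : weights )
			total += w;
		double target = p * total, cum = 0;
		for( int i = 0; i < sortedValues.length; i++ ) {
			cum += weights[i];
			if( cum >= target )
				return sortedValues[i];
		}
		return sortedValues[sortedValues.length - 1];
	}

	public static void main(String[] args) {
		double[] vals = { 1.0, 2.0, 3.0, 4.0, 5.0 };
		double[] wts = { 1, 1, 1, 1, 1 };
		// The quantile arrives as a ScalarObject (cf. ec.getScalarInput(input2) above).
		ScalarObject quantile = new DoubleObject(0.5);
		double picked = pickValue(vals, wts, quantile.getDoubleValue());
		// The instruction wraps the picked value the same way before setScalarOutput.
		ScalarObject out = new DoubleObject(picked);
		System.out.println("picked value = " + out.getDoubleValue());
	}
}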
Use of org.apache.sysml.runtime.instructions.cp.DoubleObject in project incubator-systemml by apache.
Class SpoofSPInstruction, method processInstruction.
@Override
public void processInstruction(ExecutionContext ec) {
	SparkExecutionContext sec = (SparkExecutionContext) ec;

	// decide upon broadcast side inputs
	boolean[] bcVect = determineBroadcastInputs(sec, _in);
	boolean[] bcVect2 = getMatrixBroadcastVector(sec, _in, bcVect);
	int main = getMainInputIndex(_in, bcVect);

	// create joined input rdd w/ replication if needed
	MatrixCharacteristics mcIn = sec.getMatrixCharacteristics(_in[main].getName());
	JavaPairRDD<MatrixIndexes, MatrixBlock[]> in = createJoinedInputRDD(sec, _in, bcVect,
		(_class.getSuperclass() == SpoofOuterProduct.class));
	JavaPairRDD<MatrixIndexes, MatrixBlock> out = null;

	// create lists of input broadcasts and scalars
	ArrayList<PartitionedBroadcast<MatrixBlock>> bcMatrices = new ArrayList<>();
	ArrayList<ScalarObject> scalars = new ArrayList<>();
	for( int i = 0; i < _in.length; i++ ) {
		if( _in[i].getDataType() == DataType.MATRIX && bcVect[i] ) {
			bcMatrices.add(sec.getBroadcastForVariable(_in[i].getName()));
		}
		else if( _in[i].getDataType() == DataType.SCALAR ) {
			// note: even if literal, it might be compiled as scalar placeholder
			scalars.add(sec.getScalarInput(_in[i].getName(), _in[i].getValueType(), _in[i].isLiteral()));
		}
	}

	// execute generated operator
	if( _class.getSuperclass() == SpoofCellwise.class ) { // CELL
		SpoofCellwise op = (SpoofCellwise) CodegenUtils.createInstance(_class);
		AggregateOperator aggop = getAggregateOperator(op.getAggOp());
		if( _out.getDataType() == DataType.MATRIX ) {
			// execute codegen block operation
			out = in.mapPartitionsToPair(new CellwiseFunction(_class.getName(), _classBytes, bcVect2, bcMatrices, scalars), true);
			if( (op.getCellType() == CellType.ROW_AGG && mcIn.getCols() > mcIn.getColsPerBlock())
				|| (op.getCellType() == CellType.COL_AGG && mcIn.getRows() > mcIn.getRowsPerBlock()) ) {
				long numBlocks = (op.getCellType() == CellType.ROW_AGG) ? mcIn.getNumRowBlocks() : mcIn.getNumColBlocks();
				out = RDDAggregateUtils.aggByKeyStable(out, aggop, (int) Math.min(out.getNumPartitions(), numBlocks), false);
			}
			sec.setRDDHandleForVariable(_out.getName(), out);
			// maintain lineage info and output characteristics
			maintainLineageInfo(sec, _in, bcVect, _out);
			updateOutputMatrixCharacteristics(sec, op);
		}
		else { // SCALAR
			out = in.mapPartitionsToPair(new CellwiseFunction(_class.getName(), _classBytes, bcVect2, bcMatrices, scalars), true);
			MatrixBlock tmpMB = RDDAggregateUtils.aggStable(out, aggop);
			sec.setVariable(_out.getName(), new DoubleObject(tmpMB.getValue(0, 0)));
		}
	}
	else if( _class.getSuperclass() == SpoofMultiAggregate.class ) { // MAGG
		SpoofMultiAggregate op = (SpoofMultiAggregate) CodegenUtils.createInstance(_class);
		AggOp[] aggOps = op.getAggOps();
		MatrixBlock tmpMB = in.mapToPair(new MultiAggregateFunction(_class.getName(), _classBytes, bcVect2, bcMatrices, scalars))
			.values().fold(new MatrixBlock(), new MultiAggAggregateFunction(aggOps));
		sec.setMatrixOutput(_out.getName(), tmpMB, getExtendedOpcode());
	}
	else if( _class.getSuperclass() == SpoofOuterProduct.class ) { // OUTER
		if( _out.getDataType() == DataType.MATRIX ) {
			SpoofOperator op = (SpoofOperator) CodegenUtils.createInstance(_class);
			OutProdType type = ((SpoofOuterProduct) op).getOuterProdType();
			// update matrix characteristics
			updateOutputMatrixCharacteristics(sec, op);
			MatrixCharacteristics mcOut = sec.getMatrixCharacteristics(_out.getName());
			out = in.mapPartitionsToPair(new OuterProductFunction(_class.getName(), _classBytes, bcVect2, bcMatrices, scalars), true);
			if( type == OutProdType.LEFT_OUTER_PRODUCT || type == OutProdType.RIGHT_OUTER_PRODUCT ) {
				long numBlocks = mcOut.getNumRowBlocks() * mcOut.getNumColBlocks();
				out = RDDAggregateUtils.sumByKeyStable(out, (int) Math.min(out.getNumPartitions(), numBlocks), false);
			}
			sec.setRDDHandleForVariable(_out.getName(), out);
			// maintain lineage info and output characteristics
			maintainLineageInfo(sec, _in, bcVect, _out);
		}
		else {
			out = in.mapPartitionsToPair(new OuterProductFunction(_class.getName(), _classBytes, bcVect2, bcMatrices, scalars), true);
			MatrixBlock tmp = RDDAggregateUtils.sumStable(out);
			sec.setVariable(_out.getName(), new DoubleObject(tmp.getValue(0, 0)));
		}
	}
	else if( _class.getSuperclass() == SpoofRowwise.class ) { // ROW
		if( mcIn.getCols() > mcIn.getColsPerBlock() ) {
			throw new DMLRuntimeException("Invalid spark rowwise operator w/ ncol=" + mcIn.getCols() + ", ncolpb=" + mcIn.getColsPerBlock() + ".");
		}
		SpoofRowwise op = (SpoofRowwise) CodegenUtils.createInstance(_class);
		long clen2 = op.getRowType().isConstDim2(op.getConstDim2()) ? op.getConstDim2() :
			op.getRowType().isRowTypeB1() ? sec.getMatrixCharacteristics(_in[1].getName()).getCols() : -1;
		RowwiseFunction fmmc = new RowwiseFunction(_class.getName(), _classBytes, bcVect2, bcMatrices, scalars, (int) mcIn.getCols(), (int) clen2);
		out = in.mapPartitionsToPair(fmmc, op.getRowType() == RowType.ROW_AGG || op.getRowType() == RowType.NO_AGG);
		if( op.getRowType().isColumnAgg() || op.getRowType() == RowType.FULL_AGG ) {
			MatrixBlock tmpMB = RDDAggregateUtils.sumStable(out);
			if( op.getRowType().isColumnAgg() )
				sec.setMatrixOutput(_out.getName(), tmpMB, getExtendedOpcode());
			else
				sec.setScalarOutput(_out.getName(), new DoubleObject(tmpMB.quickGetValue(0, 0)));
		}
		else { // row-agg or no-agg
			if( op.getRowType() == RowType.ROW_AGG && mcIn.getCols() > mcIn.getColsPerBlock() ) {
				out = RDDAggregateUtils.sumByKeyStable(out, (int) Math.min(out.getNumPartitions(), mcIn.getNumRowBlocks()), false);
			}
			sec.setRDDHandleForVariable(_out.getName(), out);
			// maintain lineage info and output characteristics
			maintainLineageInfo(sec, _in, bcVect, _out);
			updateOutputMatrixCharacteristics(sec, op);
		}
	}
	else {
		throw new DMLRuntimeException("Operator " + _class.getSuperclass() + " is not supported on Spark");
	}
}
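Across the CELL, OUTER, and ROW branches, the scalar case always reduces the distributed result to a single 1x1 MatrixBlock and wraps cell (0,0) in a DoubleObject. A minimal sketch of that final step, with a hypothetical aggregate value in place of the RDDAggregateUtils result:

import org.apache.sysml.runtime.instructions.cp.DoubleObject;
import org.apache.sysml.runtime.instructions.cp.ScalarObject;
import org.apache.sysml.runtime.matrix.data.MatrixBlock;

public class ScalarOutputSketch {
	public static void main(String[] args) {
		// Hypothetical 1x1 aggregate block, standing in for the result of
		// RDDAggregateUtils.aggStable / sumStable in the SCALAR branches above.
		MatrixBlock tmp = new MatrixBlock(1, 1, false);
		tmp.quickSetValue(0, 0, 42.0);

		// The instruction extracts cell (0,0) and exposes it as a scalar variable.
		ScalarObject out = new DoubleObject(tmp.quickGetValue(0, 0));
		System.out.println("scalar output = " + out.getDoubleValue());
	}
}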