Example usage of org.apache.sysml.runtime.matrix.data.MatrixBlock in project incubator-systemml (Apache): class BinarySPInstruction, method processMatrixScalarBinaryInstruction.
/**
 * Executes a matrix-scalar binary operation on Spark: applies the scalar
 * operator element-wise to every block of the matrix operand and binds the
 * resulting RDD to the output variable (with lineage tracking).
 *
 * @param ec execution context, expected to be a {@link SparkExecutionContext}
 */
protected void processMatrixScalarBinaryInstruction(ExecutionContext ec) {
	SparkExecutionContext sec = (SparkExecutionContext) ec;

	// get input RDD: exactly one of input1/input2 is the matrix operand
	String rddVar = (input1.getDataType() == DataType.MATRIX) ? input1.getName() : input2.getName();
	JavaPairRDD<MatrixIndexes, MatrixBlock> in1 = sec.getBinaryBlockRDDHandleForVariable(rddVar);

	// get operator and scalar constant
	// (getScalarInput already returns ScalarObject, so no cast is needed)
	CPOperand scalar = (input1.getDataType() == DataType.MATRIX) ? input2 : input1;
	ScalarObject constant = sec.getScalarInput(scalar.getName(), scalar.getValueType(), scalar.isLiteral());
	ScalarOperator sc_op = (ScalarOperator) _optr;
	sc_op = sc_op.setConstant(constant.getDoubleValue());

	// execute scalar matrix arithmetic instruction block-wise
	// (block indexes are unchanged, so mapValues suffices)
	JavaPairRDD<MatrixIndexes, MatrixBlock> out = in1.mapValues(new MatrixScalarUnaryFunction(sc_op));

	// put output RDD handle into symbol table and maintain lineage
	updateUnaryOutputMatrixCharacteristics(sec, rddVar, output.getName());
	sec.setRDDHandleForVariable(output.getName(), out);
	sec.addLineageRDD(output.getName(), rddVar);
}
Example usage of org.apache.sysml.runtime.matrix.data.MatrixBlock in project incubator-systemml (Apache): class CastSPInstruction, method processInstruction.
/**
 * Converts between frame and matrix representations on Spark, depending on
 * the cast opcode, and registers the converted RDD as the output variable.
 */
@Override
@SuppressWarnings("unchecked")
public void processInstruction(ExecutionContext ec) {
	SparkExecutionContext sec = (SparkExecutionContext) ec;
	String opcode = getOpcode();

	// obtain the input RDD (binary-block format) and its size metadata
	JavaPairRDD<?, ?> in = sec.getRDDHandleForVariable(input1.getName(), InputInfo.BinaryBlockInputInfo);
	MatrixCharacteristics mcIn = sec.getMatrixCharacteristics(input1.getName());

	// perform frame-matrix / matrix-frame conversion
	JavaPairRDD<?, ?> out;
	if (opcode.equals(UnaryCP.CAST_AS_MATRIX_OPCODE)) {
		// frame -> matrix: re-block with the configured default block size
		MatrixCharacteristics mcOut = new MatrixCharacteristics(mcIn);
		int blksz = ConfigurationManager.getBlocksize();
		mcOut.setBlockSize(blksz, blksz);
		out = FrameRDDConverterUtils.binaryBlockToMatrixBlock((JavaPairRDD<Long, FrameBlock>) in, mcIn, mcOut);
	}
	else if (opcode.equals(UnaryCP.CAST_AS_FRAME_OPCODE)) {
		// matrix -> frame: convert to long-indexed frame blocks
		out = FrameRDDConverterUtils.matrixBlockToBinaryBlockLongIndex(sec.getSparkContext(), (JavaPairRDD<MatrixIndexes, MatrixBlock>) in, mcIn);
	}
	else {
		throw new DMLRuntimeException("Unsupported spark cast operation: " + opcode);
	}

	// register output, propagate size information, and maintain lineage
	sec.setRDDHandleForVariable(output.getName(), out);
	updateUnaryOutputMatrixCharacteristics(sec, input1.getName(), output.getName());
	sec.addLineageRDD(output.getName(), input1.getName());

	// a freshly created frame has no schema yet; default all columns to DOUBLE
	if (opcode.equals(UnaryCP.CAST_AS_FRAME_OPCODE)) {
		sec.getFrameObject(output.getName()).setSchema(UtilFunctions.nCopies((int) mcIn.getCols(), ValueType.DOUBLE));
	}
}
Example usage of org.apache.sysml.runtime.matrix.data.MatrixBlock in project incubator-systemml (Apache): class CentralMomentSPInstruction, method processInstruction.
/**
 * Computes a central moment over a distributed matrix, optionally weighted,
 * and writes the scalar result into the symbol table.
 */
@Override
public void processInstruction(ExecutionContext ec) {
	SparkExecutionContext sec = (SparkExecutionContext) ec;

	// the 'order' argument is the last operand: input2 without weights, input3 with weights
	CPOperand orderOperand = (input3 == null) ? input2 : input3;
	ScalarObject order = ec.getScalarInput(orderOperand.getName(), orderOperand.getValueType(), orderOperand.isLiteral());

	// resolve the concrete aggregation type from 'order' if not yet determined
	CMOperator cop = (CMOperator) _optr;
	if (cop.getAggOpType() == AggregateOperationTypes.INVALID) {
		cop.setCMAggOp((int) order.getLongValue());
	}

	// get input
	JavaPairRDD<MatrixIndexes, MatrixBlock> in1 = sec.getBinaryBlockRDDHandleForVariable(input1.getName());

	// process central moment instruction (map per block, then fold partial aggregates)
	CM_COV_Object cmobj;
	if (input3 == null) {
		// w/o weights
		cmobj = in1.values().map(new RDDCMFunction(cop)).fold(new CM_COV_Object(), new RDDCMReduceFunction(cop));
	}
	else {
		// with weights: join values with their weight blocks by block index
		JavaPairRDD<MatrixIndexes, MatrixBlock> weights = sec.getBinaryBlockRDDHandleForVariable(input2.getName());
		cmobj = in1.join(weights).values().map(new RDDCMWeightsFunction(cop)).fold(new CM_COV_Object(), new RDDCMReduceFunction(cop));
	}

	// create scalar output (no lineage information required)
	double result = cmobj.getRequiredResult(_optr);
	ec.setScalarOutput(output.getName(), new DoubleObject(result));
}
Example usage of org.apache.sysml.runtime.matrix.data.MatrixBlock in project incubator-systemml (Apache): class ConvolutionSPInstruction, method getBroadcast.
/**
 * Pins the named matrix into local memory and ships it to all executors as a
 * Spark broadcast variable.
 *
 * @param sec  spark execution context holding the variable
 * @param name variable name of the matrix to broadcast
 * @return broadcast handle for the matrix block
 */
private Broadcast<MatrixBlock> getBroadcast(SparkExecutionContext sec, String name) {
	// acquire the block, then release the pinned input right away since we
	// only need the local reference for broadcasting
	MatrixBlock block = sec.getMatrixInput(name, getExtendedOpcode());
	sec.releaseMatrixInput(name, getExtendedOpcode());
	return sec.getSparkContext().broadcast(block);
}
Example usage of org.apache.sysml.runtime.matrix.data.MatrixBlock in project incubator-systemml (Apache): class ReplicateBlockFunction, method call.
/**
 * Replicates a single matrix block along one dimension: for an LHS matrix the
 * block is repeated across all column-block indexes, for an RHS matrix across
 * all row-block indexes. Emits one (index, block) tuple per replica, with
 * optional deep copies of the block payload.
 *
 * @param arg0 input tuple of block index and matrix block
 * @return iterator over the replicated tuples
 */
@Override
public Iterator<Tuple2<MatrixIndexes, MatrixBlock>> call(Tuple2<MatrixIndexes, MatrixBlock> arg0) throws Exception {
	MatrixIndexes ixIn = arg0._1();
	MatrixBlock blkIn = arg0._2();

	// number of replicas = number of blocks along the replicated dimension
	long numBlocks = (long) Math.ceil((double) _len / _blen);

	ArrayList<Tuple2<MatrixIndexes, MatrixBlock>> ret = new ArrayList<>();
	for (long rix = 1; rix <= numBlocks; rix++) {
		// LHS matrix: keep the row index and vary the column index;
		// RHS matrix: keep the column index and vary the row index
		MatrixIndexes ix = _left ?
			new MatrixIndexes(ixIn.getRowIndex(), rix) :
			new MatrixIndexes(rix, ixIn.getColumnIndex());
		MatrixBlock blk = _deep ? new MatrixBlock(blkIn) : blkIn;
		ret.add(new Tuple2<>(ix, blk));
	}

	// output list of new tuples
	return ret.iterator();
}
Aggregations