Use of org.apache.sysml.runtime.instructions.cp.KahanObject in project incubator-systemml by apache.
The class MatrixBlock, method denseAggregateUnaryHelp.
private void denseAggregateUnaryHelp(AggregateUnaryOperator op, MatrixBlock result, int blockingFactorRow, int blockingFactorCol, MatrixIndexes indexesIn) {
    // initialize the output with the aggregate's neutral value if it is not 0
    if (op.aggOp.initialValue != 0)
        result.reset(result.rlen, result.clen, op.aggOp.initialValue);
    CellIndex tempCellIndex = new CellIndex(-1, -1);
    KahanObject buffer = new KahanObject(0, 0);
    // map every input cell to its output cell and aggregate incrementally
    for (int i = 0; i < rlen; i++)
        for (int j = 0; j < clen; j++) {
            tempCellIndex.set(i, j);
            op.indexFn.execute(tempCellIndex, tempCellIndex);
            incrementalAggregateUnaryHelp(op.aggOp, result, tempCellIndex.row, tempCellIndex.column, quickGetValue(i, j), buffer);
        }
}
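The buffer argument is a KahanObject: a running sum plus a correction term for compensated (Kahan) summation, which keeps the rounding error of a long chain of additions bounded. As a reference, here is a minimal, self-contained sketch of that update; the class and method names are illustrative, not the SystemML API.

// Minimal sketch of compensated (Kahan) summation; illustrative names,
// not the SystemML classes.
class KahanSketch {
    double sum = 0;        // running sum
    double correction = 0; // accumulated low-order bits lost by previous adds

    void add(double value) {
        double corrected = value + correction;   // fold in the previous error
        double newSum = sum + corrected;         // may lose low-order bits
        correction = corrected - (newSum - sum); // recover what was lost
        sum = newSum;
    }

    public static void main(String[] args) {
        KahanSketch acc = new KahanSketch();
        for (int i = 0; i < 1_000_000; i++)
            acc.add(0.1);
        System.out.println(acc.sum); // noticeably closer to 100000 than a naive running sum
    }
}

In the method above, the same buffer object is reused as scratch space across the whole loop, so the per-cell updates inside incrementalAggregateUnaryHelp can apply exactly this kind of compensated addition.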
Use of org.apache.sysml.runtime.instructions.cp.KahanObject in project incubator-systemml by apache.
The class GroupedAggMRCombiner, method reduce.
@Override
public void reduce(TaggedMatrixIndexes key, Iterator<WeightedCell> values, OutputCollector<TaggedMatrixIndexes, WeightedCell> out, Reporter reporter) throws IOException {
    long start = System.currentTimeMillis();
    // get aggregate operator
    GroupedAggregateInstruction ins = grpaggInstructions.get(key.getTag());
    Operator op = ins.getOperator();
    boolean isPartialAgg = true;
    // combine iterator to single value
    try {
        // everything except sum
        if (op instanceof CMOperator) {
            if (((CMOperator) op).isPartialAggregateOperator()) {
                // partial aggregate cm operator
                cmObj.reset();
                CM lcmFn = cmFn.get(key.getTag());
                while (values.hasNext()) {
                    WeightedCell value = values.next();
                    lcmFn.execute(cmObj, value.getValue(), value.getWeight());
                }
                outCell.setValue(cmObj.getRequiredPartialResult(op));
                outCell.setWeight(cmObj.getWeight());
            } else {
                // forward tuples to reducer
                isPartialAgg = false;
                while (values.hasNext())
                    out.collect(key, values.next());
            }
        } else if (op instanceof AggregateOperator) {
            // sum
            AggregateOperator aggop = (AggregateOperator) op;
            if (aggop.correctionExists) {
                // partial aggregate with correction
                KahanObject buffer = new KahanObject(aggop.initialValue, 0);
                KahanPlus.getKahanPlusFnObject();
                while (values.hasNext()) {
                    WeightedCell value = values.next();
                    aggop.increOp.fn.execute(buffer, value.getValue() * value.getWeight());
                }
                outCell.setValue(buffer._sum);
                outCell.setWeight(1);
            } else {
                // partial aggregate without correction
                double v = aggop.initialValue;
                while (values.hasNext()) {
                    WeightedCell value = values.next();
                    v = aggop.increOp.fn.execute(v, value.getValue() * value.getWeight());
                }
                outCell.setValue(v);
                outCell.setWeight(1);
            }
        } else
            throw new IOException("Unsupported operator in instruction: " + ins);
    } catch (Exception ex) {
        throw new IOException(ex);
    }
    // collect the output (to reducer)
    if (isPartialAgg)
        out.collect(key, outCell);
    reporter.incrCounter(Counters.COMBINE_OR_REDUCE_TIME, System.currentTimeMillis() - start);
}
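Both sum branches above accumulate value * weight; with correction enabled the accumulator is a KahanObject and only buffer._sum is forwarded, with weight 1. The standalone sketch below shows that pattern together with a merge of two (sum, correction) pairs; it is illustrative only and independent of the Hadoop and SystemML classes.

// Illustrative sketch: per-partition compensated accumulation of weighted
// values and a merge step; not the SystemML/Hadoop API.
final class WeightedKahanSum {
    double sum = 0, correction = 0;

    void add(double value, double weight) {
        addRaw(value * weight); // accumulate the weighted contribution
    }

    void merge(WeightedKahanSum other) {
        // fold in the other partition's correction first, then its sum
        addRaw(other.correction);
        addRaw(other.sum);
    }

    private void addRaw(double v) {
        double corrected = v + correction;
        double newSum = sum + corrected;
        correction = corrected - (newSum - sum);
        sum = newSum;
    }

    public static void main(String[] args) {
        WeightedKahanSum p1 = new WeightedKahanSum(), p2 = new WeightedKahanSum();
        p1.add(0.1, 3.0);
        p2.add(0.2, 2.0);
        p1.merge(p2);
        System.out.println(p1.sum); // compensated total of 0.1*3 + 0.2*2
    }
}

Since the combiner emits only the sum component, the residual correction of a partition is not carried across the combiner/reducer boundary; merge() shows how a (sum, correction) pair could be folded in losslessly when both components are available.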
Use of org.apache.sysml.runtime.instructions.cp.KahanObject in project incubator-systemml by apache.
The class GroupedAggMRReducer, method reduce.
@Override
public void reduce(TaggedMatrixIndexes key, Iterator<WeightedCell> values, OutputCollector<MatrixIndexes, MatrixCell> out, Reporter report) throws IOException {
    commonSetup(report);
    // get operator
    GroupedAggregateInstruction ins = grpaggInstructions.get(key.getTag());
    Operator op = ins.getOperator();
    try {
        // all, but sum
        if (op instanceof CMOperator) {
            cmObj.reset();
            CM lcmFn = cmFn.get(key.getTag());
            while (values.hasNext()) {
                WeightedCell value = values.next();
                lcmFn.execute(cmObj, value.getValue(), value.getWeight());
            }
            outCell.setValue(cmObj.getRequiredResult(op));
        } else if (op instanceof AggregateOperator) {
            // sum
            AggregateOperator aggop = (AggregateOperator) op;
            if (aggop.correctionExists) {
                KahanObject buffer = new KahanObject(aggop.initialValue, 0);
                while (values.hasNext()) {
                    WeightedCell value = values.next();
                    aggop.increOp.fn.execute(buffer, value.getValue() * value.getWeight());
                }
                outCell.setValue(buffer._sum);
            } else {
                double v = aggop.initialValue;
                while (values.hasNext()) {
                    WeightedCell value = values.next();
                    v = aggop.increOp.fn.execute(v, value.getValue() * value.getWeight());
                }
                outCell.setValue(v);
            }
        } else
            throw new IOException("Unsupported operator in instruction: " + ins);
    } catch (Exception ex) {
        throw new IOException(ex);
    }
    outIndex.setIndexes(key.getBaseObject());
    cachedValues.reset();
    cachedValues.set(key.getTag(), outIndex, outCell);
    processReducerInstructions();
    // output the final result matrices
    outputResultsFromCachedValues(report);
}
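Because the reducer multiplies each incoming value by its weight, a combiner output that carries a partial sum with weight 1 folds back in without changing the final aggregate. A minimal check of that invariant, with plain doubles and illustrative names only:

// Illustrative check that pre-combining weighted cells (as the combiner does,
// emitting the partial sum with weight 1) leaves the reducer's weighted sum
// unchanged; not the SystemML API.
public class CombinerContractCheck {
    static double weightedSum(double[][] cells) { // each cell: {value, weight}
        double s = 0;
        for (double[] c : cells)
            s += c[0] * c[1];
        return s;
    }

    public static void main(String[] args) {
        double[][] mapOutput = { { 2.0, 3.0 }, { 4.0, 0.5 }, { 1.0, 1.0 } };
        // direct reduce over all map-output cells
        double direct = weightedSum(mapOutput);
        // combiner pre-aggregates the first two cells into one cell with weight 1
        double partial = 2.0 * 3.0 + 4.0 * 0.5;
        double[][] combined = { { partial, 1.0 }, { 1.0, 1.0 } };
        double viaCombiner = weightedSum(combined);
        System.out.println(direct + " == " + viaCombiner); // 9.0 == 9.0
    }
}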
Use of org.apache.sysml.runtime.instructions.cp.KahanObject in project incubator-systemml by apache.
The class CM, method execute.
/**
 * Combines the statistics (weight, mean, central moments) computed on two
 * partitions of the data into the statistics of their union.
 */
@Override
public Data execute(Data in1, Data in2) {
    CM_COV_Object cm1 = (CM_COV_Object) in1;
    CM_COV_Object cm2 = (CM_COV_Object) in2;
    // if one partition is empty, the merged statistics are those of the other
    if (cm1.isCMAllZeros()) {
        cm1.w = cm2.w;
        cm1.mean.set(cm2.mean);
        cm1.m2.set(cm2.m2);
        cm1.m3.set(cm2.m3);
        cm1.m4.set(cm2.m4);
        return cm1;
    }
    if (cm2.isCMAllZeros())
        return cm1;
    switch (_type) {
        case COUNT: {
            // merged count
            cm1.w = Math.round(cm1.w + cm2.w);
            break;
        }
        case MEAN: {
            // pooled mean: mean1 + w2 * (mean2 - mean1) / w
            double w = cm1.w + cm2.w;
            double d = cm2.mean._sum - cm1.mean._sum;
            cm1.mean = (KahanObject) _plus.execute(cm1.mean, cm2.w * d / w);
            cm1.w = w;
            break;
        }
        case CM2: {
            // second central moment: m2 = m2_1 + m2_2 + w1*w2/w * d^2
            double w = cm1.w + cm2.w;
            double d = cm2.mean._sum - cm1.mean._sum;
            cm1.mean = (KahanObject) _plus.execute(cm1.mean, cm2.w * d / w);
            double t1 = cm1.w * cm2.w / w * d;
            double lt1 = t1 * d;
            _buff2.set(cm1.m2);
            _buff2 = (KahanObject) _plus.execute(_buff2, cm2.m2._sum, cm2.m2._correction);
            _buff2 = (KahanObject) _plus.execute(_buff2, lt1);
            cm1.m2.set(_buff2);
            cm1.w = w;
            break;
        }
        case CM3: {
            // second and third central moments via the pairwise update
            double w = cm1.w + cm2.w;
            double d = cm2.mean._sum - cm1.mean._sum;
            cm1.mean = (KahanObject) _plus.execute(cm1.mean, cm2.w * d / w);
            double t1 = cm1.w * cm2.w / w * d;
            double t2 = -1 / cm1.w;
            double lt1 = t1 * d;
            double lt2 = Math.pow(t1, 3) * (1 / Math.pow(cm2.w, 2) - Math.pow(t2, 2));
            double f1 = cm1.w / w;
            double f2 = cm2.w / w;
            _buff2.set(cm1.m2);
            _buff2 = (KahanObject) _plus.execute(_buff2, cm2.m2._sum, cm2.m2._correction);
            _buff2 = (KahanObject) _plus.execute(_buff2, lt1);
            _buff3.set(cm1.m3);
            _buff3 = (KahanObject) _plus.execute(_buff3, cm2.m3._sum, cm2.m3._correction);
            _buff3 = (KahanObject) _plus.execute(_buff3, 3 * (-f2 * cm1.m2._sum + f1 * cm2.m2._sum) * d + lt2);
            cm1.m2.set(_buff2);
            cm1.m3.set(_buff3);
            cm1.w = w;
            break;
        }
        case CM4: {
            // second, third, and fourth central moments via the pairwise update
            double w = cm1.w + cm2.w;
            double d = cm2.mean._sum - cm1.mean._sum;
            cm1.mean = (KahanObject) _plus.execute(cm1.mean, cm2.w * d / w);
            double t1 = cm1.w * cm2.w / w * d;
            double t2 = -1 / cm1.w;
            double lt1 = t1 * d;
            double lt2 = Math.pow(t1, 3) * (1 / Math.pow(cm2.w, 2) - Math.pow(t2, 2));
            double lt3 = Math.pow(t1, 4) * (1 / Math.pow(cm2.w, 3) - Math.pow(t2, 3));
            double f1 = cm1.w / w;
            double f2 = cm2.w / w;
            _buff2.set(cm1.m2);
            _buff2 = (KahanObject) _plus.execute(_buff2, cm2.m2._sum, cm2.m2._correction);
            _buff2 = (KahanObject) _plus.execute(_buff2, lt1);
            _buff3.set(cm1.m3);
            _buff3 = (KahanObject) _plus.execute(_buff3, cm2.m3._sum, cm2.m3._correction);
            _buff3 = (KahanObject) _plus.execute(_buff3, 3 * (-f2 * cm1.m2._sum + f1 * cm2.m2._sum) * d + lt2);
            cm1.m4 = (KahanObject) _plus.execute(cm1.m4, cm2.m4._sum, cm2.m4._correction);
            cm1.m4 = (KahanObject) _plus.execute(cm1.m4, 4 * (-f2 * cm1.m3._sum + f1 * cm2.m3._sum) * d + 6 * (Math.pow(-f2, 2) * cm1.m2._sum + Math.pow(f1, 2) * cm2.m2._sum) * Math.pow(d, 2) + lt3);
            cm1.m2.set(_buff2);
            cm1.m3.set(_buff3);
            cm1.w = w;
            break;
        }
        case VARIANCE: {
            // same merge as CM2, accumulated directly into cm1.m2
            double w = cm1.w + cm2.w;
            double d = cm2.mean._sum - cm1.mean._sum;
            cm1.mean = (KahanObject) _plus.execute(cm1.mean, cm2.w * d / w);
            double t1 = cm1.w * cm2.w / w * d;
            double lt1 = t1 * d;
            cm1.m2 = (KahanObject) _plus.execute(cm1.m2, cm2.m2._sum, cm2.m2._correction);
            cm1.m2 = (KahanObject) _plus.execute(cm1.m2, lt1);
            cm1.w = w;
            break;
        }
        default:
            throw new DMLRuntimeException("Unsupported operation type: " + _type);
    }
    return cm1;
}
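The VARIANCE case is the classic pairwise update for the second central moment: M2 = M2_1 + M2_2 + (w1*w2/w) * d^2 with d = mean2 - mean1, while the mean is pooled as mean1 + w2*d/w. The following self-contained sketch (plain doubles, no Kahan compensation, illustrative names only) checks that merge against a direct computation over the concatenated data.

// Illustrative check of the pairwise mean/M2 merge used in the VARIANCE case;
// plain doubles, no Kahan compensation, names are not the SystemML API.
public class PairwiseM2Merge {
    static double[] stats(double[] x) {
        double mean = 0;
        for (double v : x) mean += v;
        mean /= x.length;
        double m2 = 0;
        for (double v : x) m2 += (v - mean) * (v - mean);
        return new double[] { x.length, mean, m2 }; // weight, mean, sum of squared deviations
    }

    static double[] merge(double[] a, double[] b) {
        double w = a[0] + b[0];
        double d = b[1] - a[1];                            // difference of partition means
        double mean = a[1] + b[0] * d / w;                 // pooled mean
        double m2 = a[2] + b[2] + a[0] * b[0] / w * d * d; // pooled second central moment
        return new double[] { w, mean, m2 };
    }

    public static void main(String[] args) {
        double[] x = { 2, 4, 4, 4 }, y = { 5, 5, 7, 9 };
        double[] merged = merge(stats(x), stats(y));
        double[] direct = stats(new double[] { 2, 4, 4, 4, 5, 5, 7, 9 });
        System.out.println(merged[1] + " vs " + direct[1]); // means agree: 5.0
        System.out.println(merged[2] + " vs " + direct[2]); // M2 agrees: 32.0
    }
}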
Use of org.apache.sysml.runtime.instructions.cp.KahanObject in project incubator-systemml by apache.
The class SpoofCellwise, method execute.
@Override
public ScalarObject execute(ArrayList<MatrixBlock> inputs, ArrayList<ScalarObject> scalarObjects, int k) {
    // sanity check
    if (inputs == null || inputs.size() < 1)
        throw new RuntimeException("Invalid input arguments.");
    // input preparation
    MatrixBlock a = inputs.get(0);
    SideInput[] b = prepInputMatrices(inputs);
    double[] scalars = prepInputScalars(scalarObjects);
    final int m = a.getNumRows();
    final int n = a.getNumColumns();
    // sparse safe check
    boolean sparseSafe = isSparseSafe() || (b.length == 0 && genexec(0, b, scalars, m, n, 0, 0) == 0);
    long inputSize = sparseSafe ? getTotalInputNnz(inputs) : getTotalInputSize(inputs);
    if (inputSize < PAR_NUMCELL_THRESHOLD) {
        // serial execution
        k = 1;
    }
    double ret = 0;
    if (k <= 1) {
        // SINGLE-THREADED
        if (inputs.get(0) instanceof CompressedMatrixBlock)
            ret = executeCompressedAndAgg((CompressedMatrixBlock) a, b, scalars, m, n, sparseSafe, 0, m);
        else if (!inputs.get(0).isInSparseFormat())
            ret = executeDenseAndAgg(a.getDenseBlock(), b, scalars, m, n, sparseSafe, 0, m);
        else
            ret = executeSparseAndAgg(a.getSparseBlock(), b, scalars, m, n, sparseSafe, 0, m);
    } else {
        // MULTI-THREADED
        try {
            ExecutorService pool = CommonThreadPool.get(k);
            ArrayList<ParAggTask> tasks = new ArrayList<>();
            int nk = (a instanceof CompressedMatrixBlock) ? k : UtilFunctions.roundToNext(Math.min(8 * k, m / 32), k);
            int blklen = (int) (Math.ceil((double) m / nk));
            if (a instanceof CompressedMatrixBlock)
                blklen = BitmapEncoder.getAlignedBlocksize(blklen);
            for (int i = 0; i < nk & i * blklen < m; i++)
                tasks.add(new ParAggTask(a, b, scalars, m, n, sparseSafe, i * blklen, Math.min((i + 1) * blklen, m)));
            // execute tasks
            List<Future<Double>> taskret = pool.invokeAll(tasks);
            pool.shutdown();
            // aggregate partial results
            ValueFunction vfun = getAggFunction();
            if (vfun instanceof KahanFunction) {
                KahanObject kbuff = new KahanObject(0, 0);
                KahanPlus kplus = KahanPlus.getKahanPlusFnObject();
                for (Future<Double> task : taskret)
                    kplus.execute2(kbuff, task.get());
                ret = kbuff._sum;
            } else {
                for (Future<Double> task : taskret)
                    ret = vfun.execute(ret, task.get());
            }
        } catch (Exception ex) {
            throw new DMLRuntimeException(ex);
        }
    }
    // correction for min/max: an unseen 0 might be the max or min value
    if ((_aggOp == AggOp.MIN || _aggOp == AggOp.MAX) && sparseSafe && a.getNonZeros() < a.getNumRows() * a.getNumColumns())
        ret = getAggFunction().execute(ret, 0);
    return new DoubleObject(ret);
}
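In the multi-threaded path, each task returns a plain Double for its row block, and the partials are folded into a single KahanObject when the aggregate is sum-like. The sketch below reproduces that pattern with standard Java concurrency only; the row splitting and the compensated merge follow the structure above, but the class name, block sizing, and helper names are illustrative.

// Illustrative sketch: per-block partial sums computed in a thread pool and
// merged with compensated addition; not the SystemML classes.
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class ParallelRowSum {
    public static double sum(double[][] data, int threads) throws Exception {
        int m = data.length;
        int blklen = (int) Math.ceil((double) m / threads); // rows per task
        ExecutorService pool = Executors.newFixedThreadPool(threads);
        List<Callable<Double>> tasks = new ArrayList<>();
        for (int i = 0; i < threads && i * blklen < m; i++) {
            final int rl = i * blklen, ru = Math.min((i + 1) * blklen, m);
            tasks.add(() -> {
                double s = 0;
                for (int r = rl; r < ru; r++)
                    for (double v : data[r])
                        s += v; // plain partial sum per row block
                return s;
            });
        }
        List<Future<Double>> partials = pool.invokeAll(tasks);
        pool.shutdown();
        // merge the partial results with compensated (Kahan) addition
        double sum = 0, correction = 0;
        for (Future<Double> p : partials) {
            double corrected = p.get() + correction;
            double newSum = sum + corrected;
            correction = corrected - (newSum - sum);
            sum = newSum;
        }
        return sum;
    }

    public static void main(String[] args) throws Exception {
        double[][] data = new double[100][100];
        for (double[] row : data) java.util.Arrays.fill(row, 0.1);
        System.out.println(sum(data, 4)); // ~1000.0
    }
}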