Use of org.apache.sysml.runtime.instructions.cp.CM_COV_Object in project incubator-systemml by apache:
class PerformGroupByAggInReducer, method call.
/**
 * Reduces all weighted cells of one group into a single aggregated output cell.
 *
 * For central-moment style operators (CMOperator) the values are folded into a
 * CM_COV_Object and the operator-specific result is extracted; for sum-style
 * operators (AggregateOperator) a weighted sum is computed, with or without a
 * Kahan correction term depending on the operator configuration.
 *
 * @param kv all weighted cells belonging to one group key
 * @return the aggregated cell (weight always 1)
 * @throws DMLRuntimeException if the operator type is unsupported or a partial
 *         CM aggregate reaches the reducer (combiner-only configuration)
 */
@Override
public WeightedCell call(Iterable<WeightedCell> kv) throws Exception {
    WeightedCell outCell = new WeightedCell();
    CM_COV_Object cmObj = new CM_COV_Object();
    if (op instanceof CMOperator) { // everything except sum
        cmObj.reset();
        CM lcmFn = CM.getCMFnObject(((CMOperator) op).aggOpType);
        if (((CMOperator) op).isPartialAggregateOperator()) {
            throw new DMLRuntimeException("Incorrect usage, should have used PerformGroupByAggInCombiner");
        }
        // forward tuples to reducer: fold every (value, weight) pair into the CM state
        for (WeightedCell value : kv)
            lcmFn.execute(cmObj, value.getValue(), value.getWeight());
        outCell.setValue(cmObj.getRequiredResult(op));
        outCell.setWeight(1);
    } else if (op instanceof AggregateOperator) { // sum
        AggregateOperator aggop = (AggregateOperator) op;
        if (aggop.correctionExists) {
            // partial aggregate with correction: KahanObject carries {sum, correction}
            KahanObject buffer = new KahanObject(aggop.initialValue, 0);
            for (WeightedCell value : kv)
                aggop.increOp.fn.execute(buffer, value.getValue() * value.getWeight());
            outCell.setValue(buffer._sum);
            outCell.setWeight(1);
        } else {
            // partial aggregate without correction: plain double accumulator
            double v = aggop.initialValue;
            for (WeightedCell value : kv)
                v = aggop.increOp.fn.execute(v, value.getValue() * value.getWeight());
            outCell.setValue(v);
            outCell.setWeight(1);
        }
    } else {
        throw new DMLRuntimeException("Unsupported operator in grouped aggregate instruction:" + op);
    }
    return outCell;
}
Use of org.apache.sysml.runtime.instructions.cp.CM_COV_Object in project incubator-systemml by apache:
class MatrixBlock, method incrementalAggregate.
/**
 * Merges a partial aggregate, including its correction terms, into this
 * matrix block in place. The meaning and position of the correction terms
 * depends on aggOp.correctionLocation:
 *  - LASTROW / LASTCOLUMN: one Kahan correction per value, stored in the
 *    adjacent row/column; MAXINDEX/MININDEX use an {index, value} layout.
 *  - LASTTWOROWS / LASTTWOCOLUMNS: incremental mean with
 *    {mean, count, mean correction} triples.
 *  - LASTFOURROWS / LASTFOURCOLUMNS: incremental variance with
 *    {var, mean, count, m2 correction, mean correction} tuples.
 *
 * @param aggOp             aggregate operator (increment fn + correction location)
 * @param newWithCorrection partial aggregate to merge, same layout as this block
 */
@Override
public void incrementalAggregate(AggregateOperator aggOp, MatrixValue newWithCorrection) {
// assert(aggOp.correctionExists);
MatrixBlock newWithCor = checkType(newWithCorrection);
// reusable buffer for {sum, correction} pairs during Kahan-style updates
KahanObject buffer = new KahanObject(0, 0);
if (aggOp.correctionLocation == CorrectionLocationType.LASTROW) {
if (aggOp.increOp.fn instanceof KahanPlus) {
// fast path: specialized kernel for Kahan-plus aggregation
LibMatrixAgg.aggregateBinaryMatrix(newWithCor, this, aggOp);
} else {
// generic path: value read at row r, its correction at row r+1
// NOTE(review): correction is read per-row from r+1 rather than from a
// single dedicated last row -- confirm this matches the producer layout.
for (int r = 0; r < rlen - 1; r++) for (int c = 0; c < clen; c++) {
buffer._sum = this.quickGetValue(r, c);
buffer._correction = this.quickGetValue(r + 1, c);
buffer = (KahanObject) aggOp.increOp.fn.execute(buffer, newWithCor.quickGetValue(r, c), newWithCor.quickGetValue(r + 1, c));
quickSetValue(r, c, buffer._sum);
quickSetValue(r + 1, c, buffer._correction);
}
}
} else if (aggOp.correctionLocation == CorrectionLocationType.LASTCOLUMN) {
if (aggOp.increOp.fn instanceof Builtin && (((Builtin) (aggOp.increOp.fn)).bFunc == Builtin.BuiltinCode.MAXINDEX || ((Builtin) (aggOp.increOp.fn)).bFunc == Builtin.BuiltinCode.MININDEX)) {
// special case MAXINDEX/MININDEX: column 0 holds the (1-based) index,
// column 1 holds the compared value -- not a Kahan correction.
// modified, the other needs to be changed to match.
for (int r = 0; r < rlen; r++) {
double currMaxValue = quickGetValue(r, 1);
long newMaxIndex = (long) newWithCor.quickGetValue(r, 0);
double newMaxValue = newWithCor.quickGetValue(r, 1);
// builtin returns an encoded comparison result: 1 => new wins, 2 => tie
double update = aggOp.increOp.fn.execute(newMaxValue, currMaxValue);
if (2.0 == update) {
// Return value of 2 ==> both values the same, break ties
// in favor of higher index.
long curMaxIndex = (long) quickGetValue(r, 0);
quickSetValue(r, 0, Math.max(curMaxIndex, newMaxIndex));
} else if (1.0 == update) {
// Return value of 1 ==> new value is better; use its index
quickSetValue(r, 0, newMaxIndex);
quickSetValue(r, 1, newMaxValue);
} else {
// Other return value ==> current answer is best
}
}
// *** END HACK ***
} else {
if (aggOp.increOp.fn instanceof KahanPlus) {
// fast path: specialized kernel for Kahan-plus aggregation
LibMatrixAgg.aggregateBinaryMatrix(newWithCor, this, aggOp);
} else {
// generic path: value at column c, its correction at column c+1
for (int r = 0; r < rlen; r++) for (int c = 0; c < clen - 1; c++) {
buffer._sum = this.quickGetValue(r, c);
buffer._correction = this.quickGetValue(r, c + 1);
buffer = (KahanObject) aggOp.increOp.fn.execute(buffer, newWithCor.quickGetValue(r, c), newWithCor.quickGetValue(r, c + 1));
quickSetValue(r, c, buffer._sum);
quickSetValue(r, c + 1, buffer._correction);
}
}
}
} else if (aggOp.correctionLocation == CorrectionLocationType.LASTTWOROWS) {
// incremental mean: layout per column is {mean | count | mean correction}
// at rows r, r+1, r+2; partial means are merged weighted by partial count
double n, n2, mu2;
for (int r = 0; r < rlen - 2; r++) for (int c = 0; c < clen; c++) {
buffer._sum = this.quickGetValue(r, c);
n = this.quickGetValue(r + 1, c);
buffer._correction = this.quickGetValue(r + 2, c);
mu2 = newWithCor.quickGetValue(r, c);
n2 = newWithCor.quickGetValue(r + 1, c);
n = n + n2;
// delta of partial mean, weighted by its share of the combined count
double toadd = (mu2 - buffer._sum) * n2 / n;
buffer = (KahanObject) aggOp.increOp.fn.execute(buffer, toadd);
quickSetValue(r, c, buffer._sum);
quickSetValue(r + 1, c, n);
quickSetValue(r + 2, c, buffer._correction);
}
} else if (aggOp.correctionLocation == CorrectionLocationType.LASTTWOCOLUMNS) {
// incremental mean: same as LASTTWOROWS, but laid out along columns
double n, n2, mu2;
for (int r = 0; r < rlen; r++) for (int c = 0; c < clen - 2; c++) {
buffer._sum = this.quickGetValue(r, c);
n = this.quickGetValue(r, c + 1);
buffer._correction = this.quickGetValue(r, c + 2);
mu2 = newWithCor.quickGetValue(r, c);
n2 = newWithCor.quickGetValue(r, c + 1);
n = n + n2;
double toadd = (mu2 - buffer._sum) * n2 / n;
buffer = (KahanObject) aggOp.increOp.fn.execute(buffer, toadd);
quickSetValue(r, c, buffer._sum);
quickSetValue(r, c + 1, n);
quickSetValue(r, c + 2, buffer._correction);
}
} else if (aggOp.correctionLocation == CorrectionLocationType.LASTFOURROWS && aggOp.increOp.fn instanceof CM && ((CM) aggOp.increOp.fn).getAggOpType() == AggregateOperationTypes.VARIANCE) {
// create buffers to store results
CM_COV_Object cbuff_curr = new CM_COV_Object();
CM_COV_Object cbuff_part = new CM_COV_Object();
// perform incremental aggregation
for (int r = 0; r < rlen - 4; r++) for (int c = 0; c < clen; c++) {
// extract current values: { var | mean, count, m2 correction, mean correction }
// note: m2 = var * (n - 1)
// count
cbuff_curr.w = quickGetValue(r + 2, c);
// m2
cbuff_curr.m2._sum = quickGetValue(r, c) * (cbuff_curr.w - 1);
// mean
cbuff_curr.mean._sum = quickGetValue(r + 1, c);
cbuff_curr.m2._correction = quickGetValue(r + 3, c);
cbuff_curr.mean._correction = quickGetValue(r + 4, c);
// extract partial values: { var | mean, count, m2 correction, mean correction }
// note: m2 = var * (n - 1)
// count
cbuff_part.w = newWithCor.quickGetValue(r + 2, c);
// m2
cbuff_part.m2._sum = newWithCor.quickGetValue(r, c) * (cbuff_part.w - 1);
// mean
cbuff_part.mean._sum = newWithCor.quickGetValue(r + 1, c);
cbuff_part.m2._correction = newWithCor.quickGetValue(r + 3, c);
cbuff_part.mean._correction = newWithCor.quickGetValue(r + 4, c);
// calculate incremental aggregated variance
cbuff_curr = (CM_COV_Object) aggOp.increOp.fn.execute(cbuff_curr, cbuff_part);
// store updated values: { var | mean, count, m2 correction, mean correction }
double var = cbuff_curr.getRequiredResult(AggregateOperationTypes.VARIANCE);
quickSetValue(r, c, var);
// mean
quickSetValue(r + 1, c, cbuff_curr.mean._sum);
// count
quickSetValue(r + 2, c, cbuff_curr.w);
quickSetValue(r + 3, c, cbuff_curr.m2._correction);
quickSetValue(r + 4, c, cbuff_curr.mean._correction);
}
} else if (aggOp.correctionLocation == CorrectionLocationType.LASTFOURCOLUMNS && aggOp.increOp.fn instanceof CM && ((CM) aggOp.increOp.fn).getAggOpType() == AggregateOperationTypes.VARIANCE) {
// create buffers to store results
CM_COV_Object cbuff_curr = new CM_COV_Object();
CM_COV_Object cbuff_part = new CM_COV_Object();
// perform incremental aggregation (same scheme as LASTFOURROWS, along columns)
for (int r = 0; r < rlen; r++) for (int c = 0; c < clen - 4; c++) {
// extract current values: { var | mean, count, m2 correction, mean correction }
// note: m2 = var * (n - 1)
// count
cbuff_curr.w = quickGetValue(r, c + 2);
// m2
cbuff_curr.m2._sum = quickGetValue(r, c) * (cbuff_curr.w - 1);
// mean
cbuff_curr.mean._sum = quickGetValue(r, c + 1);
cbuff_curr.m2._correction = quickGetValue(r, c + 3);
cbuff_curr.mean._correction = quickGetValue(r, c + 4);
// extract partial values: { var | mean, count, m2 correction, mean correction }
// note: m2 = var * (n - 1)
// count
cbuff_part.w = newWithCor.quickGetValue(r, c + 2);
// m2
cbuff_part.m2._sum = newWithCor.quickGetValue(r, c) * (cbuff_part.w - 1);
// mean
cbuff_part.mean._sum = newWithCor.quickGetValue(r, c + 1);
cbuff_part.m2._correction = newWithCor.quickGetValue(r, c + 3);
cbuff_part.mean._correction = newWithCor.quickGetValue(r, c + 4);
// calculate incremental aggregated variance
cbuff_curr = (CM_COV_Object) aggOp.increOp.fn.execute(cbuff_curr, cbuff_part);
// store updated values: { var | mean, count, m2 correction, mean correction }
double var = cbuff_curr.getRequiredResult(AggregateOperationTypes.VARIANCE);
quickSetValue(r, c, var);
// mean
quickSetValue(r, c + 1, cbuff_curr.mean._sum);
// count
quickSetValue(r, c + 2, cbuff_curr.w);
quickSetValue(r, c + 3, cbuff_curr.m2._correction);
quickSetValue(r, c + 4, cbuff_curr.mean._correction);
}
} else
throw new DMLRuntimeException("unrecognized correctionLocation: " + aggOp.correctionLocation);
}
Use of org.apache.sysml.runtime.instructions.cp.CM_COV_Object in project incubator-systemml by apache:
class MatrixBlock, method cmOperations.
/**
 * Computes the central-moment aggregate of this matrix block, which must be a
 * single column vector. Handles empty, sparse, and dense representations;
 * zeros that are not physically stored are folded in with their multiplicity.
 *
 * @param op central-moment operator to apply
 * @return the populated CM_COV_Object aggregate
 * @throws DMLRuntimeException if this block has more than one column
 */
public CM_COV_Object cmOperations(CMOperator op) {
    // dimension check for input column vectors
    if (this.getNumColumns() != 1) {
        throw new DMLRuntimeException("Central Moment can not be computed on [" + this.getNumRows() + "," + this.getNumColumns() + "] matrix.");
    }
    CM_COV_Object cmobj = new CM_COV_Object();
    // empty block: fold in all rows as zeros at once (note: without this,
    // we get a NaN due to 0/0 on reading out the required result)
    if (isEmptyBlock(false)) {
        op.fn.execute(cmobj, 0.0, getNumRows());
        return cmobj;
    }
    if (sparse && sparseBlock != null) { // SPARSE
        int nnzSeen = 0;
        int rmax = Math.min(rlen, sparseBlock.numRows());
        for (int i = 0; i < rmax; i++) {
            if (sparseBlock.isEmpty(i))
                continue;
            int pos = sparseBlock.pos(i);
            int len = sparseBlock.size(i);
            double[] vals = sparseBlock.values(i);
            for (int j = pos; j < pos + len; j++) {
                op.fn.execute(cmobj, vals[j]);
                nnzSeen++;
            }
        }
        // account for zeros in the vector (not physically stored)
        op.fn.execute(cmobj, 0.0, this.getNumRows() - nnzSeen);
    } else if (denseBlock != null) { // DENSE
        // always vector (see check above), so a flat value array suffices
        double[] vals = getDenseBlockValues();
        for (int i = 0; i < rlen; i++)
            op.fn.execute(cmobj, vals[i]);
    }
    return cmobj;
}
Use of org.apache.sysml.runtime.instructions.cp.CM_COV_Object in project incubator-systemml by apache:
class CMCOVMRReducer, method configure.
/**
 * Initializes this reducer from the job configuration: parses the CM/COV
 * instructions, records per-input dimensions, sets up the per-input CM
 * functions and covariance tags, and prepares the shared zero object.
 *
 * @param job the MapReduce job configuration
 * @throws RuntimeException if the CM/COV instructions cannot be parsed
 */
@Override
public void configure(JobConf job) {
    super.configure(job);
    rlens = new HashMap<>();
    clens = new HashMap<>();
    try {
        cmNcovInstructions = MRJobConfiguration.getCM_N_COVInstructions(job);
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
    for (CM_N_COVInstruction inst : cmNcovInstructions) {
        // covariance inputs are tracked by tag; all others are CM operators
        if (inst.getOperator() instanceof COVOperator) {
            covTags.add(inst.input);
        } else { // CMOperator
            cmFn.put(inst.input, CM.getCMFnObject(((CMOperator) inst.getOperator()).getAggOpType()));
        }
        outputIndexesMapping.put(inst.output, getOutputIndexes(inst.output));
        rlens.put(inst.input, MRJobConfiguration.getNumRows(job, inst.input));
        clens.put(inst.input, MRJobConfiguration.getNumColumns(job, inst.input));
    }
    // shared zero record used to account for non-stored zeros (weight 1)
    zeroObj = new CM_COV_Object();
    zeroObj.w = 1;
}
Use of org.apache.sysml.runtime.instructions.cp.CM_COV_Object in project systemml by apache:
class CM, method execute.
/**
 * Special case for weights w2==1.
 *
 * Incrementally folds a single observation {@code in2} (implicit weight 1)
 * into the running central-moment state {@code in1}, maintaining only the
 * statistics required by the operator type {@code _type}. The updates appear
 * to follow standard one-pass (Welford-style) central-moment update formulas
 * -- NOTE(review): verify against the general weighted variant of execute.
 *
 * @param in1 running aggregate state (a CM_COV_Object, modified in place)
 * @param in2 the new observation to incorporate
 * @return the updated {@code in1}
 */
@Override
public Data execute(Data in1, double in2) {
CM_COV_Object cm1 = (CM_COV_Object) in1;
// empty aggregate: initialize directly from the single observation
if (cm1.isCMAllZeros()) {
cm1.w = 1;
cm1.mean.set(in2, 0);
cm1.m2.set(0, 0);
cm1.m3.set(0, 0);
cm1.m4.set(0, 0);
return cm1;
}
switch(_type) {
case COUNT:
{
// only the weight (count) is tracked
cm1.w = cm1.w + 1;
break;
}
case MEAN:
{
// incremental mean: mean += (x - mean) / n
double w = cm1.w + 1;
double d = in2 - cm1.mean._sum;
cm1.mean = (KahanObject) _plus.execute(cm1.mean, d / w);
cm1.w = w;
break;
}
case CM2:
{
// mean update plus second central moment m2 += (w_old/w) * d^2
double w = cm1.w + 1;
double d = in2 - cm1.mean._sum;
cm1.mean = (KahanObject) _plus.execute(cm1.mean, d / w);
double t1 = cm1.w / w * d;
double lt1 = t1 * d;
// _buff2 is a scratch KahanObject so cm1.m2 is only overwritten at the end
_buff2.set(cm1.m2);
_buff2 = (KahanObject) _plus.execute(_buff2, lt1);
cm1.m2.set(_buff2);
cm1.w = w;
break;
}
case CM3:
{
// maintains mean, m2, and third central moment m3
double w = cm1.w + 1;
double d = in2 - cm1.mean._sum;
cm1.mean = (KahanObject) _plus.execute(cm1.mean, d / w);
double t1 = cm1.w / w * d;
double t2 = -1 / cm1.w;
double lt1 = t1 * d;
double lt2 = Math.pow(t1, 3) * (1.0 - Math.pow(t2, 2));
double f2 = 1.0 / w;
// m3's update reads the OLD m2._sum, hence the scratch buffers
_buff2.set(cm1.m2);
_buff2 = (KahanObject) _plus.execute(_buff2, lt1);
_buff3.set(cm1.m3);
_buff3 = (KahanObject) _plus.execute(_buff3, lt2 - 3 * cm1.m2._sum * f2 * d);
cm1.m2.set(_buff2);
cm1.m3.set(_buff3);
cm1.w = w;
break;
}
case CM4:
{
// maintains mean, m2, m3, and fourth central moment m4
double w = cm1.w + 1;
double d = in2 - cm1.mean._sum;
cm1.mean = (KahanObject) _plus.execute(cm1.mean, d / w);
double t1 = cm1.w / w * d;
double t2 = -1 / cm1.w;
double lt1 = t1 * d;
double lt2 = Math.pow(t1, 3) * (1.0 - Math.pow(t2, 2));
double lt3 = Math.pow(t1, 4) * (1.0 - Math.pow(t2, 3));
double f2 = 1.0 / w;
_buff2.set(cm1.m2);
_buff2 = (KahanObject) _plus.execute(_buff2, lt1);
_buff3.set(cm1.m3);
_buff3 = (KahanObject) _plus.execute(_buff3, lt2 - 3 * cm1.m2._sum * f2 * d);
// NOTE(review): m4 is updated in place (no scratch buffer) but must be
// computed BEFORE m2/m3 are overwritten below, since it reads the old
// cm1.m2._sum and cm1.m3._sum -- statement order is load-bearing here.
cm1.m4 = (KahanObject) _plus.execute(cm1.m4, 6 * cm1.m2._sum * Math.pow(-f2 * d, 2) + lt3 - 4 * cm1.m3._sum * f2 * d);
cm1.m2.set(_buff2);
cm1.m3.set(_buff3);
cm1.w = w;
break;
}
case VARIANCE:
{
// variance tracks only mean and m2; m2 updated directly (no old value read)
double w = cm1.w + 1;
double d = in2 - cm1.mean._sum;
cm1.mean = (KahanObject) _plus.execute(cm1.mean, d / w);
double t1 = cm1.w / w * d;
double lt1 = t1 * d;
cm1.m2 = (KahanObject) _plus.execute(cm1.m2, lt1);
cm1.w = w;
break;
}
default:
throw new DMLRuntimeException("Unsupported operation type: " + _type);
}
return cm1;
}
Aggregations