Usage of org.apache.sysml.runtime.util.LongLongDoubleHashMap.ADoubleEntry in the Apache project incubator-systemml: class SparseBlockGetSet, method runSparseBlockGetSetTest.
/**
 * Generates a random rows-by-cols matrix with the given sparsity, loads it into
 * a sparse block of the given type (either via bulk conversion from a
 * MatrixBlock, via sequential append, or via set() in randomized hash order),
 * and validates row count, non-zero count, isEmpty(row), and every cell value
 * returned by get().
 *
 * @param btype sparse block implementation under test (MCSR, CSR, or COO)
 * @param sparsity fraction of non-zero cells in the generated input matrix
 * @param itype initialization scheme: BULK, SEQ_SET, or RAND_SET
 */
private void runSparseBlockGetSetTest(SparseBlock.Type btype, double sparsity, InitType itype) {
try {
// data generation (fixed seed for reproducibility across block types)
double[][] A = getRandomMatrix(rows, cols, -10, 10, sparsity, 7654321);
// init sparse block
SparseBlock sblock = null;
if (itype == InitType.BULK) {
// bulk path: build a MatrixBlock first, then wrap its sparse representation
MatrixBlock mbtmp = DataConverter.convertToMatrixBlock(A);
SparseBlock srtmp = mbtmp.getSparseBlock();
switch(btype) {
case MCSR:
sblock = new SparseBlockMCSR(srtmp);
break;
case CSR:
sblock = new SparseBlockCSR(srtmp);
break;
case COO:
sblock = new SparseBlockCOO(srtmp);
break;
}
} else if (itype == InitType.SEQ_SET || itype == InitType.RAND_SET) {
// incremental paths: start from an empty block of the requested type
switch(btype) {
case MCSR:
sblock = new SparseBlockMCSR(rows, cols);
break;
case CSR:
sblock = new SparseBlockCSR(rows, cols);
break;
case COO:
sblock = new SparseBlockCOO(rows, cols);
break;
}
if (itype == InitType.SEQ_SET) {
// sequential: append cells in row-major order
for (int i = 0; i < rows; i++) for (int j = 0; j < cols; j++) sblock.append(i, j, A[i][j]);
} else if (itype == InitType.RAND_SET) {
// randomized: route cells through a hash map so set() sees them out of order
LongLongDoubleHashMap map = new LongLongDoubleHashMap();
for (int i = 0; i < rows; i++) for (int j = 0; j < cols; j++) map.addValue(i, j, A[i][j]);
Iterator<ADoubleEntry> iter = map.getIterator();
while (iter.hasNext()) {
// random hash order
ADoubleEntry e = iter.next();
sblock.set((int) e.getKey1(), (int) e.getKey2(), e.value);
}
}
}
// check basic meta data
if (sblock.numRows() != rows)
Assert.fail("Wrong number of rows: " + sblock.numRows() + ", expected: " + rows);
// check for correct number of non-zeros (expected counts derived from the dense input)
int[] rnnz = new int[rows];
int nnz = 0;
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) rnnz[i] += (A[i][j] != 0) ? 1 : 0;
nnz += rnnz[i];
}
if (nnz != sblock.size())
Assert.fail("Wrong number of non-zeros: " + sblock.size() + ", expected: " + nnz);
// check correct isEmpty return
for (int i = 0; i < rows; i++) if (sblock.isEmpty(i) != (rnnz[i] == 0))
Assert.fail("Wrong isEmpty(row) result for row nnz: " + rnnz[i]);
// check correct values (empty rows skipped; get() on them is not exercised here)
for (int i = 0; i < rows; i++) if (!sblock.isEmpty(i))
for (int j = 0; j < cols; j++) {
double tmp = sblock.get(i, j);
if (tmp != A[i][j])
Assert.fail("Wrong get value for cell (" + i + "," + j + "): " + tmp + ", expected: " + A[i][j]);
}
} catch (Exception ex) {
ex.printStackTrace();
throw new RuntimeException(ex);
}
}
Usage of org.apache.sysml.runtime.util.LongLongDoubleHashMap.ADoubleEntry in the Apache project systemml: class SparseBlockGetSet, method runSparseBlockGetSetTest.
/**
 * Exercises get/set on a sparse block: generates a random rows-by-cols matrix
 * with the requested sparsity, loads it into a sparse block of the requested
 * type (bulk conversion, sequential append, or randomized set), and then
 * checks the row count, non-zero count, per-row emptiness, and all cell values.
 *
 * @param btype sparse block implementation under test (MCSR, CSR, or COO)
 * @param sparsity fraction of non-zero cells in the generated input matrix
 * @param itype initialization scheme: BULK, SEQ_SET, or RAND_SET
 */
private void runSparseBlockGetSetTest(SparseBlock.Type btype, double sparsity, InitType itype) {
    try {
        // generate test data with a fixed seed for reproducibility
        double[][] data = getRandomMatrix(rows, cols, -10, 10, sparsity, 7654321);

        // construct the sparse block under test
        SparseBlock block = null;
        if (itype == InitType.BULK) {
            // bulk initialization from an existing sparse representation
            MatrixBlock tmp = DataConverter.convertToMatrixBlock(data);
            SparseBlock src = tmp.getSparseBlock();
            if (btype == SparseBlock.Type.MCSR)
                block = new SparseBlockMCSR(src);
            else if (btype == SparseBlock.Type.CSR)
                block = new SparseBlockCSR(src);
            else if (btype == SparseBlock.Type.COO)
                block = new SparseBlockCOO(src);
        }
        else if (itype == InitType.SEQ_SET || itype == InitType.RAND_SET) {
            // start from an empty block of the requested type
            if (btype == SparseBlock.Type.MCSR)
                block = new SparseBlockMCSR(rows, cols);
            else if (btype == SparseBlock.Type.CSR)
                block = new SparseBlockCSR(rows, cols);
            else if (btype == SparseBlock.Type.COO)
                block = new SparseBlockCOO(rows, cols);
            if (itype == InitType.SEQ_SET) {
                // append cells in row-major order
                for (int r = 0; r < rows; r++)
                    for (int c = 0; c < cols; c++)
                        block.append(r, c, data[r][c]);
            }
            else {
                // route cells through a hash map so set() sees them in hash order
                LongLongDoubleHashMap cells = new LongLongDoubleHashMap();
                for (int r = 0; r < rows; r++)
                    for (int c = 0; c < cols; c++)
                        cells.addValue(r, c, data[r][c]);
                Iterator<ADoubleEntry> it = cells.getIterator();
                while (it.hasNext()) {
                    // random hash order
                    ADoubleEntry entry = it.next();
                    block.set((int) entry.getKey1(), (int) entry.getKey2(), entry.value);
                }
            }
        }

        // check basic meta data
        if (block.numRows() != rows)
            Assert.fail("Wrong number of rows: " + block.numRows() + ", expected: " + rows);

        // derive expected per-row and total non-zero counts from the dense input
        int[] rowNnz = new int[rows];
        int totalNnz = 0;
        for (int r = 0; r < rows; r++) {
            for (int c = 0; c < cols; c++)
                if (data[r][c] != 0)
                    rowNnz[r]++;
            totalNnz += rowNnz[r];
        }
        if (totalNnz != block.size())
            Assert.fail("Wrong number of non-zeros: " + block.size() + ", expected: " + totalNnz);

        // check correct isEmpty per row
        for (int r = 0; r < rows; r++)
            if (block.isEmpty(r) != (rowNnz[r] == 0))
                Assert.fail("Wrong isEmpty(row) result for row nnz: " + rowNnz[r]);

        // check correct values (non-empty rows only)
        for (int r = 0; r < rows; r++) {
            if (block.isEmpty(r))
                continue;
            for (int c = 0; c < cols; c++) {
                double val = block.get(r, c);
                if (val != data[r][c])
                    Assert.fail("Wrong get value for cell (" + r + "," + c + "): " + val + ", expected: " + data[r][c]);
            }
        }
    }
    catch (Exception ex) {
        ex.printStackTrace();
        throw new RuntimeException(ex);
    }
}
Usage of org.apache.sysml.runtime.util.LongLongDoubleHashMap.ADoubleEntry in the Apache project systemml: class SparseBlockAppendSort, method runSparseBlockAppendSortTest.
/**
 * Generates a random rows-by-cols matrix with the given sparsity, appends all
 * cells into a sparse block of the given type (in sequential row-major order
 * or in randomized hash order), sorts the block, and validates non-zero
 * counts, isEmpty(row), and every cell value returned by get().
 *
 * @param btype sparse block implementation under test (MCSR, CSR, or COO)
 * @param sparsity fraction of non-zero cells in the generated input matrix
 * @param itype initialization scheme: SEQ_SET or RAND_SET
 */
private void runSparseBlockAppendSortTest(SparseBlock.Type btype, double sparsity, InitType itype) {
try {
// data generation (fixed seed for reproducibility across block types)
double[][] A = getRandomMatrix(rows, cols, -10, 10, sparsity, 7654321);
// init sparse block (empty; populated via append below)
SparseBlock sblock = null;
switch(btype) {
case MCSR:
sblock = new SparseBlockMCSR(rows, cols);
break;
case CSR:
sblock = new SparseBlockCSR(rows, cols);
break;
case COO:
sblock = new SparseBlockCOO(rows, cols);
break;
}
if (itype == InitType.SEQ_SET) {
// sequential: append cells in row-major order
for (int i = 0; i < rows; i++) for (int j = 0; j < cols; j++) sblock.append(i, j, A[i][j]);
} else if (itype == InitType.RAND_SET) {
// randomized: route cells through a hash map so append() sees them out of order
LongLongDoubleHashMap map = new LongLongDoubleHashMap();
for (int i = 0; i < rows; i++) for (int j = 0; j < cols; j++) map.addValue(i, j, A[i][j]);
Iterator<ADoubleEntry> iter = map.getIterator();
while (iter.hasNext()) {
// random hash order
ADoubleEntry e = iter.next();
sblock.append((int) e.getKey1(), (int) e.getKey2(), e.value);
}
}
// sort appended values (required before correctness checks after unordered append)
sblock.sort();
// check for correct number of non-zeros (expected counts derived from the dense input)
int[] rnnz = new int[rows];
int nnz = 0;
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) rnnz[i] += (A[i][j] != 0) ? 1 : 0;
nnz += rnnz[i];
}
if (nnz != sblock.size())
Assert.fail("Wrong number of non-zeros: " + sblock.size() + ", expected: " + nnz);
// check correct isEmpty return
for (int i = 0; i < rows; i++) if (sblock.isEmpty(i) != (rnnz[i] == 0))
Assert.fail("Wrong isEmpty(row) result for row nnz: " + rnnz[i]);
// check correct values (empty rows skipped; get() on them is not exercised here)
for (int i = 0; i < rows; i++) if (!sblock.isEmpty(i))
for (int j = 0; j < cols; j++) {
double tmp = sblock.get(i, j);
if (tmp != A[i][j])
Assert.fail("Wrong get value for cell (" + i + "," + j + "): " + tmp + ", expected: " + A[i][j]);
}
} catch (Exception ex) {
ex.printStackTrace();
throw new RuntimeException(ex);
}
}
Usage of org.apache.sysml.runtime.util.LongLongDoubleHashMap.ADoubleEntry in the Apache project systemml: class GMRCtableBuffer, method flushBuffer.
/**
 * Flushes all buffered ctable results to the output collector. Exactly one of
 * the two internal buffers is expected to be populated: the cell map buffer
 * (_mapBuffer) is emitted as individual matrix cells, while the block buffer
 * (_blockBuffer) is emitted as binary blocks, slicing results larger than a
 * single block into sub-blocks of at most brlen x bclen. As a side effect,
 * result meta data is maintained: non-zero counts per output, and max
 * row/column dimensions for outputs with unknown dimensions. Both buffers are
 * cleared on successful flush.
 *
 * @param reporter the MR reporter passed through to the output collector
 * @throws RuntimeException if both buffers are empty or flushing fails
 */
public void flushBuffer(Reporter reporter) throws RuntimeException {
    try {
        if (_mapBuffer != null) {
            // reuse a single MatrixCell; a fresh MatrixIndexes is created per entry
            MatrixIndexes key = null;
            MatrixCell value = new MatrixCell();
            for (Entry<Byte, CTableMap> ctable : _mapBuffer.entrySet()) {
                ArrayList<Integer> resultIDs = ReduceBase.getOutputIndexes(ctable.getKey(), _resultIndexes);
                CTableMap resultMap = ctable.getValue();
                // maintain result dims and nonzeros
                for (Integer i : resultIDs) {
                    _resultNonZeros[i] += resultMap.size();
                    if (_resultDimsUnknown[i] == (byte) 1) {
                        _resultMaxRowDims[i] = Math.max(resultMap.getMaxRow(), _resultMaxRowDims[i]);
                        _resultMaxColDims[i] = Math.max(resultMap.getMaxColumn(), _resultMaxColDims[i]);
                    }
                }
                // output result data (one cell per map entry, to every result index)
                Iterator<ADoubleEntry> iter = resultMap.getIterator();
                while (iter.hasNext()) {
                    ADoubleEntry e = iter.next();
                    key = new MatrixIndexes(e.getKey1(), e.getKey2());
                    value.setValue(e.value);
                    for (Integer i : resultIDs)
                        _collector.collectOutput(key, value, i, reporter);
                }
            }
        } else if (_blockBuffer != null) {
            MatrixIndexes key = new MatrixIndexes(1, 1);
            // DataConverter.writeBinaryBlockMatrixToHDFS(path, job, mat, mc.get_rows(), mc.get_cols(), mc.get_rows_per_block(), mc.get_cols_per_block(), replication);
            for (Entry<Byte, MatrixBlock> ctable : _blockBuffer.entrySet()) {
                ArrayList<Integer> resultIDs = ReduceBase.getOutputIndexes(ctable.getKey(), _resultIndexes);
                MatrixBlock outBlock = ctable.getValue();
                outBlock.recomputeNonZeros();
                // TODO: change hard coding of 1000
                int brlen = 1000, bclen = 1000;
                int rlen = outBlock.getNumRows();
                int clen = outBlock.getNumColumns();
                // final output matrix is smaller than a single block
                // (fixed: compare clen against bclen, not brlen; previously only
                // correct by coincidence because brlen == bclen)
                if (rlen <= brlen && clen <= bclen) {
                    key = new MatrixIndexes(1, 1);
                    for (Integer i : resultIDs) {
                        _collector.collectOutput(key, outBlock, i, reporter);
                        _resultNonZeros[i] += outBlock.getNonZeros();
                    }
                } else {
                    // Following code is similar to that in DataConverter.DataConverter.writeBinaryBlockMatrixToHDFS
                    // initialize blocks for reuse (at most 4 different blocks required)
                    MatrixBlock[] blocks = MatrixWriter.createMatrixBlocksForReuse(rlen, clen, brlen, bclen, true, outBlock.getNonZeros());
                    // create and write subblocks of matrix
                    for (int blockRow = 0; blockRow < (int) Math.ceil(rlen / (double) brlen); blockRow++) {
                        for (int blockCol = 0; blockCol < (int) Math.ceil(clen / (double) bclen); blockCol++) {
                            // extent of this sub-block (boundary blocks may be smaller)
                            int maxRow = (blockRow * brlen + brlen < rlen) ? brlen : rlen - blockRow * brlen;
                            int maxCol = (blockCol * bclen + bclen < clen) ? bclen : clen - blockCol * bclen;
                            int row_offset = blockRow * brlen;
                            int col_offset = blockCol * bclen;
                            // get reuse matrix block
                            MatrixBlock block = MatrixWriter.getMatrixBlockForReuse(blocks, maxRow, maxCol, brlen, bclen);
                            // copy submatrix to block
                            outBlock.slice(row_offset, row_offset + maxRow - 1, col_offset, col_offset + maxCol - 1, block);
                            // TODO: skip empty "block"
                            // append block to sequence file (1-based block indexes)
                            key.setIndexes(blockRow + 1, blockCol + 1);
                            for (Integer i : resultIDs) {
                                _collector.collectOutput(key, block, i, reporter);
                                _resultNonZeros[i] += block.getNonZeros();
                            }
                            // reset block for later reuse
                            block.reset();
                        }
                    }
                }
            }
        } else {
            throw new DMLRuntimeException("Unexpected.. both ctable buffers are empty.");
        }
    } catch (Exception ex) {
        throw new RuntimeException("Failed to flush ctable buffer.", ex);
    }
    // remove existing partial ctables
    if (_mapBuffer != null)
        _mapBuffer.clear();
    else
        _blockBuffer.clear();
}
Usage of org.apache.sysml.runtime.util.LongLongDoubleHashMap.ADoubleEntry in the Apache project systemml: class CTableMap, method toMatrixBlock.
/**
 * Materializes the accumulated ctable cells into a rlen-by-clen MatrixBlock.
 * The target format (sparse vs dense) is chosen via
 * MatrixBlock.evalSparseFormatInMemory based on the map size. Zero values and
 * cells whose 1-based (row, column) keys exceed the requested dimensions are
 * dropped.
 *
 * @param rlen number of rows of the output block
 * @param clen number of columns of the output block
 * @return a new MatrixBlock containing all in-range, non-zero cells
 */
public MatrixBlock toMatrixBlock(int rlen, int clen) {
    // allocate the target block; format decided from the estimated non-zeros
    int nnz = _map.size();
    boolean sparse = MatrixBlock.evalSparseFormatInMemory(rlen, clen, nnz);
    MatrixBlock mb = new MatrixBlock(rlen, clen, sparse, nnz).allocateBlock();

    if (sparse) { // SPARSE <- cells
        // append in hash order (avoids shifting), then sort rows afterwards
        SparseBlock dst = mb.getSparseBlock();
        Iterator<ADoubleEntry> it = _map.getIterator();
        while (it.hasNext()) {
            ADoubleEntry entry = it.next();
            double val = entry.value;
            int r = (int) entry.getKey1();
            int c = (int) entry.getKey2();
            if (val != 0 && r <= rlen && c <= clen) {
                // pre-allocate with the average row cardinality as a hint
                dst.allocate(r - 1, Math.max(nnz / rlen, 1));
                dst.append(r - 1, c - 1, val);
            }
        }
        // restore sorted sparse rows and exact non-zero count
        mb.sortSparseRows();
        mb.recomputeNonZeros();
    }
    else { // DENSE <- cells
        // directly insert cells into the dense target
        Iterator<ADoubleEntry> it = _map.getIterator();
        while (it.hasNext()) {
            ADoubleEntry entry = it.next();
            double val = entry.value;
            int r = (int) entry.getKey1();
            int c = (int) entry.getKey2();
            if (val != 0 && r <= rlen && c <= clen)
                mb.quickSetValue(r - 1, c - 1, val);
        }
    }
    return mb;
}
Aggregations