Use of org.apache.sysml.api.DMLScript.RUNTIME_PLATFORM in the project incubator-systemml by Apache.
The class Conv2DBackwardDataTest, method runConv2DTest.
/**
 * Runs a conv2d backward-data test on the given execution platform and
 * compares the DML result against the R reference script.
 *
 * @param et          execution type (CP, SPARK, or MR)
 * @param imgSize     height/width of the square input image
 * @param numImg      number of input images
 * @param numChannels number of input channels
 * @param numFilters  number of filters
 * @param filterSize  height/width of the square filter
 * @param stride      convolution stride
 * @param pad         convolution padding
 * @param sparse1     whether the first input is sparse
 * @param sparse2     whether the second input is sparse
 */
public void runConv2DTest(ExecType et, int imgSize, int numImg, int numChannels, int numFilters,
        int filterSize, int stride, int pad, boolean sparse1, boolean sparse2) {
    RUNTIME_PLATFORM oldRTP = rtplatform;
    boolean sparkConfigOld = DMLScript.USE_LOCAL_SPARK_CONFIG;
    try {
        TestConfiguration config = getTestConfiguration(TEST_NAME);
        if (et == ExecType.SPARK) {
            rtplatform = RUNTIME_PLATFORM.SPARK;
        } else {
            rtplatform = (et == ExecType.MR) ? RUNTIME_PLATFORM.HADOOP : RUNTIME_PLATFORM.SINGLE_NODE;
        }
        if (rtplatform == RUNTIME_PLATFORM.SPARK)
            DMLScript.USE_LOCAL_SPARK_CONFIG = true;
        loadTestConfiguration(config);

        // Construct the JUnit test arguments directly
        String RI_HOME = SCRIPT_DIR + TEST_DIR;
        fullDMLScriptName = RI_HOME + TEST_NAME + ".dml";
        String sparseVal1 = String.valueOf(sparse1).toUpperCase();
        String sparseVal2 = String.valueOf(sparse2).toUpperCase();
        long P = ConvolutionUtils.getP(imgSize, filterSize, stride, pad);
        programArgs = new String[] { "-explain", "-args",
            "" + imgSize, "" + numImg, "" + numChannels, "" + numFilters,
            "" + filterSize, "" + stride, "" + pad, "" + P, "" + P,
            output("B"), sparseVal1, sparseVal2 };
        boolean exceptionExpected = false;
        int expectedNumberOfJobs = -1;
        runTest(true, exceptionExpected, null, expectedNumberOfJobs);

        // Run the R reference script for comparison
        fullRScriptName = RI_HOME + TEST_NAME + ".R";
        rCmd = "Rscript" + " " + fullRScriptName + " " + imgSize + " " + numImg + " "
            + numChannels + " " + numFilters + " " + filterSize + " " + stride + " "
            + pad + " " + P + " " + P + " " + expectedDir() + " " + sparseVal1 + " " + sparseVal2;
        runRScript(true);

        // Compare the DML output against the R reference output
        HashMap<CellIndex, Double> bHM = readRMatrixFromFS("B");
        HashMap<CellIndex, Double> dmlfile = readDMLMatrixFromHDFS("B");
        TestUtils.compareMatrices(dmlfile, bHM, epsilon, "B-DML", "B-R");
    } finally {
        rtplatform = oldRTP;
        DMLScript.USE_LOCAL_SPARK_CONFIG = sparkConfigOld;
    }
}
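The P value passed twice to both scripts is the output spatial dimension of the convolution. A minimal sketch of the standard formula that ConvolutionUtils.getP presumably implements (the helper below is illustrative, not the actual SystemML code):

// Illustrative only: standard conv2d output-size formula,
// P = (imgSize + 2*pad - filterSize) / stride + 1.
// ConvolutionUtils.getP is assumed to compute this; see the SystemML
// source for the authoritative version.
static long outputSize(int imgSize, int filterSize, int stride, int pad) {
    return (imgSize + 2L * pad - filterSize) / stride + 1;
}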
Use of org.apache.sysml.api.DMLScript.RUNTIME_PLATFORM in the project incubator-systemml by Apache.
The class PoolTest, method runPoolTest.
/**
 * Runs a pooling test on the given execution platform and compares the
 * DML result against the R reference script.
 *
 * @param et          execution type (CP, SPARK, or MR)
 * @param imgSize     height/width of the square input image
 * @param numImg      number of input images
 * @param numChannels number of input channels
 * @param stride      pooling stride
 * @param pad         pooling padding
 * @param poolSize1   height of the pooling window
 * @param poolSize2   width of the pooling window
 * @param poolMode    pooling mode (e.g., "max")
 * @param sparse      whether the input is sparse
 */
public void runPoolTest(ExecType et, int imgSize, int numImg, int numChannels, int stride,
        int pad, int poolSize1, int poolSize2, String poolMode, boolean sparse) {
    RUNTIME_PLATFORM oldRTP = rtplatform;
    boolean sparkConfigOld = DMLScript.USE_LOCAL_SPARK_CONFIG;
    try {
        String sparseVal = String.valueOf(sparse).toUpperCase();
        TestConfiguration config = getTestConfiguration(TEST_NAME);
        if (et == ExecType.SPARK) {
            rtplatform = RUNTIME_PLATFORM.SPARK;
        } else {
            rtplatform = (et == ExecType.MR) ? RUNTIME_PLATFORM.HADOOP : RUNTIME_PLATFORM.SINGLE_NODE;
        }
        if (rtplatform == RUNTIME_PLATFORM.SPARK)
            DMLScript.USE_LOCAL_SPARK_CONFIG = true;
        loadTestConfiguration(config);

        // Construct the JUnit test arguments directly
        String RI_HOME = SCRIPT_DIR + TEST_DIR;
        fullDMLScriptName = RI_HOME + TEST_NAME + ".dml";
        programArgs = new String[] { "-explain", "-args",
            "" + imgSize, "" + numImg, "" + numChannels, "" + poolSize1, "" + poolSize2,
            "" + stride, "" + pad, poolMode, output("B"), sparseVal };
        boolean exceptionExpected = false;
        int expectedNumberOfJobs = -1;
        runTest(true, exceptionExpected, null, expectedNumberOfJobs);

        // Run the R reference script for comparison
        fullRScriptName = RI_HOME + TEST_NAME + ".R";
        rCmd = "Rscript" + " " + fullRScriptName + " " + imgSize + " " + numImg + " "
            + numChannels + " " + poolSize1 + " " + poolSize2 + " " + stride + " "
            + pad + " " + expectedDir() + " " + sparseVal;
        runRScript(true);

        // Compare the DML output against the R reference output
        HashMap<CellIndex, Double> bHM = readRMatrixFromFS("B");
        HashMap<CellIndex, Double> dmlfile = readDMLMatrixFromHDFS("B");
        TestUtils.compareMatrices(dmlfile, bHM, epsilon, "B-DML", "B-R");
    } finally {
        rtplatform = oldRTP;
        DMLScript.USE_LOCAL_SPARK_CONFIG = sparkConfigOld;
    }
}
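A typical caller is a small JUnit method per configuration; the concrete values below are hypothetical, chosen only to illustrate the parameter order:

// Hypothetical test method: 2x2 max-pooling with stride 1 and no padding
// on 5 dense 32x32 inputs with 3 channels, executed in CP mode.
@Test
public void testMaxPool2x2DenseCP() {
    runPoolTest(ExecType.CP, 32, 5, 3, 1, 0, 2, 2, "max", false);
}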
Use of org.apache.sysml.api.DMLScript.RUNTIME_PLATFORM in the project incubator-systemml by Apache.
The class ParForSampleTest, method runParForSampleTest.
/**
 * @param sparse whether to generate sparse input data
 * @param et     execution type (CP, SPARK, or MR)
 */
@SuppressWarnings({ "unchecked" })
private void runParForSampleTest(boolean sparse, ExecType et) {
    RUNTIME_PLATFORM platformOld = rtplatform;
    switch (et) {
        case MR:
            rtplatform = RUNTIME_PLATFORM.HADOOP;
            break;
        case SPARK:
            rtplatform = RUNTIME_PLATFORM.SPARK;
            break;
        default:
            rtplatform = RUNTIME_PLATFORM.HYBRID;
            break;
    }
    boolean sparkConfigOld = DMLScript.USE_LOCAL_SPARK_CONFIG;
    if (rtplatform == RUNTIME_PLATFORM.SPARK)
        DMLScript.USE_LOCAL_SPARK_CONFIG = true;
    try {
        // Invocation arguments
        TestConfiguration config = getTestConfiguration(TEST_NAME);
        config.addVariable("rows", rows);
        config.addVariable("cols", cols);
        loadTestConfiguration(config);
        fullDMLScriptName = SCRIPT_DIR + TEST_DIR + TEST_NAME + ".dml";
        programArgs = new String[] { "-explain", "-args", input("A"), "0.8 0.2", output("B") };

        // Generate input data with a row-id sequence in the first column
        double[][] A = getRandomMatrix(rows, cols, -1, 1, sparse ? sparsity2 : sparsity1, 7);
        for (int i = 0; i < A.length; i++)
            A[i][0] = (i + 1);
        writeInputMatrixWithMTD("A", A, false);

        // Run test case
        runTest(true, false, null, -1);

        // Read result data and metadata
        HashMap<CellIndex, Double> B1 = readDMLMatrixFromHDFS("B1");
        HashMap<CellIndex, Double> B2 = readDMLMatrixFromHDFS("B2");
        MatrixCharacteristics B1mc = readDMLMetaDataFile("B1");
        MatrixCharacteristics B2mc = readDMLMetaDataFile("B2");

        // Compare metadata
        Assert.assertEquals(new Long(rows), new Long(B1mc.getRows() + B2mc.getRows())); // samples jointly cover all rows
        Assert.assertEquals(new Long(cols), new Long(B1mc.getCols())); // full column coverage
        Assert.assertEquals(new Long(cols), new Long(B2mc.getCols())); // full column coverage
        Assert.assertNotEquals(new Long(rows), new Long(B1mc.getRows())); // no sample contains all rows
        Assert.assertNotEquals(new Long(rows), new Long(B2mc.getRows())); // no sample contains all rows

        // Compare data: every row id 1..rows must appear in exactly one sample
        HashSet<Integer> probe = new HashSet<Integer>(rows);
        for (int i = 0; i < rows; i++)
            probe.add(i + 1);
        for (HashMap<CellIndex, Double> B : new HashMap[] { B1, B2 })
            for (Entry<CellIndex, Double> e : B.entrySet())
                if (e.getKey().column == 1) {
                    boolean flag = probe.remove(e.getValue().intValue());
                    Assert.assertTrue("Wrong return value for " + e.getKey() + ": " + e.getValue(), flag);
                }
    } finally {
        rtplatform = platformOld;
        DMLScript.USE_LOCAL_SPARK_CONFIG = sparkConfigOld;
    }
}
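Every method in this collection repeats the same save/switch/restore sequence around the global rtplatform and USE_LOCAL_SPARK_CONFIG flags. A minimal sketch of how that pattern could be factored into a shared helper (the helper name and Runnable-based signature are hypothetical, not part of SystemML):

// Hypothetical helper: run a test body under a given runtime platform,
// always restoring the previous global state afterwards.
private void withPlatform(RUNTIME_PLATFORM platform, Runnable body) {
    RUNTIME_PLATFORM oldPlatform = rtplatform;
    boolean oldSparkConfig = DMLScript.USE_LOCAL_SPARK_CONFIG;
    try {
        rtplatform = platform;
        if (platform == RUNTIME_PLATFORM.SPARK)
            DMLScript.USE_LOCAL_SPARK_CONFIG = true; // use a local Spark context in tests
        body.run();
    } finally {
        rtplatform = oldPlatform;
        DMLScript.USE_LOCAL_SPARK_CONFIG = oldSparkConfig;
    }
}

With such a helper, each test method would shrink to its configuration, data generation, and comparison logic.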
Use of org.apache.sysml.api.DMLScript.RUNTIME_PLATFORM in the project incubator-systemml by Apache.
The class ParForCorrelationTest, method runParForCorrelationTest.
/**
 * @param parallel   whether to use a parfor (vs. a plain for) script variant
 * @param outer      execution mode of the outer parfor loop
 * @param inner      execution mode of the inner parfor loop
 * @param instType   execution mode of instructions
 * @param profile    whether to run the optimizer with profiling enabled
 * @param debug      whether to run in debug mode
 * @param statistics whether to collect runtime statistics
 */
private void runParForCorrelationTest(boolean parallel, PExecMode outer, PExecMode inner,
        ExecType instType, boolean profile, boolean debug, boolean statistics) {
    // Instruction exec type, influenced via rows
    RUNTIME_PLATFORM oldPlatform = rtplatform;
    rtplatform = (instType == ExecType.MR) ? RUNTIME_PLATFORM.HADOOP : RUNTIME_PLATFORM.HYBRID;
    int cols = (instType == ExecType.MR) ? cols2 : cols1;

    // Select the script variant
    int scriptNum = -1;
    if (parallel) {
        if (inner == PExecMode.REMOTE_MR)
            scriptNum = 2;
        else if (outer == PExecMode.REMOTE_MR)
            scriptNum = 3;
        else if (outer == PExecMode.LOCAL)
            scriptNum = 1;
        else if (profile) // optimized with profiling
            scriptNum = 5;
        else if (debug) // optimized with debugging
            scriptNum = 6;
        else // optimized
            scriptNum = 4;
    } else {
        scriptNum = 0;
    }

    TestConfiguration config = getTestConfiguration(TEST_NAME);
    config.addVariable("rows", rows);
    config.addVariable("cols", cols);
    loadTestConfiguration(config);

    boolean oldStatistics = DMLScript.STATISTICS;

    // Construct the JUnit test arguments directly
    String HOME = SCRIPT_DIR + TEST_DIR;
    fullDMLScriptName = HOME + TEST_NAME + scriptNum + ".dml";
    if (statistics) {
        programArgs = new String[] { "-stats", "-args", input("V"),
            Integer.toString(rows), Integer.toString(cols), output("PearsonR") };
    } else {
        programArgs = new String[] { "-args", input("V"),
            Integer.toString(rows), Integer.toString(cols), output("PearsonR") };
    }
    fullRScriptName = HOME + TEST_NAME + ".R";
    rCmd = "Rscript" + " " + fullRScriptName + " " + inputDir() + " " + expectedDir();

    long seed = System.nanoTime();
    double[][] V = getRandomMatrix(rows, cols, minVal, maxVal, 1.0, seed);
    writeInputMatrix("V", V, true);

    try {
        boolean exceptionExpected = false;
        runTest(true, exceptionExpected, null, -1);
        runRScript(true);
    } finally {
        DMLScript.STATISTICS = oldStatistics;
        rtplatform = oldPlatform;
    }

    // Compare matrices
    HashMap<CellIndex, Double> dmlfile = readDMLMatrixFromHDFS("PearsonR");
    HashMap<CellIndex, Double> rfile = readRMatrixFromFS("Rout");
    TestUtils.compareMatrices(dmlfile, rfile, eps, "PearsonR-DML", "PearsonR-R");
}
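Note that the snippets map ExecType to RUNTIME_PLATFORM inconsistently: the convolution and pooling tests fall back to SINGLE_NODE for the CP case, while the parfor tests use HYBRID. A sketch of how the mapping could be centralized with an explicit CP-case default (the helper is hypothetical, not SystemML API):

// Hypothetical mapping helper: translate a test's ExecType into the
// RUNTIME_PLATFORM to install. The CP default is a parameter because the
// tests above disagree (SINGLE_NODE vs. HYBRID).
private static RUNTIME_PLATFORM toPlatform(ExecType et, RUNTIME_PLATFORM cpDefault) {
    switch (et) {
        case MR:    return RUNTIME_PLATFORM.HADOOP;
        case SPARK: return RUNTIME_PLATFORM.SPARK;
        default:    return cpDefault;
    }
}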
Use of org.apache.sysml.api.DMLScript.RUNTIME_PLATFORM in the project incubator-systemml by Apache.
The class ColVariancesTest, method testColVariances.
/**
* Test the column variances function, "colVars(X)", on
* dense/sparse matrices/vectors on the CP/Spark/MR platforms.
*
* @param testName The name of this test case.
* @param sparsity Selection between empty, sparse, and dense data.
* @param dataType Selection between a matrix, a row vector, and a
* column vector.
* @param rewrites Whether or not to employ algebraic rewrites.
* @param platform Selection between CP/Spark/MR platforms.
*/
private void testColVariances(String testName, Sparsity sparsity, DataType dataType,
        boolean rewrites, ExecType platform) {
    // Configure settings for this test case
    boolean rewritesOld = OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION;
    OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION = rewrites;
    RUNTIME_PLATFORM platformOld = rtplatform;
    switch (platform) {
        case MR:
            rtplatform = RUNTIME_PLATFORM.HADOOP;
            break;
        case SPARK:
            rtplatform = RUNTIME_PLATFORM.SPARK;
            break;
        default:
            rtplatform = RUNTIME_PLATFORM.SINGLE_NODE;
            break;
    }
    boolean sparkConfigOld = DMLScript.USE_LOCAL_SPARK_CONFIG;
    if (rtplatform == RUNTIME_PLATFORM.SPARK)
        DMLScript.USE_LOCAL_SPARK_CONFIG = true;
    try {
        // Create and load test configuration
        getAndLoadTestConfiguration(testName);
        String HOME = SCRIPT_DIR + TEST_DIR;
        fullDMLScriptName = HOME + testName + ".dml";
        programArgs = new String[] { "-explain", "-stats", "-args",
            input(INPUT_NAME), output(OUTPUT_NAME) };
        fullRScriptName = HOME + testName + ".R";
        rCmd = "Rscript" + " " + fullRScriptName + " " + inputDir() + " " + expectedDir();

        // Generate data
        // - sparsity
        double sparsityVal;
        switch (sparsity) {
            case EMPTY:
                sparsityVal = 0;
                break;
            case SPARSE:
                sparsityVal = sparsitySparse;
                break;
            case DENSE:
            default:
                sparsityVal = sparsityDense;
        }
        // - size
        int r;
        int c;
        switch (dataType) {
            case ROWVECTOR:
                r = 1;
                c = cols;
                break;
            case COLUMNVECTOR:
                r = rows;
                c = 1;
                break;
            case MATRIX:
            default:
                r = rows;
                c = cols;
        }
        // - generation
        double[][] X = getRandomMatrix(r, c, -1, 1, sparsityVal, 7);
        writeInputMatrixWithMTD(INPUT_NAME, X, true);

        // Run DML and R scripts
        runTest(true, false, null, -1);
        runRScript(true);

        // Compare output matrices
        HashMap<CellIndex, Double> dmlfile = readDMLMatrixFromHDFS(OUTPUT_NAME);
        HashMap<CellIndex, Double> rfile = readRMatrixFromFS(OUTPUT_NAME);
        TestUtils.compareMatrices(dmlfile, rfile, eps, "Stat-DML", "Stat-R");

        // If rewrites are enabled, check on CP and Spark that the expected
        // rewrites were applied: for a row vector the colVars opcode should
        // no longer appear, and for a column vector colVars(X) should be
        // rewritten to var(X).
        if (rewrites && (platform == ExecType.SPARK || platform == ExecType.CP)) {
            String prefix = (platform == ExecType.SPARK) ? Instruction.SP_INST_PREFIX : "";
            if (dataType == DataType.ROWVECTOR) {
                String opcode = prefix + colVarOp;
                boolean rewriteApplied = !Statistics.getCPHeavyHitterOpCodes().contains(opcode);
                Assert.assertTrue("Rewrite not applied to row vector case.", rewriteApplied);
            } else if (dataType == DataType.COLUMNVECTOR) {
                String opcode = prefix + varOp;
                boolean rewriteApplied = Statistics.getCPHeavyHitterOpCodes().contains(opcode);
                Assert.assertTrue("Rewrite not applied to column vector case.", rewriteApplied);
            }
        }
    } finally {
        // Reset settings
        OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION = rewritesOld;
        rtplatform = platformOld;
        DMLScript.USE_LOCAL_SPARK_CONFIG = sparkConfigOld;
    }
}
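A concrete invocation would look like the hypothetical JUnit method below; Sparsity, DataType, and TEST_NAME are the enclosing class's own types and constants, and the chosen argument values are illustrative:

// Hypothetical test method: dense column-vector input on Spark with
// algebraic rewrites enabled, exercising the colVars(X) --> var(X) check.
@Test
public void testColVarsDenseColVectorRewritesSpark() {
    testColVariances(TEST_NAME, Sparsity.DENSE, DataType.COLUMNVECTOR, true, ExecType.SPARK);
}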