Example use of org.apache.sysml.api.mlcontext.Matrix in project incubator-systemml by Apache: the class ScalarMatrixElementwiseOpTests, method runScalarMatrixElementWiseTests.
/**
 * Exercises a scalar-matrix elementwise op on the CPU and the GPU and checks
 * that both backends produce the same result, sweeping over all configured
 * matrix sizes, sparsities, and the supplied scalar values.
 *
 * @param scriptStr the script string
 * @param inputMatrix name of the matrix input in the script string
 * @param inputScalar name of the scalar input in the script string
 * @param output name of the output variable in the script string
 * @param scalars array of scalars for which to run this test
 * @param heavyHitterOpCode the string printed for the unary op heavy hitter when executed on gpu
 */
private void runScalarMatrixElementWiseTests(String scriptStr, String inputMatrix, String inputScalar, String output, double[] scalars, String heavyHitterOpCode) {
    for (int numRows : rowSizes) {
        for (int numCols : columnSizes) {
            for (double sparsity : sparsities) {
                for (double scalar : scalars) {
                    System.out.println("Matrix is of size [" + numRows + ", " + numCols + "], sparsity = " + sparsity + ", scalar = " + scalar);
                    // Random input matrix for this size/sparsity combination.
                    Matrix X = generateInputMatrix(spark, numRows, numCols, sparsity, seed);
                    HashMap<String, Object> inputs = new HashMap<>();
                    inputs.put(inputMatrix, X);
                    inputs.put(inputScalar, scalar);
                    // Run the same script on both backends and compare outputs.
                    List<Object> cpuOut = runOnCPU(spark, scriptStr, inputs, Arrays.asList(output));
                    List<Object> gpuOut = runOnGPU(spark, scriptStr, inputs, Arrays.asList(output));
                    //assertHeavyHitterPresent(heavyHitterOpCode);
                    assertEqualObjects(cpuOut.get(0), gpuOut.get(0));
                }
            }
        }
    }
}
Example use of org.apache.sysml.api.mlcontext.Matrix in project incubator-systemml by Apache: the class UnaryOpTestsBase, method testUnaryOpMatrixOutput.
/**
 * Tests a single unary op with inputs and outputs of the specified size and sparsity,
 * verifying that CPU and GPU executions produce equal results.
 *
 * @param scriptStr script string
 * @param heavyHitterOpCode the string printed for the unary op heavy hitter when executed on gpu
 * @param inStr name of input variable in provided script string
 * @param outStr name of output variable in script string
 * @param seed seed for the random number generator for the random input matrix
 * @param row number of rows of input matrix
 * @param column number of columns of input matrix
 * @param sparsity sparsity of the input matrix
 */
public void testUnaryOpMatrixOutput(String scriptStr, String heavyHitterOpCode, String inStr, String outStr, int seed, int row, int column, double sparsity) {
    System.out.println("Matrix of size [" + row + ", " + column + "], sparsity = " + sparsity);
    // Random input matrix for this size/sparsity combination.
    Matrix in1 = generateInputMatrix(spark, row, column, sparsity, seed);
    HashMap<String, Object> inputs = new HashMap<>();
    inputs.put(inStr, in1);
    // Run the same script on both backends and compare outputs.
    List<Object> outCPU = runOnCPU(spark, scriptStr, inputs, Arrays.asList(outStr));
    List<Object> outGPU = runOnGPU(spark, scriptStr, inputs, Arrays.asList(outStr));
    //assertHeavyHitterPresent(heavyHitterOpCode);
    assertEqualObjects(outCPU.get(0), outGPU.get(0));
}
Example use of org.apache.sysml.api.mlcontext.Matrix in project incubator-systemml by Apache: the class NeuralNetworkOpTests, method testConv2dBackwardData.
/**
 * Tests conv2d_backward_data on CPU vs GPU over the full cartesian product of the
 * configured image/filter/stride/padding/sparsity settings, skipping configurations
 * that are invalid for cudnn or too large for GPU memory.
 */
@Test
public void testConv2dBackwardData() {
    String scriptStr = "O = conv2d_backward_data(filter, dout, padding=[padH, padW], stride=[strideH, strideW], input_shape=[N,C,H,W], filter_shape=[K,C,R,S])";
    for (long N : Nlst) {
        for (long C : Clst) {
            for (long H : Hlst) {
                for (long W : Wlst) {
                    for (long K : Klst) {
                        for (long R : Rlst) {
                            for (long S : Slst) {
                                for (long strideH : strideHeightLst) {
                                    for (long strideW : strideWidthLst) {
                                        for (long padH : padHeightLst) {
                                            for (long padW : padWidthLst) {
                                                for (double sparsity : sparsitylst) {
                                                    // filter must be no larger than image + padding
                                                    if (R > (H + padH) || S > (W + padW))
                                                        continue;
                                                    // Make sure ops fit in GPU memory and within constraints of cudnn
                                                    long imageSize = N * C * H * W * 8L;
                                                    if (imageSize > MAX_OP_SIZE) // image size
                                                        continue;
                                                    long filterSize = K * C * R * S * 8L;
                                                    if (filterSize > MAX_OP_SIZE) // filter size
                                                        continue;
                                                    // Output spatial dimensions for the forward conv.
                                                    int P = (int) ConvolutionUtils.getP(H, R, strideH, padH);
                                                    int Q = (int) ConvolutionUtils.getQ(W, S, strideW, padW);
                                                    long doutSize = N * K * P * Q * 8L;
                                                    if (doutSize > MAX_OP_SIZE) // dout/output size
                                                        continue;
                                                    double imageSizeInMB = imageSize / (1024.0 * 1024.0);
                                                    double filterSizeInMB = filterSize / (1024.0 * 1024.0);
                                                    double doutSizeInMB = doutSize / (1024.0 * 1024.0);
                                                    // BUGFIX: filter dims are [K,C,R,S] (previously printed as N,C,R,S),
                                                    // filter size now labeled "MB", and a trailing newline was added.
                                                    System.out.format("conv2d_backward_data, image[%d,%d,%d,%d](%.1fMB), filter[%d,%d,%d,%d](%.1fMB), dout[%d,%d,%d,%d](%.1fMB), stride[%d,%d], padding[%d,%d]%n", N, C, H, W, imageSizeInMB, K, C, R, S, filterSizeInMB, N, K, P, Q, doutSizeInMB, strideH, strideW, padH, padW);
                                                    // filter is KxCRS, dout is NxKPQ (im2col-style 2D layouts)
                                                    Matrix filter = generateInputMatrix(spark, (int) K, (int) (C * R * S), sparsity, seed);
                                                    Matrix dout = generateInputMatrix(spark, (int) N, (int) (K * P * Q), sparsity, seed);
                                                    HashMap<String, Object> inputs = new HashMap<>();
                                                    inputs.put("N", N);
                                                    inputs.put("C", C);
                                                    inputs.put("H", H);
                                                    inputs.put("W", W);
                                                    inputs.put("K", K);
                                                    inputs.put("R", R);
                                                    inputs.put("S", S);
                                                    inputs.put("strideH", strideH);
                                                    inputs.put("strideW", strideW);
                                                    inputs.put("padH", padH);
                                                    inputs.put("padW", padW);
                                                    inputs.put("filter", filter);
                                                    inputs.put("dout", dout);
                                                    // Run on both backends, check the GPU kernel actually executed, and compare.
                                                    List<Object> outCPU = runOnCPU(spark, scriptStr, inputs, Arrays.asList("O"));
                                                    List<Object> outGPU = runOnGPU(spark, scriptStr, inputs, Arrays.asList("O"));
                                                    assertHeavyHitterPresent("gpu_conv2d_backward_data");
                                                    assertEqualObjects(outCPU.get(0), outGPU.get(0));
                                                    clearGPUMemory();
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
    }
}
Example use of org.apache.sysml.api.mlcontext.Matrix in project incubator-systemml by Apache: the class NeuralNetworkOpTests, method testMaxPoolBackward.
/**
 * Tests max_pool_backward on CPU vs GPU over the full cartesian product of the
 * configured image/pool/stride/padding/sparsity settings, skipping configurations
 * that are invalid for cudnn or too large for GPU memory.
 */
@Test
public void testMaxPoolBackward() {
    String scriptStr = "O = max_pool_backward(image, dout, padding=[padH, padW], stride=[strideH, strideW], input_shape=[N,C,H,W], pool_size=[R,S])";
    for (long N : Nlst) {
        for (long C : Clst) {
            for (long H : Hlst) {
                for (long W : Wlst) {
                    for (long R : Rlst) {
                        for (long S : Slst) {
                            for (long strideH : strideHeightLst) {
                                for (long strideW : strideWidthLst) {
                                    for (long padH : padHeightLst) {
                                        for (long padW : padWidthLst) {
                                            for (double sparsity : sparsitylst) {
                                                // pool must be no larger than image + padding
                                                if (R > (H + padH) || S > (W + padW))
                                                    continue;
                                                // Make sure ops fit in GPU memory and within constraints of cudnn
                                                long imageSize = N * C * H * W * 8L;
                                                if (imageSize > MAX_OP_SIZE) // image size
                                                    continue;
                                                long poolSize = R * S * 8L;
                                                if (poolSize > MAX_OP_SIZE) // pool size
                                                    continue;
                                                // Output spatial dimensions for the forward pooling op.
                                                int P = (int) ConvolutionUtils.getP(H, R, strideH, padH);
                                                int Q = (int) ConvolutionUtils.getQ(W, S, strideW, padW);
                                                long doutSize = N * C * P * Q * 8L;
                                                if (doutSize > MAX_OP_SIZE) // dout/output size
                                                    continue;
                                                double imageSizeInMB = imageSize / (1024.0 * 1024.0);
                                                double poolSizeInMB = poolSize / (1024.0 * 1024.0);
                                                double doutSizeInMB = doutSize / (1024.0 * 1024.0);
                                                // BUGFIX: pool size now labeled "MB" and a trailing newline was added.
                                                System.out.format("max_pool_backward, image[%d,%d,%d,%d](%.1fMB), pool[%d,%d](%.1fMB), dout[%d,%d,%d,%d](%.1fMB), stride[%d,%d], padding[%d,%d]%n", N, C, H, W, imageSizeInMB, R, S, poolSizeInMB, N, C, P, Q, doutSizeInMB, strideH, strideW, padH, padW);
                                                // image is NxCHW, dout is NxCPQ (2D layouts)
                                                Matrix image = generateInputMatrix(spark, (int) N, (int) (C * H * W), sparsity, seed);
                                                Matrix dout = generateInputMatrix(spark, (int) N, (int) (C * P * Q), sparsity, seed);
                                                HashMap<String, Object> inputs = new HashMap<>();
                                                inputs.put("N", N);
                                                inputs.put("C", C);
                                                inputs.put("H", H);
                                                inputs.put("W", W);
                                                inputs.put("R", R);
                                                inputs.put("S", S);
                                                inputs.put("strideH", strideH);
                                                inputs.put("strideW", strideW);
                                                inputs.put("padH", padH);
                                                inputs.put("padW", padW);
                                                inputs.put("image", image);
                                                inputs.put("dout", dout);
                                                // Run on both backends, check the GPU kernel actually executed, and compare.
                                                List<Object> outCPU = runOnCPU(spark, scriptStr, inputs, Arrays.asList("O"));
                                                List<Object> outGPU = runOnGPU(spark, scriptStr, inputs, Arrays.asList("O"));
                                                assertHeavyHitterPresent("gpu_maxpooling_backward");
                                                assertEqualObjects(outCPU.get(0), outGPU.get(0));
                                                clearGPUMemory();
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
    }
}
Example use of org.apache.sysml.api.mlcontext.Matrix in project incubator-systemml by Apache: the class ReorgOpTests, method transposeTest.
/**
 * Verifies that matrix transpose (t(X)) produces identical results on the CPU
 * and GPU backends across all configured matrix sizes and sparsities.
 */
@Test
public void transposeTest() {
    String scriptStr = "out = t(in1)";
    for (int numRows : rowSizes) {
        for (int numCols : columnSizes) {
            for (double sparsity : sparsities) {
                HashMap<String, Object> inputs = new HashMap<>();
                // Random input matrix for this size/sparsity combination.
                Matrix in1 = generateInputMatrix(spark, numRows, numCols, sparsity, seed);
                inputs.put("in1", in1);
                // Run the same script on both backends and compare outputs.
                List<Object> cpuResults = runOnCPU(spark, scriptStr, inputs, Arrays.asList("out"));
                List<Object> gpuResults = runOnGPU(spark, scriptStr, inputs, Arrays.asList("out"));
                //assertHeavyHitterPresent("gpu_r'");
                assertEqualObjects(cpuResults.get(0), gpuResults.get(0));
            }
        }
    }
}
Aggregations