Example 36 with Row

Use of org.apache.spark.sql.Row in project incubator-systemml by apache, in class MLContextTest, method testDataFrameGoodMetadataDML.

@Test
public void testDataFrameGoodMetadataDML() {
    System.out.println("MLContextTest - DataFrame good metadata DML");
    List<String> list = new ArrayList<String>();
    list.add("10,20,30");
    list.add("40,50,60");
    list.add("70,80,90");
    JavaRDD<String> javaRddString = sc.parallelize(list);
    JavaRDD<Row> javaRddRow = javaRddString.map(new CommaSeparatedValueStringToDoubleArrayRow());
    List<StructField> fields = new ArrayList<StructField>();
    fields.add(DataTypes.createStructField("C1", DataTypes.DoubleType, true));
    fields.add(DataTypes.createStructField("C2", DataTypes.DoubleType, true));
    fields.add(DataTypes.createStructField("C3", DataTypes.DoubleType, true));
    StructType schema = DataTypes.createStructType(fields);
    Dataset<Row> dataFrame = spark.createDataFrame(javaRddRow, schema);
    // Explicit metadata: 3 rows, 3 columns, 9 non-zero values
    MatrixMetadata mm = new MatrixMetadata(3, 3, 9);
    Script script = dml("print('sum: ' + sum(M));").in("M", dataFrame, mm);
    setExpectedStdOut("sum: 450.0");
    ml.execute(script);
}
Also used: Script (org.apache.sysml.api.mlcontext.Script), StructType (org.apache.spark.sql.types.StructType), ArrayList (java.util.ArrayList), StructField (org.apache.spark.sql.types.StructField), Row (org.apache.spark.sql.Row), MatrixMetadata (org.apache.sysml.api.mlcontext.MatrixMetadata), Test (org.junit.Test)
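These test methods rely on fixtures defined elsewhere in MLContextTest: a shared SparkSession (spark), JavaSparkContext (sc), MLContext (ml), a setExpectedStdOut helper that captures System.out, and the CommaSeparatedValueStringToDoubleArrayRow mapper. The sketch below shows roughly what that scaffolding looks like; the field initialization and the mapper body are assumptions for illustration, not the project's exact code.

import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.RowFactory;
import org.apache.spark.sql.SparkSession;
import org.apache.sysml.api.mlcontext.MLContext;

public class MLContextTestScaffold {

    // Shared Spark and SystemML handles referenced as 'spark', 'sc', and 'ml' in the tests above
    // (names follow the test code; the initialization here is an assumed local setup).
    static SparkSession spark = SparkSession.builder().appName("MLContextTest").master("local").getOrCreate();
    static JavaSparkContext sc = new JavaSparkContext(spark.sparkContext());
    static MLContext ml = new MLContext(sc);

    // Turns a line like "10,20,30" into a Row of Doubles matching the schemas built above.
    // Assumed implementation of the mapper used by the tests.
    public static class CommaSeparatedValueStringToDoubleArrayRow implements Function<String, Row> {
        private static final long serialVersionUID = 1L;

        @Override
        public Row call(String csv) {
            String[] parts = csv.split(",");
            Object[] values = new Object[parts.length];
            for (int i = 0; i < parts.length; i++) {
                values[i] = Double.parseDouble(parts[i].trim());
            }
            return RowFactory.create(values);
        }
    }
}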

Example 37 with Row

Use of org.apache.spark.sql.Row in project incubator-systemml by apache, in class MLContextTest, method testDataFrameSumPYDMLVectorWithNoIDColumn.

@Test
public void testDataFrameSumPYDMLVectorWithNoIDColumn() {
    System.out.println("MLContextTest - DataFrame sum PYDML, vector with no ID column");
    List<Vector> list = new ArrayList<Vector>();
    list.add(Vectors.dense(1.0, 2.0, 3.0));
    list.add(Vectors.dense(4.0, 5.0, 6.0));
    list.add(Vectors.dense(7.0, 8.0, 9.0));
    JavaRDD<Vector> javaRddVector = sc.parallelize(list);
    JavaRDD<Row> javaRddRow = javaRddVector.map(new VectorRow());
    List<StructField> fields = new ArrayList<StructField>();
    fields.add(DataTypes.createStructField("C1", new VectorUDT(), true));
    StructType schema = DataTypes.createStructType(fields);
    Dataset<Row> dataFrame = spark.createDataFrame(javaRddRow, schema);
    // Explicit format: each row holds a single vector column, with no row-index column
    MatrixMetadata mm = new MatrixMetadata(MatrixFormat.DF_VECTOR);
    Script script = pydml("print('sum: ' + sum(M))").in("M", dataFrame, mm);
    setExpectedStdOut("sum: 45.0");
    ml.execute(script);
}
Also used: Script (org.apache.sysml.api.mlcontext.Script), VectorUDT (org.apache.spark.ml.linalg.VectorUDT), StructType (org.apache.spark.sql.types.StructType), ArrayList (java.util.ArrayList), StructField (org.apache.spark.sql.types.StructField), Row (org.apache.spark.sql.Row), MatrixMetadata (org.apache.sysml.api.mlcontext.MatrixMetadata), Vector (org.apache.spark.ml.linalg.Vector), DenseVector (org.apache.spark.ml.linalg.DenseVector), Test (org.junit.Test)
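The VectorRow mapper used here wraps each ml.linalg.Vector in a single-column Row so it matches the VectorUDT schema built above. A plausible minimal version is sketched below; the actual class in MLContextTest may differ in detail.

import org.apache.spark.api.java.function.Function;
import org.apache.spark.ml.linalg.Vector;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.RowFactory;

// Wraps an ml.linalg.Vector in a one-column Row, matching the single VectorUDT field above.
public class VectorRow implements Function<Vector, Row> {
    private static final long serialVersionUID = 1L;

    @Override
    public Row call(Vector vector) {
        return RowFactory.create(vector);
    }
}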

Example 38 with Row

Use of org.apache.spark.sql.Row in project incubator-systemml by apache, in class MLContextTest, method testDataFrameSumDMLDoublesWithIDColumnNoFormatSpecified.

@Test
public void testDataFrameSumDMLDoublesWithIDColumnNoFormatSpecified() {
    System.out.println("MLContextTest - DataFrame sum DML, doubles with ID column, no format specified");
    List<String> list = new ArrayList<String>();
    list.add("1,2,2,2");
    list.add("2,3,3,3");
    list.add("3,4,4,4");
    JavaRDD<String> javaRddString = sc.parallelize(list);
    JavaRDD<Row> javaRddRow = javaRddString.map(new CommaSeparatedValueStringToDoubleArrayRow());
    List<StructField> fields = new ArrayList<StructField>();
    // Generated 1-based row-index column ("__INDEX"); its presence lets SystemML infer the format
    fields.add(DataTypes.createStructField(RDDConverterUtils.DF_ID_COLUMN, DataTypes.DoubleType, true));
    fields.add(DataTypes.createStructField("C1", DataTypes.DoubleType, true));
    fields.add(DataTypes.createStructField("C2", DataTypes.DoubleType, true));
    fields.add(DataTypes.createStructField("C3", DataTypes.DoubleType, true));
    StructType schema = DataTypes.createStructType(fields);
    Dataset<Row> dataFrame = spark.createDataFrame(javaRddRow, schema);
    Script script = dml("print('sum: ' + sum(M));").in("M", dataFrame);
    setExpectedStdOut("sum: 27.0");
    ml.execute(script);
}
Also used: Script (org.apache.sysml.api.mlcontext.Script), StructField (org.apache.spark.sql.types.StructField), StructType (org.apache.spark.sql.types.StructType), ArrayList (java.util.ArrayList), Row (org.apache.spark.sql.Row), Test (org.junit.Test)
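Because the DataFrame carries the generated __INDEX column (RDDConverterUtils.DF_ID_COLUMN), no MatrixMetadata is needed and the format is inferred. For comparison, the sketch below passes the format explicitly; MatrixFormat.DF_DOUBLES_WITH_INDEX is an assumption about the enum constant and should be verified against your SystemML version.

    // Same DataFrame as above, but with the doubles-with-row-index format named explicitly
    // (MatrixFormat.DF_DOUBLES_WITH_INDEX is assumed here)
    MatrixMetadata mmExplicit = new MatrixMetadata(MatrixFormat.DF_DOUBLES_WITH_INDEX);
    Script scriptExplicit = dml("print('sum: ' + sum(M));").in("M", dataFrame, mmExplicit);
    setExpectedStdOut("sum: 27.0");
    ml.execute(scriptExplicit);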

Example 39 with Row

Use of org.apache.spark.sql.Row in project incubator-systemml by apache, in class MLContextTest, method testDataFrameToBinaryBlocks.

@Test
public void testDataFrameToBinaryBlocks() {
    System.out.println("MLContextTest - DataFrame to binary blocks");
    List<String> list = new ArrayList<String>();
    list.add("1,2,3");
    list.add("4,5,6");
    list.add("7,8,9");
    JavaRDD<String> javaRddString = sc.parallelize(list);
    JavaRDD<Row> javaRddRow = javaRddString.map(new CommaSeparatedValueStringToDoubleArrayRow());
    List<StructField> fields = new ArrayList<StructField>();
    fields.add(DataTypes.createStructField("C1", DataTypes.DoubleType, true));
    fields.add(DataTypes.createStructField("C2", DataTypes.DoubleType, true));
    fields.add(DataTypes.createStructField("C3", DataTypes.DoubleType, true));
    StructType schema = DataTypes.createStructType(fields);
    Dataset<Row> dataFrame = spark.createDataFrame(javaRddRow, schema);
    // Convert the DataFrame to SystemML's binary-block representation and inspect the first block
    JavaPairRDD<MatrixIndexes, MatrixBlock> binaryBlocks = MLContextConversionUtil.dataFrameToMatrixBinaryBlocks(dataFrame);
    Tuple2<MatrixIndexes, MatrixBlock> first = binaryBlocks.first();
    MatrixBlock mb = first._2();
    double[][] matrix = DataConverter.convertToDoubleMatrix(mb);
    Assert.assertArrayEquals(new double[] { 1.0, 2.0, 3.0 }, matrix[0], 0.0);
    Assert.assertArrayEquals(new double[] { 4.0, 5.0, 6.0 }, matrix[1], 0.0);
    Assert.assertArrayEquals(new double[] { 7.0, 8.0, 9.0 }, matrix[2], 0.0);
}
Also used: MatrixBlock (org.apache.sysml.runtime.matrix.data.MatrixBlock), StructType (org.apache.spark.sql.types.StructType), MatrixIndexes (org.apache.sysml.runtime.matrix.data.MatrixIndexes), ArrayList (java.util.ArrayList), StructField (org.apache.spark.sql.types.StructField), Row (org.apache.spark.sql.Row), Test (org.junit.Test)
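dataFrameToMatrixBinaryBlocks pairs each MatrixBlock with its 1-based block coordinates (MatrixIndexes). With the default block size, a 3x3 matrix fits into a single block, so a follow-up check on the test above could look like this sketch (same test context assumed):

    // The 3x3 input should produce exactly one block, at block coordinates (1, 1)
    Assert.assertEquals(1, binaryBlocks.count());
    MatrixIndexes indexes = first._1();
    Assert.assertEquals(1, indexes.getRowIndex());
    Assert.assertEquals(1, indexes.getColumnIndex());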

Example 40 with Row

Use of org.apache.spark.sql.Row in project incubator-systemml by apache, in class MLContextTest, method testOutputDataFrameDoublesWithIDColumnFromMatrixDML.

@Test
public void testOutputDataFrameDoublesWithIDColumnFromMatrixDML() {
    System.out.println("MLContextTest - output DataFrame of doubles with ID column from matrix DML");
    String s = "M = matrix('1 2 3 4', rows=2, cols=2);";
    Script script = dml(s).out("M");
    Dataset<Row> df = ml.execute(script).getMatrix("M").toDFDoubleWithIDColumn();
    Dataset<Row> sortedDF = df.sort(RDDConverterUtils.DF_ID_COLUMN);
    List<Row> list = sortedDF.collectAsList();
    Row row1 = list.get(0);
    Assert.assertEquals(1.0, row1.getDouble(0), 0.0);
    Assert.assertEquals(1.0, row1.getDouble(1), 0.0);
    Assert.assertEquals(2.0, row1.getDouble(2), 0.0);
    Row row2 = list.get(1);
    Assert.assertEquals(2.0, row2.getDouble(0), 0.0);
    Assert.assertEquals(3.0, row2.getDouble(1), 0.0);
    Assert.assertEquals(4.0, row2.getDouble(2), 0.0);
}
Also used: Script (org.apache.sysml.api.mlcontext.Script), Row (org.apache.spark.sql.Row), Test (org.junit.Test)
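The sort on RDDConverterUtils.DF_ID_COLUMN matters because Spark does not guarantee row order in a collected DataFrame; the generated __INDEX column restores the original matrix row order. The matrix can also be fetched without that column, in which case only order-independent checks are safe. A sketch, assuming a toDFDoubleNoIDColumn() accessor on the same Matrix API (verify the method name in your SystemML version):

    // Order-independent check on the same 2x2 result, without the generated __INDEX column
    Dataset<Row> noIdDF = ml.execute(script).getMatrix("M").toDFDoubleNoIDColumn();
    double total = 0.0;
    for (Row r : noIdDF.collectAsList()) {
        total += r.getDouble(0) + r.getDouble(1);
    }
    Assert.assertEquals(10.0, total, 0.0);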

Aggregations

Row (org.apache.spark.sql.Row): 129 usages
Test (org.junit.Test): 60 usages
Script (org.apache.sysml.api.mlcontext.Script): 53 usages
StructType (org.apache.spark.sql.types.StructType): 50 usages
ArrayList (java.util.ArrayList): 48 usages
StructField (org.apache.spark.sql.types.StructField): 46 usages
SparkSession (org.apache.spark.sql.SparkSession): 43 usages
VectorUDT (org.apache.spark.ml.linalg.VectorUDT): 19 usages
MatrixMetadata (org.apache.sysml.api.mlcontext.MatrixMetadata): 19 usages
MLResults (org.apache.sysml.api.mlcontext.MLResults): 18 usages
DenseVector (org.apache.spark.ml.linalg.DenseVector): 16 usages
Vector (org.apache.spark.ml.linalg.Vector): 16 usages
MatrixBlock (org.apache.sysml.runtime.matrix.data.MatrixBlock): 15 usages
JavaSparkContext (org.apache.spark.api.java.JavaSparkContext): 12 usages
SQLContext (org.apache.spark.sql.SQLContext): 12 usages
User (uk.gov.gchq.gaffer.user.User): 12 usages
HashSet (java.util.HashSet): 10 usages
MatrixCharacteristics (org.apache.sysml.runtime.matrix.MatrixCharacteristics): 9 usages
Tuple2 (scala.Tuple2): 9 usages
GetDataFrameOfElements (uk.gov.gchq.gaffer.spark.operation.dataframe.GetDataFrameOfElements): 9 usages