Search in sources:

Example 26 with Row

Use of org.apache.spark.sql.Row in the Apache incubator-systemml project, from class MLContextTest, method testOutputJavaRDDStringIJVFromMatrixDML.

@Test
public void testOutputJavaRDDStringIJVFromMatrixDML() {
    System.out.println("MLContextTest - output Java RDD String IJV from matrix DML");
    // Build a 2x2 matrix in DML and register it as a script output.
    Script script = dml("M = matrix('1 2 3 4', rows=2, cols=2);").out("M");
    MLResults results = ml.execute(script);
    JavaRDD<String> ijvRdd = results.getJavaRDDStringIJV("M");
    // Sort lexicographically into a single partition so the IJV lines are deterministic.
    List<String> sorted = ijvRdd.sortBy(line -> line, true, 1).collect();
    String[] expected = { "1 1 1.0", "1 2 2.0", "2 1 3.0", "2 2 4.0" };
    for (int i = 0; i < expected.length; i++) {
        Assert.assertEquals(expected[i], sorted.get(i));
    }
}
Also used : MatrixFormat(org.apache.sysml.api.mlcontext.MatrixFormat) Arrays(java.util.Arrays) URL(java.net.URL) MatrixObject(org.apache.sysml.runtime.controlprogram.caching.MatrixObject) MatrixIndexes(org.apache.sysml.runtime.matrix.data.MatrixIndexes) MatrixBlock(org.apache.sysml.runtime.matrix.data.MatrixBlock) Map(java.util.Map) DoubleType(org.apache.spark.sql.types.DoubleType) ScriptFactory.dml(org.apache.sysml.api.mlcontext.ScriptFactory.dml) ScriptFactory.pydmlFromUrl(org.apache.sysml.api.mlcontext.ScriptFactory.pydmlFromUrl) DataTypes(org.apache.spark.sql.types.DataTypes) StructField(org.apache.spark.sql.types.StructField) StructType(org.apache.spark.sql.types.StructType) Vector(org.apache.spark.ml.linalg.Vector) DenseVector(org.apache.spark.ml.linalg.DenseVector) MLContextConversionUtil(org.apache.sysml.api.mlcontext.MLContextConversionUtil) Seq(scala.collection.Seq) Tuple4(scala.Tuple4) ScriptFactory.dmlFromLocalFile(org.apache.sysml.api.mlcontext.ScriptFactory.dmlFromLocalFile) Tuple2(scala.Tuple2) Collectors(java.util.stream.Collectors) Tuple3(scala.Tuple3) Tuple1(scala.Tuple1) List(java.util.List) Stream(java.util.stream.Stream) ScriptFactory.pydmlFromFile(org.apache.sysml.api.mlcontext.ScriptFactory.pydmlFromFile) MLResults(org.apache.sysml.api.mlcontext.MLResults) Vectors(org.apache.spark.ml.linalg.Vectors) MatrixCharacteristics(org.apache.sysml.runtime.matrix.MatrixCharacteristics) Function(org.apache.spark.api.java.function.Function) ScriptFactory.dmlFromFile(org.apache.sysml.api.mlcontext.ScriptFactory.dmlFromFile) RDD(org.apache.spark.rdd.RDD) ScriptFactory.dmlFromInputStream(org.apache.sysml.api.mlcontext.ScriptFactory.dmlFromInputStream) Dataset(org.apache.spark.sql.Dataset) MLContextException(org.apache.sysml.api.mlcontext.MLContextException) MatrixMetadata(org.apache.sysml.api.mlcontext.MatrixMetadata) RDDConverterUtils(org.apache.sysml.runtime.instructions.spark.utils.RDDConverterUtils) 
MLContextUtil(org.apache.sysml.api.mlcontext.MLContextUtil) HashMap(java.util.HashMap) DataConverter(org.apache.sysml.runtime.util.DataConverter) ArrayList(java.util.ArrayList) Script(org.apache.sysml.api.mlcontext.Script) Statistics(org.apache.sysml.utils.Statistics) VectorUDT(org.apache.spark.ml.linalg.VectorUDT) ScriptFactory.pydmlFromLocalFile(org.apache.sysml.api.mlcontext.ScriptFactory.pydmlFromLocalFile) JavaRDD(org.apache.spark.api.java.JavaRDD) JavaConversions(scala.collection.JavaConversions) MalformedURLException(java.net.MalformedURLException) RowFactory(org.apache.spark.sql.RowFactory) Iterator(scala.collection.Iterator) Matrix(org.apache.sysml.api.mlcontext.Matrix) Assert.assertTrue(org.junit.Assert.assertTrue) IOException(java.io.IOException) Test(org.junit.Test) FileInputStream(java.io.FileInputStream) Row(org.apache.spark.sql.Row) JavaPairRDD(org.apache.spark.api.java.JavaPairRDD) File(java.io.File) ScriptFactory.pydml(org.apache.sysml.api.mlcontext.ScriptFactory.pydml) ScriptFactory.pydmlFromInputStream(org.apache.sysml.api.mlcontext.ScriptFactory.pydmlFromInputStream) ScriptFactory.dmlFromUrl(org.apache.sysml.api.mlcontext.ScriptFactory.dmlFromUrl) ScriptExecutor(org.apache.sysml.api.mlcontext.ScriptExecutor) Assert(org.junit.Assert) InputStream(java.io.InputStream) Script(org.apache.sysml.api.mlcontext.Script) MLResults(org.apache.sysml.api.mlcontext.MLResults) Test(org.junit.Test)

Example 27 with Row

Use of org.apache.spark.sql.Row in the Apache incubator-systemml project, from class MLContextTest, method testDataFrameSumDMLMllibVectorWithNoIDColumn.

@Test
public void testDataFrameSumDMLMllibVectorWithNoIDColumn() {
    System.out.println("MLContextTest - DataFrame sum DML, mllib vector with no ID column");
    // Three mllib dense vectors whose nine entries sum to 45.
    List<org.apache.spark.mllib.linalg.Vector> vectors = new ArrayList<org.apache.spark.mllib.linalg.Vector>();
    vectors.add(org.apache.spark.mllib.linalg.Vectors.dense(1.0, 2.0, 3.0));
    vectors.add(org.apache.spark.mllib.linalg.Vectors.dense(4.0, 5.0, 6.0));
    vectors.add(org.apache.spark.mllib.linalg.Vectors.dense(7.0, 8.0, 9.0));
    JavaRDD<Row> rowRdd = sc.parallelize(vectors).map(new MllibVectorRow());
    // Single vector-typed column, no ID column; format is declared via MatrixMetadata.
    List<StructField> schemaFields = new ArrayList<StructField>();
    schemaFields.add(DataTypes.createStructField("C1", new org.apache.spark.mllib.linalg.VectorUDT(), true));
    Dataset<Row> dataFrame = spark.createDataFrame(rowRdd, DataTypes.createStructType(schemaFields));
    MatrixMetadata metadata = new MatrixMetadata(MatrixFormat.DF_VECTOR);
    Script script = dml("print('sum: ' + sum(M));").in("M", dataFrame, metadata);
    setExpectedStdOut("sum: 45.0");
    ml.execute(script);
}
Also used : Script(org.apache.sysml.api.mlcontext.Script) VectorUDT(org.apache.spark.ml.linalg.VectorUDT) StructType(org.apache.spark.sql.types.StructType) ArrayList(java.util.ArrayList) StructField(org.apache.spark.sql.types.StructField) Row(org.apache.spark.sql.Row) MatrixMetadata(org.apache.sysml.api.mlcontext.MatrixMetadata) Vector(org.apache.spark.ml.linalg.Vector) DenseVector(org.apache.spark.ml.linalg.DenseVector) Test(org.junit.Test)

Example 28 with Row

Use of org.apache.spark.sql.Row in the Apache incubator-systemml project, from class MLContextTest, method testOutputDataFramePYDMLVectorWithIDColumn.

@Test
public void testOutputDataFramePYDMLVectorWithIDColumn() {
    System.out.println("MLContextTest - output DataFrame PYDML, vector with ID column");
    // Build a 2x2 matrix in PYDML and fetch it back as a vector DataFrame with an ID column.
    Script script = pydml("M = full('1 2 3 4', rows=2, cols=2)").out("M");
    MLResults results = ml.execute(script);
    Dataset<Row> dataFrame = results.getDataFrameVectorWithIDColumn("M");
    List<Row> rows = dataFrame.collectAsList();
    // Each row holds the 1-based row index (double) followed by that row's values as a Vector.
    Row first = rows.get(0);
    Assert.assertEquals(1.0, first.getDouble(0), 0.0);
    Assert.assertArrayEquals(new double[] { 1.0, 2.0 }, ((Vector) first.get(1)).toArray(), 0.0);
    Row second = rows.get(1);
    Assert.assertEquals(2.0, second.getDouble(0), 0.0);
    Assert.assertArrayEquals(new double[] { 3.0, 4.0 }, ((Vector) second.get(1)).toArray(), 0.0);
}
Also used : Script(org.apache.sysml.api.mlcontext.Script) MLResults(org.apache.sysml.api.mlcontext.MLResults) Row(org.apache.spark.sql.Row) Test(org.junit.Test)

Example 29 with Row

Use of org.apache.spark.sql.Row in the Apache incubator-systemml project, from class MLContextTest, method testDataFrameSumPYDMLDoublesWithNoIDColumnNoFormatSpecified.

@Test
public void testDataFrameSumPYDMLDoublesWithNoIDColumnNoFormatSpecified() {
    System.out.println("MLContextTest - DataFrame sum PYDML, doubles with no ID column, no format specified");
    // CSV lines that parse to a 3x3 double matrix summing to 27.
    List<String> csvLines = new ArrayList<String>();
    csvLines.add("2,2,2");
    csvLines.add("3,3,3");
    csvLines.add("4,4,4");
    JavaRDD<Row> rowRdd = sc.parallelize(csvLines).map(new CommaSeparatedValueStringToDoubleArrayRow());
    // Three double columns; no MatrixMetadata, so the format must be inferred.
    List<StructField> schemaFields = new ArrayList<StructField>();
    for (String columnName : new String[] { "C1", "C2", "C3" }) {
        schemaFields.add(DataTypes.createStructField(columnName, DataTypes.DoubleType, true));
    }
    Dataset<Row> dataFrame = spark.createDataFrame(rowRdd, DataTypes.createStructType(schemaFields));
    Script script = pydml("print('sum: ' + sum(M))").in("M", dataFrame);
    setExpectedStdOut("sum: 27.0");
    ml.execute(script);
}
Also used : Script(org.apache.sysml.api.mlcontext.Script) StructField(org.apache.spark.sql.types.StructField) StructType(org.apache.spark.sql.types.StructType) ArrayList(java.util.ArrayList) Row(org.apache.spark.sql.Row) Test(org.junit.Test)

Example 30 with Row

Use of org.apache.spark.sql.Row in the Apache incubator-systemml project, from class MLContextTest, method testDataFrameSumPYDMLDoublesWithIDColumn.

@Test
public void testDataFrameSumPYDMLDoublesWithIDColumn() {
    System.out.println("MLContextTest - DataFrame sum PYDML, doubles with ID column");
    // CSV lines: leading value is the row ID, remaining values sum to 45 overall.
    List<String> csvLines = new ArrayList<String>();
    csvLines.add("1,1,2,3");
    csvLines.add("2,4,5,6");
    csvLines.add("3,7,8,9");
    JavaRDD<Row> rowRdd = sc.parallelize(csvLines).map(new CommaSeparatedValueStringToDoubleArrayRow());
    // ID column first (well-known converter column name), then three double data columns.
    List<StructField> schemaFields = new ArrayList<StructField>();
    schemaFields.add(DataTypes.createStructField(RDDConverterUtils.DF_ID_COLUMN, DataTypes.DoubleType, true));
    for (String columnName : new String[] { "C1", "C2", "C3" }) {
        schemaFields.add(DataTypes.createStructField(columnName, DataTypes.DoubleType, true));
    }
    Dataset<Row> dataFrame = spark.createDataFrame(rowRdd, DataTypes.createStructType(schemaFields));
    MatrixMetadata metadata = new MatrixMetadata(MatrixFormat.DF_DOUBLES_WITH_INDEX);
    Script script = pydml("print('sum: ' + sum(M))").in("M", dataFrame, metadata);
    setExpectedStdOut("sum: 45.0");
    ml.execute(script);
}
Also used : Script(org.apache.sysml.api.mlcontext.Script) StructType(org.apache.spark.sql.types.StructType) ArrayList(java.util.ArrayList) StructField(org.apache.spark.sql.types.StructField) Row(org.apache.spark.sql.Row) MatrixMetadata(org.apache.sysml.api.mlcontext.MatrixMetadata) Test(org.junit.Test)

Aggregations

Row (org.apache.spark.sql.Row)129 Test (org.junit.Test)60 Script (org.apache.sysml.api.mlcontext.Script)53 StructType (org.apache.spark.sql.types.StructType)50 ArrayList (java.util.ArrayList)48 StructField (org.apache.spark.sql.types.StructField)46 SparkSession (org.apache.spark.sql.SparkSession)43 VectorUDT (org.apache.spark.ml.linalg.VectorUDT)19 MatrixMetadata (org.apache.sysml.api.mlcontext.MatrixMetadata)19 MLResults (org.apache.sysml.api.mlcontext.MLResults)18 DenseVector (org.apache.spark.ml.linalg.DenseVector)16 Vector (org.apache.spark.ml.linalg.Vector)16 MatrixBlock (org.apache.sysml.runtime.matrix.data.MatrixBlock)15 JavaSparkContext (org.apache.spark.api.java.JavaSparkContext)12 SQLContext (org.apache.spark.sql.SQLContext)12 User (uk.gov.gchq.gaffer.user.User)12 HashSet (java.util.HashSet)10 MatrixCharacteristics (org.apache.sysml.runtime.matrix.MatrixCharacteristics)9 Tuple2 (scala.Tuple2)9 GetDataFrameOfElements (uk.gov.gchq.gaffer.spark.operation.dataframe.GetDataFrameOfElements)9