Use of org.apache.spark.sql.types.StructField in project incubator-systemml by apache.
The class MLContextTest, method testDataFrameGoodMetadataPYDML.
@Test
public void testDataFrameGoodMetadataPYDML() {
    System.out.println("MLContextTest - DataFrame good metadata PYDML");
    List<String> list = new ArrayList<String>();
    list.add("10,20,30");
    list.add("40,50,60");
    list.add("70,80,90");
    JavaRDD<String> javaRddString = sc.parallelize(list);
    JavaRDD<Row> javaRddRow = javaRddString.map(new CommaSeparatedValueStringToDoubleArrayRow());
    List<StructField> fields = new ArrayList<StructField>();
    fields.add(DataTypes.createStructField("C1", DataTypes.DoubleType, true));
    fields.add(DataTypes.createStructField("C2", DataTypes.DoubleType, true));
    fields.add(DataTypes.createStructField("C3", DataTypes.DoubleType, true));
    StructType schema = DataTypes.createStructType(fields);
    Dataset<Row> dataFrame = spark.createDataFrame(javaRddRow, schema);
    MatrixMetadata mm = new MatrixMetadata(3, 3, 9);
    Script script = pydml("print('sum: ' + sum(M))").in("M", dataFrame, mm);
    setExpectedStdOut("sum: 450.0");
    ml.execute(script);
}
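The mapper CommaSeparatedValueStringToDoubleArrayRow used above is not shown on this page. A minimal sketch of such a helper, assuming it only has to split each CSV line and parse the tokens as doubles (the real class in incubator-systemml may differ in detail), could look like this:

import java.util.Arrays;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.RowFactory;

// Hypothetical re-implementation: turns a line such as "10,20,30" into a Row of Doubles
// that matches the three DoubleType columns declared in the StructType above.
public class CommaSeparatedValueStringToDoubleArrayRow implements Function<String, Row> {

    private static final long serialVersionUID = 1L;

    @Override
    public Row call(String line) throws Exception {
        // split on the CSV delimiter and parse every token as a double
        Object[] values = Arrays.stream(line.split(","))
            .map(Double::parseDouble)
            .toArray();
        // RowFactory.create builds a Row whose fields line up with the schema
        return RowFactory.create(values);
    }
}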
Use of org.apache.spark.sql.types.StructField in project incubator-systemml by apache.
The class MLContextFrameTest, method testFrame.
public void testFrame(FrameFormat format, SCRIPT_TYPE script_type, IO_TYPE inputType, IO_TYPE outputType) {
    System.out.println("MLContextTest - Frame JavaRDD<String> for format: " + format + " Script: " + script_type);
    List<String> listA = new ArrayList<String>();
    List<String> listB = new ArrayList<String>();
    FrameMetadata fmA = null, fmB = null;
    Script script = null;
    ValueType[] schemaA = { ValueType.INT, ValueType.STRING, ValueType.DOUBLE, ValueType.BOOLEAN };
    List<ValueType> lschemaA = Arrays.asList(schemaA);
    FrameSchema fschemaA = new FrameSchema(lschemaA);
    ValueType[] schemaB = { ValueType.STRING, ValueType.DOUBLE, ValueType.BOOLEAN };
    List<ValueType> lschemaB = Arrays.asList(schemaB);
    FrameSchema fschemaB = new FrameSchema(lschemaB);
    if (inputType != IO_TYPE.FILE) {
        if (format == FrameFormat.CSV) {
            listA.add("1,Str2,3.0,true");
            listA.add("4,Str5,6.0,false");
            listA.add("7,Str8,9.0,true");
            listB.add("Str12,13.0,true");
            listB.add("Str25,26.0,false");
            fmA = new FrameMetadata(FrameFormat.CSV, fschemaA, 3, 4);
            fmB = new FrameMetadata(FrameFormat.CSV, fschemaB, 2, 3);
        } else if (format == FrameFormat.IJV) {
            listA.add("1 1 1");
            listA.add("1 2 Str2");
            listA.add("1 3 3.0");
            listA.add("1 4 true");
            listA.add("2 1 4");
            listA.add("2 2 Str5");
            listA.add("2 3 6.0");
            listA.add("2 4 false");
            listA.add("3 1 7");
            listA.add("3 2 Str8");
            listA.add("3 3 9.0");
            listA.add("3 4 true");
            listB.add("1 1 Str12");
            listB.add("1 2 13.0");
            listB.add("1 3 true");
            listB.add("2 1 Str25");
            listB.add("2 2 26.0");
            listB.add("2 3 false");
            fmA = new FrameMetadata(FrameFormat.IJV, fschemaA, 3, 4);
            fmB = new FrameMetadata(FrameFormat.IJV, fschemaB, 2, 3);
        }
        JavaRDD<String> javaRDDA = sc.parallelize(listA);
        JavaRDD<String> javaRDDB = sc.parallelize(listB);
        if (inputType == IO_TYPE.DATAFRAME) {
            JavaRDD<Row> javaRddRowA = FrameRDDConverterUtils.csvToRowRDD(sc, javaRDDA, CSV_DELIM, schemaA);
            JavaRDD<Row> javaRddRowB = FrameRDDConverterUtils.csvToRowRDD(sc, javaRDDB, CSV_DELIM, schemaB);
            // Create DataFrame
            StructType dfSchemaA = FrameRDDConverterUtils.convertFrameSchemaToDFSchema(schemaA, false);
            Dataset<Row> dataFrameA = spark.createDataFrame(javaRddRowA, dfSchemaA);
            StructType dfSchemaB = FrameRDDConverterUtils.convertFrameSchemaToDFSchema(schemaB, false);
            Dataset<Row> dataFrameB = spark.createDataFrame(javaRddRowB, dfSchemaB);
            if (script_type == SCRIPT_TYPE.DML)
                script = dml("A[2:3,2:4]=B;C=A[2:3,2:3]").in("A", dataFrameA, fmA).in("B", dataFrameB, fmB).out("A").out("C");
            else if (script_type == SCRIPT_TYPE.PYDML)
                // DO NOT USE ; at the end of any statement, it throws NPE
                script = pydml("A[$X:$Y,$X:$Z]=B\nC=A[$X:$Y,$X:$Y]").in("A", dataFrameA, fmA).in("B", dataFrameB, fmB).in("$X", 1).in("$Y", 3).in("$Z", 4).out("A").out("C");
        } else {
            if (inputType == IO_TYPE.JAVA_RDD_STR_CSV || inputType == IO_TYPE.JAVA_RDD_STR_IJV) {
                if (script_type == SCRIPT_TYPE.DML)
                    script = dml("A[2:3,2:4]=B;C=A[2:3,2:3]").in("A", javaRDDA, fmA).in("B", javaRDDB, fmB).out("A").out("C");
                else if (script_type == SCRIPT_TYPE.PYDML)
                    // DO NOT USE ; at the end of any statement, it throws NPE
                    script = pydml("A[$X:$Y,$X:$Z]=B\nC=A[$X:$Y,$X:$Y]").in("A", javaRDDA, fmA).in("B", javaRDDB, fmB).in("$X", 1).in("$Y", 3).in("$Z", 4).out("A").out("C");
            } else if (inputType == IO_TYPE.RDD_STR_CSV || inputType == IO_TYPE.RDD_STR_IJV) {
                RDD<String> rddA = JavaRDD.toRDD(javaRDDA);
                RDD<String> rddB = JavaRDD.toRDD(javaRDDB);
                if (script_type == SCRIPT_TYPE.DML)
                    script = dml("A[2:3,2:4]=B;C=A[2:3,2:3]").in("A", rddA, fmA).in("B", rddB, fmB).out("A").out("C");
                else if (script_type == SCRIPT_TYPE.PYDML)
                    // DO NOT USE ; at the end of any statement, it throws NPE
                    script = pydml("A[$X:$Y,$X:$Z]=B\nC=A[$X:$Y,$X:$Y]").in("A", rddA, fmA).in("B", rddB, fmB).in("$X", 1).in("$Y", 3).in("$Z", 4).out("A").out("C");
            }
        }
    } else {
        // Input type is file
        String fileA = null, fileB = null;
        if (format == FrameFormat.CSV) {
            fileA = baseDirectory + File.separator + "FrameA.csv";
            fileB = baseDirectory + File.separator + "FrameB.csv";
        } else if (format == FrameFormat.IJV) {
            fileA = baseDirectory + File.separator + "FrameA.ijv";
            fileB = baseDirectory + File.separator + "FrameB.ijv";
        }
        if (script_type == SCRIPT_TYPE.DML)
            script = dml("A=read($A); B=read($B);A[2:3,2:4]=B;C=A[2:3,2:3];A[1,1]=234").in("$A", fileA, fmA).in("$B", fileB, fmB).out("A").out("C");
        else if (script_type == SCRIPT_TYPE.PYDML)
            // DO NOT USE ; at the end of any statement, it throws NPE
            script = pydml("A=load($A)\nB=load($B)\nA[$X:$Y,$X:$Z]=B\nC=A[$X:$Y,$X:$Y]").in("$A", fileA).in("$B", fileB).in("$X", 1).in("$Y", 3).in("$Z", 4).out("A").out("C");
    }
    MLResults mlResults = ml.execute(script);
    // Validate output schema
    List<ValueType> lschemaOutA = Arrays.asList(mlResults.getFrameObject("A").getSchema());
    List<ValueType> lschemaOutC = Arrays.asList(mlResults.getFrameObject("C").getSchema());
    Assert.assertEquals(ValueType.INT, lschemaOutA.get(0));
    Assert.assertEquals(ValueType.STRING, lschemaOutA.get(1));
    Assert.assertEquals(ValueType.DOUBLE, lschemaOutA.get(2));
    Assert.assertEquals(ValueType.BOOLEAN, lschemaOutA.get(3));
    Assert.assertEquals(ValueType.STRING, lschemaOutC.get(0));
    Assert.assertEquals(ValueType.DOUBLE, lschemaOutC.get(1));
    // Validate output values for the requested output type
    if (outputType == IO_TYPE.JAVA_RDD_STR_CSV) {
        JavaRDD<String> javaRDDStringCSVA = mlResults.getJavaRDDStringCSV("A");
        List<String> linesA = javaRDDStringCSVA.collect();
        Assert.assertEquals("1,Str2,3.0,true", linesA.get(0));
        Assert.assertEquals("4,Str12,13.0,true", linesA.get(1));
        Assert.assertEquals("7,Str25,26.0,false", linesA.get(2));
        JavaRDD<String> javaRDDStringCSVC = mlResults.getJavaRDDStringCSV("C");
        List<String> linesC = javaRDDStringCSVC.collect();
        Assert.assertEquals("Str12,13.0", linesC.get(0));
        Assert.assertEquals("Str25,26.0", linesC.get(1));
    } else if (outputType == IO_TYPE.JAVA_RDD_STR_IJV) {
        JavaRDD<String> javaRDDStringIJVA = mlResults.getJavaRDDStringIJV("A");
        List<String> linesA = javaRDDStringIJVA.collect();
        Assert.assertEquals("1 1 1", linesA.get(0));
        Assert.assertEquals("1 2 Str2", linesA.get(1));
        Assert.assertEquals("1 3 3.0", linesA.get(2));
        Assert.assertEquals("1 4 true", linesA.get(3));
        Assert.assertEquals("2 1 4", linesA.get(4));
        Assert.assertEquals("2 2 Str12", linesA.get(5));
        Assert.assertEquals("2 3 13.0", linesA.get(6));
        Assert.assertEquals("2 4 true", linesA.get(7));
        JavaRDD<String> javaRDDStringIJVC = mlResults.getJavaRDDStringIJV("C");
        List<String> linesC = javaRDDStringIJVC.collect();
        Assert.assertEquals("1 1 Str12", linesC.get(0));
        Assert.assertEquals("1 2 13.0", linesC.get(1));
        Assert.assertEquals("2 1 Str25", linesC.get(2));
        Assert.assertEquals("2 2 26.0", linesC.get(3));
    } else if (outputType == IO_TYPE.RDD_STR_CSV) {
        RDD<String> rddStringCSVA = mlResults.getRDDStringCSV("A");
        Iterator<String> iteratorA = rddStringCSVA.toLocalIterator();
        Assert.assertEquals("1,Str2,3.0,true", iteratorA.next());
        Assert.assertEquals("4,Str12,13.0,true", iteratorA.next());
        Assert.assertEquals("7,Str25,26.0,false", iteratorA.next());
        RDD<String> rddStringCSVC = mlResults.getRDDStringCSV("C");
        Iterator<String> iteratorC = rddStringCSVC.toLocalIterator();
        Assert.assertEquals("Str12,13.0", iteratorC.next());
        Assert.assertEquals("Str25,26.0", iteratorC.next());
    } else if (outputType == IO_TYPE.RDD_STR_IJV) {
        RDD<String> rddStringIJVA = mlResults.getRDDStringIJV("A");
        Iterator<String> iteratorA = rddStringIJVA.toLocalIterator();
        Assert.assertEquals("1 1 1", iteratorA.next());
        Assert.assertEquals("1 2 Str2", iteratorA.next());
        Assert.assertEquals("1 3 3.0", iteratorA.next());
        Assert.assertEquals("1 4 true", iteratorA.next());
        Assert.assertEquals("2 1 4", iteratorA.next());
        Assert.assertEquals("2 2 Str12", iteratorA.next());
        Assert.assertEquals("2 3 13.0", iteratorA.next());
        Assert.assertEquals("2 4 true", iteratorA.next());
        Assert.assertEquals("3 1 7", iteratorA.next());
        Assert.assertEquals("3 2 Str25", iteratorA.next());
        Assert.assertEquals("3 3 26.0", iteratorA.next());
        Assert.assertEquals("3 4 false", iteratorA.next());
        RDD<String> rddStringIJVC = mlResults.getRDDStringIJV("C");
        Iterator<String> iteratorC = rddStringIJVC.toLocalIterator();
        Assert.assertEquals("1 1 Str12", iteratorC.next());
        Assert.assertEquals("1 2 13.0", iteratorC.next());
        Assert.assertEquals("2 1 Str25", iteratorC.next());
        Assert.assertEquals("2 2 26.0", iteratorC.next());
    } else if (outputType == IO_TYPE.DATAFRAME) {
        Dataset<Row> dataFrameA = mlResults.getDataFrame("A").drop(RDDConverterUtils.DF_ID_COLUMN);
        StructType dfschemaA = dataFrameA.schema();
        StructField structTypeA = dfschemaA.apply(0);
        Assert.assertEquals(DataTypes.LongType, structTypeA.dataType());
        structTypeA = dfschemaA.apply(1);
        Assert.assertEquals(DataTypes.StringType, structTypeA.dataType());
        structTypeA = dfschemaA.apply(2);
        Assert.assertEquals(DataTypes.DoubleType, structTypeA.dataType());
        structTypeA = dfschemaA.apply(3);
        Assert.assertEquals(DataTypes.BooleanType, structTypeA.dataType());
        List<Row> listAOut = dataFrameA.collectAsList();
        Row row1 = listAOut.get(0);
        Assert.assertEquals("Mismatch with expected value", Long.valueOf(1), row1.get(0));
        Assert.assertEquals("Mismatch with expected value", "Str2", row1.get(1));
        Assert.assertEquals("Mismatch with expected value", 3.0, row1.get(2));
        Assert.assertEquals("Mismatch with expected value", true, row1.get(3));
        Row row2 = listAOut.get(1);
        Assert.assertEquals("Mismatch with expected value", Long.valueOf(4), row2.get(0));
        Assert.assertEquals("Mismatch with expected value", "Str12", row2.get(1));
        Assert.assertEquals("Mismatch with expected value", 13.0, row2.get(2));
        Assert.assertEquals("Mismatch with expected value", true, row2.get(3));
        Dataset<Row> dataFrameC = mlResults.getDataFrame("C").drop(RDDConverterUtils.DF_ID_COLUMN);
        StructType dfschemaC = dataFrameC.schema();
        StructField structTypeC = dfschemaC.apply(0);
        Assert.assertEquals(DataTypes.StringType, structTypeC.dataType());
        structTypeC = dfschemaC.apply(1);
        Assert.assertEquals(DataTypes.DoubleType, structTypeC.dataType());
        List<Row> listCOut = dataFrameC.collectAsList();
        Row row3 = listCOut.get(0);
        Assert.assertEquals("Mismatch with expected value", "Str12", row3.get(0));
        Assert.assertEquals("Mismatch with expected value", 13.0, row3.get(1));
        Row row4 = listCOut.get(1);
        Assert.assertEquals("Mismatch with expected value", "Str25", row4.get(0));
        Assert.assertEquals("Mismatch with expected value", 26.0, row4.get(1));
    } else {
        String[][] frameA = mlResults.getFrameAs2DStringArray("A");
        Assert.assertEquals("Str2", frameA[0][1]);
        Assert.assertEquals("3.0", frameA[0][2]);
        Assert.assertEquals("13.0", frameA[1][2]);
        Assert.assertEquals("true", frameA[1][3]);
        Assert.assertEquals("Str25", frameA[2][1]);
        String[][] frameC = mlResults.getFrameAs2DStringArray("C");
        Assert.assertEquals("Str12", frameC[0][0]);
        Assert.assertEquals("Str25", frameC[1][0]);
        Assert.assertEquals("13.0", frameC[0][1]);
        Assert.assertEquals("26.0", frameC[1][1]);
    }
}
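Note that testFrame is a parameterized helper rather than a test in its own right: individual @Test methods pick one combination of frame format, script type, input type, and output type. A hypothetical driver (the concrete method names in MLContextFrameTest may differ) might look like:

@Test
public void testFrameCSVDMLDataFrameInDataFrameOut() {
    // CSV-formatted frame data, DML script, DataFrame in, DataFrame out
    testFrame(FrameFormat.CSV, SCRIPT_TYPE.DML, IO_TYPE.DATAFRAME, IO_TYPE.DATAFRAME);
}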
Use of org.apache.spark.sql.types.StructField in project incubator-systemml by apache.
The class FrameRDDConverterUtils, method convertFrameSchemaToDFSchema.
/**
 * Converts a frame schema into a DataFrame schema.
 *
 * @param fschema frame schema
 * @param containsID true if the data contains an ID column
 * @return Spark StructType of StructFields representing the schema
 */
public static StructType convertFrameSchemaToDFSchema(ValueType[] fschema, boolean containsID) {
    // generate the DataFrame schema from the frame value types
    List<StructField> fields = new ArrayList<>();
    // add id column type
    if (containsID)
        fields.add(DataTypes.createStructField(RDDConverterUtils.DF_ID_COLUMN, DataTypes.DoubleType, true));
    // add remaining types
    int col = 1;
    for (ValueType schema : fschema) {
        DataType dt = null;
        switch(schema) {
            case STRING:
                dt = DataTypes.StringType;
                break;
            case DOUBLE:
                dt = DataTypes.DoubleType;
                break;
            case INT:
                dt = DataTypes.LongType;
                break;
            case BOOLEAN:
                dt = DataTypes.BooleanType;
                break;
            default:
                dt = DataTypes.StringType;
                LOG.warn("Using default type String for " + schema.toString());
        }
        fields.add(DataTypes.createStructField("C" + col++, dt, true));
    }
    return DataTypes.createStructType(fields);
}
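A minimal, illustrative usage sketch of this utility, assuming the incubator-systemml import paths of the time (org.apache.sysml.parser.Expression.ValueType and org.apache.sysml.runtime.instructions.spark.utils.FrameRDDConverterUtils); adjust the imports if the package layout differs:

import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;
import org.apache.sysml.parser.Expression.ValueType;
import org.apache.sysml.runtime.instructions.spark.utils.FrameRDDConverterUtils;

public class FrameSchemaToDFSchemaExample {
    public static void main(String[] args) {
        // frame schema: one INT, one STRING and one DOUBLE column
        ValueType[] fschema = { ValueType.INT, ValueType.STRING, ValueType.DOUBLE };
        // containsID = false: no internal ID column is prepended
        StructType dfSchema = FrameRDDConverterUtils.convertFrameSchemaToDFSchema(fschema, false);
        // expected output: C1 -> LongType, C2 -> StringType, C3 -> DoubleType
        for (StructField field : dfSchema.fields())
            System.out.println(field.name() + " -> " + field.dataType());
    }
}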
Use of org.apache.spark.sql.types.StructField in project incubator-systemml by apache.
The class FrameRDDConverterUtils, method convertDFSchemaToFrameSchema.
/**
 * NOTE: to support vector columns we impose the following schema
 * restriction: at most a single vector column, which allows inference of
 * the vector length without data access and covers the common case.
 *
 * @param dfschema schema as StructType
 * @param colnames column names
 * @param fschema array of SystemML ValueTypes
 * @param containsID if true, contains ID column
 * @return 0-based column index of the vector column, -1 if no vector
 */
public static int convertDFSchemaToFrameSchema(StructType dfschema, String[] colnames, ValueType[] fschema, boolean containsID) {
    // basic meta data
    int off = containsID ? 1 : 0;
    boolean containsVect = false;
    int lenVect = fschema.length - (dfschema.fields().length - off) + 1;
    int colVect = -1;
    // process individual columns
    for (int i = off, pos = 0; i < dfschema.fields().length; i++) {
        StructField structType = dfschema.apply(i);
        colnames[pos] = structType.name();
        if (structType.dataType() == DataTypes.DoubleType || structType.dataType() == DataTypes.FloatType)
            fschema[pos++] = ValueType.DOUBLE;
        else if (structType.dataType() == DataTypes.LongType || structType.dataType() == DataTypes.IntegerType)
            fschema[pos++] = ValueType.INT;
        else if (structType.dataType() == DataTypes.BooleanType)
            fschema[pos++] = ValueType.BOOLEAN;
        else if (structType.dataType() instanceof VectorUDT) {
            if (containsVect)
                throw new RuntimeException("Found invalid second vector column.");
            String name = colnames[pos];
            colVect = pos;
            for (int j = 0; j < lenVect; j++) {
                colnames[pos] = name + "v" + j;
                fschema[pos++] = ValueType.DOUBLE;
            }
            containsVect = true;
        } else
            fschema[pos++] = ValueType.STRING;
    }
    return colVect;
}
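For the reverse direction, here is a hedged usage sketch that feeds a hand-built StructType (no ID column, no vector column) into the converter; the import paths are assumed as in the example above:

import java.util.Arrays;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;
import org.apache.sysml.parser.Expression.ValueType;
import org.apache.sysml.runtime.instructions.spark.utils.FrameRDDConverterUtils;

public class DFSchemaToFrameSchemaExample {
    public static void main(String[] args) {
        // a plain three-column DataFrame schema without the internal ID column
        StructType dfschema = new StructType(new StructField[] {
            DataTypes.createStructField("name", DataTypes.StringType, true),
            DataTypes.createStructField("score", DataTypes.DoubleType, true),
            DataTypes.createStructField("passed", DataTypes.BooleanType, true) });
        String[] colnames = new String[3];
        ValueType[] fschema = new ValueType[3];
        // containsID = false, so every DataFrame column maps to a frame column
        int colVect = FrameRDDConverterUtils.convertDFSchemaToFrameSchema(dfschema, colnames, fschema, false);
        // expected: colnames = [name, score, passed], fschema = [STRING, DOUBLE, BOOLEAN], colVect = -1
        System.out.println(Arrays.toString(colnames));
        System.out.println(Arrays.toString(fschema));
        System.out.println("vector column index: " + colVect);
    }
}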
Use of org.apache.spark.sql.types.StructField in project incubator-systemml by apache.
The class RDDConverterUtils, method binaryBlockToDataFrame.
public static Dataset<Row> binaryBlockToDataFrame(SparkSession sparkSession, JavaPairRDD<MatrixIndexes, MatrixBlock> in, MatrixCharacteristics mc, boolean toVector) {
    if (!mc.colsKnown())
        throw new RuntimeException("Number of columns needed to convert binary block to data frame.");
    // slice blocks into rows, align and convert into data frame rows
    JavaRDD<Row> rowsRDD = in.flatMapToPair(new SliceBinaryBlockToRowsFunction(mc.getRowsPerBlock())).groupByKey().map(new ConvertRowBlocksToRows((int) mc.getCols(), mc.getColsPerBlock(), toVector));
    // create data frame schema
    List<StructField> fields = new ArrayList<>();
    fields.add(DataTypes.createStructField(DF_ID_COLUMN, DataTypes.DoubleType, false));
    if (toVector)
        fields.add(DataTypes.createStructField("C1", new VectorUDT(), false));
    else {
        // one double column per matrix column
        for (int i = 1; i <= mc.getCols(); i++)
            fields.add(DataTypes.createStructField("C" + i, DataTypes.DoubleType, false));
    }
    // rdd to data frame conversion
    return sparkSession.createDataFrame(rowsRDD.rdd(), DataTypes.createStructType(fields));
}
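The resulting DataFrame carries the extra ID column so that row order stays recoverable after the shuffle. A hedged sketch of a caller that converts a binary-block matrix and restores a plain, ordered matrix-shaped DataFrame (import paths assumed as in incubator-systemml; adjust if needed):

import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.apache.sysml.runtime.instructions.spark.utils.RDDConverterUtils;
import org.apache.sysml.runtime.matrix.MatrixCharacteristics;
import org.apache.sysml.runtime.matrix.data.MatrixBlock;
import org.apache.sysml.runtime.matrix.data.MatrixIndexes;

public class BinaryBlockToDataFrameExample {
    // Convert binary blocks to a DataFrame, then sort by the ID column and drop it
    // so that only the C1..Cn value columns remain, in row order.
    public static Dataset<Row> toOrderedDataFrame(SparkSession spark,
            JavaPairRDD<MatrixIndexes, MatrixBlock> blocks, MatrixCharacteristics mc) {
        Dataset<Row> df = RDDConverterUtils.binaryBlockToDataFrame(spark, blocks, mc, false);
        return df.orderBy(RDDConverterUtils.DF_ID_COLUMN).drop(RDDConverterUtils.DF_ID_COLUMN);
    }
}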