Use of org.apache.spark.sql.Row in project incubator-systemml by apache.
In the class MLContextFrameTest, the method testTransform:
@Test
public void testTransform() {
    System.out.println("MLContextFrameTest - transform");

    Row[] rowsA = {
        RowFactory.create("\"`@(\"(!&", 2, "20news-bydate-train/comp.os.ms-windows.misc/9979"),
        RowFactory.create("\"`@(\"\"(!&\"", 3, "20news-bydate-train/comp.os.ms-windows.misc/9979") };
    JavaRDD<Row> javaRddRowA = sc.parallelize(Arrays.asList(rowsA));

    List<StructField> fieldsA = new ArrayList<StructField>();
    fieldsA.add(DataTypes.createStructField("featureName", DataTypes.StringType, true));
    fieldsA.add(DataTypes.createStructField("featureValue", DataTypes.IntegerType, true));
    fieldsA.add(DataTypes.createStructField("id", DataTypes.StringType, true));
    StructType schemaA = DataTypes.createStructType(fieldsA);
    Dataset<Row> dataFrameA = spark.createDataFrame(javaRddRowA, schemaA);

    // recode the string columns featureName and id; featureValue passes through
    String dmlString = "[tA, tAM] = transformencode (target = A, spec = \"{ids: false, recode: [ featureName, id ]}\");";
    Script script = dml(dmlString)
        .in("A", dataFrameA, new FrameMetadata(FrameFormat.CSV, dataFrameA.count(), (long) dataFrameA.columns().length))
        .out("tA")
        .out("tAM");
    ml.setExplain(true);
    ml.setExplainLevel(ExplainLevel.RECOMPILE_HOPS);
    MLResults results = ml.execute(script);

    double[][] matrixtA = results.getMatrixAs2DDoubleArray("tA");
    Assert.assertEquals(1.0, matrixtA[0][2], 0.0);

    Dataset<Row> dataFrame_tA = results.getMatrix("tA").toDF();
    System.out.println("Number of matrix tA rows = " + dataFrame_tA.count());
    dataFrame_tA.printSchema();
    dataFrame_tA.show();

    Dataset<Row> dataFrame_tAM = results.getFrame("tAM").toDF();
    System.out.println("Number of frame tAM rows = " + dataFrame_tAM.count());
    dataFrame_tAM.printSchema();
    dataFrame_tAM.show();
}
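For intuition, here is a minimal plain-Java sketch of the recode idea, not SystemML's actual implementation (the assignment of consecutive codes by first appearance is an assumption). Both input rows above carry the identical id string, so both map to the same code, which is why matrixtA[0][2] is asserted to be 1.0.

import java.util.LinkedHashMap;
import java.util.Map;

public class RecodeSketch {
    public static void main(String[] args) {
        String[] ids = {
            "20news-bydate-train/comp.os.ms-windows.misc/9979",
            "20news-bydate-train/comp.os.ms-windows.misc/9979" };
        // assign each distinct value the next consecutive code, starting at 1
        Map<String, Integer> codes = new LinkedHashMap<>();
        for (String id : ids) {
            codes.putIfAbsent(id, codes.size() + 1);
            System.out.println(id + " -> " + codes.get(id)); // both print 1
        }
    }
}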
Use of org.apache.spark.sql.Row in project incubator-systemml by apache.
In the class MLContextFrameTest, the method testInputFrameAndMatrixOutputMatrix:
@Test
public void testInputFrameAndMatrixOutputMatrix() {
    System.out.println("MLContextFrameTest - input frame and matrix, output matrix");

    // frame input A: one string column, one double column
    List<String> dataA = new ArrayList<String>();
    dataA.add("Test1,4.0");
    dataA.add("Test2,5.0");
    dataA.add("Test3,6.0");
    JavaRDD<String> javaRddStringA = sc.parallelize(dataA);
    ValueType[] schema = { ValueType.STRING, ValueType.DOUBLE };

    // matrix input B: a single double column
    List<String> dataB = new ArrayList<String>();
    dataB.add("1.0");
    dataB.add("2.0");
    JavaRDD<String> javaRddStringB = sc.parallelize(dataB);

    JavaRDD<Row> javaRddRowA = FrameRDDConverterUtils.csvToRowRDD(sc, javaRddStringA, CSV_DELIM, schema);
    JavaRDD<Row> javaRddRowB = javaRddStringB.map(new CommaSeparatedValueStringToDoubleArrayRow());

    List<StructField> fieldsA = new ArrayList<StructField>();
    fieldsA.add(DataTypes.createStructField("1", DataTypes.StringType, true));
    fieldsA.add(DataTypes.createStructField("2", DataTypes.DoubleType, true));
    StructType schemaA = DataTypes.createStructType(fieldsA);
    Dataset<Row> dataFrameA = spark.createDataFrame(javaRddRowA, schemaA);

    List<StructField> fieldsB = new ArrayList<StructField>();
    fieldsB.add(DataTypes.createStructField("1", DataTypes.DoubleType, true));
    StructType schemaB = DataTypes.createStructType(fieldsB);
    Dataset<Row> dataFrameB = spark.createDataFrame(javaRddRowB, schemaB);

    // recode both columns of A, multiply by B, then scale by s
    String dmlString = "[tA, tAM] = transformencode (target = A, spec = \"{ids: true, recode: [ 1, 2 ]}\");\n"
        + "C = tA %*% B;\n"
        + "M = s * C;";
    Script script = dml(dmlString)
        .in("A", dataFrameA, new FrameMetadata(FrameFormat.CSV, dataFrameA.count(), (long) dataFrameA.columns().length))
        .in("B", dataFrameB, new MatrixMetadata(MatrixFormat.CSV, dataFrameB.count(), (long) dataFrameB.columns().length))
        .in("s", 2)
        .out("M");
    MLResults results = ml.execute(script);

    double[][] matrix = results.getMatrixAs2DDoubleArray("M");
    Assert.assertEquals(6.0, matrix[0][0], 0.0);
    Assert.assertEquals(12.0, matrix[1][0], 0.0);
    Assert.assertEquals(18.0, matrix[2][0], 0.0);
}
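The expected values follow from the recode plus the matrix arithmetic: assuming codes are assigned in order of first appearance, tA = [[1,1],[2,2],[3,3]]; with B = [[1],[2]] and s = 2, M = s * (tA %*% B) = [[6],[12],[18]]. A worked plain-Java check (a sketch, not SystemML code):

public class ExpectedOutputSketch {
    public static void main(String[] args) {
        double[][] tA = { { 1, 1 }, { 2, 2 }, { 3, 3 } }; // recoded A (assumed code order)
        double[] b = { 1, 2 };                            // matrix B
        double s = 2;                                     // scalar input s
        for (double[] row : tA) {
            double c = row[0] * b[0] + row[1] * b[1];     // one entry of C = tA %*% B
            System.out.println(s * c);                    // prints 6.0, 12.0, 18.0
        }
    }
}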
Use of org.apache.spark.sql.Row in project incubator-systemml by apache.
In the class MLContextParforDatasetTest, the method runMLContextParforDatasetTest:
private void runMLContextParforDatasetTest(boolean vector, boolean unknownDims, boolean multiInputs) {
    // modify memory budget to trigger fused datapartition-execute
    long oldmem = InfrastructureAnalyzer.getLocalMaxMemory();
    InfrastructureAnalyzer.setLocalMaxMemory(1 * 1024 * 1024); // 1MB
    try {
        double[][] A = getRandomMatrix(rows, cols, -10, 10, sparsity, 76543);
        MatrixBlock mbA = DataConverter.convertToMatrixBlock(A);
        int blksz = ConfigurationManager.getBlocksize();
        MatrixCharacteristics mc1 = new MatrixCharacteristics(rows, cols, blksz, blksz, mbA.getNonZeros());
        MatrixCharacteristics mc2 = unknownDims ? new MatrixCharacteristics() : new MatrixCharacteristics(mc1);

        // create input dataset
        SparkSession sparkSession = SparkSession.builder().sparkContext(sc.sc()).getOrCreate();
        JavaPairRDD<MatrixIndexes, MatrixBlock> in = SparkExecutionContext.toMatrixJavaPairRDD(sc, mbA, blksz, blksz);
        Dataset<Row> df = RDDConverterUtils.binaryBlockToDataFrame(sparkSession, in, mc1, vector);
        MatrixMetadata mm = new MatrixMetadata(vector ? MatrixFormat.DF_VECTOR_WITH_INDEX : MatrixFormat.DF_DOUBLES_WITH_INDEX);
        mm.setMatrixCharacteristics(mc2);

        // single-input and two-input variants of the parfor row-sum script
        // (newlines added between DML statements so the script parses cleanly)
        String s1 = "v = matrix(0, rows=nrow(X), cols=1)\n"
            + "parfor(i in 1:nrow(X), log=DEBUG) {\n"
            + "   v[i, ] = sum(X[i, ]);\n"
            + "}\n"
            + "r = sum(v);";
        String s2 = "v = matrix(0, rows=nrow(X), cols=1)\n"
            + "Y = X;\n"
            + "parfor(i in 1:nrow(X), log=DEBUG) {\n"
            + "   v[i, ] = sum(X[i, ]+Y[i, ]);\n"
            + "}\n"
            + "r = sum(v);";
        String s = multiInputs ? s2 : s1;

        ml.setExplain(true);
        ml.setExplainLevel(ExplainLevel.RUNTIME);
        ml.setStatistics(true);

        Script script = dml(s).in("X", df, mm).out("r");
        MLResults results = ml.execute(script);

        // compare aggregation results
        double sum1 = results.getDouble("r");
        double sum2 = mbA.sum() * (multiInputs ? 2 : 1);
        TestUtils.compareScalars(sum2, sum1, 0.000001);
    } catch (Exception ex) {
        ex.printStackTrace();
        throw new RuntimeException(ex);
    } finally {
        InfrastructureAnalyzer.setLocalMaxMemory(oldmem);
    }
}
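The reference value sum2 works because summing per-row sums equals summing all cells, and with Y = X every cell contributes twice in the two-input variant, hence the (multiInputs ? 2 : 1) factor. A plain-Java sketch of that reference computation:

public class ParforReferenceSketch {
    // summing row sums equals summing all cells; Y = X doubles the total
    static double referenceSum(double[][] a, boolean multiInputs) {
        double total = 0;
        for (double[] row : a)
            for (double v : row)
                total += v;
        return total * (multiInputs ? 2 : 1);
    }

    public static void main(String[] args) {
        double[][] a = { { 1, 2 }, { 3, 4 } };
        System.out.println(referenceSum(a, false)); // 10.0
        System.out.println(referenceSum(a, true));  // 20.0
    }
}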
Use of org.apache.spark.sql.Row in project ignite by apache.
In the class JavaStandaloneIgniteRDDSelfTest, the method testAllFieldsTypes:
/**
 * @throws Exception If failed.
 */
public void testAllFieldsTypes() throws Exception {
    JavaSparkContext sc = new JavaSparkContext("local[*]", "test");
    final int cnt = 100;
    try {
        JavaIgniteContext<String, EntityTestAllTypeFields> ic = new JavaIgniteContext<>(sc, new IgniteConfigProvider());
        JavaIgniteRDD<String, EntityTestAllTypeFields> cache = ic.fromCache(ENTITY_ALL_TYPES_CACHE_NAME);
        cache.savePairs(sc.parallelize(F.range(0, cnt), 2).mapToPair(INT_TO_ENTITY_ALL_FIELDS_F));

        EntityTestAllTypeFields e = new EntityTestAllTypeFields(cnt / 2);
        for (Field f : EntityTestAllTypeFields.class.getDeclaredFields()) {
            String fieldName = f.getName();
            Object val = GridTestUtils.getFieldValue(e, fieldName);
            Dataset<Row> df = cache.sql(
                String.format("select %s from EntityTestAllTypeFields where %s = ?", fieldName, fieldName), val);
            String msg = String.format("+++ Fail on %s field", fieldName);

            if (val instanceof BigDecimal) {
                // compareTo ignores scale, unlike BigDecimal.equals
                Object res = ((Row[]) df.collect())[0].get(0);
                assertTrue(msg, ((Comparable<BigDecimal>) val).compareTo((BigDecimal) res) == 0);
            } else if (val instanceof java.sql.Date) {
                assertEquals(msg, val.toString(), ((Row[]) df.collect())[0].get(0).toString());
            } else if (val.getClass().isArray()) {
                assertTrue(msg, 1 <= df.count());
            } else {
                Row[] rows = (Row[]) df.collect(); // collect once instead of three times
                assertTrue(msg, rows.length > 0);
                assertTrue(msg, rows[0].size() > 0);
                assertEquals(msg, val, rows[0].get(0));
            }
            info(String.format("+++ Query on the field %s : %s passed", fieldName, f.getType().getSimpleName()));
        }
    } finally {
        sc.stop();
    }
}
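A note on the BigDecimal branch above: BigDecimal.equals is scale-sensitive (1.0 and 1.00 are not equal), while compareTo ignores scale, which is why the test compares with compareTo rather than assertEquals. A self-contained illustration:

import java.math.BigDecimal;

public class BigDecimalComparisonSketch {
    public static void main(String[] args) {
        BigDecimal a = new BigDecimal("1.0");
        BigDecimal b = new BigDecimal("1.00");
        System.out.println(a.equals(b));         // false: equals compares scale too
        System.out.println(a.compareTo(b) == 0); // true: same numeric value
    }
}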
Use of org.apache.spark.sql.Row in project net.jgp.labs.spark by jgperrin.
In the class CsvToDatasetApp, the method start:
private void start() {
    SparkSession spark = SparkSession.builder()
        .appName("CSV to Dataset")
        .master("local")
        .getOrCreate();

    String filename = "data/tuple-data-file.csv";
    Dataset<Row> df = spark.read()
        .format("csv")
        .option("inferSchema", "true") // sample the file to infer column types
        .option("header", "false")     // the file has no header row
        .load(filename);
    df.show();
}
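Since inferSchema costs an extra pass over the input, a known layout can be declared up front instead. A minimal sketch of that variant (the column names and types c0/c1 are assumptions for illustration, not the actual contents of tuple-data-file.csv):

import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.StructType;

public class CsvExplicitSchemaSketch {
    public static void main(String[] args) {
        SparkSession spark = SparkSession.builder()
            .appName("CSV with explicit schema").master("local").getOrCreate();
        StructType schema = new StructType()
            .add("c0", DataTypes.StringType, true)
            .add("c1", DataTypes.IntegerType, true);
        Dataset<Row> df = spark.read()
            .format("csv")
            .schema(schema)            // no inference pass over the file
            .option("header", "false")
            .load("data/tuple-data-file.csv");
        df.show();
        spark.stop();
    }
}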