Use of org.apache.spark.sql.SparkSession in project incubator-systemml by apache.
The class MLContextParforDatasetTest, method runMLContextParforDatasetTest.
private void runMLContextParforDatasetTest(boolean vector, boolean unknownDims, boolean multiInputs) {
    // modify memory budget to trigger fused datapartition-execute
    long oldmem = InfrastructureAnalyzer.getLocalMaxMemory();
    // 1MB
    InfrastructureAnalyzer.setLocalMaxMemory(1 * 1024 * 1024);
    try {
        double[][] A = getRandomMatrix(rows, cols, -10, 10, sparsity, 76543);
        MatrixBlock mbA = DataConverter.convertToMatrixBlock(A);
        int blksz = ConfigurationManager.getBlocksize();
        MatrixCharacteristics mc1 = new MatrixCharacteristics(rows, cols, blksz, blksz, mbA.getNonZeros());
        MatrixCharacteristics mc2 = unknownDims ? new MatrixCharacteristics() : new MatrixCharacteristics(mc1);
        // create input dataset
        SparkSession sparkSession = SparkSession.builder().sparkContext(sc.sc()).getOrCreate();
        JavaPairRDD<MatrixIndexes, MatrixBlock> in = SparkExecutionContext.toMatrixJavaPairRDD(sc, mbA, blksz, blksz);
        Dataset<Row> df = RDDConverterUtils.binaryBlockToDataFrame(sparkSession, in, mc1, vector);
        MatrixMetadata mm = new MatrixMetadata(vector ? MatrixFormat.DF_VECTOR_WITH_INDEX : MatrixFormat.DF_DOUBLES_WITH_INDEX);
        mm.setMatrixCharacteristics(mc2);
        String s1 = "v = matrix(0, rows=nrow(X), cols=1)"
            + "parfor(i in 1:nrow(X), log=DEBUG) {"
            + " v[i, ] = sum(X[i, ]);"
            + "}"
            + "r = sum(v);";
        String s2 = "v = matrix(0, rows=nrow(X), cols=1)"
            + "Y = X;"
            + "parfor(i in 1:nrow(X), log=DEBUG) {"
            + " v[i, ] = sum(X[i, ]+Y[i, ]);"
            + "}"
            + "r = sum(v);";
        String s = multiInputs ? s2 : s1;
        ml.setExplain(true);
        ml.setExplainLevel(ExplainLevel.RUNTIME);
        ml.setStatistics(true);
        Script script = dml(s).in("X", df, mm).out("r");
        MLResults results = ml.execute(script);
        // compare aggregation results
        double sum1 = results.getDouble("r");
        double sum2 = mbA.sum() * (multiInputs ? 2 : 1);
        TestUtils.compareScalars(sum2, sum1, 0.000001);
    } catch (Exception ex) {
        ex.printStackTrace();
        throw new RuntimeException(ex);
    } finally {
        InfrastructureAnalyzer.setLocalMaxMemory(oldmem);
    }
}
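For context, the test above relies on fields supplied by the surrounding test harness (sc, ml, rows, cols, sparsity). Below is a minimal sketch, assuming SystemML's org.apache.sysml.api.mlcontext.MLContext API, org.apache.spark.api.java.JavaSparkContext, and local execution, of how the Spark and MLContext handles could be created; the method name and master setting are illustrative and not taken from the test.

private void setupSparkAndMLContext() {
    // local SparkSession, analogous to the session the test harness provides
    SparkSession spark = SparkSession.builder().appName("MLContext setup sketch").master("local").getOrCreate();
    // JavaSparkContext wrapping the same SparkContext (the test's `sc` field)
    JavaSparkContext sc = new JavaSparkContext(spark.sparkContext());
    // MLContext bound to the running session (the test's `ml` field); constructor assumed from the MLContext API
    MLContext ml = new MLContext(spark);
    System.out.println("Spark " + spark.version() + " ready; contexts created: " + (sc != null && ml != null));
}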
Use of org.apache.spark.sql.SparkSession in project net.jgp.labs.spark by jgperrin.
The class CsvToDatasetApp, method start.
private void start() {
    SparkSession spark = SparkSession.builder().appName("CSV to Dataset").master("local").getOrCreate();
    String filename = "data/tuple-data-file.csv";
    // read a headerless CSV file and let Spark infer the column types
    Dataset<Row> df = spark.read().format("csv").option("inferSchema", "true").option("header", "false").load(filename);
    df.show();
}
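Because inferSchema triggers an extra pass over the file, an alternative is to supply the schema explicitly. A hedged sketch follows, using org.apache.spark.sql.types.StructType and DataTypes; the column names f0 and f1 are invented, since the actual layout of data/tuple-data-file.csv is not shown above.

private void startWithExplicitSchema() {
    SparkSession spark = SparkSession.builder().appName("CSV to Dataset").master("local").getOrCreate();
    // hypothetical two-column schema; adjust to the actual file layout
    StructType schema = new StructType()
        .add("f0", DataTypes.StringType)
        .add("f1", DataTypes.IntegerType);
    Dataset<Row> df = spark.read().format("csv")
        .schema(schema)
        .option("header", "false")
        .load("data/tuple-data-file.csv");
    df.show();
    df.printSchema();
}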
Use of org.apache.spark.sql.SparkSession in project net.jgp.labs.spark by jgperrin.
The class JsonArrayToDataset, method start.
private void start() {
    SparkSession spark = SparkSession.builder().appName("JSON array to Dataset").master("local").getOrCreate();
    String filename = "data/array.json";
    long start = System.currentTimeMillis();
    Dataset<Row> df = spark.read().json(filename);
    long stop = System.currentTimeMillis();
    System.out.println("Processing took " + (stop - start) + " ms");
    df.show();
    df.printSchema();
    // explode the array column (the "one liner") into one row per value
    df = df.select(explode(df.col("valsInArrays"))).toDF("vals");
    df.show();
    df.printSchema();
}
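The explode step can be tried without the data/array.json file. A self-contained sketch follows; the sample values are invented and only the column name valsInArrays is taken from the snippet above. It relies on DataFrameReader.json(Dataset&lt;String&gt;) from Spark 2.2+, java.util.Arrays, org.apache.spark.sql.Encoders, and the static import of org.apache.spark.sql.functions.explode.

private void explodeInMemorySample() {
    SparkSession spark = SparkSession.builder().appName("JSON array to Dataset").master("local").getOrCreate();
    // one JSON record with an array field, matching the column name used above
    Dataset<String> json = spark.createDataset(
        Arrays.asList("{\"valsInArrays\":[1,2,3]}"), Encoders.STRING());
    Dataset<Row> df = spark.read().json(json);
    // one output row per array element
    df.select(explode(df.col("valsInArrays"))).toDF("vals").show();
}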
Use of org.apache.spark.sql.SparkSession in project net.jgp.labs.spark by jgperrin.
The class JsonMapToDataset, method start.
private void start() {
    SparkSession spark = SparkSession.builder().appName("JSON map to Dataset").master("local").getOrCreate();
    String filename = "data/map.json";
    long start = System.currentTimeMillis();
    // read a JSON file; map-like objects are inferred as nested (struct) columns
    Dataset<Row> df = spark.read().json(filename);
    long stop = System.currentTimeMillis();
    System.out.println("Processing took " + (stop - start) + " ms");
    df.show();
    df.printSchema();
}
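Spark's JSON source expects line-delimited JSON by default, one object per line. If data/map.json were instead pretty-printed across several lines, the multiLine option (available since Spark 2.2) would be needed; a short sketch:

private void startMultiLine() {
    SparkSession spark = SparkSession.builder().appName("JSON map to Dataset").master("local").getOrCreate();
    // multiLine lets Spark parse a JSON document that spans several lines
    Dataset<Row> df = spark.read()
        .option("multiLine", true)
        .json("data/map.json");
    df.show();
    df.printSchema();
}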
Use of org.apache.spark.sql.SparkSession in project net.jgp.labs.spark by jgperrin.
The class QuotedCsvWithHeaderToDataset, method start.
private void start() {
    SparkSession spark = SparkSession.builder().appName("CSV to Dataset").master("local").getOrCreate();
    String filename = "data/csv-quoted.txt";
    // read a quoted CSV file with a header row, inferring column types
    Dataset<Row> df = spark.read().option("inferSchema", "true").option("header", "true").csv(filename);
    df.show();
    df.printSchema();
}
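Spark's CSV reader already handles double-quoted fields with its default settings, which is why no quote option appears above. A sketch of where non-default quoting conventions would be configured; the values shown simply mirror Spark's defaults and are illustrative only.

private void startWithExplicitQuoting() {
    SparkSession spark = SparkSession.builder().appName("CSV to Dataset").master("local").getOrCreate();
    Dataset<Row> df = spark.read()
        .option("header", "true")
        .option("inferSchema", "true")
        // these mirror the defaults; change them for files using other conventions
        .option("sep", ",")
        .option("quote", "\"")
        .option("escape", "\\")
        .csv("data/csv-quoted.txt");
    df.show();
    df.printSchema();
}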