Use of org.apache.spark.sql.SparkSession in project net.jgp.labs.spark by jgperrin: class ArrayToDatasetApp, method start().
private void start() {
  SparkSession spark = SparkSession.builder()
      .appName("Array to Dataset")
      .master("spark://10.0.100.81:7077")
      .getOrCreate();

  String[] l = new String[] { "a", "b", "c", "d" };
  List<String> data = Arrays.asList(l);
  // The encoder tells Spark how to serialize the String elements
  Dataset<String> df = spark.createDataset(data, Encoders.STRING());
  df.show();
}
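For quick experiments without a cluster, the same conversion can run entirely on one machine; a minimal, self-contained sketch, assuming a local[*] master instead of the hard-coded spark:// URL above:

import java.util.Arrays;
import java.util.List;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.SparkSession;

public class ArrayToDatasetLocalSketch {
  public static void main(String[] args) {
    // Assumption: run locally, using all available cores
    SparkSession spark = SparkSession.builder()
        .appName("Array to Dataset (local sketch)")
        .master("local[*]")
        .getOrCreate();
    List<String> data = Arrays.asList("a", "b", "c", "d");
    Dataset<String> ds = spark.createDataset(data, Encoders.STRING());
    ds.show(); // a single "value" column with four rows
    spark.stop();
  }
}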
Use of org.apache.spark.sql.SparkSession in project net.jgp.labs.spark by jgperrin: class BooksCsvToDataset, method start().
private void start() {
  SparkSession spark = SparkSession.builder()
      .appName("Book CSV to Dataset")
      .master("local")
      .getOrCreate();

  String filename = "data/books.csv";
  // @formatter:off
  Dataset<Row> df = spark.read()
      .format("csv")
      .option("inferSchema", "false") // we are not inferring the schema for now
      .option("header", "true")
      .load(filename);
  // @formatter:on
  df.show();
  // In this case everything is a string
  df.printSchema();
}
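Because inferSchema is off, every column comes back as a string. A hedged variant, reusing the spark session and filename above, shows what changes when inference is turned on:

Dataset<Row> inferred = spark.read()
    .format("csv")
    .option("header", "true")
    .option("inferSchema", "true") // costs an extra pass over the file to guess types
    .load(filename);
inferred.printSchema(); // numeric columns now appear as int/double instead of string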
Use of org.apache.spark.sql.SparkSession in project net.jgp.labs.spark by jgperrin: class JsonComplexArrayToDataset, method start().
private void start() {
  SparkSession spark = SparkSession.builder()
      .appName("Complex JSON array to Dataset")
      .master("local")
      .getOrCreate();

  String filename = "data/array-complex.json";
  long start = System.currentTimeMillis();
  Dataset<Row> df = spark.read().json(filename);
  long stop = System.currentTimeMillis();
  System.out.println("Processing took " + (stop - start) + " ms");
  df.show();
  df.printSchema();
}
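Note that spark.read().json() expects JSON Lines (one object per line) by default. If array-complex.json instead holds a single top-level JSON array, Spark 2.2+ can parse it with the multiLine option; a sketch under that assumption:

Dataset<Row> wholeFile = spark.read()
    .option("multiLine", "true") // parse the whole file as one JSON document
    .json(filename);
wholeFile.printSchema();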
Use of org.apache.spark.sql.SparkSession in project net.jgp.labs.spark by jgperrin: class JsonToDataset, method start().
private void start() {
  SparkSession spark = SparkSession.builder()
      .appName("JSON to Dataset")
      .master("local")
      .getOrCreate();

  String filename = "data/north-carolina-school-performance-data.json";
  long start = System.currentTimeMillis();
  Dataset<Row> df = spark.read().json(filename);
  long stop = System.currentTimeMillis();
  System.out.println("Processing took " + (stop - start) + " ms");
  df.show();
  df.printSchema();

  // Flattening: promote the nested fields.district to a top-level column
  df = df.withColumn("district", df.col("fields.district"));
  // This does not work: the nested column remains in place (Spark 2.0.0)
  df = df.drop(df.col("fields.district"));
  df.show();
  df.printSchema();
}
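A common workaround for the failing drop above is to copy out each nested column that is still needed and then drop the whole struct by name, which does work. A sketch, assuming only district is wanted from fields:

Dataset<Row> flat = df
    .withColumn("district", df.col("fields.district"))
    .drop("fields"); // dropping the top-level struct by name removes it
flat.printSchema(); // district is now a top-level column; fields is gone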
Use of org.apache.spark.sql.SparkSession in project net.jgp.labs.spark by jgperrin: class CustomDataSourceToDataset, method start().
private void start() {
  SparkSession spark = SparkSession.builder()
      .appName("Custom data set to Dataset")
      .master("local")
      .getOrCreate();

  String filename = "data/array-complex.json";
  long start = System.currentTimeMillis();
  Dataset<Row> df = spark.read()
      .format("net.jgp.labs.spark.x.datasource.SubStringCounterDataSource")
      .option(K.COUNT + "0", "a")     // count the number of 'a'
      .option(K.COUNT + "1", "b")     // count the number of 'b'
      .option(K.COUNT + "2", "color") // count the number of 'color'
      .load(filename);                // local file
  long stop = System.currentTimeMillis();
  log.info("Processing took {} ms", stop - start);
  df.printSchema();
  df.show();
}
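The same options can also be supplied in bulk through DataFrameReader.options(java.util.Map), which forwards every entry to the custom source. A sketch reusing the project's K.COUNT key prefix, spark session, and filename from the example above:

java.util.Map<String, String> opts = new java.util.HashMap<>();
opts.put(K.COUNT + "0", "a");     // count the number of 'a'
opts.put(K.COUNT + "1", "b");     // count the number of 'b'
opts.put(K.COUNT + "2", "color"); // count the number of 'color'

Dataset<Row> df2 = spark.read()
    .format("net.jgp.labs.spark.x.datasource.SubStringCounterDataSource")
    .options(opts) // same effect as chaining option() calls
    .load(filename);
df2.show();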