
Example 46 with SparkSession

Use of org.apache.spark.sql.SparkSession in project net.jgp.labs.spark by jgperrin.

From class ReducerApp, method start:

private void start() {
    SparkSession spark = SparkSession.builder().master("local").getOrCreate();
    // Build a Dataset<Integer> straight from a Java list
    List<Integer> data = Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
    Dataset<Integer> df = spark.createDataset(data, Encoders.INT());
    df.show();
    df.printSchema();
    // reduce() folds the dataset down to a single value using a custom ReduceFunction
    Integer sumByReduce = df.reduce(new SumByReduce());
    System.out.println("Sum should be 55 and it is... " + sumByReduce);
}
Also used: SparkSession (org.apache.spark.sql.SparkSession)
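
The SumByReduce class is not shown in this excerpt. In the Java API, Dataset.reduce() takes an org.apache.spark.api.java.function.ReduceFunction, so a minimal sketch (the class body here is an assumption, not the project's actual source) could look like:

import org.apache.spark.api.java.function.ReduceFunction;

public class SumByReduce implements ReduceFunction<Integer> {
    private static final long serialVersionUID = 1L;

    @Override
    public Integer call(Integer v1, Integer v2) throws Exception {
        // Spark calls this pairwise until a single value remains
        return v1 + v2;
    }
}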

Example 47 with SparkSession

Use of org.apache.spark.sql.SparkSession in project net.jgp.labs.spark by jgperrin.

From class BuildDataFrameFromScratch, method start:

private void start() {
    SparkSession spark = SparkSession.builder().appName("Build a DataFrame from Scratch")
        .master("local[*]").getOrCreate();
    // Start from a plain Java list with a single element
    List<String> stringAsList = new ArrayList<>();
    stringAsList.add("bar");
    // Wrap each string in a Row so a schema can be applied
    JavaSparkContext sparkContext = new JavaSparkContext(spark.sparkContext());
    JavaRDD<Row> rowRDD = sparkContext.parallelize(stringAsList)
        .map((String row) -> RowFactory.create(row));
    // Creates the schema: one non-nullable string column named "foe"
    StructType schema = DataTypes.createStructType(new StructField[] {
        DataTypes.createStructField("foe", DataTypes.StringType, false) });
    // createDataFrame() already returns a Dataset<Row>, so no extra toDF() call is needed
    Dataset<Row> df = spark.createDataFrame(rowRDD, schema);
    log.debug("** Schema: ");
    df.printSchema();
    log.debug("** Data: ");
    df.show();
    sparkContext.close();
}
Also used: SparkSession (org.apache.spark.sql.SparkSession), StructType (org.apache.spark.sql.types.StructType), ArrayList (java.util.ArrayList), JavaSparkContext (org.apache.spark.api.java.JavaSparkContext), Row (org.apache.spark.sql.Row)
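
As an aside, the RDD detour is not required for a one-column DataFrame. A sketch of a shorter route, assuming the same spark session and reusing the column name "foe" from above (note the resulting column is nullable, unlike the explicit schema):

List<String> data = Arrays.asList("bar");
// Encoders.STRING() builds a Dataset<String>; toDF() names the single column
Dataset<Row> df = spark.createDataset(data, Encoders.STRING()).toDF("foe");
df.show();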

Example 48 with SparkSession

Use of org.apache.spark.sql.SparkSession in project net.jgp.labs.spark by jgperrin.

From class IngestionJoinSave, method start:

private void start() {
    SparkSession spark = SparkSession.builder().appName("Authors and Books")
        .master("local").getOrCreate();
    String filename = "data/authors.csv";
    // Months in Java date patterns are "MM"; lowercase "mm" means minutes
    // @formatter:off
    Dataset<Row> authorsDf = spark.read().format("csv")
        .option("inferSchema", "true").option("header", "true")
        .option("dateFormat", "MM/dd/yy")
        .load(filename);
    // @formatter:on
    authorsDf.show();
    filename = "data/books.csv";
    // @formatter:off
    Dataset<Row> booksDf = spark.read().format("csv")
        .option("inferSchema", "true").option("header", "true")
        .load(filename);
    // @formatter:on
    booksDf.show();
    // Full outer join keeps authors without books and books without authors;
    // the books' "id" column becomes "bookId" to remove the name clash
    // @formatter:off
    Dataset<Row> libraryDf = authorsDf
        .join(booksDf, authorsDf.col("id").equalTo(booksDf.col("authorId")), "full_outer")
        .withColumn("bookId", booksDf.col("id"))
        .drop(booksDf.col("id"));
    // @formatter:on
    libraryDf.show();
    libraryDf.printSchema();
    libraryDf.write().json("data/library.json");
}
Also used: SparkSession (org.apache.spark.sql.SparkSession), Row (org.apache.spark.sql.Row)
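
One caveat on the final write: Spark's default save mode is ErrorIfExists, so running this twice fails once data/library.json is present. A sketch of the usual fix, using org.apache.spark.sql.SaveMode:

// Overwrite any existing output instead of failing on the second run
libraryDf.write().mode(SaveMode.Overwrite).json("data/library.json");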

Example 49 with SparkSession

Use of org.apache.spark.sql.SparkSession in project net.jgp.labs.spark by jgperrin.

From class AuthorsWithNoBooks, method start:

private void start() {
    SparkSession spark = SparkSession.builder().appName("Authors and Books")
        .master("local").getOrCreate();
    String filename = "data/authors.csv";
    // @formatter:off
    Dataset<Row> authorsDf = spark.read().format("csv")
        .option("inferSchema", "true").option("header", "true")
        .load(filename);
    // @formatter:on
    filename = "data/books.csv";
    // @formatter:off
    Dataset<Row> booksDf = spark.read().format("csv")
        .option("inferSchema", "true").option("header", "true")
        .load(filename);
    // @formatter:on
    // A left anti join keeps only the authors with no matching book;
    // columns from booksDf do not appear in the result
    Dataset<Row> libraryDf = authorsDf.join(booksDf,
        authorsDf.col("id").equalTo(booksDf.col("authorId")), "left_anti");
    libraryDf.show();
    libraryDf.printSchema();
}
Also used: SparkSession (org.apache.spark.sql.SparkSession), Row (org.apache.spark.sql.Row)
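
The same result can be expressed in Spark SQL, which some readers find easier to scan. A sketch assuming the two DataFrames above (the view names here are illustrative):

authorsDf.createOrReplaceTempView("authors");
booksDf.createOrReplaceTempView("books");
// LEFT ANTI JOIN returns only rows from "authors" with no match in "books"
Dataset<Row> authorsWithNoBooks = spark.sql(
    "SELECT a.* FROM authors a LEFT ANTI JOIN books b ON a.id = b.authorId");
authorsWithNoBooks.show();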

Example 50 with SparkSession

Use of org.apache.spark.sql.SparkSession in project net.jgp.labs.spark by jgperrin.

From class ClaimProcessApp, method start:

private void start() {
    SparkSession spark = SparkSession.builder().appName("For Each Claim")
        .master("local").getOrCreate();
    String filename = "data/claims.csv";
    Dataset<Row> claimsDf = spark.read().format("csv")
        .option("inferSchema", "true").option("header", "true")
        .load(filename);
    claimsDf.show();
    // foreach() runs the function on the executors, once per row
    claimsDf.foreach(new ClaimPrepAndProcess());
}
Also used: SparkSession (org.apache.spark.sql.SparkSession), Row (org.apache.spark.sql.Row)
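
ClaimPrepAndProcess is not shown in this excerpt. In the Java API, Dataset.foreach() takes an org.apache.spark.api.java.function.ForeachFunction, so a minimal sketch (the body is an assumption, not the project's actual source) might be:

import org.apache.spark.api.java.function.ForeachFunction;
import org.apache.spark.sql.Row;

public class ClaimPrepAndProcess implements ForeachFunction<Row> {
    private static final long serialVersionUID = 1L;

    @Override
    public void call(Row row) throws Exception {
        // Runs on the executors, once per row; keep any state serializable
        System.out.println("Processing claim: " + row.mkString(", "));
    }
}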

Aggregations

SparkSession (org.apache.spark.sql.SparkSession): 53
Row (org.apache.spark.sql.Row): 43
StructType (org.apache.spark.sql.types.StructType): 11
ArrayList (java.util.ArrayList): 6
JavaSparkContext (org.apache.spark.api.java.JavaSparkContext): 6
StructField (org.apache.spark.sql.types.StructField): 6
SparkConf (org.apache.spark.SparkConf): 4
JavaRDD (org.apache.spark.api.java.JavaRDD): 3
Script (org.apache.sysml.api.mlcontext.Script): 3
Test (org.junit.Test): 3
Dataset (org.apache.spark.sql.Dataset): 2
StreamingQuery (org.apache.spark.sql.streaming.StreamingQuery): 2
StreamingQueryException (org.apache.spark.sql.streaming.StreamingQueryException): 2
DMLScript (org.apache.sysml.api.DMLScript): 2
RUNTIME_PLATFORM (org.apache.sysml.api.DMLScript.RUNTIME_PLATFORM): 2
MLContext (org.apache.sysml.api.mlcontext.MLContext): 2
Matrix (org.apache.sysml.api.mlcontext.Matrix): 2
MatrixBlock (org.apache.sysml.runtime.matrix.data.MatrixBlock): 2
MatrixIndexes (org.apache.sysml.runtime.matrix.data.MatrixIndexes): 2
File (java.io.File): 1