Example 21 with Function

use of org.apache.spark.api.java.function.Function in project gatk by broadinstitute.

the class ExampleIntervalWalkerSpark method intervalFunction.

private static Function<IntervalWalkerContext, String> intervalFunction(List<FeatureInput<VariantContext>> auxiliaryVariants) {
    return (Function<IntervalWalkerContext, String>) context -> {
        SimpleInterval interval = context.getInterval();
        ReadsContext readsContext = context.getReadsContext();
        ReferenceContext referenceContext = context.getReferenceContext();
        FeatureContext featureContext = context.getFeatureContext();
        StringBuilder sb = new StringBuilder();
        sb.append(String.format("Current interval: " + interval));
        sb.append("\n");
        if (referenceContext.hasBackingDataSource()) {
            sb.append(String.format("\tOverlapping reference bases: %s\n\n", new String(referenceContext.getBases())));
        }
        if (readsContext.hasBackingDataSource()) {
            for (final GATKRead read : readsContext) {
                sb.append(String.format("\tOverlapping read at %s:%d-%d\n", read.getContig(), read.getStart(), read.getEnd()));
            }
            sb.append("\n");
        }
        if (featureContext.hasBackingDataSource()) {
            for (final VariantContext variant : featureContext.getValues(auxiliaryVariants)) {
                sb.append(String.format("\tOverlapping variant at %s:%d-%d. Ref: %s Alt(s): %s\n", variant.getContig(), variant.getStart(), variant.getEnd(), variant.getReference(), variant.getAlternateAlleles()));
            }
            sb.append("\n");
        }
        return sb.toString();
    };
}
Also used : GATKRead(org.broadinstitute.hellbender.utils.read.GATKRead) Function(org.apache.spark.api.java.function.Function) VariantContext(htsjdk.variant.variantcontext.VariantContext) SimpleInterval(org.broadinstitute.hellbender.utils.SimpleInterval)
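
For context, a minimal sketch (not taken from the GATK source) of how such a mapping Function is typically consumed: the per-interval contexts arrive as a JavaRDD and the Function turns each one into its text summary. The class name, the method writeIntervalSummaries, and the import path assumed for IntervalWalkerContext are illustrative only.

import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.function.Function;

// Package of IntervalWalkerContext is assumed; adjust to the actual GATK location.
import org.broadinstitute.hellbender.engine.spark.IntervalWalkerContext;

public class IntervalSummarySketch {

    // Hypothetical driver: apply a per-interval summary Function (such as the one
    // built by intervalFunction above) to an RDD of contexts and save the output.
    public static void writeIntervalSummaries(JavaRDD<IntervalWalkerContext> contexts,
                                              Function<IntervalWalkerContext, String> summarize,
                                              String outputPath) {
        contexts.map(summarize).saveAsTextFile(outputPath);
    }
}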

Example 22 with Function

use of org.apache.spark.api.java.function.Function in project incubator-systemml by apache.

the class RDDConverterUtilsExt method stringDataFrameToVectorDataFrame.

/**
 * Convert a dataframe of comma-separated string rows to a dataframe of
 * ml.linalg.Vector rows.
 *
 * <p>
 * Example input rows:<br>
 *
 * <code>
 * ((1.2, 4.3, 3.4))<br>
 * (1.2, 3.4, 2.2)<br>
 * [[1.2, 34.3, 1.2, 1.25]]<br>
 * [1.2, 3.4]<br>
 * </code>
 *
 * @param sparkSession
 *            Spark Session
 * @param inputDF
 *            dataframe of comma-separated row strings to convert to
 *            dataframe of ml.linalg.Vector rows
 * @return dataframe of ml.linalg.Vector rows
 */
public static Dataset<Row> stringDataFrameToVectorDataFrame(SparkSession sparkSession, Dataset<Row> inputDF) {
    StructField[] oldSchema = inputDF.schema().fields();
    StructField[] newSchema = new StructField[oldSchema.length];
    for (int i = 0; i < oldSchema.length; i++) {
        String colName = oldSchema[i].name();
        newSchema[i] = DataTypes.createStructField(colName, new VectorUDT(), true);
    }
    // converter
    class StringToVector implements Function<Tuple2<Row, Long>, Row> {

        private static final long serialVersionUID = -4733816995375745659L;

        @Override
        public Row call(Tuple2<Row, Long> arg0) throws Exception {
            Row oldRow = arg0._1;
            int oldNumCols = oldRow.length();
            if (oldNumCols > 1) {
                throw new DMLRuntimeException("The row must have at most one column");
            }
            // parse the various string forms, e.g.
            // ((1.2, 4.3, 3.4)) or (1.2, 3.4, 2.2)
            // [[1.2, 34.3, 1.2, 1.2]] or [1.2, 3.4]
            ArrayList<Object> fieldsArr = new ArrayList<Object>();
            for (int i = 0; i < oldRow.length(); i++) {
                Object ci = oldRow.get(i);
                if (ci == null) {
                    fieldsArr.add(null);
                } else if (ci instanceof String) {
                    String cis = (String) ci;
                    StringBuffer sb = new StringBuffer(cis.trim());
                    for (int nid = 0; i < 2; i++) {
                        // nesting
                        if ((sb.charAt(0) == '(' && sb.charAt(sb.length() - 1) == ')') || (sb.charAt(0) == '[' && sb.charAt(sb.length() - 1) == ']')) {
                            sb.deleteCharAt(0);
                            sb.setLength(sb.length() - 1);
                        }
                    }
                    // normalize separators and wrap in brackets so the parser sees an array literal
                    String ncis = "[" + sb.toString().replaceAll(" *, *", ",") + "]";
                    try {
                        // because ncis is wrapped in [ ], NumericParser.parse returns a double[]
                        double[] doubles = (double[]) NumericParser.parse(ncis);
                        Vector dense = Vectors.dense(doubles);
                        fieldsArr.add(dense);
                    } catch (Exception e) {
                        // can't catch SparkException here in Java apparently
                        throw new DMLRuntimeException("Error converting to double array. " + e.getMessage(), e);
                    }
                } else {
                    throw new DMLRuntimeException("Only String is supported");
                }
            }
            Row row = RowFactory.create(fieldsArr.toArray());
            return row;
        }
    }
    // output DF
    JavaRDD<Row> newRows = inputDF.rdd().toJavaRDD().zipWithIndex().map(new StringToVector());
    Dataset<Row> outDF = sparkSession.createDataFrame(newRows.rdd(), DataTypes.createStructType(newSchema));
    return outDF;
}
Also used : VectorUDT(org.apache.spark.ml.linalg.VectorUDT) ArrayList(java.util.ArrayList) DMLRuntimeException(org.apache.sysml.runtime.DMLRuntimeException) IOException(java.io.IOException) PairFlatMapFunction(org.apache.spark.api.java.function.PairFlatMapFunction) Function(org.apache.spark.api.java.function.Function) StructField(org.apache.spark.sql.types.StructField) Tuple2(scala.Tuple2) Row(org.apache.spark.sql.Row) Vector(org.apache.spark.ml.linalg.Vector)
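
A minimal usage sketch of the converter: build a one-column DataFrame of comma-separated strings and convert it to ml.linalg.Vector rows. The local master, the column name "raw", and the package assumed for RDDConverterUtilsExt are illustrative, not confirmed by the snippet.

import java.util.Arrays;
import java.util.List;

import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.RowFactory;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;

// Package of RDDConverterUtilsExt is assumed; adjust to the actual SystemML location.
import org.apache.sysml.runtime.instructions.spark.utils.RDDConverterUtilsExt;

public class StringToVectorSketch {

    public static void main(String[] args) {
        SparkSession spark = SparkSession.builder()
                .appName("StringToVectorSketch")
                .master("local[*]")
                .getOrCreate();

        // One string column per row, each holding a comma-separated vector literal.
        StructType schema = DataTypes.createStructType(
                new StructField[] { DataTypes.createStructField("raw", DataTypes.StringType, true) });
        List<Row> rows = Arrays.asList(
                RowFactory.create("((1.2, 4.3, 3.4))"),
                RowFactory.create("[1.2, 3.4]"));
        Dataset<Row> inputDF = spark.createDataFrame(rows, schema);

        // Convert to ml.linalg.Vector rows using the method shown above.
        Dataset<Row> vectorDF = RDDConverterUtilsExt.stringDataFrameToVectorDataFrame(spark, inputDF);
        vectorDF.show(false);

        spark.stop();
    }
}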

Example 23 with Function

use of org.apache.spark.api.java.function.Function in project java_study by aloyschen.

the class RDD method combine_key.

public void combine_key() {
    JavaSparkContext sc = getSc();
    sc.setLogLevel("ERROR");
    List<Tuple2<String, Integer>> input = new ArrayList<>();
    input.add(new Tuple2<>("Java", 1));
    input.add(new Tuple2<>("C++", 1));
    input.add(new Tuple2<>("Java", 2));
    input.add(new Tuple2<>("Java", 1));
    JavaPairRDD<String, Integer> rdd = sc.parallelizePairs(input);
    Function<Integer, AvgCount> creatAvg = (Function<Integer, AvgCount>) x -> new AvgCount(x, 1);
    Function2<AvgCount, Integer, AvgCount> addAndCount = (Function2<AvgCount, Integer, AvgCount>) (a, x) -> {
        a.total_ += x;
        a.count_ += 1;
        return a;
    };
    Function2<AvgCount, AvgCount, AvgCount> combine = (Function2<AvgCount, AvgCount, AvgCount>) (a, b) -> {
        a.count_ += b.count_;
        a.total_ += b.total_;
        return a;
    };
    AvgCount initial = new AvgCount(0, 0);
    JavaPairRDD<String, AvgCount> avgCounts = rdd.combineByKey(creatAvg, addAndCount, combine);
    Map<String, AvgCount> countMap = avgCounts.collectAsMap();
    for (Map.Entry<String, AvgCount> entry : countMap.entrySet()) {
        System.out.println(entry.getKey() + ":" + entry.getValue().avg());
    }
}
Also used : ArrayList(java.util.ArrayList) Function2(org.apache.spark.api.java.function.Function2) Function(org.apache.spark.api.java.function.Function) PairFunction(org.apache.spark.api.java.function.PairFunction) FlatMapFunction(org.apache.spark.api.java.function.FlatMapFunction) Tuple2(scala.Tuple2) JavaSparkContext(org.apache.spark.api.java.JavaSparkContext) Map(java.util.Map)
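
The AvgCount accumulator used above is not part of the snippet. A hedged sketch of its presumable shape, inferred from the fields and methods referenced (total_, count_, avg()):

import java.io.Serializable;

// Inferred per-key accumulator for combineByKey; the field and method names come
// from the snippet above, everything else is an assumption.
public class AvgCount implements Serializable {

    private static final long serialVersionUID = 1L;

    public int total_;
    public int count_;

    public AvgCount(int total, int count) {
        this.total_ = total;
        this.count_ = count;
    }

    public double avg() {
        return total_ / (double) count_;
    }
}

Because combineByKey ships the accumulator between executors, the class has to be serializable.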

Example 24 with Function

use of org.apache.spark.api.java.function.Function in project cdap by caskdata.

the class SparkCSVToSpaceProgram method run.

@Override
public void run(final JavaSparkExecutionContext sec) throws Exception {
    JavaSparkContext jsc = new JavaSparkContext();
    Map<String, String> fileSetArgs = new HashMap<>();
    final Metrics metrics = sec.getMetrics();
    FileSetArguments.addInputPath(fileSetArgs, sec.getRuntimeArguments().get("input.path"));
    JavaPairRDD<LongWritable, Text> input = sec.fromDataset(WorkflowAppWithLocalDatasets.CSV_FILESET_DATASET, fileSetArgs);
    final List<String> converted = input.values().map(new Function<Text, String>() {

        @Override
        public String call(Text input) throws Exception {
            String line = input.toString();
            metrics.count("num.lines", 1);
            return line.replaceAll(",", " ");
        }
    }).collect();
    sec.execute(new TxRunnable() {

        @Override
        public void run(DatasetContext context) throws Exception {
            Map<String, String> args = sec.getRuntimeArguments();
            String outputPath = args.get("output.path");
            Map<String, String> fileSetArgs = new HashMap<>();
            FileSetArguments.setOutputPath(fileSetArgs, outputPath);
            FileSet fileSet = context.getDataset(WorkflowAppWithLocalDatasets.CSV_FILESET_DATASET, fileSetArgs);
            try (PrintWriter writer = new PrintWriter(fileSet.getOutputLocation().getOutputStream())) {
                for (String line : converted) {
                    writer.write(line);
                    writer.println();
                }
            }
        }
    });
}
Also used : FileSet(co.cask.cdap.api.dataset.lib.FileSet) HashMap(java.util.HashMap) Text(org.apache.hadoop.io.Text) Function(org.apache.spark.api.java.function.Function) Metrics(co.cask.cdap.api.metrics.Metrics) TxRunnable(co.cask.cdap.api.TxRunnable) JavaSparkContext(org.apache.spark.api.java.JavaSparkContext) LongWritable(org.apache.hadoop.io.LongWritable) DatasetContext(co.cask.cdap.api.data.DatasetContext) Map(java.util.Map) PrintWriter(java.io.PrintWriter)
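
Stripped of the CDAP plumbing, the core of this program is a single map with a Function over the CSV lines. A minimal plain-Spark sketch of that transformation; the input path and local master are illustrative assumptions:

import java.util.List;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;

public class CsvToSpaceSketch {

    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setAppName("CsvToSpaceSketch").setMaster("local[*]");
        try (JavaSparkContext jsc = new JavaSparkContext(conf)) {
            // Hypothetical input location; each line is a comma-separated record.
            JavaRDD<String> lines = jsc.textFile("input/csv");
            List<String> converted = lines.map(new Function<String, String>() {

                @Override
                public String call(String line) {
                    return line.replaceAll(",", " ");
                }
            }).collect();
            converted.forEach(System.out::println);
        }
    }
}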

Example 25 with Function

use of org.apache.spark.api.java.function.Function in project cdap by caskdata.

the class SparkPageRankProgram method run.

@Override
public void run(JavaSparkExecutionContext sec) throws Exception {
    JavaSparkContext jsc = new JavaSparkContext();
    LOG.info("Processing backlinkURLs data");
    JavaPairRDD<Long, String> backlinkURLs = sec.fromStream("backlinkURLStream", String.class);
    int iterationCount = getIterationCount(sec);
    LOG.info("Grouping data by key");
    // Grouping backlinks by unique URL in key
    JavaPairRDD<String, Iterable<String>> links = backlinkURLs.values().mapToPair(new PairFunction<String, String, String>() {

        @Override
        public Tuple2<String, String> call(String s) {
            String[] parts = SPACES.split(s);
            return new Tuple2<>(parts[0], parts[1]);
        }
    }).distinct().groupByKey().cache();
    // Initialize default rank for each key URL
    JavaPairRDD<String, Double> ranks = links.mapValues(new Function<Iterable<String>, Double>() {

        @Override
        public Double call(Iterable<String> rs) {
            return 1.0;
        }
    });
    // Calculates and updates URL ranks continuously using PageRank algorithm.
    for (int current = 0; current < iterationCount; current++) {
        LOG.debug("Processing data with PageRank algorithm. Iteration {}/{}", current + 1, (iterationCount));
        // Calculates URL contributions to the rank of other URLs.
        JavaPairRDD<String, Double> contribs = links.join(ranks).values().flatMapToPair(new PairFlatMapFunction<Tuple2<Iterable<String>, Double>, String, Double>() {

            @Override
            public Iterable<Tuple2<String, Double>> call(Tuple2<Iterable<String>, Double> s) {
                LOG.debug("Processing {} with rank {}", s._1(), s._2());
                int urlCount = Iterables.size(s._1());
                List<Tuple2<String, Double>> results = new ArrayList<>();
                for (String n : s._1()) {
                    results.add(new Tuple2<>(n, s._2() / urlCount));
                }
                return results;
            }
        });
        // Re-calculates URL ranks based on backlink contributions.
        ranks = contribs.reduceByKey(new Sum()).mapValues(new Function<Double, Double>() {

            @Override
            public Double call(Double sum) {
                return 0.15 + sum * 0.85;
            }
        });
    }
    LOG.info("Writing ranks data");
    final ServiceDiscoverer discoveryServiceContext = sec.getServiceDiscoverer();
    final Metrics sparkMetrics = sec.getMetrics();
    JavaPairRDD<byte[], Integer> ranksRaw = ranks.mapToPair(new PairFunction<Tuple2<String, Double>, byte[], Integer>() {

        @Override
        public Tuple2<byte[], Integer> call(Tuple2<String, Double> tuple) throws Exception {
            LOG.debug("URL {} has rank {}", Arrays.toString(tuple._1().getBytes(Charsets.UTF_8)), tuple._2());
            URL serviceURL = discoveryServiceContext.getServiceURL(SparkPageRankApp.SERVICE_HANDLERS);
            if (serviceURL == null) {
                throw new RuntimeException("Failed to discover service: " + SparkPageRankApp.SERVICE_HANDLERS);
            }
            try {
                URLConnection connection = new URL(serviceURL, String.format("%s/%s", SparkPageRankApp.SparkPageRankServiceHandler.TRANSFORM_PATH, tuple._2().toString())).openConnection();
                try (BufferedReader reader = new BufferedReader(new InputStreamReader(connection.getInputStream(), Charsets.UTF_8))) {
                    String pr = reader.readLine();
                    if ((Integer.parseInt(pr)) == POPULAR_PAGE_THRESHOLD) {
                        sparkMetrics.count(POPULAR_PAGES, 1);
                    } else if (Integer.parseInt(pr) <= UNPOPULAR_PAGE_THRESHOLD) {
                        sparkMetrics.count(UNPOPULAR_PAGES, 1);
                    } else {
                        sparkMetrics.count(REGULAR_PAGES, 1);
                    }
                    return new Tuple2<>(tuple._1().getBytes(Charsets.UTF_8), Integer.parseInt(pr));
                }
            } catch (Exception e) {
                LOG.warn("Failed to read the Stream for service {}", SparkPageRankApp.SERVICE_HANDLERS, e);
                throw Throwables.propagate(e);
            }
        }
    });
    // Store calculated results in output Dataset.
    // All calculated results are stored in one row.
    // Each result, the calculated URL rank based on backlink contributions, is an entry of the row.
    // The value of the entry is the URL rank.
    sec.saveAsDataset(ranksRaw, "ranks");
    LOG.info("PageRanks successfuly computed and written to \"ranks\" dataset");
}
Also used : URL(java.net.URL) PairFlatMapFunction(org.apache.spark.api.java.function.PairFlatMapFunction) Function(org.apache.spark.api.java.function.Function) PairFunction(org.apache.spark.api.java.function.PairFunction) Metrics(co.cask.cdap.api.metrics.Metrics) ArrayList(java.util.ArrayList) List(java.util.List) JavaSparkContext(org.apache.spark.api.java.JavaSparkContext) ServiceDiscoverer(co.cask.cdap.api.ServiceDiscoverer) InputStreamReader(java.io.InputStreamReader) URLConnection(java.net.URLConnection) Tuple2(scala.Tuple2) BufferedReader(java.io.BufferedReader)
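
The Sum class passed to reduceByKey(...) is not shown in the snippet; from its use it has to be a Function2<Double, Double, Double> that adds two partial rank contributions. A hedged sketch:

import org.apache.spark.api.java.function.Function2;

// Presumed reducer that sums the per-URL rank contributions before the damping step.
public class Sum implements Function2<Double, Double, Double> {

    private static final long serialVersionUID = 1L;

    @Override
    public Double call(Double a, Double b) {
        return a + b;
    }
}

The subsequent mapValues step, 0.15 + sum * 0.85, is the standard PageRank damping update rank = (1 - d) + d * (sum of contributions) with d = 0.85.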

Aggregations

Function (org.apache.spark.api.java.function.Function) 30
Tuple2 (scala.Tuple2) 17
JavaSparkContext (org.apache.spark.api.java.JavaSparkContext) 15
ArrayList (java.util.ArrayList) 11
PairFunction (org.apache.spark.api.java.function.PairFunction) 9
JavaRDD (org.apache.spark.api.java.JavaRDD) 8
List (java.util.List) 7
SparkConf (org.apache.spark.SparkConf) 6
JavaPairRDD (org.apache.spark.api.java.JavaPairRDD) 6
VoidFunction (org.apache.spark.api.java.function.VoidFunction) 5
JavaStreamingContext (org.apache.spark.streaming.api.java.JavaStreamingContext) 5
IOException (java.io.IOException) 4
FlatMapFunction (org.apache.spark.api.java.function.FlatMapFunction) 4
WindowedValue (com.google.cloud.dataflow.sdk.util.WindowedValue) 3
VariantContext (htsjdk.variant.variantcontext.VariantContext) 3
HashMap (java.util.HashMap) 3
Map (java.util.Map) 3
Collectors (java.util.stream.Collectors) 3
Function2 (org.apache.spark.api.java.function.Function2) 3
PairFlatMapFunction (org.apache.spark.api.java.function.PairFlatMapFunction) 3