
Example 66 with JavaSparkContext

Use of org.apache.spark.api.java.JavaSparkContext in project cdap by caskdata.

The class NaiveBayesTrainer, method run.

@Override
public void run(SparkExecutionPluginContext sparkContext, JavaRDD<StructuredRecord> input) throws Exception {
    Preconditions.checkArgument(input.count() != 0, "Input RDD is empty.");
    final HashingTF tf = new HashingTF(100);
    JavaRDD<LabeledPoint> trainingData = input.map(new Function<StructuredRecord, LabeledPoint>() {

        @Override
        public LabeledPoint call(StructuredRecord record) throws Exception {
            // should never happen, here to test app correctness in unit tests
            if (inputSchema != null && !inputSchema.equals(record.getSchema())) {
                throw new IllegalStateException("runtime schema does not match what was set at configure time.");
            }
            String text = record.get(config.fieldToClassify);
            return new LabeledPoint((Double) record.get(config.predictionField), tf.transform(Lists.newArrayList(text.split(" "))));
        }
    });
    trainingData.cache();
    final NaiveBayesModel model = NaiveBayes.train(trainingData.rdd(), 1.0);
    // save the model to a file in the output FileSet
    JavaSparkContext javaSparkContext = sparkContext.getSparkContext();
    FileSet outputFS = sparkContext.getDataset(config.fileSetName);
    model.save(JavaSparkContext.toSparkContext(javaSparkContext), outputFS.getBaseLocation().append(config.path).toURI().getPath());
    JavaPairRDD<Long, String> textsToClassify = sparkContext.fromStream(TEXTS_TO_CLASSIFY, String.class);
    JavaRDD<Vector> featuresToClassify = textsToClassify.map(new Function<Tuple2<Long, String>, Vector>() {

        @Override
        public Vector call(Tuple2<Long, String> longWritableTextTuple2) throws Exception {
            String text = longWritableTextTuple2._2();
            return tf.transform(Lists.newArrayList(text.split(" ")));
        }
    });
    JavaRDD<Double> predict = model.predict(featuresToClassify);
    LOG.info("Predictions: {}", predict.collect());
    // key the predictions with the message
    JavaPairRDD<String, Double> keyedPredictions = textsToClassify.values().zip(predict);
    // convert to byte[] key/value pairs so they can be written to the dataset
    JavaPairRDD<byte[], byte[]> bytesRDD = keyedPredictions.mapToPair(new PairFunction<Tuple2<String, Double>, byte[], byte[]>() {

        @Override
        public Tuple2<byte[], byte[]> call(Tuple2<String, Double> tuple) throws Exception {
            return new Tuple2<>(Bytes.toBytes(tuple._1()), Bytes.toBytes(tuple._2()));
        }
    });
    sparkContext.saveAsDataset(bytesRDD, CLASSIFIED_TEXTS);
}
Also used : LabeledPoint(org.apache.spark.mllib.regression.LabeledPoint) NaiveBayesModel(org.apache.spark.mllib.classification.NaiveBayesModel) StructuredRecord(co.cask.cdap.api.data.format.StructuredRecord) HashingTF(org.apache.spark.mllib.feature.HashingTF) JavaSparkContext(org.apache.spark.api.java.JavaSparkContext) Vector(org.apache.spark.mllib.linalg.Vector) FileSet(co.cask.cdap.api.dataset.lib.FileSet) Tuple2(scala.Tuple2)
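
For orientation, the following is a minimal standalone sketch of the same MLlib pattern used above (HashingTF featurization, NaiveBayes training with a smoothing parameter of 1.0, prediction) outside the CDAP plugin API. The class name, the training sentences, the labels, and the local[*] master are illustrative assumptions, not code from the cdap project.

import java.util.Arrays;
import java.util.List;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.mllib.classification.NaiveBayes;
import org.apache.spark.mllib.classification.NaiveBayesModel;
import org.apache.spark.mllib.feature.HashingTF;
import org.apache.spark.mllib.linalg.Vector;
import org.apache.spark.mllib.regression.LabeledPoint;

public class NaiveBayesSketch {

    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setAppName("naive-bayes-sketch").setMaster("local[*]");
        try (JavaSparkContext jsc = new JavaSparkContext(conf)) {
            // hash each tokenized message into a 100-dimensional term-frequency vector, as in the plugin
            HashingTF tf = new HashingTF(100);
            // hypothetical training data: label 1.0 for spam-like text, 0.0 otherwise
            List<LabeledPoint> training = Arrays.asList(
                new LabeledPoint(1.0, tf.transform(Arrays.asList("win money now".split(" ")))),
                new LabeledPoint(0.0, tf.transform(Arrays.asList("see you at lunch".split(" ")))));
            NaiveBayesModel model = NaiveBayes.train(jsc.parallelize(training).rdd(), 1.0);
            // featurize unseen texts the same way and predict their labels
            JavaRDD<Vector> toClassify = jsc
                .parallelize(Arrays.asList("free money", "lunch tomorrow"))
                .map(text -> tf.transform(Arrays.asList(text.split(" "))));
            System.out.println(model.predict(toClassify).collect());
        }
    }
}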

Example 67 with JavaSparkContext

Use of org.apache.spark.api.java.JavaSparkContext in project cdap by caskdata.

The class FakeSpark, method run.

@Override
public void run(JavaSparkExecutionContext sec) throws Exception {
    JavaSparkContext jsc = new JavaSparkContext();
    LOG.info("HelloFakeSpark");
    List<Integer> data = Arrays.asList(1, 2, 3, 4, 5);
    LOG.info("Collected: {}", jsc.parallelize(data).collect());
}
Also used : JavaSparkContext(org.apache.spark.api.java.JavaSparkContext)

Example 68 with JavaSparkContext

Use of org.apache.spark.api.java.JavaSparkContext in project gatk by broadinstitute.

The class CountBasesSpark, method runTool.

@Override
protected void runTool(final JavaSparkContext ctx) {
    final JavaRDD<GATKRead> reads = getReads();
    final long count = reads.map(r -> (long) r.getLength()).reduce(Long::sum);
    System.out.println(count);
    if (out != null) {
        try (final PrintStream ps = new PrintStream(BucketUtils.createFile(out))) {
            ps.print(count);
        }
    }
}
Also used : GATKRead(org.broadinstitute.hellbender.utils.read.GATKRead) DocumentedFeature(org.broadinstitute.barclay.help.DocumentedFeature) PrintStream(java.io.PrintStream) CommandLineProgramProperties(org.broadinstitute.barclay.argparser.CommandLineProgramProperties) BucketUtils(org.broadinstitute.hellbender.utils.gcs.BucketUtils) SparkProgramGroup(org.broadinstitute.hellbender.cmdline.programgroups.SparkProgramGroup) Argument(org.broadinstitute.barclay.argparser.Argument) JavaSparkContext(org.apache.spark.api.java.JavaSparkContext) GATKSparkTool(org.broadinstitute.hellbender.engine.spark.GATKSparkTool) StandardArgumentDefinitions(org.broadinstitute.hellbender.cmdline.StandardArgumentDefinitions) JavaRDD(org.apache.spark.api.java.JavaRDD)
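
The core of runTool is a generic Spark aggregation: map every record to a long and fold the values with Long::sum. Below is a self-contained sketch of the same idiom on plain strings, with made-up data, a hypothetical class name, and a local master, independent of GATK's read types.

import java.util.Arrays;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;

public class CountCharsSketch {

    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setAppName("count-chars-sketch").setMaster("local[*]");
        try (JavaSparkContext ctx = new JavaSparkContext(conf)) {
            JavaRDD<String> records = ctx.parallelize(Arrays.asList("ACGT", "ACGTACGT", "GG"));
            // map each record to its length, then sum, mirroring reads.map(r -> (long) r.getLength()).reduce(Long::sum)
            long total = records.map(s -> (long) s.length()).reduce(Long::sum);
            System.out.println(total); // prints 14
        }
    }
}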

Example 69 with JavaSparkContext

Use of org.apache.spark.api.java.JavaSparkContext in project gatk by broadinstitute.

The class CollectMultipleMetricsSpark, method runTool.

@Override
protected void runTool(final JavaSparkContext ctx) {
    final JavaRDD<GATKRead> unFilteredReads = getUnfilteredReads();
    List<SparkCollectorProvider> collectorsToRun = getCollectorsToRun();
    if (collectorsToRun.size() > 1) {
        // if there is more than one collector to run, cache the
        // unfiltered RDD so we don't recompute it
        unFilteredReads.cache();
    }
    for (final SparkCollectorProvider provider : collectorsToRun) {
        MetricsCollectorSpark<? extends MetricsArgumentCollection> metricsCollector = provider.createCollector(outputBaseName, metricAccumulationLevel.accumulationLevels, getDefaultHeaders(), getHeaderForReads());
        validateCollector(metricsCollector, collectorsToRun.get(collectorsToRun.indexOf(provider)).getClass().getName());
        // Execute the collector's lifecycle
        //Bypass the framework merging of command line filters and just apply the default
        //ones specified by the collector
        ReadFilter readFilter = ReadFilter.fromList(metricsCollector.getDefaultReadFilters(), getHeaderForReads());
        metricsCollector.collectMetrics(unFilteredReads.filter(r -> readFilter.test(r)), getHeaderForReads());
        metricsCollector.saveMetrics(getReadSourceName(), getAuthHolder());
    }
}
Also used : GATKRead(org.broadinstitute.hellbender.utils.read.GATKRead) DocumentedFeature(org.broadinstitute.barclay.help.DocumentedFeature) CommandLineProgramProperties(org.broadinstitute.barclay.argparser.CommandLineProgramProperties) java.util(java.util) SparkProgramGroup(org.broadinstitute.hellbender.cmdline.programgroups.SparkProgramGroup) Header(htsjdk.samtools.metrics.Header) Argument(org.broadinstitute.barclay.argparser.Argument) JavaSparkContext(org.apache.spark.api.java.JavaSparkContext) GATKSparkTool(org.broadinstitute.hellbender.engine.spark.GATKSparkTool) StandardArgumentDefinitions(org.broadinstitute.hellbender.cmdline.StandardArgumentDefinitions) ArgumentCollection(org.broadinstitute.barclay.argparser.ArgumentCollection) ReadFilter(org.broadinstitute.hellbender.engine.filters.ReadFilter) SAMFileHeader(htsjdk.samtools.SAMFileHeader) ReadUtils(org.broadinstitute.hellbender.utils.read.ReadUtils) org.broadinstitute.hellbender.metrics(org.broadinstitute.hellbender.metrics) MetricAccumulationLevelArgumentCollection(org.broadinstitute.hellbender.cmdline.argumentcollections.MetricAccumulationLevelArgumentCollection) JavaRDD(org.apache.spark.api.java.JavaRDD)
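
The cache() call above is the standard Spark idiom for an RDD that more than one action will traverse. A minimal generic sketch of that idiom, with hypothetical data and class name and a local master, separate from the GATK collector framework:

import java.util.Arrays;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;

public class CacheForMultiplePassesSketch {

    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setAppName("cache-sketch").setMaster("local[*]");
        try (JavaSparkContext ctx = new JavaSparkContext(conf)) {
            JavaRDD<Integer> numbers = ctx.parallelize(Arrays.asList(1, 2, 3, 4, 5));
            // persist once, because the two separate count() actions below would otherwise recompute the RDD
            numbers.cache();
            long evens = numbers.filter(n -> n % 2 == 0).count();
            long odds = numbers.filter(n -> n % 2 != 0).count();
            System.out.println(evens + " even, " + odds + " odd");
        }
    }
}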

Example 70 with JavaSparkContext

Use of org.apache.spark.api.java.JavaSparkContext in project gatk-protected by broadinstitute.

The class HDF5PCACoveragePoNCreationUtilsUnitTest, method testCalculateReducedPanelAndPInversesKeepingHalfOfAllColumns.

@Test(dataProvider = "readCountOnlyWithDiverseShapeData")
public void testCalculateReducedPanelAndPInversesKeepingHalfOfAllColumns(final ReadCountCollection readCounts) {
    final JavaSparkContext ctx = SparkContextFactory.getTestSparkContext();
    final ReductionResult result = HDF5PCACoveragePoNCreationUtils.calculateReducedPanelAndPInverses(readCounts, OptionalInt.of(readCounts.columnNames().size() / 2), NULL_LOGGER, ctx);
    final RealMatrix counts = readCounts.counts();
    Assert.assertNotNull(result);
    Assert.assertNotNull(result.getPseudoInverse());
    Assert.assertNotNull(result.getReducedCounts());
    Assert.assertNotNull(result.getReducedPseudoInverse());
    Assert.assertNotNull(result.getAllSingularValues());
    Assert.assertEquals(counts.getColumnDimension(), result.getAllSingularValues().length);
    Assert.assertEquals(result.getReducedCounts().getRowDimension(), counts.getRowDimension());
    Assert.assertEquals(result.getReducedCounts().getColumnDimension(), readCounts.columnNames().size() / 2);
    final int eigensamples = result.getReducedCounts().getColumnDimension();
    Assert.assertEquals(eigensamples, readCounts.columnNames().size() / 2);
    assertPseudoInverse(counts, result.getPseudoInverse());
    assertPseudoInverse(result.getReducedCounts(), result.getReducedPseudoInverse());
}
Also used : Array2DRowRealMatrix(org.apache.commons.math3.linear.Array2DRowRealMatrix) RealMatrix(org.apache.commons.math3.linear.RealMatrix) JavaSparkContext(org.apache.spark.api.java.JavaSparkContext) BaseTest(org.broadinstitute.hellbender.utils.test.BaseTest) Test(org.testng.annotations.Test)
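
The test above obtains a shared local context from GATK's SparkContextFactory.getTestSparkContext(). In a project without that helper, the usual equivalent is a local-mode JavaSparkContext created and stopped by the test class itself; the sketch below assumes that setup and uses TestNG as in the example (class name and assertions are illustrative).

import java.util.Arrays;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;

public class LocalSparkContextTest {

    private JavaSparkContext ctx;

    @BeforeClass
    public void setUp() {
        // one local[2] context shared by every test in the class, mirroring getTestSparkContext()
        ctx = new JavaSparkContext(new SparkConf().setAppName("unit-test").setMaster("local[2]"));
    }

    @AfterClass
    public void tearDown() {
        ctx.stop();
    }

    @Test
    public void testSum() {
        Assert.assertEquals(ctx.parallelize(Arrays.asList(1, 2, 3)).reduce(Integer::sum).intValue(), 6);
    }
}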

Aggregations

Classes most often used together with JavaSparkContext across the indexed sources, with usage counts:

JavaSparkContext (org.apache.spark.api.java.JavaSparkContext): 260
Test (org.testng.annotations.Test): 65
BaseTest (org.broadinstitute.hellbender.utils.test.BaseTest): 64
SparkConf (org.apache.spark.SparkConf): 49
Tuple2 (scala.Tuple2): 48
ArrayList (java.util.ArrayList): 45
Test (org.junit.Test): 43
GATKRead (org.broadinstitute.hellbender.utils.read.GATKRead): 32
List (java.util.List): 28
Configuration (org.apache.hadoop.conf.Configuration): 24
JavaRDD (org.apache.spark.api.java.JavaRDD): 24
File (java.io.File): 23
SimpleInterval (org.broadinstitute.hellbender.utils.SimpleInterval): 20
Collectors (java.util.stream.Collectors): 16
TextPipeline (org.deeplearning4j.spark.text.functions.TextPipeline): 15
DataSet (org.nd4j.linalg.dataset.DataSet): 15
IOException (java.io.IOException): 14
SAMFileHeader (htsjdk.samtools.SAMFileHeader): 12
HashSet (java.util.HashSet): 12
RealMatrix (org.apache.commons.math3.linear.RealMatrix): 12