Use of scala.Tuple2 in project gatk-protected by broadinstitute.
The class CoverageModelEMWorkspace, method updateCopyRatioPosteriorExpectationsSpark:
/**
* The Spark implementation of the E-step update of copy ratio posteriors
*
* @return a {@link SubroutineSignal} containing the update size
*/
@EvaluatesRDD
@UpdatesRDD
@CachesRDD
private SubroutineSignal updateCopyRatioPosteriorExpectationsSpark(final double admixingRatio) {
/* local final member variables for lambda capture */
final List<LinearlySpacedIndexBlock> targetBlocks = new ArrayList<>();
targetBlocks.addAll(this.targetBlocks);
final List<Target> targetList = new ArrayList<>();
targetList.addAll(processedTargetList);
final List<String> sampleNameList = new ArrayList<>();
sampleNameList.addAll(processedSampleNameList);
final List<SexGenotypeData> sampleSexGenotypeData = new ArrayList<>();
sampleSexGenotypeData.addAll(processedSampleSexGenotypeData);
final int numTargetBlocks = targetBlocks.size();
final CopyRatioExpectationsCalculator<CoverageModelCopyRatioEmissionData, STATE> calculator = this.copyRatioExpectationsCalculator;
final INDArray sampleReadDepths = Transforms.exp(sampleMeanLogReadDepths, true);
/* make an RDD of copy ratio posterior expectations */
final JavaPairRDD<Integer, CopyRatioExpectations> copyRatioPosteriorExpectationsPairRDD = /* fetch copy ratio emission data from workers */
fetchCopyRatioEmissionDataSpark().mapPartitionsToPair(it -> {
final List<Tuple2<Integer, CopyRatioExpectations>> newPartitionData = new ArrayList<>();
while (it.hasNext()) {
final Tuple2<Integer, List<CoverageModelCopyRatioEmissionData>> prevDatum = it.next();
final int si = prevDatum._1;
final CopyRatioCallingMetadata copyRatioCallingMetadata = CopyRatioCallingMetadata.builder()
    .sampleName(sampleNameList.get(si))
    .sampleSexGenotypeData(sampleSexGenotypeData.get(si))
    .sampleCoverageDepth(sampleReadDepths.getDouble(si))
    .emissionCalculationStrategy(EmissionCalculationStrategy.HYBRID_POISSON_GAUSSIAN)
    .build();
newPartitionData.add(new Tuple2<>(prevDatum._1, calculator.getCopyRatioPosteriorExpectations(copyRatioCallingMetadata, targetList, prevDatum._2)));
}
return newPartitionData.iterator();
}, true);
/* we need to do two things with copyRatioPosteriorExpectationsPairRDD, so we cache it */
copyRatioPosteriorExpectationsPairRDD.cache();
/* step 1. update log chain posterior expectation on the driver node */
final double[] newSampleLogChainPosteriors = copyRatioPosteriorExpectationsPairRDD
    .mapValues(CopyRatioExpectations::getLogChainPosteriorProbability)
    .collect().stream()
    .sorted(Comparator.comparingInt(t -> t._1))
    .mapToDouble(t -> t._2)
    .toArray();
sampleLogChainPosteriors.assign(Nd4j.create(newSampleLogChainPosteriors, new int[] { numSamples, 1 }));
/* step 2. repartition in target space */
final JavaPairRDD<LinearlySpacedIndexBlock, ImmutablePair<INDArray, INDArray>> blockifiedCopyRatioPosteriorResultsPairRDD =
    copyRatioPosteriorExpectationsPairRDD
        .flatMapToPair(dat -> targetBlocks.stream()
            .map(tb -> new Tuple2<>(tb, new Tuple2<>(dat._1, ImmutablePair.of(dat._2.getLogCopyRatioMeans(tb), dat._2.getLogCopyRatioVariances(tb)))))
            .iterator())
        .combineByKey(
            /* recipe to create a singleton list */
            Collections::singletonList,
            /* recipe to add an element to the list */
            (list, element) -> Stream.concat(list.stream(), Stream.of(element)).collect(Collectors.toList()),
            /* recipe to concatenate two lists */
            (list1, list2) -> Stream.concat(list1.stream(), list2.stream()).collect(Collectors.toList()),
            /* repartition with respect to target space blocks */
            new HashPartitioner(numTargetBlocks))
        .mapValues(list -> list.stream()
            .sorted(Comparator.comparingInt(t -> t._1))
            .map(p -> p._2)
            .map(t -> ImmutablePair.of(Nd4j.create(t.left), Nd4j.create(t.right)))
            .collect(Collectors.toList()))
        .mapValues(CoverageModelEMWorkspace::stackCopyRatioPosteriorDataForAllSamples);
/* we do not need copy ratio expectations anymore */
copyRatioPosteriorExpectationsPairRDD.unpersist();
/* step 3. merge with computeRDD and update */
computeRDD = computeRDD.join(blockifiedCopyRatioPosteriorResultsPairRDD).mapValues(t -> t._1.cloneWithUpdatedCopyRatioPosteriors(t._2.left, t._2.right, admixingRatio));
cacheWorkers("after E-step for copy ratio update");
/* collect subroutine signals */
final List<SubroutineSignal> sigs = mapWorkersAndCollect(CoverageModelEMComputeBlock::getLatestMStepSignal);
final double errorNormInfinity = Collections.max(sigs.stream()
    .map(sig -> sig.<Double>get(StandardSubroutineSignals.RESIDUAL_ERROR_NORM))
    .collect(Collectors.toList()));
return SubroutineSignal.builder().put(StandardSubroutineSignals.RESIDUAL_ERROR_NORM, errorNormInfinity).build();
}
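The flatMapToPair/combineByKey statement above packs the whole regrouping into one expression. As a minimal, self-contained sketch of the same Tuple2 regrouping idiom (class and variable names are illustrative, not taken from gatk-protected), the helper below regroups (blockIndex, (sampleIndex, value)) pairs by block and sorts each block's values by sample index:

import org.apache.spark.HashPartitioner;
import org.apache.spark.api.java.JavaPairRDD;
import scala.Tuple2;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

class Tuple2RegroupSketch {
    /* regroup (blockIndex, (sampleIndex, value)) pairs by block index */
    static JavaPairRDD<Integer, List<Double>> regroupByBlock(
            JavaPairRDD<Integer, Tuple2<Integer, Double>> perSampleData, int numBlocks) {
        return perSampleData
                .combineByKey(
                        Collections::singletonList,                        /* create a singleton list */
                        (list, el) -> Stream.concat(list.stream(), Stream.of(el))
                                .collect(Collectors.toList()),             /* append one element */
                        (l1, l2) -> Stream.concat(l1.stream(), l2.stream())
                                .collect(Collectors.toList()),             /* merge two lists */
                        new HashPartitioner(numBlocks))                    /* repartition by block */
                .mapValues(list -> list.stream()
                        .sorted(Comparator.comparingInt(t -> t._1()))      /* order by sample index */
                        .map(Tuple2::_2)
                        .collect(Collectors.toList()));
    }
}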
Use of scala.Tuple2 in project cdap by caskdata.
The class NaiveBayesTrainer, method run:
@Override
public void run(SparkExecutionPluginContext sparkContext, JavaRDD<StructuredRecord> input) throws Exception {
Preconditions.checkArgument(input.count() != 0, "Input RDD is empty.");
final HashingTF tf = new HashingTF(100);
JavaRDD<LabeledPoint> trainingData = input.map(new Function<StructuredRecord, LabeledPoint>() {
@Override
public LabeledPoint call(StructuredRecord record) throws Exception {
// should never happen, here to test app correctness in unit tests
if (inputSchema != null && !inputSchema.equals(record.getSchema())) {
throw new IllegalStateException("runtime schema does not match what was set at configure time.");
}
String text = record.get(config.fieldToClassify);
return new LabeledPoint((Double) record.get(config.predictionField), tf.transform(Lists.newArrayList(text.split(" "))));
}
});
trainingData.cache();
final NaiveBayesModel model = NaiveBayes.train(trainingData.rdd(), 1.0);
// save the model to a file in the output FileSet
JavaSparkContext javaSparkContext = sparkContext.getSparkContext();
FileSet outputFS = sparkContext.getDataset(config.fileSetName);
model.save(JavaSparkContext.toSparkContext(javaSparkContext), outputFS.getBaseLocation().append(config.path).toURI().getPath());
JavaPairRDD<Long, String> textsToClassify = sparkContext.fromStream(TEXTS_TO_CLASSIFY, String.class);
JavaRDD<Vector> featuresToClassify = textsToClassify.map(new Function<Tuple2<Long, String>, Vector>() {
@Override
public Vector call(Tuple2<Long, String> longWritableTextTuple2) throws Exception {
String text = longWritableTextTuple2._2();
return tf.transform(Lists.newArrayList(text.split(" ")));
}
});
JavaRDD<Double> predict = model.predict(featuresToClassify);
LOG.info("Predictions: {}", predict.collect());
// key the predictions with the message
JavaPairRDD<String, Double> keyedPredictions = textsToClassify.values().zip(predict);
// convert to byte[],byte[] to write to data
JavaPairRDD<byte[], byte[]> bytesRDD = keyedPredictions.mapToPair(new PairFunction<Tuple2<String, Double>, byte[], byte[]>() {
@Override
public Tuple2<byte[], byte[]> call(Tuple2<String, Double> tuple) throws Exception {
return new Tuple2<>(Bytes.toBytes(tuple._1()), Bytes.toBytes(tuple._2()));
}
});
sparkContext.saveAsDataset(bytesRDD, CLASSIFIED_TEXTS);
}
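The last two transformations above (zip, then mapToPair into byte[] pairs) are the Tuple2-heavy part. A minimal sketch of the same pattern with Java 8 lambdas, using illustrative names that are not part of the CDAP plugin API and plain UTF-8 encoding instead of the Bytes helper:

import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import scala.Tuple2;
import java.nio.charset.StandardCharsets;

class KeyedPredictionSketch {
    /* pair each input text with its predicted label and serialize both sides to byte[] */
    static JavaPairRDD<byte[], byte[]> keyPredictions(JavaRDD<String> texts, JavaRDD<Double> predictions) {
        /* zip requires both RDDs to have the same number of partitions and elements per partition,
           which holds when the predictions were derived from the texts by map-style transformations */
        JavaPairRDD<String, Double> keyed = texts.zip(predictions);
        return keyed.mapToPair(t -> new Tuple2<>(
                t._1().getBytes(StandardCharsets.UTF_8),
                Double.toString(t._2()).getBytes(StandardCharsets.UTF_8)));
    }
}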
Use of scala.Tuple2 in project cdap by caskdata.
The class WordCountSink, method run:
@Override
public void run(SparkExecutionPluginContext sparkExecutionPluginContext, JavaRDD<StructuredRecord> javaRDD) throws Exception {
WordCount wordCount = new WordCount(config.field);
JavaPairRDD<byte[], byte[]> outputRDD = wordCount.countWords(javaRDD).mapToPair(new PairFunction<Tuple2<String, Long>, byte[], byte[]>() {
@Override
public Tuple2<byte[], byte[]> call(Tuple2<String, Long> stringLongTuple2) throws Exception {
return new Tuple2<>(Bytes.toBytes(stringLongTuple2._1()), Bytes.toBytes(stringLongTuple2._2()));
}
});
sparkExecutionPluginContext.saveAsDataset(outputRDD, config.tableName);
}
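The WordCount helper's countWords is not shown here. A rough sketch of a comparable word count that yields the Tuple2<String, Long> pairs consumed above (assuming the Spark 2.x Java API, where FlatMapFunction returns an Iterator; names are illustrative):

import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import scala.Tuple2;
import java.util.Arrays;

class WordCountSketch {
    /* count whitespace-separated words, yielding (word, count) pairs */
    static JavaPairRDD<String, Long> countWords(JavaRDD<String> lines) {
        return lines
                .flatMap(line -> Arrays.asList(line.split("\\s+")).iterator())
                .mapToPair(word -> new Tuple2<>(word, 1L))
                .reduceByKey(Long::sum);
    }
}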
Use of scala.Tuple2 in project cdap by caskdata.
The class SparkPipelineRunner, method runPipeline:
public void runPipeline(PipelinePhase pipelinePhase, String sourcePluginType, JavaSparkExecutionContext sec, Map<String, Integer> stagePartitions, PipelinePluginContext pluginContext) throws Exception {
MacroEvaluator macroEvaluator = new DefaultMacroEvaluator(sec.getWorkflowToken(), sec.getRuntimeArguments(), sec.getLogicalStartTime(), sec, sec.getNamespace());
Map<String, SparkCollection<Object>> stageDataCollections = new HashMap<>();
Map<String, SparkCollection<ErrorRecord<Object>>> stageErrorCollections = new HashMap<>();
// should never happen, but removes warning
if (pipelinePhase.getDag() == null) {
throw new IllegalStateException("Pipeline phase has no connections.");
}
for (String stageName : pipelinePhase.getDag().getTopologicalOrder()) {
StageInfo stageInfo = pipelinePhase.getStage(stageName);
//noinspection ConstantConditions
String pluginType = stageInfo.getPluginType();
// don't want to do an additional filter for stages that can emit errors,
// but aren't connected to an ErrorTransform
boolean hasErrorOutput = false;
Set<String> outputs = pipelinePhase.getStageOutputs(stageInfo.getName());
for (String output : outputs) {
//noinspection ConstantConditions
if (ErrorTransform.PLUGIN_TYPE.equals(pipelinePhase.getStage(output).getPluginType())) {
hasErrorOutput = true;
break;
}
}
SparkCollection<Object> stageData = null;
Map<String, SparkCollection<Object>> inputDataCollections = new HashMap<>();
Set<String> stageInputs = stageInfo.getInputs();
for (String inputStageName : stageInputs) {
inputDataCollections.put(inputStageName, stageDataCollections.get(inputStageName));
}
// initialize the stageRDD as the union of all input RDDs.
if (!inputDataCollections.isEmpty()) {
Iterator<SparkCollection<Object>> inputCollectionIter = inputDataCollections.values().iterator();
stageData = inputCollectionIter.next();
// don't union inputs records if we're joining or if we're processing errors
while (!BatchJoiner.PLUGIN_TYPE.equals(pluginType) && !ErrorTransform.PLUGIN_TYPE.equals(pluginType) && inputCollectionIter.hasNext()) {
stageData = stageData.union(inputCollectionIter.next());
}
}
SparkCollection<ErrorRecord<Object>> stageErrors = null;
PluginFunctionContext pluginFunctionContext = new PluginFunctionContext(stageInfo, sec);
if (stageData == null) {
// this if-else is nested inside the stageData null check to avoid warnings about stageData possibly being null in the other else-if conditions
if (sourcePluginType.equals(pluginType)) {
SparkCollection<Tuple2<Boolean, Object>> combinedData = getSource(stageInfo);
if (hasErrorOutput) {
// need to cache, otherwise the stage can be computed twice, once for output and once for errors.
combinedData.cache();
stageErrors = combinedData.flatMap(stageInfo, Compat.convert(new ErrorFilter<>()));
}
stageData = combinedData.flatMap(stageInfo, Compat.convert(new OutputFilter<>()));
} else {
throw new IllegalStateException(String.format("Stage '%s' has no input and is not a source.", stageName));
}
} else if (BatchSink.PLUGIN_TYPE.equals(pluginType)) {
stageData.store(stageInfo, Compat.convert(new BatchSinkFunction(pluginFunctionContext)));
} else if (Transform.PLUGIN_TYPE.equals(pluginType)) {
SparkCollection<Tuple2<Boolean, Object>> combinedData = stageData.transform(stageInfo);
if (hasErrorOutput) {
// need to cache, otherwise the stage can be computed twice, once for output and once for errors.
combinedData.cache();
stageErrors = combinedData.flatMap(stageInfo, Compat.convert(new ErrorFilter<>()));
}
stageData = combinedData.flatMap(stageInfo, Compat.convert(new OutputFilter<>()));
} else if (ErrorTransform.PLUGIN_TYPE.equals(pluginType)) {
// union all the errors coming into this stage
SparkCollection<ErrorRecord<Object>> inputErrors = null;
for (String inputStage : stageInputs) {
SparkCollection<ErrorRecord<Object>> inputErrorsFromStage = stageErrorCollections.get(inputStage);
if (inputErrorsFromStage == null) {
continue;
}
if (inputErrors == null) {
inputErrors = inputErrorsFromStage;
} else {
inputErrors = inputErrors.union(inputErrorsFromStage);
}
}
if (inputErrors != null) {
SparkCollection<Tuple2<Boolean, Object>> combinedData = inputErrors.flatMap(stageInfo, Compat.convert(new ErrorTransformFunction<>(pluginFunctionContext)));
if (hasErrorOutput) {
// need to cache, otherwise the stage can be computed twice, once for output and once for errors.
combinedData.cache();
stageErrors = combinedData.flatMap(stageInfo, Compat.convert(new ErrorFilter<>()));
}
stageData = combinedData.flatMap(stageInfo, Compat.convert(new OutputFilter<>()));
}
} else if (SparkCompute.PLUGIN_TYPE.equals(pluginType)) {
SparkCompute<Object, Object> sparkCompute = pluginContext.newPluginInstance(stageName, macroEvaluator);
stageData = stageData.compute(stageInfo, sparkCompute);
} else if (SparkSink.PLUGIN_TYPE.equals(pluginType)) {
SparkSink<Object> sparkSink = pluginContext.newPluginInstance(stageName, macroEvaluator);
stageData.store(stageInfo, sparkSink);
} else if (BatchAggregator.PLUGIN_TYPE.equals(pluginType)) {
Integer partitions = stagePartitions.get(stageName);
SparkCollection<Tuple2<Boolean, Object>> combinedData = stageData.aggregate(stageInfo, partitions);
if (hasErrorOutput) {
// need to cache, otherwise the stage can be computed twice, once for output and once for errors.
combinedData.cache();
stageErrors = combinedData.flatMap(stageInfo, Compat.convert(new ErrorFilter<>()));
}
stageData = combinedData.flatMap(stageInfo, Compat.convert(new OutputFilter<>()));
} else if (BatchJoiner.PLUGIN_TYPE.equals(pluginType)) {
BatchJoiner<Object, Object, Object> joiner = pluginContext.newPluginInstance(stageName, macroEvaluator);
BatchJoinerRuntimeContext joinerRuntimeContext = pluginFunctionContext.createBatchRuntimeContext();
joiner.initialize(joinerRuntimeContext);
Map<String, SparkPairCollection<Object, Object>> preJoinStreams = new HashMap<>();
for (Map.Entry<String, SparkCollection<Object>> inputStreamEntry : inputDataCollections.entrySet()) {
String inputStage = inputStreamEntry.getKey();
SparkCollection<Object> inputStream = inputStreamEntry.getValue();
preJoinStreams.put(inputStage, addJoinKey(stageInfo, inputStage, inputStream));
}
Set<String> remainingInputs = new HashSet<>();
remainingInputs.addAll(inputDataCollections.keySet());
Integer numPartitions = stagePartitions.get(stageName);
SparkPairCollection<Object, List<JoinElement<Object>>> joinedInputs = null;
// inner join on required inputs
for (final String inputStageName : joiner.getJoinConfig().getRequiredInputs()) {
SparkPairCollection<Object, Object> preJoinCollection = preJoinStreams.get(inputStageName);
if (joinedInputs == null) {
joinedInputs = preJoinCollection.mapValues(new InitialJoinFunction<>(inputStageName));
} else {
JoinFlattenFunction<Object> joinFlattenFunction = new JoinFlattenFunction<>(inputStageName);
joinedInputs = numPartitions == null ? joinedInputs.join(preJoinCollection).mapValues(joinFlattenFunction) : joinedInputs.join(preJoinCollection, numPartitions).mapValues(joinFlattenFunction);
}
remainingInputs.remove(inputStageName);
}
// outer join on non-required inputs
boolean isFullOuter = joinedInputs == null;
for (final String inputStageName : remainingInputs) {
SparkPairCollection<Object, Object> preJoinStream = preJoinStreams.get(inputStageName);
if (joinedInputs == null) {
joinedInputs = preJoinStream.mapValues(new InitialJoinFunction<>(inputStageName));
} else {
if (isFullOuter) {
OuterJoinFlattenFunction<Object> flattenFunction = new OuterJoinFlattenFunction<>(inputStageName);
joinedInputs = numPartitions == null ? joinedInputs.fullOuterJoin(preJoinStream).mapValues(flattenFunction) : joinedInputs.fullOuterJoin(preJoinStream, numPartitions).mapValues(flattenFunction);
} else {
LeftJoinFlattenFunction<Object> flattenFunction = new LeftJoinFlattenFunction<>(inputStageName);
joinedInputs = numPartitions == null ? joinedInputs.leftOuterJoin(preJoinStream).mapValues(flattenFunction) : joinedInputs.leftOuterJoin(preJoinStream, numPartitions).mapValues(flattenFunction);
}
}
}
// should never happen, but removes warnings
if (joinedInputs == null) {
throw new IllegalStateException("There are no inputs into join stage " + stageName);
}
stageData = mergeJoinResults(stageInfo, joinedInputs).cache();
} else if (Windower.PLUGIN_TYPE.equals(pluginType)) {
Windower windower = pluginContext.newPluginInstance(stageName, macroEvaluator);
stageData = stageData.window(stageInfo, windower);
} else {
throw new IllegalStateException(String.format("Stage %s is of unsupported plugin type %s.", stageName, pluginType));
}
if (shouldCache(pipelinePhase, stageInfo)) {
stageData = stageData.cache();
if (stageErrors != null) {
stageErrors = stageErrors.cache();
}
}
stageDataCollections.put(stageName, stageData);
stageErrorCollections.put(stageName, stageErrors);
}
}
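OutputFilter and ErrorFilter are referenced above but not shown; they split a stage's combined output, where each record is a Tuple2<Boolean, Object> whose Boolean flags it as an error or a regular output. A minimal sketch of that splitting idiom on a plain JavaRDD (the names and the flag's polarity are assumptions for illustration):

import org.apache.spark.api.java.JavaRDD;
import scala.Tuple2;

class OutputErrorSplitSketch {
    /* keep only records not flagged as errors */
    static JavaRDD<Object> outputsOnly(JavaRDD<Tuple2<Boolean, Object>> combined) {
        return combined.filter(t -> !t._1()).map(Tuple2::_2);
    }

    /* keep only records flagged as errors */
    static JavaRDD<Object> errorsOnly(JavaRDD<Tuple2<Boolean, Object>> combined) {
        return combined.filter(t -> t._1()).map(Tuple2::_2);
    }
}

Caching the combined collection before splitting, as the pipeline does above, avoids recomputing the stage once for the output branch and once for the error branch.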
Use of scala.Tuple2 in project cdap by caskdata.
The class SparkPageRankProgram, method run:
@Override
public void run(JavaSparkExecutionContext sec) throws Exception {
JavaSparkContext jsc = new JavaSparkContext();
LOG.info("Processing backlinkURLs data");
JavaPairRDD<Long, String> backlinkURLs = sec.fromStream("backlinkURLStream", String.class);
int iterationCount = getIterationCount(sec);
LOG.info("Grouping data by key");
// Grouping backlinks by unique URL in key
JavaPairRDD<String, Iterable<String>> links = backlinkURLs.values().mapToPair(new PairFunction<String, String, String>() {
@Override
public Tuple2<String, String> call(String s) {
String[] parts = SPACES.split(s);
return new Tuple2<>(parts[0], parts[1]);
}
}).distinct().groupByKey().cache();
// Initialize default rank for each key URL
JavaPairRDD<String, Double> ranks = links.mapValues(new Function<Iterable<String>, Double>() {
@Override
public Double call(Iterable<String> rs) {
return 1.0;
}
});
// Calculates and updates URL ranks iteratively using the PageRank algorithm.
for (int current = 0; current < iterationCount; current++) {
LOG.debug("Processing data with PageRank algorithm. Iteration {}/{}", current + 1, (iterationCount));
// Calculates URL contributions to the rank of other URLs.
JavaPairRDD<String, Double> contribs = links.join(ranks).values().flatMapToPair(new PairFlatMapFunction<Tuple2<Iterable<String>, Double>, String, Double>() {
@Override
public Iterable<Tuple2<String, Double>> call(Tuple2<Iterable<String>, Double> s) {
LOG.debug("Processing {} with rank {}", s._1(), s._2());
int urlCount = Iterables.size(s._1());
List<Tuple2<String, Double>> results = new ArrayList<>();
for (String n : s._1()) {
results.add(new Tuple2<>(n, s._2() / urlCount));
}
return results;
}
});
// Re-calculates URL ranks based on backlink contributions.
ranks = contribs.reduceByKey(new Sum()).mapValues(new Function<Double, Double>() {
@Override
public Double call(Double sum) {
return 0.15 + sum * 0.85;
}
});
}
LOG.info("Writing ranks data");
final ServiceDiscoverer discoveryServiceContext = sec.getServiceDiscoverer();
final Metrics sparkMetrics = sec.getMetrics();
JavaPairRDD<byte[], Integer> ranksRaw = ranks.mapToPair(new PairFunction<Tuple2<String, Double>, byte[], Integer>() {
@Override
public Tuple2<byte[], Integer> call(Tuple2<String, Double> tuple) throws Exception {
LOG.debug("URL {} has rank {}", Arrays.toString(tuple._1().getBytes(Charsets.UTF_8)), tuple._2());
URL serviceURL = discoveryServiceContext.getServiceURL(SparkPageRankApp.SERVICE_HANDLERS);
if (serviceURL == null) {
throw new RuntimeException("Failed to discover service: " + SparkPageRankApp.SERVICE_HANDLERS);
}
try {
URLConnection connection = new URL(serviceURL, String.format("%s/%s", SparkPageRankApp.SparkPageRankServiceHandler.TRANSFORM_PATH, tuple._2().toString())).openConnection();
try (BufferedReader reader = new BufferedReader(new InputStreamReader(connection.getInputStream(), Charsets.UTF_8))) {
String pr = reader.readLine();
if ((Integer.parseInt(pr)) == POPULAR_PAGE_THRESHOLD) {
sparkMetrics.count(POPULAR_PAGES, 1);
} else if (Integer.parseInt(pr) <= UNPOPULAR_PAGE_THRESHOLD) {
sparkMetrics.count(UNPOPULAR_PAGES, 1);
} else {
sparkMetrics.count(REGULAR_PAGES, 1);
}
return new Tuple2<>(tuple._1().getBytes(Charsets.UTF_8), Integer.parseInt(pr));
}
} catch (Exception e) {
LOG.warn("Failed to read the Stream for service {}", SparkPageRankApp.SERVICE_HANDLERS, e);
throw Throwables.propagate(e);
}
}
});
// Store calculated results in output Dataset.
// All calculated results are stored in one row.
// Each result, the calculated URL rank based on backlink contributions, is an entry of the row.
// The value of the entry is the URL rank.
sec.saveAsDataset(ranksRaw, "ranks");
LOG.info("PageRanks successfuly computed and written to \"ranks\" dataset");
}
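For comparison, the per-iteration contribution and damping step above can be written much more compactly with Java 8 lambdas. This is a condensed sketch rather than the CDAP program itself; it assumes the Spark 2.x Java API (where PairFlatMapFunction returns an Iterator) and replaces the Sum class with Double::sum:

import com.google.common.collect.Iterables;
import org.apache.spark.api.java.JavaPairRDD;
import scala.Tuple2;
import java.util.ArrayList;
import java.util.List;

class PageRankStepSketch {
    /* one PageRank iteration: spread each page's rank over its outgoing links, then damp */
    static JavaPairRDD<String, Double> pageRankStep(JavaPairRDD<String, Iterable<String>> links,
                                                    JavaPairRDD<String, Double> ranks) {
        JavaPairRDD<String, Double> contribs = links.join(ranks).values().flatMapToPair(pair -> {
            int urlCount = Iterables.size(pair._1());
            List<Tuple2<String, Double>> results = new ArrayList<>();
            for (String neighbor : pair._1()) {
                results.add(new Tuple2<>(neighbor, pair._2() / urlCount));
            }
            return results.iterator();
        });
        return contribs.reduceByKey(Double::sum).mapValues(sum -> 0.15 + sum * 0.85);
    }
}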