Use of scala.Tuple2 in project spark-dataflow by cloudera.
The class TransformTranslator, method writeText.
private static <T> TransformEvaluator<TextIO.Write.Bound<T>> writeText() {
return new TransformEvaluator<TextIO.Write.Bound<T>>() {
@Override
public void evaluate(TextIO.Write.Bound<T> transform, EvaluationContext context) {
@SuppressWarnings("unchecked")
JavaPairRDD<T, Void> last = ((JavaRDDLike<WindowedValue<T>, ?>) context.getInputRDD(transform))
  .map(WindowingHelpers.<T>unwindowFunction())
  .mapToPair(new PairFunction<T, T, Void>() {
    @Override
    public Tuple2<T, Void> call(T t) throws Exception {
      return new Tuple2<>(t, null);
    }
  });
ShardTemplateInformation shardTemplateInfo = new ShardTemplateInformation(
  transform.getNumShards(), transform.getShardTemplate(),
  transform.getFilenamePrefix(), transform.getFilenameSuffix());
writeHadoopFile(last, new Configuration(), shardTemplateInfo, Text.class, NullWritable.class, TemplatedTextOutputFormat.class);
}
};
}
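The Tuple2 here is pure plumbing: Hadoop's output formats expect key/value pairs, so every record is paired with a null Void value before writeHadoopFile is called. A self-contained sketch of the same pairing step (the class name, local master, and sample data are illustrative, assuming a Spark 2.x classpath with Java 8 lambdas):

import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import scala.Tuple2;

import java.util.Arrays;

public class PairWithVoid {
  public static void main(String[] args) {
    JavaSparkContext jsc = new JavaSparkContext("local[2]", "pair-with-void");
    JavaRDD<String> records = jsc.parallelize(Arrays.asList("a", "b", "c"));
    // Pair every record with a null Void value so key/value-oriented APIs accept it.
    JavaPairRDD<String, Void> pairs = records.mapToPair(r -> new Tuple2<>(r, null));
    System.out.println(pairs.collect());  // [(a,null), (b,null), (c,null)]
    jsc.stop();
  }
}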
Use of scala.Tuple2 in project spark-dataflow by cloudera.
The class StreamingTransformTranslator, method kafka.
private static <K, V> TransformEvaluator<KafkaIO.Read.Unbound<K, V>> kafka() {
return new TransformEvaluator<KafkaIO.Read.Unbound<K, V>>() {
@Override
public void evaluate(KafkaIO.Read.Unbound<K, V> transform, EvaluationContext context) {
StreamingEvaluationContext sec = (StreamingEvaluationContext) context;
JavaStreamingContext jssc = sec.getStreamingContext();
Class<K> keyClazz = transform.getKeyClass();
Class<V> valueClazz = transform.getValueClass();
Class<? extends Decoder<K>> keyDecoderClazz = transform.getKeyDecoderClass();
Class<? extends Decoder<V>> valueDecoderClazz = transform.getValueDecoderClass();
Map<String, String> kafkaParams = transform.getKafkaParams();
Set<String> topics = transform.getTopics();
JavaPairInputDStream<K, V> inputPairStream = KafkaUtils.createDirectStream(
  jssc, keyClazz, valueClazz, keyDecoderClazz, valueDecoderClazz, kafkaParams, topics);
JavaDStream<WindowedValue<KV<K, V>>> inputStream = inputPairStream
  .map(new Function<Tuple2<K, V>, KV<K, V>>() {
    @Override
    public KV<K, V> call(Tuple2<K, V> t2) throws Exception {
      return KV.of(t2._1(), t2._2());
    }
  })
  .map(WindowingHelpers.<KV<K, V>>windowFunction());
sec.setStream(transform, inputStream);
}
};
}
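KafkaUtils.createDirectStream emits each record as a Tuple2<K, V>, which the map above immediately repacks into Dataflow's KV type via the _1() and _2() accessors. The same unpacking works on any pair collection; a minimal sketch that needs no Kafka broker (the Map.Entry target type and sample data are illustrative):

import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import scala.Tuple2;

import java.util.AbstractMap.SimpleImmutableEntry;
import java.util.Arrays;
import java.util.Map;

public class UnpackTuples {
  public static void main(String[] args) {
    JavaSparkContext jsc = new JavaSparkContext("local[2]", "unpack-tuples");
    // Stand-in for the (key, value) pairs a Kafka direct stream would deliver.
    JavaPairRDD<String, Integer> kafkaLike = jsc.parallelizePairs(
      Arrays.asList(new Tuple2<>("clicks", 3), new Tuple2<>("views", 7)));
    // _1() and _2() read the two slots of a scala.Tuple2.
    JavaRDD<Map.Entry<String, Integer>> entries =
      kafkaLike.map(t2 -> new SimpleImmutableEntry<>(t2._1(), t2._2()));
    System.out.println(entries.collect());
    jsc.stop();
  }
}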
Use of scala.Tuple2 in project cdap by caskdata.
The class SparkPageRankProgram, method run.
@Override
public void run(JavaSparkExecutionContext sec) throws Exception {
JavaSparkContext jsc = new JavaSparkContext();
LOG.info("Processing backlinkURLs data");
JavaPairRDD<Long, String> backlinkURLs = sec.fromStream("backlinkURLStream", String.class);
int iterationCount = getIterationCount(sec);
LOG.info("Grouping data by key");
// Grouping backlinks by unique URL in key
JavaPairRDD<String, Iterable<String>> links = backlinkURLs.values()
  .mapToPair(new PairFunction<String, String, String>() {
    @Override
    public Tuple2<String, String> call(String s) {
      String[] parts = SPACES.split(s);
      return new Tuple2<>(parts[0], parts[1]);
    }
  })
  .distinct()
  .groupByKey()
  .cache();
// Initialize default rank for each key URL
JavaPairRDD<String, Double> ranks = links.mapValues(new Function<Iterable<String>, Double>() {
@Override
public Double call(Iterable<String> rs) {
return 1.0;
}
});
// Iteratively calculates and updates URL ranks using the PageRank algorithm.
for (int current = 0; current < iterationCount; current++) {
LOG.debug("Processing data with PageRank algorithm. Iteration {}/{}", current + 1, (iterationCount));
// Calculates URL contributions to the rank of other URLs.
JavaPairRDD<String, Double> contribs = links.join(ranks).values()
  .flatMapToPair(new PairFlatMapFunction<Tuple2<Iterable<String>, Double>, String, Double>() {
    @Override
    public Iterable<Tuple2<String, Double>> call(Tuple2<Iterable<String>, Double> s) {
      LOG.debug("Processing {} with rank {}", s._1(), s._2());
      int urlCount = Iterables.size(s._1());
      List<Tuple2<String, Double>> results = new ArrayList<>();
      for (String n : s._1()) {
        results.add(new Tuple2<>(n, s._2() / urlCount));
      }
      return results;
    }
  });
// Re-calculates URL ranks based on backlink contributions.
ranks = contribs.reduceByKey(new Sum()).mapValues(new Function<Double, Double>() {
  @Override
  public Double call(Double sum) {
    return 0.15 + sum * 0.85;
  }
});
}
LOG.info("Writing ranks data");
final ServiceDiscoverer discoveryServiceContext = sec.getServiceDiscoverer();
final Metrics sparkMetrics = sec.getMetrics();
JavaPairRDD<byte[], Integer> ranksRaw = ranks.mapToPair(new PairFunction<Tuple2<String, Double>, byte[], Integer>() {
  @Override
  public Tuple2<byte[], Integer> call(Tuple2<String, Double> tuple) throws Exception {
    LOG.debug("URL {} has rank {}", Arrays.toString(tuple._1().getBytes(Charsets.UTF_8)), tuple._2());
    URL serviceURL = discoveryServiceContext.getServiceURL(SparkPageRankApp.SERVICE_HANDLERS);
    if (serviceURL == null) {
      throw new RuntimeException("Failed to discover service: " + SparkPageRankApp.SERVICE_HANDLERS);
    }
    try {
      URLConnection connection = new URL(serviceURL,
        String.format("%s/%s", SparkPageRankApp.SparkPageRankServiceHandler.TRANSFORM_PATH,
          tuple._2().toString())).openConnection();
      try (BufferedReader reader =
        new BufferedReader(new InputStreamReader(connection.getInputStream(), Charsets.UTF_8))) {
        String pr = reader.readLine();
        if (Integer.parseInt(pr) == POPULAR_PAGE_THRESHOLD) {
          sparkMetrics.count(POPULAR_PAGES, 1);
        } else if (Integer.parseInt(pr) <= UNPOPULAR_PAGE_THRESHOLD) {
          sparkMetrics.count(UNPOPULAR_PAGES, 1);
        } else {
          sparkMetrics.count(REGULAR_PAGES, 1);
        }
        return new Tuple2<>(tuple._1().getBytes(Charsets.UTF_8), Integer.parseInt(pr));
      }
    } catch (Exception e) {
      LOG.warn("Failed to read the Stream for service {}", SparkPageRankApp.SERVICE_HANDLERS, e);
      throw Throwables.propagate(e);
    }
  }
});
// Store calculated results in output Dataset.
// All calculated results are stored in one row.
// Each result, the calculated URL rank based on backlink contributions, is an entry of the row.
// The value of the entry is the URL rank.
sec.saveAsDataset(ranksRaw, "ranks");
LOG.info("PageRanks successfuly computed and written to \"ranks\" dataset");
}
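Each loop iteration above is two Tuple2-shaped transformations: flatMapToPair fans a page's rank out to its neighbours as (neighbour, contribution) pairs, and reduceByKey folds the contributions back per URL before damping. A stripped-down, self-contained version of one iteration (the three-page link graph is invented; note this sketch targets Spark 2.x, where flatMapToPair returns an Iterator rather than the Iterable used above):

import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaSparkContext;
import scala.Tuple2;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class OneRankIteration {
  public static void main(String[] args) {
    JavaSparkContext jsc = new JavaSparkContext("local[2]", "one-rank-iteration");
    // Invented link graph: page -> pages it links to.
    JavaPairRDD<String, List<String>> links = jsc.parallelizePairs(Arrays.asList(
      new Tuple2<>("a", Arrays.asList("b", "c")),
      new Tuple2<>("b", Arrays.asList("c")),
      new Tuple2<>("c", Arrays.asList("a"))));
    JavaPairRDD<String, Double> ranks = links.mapValues(urls -> 1.0);
    // Fan each page's rank out to its neighbours as (neighbour, contribution) pairs.
    JavaPairRDD<String, Double> contribs = links.join(ranks).values()
      .flatMapToPair(pair -> {
        List<Tuple2<String, Double>> out = new ArrayList<>();
        for (String n : pair._1()) {
          out.add(new Tuple2<>(n, pair._2() / pair._1().size()));
        }
        return out.iterator();
      });
    // Fold contributions per page, then apply the 0.15/0.85 damping step.
    ranks = contribs.reduceByKey(Double::sum).mapValues(sum -> 0.15 + 0.85 * sum);
    System.out.println(ranks.collect());
    jsc.stop();
  }
}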
Use of scala.Tuple2 in project cdap by caskdata.
The class SparkPipelineRunner, method runPipeline.
public void runPipeline(PipelinePhase pipelinePhase, String sourcePluginType, JavaSparkExecutionContext sec,
  Map<String, Integer> stagePartitions, PipelinePluginContext pluginContext) throws Exception {
MacroEvaluator macroEvaluator = new DefaultMacroEvaluator(sec.getWorkflowToken(), sec.getRuntimeArguments(),
  sec.getLogicalStartTime(), sec, sec.getNamespace());
Map<String, SparkCollection<Object>> stageDataCollections = new HashMap<>();
Map<String, SparkCollection<ErrorRecord<Object>>> stageErrorCollections = new HashMap<>();
// should never happen, but removes warning
if (pipelinePhase.getDag() == null) {
throw new IllegalStateException("Pipeline phase has no connections.");
}
for (String stageName : pipelinePhase.getDag().getTopologicalOrder()) {
StageInfo stageInfo = pipelinePhase.getStage(stageName);
//noinspection ConstantConditions
String pluginType = stageInfo.getPluginType();
// don't want to do an additional filter for stages that can emit errors,
// but aren't connected to an ErrorTransform
boolean hasErrorOutput = false;
Set<String> outputs = pipelinePhase.getStageOutputs(stageInfo.getName());
for (String output : outputs) {
//noinspection ConstantConditions
if (ErrorTransform.PLUGIN_TYPE.equals(pipelinePhase.getStage(output).getPluginType())) {
hasErrorOutput = true;
break;
}
}
SparkCollection<Object> stageData = null;
Map<String, SparkCollection<Object>> inputDataCollections = new HashMap<>();
Set<String> stageInputs = stageInfo.getInputs();
for (String inputStageName : stageInputs) {
inputDataCollections.put(inputStageName, stageDataCollections.get(inputStageName));
}
// initialize the stageRDD as the union of all input RDDs.
if (!inputDataCollections.isEmpty()) {
Iterator<SparkCollection<Object>> inputCollectionIter = inputDataCollections.values().iterator();
stageData = inputCollectionIter.next();
// don't union inputs records if we're joining or if we're processing errors
while (!BatchJoiner.PLUGIN_TYPE.equals(pluginType) && !ErrorTransform.PLUGIN_TYPE.equals(pluginType)
  && inputCollectionIter.hasNext()) {
stageData = stageData.union(inputCollectionIter.next());
}
}
SparkCollection<ErrorRecord<Object>> stageErrors = null;
PluginFunctionContext pluginFunctionContext = new PluginFunctionContext(stageInfo, sec);
if (stageData == null) {
// this if-else is nested inside the stageData null check to avoid warnings about stageData possibly being
// null in the other else-if conditions
if (sourcePluginType.equals(pluginType)) {
SparkCollection<Tuple2<Boolean, Object>> combinedData = getSource(stageInfo);
if (hasErrorOutput) {
// need to cache, otherwise the stage can be computed twice, once for output and once for errors.
combinedData.cache();
stageErrors = combinedData.flatMap(stageInfo, Compat.convert(new OutputFilter<>()));
}
stageData = combinedData.flatMap(stageInfo, Compat.convert(new ErrorFilter<>()));
} else {
throw new IllegalStateException(String.format("Stage '%s' has no input and is not a source.", stageName));
}
} else if (BatchSink.PLUGIN_TYPE.equals(pluginType)) {
stageData.store(stageInfo, Compat.convert(new BatchSinkFunction(pluginFunctionContext)));
} else if (Transform.PLUGIN_TYPE.equals(pluginType)) {
SparkCollection<Tuple2<Boolean, Object>> combinedData = stageData.transform(stageInfo);
if (hasErrorOutput) {
// need to cache, otherwise the stage can be computed twice, once for output and once for errors.
combinedData.cache();
stageErrors = combinedData.flatMap(stageInfo, Compat.convert(new OutputFilter<>()));
}
stageData = combinedData.flatMap(stageInfo, Compat.convert(new ErrorFilter<>()));
} else if (ErrorTransform.PLUGIN_TYPE.equals(pluginType)) {
// union all the errors coming into this stage
SparkCollection<ErrorRecord<Object>> inputErrors = null;
for (String inputStage : stageInputs) {
SparkCollection<ErrorRecord<Object>> inputErrorsFromStage = stageErrorCollections.get(inputStage);
if (inputErrorsFromStage == null) {
continue;
}
if (inputErrors == null) {
inputErrors = inputErrorsFromStage;
} else {
inputErrors = inputErrors.union(inputErrorsFromStage);
}
}
if (inputErrors != null) {
SparkCollection<Tuple2<Boolean, Object>> combinedData =
  inputErrors.flatMap(stageInfo, Compat.convert(new ErrorTransformFunction<>(pluginFunctionContext)));
if (hasErrorOutput) {
// need to cache, otherwise the stage can be computed twice, once for output and once for errors.
combinedData.cache();
stageErrors = combinedData.flatMap(stageInfo, Compat.convert(new OutputFilter<>()));
}
stageData = combinedData.flatMap(stageInfo, Compat.convert(new ErrorFilter<>()));
}
} else if (SparkCompute.PLUGIN_TYPE.equals(pluginType)) {
SparkCompute<Object, Object> sparkCompute = pluginContext.newPluginInstance(stageName, macroEvaluator);
stageData = stageData.compute(stageInfo, sparkCompute);
} else if (SparkSink.PLUGIN_TYPE.equals(pluginType)) {
SparkSink<Object> sparkSink = pluginContext.newPluginInstance(stageName, macroEvaluator);
stageData.store(stageInfo, sparkSink);
} else if (BatchAggregator.PLUGIN_TYPE.equals(pluginType)) {
Integer partitions = stagePartitions.get(stageName);
SparkCollection<Tuple2<Boolean, Object>> combinedData = stageData.aggregate(stageInfo, partitions);
if (hasErrorOutput) {
// need to cache, otherwise the stage can be computed twice, once for output and once for errors.
combinedData.cache();
stageErrors = combinedData.flatMap(stageInfo, Compat.convert(new OutputFilter<>()));
}
stageData = combinedData.flatMap(stageInfo, Compat.convert(new ErrorFilter<>()));
} else if (BatchJoiner.PLUGIN_TYPE.equals(pluginType)) {
BatchJoiner<Object, Object, Object> joiner = pluginContext.newPluginInstance(stageName, macroEvaluator);
BatchJoinerRuntimeContext joinerRuntimeContext = pluginFunctionContext.createBatchRuntimeContext();
joiner.initialize(joinerRuntimeContext);
Map<String, SparkPairCollection<Object, Object>> preJoinStreams = new HashMap<>();
for (Map.Entry<String, SparkCollection<Object>> inputStreamEntry : inputDataCollections.entrySet()) {
String inputStage = inputStreamEntry.getKey();
SparkCollection<Object> inputStream = inputStreamEntry.getValue();
preJoinStreams.put(inputStage, addJoinKey(stageInfo, inputStage, inputStream));
}
Set<String> remainingInputs = new HashSet<>(inputDataCollections.keySet());
Integer numPartitions = stagePartitions.get(stageName);
SparkPairCollection<Object, List<JoinElement<Object>>> joinedInputs = null;
// inner join on required inputs
for (final String inputStageName : joiner.getJoinConfig().getRequiredInputs()) {
SparkPairCollection<Object, Object> preJoinCollection = preJoinStreams.get(inputStageName);
if (joinedInputs == null) {
joinedInputs = preJoinCollection.mapValues(new InitialJoinFunction<>(inputStageName));
} else {
JoinFlattenFunction<Object> joinFlattenFunction = new JoinFlattenFunction<>(inputStageName);
joinedInputs = numPartitions == null
  ? joinedInputs.join(preJoinCollection).mapValues(joinFlattenFunction)
  : joinedInputs.join(preJoinCollection, numPartitions).mapValues(joinFlattenFunction);
}
remainingInputs.remove(inputStageName);
}
// outer join on non-required inputs
boolean isFullOuter = joinedInputs == null;
for (final String inputStageName : remainingInputs) {
SparkPairCollection<Object, Object> preJoinStream = preJoinStreams.get(inputStageName);
if (joinedInputs == null) {
joinedInputs = preJoinStream.mapValues(new InitialJoinFunction<>(inputStageName));
} else {
if (isFullOuter) {
OuterJoinFlattenFunction<Object> flattenFunction = new OuterJoinFlattenFunction<>(inputStageName);
joinedInputs = numPartitions == null
  ? joinedInputs.fullOuterJoin(preJoinStream).mapValues(flattenFunction)
  : joinedInputs.fullOuterJoin(preJoinStream, numPartitions).mapValues(flattenFunction);
} else {
LeftJoinFlattenFunction<Object> flattenFunction = new LeftJoinFlattenFunction<>(inputStageName);
joinedInputs = numPartitions == null
  ? joinedInputs.leftOuterJoin(preJoinStream).mapValues(flattenFunction)
  : joinedInputs.leftOuterJoin(preJoinStream, numPartitions).mapValues(flattenFunction);
}
}
}
// should never happen, but removes warnings
if (joinedInputs == null) {
throw new IllegalStateException("There are no inputs into join stage " + stageName);
}
stageData = mergeJoinResults(stageInfo, joinedInputs).cache();
} else if (Windower.PLUGIN_TYPE.equals(pluginType)) {
Windower windower = pluginContext.newPluginInstance(stageName, macroEvaluator);
stageData = stageData.window(stageInfo, windower);
} else {
throw new IllegalStateException(String.format("Stage %s is of unsupported plugin type %s.", stageName, pluginType));
}
if (shouldCache(pipelinePhase, stageInfo)) {
stageData = stageData.cache();
if (stageErrors != null) {
stageErrors = stageErrors.cache();
}
}
stageDataCollections.put(stageName, stageData);
stageErrorCollections.put(stageName, stageErrors);
}
}
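Throughout runPipeline, Tuple2<Boolean, Object> serves as a lightweight Either: the flag tags each record as output or error, the tagged collection is cached, and two filtering passes split it, which is exactly why the cache() calls precede the paired flatMap calls. A minimal sketch of that tag-and-split pattern on plain RDDs (the even/odd classifier stands in for real transform logic; class name and data are invented):

import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import scala.Tuple2;

import java.util.Arrays;

public class TagAndSplit {
  public static void main(String[] args) {
    JavaSparkContext jsc = new JavaSparkContext("local[2]", "tag-and-split");
    JavaRDD<Integer> records = jsc.parallelize(Arrays.asList(1, 2, 3, 4, 5));
    // Tag each record: true = normal output, false = error.
    JavaRDD<Tuple2<Boolean, Integer>> tagged = records.map(r -> new Tuple2<>(r % 2 == 0, r));
    // Cache so the two filtering passes below don't recompute the upstream stage.
    tagged.cache();
    JavaRDD<Integer> output = tagged.filter(Tuple2::_1).map(Tuple2::_2);
    JavaRDD<Integer> errors = tagged.filter(t -> !t._1()).map(Tuple2::_2);
    System.out.println("output: " + output.collect() + " errors: " + errors.collect());
    jsc.stop();
  }
}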
Use of scala.Tuple2 in project cdap by caskdata.
The class NaiveBayesTrainer, method run.
@Override
public void run(SparkExecutionPluginContext sparkContext, JavaRDD<StructuredRecord> input) throws Exception {
Preconditions.checkArgument(input.count() != 0, "Input RDD is empty.");
final HashingTF tf = new HashingTF(100);
JavaRDD<LabeledPoint> trainingData = input.map(new Function<StructuredRecord, LabeledPoint>() {
  @Override
  public LabeledPoint call(StructuredRecord record) throws Exception {
    // should never happen, here to test app correctness in unit tests
    if (inputSchema != null && !inputSchema.equals(record.getSchema())) {
      throw new IllegalStateException("runtime schema does not match what was set at configure time.");
    }
    String text = record.get(config.fieldToClassify);
    return new LabeledPoint((Double) record.get(config.predictionField),
      tf.transform(Lists.newArrayList(text.split(" "))));
  }
});
trainingData.cache();
final NaiveBayesModel model = NaiveBayes.train(trainingData.rdd(), 1.0);
// save the model to a file in the output FileSet
JavaSparkContext javaSparkContext = sparkContext.getSparkContext();
FileSet outputFS = sparkContext.getDataset(config.fileSetName);
model.save(JavaSparkContext.toSparkContext(javaSparkContext),
  outputFS.getBaseLocation().append(config.path).toURI().getPath());
JavaPairRDD<Long, String> textsToClassify = sparkContext.fromStream(TEXTS_TO_CLASSIFY, String.class);
JavaRDD<Vector> featuresToClassify = textsToClassify.map(new Function<Tuple2<Long, String>, Vector>() {
  @Override
  public Vector call(Tuple2<Long, String> longWritableTextTuple2) throws Exception {
    String text = longWritableTextTuple2._2();
    return tf.transform(Lists.newArrayList(text.split(" ")));
  }
});
JavaRDD<Double> predict = model.predict(featuresToClassify);
LOG.info("Predictions: {}", predict.collect());
// key the predictions with the message
JavaPairRDD<String, Double> keyedPredictions = textsToClassify.values().zip(predict);
// convert to byte[],byte[] to write to data
JavaPairRDD<byte[], byte[]> bytesRDD = keyedPredictions.mapToPair(new PairFunction<Tuple2<String, Double>, byte[], byte[]>() {
  @Override
  public Tuple2<byte[], byte[]> call(Tuple2<String, Double> tuple) throws Exception {
    return new Tuple2<>(Bytes.toBytes(tuple._1()), Bytes.toBytes(tuple._2()));
  }
});
sparkContext.saveAsDataset(bytesRDD, CLASSIFIED_TEXTS);
}
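This trainer touches Tuple2 twice: once to unpack the (timestamp, text) stream records into feature vectors, and once more through zip, which pairs the i-th text with the i-th prediction. A compact, self-contained sketch of the train-predict-zip flow (the toy corpus and labels are invented, assuming a Spark 2.x MLlib classpath):

import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.mllib.classification.NaiveBayes;
import org.apache.spark.mllib.classification.NaiveBayesModel;
import org.apache.spark.mllib.feature.HashingTF;
import org.apache.spark.mllib.linalg.Vector;
import org.apache.spark.mllib.regression.LabeledPoint;

import java.util.Arrays;

public class TrainAndZip {
  public static void main(String[] args) {
    JavaSparkContext jsc = new JavaSparkContext("local[2]", "train-and-zip");
    HashingTF tf = new HashingTF(100);
    // Toy labelled corpus: 1.0 = spam, 0.0 = ham.
    JavaRDD<LabeledPoint> training = jsc.parallelize(Arrays.asList(
      new LabeledPoint(1.0, tf.transform(Arrays.asList("win money now".split(" ")))),
      new LabeledPoint(0.0, tf.transform(Arrays.asList("see you at lunch".split(" "))))));
    NaiveBayesModel model = NaiveBayes.train(training.rdd(), 1.0);
    JavaRDD<String> texts = jsc.parallelize(Arrays.asList("free money", "lunch tomorrow"));
    JavaRDD<Vector> features = texts.map(t -> tf.transform(Arrays.asList(t.split(" "))));
    // zip pairs the i-th text with the i-th prediction as a Tuple2.
    JavaPairRDD<String, Double> predictions = texts.zip(model.predict(features));
    System.out.println(predictions.collect());
    jsc.stop();
  }
}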