Use of io.cdap.cdap.etl.spark.plugin.SparkPipelinePluginContext in project cdap by caskdata.
The class DataStreamsSparkLauncher, method initialize:
@TransactionPolicy(TransactionControl.EXPLICIT)
@Override
public void initialize() throws Exception {
  SparkClientContext context = getContext();
  String arguments = Joiner.on(", ").withKeyValueSeparator("=").join(context.getRuntimeArguments());
  WRAPPERLOGGER.info("Pipeline '{}' is started by user '{}' with arguments {}",
                     context.getApplicationSpecification().getName(),
                     UserGroupInformation.getCurrentUser().getShortUserName(), arguments);
  DataStreamsPipelineSpec spec = GSON.fromJson(context.getSpecification().getProperty(Constants.PIPELINEID),
                                               DataStreamsPipelineSpec.class);
  PipelinePluginContext pluginContext = new SparkPipelinePluginContext(context, context.getMetrics(), true, true);

  // count how many executors the streaming sources (receivers) require
  int numSources = 0;
  for (StageSpec stageSpec : spec.getStages()) {
    if (StreamingSource.PLUGIN_TYPE.equals(stageSpec.getPlugin().getType())) {
      StreamingSource<Object> streamingSource = pluginContext.newPluginInstance(stageSpec.getName());
      numSources += streamingSource.getRequiredExecutors();
    }
  }

  SparkConf sparkConf = new SparkConf();
  sparkConf.set("spark.streaming.backpressure.enabled", "true");
  sparkConf.set("spark.streaming.blockInterval", String.valueOf(spec.getBatchIntervalMillis() / 5));
  sparkConf.set("spark.maxRemoteBlockSizeFetchToMem", String.valueOf(Integer.MAX_VALUE - 512));

  String extraOpts = spec.getExtraJavaOpts();
  if (extraOpts != null && !extraOpts.isEmpty()) {
    sparkConf.set("spark.driver.extraJavaOptions", extraOpts);
    sparkConf.set("spark.executor.extraJavaOptions", extraOpts);
  }

  // without this, stopping will hang on machines with few cores
  sparkConf.set("spark.rpc.netty.dispatcher.numThreads", String.valueOf(numSources + 2));

  // Spark requires at least as many local threads (or executors in distributed mode) as there are
  // receivers (streaming sources), because each receiver permanently holds one thread or core.
  // The master is therefore set here based on the isUnitTest setting in the spec.
  sparkConf.setMaster(String.format("local[%d]", numSources + 2));
  sparkConf.set("spark.executor.instances", String.valueOf(numSources + 2));
  if (spec.isUnitTest()) {
    sparkConf.setMaster(String.format("local[%d]", numSources + 1));
  }

  // override defaults with any user-provided engine configs
  int minExecutors = numSources + 1;
  for (Map.Entry<String, String> property : spec.getProperties().entrySet()) {
    if ("spark.executor.instances".equals(property.getKey())) {
      // don't let the user set this to a value that cannot run the pipeline
      try {
        int numExecutors = Integer.parseInt(property.getValue());
        if (numExecutors < minExecutors) {
          LOG.warn("Number of executors {} is less than the minimum number required to run the pipeline. "
                     + "Automatically increasing it to {}", numExecutors, minExecutors);
          numExecutors = minExecutors;
        }
        sparkConf.set(property.getKey(), String.valueOf(numExecutors));
      } catch (NumberFormatException e) {
        throw new IllegalArgumentException("Number of spark executors was set to invalid value "
                                             + property.getValue(), e);
      }
    } else {
      sparkConf.set(property.getKey(), property.getValue());
    }
  }

  context.setSparkConf(sparkConf);
  WRAPPERLOGGER.info("Pipeline '{}' running", context.getApplicationSpecification().getName());
}
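The sizing rule above boils down to: one dedicated executor (or local thread) per receiver, plus at least one more to process the data, so user-supplied values below that floor are raised. A minimal, self-contained sketch of that validation, assuming nothing from CDAP (the class and method names here are invented for illustration):

import java.util.HashMap;
import java.util.Map;

public class ExecutorSizingSketch {

  // Hypothetical helper: mirrors the launcher's rule that a user-supplied
  // spark.executor.instances value may never drop below numSources + 1.
  static int clampExecutorInstances(String userValue, int numSources) {
    int minExecutors = numSources + 1;
    int requested;
    try {
      requested = Integer.parseInt(userValue);
    } catch (NumberFormatException e) {
      throw new IllegalArgumentException("Number of spark executors was set to invalid value " + userValue, e);
    }
    return Math.max(requested, minExecutors);
  }

  public static void main(String[] args) {
    Map<String, String> userConf = new HashMap<>();
    userConf.put("spark.executor.instances", "1");
    int numSources = 3;  // e.g. three streaming sources, each requiring one receiver
    int effective = clampExecutorInstances(userConf.get("spark.executor.instances"), numSources);
    System.out.println("effective executors: " + effective);  // prints 4
  }
}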
Use of io.cdap.cdap.etl.spark.plugin.SparkPipelinePluginContext in project cdap by caskdata.
The class StreamingMultiSinkFunction, method call:
@Override
public void call(JavaRDD<RecordInfo<Object>> data, Time batchTime) throws Exception {
  long logicalStartTime = batchTime.milliseconds();
  MacroEvaluator evaluator = new DefaultMacroEvaluator(new BasicArguments(sec), logicalStartTime,
                                                       sec.getSecureStore(), sec.getServiceDiscoverer(),
                                                       sec.getNamespace());
  PluginContext pluginContext = new SparkPipelinePluginContext(sec.getPluginContext(), sec.getMetrics(),
                                                               phaseSpec.isStageLoggingEnabled(),
                                                               phaseSpec.isProcessTimingEnabled());
  SparkBatchSinkFactory sinkFactory = new SparkBatchSinkFactory();
  PipelineRuntime pipelineRuntime = new SparkPipelineRuntime(sec, logicalStartTime);
  Map<String, SubmitterLifecycle<?>> stages = createStages(evaluator);

  // call prepareRun() on all the stages in the group, in an order that guarantees that inputs
  // are prepared before outputs. This matters because plugins can call getArguments().set()
  // in prepareRun(), and downstream stages should be able to read those arguments.
  List<String> traversalOrder = new ArrayList<>(group.size());
  for (String stageName : phaseSpec.getPhase().getDag().getTopologicalOrder()) {
    if (group.contains(stageName)) {
      traversalOrder.add(stageName);
    }
  }
  for (String stageName : traversalOrder) {
    SubmitterLifecycle<?> plugin = stages.get(stageName);
    StageSpec stageSpec = phaseSpec.getPhase().getStage(stageName);
    try {
      prepareRun(pipelineRuntime, sinkFactory, stageSpec, plugin);
    } catch (Exception e) {
      LOG.error("Error preparing sink {} for the batch for time {}.", stageName, logicalStartTime, e);
      return;
    }
  }

  // run the actual transforms and sinks in this group
  boolean ranSuccessfully = true;
  try {
    MultiSinkFunction multiSinkFunction = new MultiSinkFunction(sec, phaseSpec, group, collectors);
    Set<String> outputNames = sinkFactory.writeCombinedRDD(data.flatMapToPair(multiSinkFunction), sec, sinkNames);
    sec.execute(new TxRunnable() {
      @Override
      public void run(DatasetContext context) throws Exception {
        for (String outputName : outputNames) {
          ExternalDatasets.registerLineage(sec.getAdmin(), outputName, AccessType.WRITE, null,
                                           () -> context.getDataset(outputName));
        }
      }
    });
  } catch (Exception e) {
    LOG.error("Error writing to sinks {} for the batch for time {}.", sinkNames, logicalStartTime, e);
    ranSuccessfully = false;
  }

  // run onRunFinish() for each sink
  for (String stageName : traversalOrder) {
    SubmitterLifecycle<?> plugin = stages.get(stageName);
    StageSpec stageSpec = phaseSpec.getPhase().getStage(stageName);
    try {
      onRunFinish(pipelineRuntime, sinkFactory, stageSpec, plugin, ranSuccessfully);
    } catch (Exception e) {
      LOG.warn("Unable to execute onRunFinish for sink {}", stageName, e);
    }
  }
}
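The ordering logic is the one non-obvious piece: the group's stages must be prepared in the order given by a topological sort of the whole phase DAG, so that an upstream stage's prepareRun() runs (and can set arguments) before its downstream sinks are prepared. A standalone sketch of that filtering step, using plain Java collections rather than CDAP's Dag type (all names invented):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class TraversalOrderSketch {

  // Keep only the stages that belong to the group, in topological order, so that
  // an input stage is always prepared before the stages that consume its output.
  static List<String> traversalOrder(List<String> topologicalOrder, Set<String> group) {
    List<String> order = new ArrayList<>(group.size());
    for (String stageName : topologicalOrder) {
      if (group.contains(stageName)) {
        order.add(stageName);
      }
    }
    return order;
  }

  public static void main(String[] args) {
    List<String> topo = Arrays.asList("source", "parse", "sink1", "sink2");
    Set<String> group = new HashSet<>(Arrays.asList("sink2", "parse", "sink1"));
    System.out.println(traversalOrder(topo, group));  // [parse, sink1, sink2]
  }
}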
Use of io.cdap.cdap.etl.spark.plugin.SparkPipelinePluginContext in project cdap by caskdata.
The class StreamingBatchSinkFunction, method call:
@Override
public void call(JavaRDD<T> data, Time batchTime) throws Exception {
  final long logicalStartTime = batchTime.milliseconds();
  MacroEvaluator evaluator = new DefaultMacroEvaluator(new BasicArguments(sec), logicalStartTime,
                                                       sec.getSecureStore(), sec.getServiceDiscoverer(),
                                                       sec.getNamespace());
  PluginContext pluginContext = new SparkPipelinePluginContext(sec.getPluginContext(), sec.getMetrics(),
                                                               stageSpec.isStageLoggingEnabled(),
                                                               stageSpec.isProcessTimingEnabled());
  final SparkBatchSinkFactory sinkFactory = new SparkBatchSinkFactory();
  final String stageName = stageSpec.getName();
  final BatchSink<Object, Object, Object> batchSink = pluginContext.newPluginInstance(stageName, evaluator);
  final PipelineRuntime pipelineRuntime = new SparkPipelineRuntime(sec, logicalStartTime);
  boolean isPrepared = false;
  boolean isDone = false;
  try {
    // call prepareRun() inside a transaction
    sec.execute(new TxRunnable() {
      @Override
      public void run(DatasetContext datasetContext) throws Exception {
        SparkBatchSinkContext sinkContext =
          new SparkBatchSinkContext(sinkFactory, sec, datasetContext, pipelineRuntime, stageSpec);
        batchSink.prepareRun(sinkContext);
      }
    });
    isPrepared = true;

    // write the batch, then register lineage for each output
    PluginFunctionContext pluginFunctionContext =
      new PluginFunctionContext(stageSpec, sec, pipelineRuntime.getArguments().asMap(), batchTime.milliseconds(),
                                new NoopStageStatisticsCollector());
    Set<String> outputNames = sinkFactory.writeFromRDD(
      data.flatMapToPair(new BatchSinkFunction<T, Object, Object>(pluginFunctionContext, functionCache)),
      sec, stageName);
    sec.execute(new TxRunnable() {
      @Override
      public void run(DatasetContext context) throws Exception {
        for (String outputName : outputNames) {
          ExternalDatasets.registerLineage(sec.getAdmin(), outputName, AccessType.WRITE, null,
                                           () -> context.getDataset(outputName));
        }
      }
    });
    isDone = true;

    // on success, call onRunFinish(true)
    sec.execute(new TxRunnable() {
      @Override
      public void run(DatasetContext datasetContext) throws Exception {
        SparkBatchSinkContext sinkContext =
          new SparkBatchSinkContext(sinkFactory, sec, datasetContext, pipelineRuntime, stageSpec);
        batchSink.onRunFinish(true, sinkContext);
      }
    });
  } catch (Exception e) {
    LOG.error("Error writing to sink {} for the batch for time {}.", stageName, logicalStartTime, e);
  } finally {
    // if prepareRun() succeeded but the run did not complete, call onRunFinish(false)
    if (isPrepared && !isDone) {
      sec.execute(new TxRunnable() {
        @Override
        public void run(DatasetContext datasetContext) throws Exception {
          SparkBatchSinkContext sinkContext =
            new SparkBatchSinkContext(sinkFactory, sec, datasetContext, pipelineRuntime, stageSpec);
          batchSink.onRunFinish(false, sinkContext);
        }
      });
    }
  }
}
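The isPrepared/isDone bookkeeping is what guarantees onRunFinish(false, ...) runs exactly when prepareRun() succeeded but the batch did not complete. A generic sketch of that lifecycle pattern, detached from CDAP (the Lifecycle interface and all names are invented for illustration):

public class SinkLifecycleSketch {

  // Invented interface standing in for a sink's prepareRun/run/onRunFinish lifecycle.
  interface Lifecycle {
    void prepareRun() throws Exception;
    void runBatch() throws Exception;
    void onRunFinish(boolean succeeded) throws Exception;
  }

  static void processBatch(Lifecycle sink) throws Exception {
    boolean isPrepared = false;
    boolean isDone = false;
    try {
      sink.prepareRun();
      isPrepared = true;
      sink.runBatch();
      isDone = true;
      sink.onRunFinish(true);      // success path
    } catch (Exception e) {
      System.err.println("Error writing batch: " + e);
    } finally {
      if (isPrepared && !isDone) {
        sink.onRunFinish(false);   // cleanup only if prepare succeeded but the run did not finish
      }
    }
  }

  public static void main(String[] args) throws Exception {
    processBatch(new Lifecycle() {
      @Override public void prepareRun() { System.out.println("prepare"); }
      @Override public void runBatch() throws Exception { throw new Exception("write failed"); }
      @Override public void onRunFinish(boolean succeeded) { System.out.println("onRunFinish(" + succeeded + ")"); }
    });
  }
}

Running the example prints "prepare", logs the simulated failure, and then prints "onRunFinish(false)", mirroring the failure path of the method above.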
Use of io.cdap.cdap.etl.spark.plugin.SparkPipelinePluginContext in project cdap by caskdata.
The class StreamingSparkSinkFunction, method call:
@Override
public void call(JavaRDD<T> data, Time batchTime) throws Exception {
  if (data.isEmpty()) {
    return;
  }
  final long logicalStartTime = batchTime.milliseconds();
  MacroEvaluator evaluator = new DefaultMacroEvaluator(new BasicArguments(sec), logicalStartTime,
                                                       sec.getSecureStore(), sec.getServiceDiscoverer(),
                                                       sec.getNamespace());
  final PluginContext pluginContext = new SparkPipelinePluginContext(sec.getPluginContext(), sec.getMetrics(),
                                                                     stageSpec.isStageLoggingEnabled(),
                                                                     stageSpec.isProcessTimingEnabled());
  final PipelineRuntime pipelineRuntime = new SparkPipelineRuntime(sec, batchTime.milliseconds());
  final String stageName = stageSpec.getName();
  final SparkSink<T> sparkSink = pluginContext.newPluginInstance(stageName, evaluator);
  boolean isPrepared = false;
  boolean isDone = false;
  try {
    // call prepareRun() inside a transaction
    sec.execute(new TxRunnable() {
      @Override
      public void run(DatasetContext datasetContext) throws Exception {
        SparkPluginContext context =
          new BasicSparkPluginContext(null, pipelineRuntime, stageSpec, datasetContext, sec.getAdmin());
        sparkSink.prepareRun(context);
      }
    });
    isPrepared = true;

    // count incoming records, then run the sink inside a transaction
    final SparkExecutionPluginContext sparkExecutionPluginContext =
      new SparkStreamingExecutionContext(sec, JavaSparkContext.fromSparkContext(data.rdd().context()),
                                         logicalStartTime, stageSpec);
    final JavaRDD<T> countedRDD =
      data.map(new CountingFunction<T>(stageName, sec.getMetrics(), "records.in", null)).cache();
    sec.execute(new TxRunnable() {
      @Override
      public void run(DatasetContext context) throws Exception {
        sparkSink.run(sparkExecutionPluginContext, countedRDD);
      }
    });
    isDone = true;

    // on success, call onRunFinish(true)
    sec.execute(new TxRunnable() {
      @Override
      public void run(DatasetContext datasetContext) throws Exception {
        SparkPluginContext context =
          new BasicSparkPluginContext(null, pipelineRuntime, stageSpec, datasetContext, sec.getAdmin());
        sparkSink.onRunFinish(true, context);
      }
    });
  } catch (Exception e) {
    LOG.error("Error while executing sink {} for the batch for time {}.", stageName, logicalStartTime, e);
  } finally {
    // if prepareRun() succeeded but the run did not complete, call onRunFinish(false)
    if (isPrepared && !isDone) {
      sec.execute(new TxRunnable() {
        @Override
        public void run(DatasetContext datasetContext) throws Exception {
          SparkPluginContext context =
            new BasicSparkPluginContext(null, pipelineRuntime, stageSpec, datasetContext, sec.getAdmin());
          sparkSink.onRunFinish(false, context);
        }
      });
    }
  }
}
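CountingFunction wraps each record so that the stage's "records.in" metric is incremented before the SparkSink consumes the RDD. Outside CDAP, a rough equivalent can be sketched with a plain Spark LongAccumulator; this is a stand-in for illustration, not CDAP's Metrics API, and the class name is invented:

import java.util.Arrays;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.util.LongAccumulator;

public class CountingSketch {

  public static void main(String[] args) {
    SparkConf conf = new SparkConf().setMaster("local[2]").setAppName("counting-sketch");
    try (JavaSparkContext jsc = new JavaSparkContext(conf)) {
      // stand-in for the stage's "records.in" metric
      LongAccumulator recordsIn = jsc.sc().longAccumulator("records.in");

      JavaRDD<String> data = jsc.parallelize(Arrays.asList("a", "b", "c"));

      // count each record as it flows toward the "sink", then cache so the
      // count is not inflated by recomputation
      JavaRDD<String> counted = data.map(record -> {
        recordsIn.add(1L);
        return record;
      }).cache();

      // the "sink": any action that consumes the counted RDD
      counted.foreach(record -> { /* write record somewhere */ });

      System.out.println("records.in = " + recordsIn.value());  // 3
    }
  }
}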
Use of io.cdap.cdap.etl.spark.plugin.SparkPipelinePluginContext in project cdap by caskdata.
The class JavaSparkMainWrapper, method run:
@Override
public void run(JavaSparkExecutionContext sec) throws Exception {
  String stageName = sec.getSpecification().getProperty(ExternalSparkProgram.STAGE_NAME);
  BatchPhaseSpec batchPhaseSpec = GSON.fromJson(sec.getSpecification().getProperty(Constants.PIPELINEID),
                                                BatchPhaseSpec.class);
  PipelinePluginContext pluginContext = new SparkPipelinePluginContext(sec.getPluginContext(), sec.getMetrics(),
                                                                       batchPhaseSpec.isStageLoggingEnabled(),
                                                                       batchPhaseSpec.isProcessTimingEnabled());
  Class<?> mainClass = pluginContext.loadPluginClass(stageName);
  // if it's a CDAP JavaSparkMain, instantiate it and call the run method
  if (JavaSparkMain.class.isAssignableFrom(mainClass)) {
    MacroEvaluator macroEvaluator = new DefaultMacroEvaluator(new BasicArguments(sec), sec.getLogicalStartTime(),
                                                              sec.getSecureStore(), sec.getServiceDiscoverer(),
                                                              sec.getNamespace());
    JavaSparkMain javaSparkMain = pluginContext.newPluginInstance(stageName, macroEvaluator);
    javaSparkMain.run(sec);
  } else {
    // otherwise, assume there is a 'main' method and call it
    String programArgs = getProgramArgs(sec, stageName);
    String[] args = programArgs == null
      ? RuntimeArguments.toPosixArray(sec.getRuntimeArguments())
      : programArgs.split(" ");
    final Method mainMethod = mainClass.getMethod("main", String[].class);
    final Object[] methodArgs = new Object[1];
    methodArgs[0] = args;
    Caller caller = pluginContext.getCaller(stageName);
    caller.call(new Callable<Void>() {
      @Override
      public Void call() throws Exception {
        mainMethod.invoke(null, methodArgs);
        return null;
      }
    });
  }
}
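For plugin classes that are not a JavaSparkMain, the wrapper falls back to reflectively invoking a static main(String[]). A bare-bones sketch of that reflective dispatch using only the JDK (ExampleProgram and the argument values are invented):

import java.lang.reflect.Method;

public class ReflectiveMainSketch {

  // invented example program with a conventional static main method
  public static class ExampleProgram {
    public static void main(String[] args) {
      System.out.println("ran with " + args.length + " args");
    }
  }

  public static void main(String[] args) throws Exception {
    Class<?> mainClass = ExampleProgram.class;             // stand-in for pluginContext.loadPluginClass(stageName)
    String[] programArgs = new String[] {"--input", "x"};  // stand-in for the parsed runtime arguments

    Method mainMethod = mainClass.getMethod("main", String[].class);
    // the String[] must be wrapped in an Object[] so the varargs invoke() does not
    // spread its elements into individual parameters
    Object[] methodArgs = new Object[] { programArgs };
    mainMethod.invoke(null, methodArgs);                   // null receiver: main is static
  }
}

Wrapping the argument array this way is the reason the original code builds methodArgs explicitly instead of passing args directly to invoke().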