Use of io.cdap.cdap.etl.common.DefaultMacroEvaluator in project cdap by caskdata.
The class SmartWorkflow, method destroy.
@Override
public void destroy() {
  WorkflowContext workflowContext = getContext();
  PipelineRuntime pipelineRuntime = new PipelineRuntime(workflowContext, workflowMetrics);
  // Execute the post actions only if pipeline is not running in preview mode.
  if (!workflowContext.getDataTracer(PostAction.PLUGIN_TYPE).isEnabled()) {
    for (Map.Entry<String, PostAction> endingActionEntry : postActions.entrySet()) {
      String name = endingActionEntry.getKey();
      PostAction action = endingActionEntry.getValue();
      StageSpec stageSpec = stageSpecs.get(name);
      BatchActionContext context = new WorkflowBackedActionContext(workflowContext, pipelineRuntime, stageSpec);
      try {
        action.run(context);
      } catch (Throwable t) {
        LOG.error("Error while running post action {}.", name, t);
      }
    }
  }
  Map<String, String> connectorDatasets = GSON.fromJson(
    workflowContext.getWorkflowSpecification().getProperty(Constants.CONNECTOR_DATASETS), STAGE_DATASET_MAP);
  // publish all alerts
  for (Map.Entry<String, AlertPublisher> alertPublisherEntry : alertPublishers.entrySet()) {
    String stageName = alertPublisherEntry.getKey();
    AlertPublisher alertPublisher = alertPublisherEntry.getValue();
    FileSet alertConnector = workflowContext.getDataset(connectorDatasets.get(stageName));
    try (CloseableIterator<Alert> alerts = new AlertReader(alertConnector)) {
      if (!alerts.hasNext()) {
        continue;
      }
      StageMetrics stageMetrics = new DefaultStageMetrics(workflowMetrics, stageName);
      StageSpec stageSpec = stageSpecs.get(stageName);
      AlertPublisherContext alertContext =
        new DefaultAlertPublisherContext(pipelineRuntime, stageSpec, workflowContext, workflowContext.getAdmin());
      alertPublisher.initialize(alertContext);
      TrackedIterator<Alert> trackedIterator =
        new TrackedIterator<>(alerts, stageMetrics, Constants.Metrics.RECORDS_IN);
      alertPublisher.publish(trackedIterator);
    } catch (Exception e) {
      LOG.warn("Stage {} had errors publishing alerts. Alerts may not have been published.", stageName, e);
    } finally {
      try {
        alertPublisher.destroy();
      } catch (Exception e) {
        LOG.warn("Error destroying alert publisher for stage {}", stageName, e);
      }
    }
  }
  ProgramStatus status = getContext().getState().getStatus();
  if (status == ProgramStatus.FAILED) {
    WRAPPERLOGGER.error("Pipeline '{}' failed.", getContext().getApplicationSpecification().getName());
  } else {
    WRAPPERLOGGER.info("Pipeline '{}' {}.", getContext().getApplicationSpecification().getName(),
                       status == ProgramStatus.COMPLETED ? "succeeded" : status.name().toLowerCase());
  }
  MacroEvaluator macroEvaluator = new DefaultMacroEvaluator(pipelineRuntime.getArguments(),
                                                            workflowContext.getLogicalStartTime(),
                                                            workflowContext, workflowContext,
                                                            workflowContext.getNamespace());
  // Get resolved plugin properties
  Map<String, Map<String, String>> resolvedProperties = new HashMap<>();
  for (StageSpec spec : stageSpecs.values()) {
    String stageName = spec.getName();
    resolvedProperties.put(stageName, workflowContext.getPluginProperties(stageName, macroEvaluator).getProperties());
  }
  // Add resolved plugin properties to workflow token as a JSON String
  workflowContext.getToken().put(RESOLVED_PLUGIN_PROPERTIES_MAP, GSON.toJson(resolvedProperties));
  // record only if the Workflow is successful
  if (status != ProgramStatus.COMPLETED) {
    return;
  }
  // Collect field operations from each phase
  WorkflowToken token = workflowContext.getToken();
  List<NodeValue> allNodeValues = token.getAll(Constants.FIELD_OPERATION_KEY_IN_WORKFLOW_TOKEN);
  if (allNodeValues.isEmpty()) {
    // no field lineage recorded by any stage
    return;
  }
  Map<String, List<FieldOperation>> allStageOperations = new HashMap<>();
  for (StageSpec stageSpec : stageSpecs.values()) {
    allStageOperations.put(stageSpec.getName(), new ArrayList<>());
  }
  for (NodeValue nodeValue : allNodeValues) {
    Map<String, List<FieldOperation>> stageOperations = GSON.fromJson(nodeValue.getValue().toString(),
                                                                      STAGE_OPERATIONS_MAP);
    for (Map.Entry<String, List<FieldOperation>> entry : stageOperations.entrySet()) {
      // ignore operations recorded for stages that are not part of this pipeline spec
      if (allStageOperations.containsKey(entry.getKey())) {
        allStageOperations.get(entry.getKey()).addAll(entry.getValue());
      }
    }
  }
  FieldLineageProcessor processor = new FieldLineageProcessor(spec);
  Set<Operation> processedOperations = processor.validateAndConvert(allStageOperations);
  if (!processedOperations.isEmpty()) {
    workflowContext.record(processedOperations);
  }
}
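The resolved plugin properties end up in the workflow token as a JSON string produced by Gson. Below is a minimal, self-contained sketch of that round trip using plain Java and Gson; the token key and the stage/property names are illustrative only and are not the actual value of the RESOLVED_PLUGIN_PROPERTIES_MAP constant.

import com.google.gson.Gson;
import com.google.gson.reflect.TypeToken;
import java.lang.reflect.Type;
import java.util.HashMap;
import java.util.Map;

public class ResolvedPropertiesRoundTrip {
  private static final Gson GSON = new Gson();
  private static final Type STAGE_PROPERTIES_MAP =
    new TypeToken<Map<String, Map<String, String>>>() { }.getType();

  public static void main(String[] args) {
    // Build the same shape SmartWorkflow.destroy() writes: stage name -> resolved properties.
    Map<String, Map<String, String>> resolvedProperties = new HashMap<>();
    Map<String, String> sourceProps = new HashMap<>();
    sourceProps.put("referenceName", "users");  // hypothetical resolved property
    resolvedProperties.put("FileSource", sourceProps);

    // What gets stored in the workflow token under the resolved-properties key.
    String json = GSON.toJson(resolvedProperties);

    // How a reader of that token value gets the map back.
    Map<String, Map<String, String>> readBack = GSON.fromJson(json, STAGE_PROPERTIES_MAP);
    System.out.println(readBack.get("FileSource").get("referenceName"));  // prints "users"
  }
}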
Use of io.cdap.cdap.etl.common.DefaultMacroEvaluator in project cdap by caskdata.
The class BatchSparkPipelineDriver, method run.
@Override
public void run(DatasetContext context) throws Exception {
  BatchPhaseSpec phaseSpec = GSON.fromJson(sec.getSpecification().getProperty(Constants.PIPELINEID),
                                           BatchPhaseSpec.class);
  Path configFile = sec.getLocalizationContext().getLocalFile("HydratorSpark.config").toPath();
  try (BufferedReader reader = Files.newBufferedReader(configFile, StandardCharsets.UTF_8)) {
    String object = reader.readLine();
    SparkBatchSourceSinkFactoryInfo sourceSinkInfo = GSON.fromJson(object, SparkBatchSourceSinkFactoryInfo.class);
    sourceFactory = sourceSinkInfo.getSparkBatchSourceFactory();
    sinkFactory = sourceSinkInfo.getSparkBatchSinkFactory();
    stagePartitions = sourceSinkInfo.getStagePartitions();
  }
  datasetContext = context;
  PipelinePluginContext pluginContext = new PipelinePluginContext(sec.getPluginContext(), sec.getMetrics(),
                                                                  phaseSpec.isStageLoggingEnabled(),
                                                                  phaseSpec.isProcessTimingEnabled());
  Map<String, StageStatisticsCollector> collectors = new HashMap<>();
  if (phaseSpec.pipelineContainsCondition()) {
    Iterator<StageSpec> iterator = phaseSpec.getPhase().iterator();
    while (iterator.hasNext()) {
      StageSpec spec = iterator.next();
      collectors.put(spec.getName(), new SparkStageStatisticsCollector(jsc));
    }
  }
  boolean isSuccessful = true;
  try {
    PipelinePluginInstantiator pluginInstantiator =
      new PipelinePluginInstantiator(pluginContext, sec.getMetrics(), phaseSpec, new SingleConnectorFactory());
    boolean shouldConsolidateStages = Boolean.parseBoolean(
      sec.getRuntimeArguments().getOrDefault(Constants.CONSOLIDATE_STAGES, Boolean.TRUE.toString()));
    boolean shouldCacheFunctions = Boolean.parseBoolean(
      sec.getRuntimeArguments().getOrDefault(Constants.CACHE_FUNCTIONS, Boolean.TRUE.toString()));
    boolean isPreviewEnabled = phaseSpec.getPhase().size() == 0
      || sec.getDataTracer(phaseSpec.getPhase().iterator().next().getName()).isEnabled();
    // Initialize SQL engine instance if needed.
    if (!isPreviewEnabled && phaseSpec.getSQLEngineStageSpec() != null) {
      String sqlEngineStage = SQLEngineUtils.buildStageName(phaseSpec.getSQLEngineStageSpec().getPlugin().getName());
      // Instantiate SQL engine and prepare run.
      try {
        MacroEvaluator macroEvaluator = new DefaultMacroEvaluator(new BasicArguments(sec), sec.getLogicalStartTime(),
                                                                  sec.getSecureStore(), sec.getServiceDiscoverer(),
                                                                  sec.getNamespace());
        Object instance = pluginInstantiator.newPluginInstance(sqlEngineStage, macroEvaluator);
        sqlEngineAdapter = new BatchSQLEngineAdapter((SQLEngine<?, ?, ?, ?>) instance, sec, jsc, collectors);
        sqlEngineAdapter.prepareRun();
      } catch (InstantiationException ie) {
        LOG.error("Could not create plugin instance for SQLEngine class", ie);
      } finally {
        if (sqlEngineAdapter == null) {
          LOG.warn("Could not instantiate SQLEngine instance for Transformation Pushdown");
        }
      }
    }
    runPipeline(phaseSpec, BatchSource.PLUGIN_TYPE, sec, stagePartitions, pluginInstantiator, collectors,
                sinkFactory.getUncombinableSinks(), shouldConsolidateStages, shouldCacheFunctions);
  } catch (Throwable t) {
    // Mark this execution as not successful.
    isSuccessful = false;
    // Rethrow
    throw t;
  } finally {
    updateWorkflowToken(sec.getWorkflowToken(), collectors);
    // Close SQL Engine Adapter if needed.
    if (sqlEngineAdapter != null) {
      sqlEngineAdapter.onRunFinish(isSuccessful);
      sqlEngineAdapter.close();
    }
  }
}
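Stage consolidation and function caching are toggled through runtime arguments that default to true unless explicitly set to "false". A small standalone sketch of that parsing pattern follows; the literal argument names are illustrative stand-ins for the values of Constants.CONSOLIDATE_STAGES and Constants.CACHE_FUNCTIONS, not the real keys.

import java.util.HashMap;
import java.util.Map;

public class RuntimeFlagDefaults {
  // Returns true unless the argument is explicitly set to something other than "true".
  static boolean flag(Map<String, String> runtimeArgs, String key) {
    return Boolean.parseBoolean(runtimeArgs.getOrDefault(key, Boolean.TRUE.toString()));
  }

  public static void main(String[] args) {
    Map<String, String> runtimeArgs = new HashMap<>();
    runtimeArgs.put("pipeline.consolidate.stages", "false");  // hypothetical key name

    System.out.println(flag(runtimeArgs, "pipeline.consolidate.stages"));  // false (explicitly disabled)
    System.out.println(flag(runtimeArgs, "pipeline.cache.functions"));     // true (default applies)
  }
}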
Use of io.cdap.cdap.etl.common.DefaultMacroEvaluator in project cdap by caskdata.
The class ETLSpark, method initialize.
@Override
@TransactionPolicy(TransactionControl.EXPLICIT)
public void initialize() throws Exception {
  SparkClientContext context = getContext();
  SparkConf sparkConf = new SparkConf();
  sparkConf.set("spark.speculation", "false");
  // turn off auto-broadcast by default until we better understand the implications and can set this to a
  // value that we are confident is safe.
  sparkConf.set("spark.sql.autoBroadcastJoinThreshold", "-1");
  sparkConf.set("spark.maxRemoteBlockSizeFetchToMem", String.valueOf(Integer.MAX_VALUE - 512));
  sparkConf.set("spark.network.timeout", "600s");
  // Disable yarn app retries since spark already performs retries at a task level.
  sparkConf.set("spark.yarn.maxAppAttempts", "1");
  // to make sure fields that are the same but different casing are treated as different fields in auto-joins
  // see CDAP-17024
  sparkConf.set("spark.sql.caseSensitive", "true");
  context.setSparkConf(sparkConf);
  Map<String, String> properties = context.getSpecification().getProperties();
  BatchPhaseSpec phaseSpec = GSON.fromJson(properties.get(Constants.PIPELINEID), BatchPhaseSpec.class);
  for (Map.Entry<String, String> pipelineProperty : phaseSpec.getPipelineProperties().entrySet()) {
    sparkConf.set(pipelineProperty.getKey(), pipelineProperty.getValue());
  }
  PipelineRuntime pipelineRuntime = new PipelineRuntime(context);
  MacroEvaluator evaluator = new DefaultMacroEvaluator(pipelineRuntime.getArguments(), context.getLogicalStartTime(),
                                                       context, context, context.getNamespace());
  SparkPreparer preparer = new SparkPreparer(context, context.getMetrics(), evaluator, pipelineRuntime);
  List<Finisher> finishers = preparer.prepare(phaseSpec);
  finisher = new CompositeFinisher(finishers);
}
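Because pipeline properties are applied to the same SparkConf after the engine defaults, a later set() for the same key wins. A minimal sketch of that override behavior using only the Spark API; the overriding value here is just an example, not a recommended setting.

import org.apache.spark.SparkConf;

public class SparkConfOverride {
  public static void main(String[] args) {
    SparkConf sparkConf = new SparkConf();
    // Engine default, as set in ETLSpark.initialize().
    sparkConf.set("spark.sql.autoBroadcastJoinThreshold", "-1");

    // A pipeline property with the same key, applied afterwards, replaces the default.
    sparkConf.set("spark.sql.autoBroadcastJoinThreshold", "10485760");

    System.out.println(sparkConf.get("spark.sql.autoBroadcastJoinThreshold"));  // prints "10485760"
  }
}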
Use of io.cdap.cdap.etl.common.DefaultMacroEvaluator in project cdap by caskdata.
The class MultiSinkFunction, method initializeBranchExecutors.
private void initializeBranchExecutors() {
  emitter = new DefaultEmitter<>();
  PipelinePluginInstantiator pluginInstantiator =
    new PipelinePluginInstantiator(pipelineRuntime.getPluginContext(), pipelineRuntime.getMetrics(), phaseSpec,
                                   new SingleConnectorFactory());
  MacroEvaluator macroEvaluator = new DefaultMacroEvaluator(pipelineRuntime.getArguments(),
                                                            pipelineRuntime.getLogicalStartTime(),
                                                            pipelineRuntime.getSecureStore(),
                                                            pipelineRuntime.getServiceDiscoverer(),
                                                            pipelineRuntime.getNamespace());
  executorFactory = new SparkTransformExecutorFactory(pluginInstantiator, macroEvaluator, null, collectors,
                                                      dataTracers, pipelineRuntime, emitter);
  /*
     If the dag is:

             |--> t1 --> k1
        s1 --|
             |--> k2
                   ^
        s2 --------|

     the group is t1, k1, and k2.
   */
  PipelinePhase pipelinePhase = phaseSpec.getPhase();
  branchExecutors = new HashMap<>();
  inputConnections = new HashMap<>();
  for (String groupSource : group) {
    // group "sources" are stages in the group that don't have an input from another stage in the group.
    if (Sets.difference(pipelinePhase.getStageInputs(groupSource), group).isEmpty()) {
      continue;
    }
    // get the branch by taking a subset of the pipeline starting from the "source".
    // with the example above, the two branches are t1 -> k1, and k2.
    PipelinePhase branch;
    if (pipelinePhase.getSinks().contains(groupSource)) {
      // pipelinePhase.subsetFrom() throws an exception if the new "source" is also a sink,
      // since a Dag cannot be a single node. so build it manually.
      branch = PipelinePhase.builder(pipelinePhase.getPluginTypes())
        .addStage(pipelinePhase.getStage(groupSource))
        .build();
    } else {
      branch = pipelinePhase.subsetFrom(Collections.singleton(groupSource));
    }
    try {
      branchExecutors.put(groupSource, executorFactory.create(branch));
    } catch (Exception e) {
      throw new IllegalStateException(String.format(
        "Unable to get subset of pipeline starting from stage %s. This indicates a planning error. "
          + "Please report this bug and turn off stage consolidation by setting %s to false in the runtime arguments.",
        groupSource, Constants.CONSOLIDATE_STAGES), e);
    }
    /*
       create a mapping from possible inputs to "group sources". This will help identify which incoming
       records should be sent to which branch executor.

       for example, the pipeline may look like:

                        |port a --> k1
          s --> split --|
                        |port b --> k2

       In this scenario, k1 and k2 are in the same group, so the map contains:

         { stageName: split, port: a, type: output } -> [k1]
         { stageName: split, port: b, type: output } -> [k2]

       A slightly more complicated example:

                             |--> k1
          s1 --> transform --|
                             |--> k2
                             |
                             |--> error collector --> k3

       In this scenario, k1, k2, k3, and the error collector are in the same group, so the map contains:

         { stageName: transform, type: output } -> [k1, k2]
         { stageName: transform, type: error } -> [k3]
     */
    String groupSourceType = pipelinePhase.getStage(groupSource).getPluginType();
    RecordType recordType = ErrorTransform.PLUGIN_TYPE.equals(groupSourceType) ? RecordType.ERROR : RecordType.OUTPUT;
    for (String inputStage : pipelinePhase.getStageInputs(groupSource)) {
      Map<String, StageSpec.Port> ports = pipelinePhase.getStage(inputStage).getOutputPorts();
      String port = ports.get(groupSource).getPort();
      InputInfo inputInfo = new InputInfo(inputStage, recordType, port);
      Set<String> groupSources = inputConnections.computeIfAbsent(inputInfo, key -> new HashSet<>());
      groupSources.add(groupSource);
    }
  }
}
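The group-source check above hinges on Sets.difference: a stage starts a branch only when at least one of its inputs comes from outside the group. A self-contained Guava sketch of that test, reusing the stage names from the dag comment above (the helper method and the literal input sets are illustrative, not code from MultiSinkFunction):

import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import java.util.Set;

public class GroupSourceCheck {
  // Mirrors the condition in initializeBranchExecutors(): a stage is skipped when all of its
  // inputs already come from inside the group.
  static boolean isGroupSource(Set<String> stageInputs, Set<String> group) {
    return !Sets.difference(stageInputs, group).isEmpty();
  }

  public static void main(String[] args) {
    Set<String> group = ImmutableSet.of("t1", "k1", "k2");

    // t1 is fed by s1, which is outside the group, so t1 starts a branch.
    System.out.println(isGroupSource(ImmutableSet.of("s1"), group));        // true

    // k1 is fed only by t1, which is inside the group, so it is not a group source.
    System.out.println(isGroupSource(ImmutableSet.of("t1"), group));        // false

    // k2 is fed by s1 and s2, both outside the group, so it also starts a branch.
    System.out.println(isGroupSource(ImmutableSet.of("s1", "s2"), group));  // true
  }
}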
Use of io.cdap.cdap.etl.common.DefaultMacroEvaluator in project cdap by caskdata.
The class PipelineAction, method run.
@Override
public void run() throws Exception {
  CustomActionContext context = getContext();
  Map<String, String> properties = context.getSpecification().getProperties();
  BatchPhaseSpec phaseSpec = GSON.fromJson(properties.get(Constants.PIPELINEID), BatchPhaseSpec.class);
  PipelinePhase phase = phaseSpec.getPhase();
  StageSpec stageSpec = phase.iterator().next();
  PluginContext pluginContext = new PipelinePluginContext(context, metrics, phaseSpec.isStageLoggingEnabled(),
                                                          phaseSpec.isProcessTimingEnabled());
  PipelineRuntime pipelineRuntime = new PipelineRuntime(context, metrics);
  Action action = pluginContext.newPluginInstance(stageSpec.getName(),
                                                  new DefaultMacroEvaluator(pipelineRuntime.getArguments(),
                                                                            context.getLogicalStartTime(),
                                                                            context, context,
                                                                            context.getNamespace()));
  ActionContext actionContext = new BasicActionContext(context, pipelineRuntime, stageSpec);
  if (!context.getDataTracer(stageSpec.getName()).isEnabled()) {
    action.run(actionContext);
  }
  WorkflowToken token = context.getWorkflowToken();
  if (token == null) {
    throw new IllegalStateException("WorkflowToken cannot be null when action is executed through Workflow.");
  }
  for (Map.Entry<String, String> entry : pipelineRuntime.getArguments().getAddedArguments().entrySet()) {
    token.put(entry.getKey(), entry.getValue());
  }
}
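Only arguments that the action added during its run are copied into the workflow token. The following plain-Java sketch illustrates that "added arguments" idea with maps standing in for the pipeline arguments and the workflow token; the CDAP BasicArguments and WorkflowToken classes are not used here, and the key/value names are made up.

import java.util.HashMap;
import java.util.Map;

public class AddedArgumentsPropagation {
  public static void main(String[] args) {
    // Arguments the run started with.
    Map<String, String> initialArgs = new HashMap<>();
    initialArgs.put("input.path", "/data/in");

    // Arguments visible after the action ran; it added one entry.
    Map<String, String> finalArgs = new HashMap<>(initialArgs);
    finalArgs.put("processed.count", "42");

    // Stand-in for the workflow token: publish only the newly added entries,
    // analogous to what PipelineAction.run() does with getAddedArguments().
    Map<String, String> token = new HashMap<>();
    for (Map.Entry<String, String> entry : finalArgs.entrySet()) {
      if (!initialArgs.containsKey(entry.getKey())) {
        token.put(entry.getKey(), entry.getValue());
      }
    }
    System.out.println(token);  // {processed.count=42}
  }
}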