Use of io.cdap.cdap.etl.proto.Connection in project cdap by caskdata.
From the class LineageOperationProcessorTest, method testComplexMerge.
@Test
public void testComplexMerge() {
//
//  n1----n2----|        |----n6
//              |---n5---|
//  n3----n4----|        |----n7----n8
//
// n1: read: file1 -> offset,body
// n2: parse: body -> name, address, zip
// n3: read: file2 -> offset,body
// n4: parse: body -> name, address, zip
// n5: normalize: address -> address
// n5: rename: address -> state_address
// n6: write: offset, name, address -> file3
// n7: rename: offset -> file_offset
// n8: write: file_offset, name, address, zip -> file4
Set<Connection> connections = new HashSet<>();
connections.add(new Connection("n1", "n2"));
connections.add(new Connection("n2", "n5"));
connections.add(new Connection("n3", "n4"));
connections.add(new Connection("n4", "n5"));
connections.add(new Connection("n5", "n6"));
connections.add(new Connection("n5", "n7"));
connections.add(new Connection("n7", "n8"));
EndPoint n1EndPoint = EndPoint.of("ns", "file1");
EndPoint n3EndPoint = EndPoint.of("ns", "file2");
EndPoint n6EndPoint = EndPoint.of("ns", "file3");
EndPoint n8EndPoint = EndPoint.of("ns", "file4");
Map<String, List<FieldOperation>> stageOperations = new HashMap<>();
List<FieldOperation> fieldOperations = new ArrayList<>();
fieldOperations.add(new FieldReadOperation("read", "reading file 1", n1EndPoint, "offset", "body"));
stageOperations.put("n1", fieldOperations);
fieldOperations = new ArrayList<>();
fieldOperations.add(new FieldTransformOperation("parse", "parsing file 1", Collections.singletonList("body"), "name", "address", "zip"));
stageOperations.put("n2", fieldOperations);
fieldOperations = new ArrayList<>();
fieldOperations.add(new FieldReadOperation("read", "reading file 2", n3EndPoint, "offset", "body"));
stageOperations.put("n3", fieldOperations);
fieldOperations = new ArrayList<>();
fieldOperations.add(new FieldTransformOperation("parse", "parsing file 2", Collections.singletonList("body"), "name", "address", "zip"));
stageOperations.put("n4", fieldOperations);
fieldOperations = new ArrayList<>();
fieldOperations.add(new FieldTransformOperation("normalize", "normalizing address", Collections.singletonList("address"), "address"));
fieldOperations.add(new FieldTransformOperation("rename", "renaming address to state_address", Collections.singletonList("address"), "state_address"));
stageOperations.put("n5", fieldOperations);
fieldOperations = new ArrayList<>();
fieldOperations.add(new FieldWriteOperation("write", "writing file 3", n6EndPoint, "offset", "name", "address"));
stageOperations.put("n6", fieldOperations);
fieldOperations = new ArrayList<>();
fieldOperations.add(new FieldTransformOperation("rename", "renaming offset to file_offset", Collections.singletonList("offset"), "file_offset"));
stageOperations.put("n7", fieldOperations);
fieldOperations = new ArrayList<>();
fieldOperations.add(new FieldWriteOperation("write", "writing file 4", n8EndPoint, "file_offset", "name", "address", "zip"));
stageOperations.put("n8", fieldOperations);
LineageOperationsProcessor processor = new LineageOperationsProcessor(connections, stageOperations, Collections.emptySet());
Set<Operation> processedOperations = processor.process();
Set<Operation> expectedOperations = new HashSet<>();
ReadOperation read = new ReadOperation("n1.read", "reading file 1", n1EndPoint, "offset", "body");
expectedOperations.add(read);
TransformOperation parse = new TransformOperation("n2.parse", "parsing file 1", Collections.singletonList(InputField.of("n1.read", "body")), "name", "address", "zip");
expectedOperations.add(parse);
read = new ReadOperation("n3.read", "reading file 2", n3EndPoint, "offset", "body");
expectedOperations.add(read);
parse = new TransformOperation("n4.parse", "parsing file 2", Collections.singletonList(InputField.of("n3.read", "body")), "name", "address", "zip");
expectedOperations.add(parse);
TransformOperation merge1 = new TransformOperation("n2,n4.merge.offset", "Merged stages: n2,n4", ImmutableList.of(InputField.of("n1.read", "offset"), InputField.of("n3.read", "offset")), "offset");
TransformOperation merge2 = new TransformOperation("n2,n4.merge.body", "Merged stages: n2,n4", ImmutableList.of(InputField.of("n1.read", "body"), InputField.of("n3.read", "body")), "body");
TransformOperation merge3 = new TransformOperation("n2,n4.merge.address", "Merged stages: n2,n4", ImmutableList.of(InputField.of("n2.parse", "address"), InputField.of("n4.parse", "address")), "address");
TransformOperation merge4 = new TransformOperation("n2,n4.merge.name", "Merged stages: n2,n4", ImmutableList.of(InputField.of("n2.parse", "name"), InputField.of("n4.parse", "name")), "name");
TransformOperation merge5 = new TransformOperation("n2,n4.merge.zip", "Merged stages: n2,n4", ImmutableList.of(InputField.of("n2.parse", "zip"), InputField.of("n4.parse", "zip")), "zip");
expectedOperations.add(merge1);
expectedOperations.add(merge2);
expectedOperations.add(merge3);
expectedOperations.add(merge4);
expectedOperations.add(merge5);
TransformOperation normalize = new TransformOperation("n5.normalize", "normalizing address", Collections.singletonList(InputField.of("n2,n4.merge.address", "address")), "address");
expectedOperations.add(normalize);
TransformOperation rename = new TransformOperation("n5.rename", "renaming address to state_address", Collections.singletonList(InputField.of("n5.normalize", "address")), "state_address");
expectedOperations.add(rename);
WriteOperation write = new WriteOperation("n6.write", "writing file 3", n6EndPoint, InputField.of("n2,n4.merge.offset", "offset"), InputField.of("n2,n4.merge.name", "name"), InputField.of("n5.normalize", "address"));
expectedOperations.add(write);
rename = new TransformOperation("n7.rename", "renaming offset to file_offset", Collections.singletonList(InputField.of("n2,n4.merge.offset", "offset")), "file_offset");
expectedOperations.add(rename);
write = new WriteOperation("n8.write", "writing file 4", n8EndPoint, InputField.of("n7.rename", "file_offset"), InputField.of("n2,n4.merge.name", "name"), InputField.of("n5.normalize", "address"), InputField.of("n2,n4.merge.zip", "zip"));
expectedOperations.add(write);
Assert.assertEquals(expectedOperations, processedOperations);
}
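The implicit merge operations asserted above are synthesized by the processor whenever two branches (here n2 and n4) feed the same stage; each shared field gets one operation named "<stages>.merge.<field>". As a minimal sketch (assuming only the getName() accessor on Operation), the synthesized merges could be picked out of the processed set like this:
// List the synthesized merge operations, e.g. "n2,n4.merge.address".
for (Operation op : processedOperations) {
    if (op.getName().contains(".merge.")) {
        System.out.println(op.getName());
    }
}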
Use of io.cdap.cdap.etl.proto.Connection in project cdap by caskdata.
From the class LineageOperationProcessorTest, method testAnotherSimplePipeline.
@Test
public void testAnotherSimplePipeline() {
// n1-->n2-->n3-->n4
// n1 => read: file -> (offset, body)
// n2 => parse: (body) -> (first_name, last_name)
// n3 => concat: (first_name, last_name) -> (name)
// n4 => write: (offset, name) -> another_file
Set<Connection> connections = new HashSet<>();
connections.add(new Connection("n1", "n2"));
connections.add(new Connection("n2", "n3"));
connections.add(new Connection("n3", "n4"));
Map<String, List<FieldOperation>> stageOperations = new HashMap<>();
List<FieldOperation> fieldOperations = new ArrayList<>();
fieldOperations.add(new FieldReadOperation("read", "some read", EndPoint.of("ns", "file1"), "offset", "body"));
stageOperations.put("n1", fieldOperations);
fieldOperations = new ArrayList<>();
fieldOperations.add(new FieldTransformOperation("parse", "parsing body", Collections.singletonList("body"), "first_name", "last_name"));
stageOperations.put("n2", fieldOperations);
fieldOperations = new ArrayList<>();
fieldOperations.add(new FieldTransformOperation("concat", "concatinating the fields", Arrays.asList("first_name", "last_name"), "name"));
stageOperations.put("n3", fieldOperations);
fieldOperations = new ArrayList<>();
fieldOperations.add(new FieldWriteOperation("write_op", "writing data to file", EndPoint.of("myns", "another_file"), Arrays.asList("offset", "name")));
stageOperations.put("n4", fieldOperations);
LineageOperationsProcessor processor = new LineageOperationsProcessor(connections, stageOperations, Collections.emptySet());
Set<Operation> processedOperations = processor.process();
ReadOperation read = new ReadOperation("n1.read", "some read", EndPoint.of("ns", "file1"), "offset", "body");
TransformOperation parse = new TransformOperation("n2.parse", "parsing body", Collections.singletonList(InputField.of("n1.read", "body")), "first_name", "last_name");
TransformOperation concat = new TransformOperation("n3.concat", "concatenating the fields", Arrays.asList(InputField.of("n2.parse", "first_name"), InputField.of("n2.parse", "last_name")), "name");
WriteOperation write = new WriteOperation("n4.write_op", "writing data to file", EndPoint.of("myns", "another_file"), Arrays.asList(InputField.of("n1.read", "offset"), InputField.of("n3.concat", "name")));
List<Operation> expectedOperations = new ArrayList<>();
expectedOperations.add(parse);
expectedOperations.add(concat);
expectedOperations.add(read);
expectedOperations.add(write);
Assert.assertEquals(new FieldLineageInfo(expectedOperations), new FieldLineageInfo(processedOperations));
}
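Because the expected operations are wrapped in FieldLineageInfo before the comparison, the order in which they are added to the list does not matter. A set-based assertion would be a roughly equivalent check, reusing the same variables as above (a sketch only):
// Compare as sets: operation order is not significant.
Set<Operation> expected = new HashSet<>(Arrays.asList(read, parse, concat, write));
Assert.assertEquals(expected, processedOperations);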
Use of io.cdap.cdap.etl.proto.Connection in project cdap by caskdata.
From the class Dag, method subsetFrom.
/**
* Return a subset of this dag starting from the specified stages, without going past any node in stopNodes.
* This is equivalent to taking the nodes from {@link #accessibleFrom(Set, Set)} and building a dag from them.
*
* @param stages the stages to start at
* @param stopNodes set of nodes to stop traversal on
* @return a dag created from the nodes accessible from the specified stages
*/
public Dag subsetFrom(Set<String> stages, Set<String> stopNodes) {
Set<String> nodes = accessibleFrom(stages, stopNodes);
Set<Connection> connections = new HashSet<>();
for (String node : nodes) {
for (String outputNode : outgoingConnections.get(node)) {
if (nodes.contains(outputNode)) {
connections.add(new Connection(node, outputNode));
}
}
}
return new Dag(connections);
}
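A brief usage sketch of subsetFrom, building the Dag directly from Connection objects as the tests above do (the stage names are illustrative and ImmutableSet is Guava's):
// Linear dag: n1 -> n2 -> n3 -> n4 -> n5.
Dag dag = new Dag(ImmutableSet.of(
    new Connection("n1", "n2"),
    new Connection("n2", "n3"),
    new Connection("n3", "n4"),
    new Connection("n4", "n5")));
// Subset reachable from n2, stopping at n4: the stop node is reached but not traversed past,
// so n2->n3 and n3->n4 are kept while n4->n5 is dropped.
Dag subset = dag.subsetFrom(ImmutableSet.of("n2"), ImmutableSet.of("n4"));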
Use of io.cdap.cdap.etl.proto.Connection in project cdap by caskdata.
From the class PipelinePlan, method getConditionPhaseBranches.
/**
* @return Conditions along with their phase connections
*/
public Map<String, ConditionBranches> getConditionPhaseBranches() {
Map<String, ConditionBranches> conditionPhaseConnections = new HashMap<>();
for (Connection connection : phaseConnections) {
if (connection.getCondition() == null) {
continue;
}
if (!conditionPhaseConnections.containsKey(connection.getFrom())) {
conditionPhaseConnections.put(connection.getFrom(), new ConditionBranches(null, null));
}
ConditionBranches branches = conditionPhaseConnections.get(connection.getFrom());
String trueOutput;
String falseOutput;
if (connection.getCondition()) {
trueOutput = connection.getTo();
falseOutput = branches.getFalseOutput();
} else {
trueOutput = branches.getTrueOutput();
falseOutput = connection.getTo();
}
conditionPhaseConnections.put(connection.getFrom(), new ConditionBranches(trueOutput, falseOutput));
}
return conditionPhaseConnections;
}
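A short consumption sketch, assuming a PipelinePlan instance named plan (the variable name is illustrative):
Map<String, ConditionBranches> branches = plan.getConditionPhaseBranches();
for (Map.Entry<String, ConditionBranches> entry : branches.entrySet()) {
    ConditionBranches b = entry.getValue();
    // Either output may be null if only one branch of the condition is connected.
    System.out.println(entry.getKey() + " -> true: " + b.getTrueOutput() + ", false: " + b.getFalseOutput());
}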
Use of io.cdap.cdap.etl.proto.Connection in project cdap by caskdata.
From the class PipelineSpecGenerator, method validateConfig.
/**
* Validate that this is a valid pipeline. A valid pipeline has the following properties:
*
* All stages in the pipeline have a unique name.
* Source stages have at least one output and no inputs.
* Sink stages have at least one input and no outputs.
* There are no cycles in the pipeline.
* All inputs into a stage have the same schema.
* ErrorTransforms only have BatchSource, Transform, or BatchAggregator as input stages.
* AlertPublishers have at least one input, no outputs, and no SparkSink or BatchSink stages as input.
* Action stages can only be at the start or end of the pipeline.
* Condition stages have at most 2 outputs. Each stage on a condition's output branch has at most a single input.
*
* Returns the stages in the order they should be configured, ensuring that all input stages are configured
* before the stages that consume their output.
*
* @param config the user provided configuration
* @return the order to configure the stages in
* @throws IllegalArgumentException if the pipeline is invalid
*/
protected ValidatedPipeline validateConfig(ETLConfig config) {
config.validate();
if (config.getStages().isEmpty()) {
throw new IllegalArgumentException("A pipeline must contain at least one stage.");
}
Set<String> actionStages = new HashSet<>();
Set<String> conditionStages = new HashSet<>();
Map<String, String> stageTypes = new HashMap<>();
// check stage name uniqueness
Set<String> stageNames = new HashSet<>();
for (ETLStage stage : config.getStages()) {
if (!stageNames.add(stage.getName())) {
throw new IllegalArgumentException(String.format("Invalid pipeline. Multiple stages are named %s. Please ensure all stage names are unique", stage.getName()));
}
// if stage is Action stage, add it to the Action stage set
if (isAction(stage.getPlugin().getType())) {
actionStages.add(stage.getName());
}
// if the stage is a condition, add it to the condition stage set
if (stage.getPlugin().getType().equals(Condition.PLUGIN_TYPE)) {
conditionStages.add(stage.getName());
}
stageTypes.put(stage.getName(), stage.getPlugin().getType());
}
// check that the from and to are names of actual stages
// also check that conditions have at most 2 outgoing connections, each labeled either true or
// false, and that no label is used more than once
Map<String, Boolean> conditionBranch = new HashMap<>();
for (Connection connection : config.getConnections()) {
if (!stageNames.contains(connection.getFrom())) {
throw new IllegalArgumentException(String.format("Invalid connection %s. %s is not a stage.", connection, connection.getFrom()));
}
if (!stageNames.contains(connection.getTo())) {
throw new IllegalArgumentException(String.format("Invalid connection %s. %s is not a stage.", connection, connection.getTo()));
}
if (conditionStages.contains(connection.getFrom())) {
if (connection.getCondition() == null) {
String msg = String.format("For condition stage %s, the connection %s is not marked with either " + "'true' or 'false'.", connection.getFrom(), connection);
throw new IllegalArgumentException(msg);
}
// check whether the same label (true or false) is used by more than one outgoing connection of this condition
if (conditionBranch.containsKey(connection.getFrom()) && connection.getCondition().equals(conditionBranch.get(connection.getFrom()))) {
String msg = String.format("For condition stage '%s', more than one outgoing connections are marked as %s.", connection.getFrom(), connection.getCondition());
throw new IllegalArgumentException(msg);
}
conditionBranch.put(connection.getFrom(), connection.getCondition());
}
}
List<ETLStage> traversalOrder = new ArrayList<>(stageNames.size());
// can only have empty connections if the pipeline consists of a single action.
if (config.getConnections().isEmpty()) {
if (actionStages.size() == 1 && stageNames.size() == 1) {
traversalOrder.add(config.getStages().iterator().next());
return new ValidatedPipeline(traversalOrder, config);
} else {
throw new IllegalArgumentException("Invalid pipeline. There are no connections between stages. " + "This is only allowed if the pipeline consists of a single action plugin.");
}
}
Dag dag = new Dag(config.getConnections());
Set<String> controlStages = Sets.union(actionStages, conditionStages);
Map<String, ETLStage> stages = new HashMap<>();
for (ETLStage stage : config.getStages()) {
String stageName = stage.getName();
Set<String> stageInputs = dag.getNodeInputs(stageName);
Set<String> stageOutputs = dag.getNodeOutputs(stageName);
String stageType = stage.getPlugin().getType();
boolean isSource = isSource(stageType);
boolean isSink = isSink(stageType);
// check source plugins are sources in the dag
if (isSource) {
if (!stageInputs.isEmpty() && !controlStages.containsAll(stageInputs)) {
throw new IllegalArgumentException(String.format("%s %s has incoming connections from %s. %s stages cannot have any incoming connections.", stageType, stageName, Joiner.on(',').join(stageInputs), stageType));
}
// check that source plugins are not present after any non-condition/action stage
Set<String> parents = dag.parentsOf(stageName);
Set<String> nonControlParents = Sets.difference(parents, controlStages);
if (nonControlParents.size() > 1) {
// for a valid source, nonControlParents should contain only the stage itself
throw new IllegalArgumentException(String.format("%s %s is invalid. %s stages can only be placed at the start of the pipeline.", stageType, stageName, stageType));
}
} else if (isSink) {
if (!stageOutputs.isEmpty() && !controlStages.containsAll(stageOutputs)) {
throw new IllegalArgumentException(String.format("%s %s has outgoing connections to %s. %s stages cannot have any outgoing connections.", stageType, stageName, Joiner.on(',').join(stageOutputs), stageType));
}
} else if (ErrorTransform.PLUGIN_TYPE.equals(stageType)) {
for (String inputStage : stageInputs) {
String inputType = stageTypes.get(inputStage);
if (!VALID_ERROR_INPUTS.contains(inputType)) {
throw new IllegalArgumentException(String.format("ErrorTransform %s cannot have stage %s of type %s as input. Only %s stages can emit errors.", stageName, inputStage, inputType, Joiner.on(',').join(VALID_ERROR_INPUTS)));
}
}
}
boolean isAction = isAction(stageType);
if (!isAction && !stageType.equals(Condition.PLUGIN_TYPE) && !isSource && stageInputs.isEmpty()) {
throw new IllegalArgumentException(String.format("Stage %s is unreachable, it has no incoming connections.", stageName));
}
if (!isAction && !isSink && stageOutputs.isEmpty()) {
throw new IllegalArgumentException(String.format("Stage %s is a dead end, it has no outgoing connections.", stageName));
}
stages.put(stageName, stage);
}
// make sure actions are not in the middle of the pipeline -- only at the start and/or end
for (String actionStage : actionStages) {
Set<String> actionParents = dag.parentsOf(actionStage);
Set<String> actionChildren = dag.accessibleFrom(actionStage);
Set<String> nonControlParents = Sets.difference(actionParents, controlStages);
Set<String> nonControlChildren = Sets.difference(actionChildren, controlStages);
if (!nonControlChildren.isEmpty() && !nonControlParents.isEmpty()) {
throw new IllegalArgumentException(String.format("Action stage '%s' is invalid. Actions can only be placed at the start or end of the pipeline.", actionStage));
}
}
validateConditionBranches(conditionStages, dag);
for (String stageName : dag.getTopologicalOrder()) {
traversalOrder.add(stages.get(stageName));
}
return new ValidatedPipeline(traversalOrder, config);
}
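The condition-labeling rules enforced above can be illustrated with a small connection set. This is a sketch only; the three-argument Connection constructor carrying the Boolean label is assumed from the connection.getCondition() calls, not taken from the source:
Set<Connection> connections = new HashSet<>();
connections.add(new Connection("source", "cond"));
connections.add(new Connection("cond", "sinkA", true));   // true branch of the condition
connections.add(new Connection("cond", "sinkB", false));  // false branch of the condition
// validateConfig rejects an unlabeled connection out of "cond", and also rejects two
// outgoing connections from "cond" that carry the same label.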