use of io.cdap.cdap.etl.api.lineage.field.FieldReadOperation in project cdap by caskdata.
the class LineageOperationsProcessorTest method testMergeOperationsNonRepeat.
@Test
public void testMergeOperationsNonRepeat() {
// n1 -> n3 ----
//              |---- n5
// n2 -> n4 ----
// operations (n1) -> (id, name)
//            (n2) -> (body, offset)
//            (n3.id) -> id
//            (n3.name) -> name
//            (n4.body) -> (id, name)
//            (n5) -> (id, name)
Set<Connection> connections = new HashSet<>();
connections.add(new Connection("n1", "n3"));
connections.add(new Connection("n3", "n5"));
connections.add(new Connection("n2", "n4"));
connections.add(new Connection("n4", "n5"));
EndPoint src1 = EndPoint.of("default", "n1");
EndPoint src2 = EndPoint.of("default", "n2");
EndPoint dest = EndPoint.of("default", "n5");
Map<String, List<FieldOperation>> stageOperations = new HashMap<>();
stageOperations.put("n1", Collections.singletonList(new FieldReadOperation("read1", "read description", src1, "id", "name")));
stageOperations.put("n2", Collections.singletonList(new FieldReadOperation("read2", "read description", src2, "body", "offset")));
List<FieldOperation> n3Operations = stageOperations.computeIfAbsent("n3", k -> new ArrayList<>());
n3Operations.add(new FieldTransformOperation("identity1", "identity", Collections.singletonList("id"), "id"));
n3Operations.add(new FieldTransformOperation("identity2", "identity", Collections.singletonList("name"), "name"));
stageOperations.put("n4", Collections.singletonList(new FieldTransformOperation("generate", "generate", Collections.singletonList("body"), "id", "name")));
stageOperations.put("n5", Collections.singletonList(new FieldWriteOperation("write", "write", dest, "id", "name")));
LineageOperationsProcessor processor = new LineageOperationsProcessor(connections, stageOperations, Collections.emptySet());
Set<Operation> expectedOperations = new HashSet<>();
expectedOperations.add(new ReadOperation("n1.read1", "read description", src1, "id", "name"));
expectedOperations.add(new ReadOperation("n2.read2", "read description", src2, "body", "offset"));
expectedOperations.add(new TransformOperation("n3.identity1", "identity", Collections.singletonList(InputField.of("n1.read1", "id")), "id"));
expectedOperations.add(new TransformOperation("n3.identity2", "identity", Collections.singletonList(InputField.of("n1.read1", "name")), "name"));
expectedOperations.add(new TransformOperation("n4.generate", "generate", Collections.singletonList(InputField.of("n2.read2", "body")), "id", "name"));
expectedOperations.add(new TransformOperation("n3,n4.merge.id", "Merged stages: n3,n4", Arrays.asList(InputField.of("n3.identity1", "id"), InputField.of("n4.generate", "id")), "id"));
expectedOperations.add(new TransformOperation("n3,n4.merge.name", "Merged stages: n3,n4", Arrays.asList(InputField.of("n3.identity2", "name"), InputField.of("n4.generate", "name")), "name"));
expectedOperations.add(new TransformOperation("n3,n4.merge.body", "Merged stages: n3,n4", Collections.singletonList(InputField.of("n2.read2", "body")), "body"));
expectedOperations.add(new TransformOperation("n3,n4.merge.offset", "Merged stages: n3,n4", Collections.singletonList(InputField.of("n2.read2", "offset")), "offset"));
expectedOperations.add(new WriteOperation("n5.write", "write", dest, Arrays.asList(InputField.of("n3,n4.merge.id", "id"), InputField.of("n3,n4.merge.name", "name"))));
Set<Operation> process = processor.process();
Assert.assertEquals(expectedOperations, process);
}
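By contrast, a stage with a single input does not trigger any merge operations; each downstream operation simply references the origin recorded by its upstream stage. The following is a minimal, hypothetical sketch in the same style, assuming the constructors and the stageName.operationName naming shown above; the stage names (n1, n2) and the fields (id, name) are made up for illustration.
// Hypothetical minimal pipeline: n1 -> n2; single-input stages, so no merge operations are generated
Set<Connection> connections = new HashSet<>();
connections.add(new Connection("n1", "n2"));
EndPoint source = EndPoint.of("default", "n1");
EndPoint sink = EndPoint.of("default", "n2");
Map<String, List<FieldOperation>> stageOperations = new HashMap<>();
stageOperations.put("n1", Collections.singletonList(new FieldReadOperation("read", "read description", source, "id", "name")));
stageOperations.put("n2", Collections.singletonList(new FieldWriteOperation("write", "write description", sink, "id", "name")));
LineageOperationsProcessor processor = new LineageOperationsProcessor(connections, stageOperations, Collections.emptySet());
Set<Operation> expected = new HashSet<>();
expected.add(new ReadOperation("n1.read", "read description", source, "id", "name"));
// the write's input fields point back at the read operation that produced them
expected.add(new WriteOperation("n2.write", "write description", sink, Arrays.asList(InputField.of("n1.read", "id"), InputField.of("n1.read", "name"))));
Assert.assertEquals(expected, processor.process());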
use of io.cdap.cdap.etl.api.lineage.field.FieldReadOperation in project cdap by caskdata.
the class StageOperationsValidatorTest method testInvalidInputs.
@Test
public void testInvalidInputs() {
FieldReadOperation read = new FieldReadOperation("read", "reading data", EndPoint.of("default", "file"), "offset", "body");
StageOperationsValidator.Builder builder = new StageOperationsValidator.Builder(Collections.singletonList(read));
builder.addStageOutputs(Arrays.asList("offset", "body"));
StageOperationsValidator stageOperationsValidator = builder.build();
stageOperationsValidator.validate();
Assert.assertNull(stageOperationsValidator.getStageInvalids());
FieldTransformOperation transform = new FieldTransformOperation("parse", "parsing data", Collections.singletonList("body"), Arrays.asList("name", "address", "zip"));
builder = new StageOperationsValidator.Builder(Collections.singletonList(transform));
builder.addStageInputs(Arrays.asList("offset", "body"));
builder.addStageOutputs(Arrays.asList("name", "address", "zip"));
stageOperationsValidator = builder.build();
stageOperationsValidator.validate();
Assert.assertNull(stageOperationsValidator.getStageInvalids());
FieldWriteOperation write = new FieldWriteOperation("write", "writing data", EndPoint.of("default", "file2"), "name", "address", "zip");
builder = new StageOperationsValidator.Builder(Collections.singletonList(write));
builder.addStageInputs(Arrays.asList("address", "zip"));
stageOperationsValidator = builder.build();
stageOperationsValidator.validate();
Assert.assertNotNull(stageOperationsValidator.getStageInvalids());
InvalidFieldOperations invalidFieldOperations = stageOperationsValidator.getStageInvalids();
Assert.assertEquals(1, invalidFieldOperations.getInvalidInputs().size());
Map<String, List<String>> invalidInputs = new HashMap<>();
invalidInputs.put("name", Collections.singletonList("write"));
Assert.assertEquals(invalidInputs, invalidFieldOperations.getInvalidInputs());
Assert.assertEquals(0, invalidFieldOperations.getInvalidOutputs().size());
// name is provided by the output of the operation that precedes the write
List<FieldOperation> pipelineOperations = new ArrayList<>();
pipelineOperations.add(new FieldTransformOperation("name_lookup", "generating name", Collections.singletonList("address"), "name"));
pipelineOperations.add(new FieldWriteOperation("write", "writing data", EndPoint.of("default", "file2"), "name", "address", "zip"));
builder = new StageOperationsValidator.Builder(pipelineOperations);
builder.addStageInputs(Arrays.asList("address", "zip"));
stageOperationsValidator = builder.build();
stageOperationsValidator.validate();
Assert.assertNull(stageOperationsValidator.getStageInvalids());
}
use of io.cdap.cdap.etl.api.lineage.field.FieldReadOperation in project cdap by caskdata.
the class LineageOperationsProcessor method computeProcessedOperations.
/**
 * Converts all the stage operations to platform operations. This method goes through the pipeline in
 * topological order, so that a later stage always knows the origins of its operations.
 * If a stage other than a joiner has multiple inputs, implicit merge operations are generated so that further
 * stages can look up the origins.
 * For joiners, the input field names should already contain the previous stage name.
 *
 * @return a {@link Map} containing the operations, keyed by operation name, with the corresponding
 * platform {@link Operation} as value
 */
private Map<String, Operation> computeProcessedOperations() {
Map<String, Operation> processedOperations = new HashMap<>();
for (String stageName : topologicalOrder) {
Set<String> stageInputs = stageDag.getNodeInputs(stageName);
// if the stage has multiple inputs and it is not a joiner, compute the merge operations
if (stageInputs.size() > 1 && !noMergeRequiredStages.contains(stageName)) {
addMergeOperation(stageInputs, processedOperations);
}
List<FieldOperation> fieldOperations = stageOperations.get(stageName);
for (FieldOperation fieldOperation : fieldOperations) {
Operation newOperation = null;
String newOperationName = prefixedName(stageName, fieldOperation.getName());
Set<String> currentOperationOutputs = new LinkedHashSet<>();
switch(fieldOperation.getType()) {
case READ:
FieldReadOperation read = (FieldReadOperation) fieldOperation;
newOperation = new ReadOperation(newOperationName, read.getDescription(), read.getSource(), read.getOutputFields());
currentOperationOutputs.addAll(read.getOutputFields());
break;
case TRANSFORM:
FieldTransformOperation transform = (FieldTransformOperation) fieldOperation;
List<InputField> inputFields = createInputFields(transform.getInputFields(), stageName, processedOperations);
newOperation = new TransformOperation(newOperationName, transform.getDescription(), inputFields, transform.getOutputFields());
currentOperationOutputs.addAll(transform.getOutputFields());
break;
case WRITE:
FieldWriteOperation write = (FieldWriteOperation) fieldOperation;
inputFields = createInputFields(write.getInputFields(), stageName, processedOperations);
newOperation = new WriteOperation(newOperationName, write.getDescription(), write.getSink(), inputFields);
break;
}
for (String currentOperationOutput : currentOperationOutputs) {
// For all fields output by the current operation, record the operation name as the origin.
// If the field appears again in the output of a later operation belonging to the same stage,
// its origin will be updated to that newer operation.
stageOutputsWithOrigins.get(stageName).put(currentOperationOutput, newOperation.getName());
}
processedOperations.put(newOperation.getName(), newOperation);
}
}
return processedOperations;
}
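The operation names produced by this method follow the stageName.operationName convention asserted in the test above, and the merge operations generated for a multi-input stage appear to be named after the parent stages plus the field (e.g. n3,n4.merge.id, described as "Merged stages: n3,n4"). The helpers involved (prefixedName, addMergeOperation) are private, so the following is only a hypothetical sketch of that naming, inferred from the expected operations rather than taken from the implementation.
// Hypothetical sketch of the naming convention implied by the expected operations in the test above;
// the real prefixedName/addMergeOperation helpers are private and may differ in detail.
private static String prefixedName(String stageName, String operationName) {
return stageName + "." + operationName; // e.g. "n1.read1"
}
private static String mergeOperationName(List<String> parentStages, String field) {
// e.g. parent stages [n3, n4] and field "id" -> "n3,n4.merge.id"
return String.join(",", parentStages) + ".merge." + field;
}
private static String mergeDescription(List<String> parentStages) {
return "Merged stages: " + String.join(",", parentStages); // matches the description asserted in the test
}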
use of io.cdap.cdap.etl.api.lineage.field.FieldReadOperation in project cdap by caskdata.
the class StageOperationsValidator method validate.
/**
* Validate the inputs and outputs for a stage.
*/
void validate() {
// Fields input to the stage are valid
Set<String> validInputsSoFar = new HashSet<>(stageInputs);
// Map of field name to the list of operations that generated that field.
// Map will contain fields that are yet to be validated
Map<String, List<FieldOperation>> unusedOutputs = new HashMap<>();
// Map of field name to the list of operations that generated that field.
// Map will contain fields that are redundant
// For example, if the following operations are recorded by the stage:
//
// OP1: [a, b] -> [d]
// OP2: [b] -> [d]
// OP3: [d] -> [e]
//
// the output d of OP1 is redundant, since OP3 will always read the d generated by OP2,
// so the following map will contain d -> [OP1]
Map<String, List<FieldOperation>> redundantOutputs = new HashMap<>();
for (FieldOperation pipelineOperation : operations) {
switch(pipelineOperation.getType()) {
case READ:
FieldReadOperation read = (FieldReadOperation) pipelineOperation;
updateInvalidOutputs(Collections.emptyList(), unusedOutputs, redundantOutputs);
validInputsSoFar.addAll(read.getOutputFields());
for (String field : read.getOutputFields()) {
List<FieldOperation> origins = unusedOutputs.computeIfAbsent(field, k -> new ArrayList<>());
origins.add(pipelineOperation);
}
break;
case TRANSFORM:
FieldTransformOperation transform = (FieldTransformOperation) pipelineOperation;
// operations with no input fields or no output fields have no effect; skip them
if (transform.getInputFields().isEmpty() || transform.getOutputFields().isEmpty()) {
continue;
}
validateInputs(pipelineOperation.getName(), transform.getInputFields(), validInputsSoFar);
updateInvalidOutputs(transform.getInputFields(), unusedOutputs, redundantOutputs);
validInputsSoFar.addAll(transform.getOutputFields());
for (String field : transform.getOutputFields()) {
List<FieldOperation> origins = unusedOutputs.computeIfAbsent(field, k -> new ArrayList<>());
origins.add(pipelineOperation);
}
break;
case WRITE:
FieldWriteOperation write = (FieldWriteOperation) pipelineOperation;
validateInputs(pipelineOperation.getName(), write.getInputFields(), validInputsSoFar);
updateInvalidOutputs(write.getInputFields(), unusedOutputs, redundantOutputs);
break;
}
}
// At this point the unusedOutputs map should only contain, as keys, those fields which are not used
// as an input by any operation in the stage. However, those fields can still be part of the output schema.
// We want to remove such keys that are part of the output schema as well.
// We cannot simply do "unusedOutputs.removeAll(stageInputOutput.getOutputs())".
// Consider the following case, assuming d is part of the output schema:
// OP1: [a, b] -> [d]
// OP2: [b] -> [d]
// Here the output d from OP1 is redundant, since the d in the output schema will always come from OP2.
// However, d will not be in the redundantOutputs map, as we only record redundant fields if they
// appear as the input of some operation. Such redundancy should cause validation checks to fail.
Iterator<Map.Entry<String, List<FieldOperation>>> iterator = unusedOutputs.entrySet().iterator();
while (iterator.hasNext()) {
Map.Entry<String, List<FieldOperation>> next = iterator.next();
String field = next.getKey();
List<FieldOperation> origins = next.getValue();
if (origins.size() > 1) {
List<FieldOperation> operations = redundantOutputs.computeIfAbsent(field, k -> new ArrayList<>());
// except the last origin, all others are redundant
operations.addAll(origins.subList(0, origins.size() - 1));
}
// Whether or not this field is in the output schema of the stage, it is valid.
// For example, a Joiner may join two datasets D1 and D2 on the join keys D1.K1 and D2.K2, and
// decide to drop the join keys from the output schema. The operation
// [D1.K1, D2.K2] -> [K1, K2] is valid even though K1 and K2 are not in the output schema.
iterator.remove();
}
this.invalidOutputs.putAll(unusedOutputs.entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().stream().map(FieldOperation::getName).collect(Collectors.toList()))));
this.redundantOutputs.putAll(redundantOutputs.entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().stream().map(FieldOperation::getName).collect(Collectors.toList()))));
}
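The redundant-output scenario sketched in the comments above (OP1: [a, b] -> [d], OP2: [b] -> [d], OP3: [d] -> [e]) can be reproduced directly with the builder shown in the earlier test. The following is a hypothetical sketch using that same Builder/validate API; the operation names and fields simply mirror the comment.
// Hypothetical stage reproducing the comment's example: OP1 and OP2 both produce d, OP3 consumes d.
List<FieldOperation> ops = new ArrayList<>();
ops.add(new FieldTransformOperation("OP1", "combine a and b", Arrays.asList("a", "b"), "d"));
ops.add(new FieldTransformOperation("OP2", "derive d from b", Collections.singletonList("b"), "d"));
ops.add(new FieldTransformOperation("OP3", "rename d to e", Collections.singletonList("d"), "e"));
StageOperationsValidator.Builder builder = new StageOperationsValidator.Builder(ops);
builder.addStageInputs(Arrays.asList("a", "b"));
builder.addStageOutputs(Collections.singletonList("e"));
StageOperationsValidator validator = builder.build();
validator.validate();
// Per the comments above, OP3 always reads the d generated by OP2 (the latest origin),
// so the d produced by OP1 is tracked as redundant; the inputs themselves are all valid.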