use of io.cdap.cdap.etl.api.lineage.field.FieldTransformOperation in project cdap by cdapio.
the class IdentityTransform method prepareRun.
@Override
public void prepareRun(StageSubmitterContext context) throws Exception {
  super.prepareRun(context);
  Schema schema = context.getInputSchema();
  if (schema != null && schema.getFields() != null) {
    schema.getFields().stream()
      .map(Schema.Field::getName)
      .collect(Collectors.toList())
      .forEach(field -> context.record(Collections.singletonList(
        new FieldTransformOperation("Identity transform " + field, "Identity transform",
                                    Collections.singletonList(field), field))));
  }
}
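As a point of reference, here is a minimal, hypothetical sketch (not taken from the CDAP sources) of recording a single FieldTransformOperation from a transform plugin's prepareRun, using the same context.record call as above; the field names first_name, last_name, and full_name are made up for illustration.

@Override
public void prepareRun(StageSubmitterContext context) throws Exception {
  super.prepareRun(context);
  // Declare that the output field "full_name" is derived from the input fields
  // "first_name" and "last_name" (hypothetical field names).
  context.record(Collections.singletonList(
    new FieldTransformOperation("Concatenate names", "Concatenate first and last name",
                                Arrays.asList("first_name", "last_name"), "full_name")));
}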
use of io.cdap.cdap.etl.api.lineage.field.FieldTransformOperation in project cdap by cdapio.
the class FieldLineageProcessor method validateAndConvert.
public Set<Operation> validateAndConvert(Map<String, List<FieldOperation>> allStageOperations) {
  Map<String, List<FieldOperation>> allOperations = new HashMap<>(allStageOperations);
  // Set of stages for which no implicit merge operation is required even if the
  // stage has multiple inputs, for example join stages
  Set<String> noMergeRequiredStages = new HashSet<>();
  for (StageSpec stageSpec : pipelineSpec.getStages()) {
    if (BatchJoiner.PLUGIN_TYPE.equals(stageSpec.getPlugin().getType())) {
      noMergeRequiredStages.add(stageSpec.getName());
    }
  }
  // validate the stage operations
  Map<String, InvalidFieldOperations> stageInvalids = new HashMap<>();
  Map<String, Map<String, List<String>>> stageRedundants = new HashMap<>();
  for (StageSpec stageSpec : pipelineSpec.getStages()) {
    Map<String, Schema> inputSchemas = stageSpec.getInputSchemas();
    // TODO: CDAP-16428 populate the schema if macro is enabled to avoid this
    if (inputSchemas == null) {
      LOG.warn("Field lineage will not be recorded since the input schema is not set. ");
      return Collections.emptySet();
    }
    // If the current stage is a joiner, add the fields as <inputStageName>.<fieldName>
    List<String> stageInputs = new ArrayList<>();
    List<String> stageOutputs = new ArrayList<>();
    if (BatchJoiner.PLUGIN_TYPE.equals(stageSpec.getPlugin().getType())) {
      for (Map.Entry<String, Schema> entry : inputSchemas.entrySet()) {
        Schema schema = entry.getValue();
        if (schema != null && schema.getFields() != null) {
          stageInputs.addAll(schema.getFields().stream()
                               .map(field -> entry.getKey() + "." + field.getName())
                               .collect(Collectors.toList()));
        }
      }
    } else {
      for (Map.Entry<String, Schema> entry : inputSchemas.entrySet()) {
        Schema schema = entry.getValue();
        if (schema != null && schema.getFields() != null) {
          stageInputs.addAll(schema.getFields().stream()
                               .map(Schema.Field::getName)
                               .collect(Collectors.toList()));
        }
      }
    }
    Schema outputSchema = stageSpec.getOutputSchema();
    if (outputSchema != null && outputSchema.getFields() != null) {
      stageOutputs.addAll(outputSchema.getFields().stream()
                            .map(Schema.Field::getName)
                            .collect(Collectors.toList()));
    }
    String stageName = stageSpec.getName();
    // only auto-generate operations for stages that have both input and output schemas
    if (!stageInputs.isEmpty() && !stageOutputs.isEmpty()) {
      allOperations.compute(stageName, (stage, fieldOperations) -> {
        // if the stage recorded no operations, generate a default transform covering all fields
        if (fieldOperations == null || fieldOperations.isEmpty()) {
          return Collections.singletonList(
            new FieldTransformOperation("Transform", "", stageInputs, stageOutputs));
        }
        return fieldOperations;
      });
    }
    List<FieldOperation> fieldOperations =
      allOperations.computeIfAbsent(stageName, stage -> Collections.emptyList());
    StageOperationsValidator.Builder builder = new StageOperationsValidator.Builder(fieldOperations);
    builder.addStageInputs(stageInputs);
    builder.addStageOutputs(stageOutputs);
    StageOperationsValidator stageOperationsValidator = builder.build();
    stageOperationsValidator.validate();
    LOG.trace("Stage Name: {}", stageName);
    LOG.trace("Stage Operations {}", GSON.toJson(fieldOperations));
    LOG.trace("Stage inputs: {}", stageInputs);
    LOG.trace("Stage outputs: {}", stageOutputs);
    InvalidFieldOperations invalidFieldOperations = stageOperationsValidator.getStageInvalids();
    if (invalidFieldOperations != null) {
      stageInvalids.put(stageName, invalidFieldOperations);
    }
    if (!stageOperationsValidator.getRedundantOutputs().isEmpty()) {
      stageRedundants.put(stageName, stageOperationsValidator.getRedundantOutputs());
    }
  }
  if (!stageRedundants.isEmpty()) {
    LOG.debug("The pipeline has redundant operations {} and they will be ignored", stageRedundants);
  }
  if (!stageInvalids.isEmpty()) {
    // Do not throw, just log the exception message for the validation failure.
    // Once most of the plugins are updated to write lineage, the exception can be thrown.
    LOG.debug(new InvalidLineageException(stageInvalids).getMessage());
  }
  LineageOperationsProcessor processor =
    new LineageOperationsProcessor(pipelineSpec.getConnections(), allOperations, noMergeRequiredStages);
  return processor.process();
}
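The allOperations.compute(...) call above is the fallback path: if a stage with known input and output schemas records no operations of its own, a single catch-all FieldTransformOperation named "Transform" is generated that attributes every output field to every input field. A minimal sketch of what gets generated, with hypothetical field names:

// Hypothetical stage with input fields [id, name] and output fields [id, name, ts]
// that recorded no field operations of its own.
List<String> stageInputs = Arrays.asList("id", "name");
List<String> stageOutputs = Arrays.asList("id", "name", "ts");
// The auto-generated catch-all operation maps all inputs to all outputs.
FieldOperation generated = new FieldTransformOperation("Transform", "", stageInputs, stageOutputs);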
use of io.cdap.cdap.etl.api.lineage.field.FieldTransformOperation in project cdap by cdapio.
the class LineageOperationsProcessor method computeProcessedOperations.
/**
 * Converts all the stage operations to platform operations. This method goes through the pipeline in
 * topological order, so that a later stage always knows the origin of its operations.
 * If a stage other than a joiner has multiple inputs, implicit merge operations are generated so that
 * further stages can look up the origins.
 * For joiners, the input field names should already contain the previous stage name.
 *
 * @return a {@link Map} containing the operations with key of operation name and value of the corresponding
 * platform {@link Operation}
 */
private Map<String, Operation> computeProcessedOperations() {
  Map<String, Operation> processedOperations = new HashMap<>();
  for (String stageName : topologicalOrder) {
    Set<String> stageInputs = stageDag.getNodeInputs(stageName);
    // if the stage has multiple inputs and it is not a joiner, compute the merge operations
    if (stageInputs.size() > 1 && !noMergeRequiredStages.contains(stageName)) {
      addMergeOperation(stageInputs, processedOperations);
    }
    List<FieldOperation> fieldOperations = stageOperations.get(stageName);
    for (FieldOperation fieldOperation : fieldOperations) {
      Operation newOperation = null;
      String newOperationName = prefixedName(stageName, fieldOperation.getName());
      Set<String> currentOperationOutputs = new LinkedHashSet<>();
      switch (fieldOperation.getType()) {
        case READ:
          FieldReadOperation read = (FieldReadOperation) fieldOperation;
          newOperation = new ReadOperation(newOperationName, read.getDescription(),
                                           read.getSource(), read.getOutputFields());
          currentOperationOutputs.addAll(read.getOutputFields());
          break;
        case TRANSFORM:
          FieldTransformOperation transform = (FieldTransformOperation) fieldOperation;
          List<InputField> inputFields =
            createInputFields(transform.getInputFields(), stageName, processedOperations);
          newOperation = new TransformOperation(newOperationName, transform.getDescription(),
                                                inputFields, transform.getOutputFields());
          currentOperationOutputs.addAll(transform.getOutputFields());
          break;
        case WRITE:
          FieldWriteOperation write = (FieldWriteOperation) fieldOperation;
          inputFields = createInputFields(write.getInputFields(), stageName, processedOperations);
          newOperation = new WriteOperation(newOperationName, write.getDescription(),
                                            write.getSink(), inputFields);
          break;
      }
      for (String currentOperationOutput : currentOperationOutputs) {
        // For every field output by the current operation, record the operation name as its origin.
        // If the field appears again in the output of another operation belonging to the same stage,
        // its origin is updated to the newer operation.
        stageOutputsWithOrigins.get(stageName).put(currentOperationOutput, newOperation.getName());
      }
      processedOperations.put(newOperation.getName(), newOperation);
    }
  }
  return processedOperations;
}
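To make the name prefixing concrete, here is a small hypothetical example of what this conversion produces for a source stage "src" followed by a transform stage "parse"; the stage names, field names, and the srcEndPoint variable are illustrative, not from the CDAP sources:

// Stage "src" recorded    FieldReadOperation("read", ..., output: body)
// Stage "parse" recorded  FieldTransformOperation("parse", ..., inputs: [body], outputs: [name])
EndPoint srcEndPoint = EndPoint.of("default", "src");
// After conversion, operation names are prefixed with the stage name and inputs carry their origins:
Operation read = new ReadOperation("src.read", "read raw record", srcEndPoint, "body");
Operation parse = new TransformOperation("parse.parse", "parse the body",
  Collections.singletonList(InputField.of("src.read", "body")), "name");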
use of io.cdap.cdap.etl.api.lineage.field.FieldTransformOperation in project cdap by cdapio.
the class StageOperationsValidator method validate.
/**
 * Validate the inputs and outputs for a stage.
 */
void validate() {
  // Fields input to the stage are valid
  Set<String> validInputsSoFar = new HashSet<>(stageInputs);
  // Map of field name to the list of operations that generated that field.
  // Map will contain fields that are yet to be validated
  Map<String, List<FieldOperation>> unusedOutputs = new HashMap<>();
  // Map of field name to the list of operations that generated that field.
  // Map will contain fields that are redundant.
  // For example, if the following operations are recorded by the stage:
  //
  // OP1: [a, b] -> [d]
  // OP2: [b] -> [d]
  // OP3: [d] -> [e]
  //
  // output d of OP1 is redundant, since OP3 will always read the d generated by OP2,
  // so the following map will contain d -> [OP1]
  Map<String, List<FieldOperation>> redundantOutputs = new HashMap<>();
  for (FieldOperation pipelineOperation : operations) {
    switch (pipelineOperation.getType()) {
      case READ:
        FieldReadOperation read = (FieldReadOperation) pipelineOperation;
        updateInvalidOutputs(Collections.emptyList(), unusedOutputs, redundantOutputs);
        validInputsSoFar.addAll(read.getOutputFields());
        for (String field : read.getOutputFields()) {
          List<FieldOperation> origins = unusedOutputs.computeIfAbsent(field, k -> new ArrayList<>());
          origins.add(pipelineOperation);
        }
        break;
      case TRANSFORM:
        FieldTransformOperation transform = (FieldTransformOperation) pipelineOperation;
        // an operation without inputs or without outputs has no effect, skip it
        if (transform.getInputFields().isEmpty() || transform.getOutputFields().isEmpty()) {
          continue;
        }
        validateInputs(pipelineOperation.getName(), transform.getInputFields(), validInputsSoFar);
        updateInvalidOutputs(transform.getInputFields(), unusedOutputs, redundantOutputs);
        validInputsSoFar.addAll(transform.getOutputFields());
        for (String field : transform.getOutputFields()) {
          List<FieldOperation> origins = unusedOutputs.computeIfAbsent(field, k -> new ArrayList<>());
          origins.add(pipelineOperation);
        }
        break;
      case WRITE:
        FieldWriteOperation write = (FieldWriteOperation) pipelineOperation;
        validateInputs(pipelineOperation.getName(), write.getInputFields(), validInputsSoFar);
        updateInvalidOutputs(write.getInputFields(), unusedOutputs, redundantOutputs);
        break;
    }
  }
  // At this point the unusedOutputs map should only contain those fields as keys which are not used
  // by any operation in the stage as an input. However, those fields can still be part of the output schema.
  // We want to remove such keys which are part of the output schema as well.
  // We cannot simply do "unusedOutputs.removeAll(stageInputOutput.getOutputs()))".
  // Consider the following case, assuming d is part of the output schema:
  // OP1: [a, b] -> [d]
  // OP2: [b] -> [d]
  // Here the output d from OP1 is redundant, since the d in the output schema will always come from OP2.
  // However, d will not be in the redundantOutputs map, as we only put the redundant fields there if they
  // appear in the input of some operation. Such redundancy should cause validation checks to fail.
  Iterator<Map.Entry<String, List<FieldOperation>>> iterator = unusedOutputs.entrySet().iterator();
  while (iterator.hasNext()) {
    Map.Entry<String, List<FieldOperation>> next = iterator.next();
    String field = next.getKey();
    List<FieldOperation> origins = next.getValue();
    if (origins.size() > 1) {
      List<FieldOperation> operations = redundantOutputs.computeIfAbsent(field, k -> new ArrayList<>());
      // except for the last origin, all others are redundant
      operations.addAll(origins.subList(0, origins.size() - 1));
    }
    // Whether or not this field is in the output schema of the stage, it is valid.
    // For example, a Joiner joins two datasets D1, D2 on the join keys D1.K1, D2.K2, and
    // decides to drop the join keys from the output schema. The operation
    // [D1.K1, D2.K2] -> [K1, K2] is valid even though K1, K2 are not in the output schema.
    iterator.remove();
  }
  this.invalidOutputs.putAll(unusedOutputs.entrySet().stream()
    .collect(Collectors.toMap(Map.Entry::getKey,
                              e -> e.getValue().stream().map(FieldOperation::getName).collect(Collectors.toList()))));
  this.redundantOutputs.putAll(redundantOutputs.entrySet().stream()
    .collect(Collectors.toMap(Map.Entry::getKey,
                              e -> e.getValue().stream().map(FieldOperation::getName).collect(Collectors.toList()))));
}
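The OP1/OP2/OP3 example from the comments can be written out with FieldTransformOperation and the Builder calls shown earlier in validateAndConvert. This is a sketch of the expected behaviour under that example, not a test from the CDAP sources:

List<FieldOperation> ops = Arrays.asList(
  new FieldTransformOperation("OP1", "", Arrays.asList("a", "b"), "d"),
  new FieldTransformOperation("OP2", "", Collections.singletonList("b"), "d"),
  new FieldTransformOperation("OP3", "", Collections.singletonList("d"), "e"));
StageOperationsValidator.Builder builder = new StageOperationsValidator.Builder(ops);
builder.addStageInputs(Arrays.asList("a", "b"));
builder.addStageOutputs(Collections.singletonList("e"));
StageOperationsValidator validator = builder.build();
validator.validate();
// Expected: the d produced by OP1 is reported as redundant because OP3 reads the d produced by OP2,
// so getRedundantOutputs() should contain d -> [OP1] while getStageInvalids() reports no invalid fields.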
use of io.cdap.cdap.etl.api.lineage.field.FieldTransformOperation in project cdap by cdapio.
the class LineageOperationProcessorTest method testMergeOperationsNonRepeat.
@Test
public void testMergeOperationsNonRepeat() {
  // n1 -> n3 ----
  //             |---- n5
  // n2 -> n4 ----
  //
  // operations: n1 reads (id, name), n2 reads (body, offset),
  //             n3 applies identity transforms to id and name,
  //             n4 generates (id, name) from body,
  //             n5 writes (id, name)
  Set<Connection> connections = new HashSet<>();
  connections.add(new Connection("n1", "n3"));
  connections.add(new Connection("n3", "n5"));
  connections.add(new Connection("n2", "n4"));
  connections.add(new Connection("n4", "n5"));
  EndPoint src1 = EndPoint.of("default", "n1");
  EndPoint src2 = EndPoint.of("default", "n2");
  EndPoint dest = EndPoint.of("default", "n5");
  Map<String, List<FieldOperation>> stageOperations = new HashMap<>();
  stageOperations.put("n1", Collections.singletonList(
    new FieldReadOperation("read1", "read description", src1, "id", "name")));
  stageOperations.put("n2", Collections.singletonList(
    new FieldReadOperation("read2", "read description", src2, "body", "offset")));
  List<FieldOperation> n3Operations = stageOperations.computeIfAbsent("n3", k -> new ArrayList<>());
  n3Operations.add(new FieldTransformOperation("identity1", "identity", Collections.singletonList("id"), "id"));
  n3Operations.add(new FieldTransformOperation("identity2", "identity", Collections.singletonList("name"), "name"));
  stageOperations.put("n4", Collections.singletonList(
    new FieldTransformOperation("generate", "generate", Collections.singletonList("body"), "id", "name")));
  stageOperations.put("n5", Collections.singletonList(
    new FieldWriteOperation("write", "write", dest, "id", "name")));
  LineageOperationsProcessor processor =
    new LineageOperationsProcessor(connections, stageOperations, Collections.emptySet());
  Set<Operation> expectedOperations = new HashSet<>();
  expectedOperations.add(new ReadOperation("n1.read1", "read description", src1, "id", "name"));
  expectedOperations.add(new ReadOperation("n2.read2", "read description", src2, "body", "offset"));
  expectedOperations.add(new TransformOperation("n3.identity1", "identity",
    Collections.singletonList(InputField.of("n1.read1", "id")), "id"));
  expectedOperations.add(new TransformOperation("n3.identity2", "identity",
    Collections.singletonList(InputField.of("n1.read1", "name")), "name"));
  expectedOperations.add(new TransformOperation("n4.generate", "generate",
    Collections.singletonList(InputField.of("n2.read2", "body")), "id", "name"));
  expectedOperations.add(new TransformOperation("n3,n4.merge.id", "Merged stages: n3,n4",
    Arrays.asList(InputField.of("n3.identity1", "id"), InputField.of("n4.generate", "id")), "id"));
  expectedOperations.add(new TransformOperation("n3,n4.merge.name", "Merged stages: n3,n4",
    Arrays.asList(InputField.of("n3.identity2", "name"), InputField.of("n4.generate", "name")), "name"));
  expectedOperations.add(new TransformOperation("n3,n4.merge.body", "Merged stages: n3,n4",
    Collections.singletonList(InputField.of("n2.read2", "body")), "body"));
  expectedOperations.add(new TransformOperation("n3,n4.merge.offset", "Merged stages: n3,n4",
    Collections.singletonList(InputField.of("n2.read2", "offset")), "offset"));
  expectedOperations.add(new WriteOperation("n5.write", "write", dest,
    Arrays.asList(InputField.of("n3,n4.merge.id", "id"), InputField.of("n3,n4.merge.name", "name"))));
  Set<Operation> process = processor.process();
  Assert.assertEquals(expectedOperations, process);
}
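As a side note on the merge naming asserted above: the implicit merge operations appear to be named by joining the merged parent stages with commas and appending ".merge.<field>", with a matching "Merged stages: ..." description. A trivial, hypothetical reconstruction of that convention:

// Hypothetical reconstruction of the naming used in the expected operations above.
String mergedStages = String.join(",", Arrays.asList("n3", "n4")); // "n3,n4"
String mergeOpName = mergedStages + ".merge.id";                   // "n3,n4.merge.id"
String mergeOpDescription = "Merged stages: " + mergedStages;      // "Merged stages: n3,n4"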