Use of io.cdap.cdap.data2.metadata.lineage.Lineage in project cdap by caskdata:
the class MetadataSubscriberService, method processMessages.
// Dispatches each fetched metadata message to the MetadataMessageProcessor matching its type.
// Tracks consecutive ConflictExceptions per message id via conflictMessageId/conflictCount
// (NOTE(review): presumably instance fields of the enclosing service — confirm) so that a
// message which keeps conflicting is skipped after maxRetriesOnConflict attempts instead of
// blocking the subscriber forever.
@Override
protected void processMessages(StructuredTableContext structuredTableContext, Iterator<ImmutablePair<String, MetadataMessage>> messages) throws IOException, ConflictException {
// Processors are created lazily and cached per batch, so each type is instantiated at most once.
Map<MetadataMessage.Type, MetadataMessageProcessor> processors = new HashMap<>();
// Loop over all fetched messages and process them with corresponding MetadataMessageProcessor
while (messages.hasNext()) {
ImmutablePair<String, MetadataMessage> next = messages.next();
// First element is the message id (used for conflict bookkeeping), second is the payload.
String messageId = next.getFirst();
MetadataMessage message = next.getSecond();
MetadataMessageProcessor processor = processors.computeIfAbsent(message.getType(), type -> {
switch(type) {
case LINEAGE:
return new DataAccessLineageProcessor();
case FIELD_LINEAGE:
return new FieldLineageProcessor();
case USAGE:
return new UsageProcessor();
case WORKFLOW_TOKEN:
case WORKFLOW_STATE:
// Both workflow message types share one processor implementation.
return new WorkflowProcessor();
case METADATA_OPERATION:
return new MetadataOperationProcessor(cConf);
case PROFILE_ASSIGNMENT:
case PROFILE_UNASSIGNMENT:
case ENTITY_CREATION:
case ENTITY_DELETION:
// All profile/entity lifecycle messages go to the profile metadata processor.
return new ProfileMetadataMessageProcessor(metadataStorage, structuredTableContext, metricsCollectionService);
default:
// Unknown type: the null is cached and reported below, once per occurrence.
return null;
}
});
// noinspection ConstantConditions
if (processor == null) {
LOG.warn("Unsupported metadata message type {}. Message ignored.", message.getType());
continue;
}
try {
processor.processMessage(message, structuredTableContext);
// Successful processing ends any running conflict streak.
conflictCount = 0;
} catch (ConflictException e) {
if (messageId.equals(conflictMessageId)) {
// Same message conflicted again; skip it once the retry budget is exhausted.
conflictCount++;
if (conflictCount >= maxRetriesOnConflict) {
LOG.warn("Skipping metadata message {} after processing it has caused {} consecutive conflicts: {}", message, conflictCount, e.getMessage());
continue;
}
} else {
// First conflict for this message id: start a new streak.
conflictMessageId = messageId;
conflictCount = 1;
}
// Rethrow so the message is re-fetched and retried on the next poll.
throw e;
}
}
}
Use of io.cdap.cdap.data2.metadata.lineage.Lineage in project cdap by caskdata:
the class FieldLineageAdmin, method getFieldsWithNoFieldLineage.
/**
 * Returns the names of the dataset's schema fields that are NOT present in the given set of
 * fields known to the field-lineage store. The schema is read from the dataset's SYSTEM-scope
 * metadata (written by the DatasetSystemMetadataWriter).
 *
 * @param dataset the dataset endpoint whose schema fields should be examined
 * @param lineageFields field names already present in the lineage store
 * @return schema field names missing from {@code lineageFields}; empty if the dataset has no
 *         schema metadata or the schema declares no fields
 * @throws IOException if the schema JSON cannot be parsed
 */
private Set<String> getFieldsWithNoFieldLineage(EndPoint dataset, Set<String> lineageFields) throws IOException {
// get the system properties of this dataset
Map<String, String> properties = metadataAdmin.getProperties(MetadataScope.SYSTEM, MetadataEntity.ofDataset(dataset.getNamespace(), dataset.getName()));
// the system metadata contains the schema of the dataset which is written by the DatasetSystemMetadataWriter
if (properties.containsKey(MetadataConstants.SCHEMA_KEY)) {
String schema = properties.get(MetadataConstants.SCHEMA_KEY);
Schema sc = Schema.parseJson(schema);
if (sc.getFields() != null) {
// Keep only the schema fields that are not already covered by the lineage store.
// (The original computed an intermediate "schemaFields" set here and never used it;
// the single stream below is equivalent and drops the dead work.)
return sc.getFields().stream().map(Schema.Field::getName).filter(name -> !lineageFields.contains(name)).collect(Collectors.toSet());
}
} else {
LOG.trace("Received request to include schema fields for {} but no schema was found. Only fields present in " + "the lineage store will be returned.", dataset);
}
return Collections.emptySet();
}
Use of io.cdap.cdap.data2.metadata.lineage.Lineage in project cdap by caskdata:
the class LineageAdminTest, method testBranchLineage.
@Test
public void testBranchLineage() {
// Branching lineage under test:
//
// ->D4 -> D5 -> P3 -> D6
// | |
// | |
// D1 -> P1 -> D2 -> P2 -> D3
// | | |
// | | |
// S1 -->| ---------------> P4 -> D7
TransactionRunner txRunner = getInjector().getInstance(TransactionRunner.class);
LineageStoreReader storeReader = new DefaultLineageStoreReader(txRunner);
LineageWriter storeWriter = new BasicLineageWriter(txRunner);
Store appStore = getInjector().getInstance(Store.class);
LineageAdmin admin = new LineageAdmin(storeReader, appStore);
// Register the program runs, then record each dataset access.
addRuns(appStore, run1, run2, run3, run4, run5);
// Current time is fine here: access timestamps are ignored by the assertions below.
storeWriter.addAccess(run1, dataset1, AccessType.READ);
storeWriter.addAccess(run1, dataset2, AccessType.WRITE);
storeWriter.addAccess(run1, dataset4, AccessType.WRITE);
storeWriter.addAccess(run2, dataset2, AccessType.READ);
storeWriter.addAccess(run2, dataset3, AccessType.WRITE);
storeWriter.addAccess(run2, dataset5, AccessType.WRITE);
storeWriter.addAccess(run3, dataset5, AccessType.READ, null);
storeWriter.addAccess(run3, dataset6, AccessType.WRITE, null);
storeWriter.addAccess(run4, dataset2, AccessType.READ, null);
storeWriter.addAccess(run4, dataset3, AccessType.READ, null);
storeWriter.addAccess(run4, dataset7, AccessType.WRITE, null);
// Expected relations mirror the accesses recorded above, one Relation per access.
ImmutableSet.Builder<Relation> relations = ImmutableSet.builder();
relations.add(new Relation(dataset1, program1, AccessType.READ, twillRunId(run1)));
relations.add(new Relation(dataset2, program1, AccessType.WRITE, twillRunId(run1)));
relations.add(new Relation(dataset4, program1, AccessType.WRITE, twillRunId(run1)));
relations.add(new Relation(dataset2, program2, AccessType.READ, twillRunId(run2)));
relations.add(new Relation(dataset3, program2, AccessType.WRITE, twillRunId(run2)));
relations.add(new Relation(dataset5, program2, AccessType.WRITE, twillRunId(run2)));
relations.add(new Relation(dataset5, program3, AccessType.READ, twillRunId(run3)));
relations.add(new Relation(dataset6, program3, AccessType.WRITE, twillRunId(run3)));
relations.add(new Relation(dataset2, program4, AccessType.READ, twillRunId(run4)));
relations.add(new Relation(dataset3, program4, AccessType.READ, twillRunId(run4)));
relations.add(new Relation(dataset7, program4, AccessType.WRITE, twillRunId(run4)));
Lineage expectedLineage = new Lineage(relations.build());
// The graph is fully connected, so D7, D6 and D3 all resolve to the same full lineage.
Assert.assertEquals(expectedLineage, admin.computeLineage(dataset7, 500, 20000, 100));
Assert.assertEquals(expectedLineage, admin.computeLineage(dataset6, 500, 20000, 100));
Assert.assertEquals(expectedLineage, admin.computeLineage(dataset3, 500, 20000, 100));
}
Use of io.cdap.cdap.data2.metadata.lineage.Lineage in project cdap by caskdata:
the class LineageAdminTest, method testLocalDatasetsInWorkflow.
// Verifies lineage computation for a workflow with inner MapReduce/Spark programs and
// workflow-local datasets: without roll-up the inner programs and local datasets appear
// directly; with roll-up ("workflow" rollup parameter) all accesses are attributed to the
// workflow itself and local datasets disappear from the result.
@Test
public void testLocalDatasetsInWorkflow() throws Exception {
TransactionRunner transactionRunner = getInjector().getInstance(TransactionRunner.class);
LineageStoreReader lineageReader = new DefaultLineageStoreReader(transactionRunner);
LineageWriter lineageWriter = new BasicLineageWriter(transactionRunner);
ApplicationId testApp = NamespaceId.DEFAULT.app("testLocalDatasets");
ProgramId workflowId = testApp.workflow("wf1");
// if the spark and mr job are inner jobs of workflow, they should be in the same app
ProgramId mrId1 = testApp.mr("mr1");
ProgramId mrId2 = testApp.mr("mr2");
ProgramId sparkId = testApp.spark("spark1");
// Workflow spec with three sequential action nodes: mr1 -> mr2 -> spark1.
ImmutableList<WorkflowNode> nodes = ImmutableList.of(new WorkflowActionNode("mr1", new ScheduleProgramInfo(SchedulableProgramType.MAPREDUCE, "mr1")), new WorkflowActionNode("mr2", new ScheduleProgramInfo(SchedulableProgramType.MAPREDUCE, "mr2")), new WorkflowActionNode("spark1", new ScheduleProgramInfo(SchedulableProgramType.SPARK, "spark1")));
WorkflowSpecification wfSpec = new WorkflowSpecification("test", "wf1", "", Collections.emptyMap(), nodes, Collections.emptyMap(), Collections.emptyMap());
// Minimal app spec: everything empty except the single workflow defined above.
ApplicationSpecification appSpec = new DefaultApplicationSpecification("testLocalDatasets", ProjectInfo.getVersion().toString(), "dummy app", null, NamespaceId.DEFAULT.artifact("testArtifact", "1.0").toApiArtifactId(), Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap(), ImmutableMap.of(workflowId.getProgram(), wfSpec), Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap());
Store store = getInjector().getInstance(Store.class);
store.addApplication(testApp, appSpec);
LineageAdmin lineageAdmin = new LineageAdmin(lineageReader, store);
// Add accesses for D1 -|
// |-> MR1 -> LOCAL1 -> MR2 -> LOCAL2 -> SPARK -> D3
// D2 -|
// P1 and P2 are inner programs of the workflow
// We need to use current time here as metadata store stores access time using current time
ProgramRunId mr1Run = mrId1.run(RunIds.generate(System.currentTimeMillis()).getId());
ProgramRunId mr2Run = mrId2.run((RunIds.generate(System.currentTimeMillis()).getId()));
ProgramRunId sparkRun = sparkId.run(RunIds.generate(System.currentTimeMillis()).getId());
ProgramRunId workflow = workflowId.run(RunIds.generate(System.currentTimeMillis()).getId());
// local datasets always end with workflow run id
DatasetId localDataset1 = NamespaceId.DEFAULT.dataset("localDataset1" + workflow.getRun());
DatasetId localDataset2 = NamespaceId.DEFAULT.dataset("localDataset2" + workflow.getRun());
addRuns(store, workflow);
// only mr and spark can be inner programs
addWorkflowRuns(store, workflow.getProgram(), workflow.getRun(), mr1Run, mr2Run, sparkRun);
// Record the dataset accesses of each inner program run.
lineageWriter.addAccess(mr1Run, dataset1, AccessType.READ);
lineageWriter.addAccess(mr1Run, dataset2, AccessType.READ);
lineageWriter.addAccess(mr1Run, localDataset1, AccessType.WRITE);
lineageWriter.addAccess(mr2Run, localDataset1, AccessType.READ);
lineageWriter.addAccess(mr2Run, localDataset2, AccessType.WRITE);
lineageWriter.addAccess(sparkRun, localDataset2, AccessType.READ);
lineageWriter.addAccess(sparkRun, dataset3, AccessType.WRITE);
// compute the lineage without roll up, the local datasets and inner program should not roll up
Lineage expectedLineage = new Lineage(ImmutableSet.of(new Relation(dataset1, mrId1, AccessType.READ, twillRunId(mr1Run)), new Relation(dataset2, mrId1, AccessType.READ, twillRunId(mr1Run)), new Relation(localDataset1, mrId1, AccessType.WRITE, twillRunId(mr1Run)), new Relation(localDataset1, mrId2, AccessType.READ, twillRunId(mr2Run)), new Relation(localDataset2, mrId2, AccessType.WRITE, twillRunId(mr2Run)), new Relation(localDataset2, sparkId, AccessType.READ, twillRunId(sparkRun)), new Relation(dataset3, sparkId, AccessType.WRITE, twillRunId(sparkRun))));
Lineage resultLineage = lineageAdmin.computeLineage(dataset1, 500, System.currentTimeMillis() + 10000, 100, null);
// Lineage for D1
Assert.assertEquals(expectedLineage, resultLineage);
// D3 should have same lineage for all levels
resultLineage = lineageAdmin.computeLineage(dataset3, 500, System.currentTimeMillis() + 10000, 100, null);
Assert.assertEquals(expectedLineage, resultLineage);
// if only query for one level with no roll up, the roll up should not happen and the inner program and local
// dataset should get returned
expectedLineage = new Lineage(ImmutableSet.of(new Relation(dataset3, sparkId, AccessType.WRITE, twillRunId(sparkRun)), new Relation(localDataset2, sparkId, AccessType.READ, twillRunId(sparkRun))));
resultLineage = lineageAdmin.computeLineage(dataset3, 500, System.currentTimeMillis() + 10000, 1, null);
Assert.assertEquals(expectedLineage, resultLineage);
// query for roll up the workflow, all the inner program and local datasets should not be in the result,
// the entire workflow information should get returned
expectedLineage = new Lineage(ImmutableSet.of(new Relation(dataset1, workflowId, AccessType.READ, twillRunId(workflow)), new Relation(dataset2, workflowId, AccessType.READ, twillRunId(workflow)), new Relation(dataset3, workflowId, AccessType.WRITE, twillRunId(workflow))));
// D1, D2, D3 should give same result
resultLineage = lineageAdmin.computeLineage(dataset1, 500, System.currentTimeMillis() + 10000, 1, "workflow");
Assert.assertEquals(expectedLineage, resultLineage);
resultLineage = lineageAdmin.computeLineage(dataset2, 500, System.currentTimeMillis() + 10000, 1, "workflow");
Assert.assertEquals(expectedLineage, resultLineage);
resultLineage = lineageAdmin.computeLineage(dataset3, 500, System.currentTimeMillis() + 10000, 1, "workflow");
Assert.assertEquals(expectedLineage, resultLineage);
}
Use of io.cdap.cdap.data2.metadata.lineage.Lineage in project cdap by caskdata:
the class LineageAdminTest, method testSimpleLoopLineage.
@Test
public void testSimpleLoopLineage() {
// Lineage with a cycle between D1 and D2:
// D1 -> P1 -> D2 -> P2 -> D3 -> P3 -> D4
// | |
// | V
// |<-----------------
//
TransactionRunner txRunner = getInjector().getInstance(TransactionRunner.class);
LineageStoreReader storeReader = new DefaultLineageStoreReader(txRunner);
LineageWriter storeWriter = new BasicLineageWriter(txRunner);
Store appStore = getInjector().getInstance(Store.class);
LineageAdmin admin = new LineageAdmin(storeReader, appStore);
// Register the runs, then record the accesses that form the loop.
addRuns(appStore, run1, run2, run3, run4, run5);
// Current time is fine here: access timestamps are ignored by the assertions below.
storeWriter.addAccess(run1, dataset1, AccessType.READ);
storeWriter.addAccess(run1, dataset2, AccessType.WRITE);
storeWriter.addAccess(run2, dataset2, AccessType.READ);
storeWriter.addAccess(run2, dataset1, AccessType.WRITE);
storeWriter.addAccess(run2, dataset3, AccessType.WRITE);
storeWriter.addAccess(run3, dataset3, AccessType.READ, null);
storeWriter.addAccess(run3, dataset4, AccessType.WRITE, null);
// Full lineage contains every relation recorded above.
ImmutableSet.Builder<Relation> fullRelations = ImmutableSet.builder();
fullRelations.add(new Relation(dataset2, program1, AccessType.WRITE, twillRunId(run1)));
fullRelations.add(new Relation(dataset1, program1, AccessType.READ, twillRunId(run1)));
fullRelations.add(new Relation(dataset1, program2, AccessType.WRITE, twillRunId(run2)));
fullRelations.add(new Relation(dataset2, program2, AccessType.READ, twillRunId(run2)));
fullRelations.add(new Relation(dataset3, program2, AccessType.WRITE, twillRunId(run2)));
fullRelations.add(new Relation(dataset4, program3, AccessType.WRITE, twillRunId(run3)));
fullRelations.add(new Relation(dataset3, program3, AccessType.READ, twillRunId(run3)));
Lineage expectedLineage = new Lineage(fullRelations.build());
// D1 and D2 sit on the loop, so both resolve to the same full lineage.
Assert.assertEquals(expectedLineage, admin.computeLineage(dataset1, 500, 20000, 100));
Assert.assertEquals(expectedLineage, admin.computeLineage(dataset2, 500, 20000, 100));
// One level from D1 stops at D3: D1 -> P1 -> D2 -> P2 -> D3
// | |
// | V
// |<-----------------
//
ImmutableSet.Builder<Relation> oneLevelRelations = ImmutableSet.builder();
oneLevelRelations.add(new Relation(dataset2, program1, AccessType.WRITE, twillRunId(run1)));
oneLevelRelations.add(new Relation(dataset1, program1, AccessType.READ, twillRunId(run1)));
oneLevelRelations.add(new Relation(dataset1, program2, AccessType.WRITE, twillRunId(run2)));
oneLevelRelations.add(new Relation(dataset2, program2, AccessType.READ, twillRunId(run2)));
oneLevelRelations.add(new Relation(dataset3, program2, AccessType.WRITE, twillRunId(run2)));
Lineage oneLevelLineage = admin.computeLineage(dataset1, 500, 20000, 1);
Assert.assertEquals(oneLevelRelations.build(), oneLevelLineage.getRelations());
}
Aggregations