Use of co.cask.cdap.common.metadata.MetadataRecord in project cdap by caskdata.
In class LineageTestRun, method testFlowLineage:
@Test
public void testFlowLineage() throws Exception {
  NamespaceId namespace = new NamespaceId("testFlowLineage");
  ApplicationId app = namespace.app(AllProgramsApp.NAME);
  ProgramId flow = app.flow(AllProgramsApp.NoOpFlow.NAME);
  DatasetId dataset = namespace.dataset(AllProgramsApp.DATASET_NAME);
  StreamId stream = namespace.stream(AllProgramsApp.STREAM_NAME);
  namespaceClient.create(new NamespaceMeta.Builder().setName(namespace).build());
  try {
    appClient.deploy(namespace, createAppJarFile(AllProgramsApp.class));
    // Add metadata to application
    ImmutableMap<String, String> appProperties = ImmutableMap.of("app-key1", "app-value1");
    addProperties(app, appProperties);
    Assert.assertEquals(appProperties, getProperties(app, MetadataScope.USER));
    ImmutableSet<String> appTags = ImmutableSet.of("app-tag1");
    addTags(app, appTags);
    Assert.assertEquals(appTags, getTags(app, MetadataScope.USER));
    // Add metadata to flow
    ImmutableMap<String, String> flowProperties = ImmutableMap.of("flow-key1", "flow-value1");
    addProperties(flow, flowProperties);
    Assert.assertEquals(flowProperties, getProperties(flow, MetadataScope.USER));
    ImmutableSet<String> flowTags = ImmutableSet.of("flow-tag1", "flow-tag2");
    addTags(flow, flowTags);
    Assert.assertEquals(flowTags, getTags(flow, MetadataScope.USER));
    // Add metadata to dataset
    ImmutableMap<String, String> dataProperties = ImmutableMap.of("data-key1", "data-value1");
    addProperties(dataset, dataProperties);
    Assert.assertEquals(dataProperties, getProperties(dataset, MetadataScope.USER));
    ImmutableSet<String> dataTags = ImmutableSet.of("data-tag1", "data-tag2");
    addTags(dataset, dataTags);
    Assert.assertEquals(dataTags, getTags(dataset, MetadataScope.USER));
    // Add metadata to stream
    ImmutableMap<String, String> streamProperties = ImmutableMap.of("stream-key1", "stream-value1");
    addProperties(stream, streamProperties);
    Assert.assertEquals(streamProperties, getProperties(stream, MetadataScope.USER));
    ImmutableSet<String> streamTags = ImmutableSet.of("stream-tag1", "stream-tag2");
    addTags(stream, streamTags);
    Assert.assertEquals(streamTags, getTags(stream, MetadataScope.USER));
    long startTime = TimeMathParser.nowInSeconds();
    RunId flowRunId = runAndWait(flow);
    // Sleep a few seconds so that the stop time (in seconds) is strictly greater than the start time.
    TimeUnit.SECONDS.sleep(2);
    waitForStop(flow, true);
    long stopTime = TimeMathParser.nowInSeconds();
    // Fetch dataset lineage
    LineageRecord lineage = fetchLineage(dataset, startTime, stopTime, 10);
    LineageRecord expected = LineageSerializer.toLineageRecord(
      startTime, stopTime,
      new Lineage(ImmutableSet.of(
        new Relation(dataset, flow, AccessType.UNKNOWN, flowRunId,
                     ImmutableSet.of(flow.flowlet(AllProgramsApp.A.NAME))),
        new Relation(stream, flow, AccessType.READ, flowRunId,
                     ImmutableSet.of(flow.flowlet(AllProgramsApp.A.NAME))))),
      Collections.<CollapseType>emptySet());
    Assert.assertEquals(expected, lineage);
    // Fetch dataset lineage with time strings
    lineage = fetchLineage(dataset, "now-1h", "now+1h", 10);
    Assert.assertEquals(expected.getRelations(), lineage.getRelations());
    // Fetch stream lineage
    lineage = fetchLineage(stream, startTime, stopTime, 10);
    // same as the dataset's lineage
    Assert.assertEquals(expected, lineage);
    // Fetch stream lineage with time strings
    lineage = fetchLineage(stream, "now-1h", "now+1h", 10);
    // same as the dataset's lineage
    Assert.assertEquals(expected.getRelations(), lineage.getRelations());
    // Assert metadata
    // Id.Flow needs conversion to Id.Program (JIRA: CDAP-3658)
    Assert.assertEquals(
      toSet(new MetadataRecord(app, MetadataScope.USER, appProperties, appTags),
            new MetadataRecord(flow, MetadataScope.USER, flowProperties, flowTags),
            new MetadataRecord(dataset, MetadataScope.USER, dataProperties, dataTags),
            new MetadataRecord(stream, MetadataScope.USER, streamProperties, streamTags)),
      fetchRunMetadata(flow.run(flowRunId.getId())));
    // A time range entirely after the flow run should return no results
    long laterStartTime = stopTime + 1000;
    long laterEndTime = stopTime + 5000;
    // Fetch stream lineage
    lineage = fetchLineage(stream, laterStartTime, laterEndTime, 10);
    Assert.assertEquals(
      LineageSerializer.toLineageRecord(laterStartTime, laterEndTime,
                                        new Lineage(ImmutableSet.<Relation>of()),
                                        Collections.<CollapseType>emptySet()),
      lineage);
    // A time range entirely before the flow run should return no results
    long earlierStartTime = startTime - 5000;
    long earlierEndTime = startTime - 1000;
    // Fetch stream lineage
    lineage = fetchLineage(stream, earlierStartTime, earlierEndTime, 10);
    Assert.assertEquals(
      LineageSerializer.toLineageRecord(earlierStartTime, earlierEndTime,
                                        new Lineage(ImmutableSet.<Relation>of()),
                                        Collections.<CollapseType>emptySet()),
      lineage);
    // Test bad time ranges
    fetchLineage(dataset, "sometime", "sometime", 10, BadRequestException.class);
    fetchLineage(dataset, "now+1h", "now-1h", 10, BadRequestException.class);
    // Test a non-existent run
    assertRunMetadataNotFound(flow.run(RunIds.generate(1000).getId()));
  } finally {
    namespaceClient.delete(namespace);
  }
}
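The expected lineage above is assembled inline, which makes the assertion hard to scan. As a minimal sketch (the helper name expectedLineage is hypothetical, not part of the CDAP source), the same construction pattern can be factored into a varargs method using only the types exercised in this test:

  private LineageRecord expectedLineage(long startTime, long stopTime, Relation... relations) {
    // Varargs let multi-relation expectations read as a flat list.
    return LineageSerializer.toLineageRecord(startTime, stopTime,
                                             new Lineage(ImmutableSet.copyOf(relations)),
                                             Collections.<CollapseType>emptySet());
  }

With such a helper, the empty-range assertions near the end of the test reduce to comparisons against expectedLineage(laterStartTime, laterEndTime) with no relations passed.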
Use of co.cask.cdap.common.metadata.MetadataRecord in project cdap by caskdata.
In class LineageTestRun, method testAllProgramsLineage:
@Test
public void testAllProgramsLineage() throws Exception {
  NamespaceId namespace = new NamespaceId("testAllProgramsLineage");
  ApplicationId app = namespace.app(AllProgramsApp.NAME);
  ProgramId flow = app.flow(AllProgramsApp.NoOpFlow.NAME);
  ProgramId mapreduce = app.mr(AllProgramsApp.NoOpMR.NAME);
  ProgramId mapreduce2 = app.mr(AllProgramsApp.NoOpMR2.NAME);
  ProgramId spark = app.spark(AllProgramsApp.NoOpSpark.NAME);
  ProgramId service = app.service(AllProgramsApp.NoOpService.NAME);
  ProgramId worker = app.worker(AllProgramsApp.NoOpWorker.NAME);
  ProgramId workflow = app.workflow(AllProgramsApp.NoOpWorkflow.NAME);
  DatasetId dataset = namespace.dataset(AllProgramsApp.DATASET_NAME);
  DatasetId dataset2 = namespace.dataset(AllProgramsApp.DATASET_NAME2);
  DatasetId dataset3 = namespace.dataset(AllProgramsApp.DATASET_NAME3);
  StreamId stream = namespace.stream(AllProgramsApp.STREAM_NAME);
  namespaceClient.create(new NamespaceMeta.Builder().setName(namespace.getNamespace()).build());
  try {
    appClient.deploy(namespace, createAppJarFile(AllProgramsApp.class));
    // Add metadata
    ImmutableSet<String> sparkTags = ImmutableSet.of("spark-tag1", "spark-tag2");
    addTags(spark, sparkTags);
    Assert.assertEquals(sparkTags, getTags(spark, MetadataScope.USER));
    ImmutableSet<String> workerTags = ImmutableSet.of("worker-tag1");
    addTags(worker, workerTags);
    Assert.assertEquals(workerTags, getTags(worker, MetadataScope.USER));
    ImmutableMap<String, String> datasetProperties = ImmutableMap.of("data-key1", "data-value1");
    addProperties(dataset, datasetProperties);
    Assert.assertEquals(datasetProperties, getProperties(dataset, MetadataScope.USER));
    // Start all programs
    RunId flowRunId = runAndWait(flow);
    RunId mrRunId = runAndWait(mapreduce);
    RunId mrRunId2 = runAndWait(mapreduce2);
    RunId sparkRunId = runAndWait(spark);
    runAndWait(workflow);
    RunId workflowMrRunId = getRunId(mapreduce, mrRunId);
    RunId serviceRunId = runAndWait(service);
    // The worker calls the service to make it access datasets, so the service must
    // start before the worker and stop after it.
    RunId workerRunId = runAndWait(worker);
    // Wait for the programs to finish
    waitForStop(flow, true);
    waitForStop(mapreduce, false);
    waitForStop(mapreduce2, false);
    waitForStop(spark, false);
    waitForStop(workflow, false);
    waitForStop(worker, false);
    waitForStop(service, true);
    long now = TimeUnit.MILLISECONDS.toSeconds(System.currentTimeMillis());
    long oneHour = TimeUnit.HOURS.toSeconds(1);
    // Fetch dataset lineage
    LineageRecord lineage = fetchLineage(dataset, now - oneHour, now + oneHour, toSet(CollapseType.ACCESS), 10);
    // The dataset is accessed by all programs
    LineageRecord expected = LineageSerializer.toLineageRecord(
      now - oneHour, now + oneHour,
      new Lineage(ImmutableSet.of(
        // Dataset access
        new Relation(dataset, flow, AccessType.UNKNOWN, flowRunId, toSet(flow.flowlet(AllProgramsApp.A.NAME))),
        new Relation(dataset, mapreduce, AccessType.WRITE, mrRunId),
        new Relation(dataset, mapreduce2, AccessType.WRITE, mrRunId2),
        new Relation(dataset2, mapreduce2, AccessType.READ, mrRunId2),
        new Relation(dataset, spark, AccessType.READ, sparkRunId),
        new Relation(dataset2, spark, AccessType.WRITE, sparkRunId),
        new Relation(dataset3, spark, AccessType.READ, sparkRunId),
        new Relation(dataset3, spark, AccessType.WRITE, sparkRunId),
        new Relation(dataset, mapreduce, AccessType.WRITE, workflowMrRunId),
        new Relation(dataset, service, AccessType.WRITE, serviceRunId),
        new Relation(dataset, worker, AccessType.WRITE, workerRunId),
        // Stream access
        new Relation(stream, flow, AccessType.READ, flowRunId, ImmutableSet.of(flow.flowlet(AllProgramsApp.A.NAME))),
        new Relation(stream, mapreduce, AccessType.READ, mrRunId),
        new Relation(stream, spark, AccessType.READ, sparkRunId),
        new Relation(stream, mapreduce, AccessType.READ, workflowMrRunId),
        new Relation(stream, worker, AccessType.WRITE, workerRunId))),
      toSet(CollapseType.ACCESS));
    Assert.assertEquals(expected, lineage);
    // Fetch stream lineage
    lineage = fetchLineage(stream, now - oneHour, now + oneHour, toSet(CollapseType.ACCESS), 10);
    // The stream, too, is accessed by all programs
    Assert.assertEquals(expected, lineage);
    // Assert metadata
    // Id.Flow needs conversion to Id.Program (JIRA: CDAP-3658)
    Assert.assertEquals(
      toSet(new MetadataRecord(app, MetadataScope.USER, emptyMap(), emptySet()),
            new MetadataRecord(flow, MetadataScope.USER, emptyMap(), emptySet()),
            new MetadataRecord(dataset, MetadataScope.USER, datasetProperties, emptySet()),
            new MetadataRecord(stream, MetadataScope.USER, emptyMap(), emptySet())),
      fetchRunMetadata(flow.run(flowRunId.getId())));
    // Id.Worker needs conversion to Id.Program (JIRA: CDAP-3658)
    ProgramId programForWorker =
      new ProgramId(worker.getNamespace(), worker.getApplication(), worker.getType(), worker.getEntityName());
    Assert.assertEquals(
      toSet(new MetadataRecord(app, MetadataScope.USER, emptyMap(), emptySet()),
            new MetadataRecord(programForWorker, MetadataScope.USER, emptyMap(), workerTags),
            new MetadataRecord(dataset, MetadataScope.USER, datasetProperties, emptySet()),
            new MetadataRecord(stream, MetadataScope.USER, emptyMap(), emptySet())),
      fetchRunMetadata(worker.run(workerRunId.getId())));
    // Id.Spark needs conversion to Id.Program (JIRA: CDAP-3658)
    ProgramId programForSpark =
      new ProgramId(spark.getNamespace(), spark.getApplication(), spark.getType(), spark.getEntityName());
    Assert.assertEquals(
      toSet(new MetadataRecord(app, MetadataScope.USER, emptyMap(), emptySet()),
            new MetadataRecord(programForSpark, MetadataScope.USER, emptyMap(), sparkTags),
            new MetadataRecord(dataset, MetadataScope.USER, datasetProperties, emptySet()),
            new MetadataRecord(dataset2, MetadataScope.USER, emptyMap(), emptySet()),
            new MetadataRecord(dataset3, MetadataScope.USER, emptyMap(), emptySet()),
            new MetadataRecord(stream, MetadataScope.USER, emptyMap(), emptySet())),
      fetchRunMetadata(spark.run(sparkRunId.getId())));
  } finally {
    namespaceClient.delete(namespace);
  }
}
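The test above deliberately starts every program before waiting on any of them, so the runs overlap in time. Where overlap is not required, the start/wait pair could be folded into one helper; this is a sketch only (the name runToCompletion is hypothetical, and it assumes the runAndWait/waitForStop signatures used in this test class):

  private RunId runToCompletion(ProgramId program, boolean stopExplicitly) throws Exception {
    RunId runId = runAndWait(program);
    // Forward the same flag the test passes to waitForStop above
    // (true for the long-running flow/service, false for the batch programs).
    waitForStop(program, stopExplicitly);
    return runId;
  }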
Use of co.cask.cdap.common.metadata.MetadataRecord in project cdap by caskdata.
In class MetadataHttpHandlerTestRun, method testMetadata:
@Test
public void testMetadata() throws Exception {
  assertCleanState(MetadataScope.USER);
  // Remove when nothing exists
  removeAllMetadata();
  assertCleanState(MetadataScope.USER);
  // Add some properties and tags
  Map<String, String> appProperties = ImmutableMap.of("aKey", "aValue");
  Map<String, String> serviceProperties = ImmutableMap.of("sKey", "sValue");
  Map<String, String> datasetProperties = ImmutableMap.of("dKey", "dValue");
  Map<String, String> streamProperties = ImmutableMap.of("stKey", "stValue");
  Map<String, String> viewProperties = ImmutableMap.of("viewKey", "viewValue");
  Map<String, String> artifactProperties = ImmutableMap.of("rKey", "rValue");
  Set<String> appTags = ImmutableSet.of("aTag");
  Set<String> serviceTags = ImmutableSet.of("sTag");
  Set<String> datasetTags = ImmutableSet.of("dTag");
  Set<String> streamTags = ImmutableSet.of("stTag");
  Set<String> viewTags = ImmutableSet.of("viewTag");
  Set<String> artifactTags = ImmutableSet.of("rTag");
  addProperties(application, appProperties);
  addProperties(pingService, serviceProperties);
  addProperties(myds, datasetProperties);
  addProperties(mystream, streamProperties);
  addProperties(myview, viewProperties);
  addProperties(artifactId, artifactProperties);
  addTags(application, appTags);
  addTags(pingService, serviceTags);
  addTags(myds, datasetTags);
  addTags(mystream, streamTags);
  addTags(myview, viewTags);
  addTags(artifactId, artifactTags);
  // verify app
  Set<MetadataRecord> metadataRecords = getMetadata(application, MetadataScope.USER);
  Assert.assertEquals(1, metadataRecords.size());
  MetadataRecord metadata = metadataRecords.iterator().next();
  Assert.assertEquals(MetadataScope.USER, metadata.getScope());
  Assert.assertEquals(application, metadata.getEntityId());
  Assert.assertEquals(appProperties, metadata.getProperties());
  Assert.assertEquals(appTags, metadata.getTags());
  // verify service
  metadataRecords = getMetadata(pingService, MetadataScope.USER);
  Assert.assertEquals(1, metadataRecords.size());
  metadata = metadataRecords.iterator().next();
  Assert.assertEquals(MetadataScope.USER, metadata.getScope());
  Assert.assertEquals(pingService, metadata.getEntityId());
  Assert.assertEquals(serviceProperties, metadata.getProperties());
  Assert.assertEquals(serviceTags, metadata.getTags());
  // verify dataset
  metadataRecords = getMetadata(myds, MetadataScope.USER);
  Assert.assertEquals(1, metadataRecords.size());
  metadata = metadataRecords.iterator().next();
  Assert.assertEquals(MetadataScope.USER, metadata.getScope());
  Assert.assertEquals(myds, metadata.getEntityId());
  Assert.assertEquals(datasetProperties, metadata.getProperties());
  Assert.assertEquals(datasetTags, metadata.getTags());
  // verify stream
  metadataRecords = getMetadata(mystream, MetadataScope.USER);
  Assert.assertEquals(1, metadataRecords.size());
  metadata = metadataRecords.iterator().next();
  Assert.assertEquals(MetadataScope.USER, metadata.getScope());
  Assert.assertEquals(mystream, metadata.getEntityId());
  Assert.assertEquals(streamProperties, metadata.getProperties());
  Assert.assertEquals(streamTags, metadata.getTags());
  // verify view
  metadataRecords = getMetadata(myview, MetadataScope.USER);
  Assert.assertEquals(1, metadataRecords.size());
  metadata = metadataRecords.iterator().next();
  Assert.assertEquals(MetadataScope.USER, metadata.getScope());
  Assert.assertEquals(myview, metadata.getEntityId());
  Assert.assertEquals(viewProperties, metadata.getProperties());
  Assert.assertEquals(viewTags, metadata.getTags());
  // verify artifact
  metadataRecords = getMetadata(artifactId, MetadataScope.USER);
  Assert.assertEquals(1, metadataRecords.size());
  metadata = metadataRecords.iterator().next();
  Assert.assertEquals(MetadataScope.USER, metadata.getScope());
  Assert.assertEquals(artifactId, metadata.getEntityId());
  Assert.assertEquals(artifactProperties, metadata.getProperties());
  Assert.assertEquals(artifactTags, metadata.getTags());
  // cleanup
  removeAllMetadata();
  assertCleanState(MetadataScope.USER);
}
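The six "verify" blocks above all follow the same pattern: fetch the USER-scope metadata, expect exactly one record, and compare scope, entity id, properties, and tags. A minimal sketch of a helper that captures the pattern (the name assertSingleUserMetadata is hypothetical, and it assumes the test's getMetadata helper accepts a common entity-id supertype such as EntityId, since the entities above are of several id types):

  private void assertSingleUserMetadata(EntityId entity, Map<String, String> expectedProperties,
                                        Set<String> expectedTags) throws Exception {
    Set<MetadataRecord> records = getMetadata(entity, MetadataScope.USER);
    // Exactly one record is expected per entity in the USER scope.
    Assert.assertEquals(1, records.size());
    MetadataRecord record = records.iterator().next();
    Assert.assertEquals(MetadataScope.USER, record.getScope());
    Assert.assertEquals(entity, record.getEntityId());
    Assert.assertEquals(expectedProperties, record.getProperties());
    Assert.assertEquals(expectedTags, record.getTags());
  }

Each verify block then collapses to one call, e.g. assertSingleUserMetadata(application, appProperties, appTags).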
Use of co.cask.cdap.common.metadata.MetadataRecord in project cdap by caskdata.
In class GetMetadataCommand, method perform:
@Override
public void perform(Arguments arguments, PrintStream output) throws Exception {
  EntityId entity = EntityId.fromString(arguments.get(ArgumentName.ENTITY.toString()));
  String scope = arguments.getOptional(ArgumentName.METADATA_SCOPE.toString());
  Set<MetadataRecord> metadata = scope == null
    ? client.getMetadata(entity)
    : client.getMetadata(entity, MetadataScope.valueOf(scope.toUpperCase()));
  Table table = Table.builder()
    .setHeader("entity", "tags", "properties", "scope")
    .setRows(Iterables.transform(metadata, new Function<MetadataRecord, List<String>>() {
      @Nullable
      @Override
      public List<String> apply(MetadataRecord record) {
        return Lists.newArrayList(
          record.getEntityId().toString(),
          Joiner.on("\n").join(record.getTags()),
          Joiner.on("\n").withKeyValueSeparator(":").join(record.getProperties()),
          record.getScope().name());
      }
    })).build();
  cliConfig.getTableRenderer().render(cliConfig, output, table);
}
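Guava's Function is a single-abstract-method interface, so on a Java 8+ build the anonymous class above can be written as a lambda. A sketch of the same row-building logic (assuming a Java 8 target; the Table/Iterables/Joiner calls are unchanged from the original):

  // Declaring the Function type explicitly keeps inference simple regardless of
  // setRows' exact generic signature.
  Function<MetadataRecord, List<String>> toRow = record -> Lists.newArrayList(
    record.getEntityId().toString(),
    Joiner.on("\n").join(record.getTags()),
    Joiner.on("\n").withKeyValueSeparator(":").join(record.getProperties()),
    record.getScope().name());
  Table table = Table.builder()
    .setHeader("entity", "tags", "properties", "scope")
    .setRows(Iterables.transform(metadata, toRow))
    .build();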
Use of co.cask.cdap.common.metadata.MetadataRecord in project cdap by caskdata.
In class AbstractSystemMetadataWriterTest, method testMetadataOverwrite:
@Test
public void testMetadataOverwrite() throws Exception {
  DatasetId dsInstance = new DatasetId("ns1", "ds1");
  DatasetSystemMetadataWriter datasetSystemMetadataWriter = new DatasetSystemMetadataWriter(
    store, dsInstance, TableProperties.builder().setTTL(100).build(), 123456L, null, null, "description1");
  datasetSystemMetadataWriter.write();
  MetadataRecord expected = new MetadataRecord(
    dsInstance, MetadataScope.SYSTEM,
    ImmutableMap.of(
      AppSystemMetadataWriter.ENTITY_NAME_KEY, dsInstance.getEntityName(),
      AbstractSystemMetadataWriter.DESCRIPTION_KEY, "description1",
      AbstractSystemMetadataWriter.CREATION_TIME_KEY, String.valueOf(123456L),
      AbstractSystemMetadataWriter.TTL_KEY, "100"),
    ImmutableSet.<String>of());
  Assert.assertEquals(expected, store.getMetadata(MetadataScope.SYSTEM, dsInstance));
  // Now remove TTL, and add dsType
  datasetSystemMetadataWriter = new DatasetSystemMetadataWriter(
    store, dsInstance, DatasetProperties.EMPTY, null, "dsType", "description2");
  datasetSystemMetadataWriter.write();
  expected = new MetadataRecord(
    dsInstance, MetadataScope.SYSTEM,
    ImmutableMap.of(
      AppSystemMetadataWriter.ENTITY_NAME_KEY, dsInstance.getEntityName(),
      AbstractSystemMetadataWriter.DESCRIPTION_KEY, "description2",
      AbstractSystemMetadataWriter.CREATION_TIME_KEY, String.valueOf(123456L),
      DatasetSystemMetadataWriter.TYPE, "dsType"),
    ImmutableSet.<String>of());
  Assert.assertEquals(expected, store.getMetadata(MetadataScope.SYSTEM, dsInstance));
  store.removeMetadata(dsInstance);
}
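The second write replaces the description, drops the TTL property, and adds the dataset type, while the entity name and the original creation time survive the overwrite. Since both expected records differ only in their property maps and carry no tags, a hypothetical helper (a sketch, not part of the CDAP source) would make the two expectations read as a property-map diff:

  private MetadataRecord expectedSystemMetadata(DatasetId dsInstance, Map<String, String> properties) {
    // System metadata for the dataset carries no tags in either assertion above.
    return new MetadataRecord(dsInstance, MetadataScope.SYSTEM, properties, ImmutableSet.<String>of());
  }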