Use of io.cdap.cdap.api.metadata.Metadata in project cdap by caskdata.
The class SmartWorkflow, method configure.
@Override
protected void configure() {
  setName(NAME);
  setDescription("Data Pipeline Workflow");
  // If plugins were registered only at the application level, CDAP would not be able to fail the run early.
  try {
    spec = new BatchPipelineSpecGenerator(applicationConfigurer.getDeployedNamespace(), getConfigurer(),
                                          applicationConfigurer.getRuntimeConfigurer(),
                                          ImmutableSet.of(BatchSource.PLUGIN_TYPE),
                                          ImmutableSet.of(BatchSink.PLUGIN_TYPE, SparkSink.PLUGIN_TYPE,
                                                          AlertPublisher.PLUGIN_TYPE),
                                          config.getEngine(), getConfigurer()).generateSpec(config);
  } catch (ValidationException e) {
    throw new IllegalArgumentException(
      String.format("Failed to configure pipeline: %s",
                    e.getFailures().isEmpty() ? e.getMessage()
                                              : e.getFailures().iterator().next().getFullMessage()), e);
  }
  // prefix each connection name with "_" so it will not conflict with the system tags we add
  Set<String> connectionsUsed = spec.getConnectionsUsed().stream().map(s -> "_" + s).collect(Collectors.toSet());
  applicationConfigurer.emitMetadata(new Metadata(Collections.emptyMap(), connectionsUsed), MetadataScope.SYSTEM);
  stageSpecs = new HashMap<>();
  useSpark = config.getEngine() == Engine.SPARK;
  for (StageSpec stageSpec : spec.getStages()) {
    stageSpecs.put(stageSpec.getName(), stageSpec);
    String pluginType = stageSpec.getPlugin().getType();
    if (SparkCompute.PLUGIN_TYPE.equals(pluginType) || SparkSink.PLUGIN_TYPE.equals(pluginType)) {
      useSpark = true;
    }
  }
  plan = createPlan();
  WorkflowProgramAdder programAdder = new TrunkProgramAdder(getConfigurer());
  // single phase, just add the program directly
  if (plan.getPhases().size() == 1) {
    addProgram(plan.getPhases().keySet().iterator().next(), programAdder);
    setWorkflowProperties();
    return;
  }
  // Dag classes don't allow a 'dag' without connections
  if (plan.getPhaseConnections().isEmpty()) {
    WorkflowProgramAdder fork = programAdder.fork();
    for (String phaseName : plan.getPhases().keySet()) {
      addProgram(phaseName, fork);
    }
    fork.join();
    setWorkflowProperties();
    return;
  }
  /*
     ControlDag is used to flatten the dag that represents connections between phases.
     Connections between phases represent a happens-before relationship, not the flow of data.
     As such, phases can be shifted around as long as every happens-before relationship is maintained.
     The exception is condition phases: connections from a condition to another phase must be maintained as-is.
     Flattening a ControlDag transforms a dag into a special fork-join dag by moving phases around,
     so we cannot blindly flatten the phase connections.
     However, we validated earlier that condition outputs have a special property: every stage following a
     condition can only have a single input. This means we will never need to flatten anything after the first
     set of conditions; we only have to flatten what comes before the first set of conditions.
   */
  dag = new ControlDag(plan.getPhaseConnections());
  boolean dummyNodeAdded = false;
  Map<String, ConditionBranches> conditionBranches = plan.getConditionPhaseBranches();
  if (conditionBranches.isEmpty()) {
    // after flattening, there is guaranteed to be just one source
    dag.flatten();
  } else if (!conditionBranches.keySet().containsAll(dag.getSources())) {
    // Continue only if the condition node is not the source of the dag; otherwise the dag is already in the
    // required form
    Set<String> conditions = conditionBranches.keySet();
    // flatten only the part of the dag starting from the sources and ending in conditions/sinks
    Set<String> dagNodes = dag.accessibleFrom(dag.getSources(), Sets.union(dag.getSinks(), conditions));
    Set<String> dagNodesWithoutCondition = Sets.difference(dagNodes, conditions);
    Set<Connection> connections = new HashSet<>();
    Deque<String> bfs = new LinkedList<>();
    Set<String> sinks = new HashSet<>();
    // If it's a single phase without a condition, there is no need to flatten
    if (dagNodesWithoutCondition.size() < 2) {
      sinks.addAll(dagNodesWithoutCondition);
    } else {
      /*
         Create a subdag from dagNodesWithoutCondition.
         There are a couple of situations where this is not immediately possible. For example:

           source1 --|
                     |--> condition -- ...
           source2 --|

         Here, dagNodesWithoutCondition = [source1, source2], which is an invalid dag. Similarly:

           source --> condition -- ...

         Here, dagNodesWithoutCondition = [source], which is also invalid. In order to ensure that we have a
         valid dag, we just insert a dummy node as the first node in the subdag, adding a connection from the
         dummy node to all the sources.
       */
      Dag subDag;
      try {
        subDag = dag.createSubDag(dagNodesWithoutCondition);
      } catch (IllegalArgumentException | DisjointConnectionsException e) {
        // DisjointConnectionsException is thrown when islands are created from dagNodesWithoutCondition;
        // IllegalArgumentException is thrown when the connections are empty.
        // In both cases we need to add a dummy node and create a connected Dag.
        String dummyNode = "dummy";
        dummyNodeAdded = true;
        Set<Connection> subDagConnections = new HashSet<>();
        for (String source : dag.getSources()) {
          subDagConnections.add(new Connection(dummyNode, source));
        }
        Deque<String> subDagBFS = new LinkedList<>();
        subDagBFS.addAll(dag.getSources());
        while (subDagBFS.peek() != null) {
          String node = subDagBFS.poll();
          for (String output : dag.getNodeOutputs(node)) {
            if (dagNodesWithoutCondition.contains(output)) {
              subDagConnections.add(new Connection(node, output));
              subDagBFS.add(output);
            }
          }
        }
        subDag = new Dag(subDagConnections);
      }
      ControlDag cdag = new ControlDag(subDag);
      cdag.flatten();
      // Add all connections from the flattened cdag
      bfs.addAll(cdag.getSources());
      while (bfs.peek() != null) {
        String node = bfs.poll();
        for (String output : cdag.getNodeOutputs(node)) {
          connections.add(new Connection(node, output));
          bfs.add(output);
        }
      }
      sinks.addAll(cdag.getSinks());
    }
    // Add back the existing condition nodes and their corresponding connections
    Set<String> conditionsFromDag = Sets.intersection(dagNodes, conditions);
    for (String condition : conditionsFromDag) {
      connections.add(new Connection(sinks.iterator().next(), condition));
    }
    bfs.addAll(Sets.intersection(dagNodes, conditions));
    while (bfs.peek() != null) {
      String node = bfs.poll();
      ConditionBranches branches = conditionBranches.get(node);
      if (branches == null) {
        // not a condition node, add its outputs
        for (String output : dag.getNodeOutputs(node)) {
          connections.add(new Connection(node, output));
          bfs.add(output);
        }
      } else {
        // condition node: follow both the true and the false branch
        for (Boolean condition : Arrays.asList(true, false)) {
          String phase = condition ? branches.getTrueOutput() : branches.getFalseOutput();
          if (phase == null) {
            continue;
          }
          connections.add(new Connection(node, phase, condition));
          bfs.add(phase);
        }
      }
    }
    dag = new ControlDag(connections);
  }
  if (dummyNodeAdded) {
    WorkflowProgramAdder fork = programAdder.fork();
    String dummyNode = dag.getSources().iterator().next();
    // need to make sure we don't call also() if this is the final branch
    Iterator<String> outputIter = dag.getNodeOutputs(dummyNode).iterator();
    addBranchPrograms(outputIter.next(), fork, false);
    while (outputIter.hasNext()) {
      fork = fork.also();
      addBranchPrograms(outputIter.next(), fork, !outputIter.hasNext());
    }
  } else {
    String start = dag.getSources().iterator().next();
    addPrograms(start, programAdder);
  }
  setWorkflowProperties();
}
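The emitMetadata call above is where this example actually touches io.cdap.cdap.api.metadata.Metadata: the prefixed connection names become SYSTEM-scope tags with no properties. Below is a minimal, self-contained sketch of just that step, with made-up connection names; only the Metadata constructor and getTags() are taken from the API, everything else is illustrative.

import io.cdap.cdap.api.metadata.Metadata;

import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import java.util.stream.Collectors;

public class ConnectionTagSketch {
  public static void main(String[] args) {
    // hypothetical connection names; in SmartWorkflow these come from the pipeline spec
    Set<String> connectionsUsed = new HashSet<>(Arrays.asList("mysql-conn", "gcs-conn"));
    // prefix each name with "_" so the emitted tags cannot collide with system tags
    Set<String> tags = connectionsUsed.stream().map(s -> "_" + s).collect(Collectors.toSet());
    // tags-only metadata: the properties map is intentionally empty
    Metadata metadata = new Metadata(Collections.emptyMap(), tags);
    System.out.println(metadata.getTags()); // e.g. [_mysql-conn, _gcs-conn]
  }
}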
Use of io.cdap.cdap.api.metadata.Metadata in project cdap by caskdata.
The class SystemMetadataAuditPublishTest, method addAllSystemMetadata.
private int addAllSystemMetadata(Set<String> allMetadata) {
  for (AuditMessage auditMessage : getMetadataUpdateMessages()) {
    AuditPayload payload = auditMessage.getPayload();
    Assert.assertTrue(payload instanceof MetadataPayload);
    MetadataPayload metadataPayload = (MetadataPayload) payload;
    Map<MetadataScope, Metadata> additions = metadataPayload.getAdditions();
    if (additions.containsKey(MetadataScope.SYSTEM)) {
      allMetadata.addAll(additions.get(MetadataScope.SYSTEM).getProperties().keySet());
      allMetadata.addAll(additions.get(MetadataScope.SYSTEM).getTags());
    }
    Map<MetadataScope, Metadata> deletions = metadataPayload.getDeletions();
    if (deletions.containsKey(MetadataScope.SYSTEM)) {
      allMetadata.addAll(deletions.get(MetadataScope.SYSTEM).getProperties().keySet());
      allMetadata.addAll(deletions.get(MetadataScope.SYSTEM).getTags());
    }
  }
  return allMetadata.size();
}
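The helper above flattens additions and deletions the same way: take the SYSTEM-scope Metadata from a scope-to-Metadata map and collect its property keys and tags into one set. A minimal sketch of that pattern in isolation, assuming only the Metadata and MetadataScope types already used in the test:

import io.cdap.cdap.api.metadata.Metadata;
import io.cdap.cdap.api.metadata.MetadataScope;

import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public final class CollectSystemMetadata {
  // collect the SYSTEM-scope property keys and tags into one flat name set
  static Set<String> collect(Map<MetadataScope, Metadata> changes) {
    Set<String> all = new HashSet<>();
    Metadata system = changes.get(MetadataScope.SYSTEM);
    if (system != null) {
      all.addAll(system.getProperties().keySet());
      all.addAll(system.getTags());
    }
    return all;
  }
}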
Use of io.cdap.cdap.api.metadata.Metadata in project cdap by caskdata.
The class MetadataHttpHandlerTestRun, method testSearchMetadata.
@Test
public void testSearchMetadata() throws Exception {
  appClient.deploy(NamespaceId.DEFAULT, createAppJarFile(AllProgramsApp.class));
  // wait for the system metadata to be processed
  ApplicationId appId = NamespaceId.DEFAULT.app(AllProgramsApp.NAME);
  DatasetId datasetId = NamespaceId.DEFAULT.dataset(AllProgramsApp.DATASET_NAME);
  Tasks.waitFor(false, () -> getProperties(appId, MetadataScope.SYSTEM).isEmpty(), 10, TimeUnit.SECONDS);
  Tasks.waitFor(false, () -> getProperties(datasetId, MetadataScope.SYSTEM).isEmpty(), 10, TimeUnit.SECONDS);
  Map<NamespacedEntityId, Metadata> expectedUserMetadata = new HashMap<>();
  // Add metadata to the app
  Map<String, String> props = ImmutableMap.of("key1", "value1");
  Set<String> tags = ImmutableSet.of("tag1", "tag2");
  addProperties(appId, props);
  addTags(appId, tags);
  expectedUserMetadata.put(appId, new Metadata(props, tags));
  // Add metadata to the dataset
  props = ImmutableMap.of("key10", "value10", "key11", "value11");
  tags = ImmutableSet.of("tag11");
  addProperties(datasetId, props);
  addTags(datasetId, tags);
  expectedUserMetadata.put(datasetId, new Metadata(props, tags));
  Set<MetadataSearchResultRecord> results = searchMetadata(NamespaceId.DEFAULT, "value*").getResults();
  // Verify results
  Assert.assertEquals(expectedUserMetadata.keySet(), extractEntityIds(results));
  for (MetadataSearchResultRecord result : results) {
    // User metadata has to match exactly since we know what we have set
    Assert.assertEquals(expectedUserMetadata.get(result.getEntityId()),
                        result.getMetadata().get(MetadataScope.USER));
    // Make sure system metadata is returned; we cannot check for an exact match since we haven't set it
    Metadata systemMetadata = result.getMetadata().get(MetadataScope.SYSTEM);
    Assert.assertNotNull(systemMetadata);
    Assert.assertFalse(systemMetadata.getProperties().isEmpty());
    Assert.assertFalse(systemMetadata.getTags().isEmpty());
  }
  // add metadata to a field (custom entity)
  props = ImmutableMap.of("fKey1", "fValue1", "fKey2", "fValue2");
  tags = ImmutableSet.of("fTag1");
  MetadataEntity metadataEntity =
    MetadataEntity.builder(datasetId.toMetadataEntity()).appendAsType("field", "someField").build();
  addProperties(metadataEntity, props);
  addTags(metadataEntity, tags);
  Map<MetadataEntity, Metadata> expectedUserMetadataV2 = new HashMap<>();
  expectedUserMetadataV2.put(metadataEntity, new Metadata(props, tags));
  Set<MetadataSearchResultRecord> resultsV2 =
    super.searchMetadata(ImmutableList.of(NamespaceId.DEFAULT), "fValue*", ImmutableSet.of(),
                         null, 0, Integer.MAX_VALUE, 0, null, false).getResults();
  // Verify results
  Assert.assertEquals(expectedUserMetadataV2.keySet(), ImmutableSet.copyOf(extractMetadataEntities(resultsV2)));
  for (MetadataSearchResultRecord result : resultsV2) {
    // User metadata has to match exactly since we know what we have set
    Assert.assertEquals(expectedUserMetadataV2.get(result.getMetadataEntity()),
                        result.getMetadata().get(MetadataScope.USER));
    Metadata systemMetadata = result.getMetadata().get(MetadataScope.SYSTEM);
    // a custom entity should not have any system metadata
    Assert.assertNull(systemMetadata);
  }
}
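The interesting construction above is the MetadataEntity builder, which derives a custom "field" sub-entity from the dataset's entity so metadata can be attached below the dataset level. A standalone sketch of the same construction follows; the namespace and dataset names are hypothetical, and the ofDataset factory is assumed to exist alongside the builder calls shown in the test.

import io.cdap.cdap.api.metadata.MetadataEntity;

public class FieldEntitySketch {
  public static void main(String[] args) {
    // hypothetical dataset in the "default" namespace
    MetadataEntity dataset = MetadataEntity.ofDataset("default", "kvTable");
    // extend the dataset entity with a custom "field" sub-entity, as in the test above
    MetadataEntity field = MetadataEntity.builder(dataset)
      .appendAsType("field", "someField")
      .build();
    System.out.println(field); // custom entity keyed by the "field" type
  }
}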
Use of io.cdap.cdap.api.metadata.Metadata in project cdap by caskdata.
The class MetadataHttpHandlerTestRun, method testCrossNamespaceSearchMetadata.
@Test
public void testCrossNamespaceSearchMetadata() throws Exception {
  NamespaceId namespace1 = new NamespaceId("ns1");
  namespaceClient.create(new NamespaceMeta.Builder().setName(namespace1).build());
  NamespaceId namespace2 = new NamespaceId("ns2");
  namespaceClient.create(new NamespaceMeta.Builder().setName(namespace2).build());
  try {
    appClient.deploy(namespace1, createAppJarFile(AllProgramsApp.class));
    appClient.deploy(namespace2, createAppJarFile(AllProgramsApp.class));
    // Add the same metadata to an app in each namespace
    Map<String, String> props = ImmutableMap.of("key1", "value1");
    Metadata meta = new Metadata(props, Collections.emptySet());
    ApplicationId app1Id = namespace1.app(AllProgramsApp.NAME);
    addProperties(app1Id, props);
    ApplicationId app2Id = namespace2.app(AllProgramsApp.NAME);
    addProperties(app2Id, props);
    MetadataSearchResponse results =
      super.searchMetadata(ImmutableList.of(), "value*", Collections.emptySet(), null, 0, 10, 0, null, false);
    Map<MetadataEntity, Metadata> expected = new HashMap<>();
    expected.put(app1Id.toMetadataEntity(), meta);
    expected.put(app2Id.toMetadataEntity(), meta);
    Map<MetadataEntity, Metadata> actual = new HashMap<>();
    for (MetadataSearchResultRecord record : results.getResults()) {
      actual.put(record.getMetadataEntity(), record.getMetadata().get(MetadataScope.USER));
    }
    Assert.assertEquals(expected, actual);
  } finally {
    namespaceClient.delete(namespace1);
    namespaceClient.delete(namespace2);
  }
}
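The final assertEquals above only works because Metadata compares by value: two instances built from equal properties and tags are equal, so the records found in both namespaces match the single meta instance. A minimal sketch of that assumption, using the same constructor shape as the test:

import io.cdap.cdap.api.metadata.Metadata;

import com.google.common.collect.ImmutableMap;

import java.util.Collections;

public class MetadataEqualitySketch {
  public static void main(String[] args) {
    Metadata a = new Metadata(ImmutableMap.of("key1", "value1"), Collections.emptySet());
    Metadata b = new Metadata(ImmutableMap.of("key1", "value1"), Collections.emptySet());
    // value equality is what the cross-namespace assertion depends on
    System.out.println(a.equals(b)); // true
  }
}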
Use of io.cdap.cdap.api.metadata.Metadata in project cdap by caskdata.
The class RemoteMetadataReader, method getMetadata.
@Override
public Map<MetadataScope, Metadata> getMetadata(MetadataEntity metadataEntity) throws MetadataException {
  Map<MetadataScope, Metadata> scopeMetadata = new HashMap<>();
  Set<MetadataRecord> metadata;
  try {
    metadata = metadataClient.getMetadata(metadataEntity);
  } catch (ServiceUnavailableException e) {
    throw e;
  } catch (Exception e) {
    throw new MetadataException(e);
  }
  metadata.forEach(record -> scopeMetadata.put(record.getScope(),
                                               new Metadata(record.getProperties(), record.getTags())));
  LOG.trace("Returning metadata record {} for {}", scopeMetadata, metadataEntity);
  return scopeMetadata;
}
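Callers of getMetadata receive a map from scope to Metadata, one entry per scope the entity has metadata in. A short, hypothetical sketch of consuming that map, assuming only the getProperties() and getTags() accessors used elsewhere on this page:

import io.cdap.cdap.api.metadata.Metadata;
import io.cdap.cdap.api.metadata.MetadataScope;

import java.util.Map;

public final class PrintMetadata {
  // print every property and tag in every scope returned by a reader
  static void print(Map<MetadataScope, Metadata> byScope) {
    byScope.forEach((scope, metadata) -> {
      metadata.getProperties().forEach((k, v) -> System.out.println(scope + " property " + k + "=" + v));
      metadata.getTags().forEach(tag -> System.out.println(scope + " tag " + tag));
    });
  }
}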