use of io.cdap.cdap.proto.id.NamespacedEntityId in project cdap by caskdata.
the class OwnerStoreTest method testGetOwners.
@Test
public void testGetOwners() throws IOException, AlreadyExistsException {
  OwnerStore ownerStore = getOwnerStore();
  ownerStore.add(NamespaceId.DEFAULT.dataset("dataset"), new KerberosPrincipalId("ds"));
  ownerStore.add(NamespaceId.DEFAULT.app("app"), new KerberosPrincipalId("app"));
  ownerStore.add(NamespaceId.DEFAULT.artifact("artifact", "1.2.3"), new KerberosPrincipalId("artifact"));

  Set<NamespacedEntityId> ids = ImmutableSet.of(NamespaceId.DEFAULT.dataset("dataset"),
                                                NamespaceId.DEFAULT.app("app"),
                                                NamespaceId.DEFAULT.artifact("artifact", "1.2.3"),
                                                NamespaceId.DEFAULT.app("noowner"));
  Map<NamespacedEntityId, KerberosPrincipalId> owners = ownerStore.getOwners(ids);
  Assert.assertEquals(3, owners.size());
  Assert.assertEquals(new KerberosPrincipalId("ds"), owners.get(NamespaceId.DEFAULT.dataset("dataset")));
  Assert.assertEquals(new KerberosPrincipalId("app"), owners.get(NamespaceId.DEFAULT.app("app")));
  Assert.assertEquals(new KerberosPrincipalId("artifact"), owners.get(NamespaceId.DEFAULT.artifact("artifact", "1.2.3")));
  Assert.assertNull(owners.get(NamespaceId.DEFAULT.app("noowner")));
}
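As the test shows, getOwners only returns entries for entities that actually have an owner; entities without one are simply absent from the map. A caller that needs a principal for every requested entity can fall back to a default. A minimal sketch, reusing only the calls shown above; the defaultPrincipal value is hypothetical:

// Hypothetical caller-side fallback: entities without an owner get a default principal.
// Only the getOwners(Set<NamespacedEntityId>) call demonstrated in the test above is used.
Map<NamespacedEntityId, KerberosPrincipalId> owners = ownerStore.getOwners(ids);
KerberosPrincipalId defaultPrincipal = new KerberosPrincipalId("cdap"); // hypothetical default
Map<NamespacedEntityId, KerberosPrincipalId> resolved = new HashMap<>();
for (NamespacedEntityId id : ids) {
  resolved.put(id, owners.getOrDefault(id, defaultPrincipal));
}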
use of io.cdap.cdap.proto.id.NamespacedEntityId in project cdap by caskdata.
the class ProfileMetadataMessageProcessor method removeProfileMetadata.
/**
 * Removes the profile metadata according to the message; currently this only applies to applications and schedules.
 */
private void removeProfileMetadata(MetadataMessage message) throws IOException {
  EntityId entity = message.getEntityId();
  List<MetadataMutation> deletes = new ArrayList<>();
  // We only care about applications and schedules.
  if (entity.getEntityType().equals(EntityType.APPLICATION)) {
    LOG.trace("Removing profile metadata for {}", entity);
    ApplicationId appId = (ApplicationId) message.getEntityId();
    ApplicationSpecification appSpec = message.getPayload(GSON, ApplicationSpecification.class);
    for (ProgramId programId : getAllProfileAllowedPrograms(appSpec, appId)) {
      addProfileMetadataDelete(programId, deletes);
    }
    for (ScheduleId scheduleId : getSchedulesInApp(appId, appSpec.getProgramSchedules())) {
      addProfileMetadataDelete(scheduleId, deletes);
    }
    addPluginMetadataDelete(appId, appSpec, deletes);
  } else if (entity.getEntityType().equals(EntityType.SCHEDULE)) {
    addProfileMetadataDelete((NamespacedEntityId) entity, deletes);
  }
  if (!deletes.isEmpty()) {
    metadataStorage.batch(deletes, MutationOptions.DEFAULT);
  }
}
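The addProfileMetadataDelete helper is not shown in this snippet. A minimal sketch of what it could look like, assuming MetadataMutation.Remove accepts the entity's MetadataEntity plus a set of scoped names to drop, and that the profile is stored as a SYSTEM-scoped "profile" property; both of these are assumptions, not confirmed by the snippet:

// Sketch only: assumes MetadataMutation.Remove(MetadataEntity, Set<ScopedNameOfKind>) and
// that the profile is recorded as the SYSTEM-scoped "profile" property on the entity.
private void addProfileMetadataDelete(NamespacedEntityId entityId, List<MetadataMutation> deletes) {
  deletes.add(new MetadataMutation.Remove(
      entityId.toMetadataEntity(),
      Collections.singleton(new ScopedNameOfKind(MetadataKind.PROPERTY, MetadataScope.SYSTEM, "profile"))));
}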
use of io.cdap.cdap.proto.id.NamespacedEntityId in project cdap by caskdata.
the class LineageLimitingTest method testLineageLimiting.
@Test
public void testLineageLimiting() throws InterruptedException, ExecutionException, TimeoutException {
  LineageStoreReader lineageReader = getInjector().getInstance(LineageStoreReader.class);
  ProgramRunId run1 = service1.run(RunIds.generate());

  // Write out some lineage information
  LineageWriter lineageWriter = getInjector().getInstance(MessagingLineageWriter.class);
  lineageWriter.addAccess(run1, dataset1, AccessType.READ);
  lineageWriter.addAccess(run1, dataset2, AccessType.WRITE);

  // Write the field-level lineage
  FieldLineageWriter fieldLineageWriter = getInjector().getInstance(MessagingLineageWriter.class);
  ProgramRunId spark1Run1 = spark1.run(RunIds.generate(100));
  ReadOperation read = new ReadOperation("read", "some read", EndPoint.of("ns", "endpoint1"), "offset", "body");
  TransformOperation parse = new TransformOperation("parse", "parse body",
                                                    Collections.singletonList(InputField.of("read", "body")),
                                                    "name", "address");
  WriteOperation write = new WriteOperation("write", "write data", EndPoint.of("ns", "endpoint2"),
                                            Arrays.asList(InputField.of("read", "offset"),
                                                          InputField.of("parse", "name"),
                                                          InputField.of("parse", "address")));
  List<Operation> operations = new ArrayList<>();
  operations.add(read);
  operations.add(write);
  operations.add(parse);
  FieldLineageInfo info1 = new FieldLineageInfo(operations);
  fieldLineageWriter.write(spark1Run1, info1);
  ProgramRunId spark1Run2 = spark1.run(RunIds.generate(200));
  fieldLineageWriter.write(spark1Run2, info1);

  // Verify that the dataset lineage has been written, since it is smaller than the maximum specified size
  Set<NamespacedEntityId> expectedLineage = new HashSet<>(Arrays.asList(run1.getParent(), dataset1, dataset2));
  Tasks.waitFor(true, () -> expectedLineage.equals(lineageReader.getEntitiesForRun(run1)),
                10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);

  FieldLineageReader fieldLineageReader = getInjector().getInstance(FieldLineageReader.class);
  // Verify that empty field lineage has been written, as the field lineage info exceeds the maximum specified size
  EndPointField endPointField = new EndPointField(EndPoint.of("ns", "endpoint2"), "offset");
  List<ProgramRunOperations> incomingOperations =
    fieldLineageReader.getIncomingOperations(endPointField, 1L, Long.MAX_VALUE - 1);
  Assert.assertTrue(incomingOperations.isEmpty());
}
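For contrast with the empty result asserted here, a sketch of what the verification would look like in a run where the field lineage fits under the configured limit; it reuses only the reader call and the Tasks.waitFor signature already shown in this test, and the expectation that operations eventually appear is an assumption about the non-limited case:

// Sketch: when the field lineage is small enough to be written, the same query should
// eventually return the operations recorded for spark1Run1/spark1Run2.
EndPointField destinationField = new EndPointField(EndPoint.of("ns", "endpoint2"), "offset");
Tasks.waitFor(false,
              () -> fieldLineageReader.getIncomingOperations(destinationField, 1L, Long.MAX_VALUE - 1).isEmpty(),
              10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);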
use of io.cdap.cdap.proto.id.NamespacedEntityId in project cdap by caskdata.
the class MetadataDataset method parseRow.
// There may not be a MetadataEntry in the row, or it may be for a different targetType (entityFilter),
// so return an Optional
private Optional<MetadataEntry> parseRow(Row rowToProcess, String indexColumn, Set<String> entityFilter, boolean showHidden) {
  String rowValue = rowToProcess.getString(indexColumn);
  if (rowValue == null) {
    return Optional.empty();
  }

  final byte[] rowKey = rowToProcess.getRow();
  String targetType = MetadataKey.extractTargetType(rowKey);
  // Filter on the target type unless the filter is set to include all types
  if (!entityFilter.isEmpty() && !entityFilter.contains(targetType)) {
    return Optional.empty();
  }

  MetadataEntity metadataEntity = MetadataKey.extractMetadataEntityFromKey(rowKey);
  try {
    NamespacedEntityId namespacedEntityId = EntityId.fromMetadataEntity(metadataEntity);
    // If the entity name starts with '_', skip it unless the caller chose showHidden.
    if (!showHidden && namespacedEntityId != null && namespacedEntityId.getEntityName().startsWith("_")) {
      return Optional.empty();
    }
  } catch (IllegalArgumentException e) {
    // Ignore: custom entities are not hidden even if their names start with '_'
  }

  String key = MetadataKey.extractMetadataKey(rowKey);
  MetadataEntry entry = getMetadata(metadataEntity, key);
  return Optional.ofNullable(entry);
}
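Because every filtered-out row maps to Optional.empty(), a caller can fold the results of a scan without any special-case checks. A hypothetical caller loop, using only the parseRow method above; rows, indexColumn, and entityFilter are illustrative names for values obtained elsewhere:

// Hypothetical caller: collect entries across scanned rows, silently skipping rows that
// parseRow filters out (missing value, wrong target type, or hidden entity).
List<MetadataEntry> results = new ArrayList<>();
for (Row row : rows) { // 'rows' stands in for an Iterable<Row> from a prior scan
  parseRow(row, indexColumn, entityFilter, false).ifPresent(results::add);
}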
use of io.cdap.cdap.proto.id.NamespacedEntityId in project cdap by caskdata.
the class AuditPublishTest method testPublish.
@Test
public void testPublish() throws Exception {
  String appName = AllProgramsApp.NAME;
  ApplicationId appId = NamespaceId.DEFAULT.app(appName);
  ApplicationSpecification spec = Specifications.from(new AllProgramsApp());

  // Define the expected values
  Set<EntityId> expectedMetadataChangeEntities = new HashSet<>();
  // Metadata changes on the artifact and the app
  expectedMetadataChangeEntities.add(NamespaceId.DEFAULT.artifact(AllProgramsApp.class.getSimpleName(), "1"));
  expectedMetadataChangeEntities.add(appId);
  // All programs will have metadata changes
  for (ProgramType programType : ProgramType.values()) {
    for (String programName : spec.getProgramsByType(programType)) {
      io.cdap.cdap.proto.ProgramType internalProgramType = io.cdap.cdap.proto.ProgramType.valueOf(programType.name());
      expectedMetadataChangeEntities.add(appId.program(internalProgramType, programName));
    }
  }
  // All datasets will have metadata changes as well as creation events
  Set<EntityId> expectedCreateEntities = new HashSet<>();
  for (String dataset : spec.getDatasets().keySet()) {
    expectedCreateEntities.add(NamespaceId.DEFAULT.dataset(dataset));
  }
  expectedMetadataChangeEntities.addAll(expectedCreateEntities);
  // TODO (CDAP-14733): Scheduler doesn't publish CREATE audit events. Once it does, we must expect them here, too.
  for (String schedule : spec.getProgramSchedules().keySet()) {
    expectedMetadataChangeEntities.add(appId.schedule(schedule));
  }
  Multimap<AuditType, EntityId> expectedAuditEntities = HashMultimap.create();
  expectedAuditEntities.putAll(AuditType.METADATA_CHANGE, expectedMetadataChangeEntities);
  expectedAuditEntities.putAll(AuditType.CREATE, expectedCreateEntities);

  // Deploy the application
  AppFabricTestHelper.deployApplication(Id.Namespace.DEFAULT, AllProgramsApp.class, null, cConf);

  // Verify the audit messages
  Tasks.waitFor(expectedAuditEntities, () -> {
    List<AuditMessage> publishedMessages = fetchAuditMessages();
    Multimap<AuditType, EntityId> actualAuditEntities = HashMultimap.create();
    for (AuditMessage message : publishedMessages) {
      EntityId entityId = EntityId.fromMetadataEntity(message.getEntity());
      if (entityId instanceof NamespacedEntityId
          && ((NamespacedEntityId) entityId).getNamespace().equals(NamespaceId.SYSTEM.getNamespace())) {
        // Ignore system audit messages
        continue;
      }
      if (entityId.getEntityType() == EntityType.ARTIFACT && entityId instanceof ArtifactId) {
        ArtifactId artifactId = (ArtifactId) entityId;
        // The artifact version is dynamic for deploys in test cases, so normalize it to "1"
        entityId = Ids.namespace(artifactId.getNamespace()).artifact(artifactId.getArtifact(), "1");
      }
      actualAuditEntities.put(message.getType(), entityId);
    }
    return actualAuditEntities;
  }, 5, TimeUnit.SECONDS);
}
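The system-namespace filter inside the waitFor lambda is the part that actually relies on NamespacedEntityId: only entity ids that carry a namespace can be recognized as system entities. Extracted into a hypothetical helper (not part of the original test), the check reads as:

// Hypothetical helper mirroring the filter above: an EntityId is treated as a system entity
// only if it is namespaced and its namespace equals the system namespace.
private static boolean isSystemEntity(EntityId entityId) {
  return entityId instanceof NamespacedEntityId
      && ((NamespacedEntityId) entityId).getNamespace().equals(NamespaceId.SYSTEM.getNamespace());
}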