Use of io.cdap.cdap.api.metadata.MetadataEntity in project cdap by caskdata.
From class MetadataStorageTest, method testSearchOnTTL:
@Test
public void testSearchOnTTL() throws Exception {
  MetadataStorage mds = getMetadataStorage();
  MetadataEntity ds = ofDataset("ns1", "ds");
  Metadata metaWithTTL = new Metadata(SYSTEM, props(TTL_KEY, "3600"));
  MetadataRecord dsRecord = new MetadataRecord(ds, metaWithTTL);
  mds.apply(new Update(ds, metaWithTTL), MutationOptions.DEFAULT);
  // the TTL property is written in SYSTEM scope, so a USER-scoped search must come up empty
  assertEmpty(mds, SearchRequest.of("ttl:3600").setScope(USER).build());
  assertResults(mds, SearchRequest.of("ttl:3600").build(), dsRecord);
  assertResults(mds, SearchRequest.of("ttl:3600").setScope(SYSTEM).build(), dsRecord);
  // additional queries that should also match the TTL property
  List<String> moreQueries = new ArrayList<>(ImmutableList.of("3600", "properties:ttl", "ttl:*", "TTL:3600"));
  moreQueries.addAll(getAdditionalTTLQueries());
  for (String query : moreQueries) {
    try {
      assertResults(mds, SearchRequest.of(query).build(), dsRecord);
    } catch (Throwable e) {
      // report the failing query and keep the original failure as the cause
      throw new Exception("Search failed for query: " + query, e);
    }
  }
  mds.apply(new Drop(ds), MutationOptions.DEFAULT);
}
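The ofDataset, props, assertResults, assertEmpty, and getAdditionalTTLQueries calls above are helpers from the MetadataStorageTest harness. Stripped of the harness, the pattern being exercised is: write a SYSTEM-scoped property with an Update mutation, then query it through a scoped SearchRequest. Below is a minimal sketch under the assumptions that mds is an initialized MetadataStorage, that TTL_KEY resolves to the property name "ttl", and that MetadataStorage exposes a search(SearchRequest) method returning a SearchResponse (which assertResults presumably wraps):

// Sketch only, not the test harness; assumes an initialized MetadataStorage `mds`.
MetadataEntity ds = MetadataEntity.ofDataset("ns1", "ds");
Metadata ttlMeta = new Metadata(MetadataScope.SYSTEM, Collections.singletonMap("ttl", "3600"));
mds.apply(new MetadataMutation.Update(ds, ttlMeta), MutationOptions.DEFAULT);
// the property lives in SYSTEM scope, so it is found there (and in an unscoped search), but not in USER scope
SearchResponse hits = mds.search(SearchRequest.of("ttl:3600").setScope(MetadataScope.SYSTEM).build());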
Use of io.cdap.cdap.api.metadata.MetadataEntity in project cdap by caskdata.
From class MetadataStorageTest, method testCrossNamespaceSearch:
@Test
public void testCrossNamespaceSearch() throws IOException {
  MetadataStorage mds = getMetadataStorage();
  String ns1 = "ns1";
  String ns2 = "ns2";
  MetadataEntity ns1app1 = ofApp(ns1, "a1");
  MetadataEntity ns1app2 = ofApp(ns1, "a2");
  MetadataEntity ns1app3 = ofApp(ns1, "a3");
  MetadataEntity ns2app1 = ofApp(ns2, "a1");
  MetadataEntity ns2app2 = ofApp(ns2, "a2");
  MetadataRecord record11 = new MetadataRecord(ns1app1, new Metadata(USER, tags("v1"), props("k1", "v1")));
  MetadataRecord record12 = new MetadataRecord(ns1app2, new Metadata(USER, props("k1", "v1", "k2", "v2")));
  MetadataRecord record13 = new MetadataRecord(ns1app3, new Metadata(USER, props("k1", "v1", "k3", "v3")));
  MetadataRecord record21 = new MetadataRecord(ns2app1, new Metadata(USER, props("k1", "v1", "k2", "v2")));
  MetadataRecord record22 = new MetadataRecord(ns2app2, new Metadata(USER, tags("v2", "v3"), props("k1", "v1")));
  MetadataRecord[] records = { record11, record12, record13, record21, record22 };
  // apply all metadata in batch
  mds.batch(Arrays.stream(records)
              .map(record -> new Update(record.getEntity(), record.getMetadata()))
              .collect(Collectors.toList()),
            MutationOptions.DEFAULT);
  // everything should match 'v1'
  assertResults(mds, SearchRequest.of("v1").setLimit(10).build(),
                record11, record12, record13, record21, record22);
  // ns1app2, ns2app1, and ns2app2 should match 'v2'
  assertResults(mds, SearchRequest.of("v2").setLimit(10).build(), record12, record21, record22);
  // ns1app3 and ns2app2 should match 'v3'
  assertResults(mds, SearchRequest.of("v3").setLimit(10).build(), record13, record22);
  // clean up
  mds.batch(batch(new Drop(ns1app1), new Drop(ns1app2), new Drop(ns1app3), new Drop(ns2app1), new Drop(ns2app2)),
            MutationOptions.DEFAULT);
}
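Note that none of the SearchRequests above call addNamespace, which is why the 'v1' query matches apps from both ns1 and ns2. Restricting the same query to a single namespace only requires the addNamespace builder call used in the tag-search test below; a small sketch (variable names are illustrative):

// Sketch: the same term, once across all namespaces and once limited to ns1.
SearchRequest everywhere = SearchRequest.of("v1").setLimit(10).build();
SearchRequest ns1Only = SearchRequest.of("v1").addNamespace("ns1").setLimit(10).build();
// With the data written above, the ns1-only request would be expected to match
// only record11, record12, and record13.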
Use of io.cdap.cdap.api.metadata.MetadataEntity in project cdap by caskdata.
From class MetadataStorageTest, method testSearchOnTags:
@Test
public void testSearchOnTags() throws Exception {
  MetadataStorage mds = getMetadataStorage();
  String ns1 = "ns1";
  String ns2 = "ns2";
  MetadataEntity app1 = ofApp(ns1, "app1");
  MetadataEntity app2 = ofApp(ns2, "app1");
  MetadataEntity program1 = ofWorker(app1, "wk1");
  MetadataEntity dataset1 = ofDataset(ns1, "ds1");
  MetadataEntity dataset2 = ofDataset(ns1, "ds2");
  MetadataEntity file1 = MetadataEntity.builder(dataset1).appendAsType("file", "f1").build();
  List<MetadataEntity> entities = ImmutableList.of(app1, app2, program1, dataset1, dataset2, file1);
  // nothing has been written yet, so every read must return empty metadata
  for (MetadataEntity entity : entities) {
    Assert.assertEquals(Metadata.EMPTY, mds.read(new Read(entity)));
  }
  // add tags for these entities
  MetadataRecord app1Record = new MetadataRecord(app1, new Metadata(USER, tags("tag1", "tag2", "tag3")));
  MetadataRecord app2Record = new MetadataRecord(app2, new Metadata(USER, tags("tag1", "tag2", "tag3_more")));
  MetadataRecord program1Record = new MetadataRecord(program1, new Metadata(USER, tags("tag1")));
  MetadataRecord dataset1Record = new MetadataRecord(dataset1, new Metadata(USER, tags("tag3", "tag2", "tag12-tag33")));
  MetadataRecord dataset2Record = new MetadataRecord(dataset2, new Metadata(USER, tags("tag2", "tag4")));
  MetadataRecord file1Record = new MetadataRecord(file1, new Metadata(USER, tags("tag2", "tag5")));
  mds.batch(ImmutableList.of(app1Record, app2Record, program1Record, dataset1Record, dataset2Record, file1Record)
              .stream()
              .map(record -> new Update(record.getEntity(), record.getMetadata()))
              .collect(Collectors.toList()),
            MutationOptions.DEFAULT);
  // Try to search on all tags
  assertResults(mds, SearchRequest.of("tags:*").addNamespace(ns1).build(),
                app1Record, program1Record, dataset1Record, dataset2Record, file1Record);
  // Try to search for tag1*
  assertResults(mds, SearchRequest.of("tags:tag1*").addNamespace(ns1).build(),
                app1Record, program1Record, dataset1Record);
  // Try to search for tag1 with spaces in the search query and mixed case for the tags keyword
  assertResults(mds, SearchRequest.of(" tAGS : tag1 ").addNamespace(ns1).build(), app1Record, program1Record);
  // Try to search for tag5
  assertResults(mds, SearchRequest.of("tags:tag5").addNamespace(ns1).build(), file1Record);
  // Try to search for tag2
  assertResults(mds, SearchRequest.of("tags:tag2").addNamespace(ns1).build(),
                app1Record, dataset1Record, dataset2Record, file1Record);
  // Try to search for tag4
  assertResults(mds, SearchRequest.of("tags:tag4").addNamespace(ns1).build(), dataset2Record);
  // Try to search for tag33 (matches dataset1 because the hyphenated tag "tag12-tag33" is split into terms)
  assertResults(mds, SearchRequest.of("tags:tag33").addNamespace(ns1).build(), dataset1Record);
  // Try to search for a tag that contains a hyphen
  assertResults(mds, SearchRequest.of("tag12-tag33").addNamespace(ns1).build(), dataset1Record);
  // Try to search for tag33 with spaces in the query
  assertResults(mds, SearchRequest.of(" tag33 ").addNamespace(ns1).build(), dataset1Record);
  // Try a wildcard search for tag3*
  assertResults(mds, SearchRequest.of("tags:tag3*").addNamespace(ns1).build(), app1Record, dataset1Record);
  // try a search in another namespace
  assertResults(mds, SearchRequest.of("tags:tag1").addNamespace(ns2).build(), app2Record);
  assertResults(mds, SearchRequest.of("tag3").addNamespace(ns2).build(), app2Record);
  assertResults(mds, SearchRequest.of("tag*").addNamespace(ns2).build(), app2Record);
  // try to search across namespaces
  assertResults(mds, SearchRequest.of("tags:tag1").build(), app1Record, app2Record, program1Record);
  // clean up
  mds.batch(entities.stream().map(Drop::new).collect(Collectors.toList()), MutationOptions.DEFAULT);
  // search should be empty after deleting the tags
  assertEmpty(mds, SearchRequest.of("*").setLimit(10).build());
}
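A detail worth calling out is file1: it is a custom entity nested under dataset1, built with MetadataEntity.builder and appendAsType, and it is tagged and searched like any other entity. A minimal sketch of that construction outside the harness (mds and the tag values are taken from the test; ImmutableSet stands in for the tags helper):

// Sketch: a custom sub-entity of a dataset, whose entity type becomes "file" via appendAsType
MetadataEntity ds1 = MetadataEntity.ofDataset("ns1", "ds1");
MetadataEntity f1 = MetadataEntity.builder(ds1).appendAsType("file", "f1").build();
mds.apply(new MetadataMutation.Update(f1, new Metadata(MetadataScope.USER, ImmutableSet.of("tag2", "tag5"))),
          MutationOptions.DEFAULT);
// after this, a search for "tags:tag5" in namespace ns1 finds f1, as asserted above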
Use of io.cdap.cdap.api.metadata.MetadataEntity in project cdap by caskdata.
From class MetadataStorageTest, method testSortedSearchAndPagination:
@Test
public void testSortedSearchAndPagination() throws IOException {
  MetadataStorage mds = getMetadataStorage();
  // create 10 unique random entity ids with random creation times
  NoDupRandom random = new NoDupRandom();
  List<MetadataEntity> entities = new ArrayList<>();
  for (int i = 0; i < 10; i++) {
    entities.add(ofDataset("myns", "ds" + String.valueOf(random.nextInt(1000))));
  }
  long creationTime = System.currentTimeMillis() - TimeUnit.MINUTES.toMillis(60);
  List<MetadataRecord> records = entities.stream()
    .map(entity -> new MetadataRecord(
      entity,
      new Metadata(SYSTEM, props(MetadataConstants.CREATION_TIME_KEY,
                                 String.valueOf(creationTime + random.nextInt(1000000)),
                                 MetadataConstants.ENTITY_NAME_KEY,
                                 entity.getValue(entity.getType())))))
    .collect(Collectors.toList());
  // index all entities
  mds.batch(records.stream()
              .map(record -> new MetadataMutation.Update(record.getEntity(), record.getMetadata()))
              .collect(Collectors.toList()),
            MutationOptions.DEFAULT);
  testSortedSearch(mds, records, ENTITY_NAME_KEY);
  testSortedSearch(mds, records, CREATION_TIME_KEY);
  // clean up
  mds.batch(entities.stream().map(Drop::new).collect(Collectors.toList()), MutationOptions.DEFAULT);
}
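testSortedSearch itself is not part of this snippet; it presumably issues SearchRequests sorted on the given key (entity name or creation time) and pages through the results with offset and limit. The sketch below shows roughly what such requests would look like; the Sorting class and the setSorting/setOffset builder methods are assumptions about the CDAP metadata SPI, not something confirmed by this snippet, so verify them against the version in use:

// Hedged sketch: page through all entities sorted by entity name, five at a time.
SearchRequest firstPage = SearchRequest.of("*")
  .setSorting(new Sorting(MetadataConstants.ENTITY_NAME_KEY, Sorting.Order.ASC))
  .setOffset(0)
  .setLimit(5)
  .build();
SearchRequest secondPage = SearchRequest.of("*")
  .setSorting(new Sorting(MetadataConstants.ENTITY_NAME_KEY, Sorting.Order.ASC))
  .setOffset(5)
  .setLimit(5)
  .build();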
Use of io.cdap.cdap.api.metadata.MetadataEntity in project cdap by caskdata.
From class MetadataStorageTest, method testBatchConcurrency:
@Test
public void testBatchConcurrency() throws IOException {
  // T threads
  int numThreads = 10;
  // N entities
  int numEntities = 20;
  MetadataStorage mds = getMetadataStorage();
  ExecutorService executor = Executors.newFixedThreadPool(numThreads);
  CompletionService<List<MetadataChange>> completionService = new ExecutorCompletionService<>(executor);
  // Set up N entities with T tags (one to be removed per thread)
  Set<String> allRTags = IntStream.range(0, numThreads).mapToObj(t -> "r" + t).collect(Collectors.toSet());
  Map<Integer, MetadataEntity> entities = IntStream.range(0, numEntities).boxed()
    .collect(Collectors.toMap(i -> i, i -> MetadataEntity.ofDataset("myds" + i)));
  mds.batch(entities.values().stream()
              .map(entity -> new Update(entity, new Metadata(USER, allRTags)))
              .collect(Collectors.toList()),
            MutationOptions.DEFAULT);
  // Generate a random but conflicting set of batches of mutations, one for each thread.
  // Construct the expected results for each entity along with the mutations.
  // Also, because some threads will perform a Create, create a set of directives to preserve all other tags.
  Map<Integer, Set<String>> expected = IntStream.range(0, numEntities).boxed()
    .collect(Collectors.toMap(i -> i, i -> new HashSet<>(allRTags)));
  Map<Integer, List<MetadataMutation>> mutations = IntStream.range(0, numThreads).boxed()
    .collect(Collectors.toMap(i -> i, i -> new ArrayList<>()));
  Map<ScopedNameOfKind, MetadataDirective> directives = new HashMap<>();
  Random rand = new Random(System.currentTimeMillis());
  IntStream.range(0, numThreads).forEach(t -> {
    directives.put(new ScopedNameOfKind(TAG, USER, "t" + t), MetadataDirective.KEEP);
    directives.put(new ScopedNameOfKind(TAG, USER, "r" + t), MetadataDirective.KEEP);
    directives.put(new ScopedNameOfKind(TAG, USER, "c" + t), MetadataDirective.KEEP);
    IntStream.range(0, numEntities).forEach(e -> {
      int random = rand.nextInt(100);
      if (random < 30) {
        // ~30%: add tag "t<t>" with an Update
        expected.get(e).add("t" + t);
        mutations.get(t).add(new Update(entities.get(e), new Metadata(USER, tags("t" + t))));
      } else if (random < 60) {
        // ~30%: add tag "c<t>" with a Create, preserving the other tags via the KEEP directives
        expected.get(e).add("c" + t);
        mutations.get(t).add(new Create(entities.get(e), new Metadata(USER, tags("c" + t)), directives));
      } else if (random < 90) {
        // ~30%: remove this thread's "r<t>" tag
        expected.get(e).remove("r" + t);
        mutations.get(t).add(new Remove(entities.get(e),
                                        Collections.singleton(new ScopedNameOfKind(TAG, USER, "r" + t))));
      }
      // remaining ~10%: this thread issues no mutation for this entity
    });
  });
  // submit all tasks and wait for their completion
  IntStream.range(0, numThreads).forEach(
    t -> completionService.submit(() -> mds.batch(mutations.get(t), MutationOptions.DEFAULT)));
  IntStream.range(0, numThreads).forEach(t -> {
    try {
      completionService.take();
    } catch (InterruptedException e) {
      throw Throwables.propagate(e);
    }
  });
  // validate that all "r" tags were removed and all "c" and "t" tags were added
  IntStream.range(0, numEntities).forEach(e -> {
    try {
      Assert.assertEquals("For entity " + entities.get(e), expected.get(e),
                          mds.read(new Read(entities.get(e))).getTags(USER));
    } catch (Exception ex) {
      throw Throwables.propagate(ex);
    }
  });
  // clean up
  mds.batch(entities.values().stream().map(Drop::new).collect(Collectors.toList()), MutationOptions.DEFAULT);
}
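The subtle piece of this test is the Create mutation: unlike Update, a Create replaces an entity's existing metadata, which is why each thread supplies a directives map marking the other threads' tags as KEEP (TAG in the test is presumably the statically imported MetadataKind.TAG). A minimal sketch of that pattern in isolation, assuming an existing MetadataEntity entity and an initialized MetadataStorage mds (tag names are illustrative):

// Sketch: add tag "c0" with a Create while keeping previously written tags "r0" and "t0".
// Without the KEEP directives the Create would wipe the entity's other USER tags.
Map<ScopedNameOfKind, MetadataDirective> keep = new HashMap<>();
keep.put(new ScopedNameOfKind(MetadataKind.TAG, MetadataScope.USER, "r0"), MetadataDirective.KEEP);
keep.put(new ScopedNameOfKind(MetadataKind.TAG, MetadataScope.USER, "t0"), MetadataDirective.KEEP);
mds.apply(new MetadataMutation.Create(entity,
                                      new Metadata(MetadataScope.USER, Collections.singleton("c0")),
                                      keep),
          MutationOptions.DEFAULT);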