Use of io.cdap.cdap.api.metadata.MetadataScope.SYSTEM in project cdap by caskdata.
Class MetadataStorageTest, method testSortedSearchAndPagination.
@Test
public void testSortedSearchAndPagination() throws IOException {
    MetadataStorage mds = getMetadataStorage();
    // create 10 unique random entity ids with random creation times
    NoDupRandom random = new NoDupRandom();
    List<MetadataEntity> entities = new ArrayList<>();
    for (int i = 0; i < 10; i++) {
        entities.add(ofDataset("myns", "ds" + String.valueOf(random.nextInt(1000))));
    }
    long creationTime = System.currentTimeMillis() - TimeUnit.MINUTES.toMillis(60);
    List<MetadataRecord> records = entities.stream()
        .map(entity -> new MetadataRecord(
            entity,
            new Metadata(SYSTEM,
                props(MetadataConstants.CREATION_TIME_KEY,
                      String.valueOf(creationTime + random.nextInt(1000000)),
                      MetadataConstants.ENTITY_NAME_KEY,
                      entity.getValue(entity.getType())))))
        .collect(Collectors.toList());
    // index all entities
    mds.batch(records.stream()
                .map(record -> new MetadataMutation.Update(record.getEntity(), record.getMetadata()))
                .collect(Collectors.toList()),
              MutationOptions.DEFAULT);
    testSortedSearch(mds, records, ENTITY_NAME_KEY);
    testSortedSearch(mds, records, CREATION_TIME_KEY);
    // clean up
    mds.batch(entities.stream().map(Drop::new).collect(Collectors.toList()), MutationOptions.DEFAULT);
}
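The test above relies on helpers from the test scaffolding that are not shown in this snippet: props(...) builds a property map from varargs key/value pairs, and NoDupRandom hands out random ints without repeats so the generated dataset names stay unique. The following is only a sketch of what such helpers might look like; the names are taken from the snippet, but the implementations are assumptions, not the actual CDAP test code.

// A sketch only: props(...) and NoDupRandom are assumed shapes of the test scaffolding
// referenced above, not the actual helpers from the CDAP code base.
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Random;
import java.util.Set;

final class MetadataTestHelpersSketch {

    // varargs key/value pairs -> property map, matching how props(...) is called in the test
    static Map<String, String> props(String... keysAndValues) {
        Map<String, String> map = new HashMap<>();
        for (int i = 0; i < keysAndValues.length; i += 2) {
            map.put(keysAndValues[i], keysAndValues[i + 1]);
        }
        return map;
    }

    // returns random ints without ever repeating one, so generated dataset names stay unique
    static final class NoDupRandom {
        private final Random random = new Random();
        private final Set<Integer> seen = new HashSet<>();

        int nextInt(int bound) {
            while (true) {
                int value = random.nextInt(bound);
                if (seen.add(value)) {
                    return value;
                }
            }
        }
    }
}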
Use of io.cdap.cdap.api.metadata.MetadataScope.SYSTEM in project cdap by caskdata.
Class MetadataStorageTest, method testBatch.
@Test
public void testBatch() throws IOException {
    MetadataEntity entity = MetadataEntity.ofDataset("a", "b");
    Map<ScopedNameOfKind, MetadataDirective> directives = ImmutableMap.of(
        new ScopedNameOfKind(PROPERTY, SYSTEM, CREATION_TIME_KEY), MetadataDirective.PRESERVE,
        new ScopedNameOfKind(PROPERTY, SYSTEM, DESCRIPTION_KEY), MetadataDirective.KEEP);
    MetadataStorage mds = getMetadataStorage();
    Create create = new Create(entity,
        new Metadata(SYSTEM, tags("batch"),
            props(CREATION_TIME_KEY, "12345678", DESCRIPTION_KEY, "hello", "other", "value")),
        directives);
    MetadataChange change = mds.apply(create, MutationOptions.DEFAULT);
    Assert.assertEquals(Metadata.EMPTY, change.getBefore());
    Assert.assertEquals(create.getMetadata(), change.getAfter());
    List<MetadataMutation> mutations = ImmutableList.of(
        new Update(entity, new Metadata(USER, tags("tag1", "tag2"))),
        new Drop(entity),
        new Create(entity,
            new Metadata(SYSTEM, tags("batch"),
                props(CREATION_TIME_KEY, "23456789", "other", "different")),
            directives),
        new Update(entity, new Metadata(USER, tags("tag3"), props("key", "value"))),
        new Remove(entity, ImmutableSet.of(
            new ScopedNameOfKind(PROPERTY, SYSTEM, "other"),
            new ScopedNameOfKind(TAG, USER, "tag2"))),
        new Create(entity,
            new Metadata(SYSTEM, tags("realtime"),
                props(CREATION_TIME_KEY, "33456789", DESCRIPTION_KEY, "new description", "other", "yet other")),
            directives));
    // apply all mutations in sequence
    List<MetadataChange> changes = mutations.stream().map(mutation -> {
        try {
            return mds.apply(mutation, MutationOptions.DEFAULT);
        } catch (IOException e) {
            throw Throwables.propagate(e);
        }
    }).collect(Collectors.toList());
    // drop and recreate the entity
    mds.apply(new Drop(entity), MutationOptions.DEFAULT);
    change = mds.apply(create, MutationOptions.DEFAULT);
    Assert.assertEquals(Metadata.EMPTY, change.getBefore());
    Assert.assertEquals(create.getMetadata(), change.getAfter());
    // apply all mutations in batch
    List<MetadataChange> batchChanges = mds.batch(mutations, MutationOptions.DEFAULT);
    // make sure the same mutations were applied
    Assert.assertEquals(changes, batchChanges);
    // clean up
    mds.apply(new Drop(entity), MutationOptions.DEFAULT);
}
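The sequential-apply loop above wraps the checked IOException with Guava's Throwables.propagate, which newer Guava versions deprecate. A minimal alternative sketch using java.io.UncheckedIOException instead, with the same behavior and no Guava dependency assumed:

// requires: import java.io.UncheckedIOException;
List<MetadataChange> changes = mutations.stream()
    .map(mutation -> {
        try {
            return mds.apply(mutation, MutationOptions.DEFAULT);
        } catch (IOException e) {
            // wrap the checked exception so it can cross the stream boundary
            throw new UncheckedIOException(e);
        }
    })
    .collect(Collectors.toList());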
Use of io.cdap.cdap.api.metadata.MetadataScope.SYSTEM in project cdap by caskdata.
Class MetadataStorageTest, method testCursorsOffsetsAndTotals.
@Test
public void testCursorsOffsetsAndTotals() throws IOException {
    MetadataStorage mds = getMetadataStorage();
    List<MetadataRecord> records = IntStream.range(0, 20)
        .mapToObj(i -> new MetadataRecord(ofDataset(DEFAULT_NAMESPACE, "ds" + i),
                                          new Metadata(SYSTEM, props(ENTITY_NAME_KEY, "ds" + i))))
        .collect(Collectors.toList());
    mds.batch(records.stream()
                .map(record -> new Update(record.getEntity(), record.getMetadata()))
                .collect(Collectors.toList()),
              MutationOptions.DEFAULT);
    // no cursors
    validateCursorAndOffset(mds, 0, 10, null, false, 10, 0, 10, true, false);
    validateCursorAndOffset(mds, 5, 10, null, false, 10, 5, 10, true, false);
    validateCursorAndOffset(mds, 10, 10, null, false, 10, 10, 10, false, false);
    validateCursorAndOffset(mds, 15, 10, null, false, 5, 15, 10, false, false);
    validateCursorAndOffset(mds, 20, 10, null, false, 0, 20, 10, false, false);
    validateCursorAndOffset(mds, 25, 10, null, false, 0, 25, 10, false, false);
    // request cursors, but don't use them
    validateCursorAndOffset(mds, 0, 10, null, true, 10, 0, 10, true, true);
    validateCursorAndOffset(mds, 0, 20, null, true, 20, 0, 20, false, false);
    validateCursorAndOffset(mds, 0, 30, null, true, 20, 0, 30, false, false);
    // test that passing in an empty string as the cursor has the same effect as null
    validateCursorAndOffset(mds, 0, 10, "", true, 10, 0, 10, true, true);
    validateCursorAndOffset(mds, 0, 20, "", true, 20, 0, 20, false, false);
    validateCursorAndOffset(mds, 0, 30, "", true, 20, 0, 30, false, false);
    // request a cursor, and use it
    String cursor = validateCursorAndOffset(mds, 0, 8, null, true, 8, 0, 8, true, true);
    cursor = validateCursorAndOffset(mds, 0, 8, cursor, true, 8, 8, 8, true, true);
    validateCursorAndOffset(mds, 0, 8, cursor, true, 4, 16, 8, false, false);
    // request a cursor that matches evenly with the number of results
    cursor = validateCursorAndOffset(mds, 0, 10, null, true, 10, 0, 10, true, true);
    validateCursorAndOffset(mds, 0, 10, cursor, true, 10, 10, 10, false, false);
    // ensure that offset and limit are superseded by the cursor
    cursor = validateCursorAndOffset(mds, 0, 4, null, true, 4, 0, 4, true, true);
    cursor = validateCursorAndOffset(mds, 0, 0, cursor, true, 4, 4, 4, true, true);
    cursor = validateCursorAndOffset(mds, 10, 100, cursor, true, 4, 8, 4, true, true);
    cursor = validateCursorAndOffset(mds, 12, 2, cursor, true, 4, 12, 4, true, true);
    validateCursorAndOffset(mds, 1, 1, cursor, true, 4, 16, 4, false, false);
    // ensure that we can start searching without a cursor, with an offset, and request a cursor;
    // whether a cursor is returned is implementation-dependent
    cursor = validateCursorAndOffset(mds, 4, 4, null, true, 4, 4, 4, true, null);
    validateCursorAndOffset(mds, 8, 4, cursor, true, 4, 8, 4, true, null);
    // clean up
    mds.batch(records.stream().map(MetadataRecord::getEntity).map(Drop::new).collect(Collectors.toList()),
              MutationOptions.DEFAULT);
}
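validateCursorAndOffset is another helper defined elsewhere in MetadataStorageTest, and its ten positional arguments are hard to read at the call sites. The sketch below is an inferred shape of that helper, reconstructed from the calls above rather than copied from the CDAP source, to document what each position appears to mean:

// Illustrative only; the parameter meanings are inferred from the call sites, not from MetadataStorageTest.
interface CursorValidationSketch {
    /**
     * Runs one paginated search and asserts the expected page shape.
     *
     * @param mds             the storage under test
     * @param offset          offset to request (apparently ignored once a cursor is supplied)
     * @param limit           page size to request (likewise superseded by the cursor)
     * @param cursor          cursor from a previous response, or null/"" to start a fresh search
     * @param requestCursor   whether to ask for a cursor for the next page
     * @param expectedResults number of results the page should contain
     * @param expectedOffset  offset the response should report
     * @param expectedLimit   limit the response should report
     * @param expectMore      whether more results should remain after this page
     * @param expectCursor    whether a cursor should be returned (null = implementation dependent)
     * @return the cursor returned by the search, or null if none was returned
     */
    String validateCursorAndOffset(MetadataStorage mds, int offset, int limit, String cursor,
                                   boolean requestCursor, int expectedResults, int expectedOffset,
                                   int expectedLimit, boolean expectMore, Boolean expectCursor)
        throws IOException;
}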
Use of io.cdap.cdap.api.metadata.MetadataScope.SYSTEM in project cdap by caskdata.
Class DatasetMetadataStorageTest, method testSearchWeight.
// this test is not in MetadataStorageTest,
// because it tests result scoring and sorting specific to the dataset-based implementation
@Test
public void testSearchWeight() throws IOException {
    MetadataStorage mds = getMetadataStorage();
    String ns = "ns1";
    NamespaceId nsId = new NamespaceId(ns);
    MetadataEntity service1 = nsId.app("app1").service("service1").toMetadataEntity();
    MetadataEntity dataset1 = nsId.dataset("ds1").toMetadataEntity();
    MetadataEntity dataset2 = nsId.dataset("ds2").toMetadataEntity();
    // Add metadata
    String multiWordValue = "aV1 av2 , - , av3 - av4_av5 av6";
    Map<String, String> userProps = ImmutableMap.of("key1", "value1", "key2", "value2", "multiword", multiWordValue);
    Map<String, String> systemProps = ImmutableMap.of("sysKey1", "sysValue1");
    Set<String> userTags = ImmutableSet.of("tag1", "tag2");
    Set<String> temporaryUserTags = ImmutableSet.of("tag3", "tag4");
    Map<String, String> dataset1UserProps = ImmutableMap.of("sKey1", "sValuee1 sValuee2");
    Map<String, String> dataset2UserProps = ImmutableMap.of("sKey1", "sValue1 sValue2", "Key1", "Value1");
    Set<String> sysTags = ImmutableSet.of("sysTag1");
    MetadataRecord service1Record = new MetadataRecord(service1,
        union(new Metadata(USER, userTags, userProps), new Metadata(SYSTEM, sysTags, systemProps)));
    mds.apply(new Update(service1Record.getEntity(), service1Record.getMetadata()), MutationOptions.DEFAULT);
    // add and then remove some metadata for dataset2
    mds.apply(new Update(dataset2, new Metadata(USER, temporaryUserTags, userProps)), MutationOptions.DEFAULT);
    mds.apply(new Remove(dataset2,
        temporaryUserTags.stream()
            .map(tag -> new ScopedNameOfKind(TAG, USER, tag))
            .collect(Collectors.toSet())),
        MutationOptions.DEFAULT);
    mds.apply(new Remove(dataset2,
        userProps.keySet().stream()
            .map(tag -> new ScopedNameOfKind(PROPERTY, USER, tag))
            .collect(Collectors.toSet())),
        MutationOptions.DEFAULT);
    MetadataRecord dataset1Record = new MetadataRecord(dataset1, new Metadata(USER, tags(), dataset1UserProps));
    MetadataRecord dataset2Record = new MetadataRecord(dataset2, new Metadata(USER, tags(), dataset2UserProps));
    mds.batch(ImmutableList.of(
        new Update(dataset1Record.getEntity(), dataset1Record.getMetadata()),
        new Update(dataset2Record.getEntity(), dataset2Record.getMetadata())),
        MutationOptions.DEFAULT);
    // Test score and metadata match
    assertInOrder(mds, SearchRequest.of("value1 multiword:av2").addNamespace(ns).build(),
                  service1Record, dataset2Record);
    assertInOrder(mds, SearchRequest.of("value1 sValue*").addNamespace(ns).setLimit(Integer.MAX_VALUE).build(),
                  dataset2Record, dataset1Record, service1Record);
    assertResults(mds, SearchRequest.of("*").addNamespace(ns).setLimit(Integer.MAX_VALUE).build(),
                  dataset2Record, dataset1Record, service1Record);
    // clean up
    mds.batch(ImmutableList.of(new Drop(service1), new Drop(dataset1), new Drop(dataset2)), MutationOptions.DEFAULT);
}
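For readers who want to issue the same scored query outside the assertion helpers, a minimal sketch follows. It assumes MetadataStorage exposes a search(SearchRequest) method returning a response whose getResults() preserves the scoring order that assertInOrder checks; that assumption is based on the calls above, not on verifying the SPI here.

// assumed API: MetadataStorage.search(SearchRequest) and SearchResponse.getResults()
SearchRequest request = SearchRequest.of("value1 sValue*")
    .addNamespace(ns)
    .setLimit(Integer.MAX_VALUE)
    .build();
SearchResponse response = mds.search(request);
// in this test the expected order is dataset2Record, dataset1Record, service1Record (highest score first)
response.getResults().forEach(result -> System.out.println(result.getEntity()));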