Use of co.cask.cdap.data2.metadata.dataset.MetadataDataset in project cdap by caskdata: class DefaultMetadataStore, method addTags.
/**
 * Adds tags for the specified {@link NamespacedEntityId}.
 */
@Override
public void addTags(final MetadataScope scope, final NamespacedEntityId namespacedEntityId, final String... tagsToAdd) {
  final AtomicReference<MetadataRecord> before = new AtomicReference<>();
  execute(new TransactionExecutor.Procedure<MetadataDataset>() {
    @Override
    public void apply(MetadataDataset dataset) throws Exception {
      // Snapshot the pre-update state inside the transaction so the audit
      // message published afterwards can report the previous record.
      Map<String, String> existingProperties = dataset.getProperties(namespacedEntityId);
      Set<String> existingTags = dataset.getTags(namespacedEntityId);
      before.set(new MetadataRecord(namespacedEntityId, scope, existingProperties, existingTags));
      dataset.addTags(namespacedEntityId, tagsToAdd);
    }
  }, scope);
  // Audit: additions are the requested tags (no properties); deletions are empty.
  MetadataRecord additionsRecord =
    new MetadataRecord(namespacedEntityId, scope, EMPTY_PROPERTIES, Sets.newHashSet(tagsToAdd));
  MetadataRecord deletionsRecord = new MetadataRecord(namespacedEntityId, scope);
  publishAudit(before.get(), additionsRecord, deletionsRecord);
}
Use of co.cask.cdap.data2.metadata.dataset.MetadataDataset in project cdap by caskdata: class MetadataHttpHandler, method searchMetadata.
@GET
@Path("/namespaces/{namespace-id}/metadata/search")
public void searchMetadata(HttpRequest request, HttpResponder responder,
                           @PathParam("namespace-id") String namespaceId,
                           @QueryParam("query") String searchQuery,
                           @QueryParam("target") List<String> targets,
                           @QueryParam("sort") @DefaultValue("") String sort,
                           @QueryParam("offset") @DefaultValue("0") int offset,
                           // 2147483647 is Integer.MAX_VALUE
                           @QueryParam("limit") @DefaultValue("2147483647") int limit,
                           @QueryParam("numCursors") @DefaultValue("0") int numCursors,
                           @QueryParam("cursor") @DefaultValue("") String cursor,
                           @QueryParam("showHidden") @DefaultValue("false") boolean showHidden,
                           @Nullable @QueryParam("entityScope") String entityScope) throws Exception {
  // A search query is mandatory.
  if (searchQuery == null || searchQuery.isEmpty()) {
    throw new BadRequestException("query is not specified");
  }
  // Absent target list means "all types".
  Set<EntityTypeSimpleName> types = targets == null
    ? Collections.<EntityTypeSimpleName>emptySet()
    : ImmutableSet.copyOf(Iterables.transform(targets, STRING_TO_TARGET_TYPE));
  SortInfo sortInfo = SortInfo.of(URLDecoder.decode(sort, "UTF-8"));
  // Cursor pagination is only valid together with an explicit sort order.
  if (SortInfo.DEFAULT.equals(sortInfo) && (!cursor.isEmpty() || numCursors != 0)) {
    throw new BadRequestException("Cursors are not supported when sort info is not specified.");
  }
  try {
    MetadataSearchResponse response =
      metadataAdmin.search(namespaceId, URLDecoder.decode(searchQuery, "UTF-8"), types, sortInfo,
                           offset, limit, numCursors, cursor, showHidden,
                           validateEntityScope(entityScope));
    responder.sendJson(HttpResponseStatus.OK, response, MetadataSearchResponse.class, GSON);
  } catch (Exception e) {
    // If MetadataDataset throws an exception, it gets wrapped; surface the
    // underlying bad-request cause to the client instead of a 500.
    if (Throwables.getRootCause(e) instanceof BadRequestException) {
      throw new BadRequestException(e.getMessage(), e);
    }
    throw e;
  }
}
Use of co.cask.cdap.data2.metadata.dataset.MetadataDataset in project cdap by caskdata: class MetadataDataset, method rebuildIndexes.
/**
 * Rebuilds all the indexes in the {@link MetadataDataset} in batches.
 *
 * @param startRowKey the key of the row to start the scan for the current batch with,
 *                    or {@code null} to start at the first value row
 * @param limit the batch size (maximum number of rows whose indexes are rebuilt in this call)
 * @return the row key of the last row scanned in the current batch, {@code null} if there are no more rows to scan.
 */
@Nullable
public byte[] rebuildIndexes(@Nullable byte[] startRowKey, int limit) {
// Now rebuild indexes for all values in the metadata dataset
byte[] valueRowPrefix = MdsKey.getValueRowPrefix();
// If startRow is null, start at the beginning, else start at the provided start row
startRowKey = startRowKey == null ? valueRowPrefix : startRowKey;
// stopRowKey will always be the last row key with the valueRowPrefix
byte[] stopRowKey = Bytes.stopKeyForPrefix(valueRowPrefix);
Row row;
// try-with-resources ensures the scanner is closed even if index rebuilding throws
try (Scanner scanner = indexedTable.scan(startRowKey, stopRowKey)) {
while ((limit > 0) && (row = scanner.next()) != null) {
byte[] rowKey = row.getRow();
// Decode the entity and metadata key from the row key to look up its indexers
String targetType = MdsKey.getTargetType(rowKey);
NamespacedEntityId namespacedEntityId = MdsKey.getNamespacedIdFromKey(targetType, rowKey);
String metadataKey = MdsKey.getMetadataKey(targetType, rowKey);
Set<Indexer> indexers = getIndexersForKey(metadataKey);
MetadataEntry metadataEntry = getMetadata(namespacedEntityId, metadataKey);
if (metadataEntry == null) {
// Orphaned index row: the value it points to no longer exists. Skip it.
LOG.warn("Found null metadata entry for a known metadata key {} for entity {} which has an index stored. " + "Ignoring.", metadataKey, namespacedEntityId);
continue;
}
// storeIndexes deletes old indexes
storeIndexes(namespacedEntityId, metadataKey, indexers, metadataEntry);
limit--;
}
// Peek one more row to determine where the next batch should start. This row is
// NOT rebuilt here; its key is returned so the caller passes it back as the next
// startRowKey (scan presumably treats the start key as inclusive — TODO confirm).
Row startRowForNextBatch = scanner.next();
if (startRowForNextBatch == null) {
// Scan exhausted: no more batches needed.
return null;
}
return startRowForNextBatch.getRow();
}
}
Use of co.cask.cdap.data2.metadata.dataset.MetadataDataset in project cdap by caskdata: class DefaultMetadataStore, method setProperties.
/**
 * Adds/updates metadata for the specified {@link NamespacedEntityId}.
 */
@Override
public void setProperties(final MetadataScope scope, final NamespacedEntityId namespacedEntityId, final Map<String, String> properties) {
  final AtomicReference<MetadataRecord> before = new AtomicReference<>();
  execute(new TransactionExecutor.Procedure<MetadataDataset>() {
    @Override
    public void apply(MetadataDataset dataset) throws Exception {
      // Snapshot the existing record inside the transaction; the audit diff is
      // computed afterwards so the transaction stays as short as possible.
      Map<String, String> existingProperties = dataset.getProperties(namespacedEntityId);
      Set<String> existingTags = dataset.getTags(namespacedEntityId);
      before.set(new MetadataRecord(namespacedEntityId, scope, existingProperties, existingTags));
      for (Map.Entry<String, String> entry : properties.entrySet()) {
        dataset.setProperty(namespacedEntityId, entry.getKey(), entry.getValue());
      }
    }
  }, scope);
  MetadataRecord previousRecord = before.get();
  // Build the addition/deletion diff against the snapshot, outside the transaction.
  final ImmutableMap.Builder<String, String> additions = ImmutableMap.builder();
  final ImmutableMap.Builder<String, String> deletions = ImmutableMap.builder();
  for (Map.Entry<String, String> entry : properties.entrySet()) {
    String key = entry.getKey();
    String newValue = entry.getValue();
    String oldValue = previousRecord.getProperties().get(key);
    // Identical value already stored: no audit entries for this key.
    if (oldValue != null && oldValue.equals(newValue)) {
      continue;
    }
    // An overwrite is recorded as a deletion of the old value...
    if (oldValue != null) {
      deletions.put(key, oldValue);
    }
    // ...and every changed or new key is recorded as an addition.
    additions.put(key, newValue);
  }
  publishAudit(previousRecord,
               new MetadataRecord(namespacedEntityId, scope, additions.build(), EMPTY_TAGS),
               new MetadataRecord(namespacedEntityId, scope, deletions.build(), EMPTY_TAGS));
}
Use of co.cask.cdap.data2.metadata.dataset.MetadataDataset in project cdap by caskdata: class DefaultMetadataStore, method execute.
/**
 * Runs the given function against a {@link MetadataDataset} for the given scope
 * inside a single transaction and returns the function's result.
 */
private <T> T execute(TransactionExecutor.Function<MetadataDataset, T> func, MetadataScope scope) {
  MetadataDataset dataset = newMetadataDataset(scope);
  return Transactions.createTransactionExecutor(txExecutorFactory, dataset)
    .executeUnchecked(func, dataset);
}
Aggregations