Example 1 with Metadata

Use of co.cask.cdap.data2.metadata.dataset.Metadata in project cdap by caskdata.

The class FileStreamAdmin, method updateProperties.

private StreamProperties updateProperties(StreamId streamId, StreamProperties properties) throws Exception {
    StreamConfig config = getConfig(streamId);
    StreamConfig.Builder builder = StreamConfig.builder(config);
    if (properties.getTTL() != null) {
        builder.setTTL(properties.getTTL());
    }
    if (properties.getFormat() != null) {
        builder.setFormatSpec(properties.getFormat());
    }
    if (properties.getNotificationThresholdMB() != null) {
        builder.setNotificationThreshold(properties.getNotificationThresholdMB());
    }
    // update stream description
    String description = properties.getDescription();
    if (description != null) {
        streamMetaStore.addStream(streamId, description);
    }
    final StreamConfig newConfig = builder.build();
    impersonator.doAs(streamId, new Callable<Void>() {

        @Override
        public Void call() throws Exception {
            writeConfig(newConfig);
            return null;
        }
    });
    // Update system metadata for stream
    SystemMetadataWriter systemMetadataWriter = new StreamSystemMetadataWriter(metadataStore, streamId, newConfig, description);
    systemMetadataWriter.write();
    return new StreamProperties(config.getTTL(), config.getFormat(), config.getNotificationThresholdMB());
}
Also used: StreamSystemMetadataWriter(co.cask.cdap.data2.metadata.system.StreamSystemMetadataWriter), SystemMetadataWriter(co.cask.cdap.data2.metadata.system.SystemMetadataWriter), StreamProperties(co.cask.cdap.proto.StreamProperties), CoordinatorStreamProperties(co.cask.cdap.data.stream.CoordinatorStreamProperties), NotificationFeedException(co.cask.cdap.notifications.feeds.NotificationFeedException), FileNotFoundException(java.io.FileNotFoundException), IOException(java.io.IOException), NotFoundException(co.cask.cdap.common.NotFoundException), StreamNotFoundException(co.cask.cdap.common.StreamNotFoundException)
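
A hedged usage sketch (the stream id, TTL value, and the 3-argument StreamProperties constructor used here are assumptions inferred from the snippet above, not taken from the CDAP sources): because updateProperties only applies non-null fields, a caller that wants to change just the TTL can leave the other properties null.

StreamId streamId = new StreamId("default", "events");
// Only the TTL is supplied; format and notification threshold stay null,
// so the null checks in updateProperties keep the existing values.
StreamProperties ttlOnly = new StreamProperties(86400000L, null, null);
StreamProperties updated = updateProperties(streamId, ttlOnly);   // invoked inside FileStreamAdmin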

Example 2 with Metadata

Use of co.cask.cdap.data2.metadata.dataset.Metadata in project cdap by caskdata.

The class HBaseMetadataTable, method createTopic.

@Override
public void createTopic(TopicMetadata topicMetadata) throws TopicAlreadyExistsException, IOException {
    TopicId topicId = topicMetadata.getTopicId();
    byte[] rowKey = MessagingUtils.toMetadataRowKey(topicId);
    PutBuilder putBuilder = tableUtil.buildPut(rowKey);
    Get get = tableUtil.buildGet(rowKey).addFamily(columnFamily).build();
    try {
        boolean completed = false;
        while (!completed) {
            Result result = hTable.get(get);
            byte[] value = result.getValue(columnFamily, COL);
            if (value == null) {
                TreeMap<String, String> properties = new TreeMap<>(topicMetadata.getProperties());
                properties.put(TopicMetadata.GENERATION_KEY, MessagingUtils.Constants.DEFAULT_GENERATION);
                putBuilder.add(columnFamily, COL, Bytes.toBytes(GSON.toJson(properties, MAP_TYPE)));
                completed = hTable.checkAndPut(rowKey, columnFamily, COL, null, putBuilder.build());
            } else {
                Map<String, String> properties = GSON.fromJson(Bytes.toString(value), MAP_TYPE);
                TopicMetadata metadata = new TopicMetadata(topicId, properties);
                if (metadata.exists()) {
                    throw new TopicAlreadyExistsException(topicId.getNamespace(), topicId.getTopic());
                }
                int newGenerationId = (metadata.getGeneration() * -1) + 1;
                TreeMap<String, String> newProperties = new TreeMap<>(properties);
                newProperties.put(TopicMetadata.GENERATION_KEY, Integer.toString(newGenerationId));
                putBuilder.add(columnFamily, COL, Bytes.toBytes(GSON.toJson(newProperties, MAP_TYPE)));
                completed = hTable.checkAndPut(rowKey, columnFamily, COL, value, putBuilder.build());
            }
        }
    } catch (IOException e) {
        throw exceptionHandler.handle(e);
    }
}
Also used: IOException(java.io.IOException), TreeMap(java.util.TreeMap), TopicAlreadyExistsException(co.cask.cdap.api.messaging.TopicAlreadyExistsException), Result(org.apache.hadoop.hbase.client.Result), TopicMetadata(co.cask.cdap.messaging.TopicMetadata), PutBuilder(co.cask.cdap.data2.util.hbase.PutBuilder), Get(org.apache.hadoop.hbase.client.Get), TopicId(co.cask.cdap.proto.id.TopicId)
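
The loop above is an optimistic checkAndPut retry: each pass compares against the value it just read, so a concurrent writer only forces another iteration rather than corrupting the row. A small sketch of the generation convention the recreate branch relies on (the concrete numbers are illustrative only): a deleted topic keeps a non-positive generation, and recreating it flips the sign and adds one.

int storedGeneration = -3;                           // topic was deleted while at generation 3
int newGenerationId = (storedGeneration * -1) + 1;   // recreated topic starts at generation 4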

Example 3 with Metadata

Use of co.cask.cdap.data2.metadata.dataset.Metadata in project cdap by caskdata.

The class DefaultMetadataStore, method addMetadataToEntities.

private Set<MetadataSearchResultRecord> addMetadataToEntities(Set<NamespacedEntityId> entities, Map<NamespacedEntityId, Metadata> systemMetadata, Map<NamespacedEntityId, Metadata> userMetadata) {
    Set<MetadataSearchResultRecord> result = new LinkedHashSet<>();
    for (NamespacedEntityId entity : entities) {
        ImmutableMap.Builder<MetadataScope, co.cask.cdap.api.metadata.Metadata> builder = ImmutableMap.builder();
        // Add system metadata
        Metadata metadata = systemMetadata.get(entity);
        if (metadata != null) {
            builder.put(MetadataScope.SYSTEM, new co.cask.cdap.api.metadata.Metadata(metadata.getProperties(), metadata.getTags()));
        }
        // Add user metadata
        metadata = userMetadata.get(entity);
        if (metadata != null) {
            builder.put(MetadataScope.USER, new co.cask.cdap.api.metadata.Metadata(metadata.getProperties(), metadata.getTags()));
        }
        // Create result
        result.add(new MetadataSearchResultRecord(entity, builder.build()));
    }
    return result;
}
Also used: LinkedHashSet(java.util.LinkedHashSet), Metadata(co.cask.cdap.data2.metadata.dataset.Metadata), ImmutableMap(com.google.common.collect.ImmutableMap), NamespacedEntityId(co.cask.cdap.proto.id.NamespacedEntityId), MetadataSearchResultRecord(co.cask.cdap.proto.metadata.MetadataSearchResultRecord), MetadataScope(co.cask.cdap.api.metadata.MetadataScope)
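
A hedged sketch of consuming the merged result above; it assumes MetadataSearchResultRecord exposes the per-scope map through a getter such as getMetadata(), which is not shown in this snippet.

for (MetadataSearchResultRecord record : result) {
    Map<MetadataScope, co.cask.cdap.api.metadata.Metadata> byScope = record.getMetadata();
    // Only the scopes that were put into the builder appear as keys, so an entity
    // without user metadata simply has no MetadataScope.USER entry.
    co.cask.cdap.api.metadata.Metadata system = byScope.get(MetadataScope.SYSTEM);
}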

Example 4 with Metadata

Use of co.cask.cdap.data2.metadata.dataset.Metadata in project cdap by caskdata.

The class DefaultMetadataStore, method search.

private MetadataSearchResponse search(Set<MetadataScope> scopes, String namespaceId, String searchQuery, Set<EntityTypeSimpleName> types, SortInfo sortInfo, int offset, int limit, int numCursors, String cursor, boolean showHidden, Set<EntityScope> entityScope) throws BadRequestException {
    if (offset < 0) {
        throw new IllegalArgumentException("offset must not be negative");
    }
    if (limit < 0) {
        throw new IllegalArgumentException("limit must not be negative");
    }
    List<MetadataEntry> results = new LinkedList<>();
    List<String> cursors = new LinkedList<>();
    for (MetadataScope scope : scopes) {
        SearchResults searchResults = getSearchResults(scope, namespaceId, searchQuery, types, sortInfo, offset, limit, numCursors, cursor, showHidden, entityScope);
        results.addAll(searchResults.getResults());
        cursors.addAll(searchResults.getCursors());
    }
    // sort if required
    Set<NamespacedEntityId> sortedEntities = getSortedEntities(results, sortInfo);
    int total = sortedEntities.size();
    // pagination is not performed at the dataset level, because:
    // 1. scoring is needed for DEFAULT sort info. So perform it here for now.
    // 2. Even when using custom sorting, we need to remove elements from the beginning to the offset and the cursors
    // at the end
    // TODO: Figure out how all of this can be done server (HBase) side
    int startIndex = Math.min(offset, sortedEntities.size());
    // Account for overflow
    int endIndex = (int) Math.min(Integer.MAX_VALUE, (long) offset + limit);
    endIndex = Math.min(endIndex, sortedEntities.size());
    // add 1 to maxIndex because end index is exclusive
    sortedEntities = new LinkedHashSet<>(ImmutableList.copyOf(sortedEntities).subList(startIndex, endIndex));
    // Fetch metadata for entities in the result list
    // Note: since the fetch is happening in a different transaction, the metadata for entities may have been
    // removed. It is okay not to have metadata for some results in case this happens.
    Map<NamespacedEntityId, Metadata> systemMetadata = fetchMetadata(sortedEntities, MetadataScope.SYSTEM);
    Map<NamespacedEntityId, Metadata> userMetadata = fetchMetadata(sortedEntities, MetadataScope.USER);
    return new MetadataSearchResponse(sortInfo.getSortBy() + " " + sortInfo.getSortOrder(), offset, limit, numCursors, total, addMetadataToEntities(sortedEntities, systemMetadata, userMetadata), cursors, showHidden, entityScope);
}
Also used: Metadata(co.cask.cdap.data2.metadata.dataset.Metadata), MetadataSearchResponse(co.cask.cdap.proto.metadata.MetadataSearchResponse), SearchResults(co.cask.cdap.data2.metadata.dataset.SearchResults), LinkedList(java.util.LinkedList), NamespacedEntityId(co.cask.cdap.proto.id.NamespacedEntityId), MetadataEntry(co.cask.cdap.data2.metadata.dataset.MetadataEntry), MetadataScope(co.cask.cdap.api.metadata.MetadataScope)
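
The end index is computed in long arithmetic before clamping, so a large offset plus limit cannot overflow to a negative int. A minimal illustration of that clamp (the values are arbitrary):

int offset = 10;
int limit = Integer.MAX_VALUE;
// The (long) cast prevents int overflow; the result is clamped to Integer.MAX_VALUE
// and then further capped by sortedEntities.size() in the method above.
int endIndex = (int) Math.min(Integer.MAX_VALUE, (long) offset + limit);   // 2147483647, not a negative wrap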

Example 5 with Metadata

Use of co.cask.cdap.data2.metadata.dataset.Metadata in project cdap by caskdata.

The class DefaultMetadataStore, method getSnapshotBeforeTime.

@Override
public Set<MetadataRecord> getSnapshotBeforeTime(MetadataScope scope, final Set<NamespacedEntityId> namespacedEntityIds, final long timeMillis) {
    Set<Metadata> metadataHistoryEntries = execute(new TransactionExecutor.Function<MetadataDataset, Set<Metadata>>() {

        @Override
        public Set<Metadata> apply(MetadataDataset input) throws Exception {
            return input.getSnapshotBeforeTime(namespacedEntityIds, timeMillis);
        }
    }, scope);
    ImmutableSet.Builder<MetadataRecord> builder = ImmutableSet.builder();
    for (Metadata metadata : metadataHistoryEntries) {
        builder.add(new MetadataRecord(metadata.getEntityId(), scope, metadata.getProperties(), metadata.getTags()));
    }
    return builder.build();
}
Also used: MetadataDataset(co.cask.cdap.data2.metadata.dataset.MetadataDataset), EnumSet(java.util.EnumSet), ImmutableSet(com.google.common.collect.ImmutableSet), Set(java.util.Set), HashSet(java.util.HashSet), LinkedHashSet(java.util.LinkedHashSet), Metadata(co.cask.cdap.data2.metadata.dataset.Metadata), TransactionExecutor(org.apache.tephra.TransactionExecutor), MetadataRecord(co.cask.cdap.common.metadata.MetadataRecord), BadRequestException(co.cask.cdap.common.BadRequestException), DatasetManagementException(co.cask.cdap.api.dataset.DatasetManagementException), IOException(java.io.IOException)
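
A hypothetical caller sketch (the metadataStore reference, namespace, and dataset name are assumptions, not part of the snippet above) that fetches the user-scope snapshot for a single dataset as of the current time:

Set<MetadataRecord> snapshot = metadataStore.getSnapshotBeforeTime(
    MetadataScope.USER,
    Collections.<NamespacedEntityId>singleton(new DatasetId("default", "purchases")),
    System.currentTimeMillis());
// Each MetadataRecord carries the entity id, scope, properties, and tags
// that were in effect before the given timestamp.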

Aggregations

IOException (java.io.IOException) 9
BadRequestException (co.cask.cdap.common.BadRequestException) 8
MetadataRecord (co.cask.cdap.common.metadata.MetadataRecord) 7
MDSKey (co.cask.cdap.data2.dataset2.lib.table.MDSKey) 7
Row (co.cask.cdap.api.dataset.table.Row) 6
Scanner (co.cask.cdap.api.dataset.table.Scanner) 5
DatasetId (co.cask.cdap.proto.id.DatasetId) 5
NamespacedEntityId (co.cask.cdap.proto.id.NamespacedEntityId) 5
Test (org.junit.Test) 5
DatasetManagementException (co.cask.cdap.api.dataset.DatasetManagementException) 4
NotFoundException (co.cask.cdap.common.NotFoundException) 4
MetadataDataset (co.cask.cdap.data2.metadata.dataset.MetadataDataset) 4
Lineage (co.cask.cdap.data2.metadata.lineage.Lineage) 4
Relation (co.cask.cdap.data2.metadata.lineage.Relation) 4
NamespaceId (co.cask.cdap.proto.id.NamespaceId) 4
HashMap (java.util.HashMap) 4
ConfigModule (co.cask.cdap.common.guice.ConfigModule) 3
LocationRuntimeModule (co.cask.cdap.common.guice.LocationRuntimeModule) 3
DataSetsModules (co.cask.cdap.data.runtime.DataSetsModules) 3
SystemDatasetRuntimeModule (co.cask.cdap.data.runtime.SystemDatasetRuntimeModule) 3