Search in sources:

Example 1 with CacheKey

Use of org.apache.hadoop.hdds.utils.db.cache.CacheKey in the Apache Ozone project.

From class KeyManagerImpl, method listStatus.

/**
 * List the status for a file or a directory and its contents.
 *
 * @param args       Key args
 * @param recursive  For a directory if true all the descendants of a
 *                   particular directory are listed
 * @param startKey   Key from which listing needs to start. If startKey exists
 *                   its status is included in the final list.
 * @param numEntries Number of entries to list from the start key
 * @param clientAddress a hint to key manager, order the datanode in returned
 *                      pipeline by distance between client and datanode.
 * @return list of file status
 */
@Override
@SuppressWarnings("methodlength")
public List<OzoneFileStatus> listStatus(OmKeyArgs args, boolean recursive, String startKey, long numEntries, String clientAddress) throws IOException {
    Preconditions.checkNotNull(args, "Key args can not be null");
    String volName = args.getVolumeName();
    String buckName = args.getBucketName();
    List<OzoneFileStatus> fileStatusList = new ArrayList<>();
    // Nothing to list when the caller asks for zero (or negative) entries.
    if (numEntries <= 0) {
        return fileStatusList;
    }
    // FSO (prefix-optimized) buckets are listed via a dedicated code path.
    if (isBucketFSOptimized(volName, buckName)) {
        return listStatusFSO(args, recursive, startKey, numEntries, clientAddress);
    }
    String volumeName = args.getVolumeName();
    String bucketName = args.getBucketName();
    String keyName = args.getKeyName();
    // A map sorted by OmKey to combine results from TableCache and DB.
    TreeMap<String, OzoneFileStatus> cacheKeyMap = new TreeMap<>();
    if (Strings.isNullOrEmpty(startKey)) {
        // No continuation token: resolve the key itself first. A plain file
        // is its own complete listing.
        OzoneFileStatus fileStatus = getFileStatus(args, clientAddress);
        if (fileStatus.isFile()) {
            return Collections.singletonList(fileStatus);
        }
        // keyName is a directory
        startKey = OzoneFSUtils.addTrailingSlashIfNeeded(keyName);
    }
    // Note: eliminating the case where startCacheKey could end with '//'
    String keyArgs = OzoneFSUtils.addTrailingSlashIfNeeded(metadataManager.getOzoneKey(volumeName, bucketName, keyName));
    // The bucket read lock covers only the cache scan and iterator creation
    // in the try block; the DB iteration below runs after it is released.
    metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName, bucketName);
    Table keyTable = metadataManager.getKeyTable(getBucketLayout(metadataManager, volName, buckName));
    TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>> iterator;
    try {
        Iterator<Map.Entry<CacheKey<String>, CacheValue<OmKeyInfo>>> cacheIter = keyTable.cacheIterator();
        String startCacheKey = OZONE_URI_DELIMITER + volumeName + OZONE_URI_DELIMITER + bucketName + OZONE_URI_DELIMITER + ((startKey.equals(OZONE_URI_DELIMITER)) ? "" : startKey);
        // First, find key in TableCache
        listStatusFindKeyInTableCache(cacheIter, keyArgs, startCacheKey, recursive, cacheKeyMap);
        iterator = keyTable.iterator();
    } finally {
        metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName, bucketName);
    }
    // Then, find key in DB
    // NOTE(review): 'iterator' is never explicitly closed in this method —
    // confirm whether TableIterator holds native resources that need close().
    String seekKeyInDb = metadataManager.getOzoneKey(volumeName, bucketName, startKey);
    Table.KeyValue<String, OmKeyInfo> entry = iterator.seek(seekKeyInDb);
    int countEntries = 0;
    if (iterator.hasNext()) {
        if (entry.getKey().equals(keyArgs)) {
            // Skip the key itself, since we are listing inside the directory
            iterator.next();
        }
        // Iterate through seek results
        while (iterator.hasNext() && numEntries - countEntries > 0) {
            entry = iterator.next();
            String entryInDb = entry.getKey();
            OmKeyInfo omKeyInfo = entry.getValue();
            // Stop as soon as we leave the directory's key-prefix range.
            if (entryInDb.startsWith(keyArgs)) {
                String entryKeyName = omKeyInfo.getKeyName();
                if (recursive) {
                    // Recursive: every non-deleted descendant is included as-is.
                    if (!isKeyDeleted(entryInDb, keyTable)) {
                        cacheKeyMap.put(entryInDb, new OzoneFileStatus(omKeyInfo, scmBlockSize, !OzoneFSUtils.isFile(entryKeyName)));
                        countEntries++;
                    }
                } else {
                    // get the child of the directory to list from the entry. For
                    // example if directory to list is /a and entry is /a/b/c where
                    // c is a file. The immediate child is b which is a directory. c
                    // should not be listed as child of a.
                    String immediateChild = OzoneFSUtils.getImmediateChild(entryKeyName, keyName);
                    boolean isFile = OzoneFSUtils.isFile(immediateChild);
                    if (isFile) {
                        if (!isKeyDeleted(entryInDb, keyTable)) {
                            cacheKeyMap.put(entryInDb, new OzoneFileStatus(omKeyInfo, scmBlockSize, !isFile));
                            countEntries++;
                        }
                    } else {
                        // if entry is a directory
                        if (!isKeyDeleted(entryInDb, keyTable)) {
                            if (!entryKeyName.equals(immediateChild)) {
                                // DB entry is deeper than the immediate child, so
                                // synthesize a directory entry for the child itself.
                                OmKeyInfo fakeDirEntry = createDirectoryKey(omKeyInfo.getVolumeName(), omKeyInfo.getBucketName(), immediateChild, omKeyInfo.getAcls());
                                cacheKeyMap.put(entryInDb, new OzoneFileStatus(fakeDirEntry, scmBlockSize, true));
                            } else {
                                // If entryKeyName matches dir name, we have the info
                                // NOTE(review): block size 0 is passed here, unlike the
                                // scmBlockSize used on every other branch — confirm
                                // this is intentional.
                                cacheKeyMap.put(entryInDb, new OzoneFileStatus(omKeyInfo, 0, true));
                            }
                            countEntries++;
                        }
                        // skip the other descendants of this child directory.
                        iterator.seek(getNextGreaterString(volumeName, bucketName, immediateChild));
                    }
                }
            } else {
                break;
            }
        }
    }
    // Reset the counter: from here on it tracks how many merged
    // (cache + DB) entries are copied into the result, capped at numEntries.
    countEntries = 0;
    // Convert results in cacheKeyMap to List
    for (OzoneFileStatus fileStatus : cacheKeyMap.values()) {
        // No need to check if a key is deleted or not here, this is handled
        // when adding entries to cacheKeyMap from DB.
        fileStatusList.add(fileStatus);
        countEntries++;
        if (countEntries >= numEntries) {
            break;
        }
    }
    // Clean up temp map and set
    cacheKeyMap.clear();
    List<OmKeyInfo> keyInfoList = new ArrayList<>(fileStatusList.size());
    fileStatusList.stream().map(s -> s.getKeyInfo()).forEach(keyInfoList::add);
    // Optionally strip all but the latest block-location version.
    if (args.getLatestVersionLocation()) {
        slimLocationVersion(keyInfoList.toArray(new OmKeyInfo[0]));
    }
    refreshPipeline(keyInfoList);
    // Optionally order datanodes in each pipeline by proximity to the client.
    if (args.getSortDatanodes()) {
        sortDatanodes(clientAddress, keyInfoList.toArray(new OmKeyInfo[0]));
    }
    return fileStatusList;
}
Also used : Arrays(java.util.Arrays) INTERNAL_ERROR(org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INTERNAL_ERROR) OzoneFSUtils(org.apache.hadoop.ozone.om.helpers.OzoneFSUtils) StringUtils(org.apache.commons.lang3.StringUtils) GeneralSecurityException(java.security.GeneralSecurityException) OM_KEY_PREFIX(org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX) HADOOP_SECURITY_KEY_PROVIDER_PATH(org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH) Map(java.util.Map) Path(java.nio.file.Path) EnumSet(java.util.EnumSet) OmBucketInfo(org.apache.hadoop.ozone.om.helpers.OmBucketInfo) DFS_CONTAINER_RATIS_ENABLED_DEFAULT(org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT) Set(java.util.Set) PrivilegedExceptionAction(java.security.PrivilegedExceptionAction) SecurityUtil(org.apache.hadoop.security.SecurityUtil) OzoneBlockTokenSecretManager(org.apache.hadoop.ozone.security.OzoneBlockTokenSecretManager) VOLUME_NOT_FOUND(org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND) CodecRegistry(org.apache.hadoop.hdds.utils.db.CodecRegistry) HDDS_BLOCK_TOKEN_ENABLED(org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED) OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration) OZONE_URI_DELIMITER(org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER) OmMultipartUploadListParts(org.apache.hadoop.ozone.om.helpers.OmMultipartUploadListParts) OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT(org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT) INVALID_KMS_PROVIDER(org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_KMS_PROVIDER) TreeSet(java.util.TreeSet) ArrayList(java.util.ArrayList) Time.monotonicNow(org.apache.hadoop.util.Time.monotonicNow) EncryptedKeyVersion(org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion) Strings(com.google.common.base.Strings) 
OMFileRequest(org.apache.hadoop.ozone.om.request.file.OMFileRequest) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation) KeyProviderCryptoExtension(org.apache.hadoop.crypto.key.KeyProviderCryptoExtension) OmDirectoryInfo(org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo) OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT(org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT) ContainerWithPipeline(org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline) BlockGroup(org.apache.hadoop.ozone.common.BlockGroup) ReplicationConfig(org.apache.hadoop.hdds.client.ReplicationConfig) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) IOException(java.io.IOException) BucketEncryptionKeyInfo(org.apache.hadoop.ozone.om.helpers.BucketEncryptionKeyInfo) OZONE_BLOCK_DELETING_SERVICE_INTERVAL(org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL) FileEncryptionInfo(org.apache.hadoop.fs.FileEncryptionInfo) TreeMap(java.util.TreeMap) OZONE_SCM_BLOCK_SIZE_DEFAULT(org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_DEFAULT) DIRECTORY_NOT_FOUND(org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.DIRECTORY_NOT_FOUND) Paths(java.nio.file.Paths) Table(org.apache.hadoop.hdds.utils.db.Table) CacheKey(org.apache.hadoop.hdds.utils.db.cache.CacheKey) OmPartInfo(org.apache.hadoop.ozone.om.helpers.OmPartInfo) READ(org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto.READ) Preconditions(com.google.common.base.Preconditions) TableIterator(org.apache.hadoop.hdds.utils.db.TableIterator) OmMultipartKeyInfo(org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo) RequestContext(org.apache.hadoop.ozone.security.acl.RequestContext) LoggerFactory(org.slf4j.LoggerFactory) OmKeyArgs(org.apache.hadoop.ozone.om.helpers.OmKeyArgs) ScmBlockLocationProtocol(org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol) 
OZONE_SCM_BLOCK_SIZE(org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE) KEY_NOT_FOUND(org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND) OZONE_BLOCK_DELETING_SERVICE_TIMEOUT(org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT) OzoneFileStatus(org.apache.hadoop.ozone.om.helpers.OzoneFileStatus) BucketLayout(org.apache.hadoop.ozone.om.helpers.BucketLayout) CacheValue(org.apache.hadoop.hdds.utils.db.cache.CacheValue) OzoneAcl(org.apache.hadoop.ozone.OzoneAcl) OmMultipartUpload(org.apache.hadoop.ozone.om.helpers.OmMultipartUpload) StorageUnit(org.apache.hadoop.conf.StorageUnit) RatisReplicationConfig(org.apache.hadoop.hdds.client.RatisReplicationConfig) Collection(java.util.Collection) ReplicationFactor(org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor) FILE_NOT_FOUND(org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_NOT_FOUND) Instant(java.time.Instant) Collectors(java.util.stream.Collectors) Objects(java.util.Objects) List(java.util.List) OMException(org.apache.hadoop.ozone.om.exceptions.OMException) BUCKET_NOT_FOUND(org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND) OZONE_KEY_PREALLOCATION_BLOCKS_MAX(org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_KEY_PREALLOCATION_BLOCKS_MAX) OMClientRequest(org.apache.hadoop.ozone.om.request.OMClientRequest) OzoneObj(org.apache.hadoop.ozone.security.acl.OzoneObj) RDBStore(org.apache.hadoop.hdds.utils.db.RDBStore) OZONE_DIR_DELETING_SERVICE_INTERVAL(org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL) OZONE_DIR_DELETING_SERVICE_INTERVAL_DEFAULT(org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL_DEFAULT) HashMap(java.util.HashMap) BackgroundService(org.apache.hadoop.hdds.utils.BackgroundService) OZONE_CLIENT_LIST_TRASH_KEYS_MAX(org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_LIST_TRASH_KEYS_MAX) OmUtils(org.apache.hadoop.ozone.OmUtils) Stack(java.util.Stack) 
ResultCodes(org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes) OZONE_KEY_PREALLOCATION_BLOCKS_MAX_DEFAULT(org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_KEY_PREALLOCATION_BLOCKS_MAX_DEFAULT) HashSet(java.util.HashSet) PartKeyInfo(org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PartKeyInfo) OmMultipartUploadList(org.apache.hadoop.ozone.om.helpers.OmMultipartUploadList) OmKeyInfo(org.apache.hadoop.ozone.om.helpers.OmKeyInfo) OmKeyLocationInfo(org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo) OzoneAclUtil(org.apache.hadoop.ozone.om.helpers.OzoneAclUtil) Server(org.apache.hadoop.ipc.Server) HDDS_BLOCK_TOKEN_ENABLED_DEFAULT(org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED_DEFAULT) DFS_CONTAINER_RATIS_ENABLED_KEY(org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY) BUCKET_LOCK(org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK) Logger(org.slf4j.Logger) Iterator(java.util.Iterator) SCM_GET_PIPELINE_EXCEPTION(org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.SCM_GET_PIPELINE_EXCEPTION) Pipeline(org.apache.hadoop.hdds.scm.pipeline.Pipeline) TimeUnit(java.util.concurrent.TimeUnit) KEY(org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType.KEY) RepeatedOmKeyInfo(org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo) OmKeyLocationInfoGroup(org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup) IAccessAuthorizer(org.apache.hadoop.ozone.security.acl.IAccessAuthorizer) StorageContainerLocationProtocol(org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol) OZONE_CLIENT_LIST_TRASH_KEYS_MAX_DEFAULT(org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_LIST_TRASH_KEYS_MAX_DEFAULT) Time(org.apache.hadoop.util.Time) VisibleForTesting(com.google.common.annotations.VisibleForTesting) Collections(java.util.Collections) Table(org.apache.hadoop.hdds.utils.db.Table) ArrayList(java.util.ArrayList) TreeMap(java.util.TreeMap) 
OmKeyInfo(org.apache.hadoop.ozone.om.helpers.OmKeyInfo) RepeatedOmKeyInfo(org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo) OzoneFileStatus(org.apache.hadoop.ozone.om.helpers.OzoneFileStatus)

Example 2 with CacheKey

Use of org.apache.hadoop.hdds.utils.db.cache.CacheKey in the Apache Ozone project.

From class KeyManagerImpl, method listStatusFindFilesInTableCache.

/**
 * Helper function for listStatus to find key in FileTableCache.
 * Scans the key table's cache, skipping entries deleted in cache (recorded
 * in {@code deletedKeySet}), and hands each surviving key — with its key
 * name expanded to the full path — to addKeyInfoToFileStatusList.
 */
@SuppressWarnings("parameternumber")
private int listStatusFindFilesInTableCache(Map<String, OzoneFileStatus> cacheKeyMap, Table<String, OmKeyInfo> keyTable, long prefixKeyInDB, String seekKeyInDB, String prefixKeyPath, String startKey, int countEntries, long numEntries, Set<String> deletedKeySet) {
    Iterator<Map.Entry<CacheKey<String>, CacheValue<OmKeyInfo>>> tableCacheIter = keyTable.cacheIterator();
    // TODO: recursive list will be handled in HDDS-4360 jira.
    while (tableCacheIter.hasNext()) {
        if (numEntries - countEntries <= 0) {
            // Requested number of entries collected; stop scanning the cache.
            break;
        }
        Map.Entry<CacheKey<String>, CacheValue<OmKeyInfo>> cacheEntry = tableCacheIter.next();
        String cachedDbKey = cacheEntry.getKey().getCacheKey();
        OmKeyInfo cachedKeyInfo = cacheEntry.getValue().getCacheValue();
        if (cachedKeyInfo == null) {
            // A null cache value marks the key as deleted in cache; remember
            // it so DB results for the same key can be filtered out later.
            deletedKeySet.add(cachedDbKey);
            continue;
        }
        // Work on a local copy so the cached object is never mutated: in DB
        // the keyName holds only the leaf node, but the list returned to the
        // user must carry the full path.
        OmKeyInfo listedKeyInfo = cachedKeyInfo.copyObject();
        listedKeyInfo.setFileName(listedKeyInfo.getKeyName());
        String absolutePath = OMFileRequest.getAbsolutePath(prefixKeyPath, listedKeyInfo.getKeyName());
        listedKeyInfo.setKeyName(absolutePath);
        countEntries = addKeyInfoToFileStatusList(cacheKeyMap, prefixKeyInDB, seekKeyInDB, startKey, countEntries, cachedDbKey, listedKeyInfo, false);
    }
    return countEntries;
}
Also used : OmKeyInfo(org.apache.hadoop.ozone.om.helpers.OmKeyInfo) RepeatedOmKeyInfo(org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo) CacheValue(org.apache.hadoop.hdds.utils.db.cache.CacheValue) Map(java.util.Map) TreeMap(java.util.TreeMap) HashMap(java.util.HashMap) CacheKey(org.apache.hadoop.hdds.utils.db.cache.CacheKey)

Example 3 with CacheKey

Use of org.apache.hadoop.hdds.utils.db.cache.CacheKey in the Apache Ozone project.

From class KeyManagerImpl, method listStatusFindKeyInTableCache.

/**
 * Helper function for listStatus to find key in TableCache.
 * Adds every live (non-deleted) cache entry under {@code startCacheKey} to
 * {@code cacheKeyMap}; in non-recursive mode only immediate children (no
 * further '/' in the remaining path) are included. The listed directory
 * itself ({@code keyArgs}) is always skipped.
 */
private void listStatusFindKeyInTableCache(Iterator<Map.Entry<CacheKey<String>, CacheValue<OmKeyInfo>>> cacheIter, String keyArgs, String startCacheKey, boolean recursive, TreeMap<String, OzoneFileStatus> cacheKeyMap) {
    while (cacheIter.hasNext()) {
        Map.Entry<CacheKey<String>, CacheValue<OmKeyInfo>> entry = cacheIter.next();
        String cacheKey = entry.getKey().getCacheKey();
        // Skip the listed directory itself; only its contents are wanted.
        if (cacheKey.equals(keyArgs)) {
            continue;
        }
        OmKeyInfo cacheOmKeyInfo = entry.getValue().getCacheValue();
        // cacheOmKeyInfo is null if an entry is deleted in cache.
        // startsWith(startCacheKey) already implies
        // cacheKey.compareTo(startCacheKey) >= 0, so the ordering comparison
        // the previous version also performed was redundant and is dropped.
        if (cacheOmKeyInfo != null && cacheKey.startsWith(startCacheKey)) {
            if (!recursive) {
                String remainingKey = StringUtils.stripEnd(cacheKey.substring(startCacheKey.length()), OZONE_URI_DELIMITER);
                // For non-recursive, the remaining part of key can't have '/'
                if (remainingKey.contains(OZONE_URI_DELIMITER)) {
                    continue;
                }
            }
            OzoneFileStatus fileStatus = new OzoneFileStatus(cacheOmKeyInfo, scmBlockSize, !OzoneFSUtils.isFile(cacheKey));
            cacheKeyMap.put(cacheKey, fileStatus);
        }
    }
}
Also used : OmKeyInfo(org.apache.hadoop.ozone.om.helpers.OmKeyInfo) RepeatedOmKeyInfo(org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo) CacheValue(org.apache.hadoop.hdds.utils.db.cache.CacheValue) Map(java.util.Map) TreeMap(java.util.TreeMap) HashMap(java.util.HashMap) OzoneFileStatus(org.apache.hadoop.ozone.om.helpers.OzoneFileStatus) CacheKey(org.apache.hadoop.hdds.utils.db.cache.CacheKey)

Example 4 with CacheKey

Use of org.apache.hadoop.hdds.utils.db.cache.CacheKey in the Apache Ozone project.

From class OmMetadataManagerImpl, method listAllVolumes.

/**
 * @return list of all volumes whose names match {@code prefix}, starting
 *         strictly after {@code startKey} (exclusive) when it is given,
 *         bounded by {@code maxKeys}.
 */
private List<OmVolumeArgs> listAllVolumes(String prefix, String startKey, int maxKeys) {
    List<OmVolumeArgs> volumes = Lists.newArrayList();
    /* volumeTable is full-cache, so we use cacheIterator. */
    Iterator<Map.Entry<CacheKey<String>, CacheValue<OmVolumeArgs>>> cacheIterator = getVolumeTable().cacheIterator();
    boolean matchAnyPrefix = Strings.isNullOrEmpty(prefix);
    // When startKey is empty we may collect immediately; otherwise nothing is
    // collected until the startKey entry itself has been passed.
    boolean passedStartKey = Strings.isNullOrEmpty(startKey);
    while (cacheIterator.hasNext() && volumes.size() < maxKeys) {
        OmVolumeArgs candidate = cacheIterator.next().getValue().getCacheValue();
        String candidateName = candidate.getVolume();
        if (!matchAnyPrefix && !candidateName.startsWith(prefix)) {
            continue;
        }
        if (passedStartKey) {
            volumes.add(candidate);
        } else if (candidateName.equals(startKey)) {
            // startKey itself is excluded; subsequent entries are returned.
            passedStartKey = true;
        }
    }
    return volumes;
}
Also used : OmVolumeArgs(org.apache.hadoop.ozone.om.helpers.OmVolumeArgs) CacheValue(org.apache.hadoop.hdds.utils.db.cache.CacheValue) Map(java.util.Map) HashMap(java.util.HashMap) TreeMap(java.util.TreeMap) CacheKey(org.apache.hadoop.hdds.utils.db.cache.CacheKey)

Example 5 with CacheKey

Use of org.apache.hadoop.hdds.utils.db.cache.CacheKey in the Apache Ozone project.

From class OmMetadataManagerImpl, method isVolumeEmpty.

/**
 * Given a volume, check if it is empty, i.e there are no buckets inside it.
 * We iterate in the bucket table and see if there is any key that starts with
 * the volume prefix. We actually look for /volume/, since if we don't have
 * the trailing slash it is possible that we might match some other volume.
 * <p>
 * For example, vol1 and vol122 might match, to avoid that we look for /vol1/
 *
 * @param volume - Volume name
 * @return true if the volume is empty
 * @throws IOException on failure reading the bucket table
 */
@Override
public boolean isVolumeEmpty(String volume) throws IOException {
    String volumePrefix = getVolumeKey(volume + OM_KEY_PREFIX);
    // First check in bucket table cache.
    Iterator<Map.Entry<CacheKey<String>, CacheValue<OmBucketInfo>>> iterator = ((TypedTable<String, OmBucketInfo>) bucketTable).cacheIterator();
    while (iterator.hasNext()) {
        Map.Entry<CacheKey<String>, CacheValue<OmBucketInfo>> entry = iterator.next();
        String key = entry.getKey().getCacheKey();
        OmBucketInfo omBucketInfo = entry.getValue().getCacheValue();
        // Making sure that entry is not for delete bucket request (a null
        // cache value marks a bucket deleted in cache).
        if (key.startsWith(volumePrefix) && omBucketInfo != null) {
            return false;
        }
    }
    try (TableIterator<String, ? extends KeyValue<String, OmBucketInfo>> bucketIter = bucketTable.iterator()) {
        KeyValue<String, OmBucketInfo> kv = bucketIter.seek(volumePrefix);
        if (kv != null && kv.getKey().startsWith(volumePrefix)) {
            // Check the entry in db is not marked for delete. This can happen
            // while entry is marked for delete, but it is not flushed to DB.
            // (CacheKey is now constructed with the diamond operator instead
            // of the previous raw type.)
            CacheValue<OmBucketInfo> cacheValue = bucketTable.getCacheValue(new CacheKey<>(kv.getKey()));
            // No cache entry means the DB row stands as-is; a cache entry with
            // a non-null value means the bucket still exists. Either way we
            // found at least one bucket with this volume prefix. (This single
            // condition replaces two duplicated branches that each re-tested
            // startsWith.)
            if (cacheValue == null || cacheValue.getCacheValue() != null) {
                return false;
            }
        }
    }
    return true;
}
Also used : OmBucketInfo(org.apache.hadoop.ozone.om.helpers.OmBucketInfo) TypedTable(org.apache.hadoop.hdds.utils.db.TypedTable) CacheValue(org.apache.hadoop.hdds.utils.db.cache.CacheValue) Map(java.util.Map) HashMap(java.util.HashMap) TreeMap(java.util.TreeMap) CacheKey(org.apache.hadoop.hdds.utils.db.cache.CacheKey)

Aggregations

Map (java.util.Map)12 CacheKey (org.apache.hadoop.hdds.utils.db.cache.CacheKey)12 CacheValue (org.apache.hadoop.hdds.utils.db.cache.CacheValue)12 HashMap (java.util.HashMap)9 TreeMap (java.util.TreeMap)9 OmKeyInfo (org.apache.hadoop.ozone.om.helpers.OmKeyInfo)7 RepeatedOmKeyInfo (org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo)7 ArrayList (java.util.ArrayList)4 OmBucketInfo (org.apache.hadoop.ozone.om.helpers.OmBucketInfo)4 Table (org.apache.hadoop.hdds.utils.db.Table)3 OMException (org.apache.hadoop.ozone.om.exceptions.OMException)3 OzoneFileStatus (org.apache.hadoop.ozone.om.helpers.OzoneFileStatus)3 IOException (java.io.IOException)2 OmDirectoryInfo (org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo)2 VisibleForTesting (com.google.common.annotations.VisibleForTesting)1 Preconditions (com.google.common.base.Preconditions)1 Strings (com.google.common.base.Strings)1 Path (java.nio.file.Path)1 Paths (java.nio.file.Paths)1 GeneralSecurityException (java.security.GeneralSecurityException)1