Example usage of org.apache.hadoop.hdds.utils.db.cache.CacheValue in the Apache Ozone project.
From the class OMFileRequest, method checkSubFileExists.
/**
 * Checks whether the given key has at least one immediate child file.
 * Probes the file table cache first, then seeks the underlying DB.
 *
 * @param omKeyInfo key whose direct children are being probed
 * @param metaMgr   OM metadata manager providing table access
 * @return true if a direct sub-path file exists under the key
 * @throws IOException on table access failure
 */
private static boolean checkSubFileExists(OmKeyInfo omKeyInfo,
    OMMetadataManager metaMgr) throws IOException {
  Table fileTable = metaMgr.getKeyTable(
      getBucketLayout(metaMgr, omKeyInfo.getVolumeName(),
          omKeyInfo.getBucketName()));

  // Pass 1: scan the in-memory table cache for a direct child.
  Iterator<Map.Entry<CacheKey<String>, CacheValue<OmKeyInfo>>> cachedFiles =
      fileTable.cacheIterator();
  while (cachedFiles.hasNext()) {
    OmKeyInfo cachedFile = cachedFiles.next().getValue().getCacheValue();
    // A null cache value marks a delete that has not been flushed yet.
    if (cachedFile != null
        && isImmediateChild(cachedFile.getParentObjectID(),
            omKeyInfo.getObjectID())) {
      // found a sub path file
      return true;
    }
  }

  // Pass 2: seek the DB to the first path keyed under this object id.
  String dbSeekKey = metaMgr.getOzonePathKey(omKeyInfo.getObjectID(), "");
  TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>> dbIter =
      fileTable.iterator();
  dbIter.seek(dbSeekKey);
  if (dbIter.hasNext()) {
    OmKeyInfo dbFile = dbIter.next().getValue();
    // The seeked entry is a sub path file iff it is a direct child.
    return isImmediateChild(dbFile.getParentObjectID(),
        omKeyInfo.getObjectID());
  }

  // no sub paths found
  return false;
}
Example usage of org.apache.hadoop.hdds.utils.db.cache.CacheValue in the Apache Ozone project.
From the class OmMetadataManagerImpl, method listBuckets.
/**
 * {@inheritDoc}
 *
 * Lists buckets of the given volume from the (full) bucket table cache,
 * optionally filtered by prefix and paginated via {@code startBucket}.
 * Matches are accumulated in a sorted map before the count limit is
 * applied, because the cache iterator has no ordering guarantee: capping
 * the count during an unordered scan would return an arbitrary, unsorted
 * subset and break {@code startBucket}-based pagination.
 */
@Override
public List<OmBucketInfo> listBuckets(final String volumeName,
    final String startBucket, final String bucketPrefix,
    final int maxNumOfBuckets) throws IOException {
  List<OmBucketInfo> result = new ArrayList<>();
  if (Strings.isNullOrEmpty(volumeName)) {
    throw new OMException("Volume name is required.",
        ResultCodes.VOLUME_NOT_FOUND);
  }
  String volumeNameBytes = getVolumeKey(volumeName);
  if (volumeTable.get(volumeNameBytes) == null) {
    throw new OMException("Volume " + volumeName + " not found.",
        ResultCodes.VOLUME_NOT_FOUND);
  }
  String startKey;
  boolean skipStartKey = false;
  if (StringUtil.isNotBlank(startBucket)) {
    // The user specified a start key: seek to it and exclude that exact
    // key from the response set.
    startKey = getBucketKey(volumeName, startBucket);
    skipStartKey = true;
  } else {
    // Prefix-only (or no) filter: start from the first key matching the
    // prefix and do NOT skip it. skipStartKey distinguishes the two modes.
    startKey = getBucketKey(volumeName, bucketPrefix);
  }
  String seekPrefix;
  if (StringUtil.isNotBlank(bucketPrefix)) {
    seekPrefix = getBucketKey(volumeName, bucketPrefix);
  } else {
    seekPrefix = getVolumeKey(volumeName + OM_KEY_PREFIX);
  }
  // For Bucket it is full cache, so we can just iterate the in-memory
  // table cache; no DB scan is needed. Collect matches sorted by key so
  // the maxNumOfBuckets cap below takes a contiguous, ordered page.
  TreeMap<String, OmBucketInfo> sortedMatches = new TreeMap<>();
  Iterator<Map.Entry<CacheKey<String>, CacheValue<OmBucketInfo>>> iterator =
      bucketTable.cacheIterator();
  while (iterator.hasNext()) {
    Map.Entry<CacheKey<String>, CacheValue<OmBucketInfo>> entry =
        iterator.next();
    String key = entry.getKey().getCacheKey();
    OmBucketInfo omBucketInfo = entry.getValue().getCacheValue();
    // A null cache value marks a bucket delete not yet flushed to DB.
    if (omBucketInfo == null) {
      continue;
    }
    if (skipStartKey && key.equals(startKey)) {
      continue;
    }
    // Keep only keys at/after the start key that match the prefix.
    if (key.startsWith(seekPrefix) && key.compareTo(startKey) >= 0) {
      sortedMatches.put(key, omBucketInfo);
    }
  }
  int currentCount = 0;
  for (OmBucketInfo omBucketInfo : sortedMatches.values()) {
    if (currentCount >= maxNumOfBuckets) {
      break;
    }
    result.add(omBucketInfo);
    currentCount++;
  }
  return result;
}
Example usage of org.apache.hadoop.hdds.utils.db.cache.CacheValue in the Apache Ozone project.
From the class OmMetadataManagerImpl, method isBucketEmpty.
/**
 * Given a volume/bucket, check if it is empty, i.e there are no keys inside
 * it. Prefix is /volume/bucket/, and we lookup the keyTable.
 *
 * @param volume - Volume name
 * @param bucket - Bucket name
 * @return true if the bucket is empty
 */
@Override
public boolean isBucketEmpty(String volume, String bucket)
    throws IOException {
  String keyPrefix = getBucketKey(volume, bucket);
  // First check in key table cache: any live cached key under this bucket
  // prefix means the bucket is non-empty.
  Iterator<Map.Entry<CacheKey<String>, CacheValue<OmKeyInfo>>> iterator =
      ((TypedTable<String, OmKeyInfo>) keyTable).cacheIterator();
  while (iterator.hasNext()) {
    Map.Entry<CacheKey<String>, CacheValue<OmKeyInfo>> entry =
        iterator.next();
    String key = entry.getKey().getCacheKey();
    OmKeyInfo omKeyInfo = entry.getValue().getCacheValue();
    // A null cache value is a pending-delete marker, not a live key.
    if (key.startsWith(keyPrefix) && omKeyInfo != null) {
      return false;
    }
  }
  // Then check the DB itself.
  try (TableIterator<String, ? extends KeyValue<String, OmKeyInfo>> keyIter =
      keyTable.iterator()) {
    KeyValue<String, OmKeyInfo> kv = keyIter.seek(keyPrefix);
    if (kv != null && kv.getKey().startsWith(keyPrefix)) {
      // The DB entry may be marked for delete in the cache but not yet
      // flushed to DB: a cached null value means "deleted". Only a missing
      // cache entry, or a cached live value, counts as a real key.
      CacheValue<OmKeyInfo> cacheValue =
          keyTable.getCacheValue(new CacheKey<>(kv.getKey()));
      if (cacheValue == null || cacheValue.getCacheValue() != null) {
        // we found at least one key with this vol/bucket prefix.
        return false;
      }
    }
  }
  return true;
}
Example usage of org.apache.hadoop.hdds.utils.db.cache.CacheValue in the Apache Ozone project.
From the class OmMetadataManagerImpl, method listKeys.
/**
 * Lists up to {@code maxKeys} keys of the given volume/bucket, merging
 * entries from the key table cache with entries from the underlying DB.
 * Cached null values (pending deletes) are filtered out. Results are
 * returned in sorted key order via an intermediate TreeMap.
 *
 * @param volumeName volume to list from (required)
 * @param bucketName bucket to list from (required; must exist)
 * @param startKey   optional key to seek to; excluded from the result
 * @param keyPrefix  optional prefix filter for returned keys
 * @param maxKeys    maximum number of keys to return; non-positive
 *                   returns an empty list
 * @throws IOException if the volume/bucket lookup or DB iteration fails
 */
@Override
public List<OmKeyInfo> listKeys(String volumeName, String bucketName, String startKey, String keyPrefix, int maxKeys) throws IOException {
  List<OmKeyInfo> result = new ArrayList<>();
  if (maxKeys <= 0) {
    return result;
  }
  if (Strings.isNullOrEmpty(volumeName)) {
    throw new OMException("Volume name is required.", ResultCodes.VOLUME_NOT_FOUND);
  }
  if (Strings.isNullOrEmpty(bucketName)) {
    throw new OMException("Bucket name is required.", ResultCodes.BUCKET_NOT_FOUND);
  }
  String bucketNameBytes = getBucketKey(volumeName, bucketName);
  if (getBucketTable().get(bucketNameBytes) == null) {
    throw new OMException("Bucket " + bucketName + " not found.", ResultCodes.BUCKET_NOT_FOUND);
  }
  String seekKey;
  boolean skipStartKey = false;
  if (StringUtil.isNotBlank(startKey)) {
    // Seek to the specified key, and exclude that exact key from the
    // response (skipStartKey).
    seekKey = getOzoneKey(volumeName, bucketName, startKey);
    skipStartKey = true;
  } else {
    // This allows us to seek directly to the first key with the right prefix.
    seekKey = getOzoneKey(volumeName, bucketName, StringUtil.isNotBlank(keyPrefix) ? keyPrefix : OM_KEY_PREFIX);
  }
  String seekPrefix;
  if (StringUtil.isNotBlank(keyPrefix)) {
    seekPrefix = getOzoneKey(volumeName, bucketName, keyPrefix);
  } else {
    seekPrefix = getBucketKey(volumeName, bucketName + OM_KEY_PREFIX);
  }
  int currentCount = 0;
  // Sorted merge buffer for cache + DB entries, keyed by full ozone key.
  TreeMap<String, OmKeyInfo> cacheKeyMap = new TreeMap<>();
  Iterator<Map.Entry<CacheKey<String>, CacheValue<OmKeyInfo>>> iterator = keyTable.cacheIterator();
  // NOTE(review): full cache scan per listKeys call — this is where listing
  // is becoming slow for large caches.
  while (iterator.hasNext()) {
    Map.Entry<CacheKey<String>, CacheValue<OmKeyInfo>> entry = iterator.next();
    String key = entry.getKey().getCacheKey();
    OmKeyInfo omKeyInfo = entry.getValue().getCacheValue();
    // Skip pending deletes (null value) and keys outside the requested
    // prefix/start range.
    if (omKeyInfo != null && key.startsWith(seekPrefix) && key.compareTo(seekKey) >= 0) {
      cacheKeyMap.put(key, omKeyInfo);
    }
  }
  try (TableIterator<String, ? extends KeyValue<String, OmKeyInfo>> keyIter = getKeyTable(getBucketLayout()).iterator()) {
    KeyValue<String, OmKeyInfo> kv;
    keyIter.seek(seekKey);
    // Read up to maxKeys + 1 DB entries: the extra one covers the case
    // where the seeked start key itself is skipped later, so the final
    // pass can still return maxKeys results.
    while (currentCount < maxKeys + 1 && keyIter.hasNext()) {
      kv = keyIter.next();
      if (kv != null && kv.getKey().startsWith(seekPrefix)) {
        // Entry should not be marked for delete, consider only those
        // entries: a cached null value means a delete not yet flushed.
        CacheValue<OmKeyInfo> cacheValue = keyTable.getCacheValue(new CacheKey<>(kv.getKey()));
        if (cacheValue == null || cacheValue.getCacheValue() != null) {
          cacheKeyMap.put(kv.getKey(), kv.getValue());
          currentCount++;
        }
      } else {
        // Past the prefix range (keys are sorted); stop the DB scan loop.
        break;
      }
    }
  }
  // Finally DB entries and cache entries are merged, then return the count
  // of maxKeys from the sorted map.
  currentCount = 0;
  for (Map.Entry<String, OmKeyInfo> cacheKey : cacheKeyMap.entrySet()) {
    // Exclude the user-supplied start key itself when one was given.
    if (cacheKey.getKey().equals(seekKey) && skipStartKey) {
      continue;
    }
    result.add(cacheKey.getValue());
    currentCount++;
    if (currentCount == maxKeys) {
      break;
    }
  }
  // Clear map and set.
  cacheKeyMap.clear();
  return result;
}
Example usage of org.apache.hadoop.hdds.utils.db.cache.CacheValue in the Apache Ozone project.
From the class TrashOzoneFileSystem, method getTrashRoots.
/**
 * Returns the trash-root directories (.Trash/&lt;user&gt;) of every bucket
 * known to the bucket table cache.
 *
 * @param allUsers must be true; per-user filtering is not supported here
 * @return trash root directories that exist across all buckets
 */
@Override
public Collection<FileStatus> getTrashRoots(boolean allUsers) {
  Preconditions.checkArgument(allUsers);
  ozoneManager.getMetrics().incNumTrashGetTrashRoots();
  Iterator<Map.Entry<CacheKey<String>, CacheValue<OmBucketInfo>>> bucketIterator =
      ozoneManager.getMetadataManager().getBucketIterator();
  List<FileStatus> ret = new ArrayList<>();
  while (bucketIterator.hasNext()) {
    Map.Entry<CacheKey<String>, CacheValue<OmBucketInfo>> entry =
        bucketIterator.next();
    OmBucketInfo omBucketInfo = entry.getValue().getCacheValue();
    // A null cache value marks a bucket delete that has not been flushed
    // yet; skip it to avoid an NPE when reading volume/bucket names.
    if (omBucketInfo == null) {
      continue;
    }
    Path volumePath = new Path(OZONE_URI_DELIMITER, omBucketInfo.getVolumeName());
    Path bucketPath = new Path(volumePath, omBucketInfo.getBucketName());
    Path trashRoot = new Path(bucketPath, FileSystem.TRASH_PREFIX);
    try {
      if (exists(trashRoot)) {
        FileStatus[] list = this.listStatus(trashRoot);
        for (FileStatus candidate : list) {
          if (exists(candidate.getPath()) && candidate.isDirectory()) {
            ret.add(candidate);
          }
        }
      }
    } catch (Exception e) {
      LOG.error("Couldn't perform fs operation fs.listStatus()/fs.exists()", e);
    }
  }
  return ret;
}
End of aggregated CacheValue usage examples.