Use of org.apache.hadoop.hdds.utils.db.Table in project Ozone by Apache.
Class ContainerMapper, method parseOmDB.
/**
 * Generates the container ID to blocks and block details mapping.
 * @param configuration {@link OzoneConfiguration}
 * @return Map<Long, List<Map<Long, BlockIdDetails>>>,
 *         a map of containerId -> (blockId, block details)
 * @throws IOException on failure to read the OM DB
 */
public Map<Long, List<Map<Long, BlockIdDetails>>> parseOmDB(
    OzoneConfiguration configuration) throws IOException {
  String path = configuration.get(OZONE_OM_DB_DIRS);
  if (path == null || path.isEmpty()) {
    throw new IOException(OZONE_OM_DB_DIRS + " should be set");
  } else {
    Table keyTable = getMetaTable(configuration);
    Map<Long, List<Map<Long, BlockIdDetails>>> dataMap = new HashMap<>();
    if (keyTable != null) {
      try (TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
               keyValueTableIterator = keyTable.iterator()) {
        while (keyValueTableIterator.hasNext()) {
          Table.KeyValue<String, OmKeyInfo> keyValue = keyValueTableIterator.next();
          OmKeyInfo omKeyInfo = keyValue.getValue();
          byte[] value = omKeyInfo.getProtobuf(true, CURRENT_VERSION).toByteArray();
          OmKeyInfo keyInfo = OmKeyInfo.getFromProtobuf(
              OzoneManagerProtocolProtos.KeyInfo.parseFrom(value));
          for (OmKeyLocationInfoGroup keyLocationInfoGroup :
              keyInfo.getKeyLocationVersions()) {
            List<OmKeyLocationInfo> keyLocationInfo =
                keyLocationInfoGroup.getLocationList();
            for (OmKeyLocationInfo keyLocation : keyLocationInfo) {
              BlockIdDetails blockIdDetails = new BlockIdDetails();
              Map<Long, BlockIdDetails> innerMap = new HashMap<>();
              long containerID = keyLocation.getBlockID().getContainerID();
              long blockID = keyLocation.getBlockID().getLocalID();
              blockIdDetails.setBucketName(keyInfo.getBucketName());
              blockIdDetails.setBlockVol(keyInfo.getVolumeName());
              blockIdDetails.setKeyName(keyInfo.getKeyName());
              List<Map<Long, BlockIdDetails>> innerList = new ArrayList<>();
              innerMap.put(blockID, blockIdDetails);
              if (dataMap.containsKey(containerID)) {
                innerList = dataMap.get(containerID);
              }
              innerList.add(innerMap);
              dataMap.put(containerID, innerList);
            }
          }
        }
      }
    }
    return dataMap;
  }
}
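A minimal caller sketch for the method above. It assumes ContainerMapper can be constructed with a no-arg constructor, that OZONE_OM_DB_DIRS is the constant from OMConfigKeys, and uses a placeholder DB path; the exact packaging of the tool may differ.

// Hypothetical driver for parseOmDB; constructor and path are assumptions.
OzoneConfiguration conf = new OzoneConfiguration();
conf.set(OZONE_OM_DB_DIRS, "/var/lib/ozone/om/db");  // placeholder path to an OM DB copy
ContainerMapper mapper = new ContainerMapper();
Map<Long, List<Map<Long, BlockIdDetails>>> containerToBlocks = mapper.parseOmDB(conf);
for (Map.Entry<Long, List<Map<Long, BlockIdDetails>>> e : containerToBlocks.entrySet()) {
  System.out.println("container " + e.getKey() + " -> "
      + e.getValue().size() + " block entries");
}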
Use of org.apache.hadoop.hdds.utils.db.Table in project Ozone by Apache.
Class KeyManagerImpl, method listStatusFSO.
@SuppressWarnings("methodlength")
public List<OzoneFileStatus> listStatusFSO(OmKeyArgs args, boolean recursive, String startKey, long numEntries, String clientAddress) throws IOException {
Preconditions.checkNotNull(args, "Key args can not be null");
if (numEntries <= 0) {
return new ArrayList<>();
}
  /**
   * A map sorted by OmKey to combine results from TableCache and DB for
   * each entity - Dir & File.
   *
   * Two separate maps are required because of the seek order ->
   * (1) Seek files in fileTable, (2) Seek dirs in dirTable.
   *
   * StartKey should be added to the final listStatuses, so if files and dirs
   * were combined into a single map, a directory with lower precedence would
   * appear at the top of the list even when the startKey is given as a
   * fileName.
   *
   * For example, startKey="a/file1". As per the seek order, all the files are
   * fetched first and then the directories are seeked. Assume a directory
   * named "a/b" exists. With one map, the sorted list would be
   * ["a/b", "a/file1"], but the expected list is ["a/file1", "a/b"]: the
   * startKey element should always be at the top of the listStatuses.
   * (A small TreeMap illustration of this ordering follows this method.)
   */
  TreeMap<String, OzoneFileStatus> cacheFileMap = new TreeMap<>();
  TreeMap<String, OzoneFileStatus> cacheDirMap = new TreeMap<>();
  final String volumeName = args.getVolumeName();
  final String bucketName = args.getBucketName();
  final String keyName = args.getKeyName();
  String seekFileInDB;
  String seekDirInDB;
  long prefixKeyInDB;
  String prefixPath = keyName;
  int countEntries = 0;
  // TODO: recursive flag=true will be handled in HDDS-4360 jira.
  Set<String> deletedKeySet = new TreeSet<>();
  TreeMap<String, OzoneFileStatus> tempCacheDirMap = new TreeMap<>();
  TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>> iterator;
  if (Strings.isNullOrEmpty(startKey)) {
    OzoneFileStatus fileStatus = getFileStatus(args, clientAddress);
    if (fileStatus.isFile()) {
      return Collections.singletonList(fileStatus);
    }
    /*
     * keyName is a directory.
     * Say, "/a" is the dir name and its objectID is 1024; the seek is then
     * done with "1024/" to get all immediate descendants.
     */
    if (fileStatus.getKeyInfo() != null) {
      prefixKeyInDB = fileStatus.getKeyInfo().getObjectID();
    } else {
      // list root directory.
      String bucketKey = metadataManager.getBucketKey(volumeName, bucketName);
      OmBucketInfo omBucketInfo = metadataManager.getBucketTable().get(bucketKey);
      prefixKeyInDB = omBucketInfo.getObjectID();
    }
    seekFileInDB = metadataManager.getOzonePathKey(prefixKeyInDB, "");
    seekDirInDB = metadataManager.getOzonePathKey(prefixKeyInDB, "");
    // Order of seek ->
    // (1) Seek files in fileTable
    // (2) Seek dirs in dirTable
    // First, under lock, obtain both entries from the dir/file cache and
    // generate entries marked for delete.
    metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName, bucketName);
    try {
      iterator = metadataManager
          .getKeyTable(getBucketLayout(metadataManager, volumeName, bucketName))
          .iterator();
      countEntries = getFilesAndDirsFromCacheWithBucket(volumeName, bucketName,
          cacheFileMap, tempCacheDirMap, deletedKeySet, prefixKeyInDB,
          seekFileInDB, seekDirInDB, prefixPath, startKey, countEntries,
          numEntries);
    } finally {
      metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName, bucketName);
    }
    countEntries = getFilesFromDirectory(cacheFileMap, seekFileInDB, prefixPath,
        prefixKeyInDB, countEntries, numEntries, deletedKeySet, iterator);
  } else {
    // keyName=/a/ and expected startKey=/a/b. startKey can't be /xyz/b.
    if (StringUtils.isNotBlank(keyName) &&
        !OzoneFSUtils.isImmediateChild(keyName, startKey)) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("StartKey {} is not an immediate child of keyName {}. "
            + "Returning an empty list", startKey, keyName);
      }
      return Collections.emptyList();
    }
    // assign startKeyPath if prefixPath is an empty string.
    if (StringUtils.isBlank(prefixPath)) {
      prefixPath = OzoneFSUtils.getParentDir(startKey);
    }
    OmKeyArgs startKeyArgs = args.toBuilder()
        .setKeyName(startKey)
        .setSortDatanodesInPipeline(false)
        .build();
    OzoneFileStatus fileStatusInfo = getOzoneFileStatusFSO(startKeyArgs, null, true);
    if (fileStatusInfo != null) {
      prefixKeyInDB = fileStatusInfo.getKeyInfo().getParentObjectID();
      if (fileStatusInfo.isDirectory()) {
        seekDirInDB = metadataManager.getOzonePathKey(prefixKeyInDB,
            fileStatusInfo.getKeyInfo().getFileName());
        // Order of seek -> (1) Seek dirs only in dirTable. In OM, the order of
        // search is always to seek into the fileTable first and then the
        // dirTable, so it is not required to search again in the fileTable.
        // Seek the given key in dirTable.
        metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName, bucketName);
        try {
          listStatusFindDirsInTableCache(tempCacheDirMap,
              metadataManager.getDirectoryTable(), prefixKeyInDB, seekDirInDB,
              prefixPath, startKey, volumeName, bucketName, countEntries,
              numEntries, deletedKeySet);
        } finally {
          metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName, bucketName);
        }
      } else {
        seekFileInDB = metadataManager.getOzonePathKey(prefixKeyInDB,
            fileStatusInfo.getKeyInfo().getFileName());
        // begins from the first sub-dir under the parent dir
        seekDirInDB = metadataManager.getOzonePathKey(prefixKeyInDB, "");
        // First, under lock, obtain both entries from the dir/file cache and
        // generate entries marked for delete.
        metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName, bucketName);
        try {
          iterator = metadataManager
              .getKeyTable(getBucketLayout(metadataManager, volumeName, bucketName))
              .iterator();
          countEntries = getFilesAndDirsFromCacheWithBucket(volumeName,
              bucketName, cacheFileMap, tempCacheDirMap, deletedKeySet,
              prefixKeyInDB, seekFileInDB, seekDirInDB, prefixPath, startKey,
              countEntries, numEntries);
        } finally {
          metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName, bucketName);
        }
        // 1. Seek the given key in the key table.
        countEntries = getFilesFromDirectory(cacheFileMap, seekFileInDB,
            prefixPath, prefixKeyInDB, countEntries, numEntries, deletedKeySet,
            iterator);
      }
    } else {
      // TODO: HDDS-4364: startKey can be a non-existed key
      if (LOG.isDebugEnabled()) {
        LOG.debug("StartKey {} is a non-existent key, returning an empty list",
            startKey);
      }
      return Collections.emptyList();
    }
  }
  // Add the cached directory entries as long as the running count
  // is less than numEntries.
  for (Map.Entry<String, OzoneFileStatus> dirEntry : tempCacheDirMap.entrySet()) {
    if (countEntries < numEntries) {
      cacheDirMap.put(dirEntry.getKey(), dirEntry.getValue());
      countEntries++;
    }
  }
  // 2. Seek the given key in the dir table.
  if (countEntries < numEntries) {
    getDirectories(cacheDirMap, seekDirInDB, prefixPath, prefixKeyInDB,
        countEntries, numEntries, recursive, volumeName, bucketName,
        deletedKeySet);
  }
  return buildFinalStatusList(cacheFileMap, cacheDirMap, args, clientAddress);
}
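The ordering concern described in the block comment at the top of this method can be reproduced with plain java.util.TreeMap instances. This standalone snippet (not part of KeyManagerImpl) shows why files and directories are kept in two separate sorted maps:

// With a single sorted map, the directory "a/b" sorts before the startKey
// file "a/file1", so the startKey would no longer be first.
TreeMap<String, String> fileMap = new TreeMap<>();
TreeMap<String, String> dirMap = new TreeMap<>();
fileMap.put("a/file1", "FILE");   // startKey
dirMap.put("a/b", "DIR");

TreeMap<String, String> single = new TreeMap<>();
single.putAll(fileMap);
single.putAll(dirMap);
System.out.println(single.keySet());   // [a/b, a/file1]

// Two maps, concatenated files-first, keep the startKey at the head of the list.
List<String> combined = new ArrayList<>(fileMap.keySet());
combined.addAll(dirMap.keySet());
System.out.println(combined);          // [a/file1, a/b]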
Use of org.apache.hadoop.hdds.utils.db.Table in project Ozone by Apache.
Class KeyManagerImpl, method listStatus.
/**
 * List the status for a file or a directory and its contents.
 *
 * @param args          Key args
 * @param recursive     For a directory, if true, all descendants of the
 *                      directory are listed
 * @param startKey      Key from which listing needs to start. If startKey
 *                      exists, its status is included in the final list.
 * @param numEntries    Number of entries to list from the start key
 * @param clientAddress a hint to the key manager: order the datanodes in the
 *                      returned pipeline by distance between client and
 *                      datanode.
 * @return list of file statuses
 */
@Override
@SuppressWarnings("methodlength")
public List<OzoneFileStatus> listStatus(OmKeyArgs args, boolean recursive,
    String startKey, long numEntries, String clientAddress) throws IOException {
  Preconditions.checkNotNull(args, "Key args can not be null");
  String volName = args.getVolumeName();
  String buckName = args.getBucketName();
  List<OzoneFileStatus> fileStatusList = new ArrayList<>();
  if (numEntries <= 0) {
    return fileStatusList;
  }
  if (isBucketFSOptimized(volName, buckName)) {
    return listStatusFSO(args, recursive, startKey, numEntries, clientAddress);
  }
  String volumeName = args.getVolumeName();
  String bucketName = args.getBucketName();
  String keyName = args.getKeyName();
  // A map sorted by OmKey to combine results from TableCache and DB.
  TreeMap<String, OzoneFileStatus> cacheKeyMap = new TreeMap<>();
  if (Strings.isNullOrEmpty(startKey)) {
    OzoneFileStatus fileStatus = getFileStatus(args, clientAddress);
    if (fileStatus.isFile()) {
      return Collections.singletonList(fileStatus);
    }
    // keyName is a directory
    startKey = OzoneFSUtils.addTrailingSlashIfNeeded(keyName);
  }
  // Note: eliminating the case where startCacheKey could end with '//'
  String keyArgs = OzoneFSUtils.addTrailingSlashIfNeeded(
      metadataManager.getOzoneKey(volumeName, bucketName, keyName));
  metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName, bucketName);
  Table keyTable = metadataManager.getKeyTable(
      getBucketLayout(metadataManager, volName, buckName));
  TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>> iterator;
  try {
    Iterator<Map.Entry<CacheKey<String>, CacheValue<OmKeyInfo>>> cacheIter =
        keyTable.cacheIterator();
    String startCacheKey = OZONE_URI_DELIMITER + volumeName
        + OZONE_URI_DELIMITER + bucketName + OZONE_URI_DELIMITER
        + ((startKey.equals(OZONE_URI_DELIMITER)) ? "" : startKey);
    // First, find the key in the TableCache
    listStatusFindKeyInTableCache(cacheIter, keyArgs, startCacheKey, recursive,
        cacheKeyMap);
    iterator = keyTable.iterator();
  } finally {
    metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName, bucketName);
  }
  // Then, find the key in the DB
  String seekKeyInDb = metadataManager.getOzoneKey(volumeName, bucketName, startKey);
  Table.KeyValue<String, OmKeyInfo> entry = iterator.seek(seekKeyInDb);
  int countEntries = 0;
  if (iterator.hasNext()) {
    if (entry.getKey().equals(keyArgs)) {
      // Skip the key itself, since we are listing inside the directory
      iterator.next();
    }
    // Iterate through seek results
    while (iterator.hasNext() && numEntries - countEntries > 0) {
      entry = iterator.next();
      String entryInDb = entry.getKey();
      OmKeyInfo omKeyInfo = entry.getValue();
      if (entryInDb.startsWith(keyArgs)) {
        String entryKeyName = omKeyInfo.getKeyName();
        if (recursive) {
          if (!isKeyDeleted(entryInDb, keyTable)) {
            cacheKeyMap.put(entryInDb, new OzoneFileStatus(omKeyInfo,
                scmBlockSize, !OzoneFSUtils.isFile(entryKeyName)));
            countEntries++;
          }
        } else {
          // Get the child of the directory to list from the entry. For
          // example, if the directory to list is /a and the entry is /a/b/c
          // where c is a file, the immediate child is b, which is a
          // directory; c should not be listed as a child of a.
          String immediateChild = OzoneFSUtils.getImmediateChild(entryKeyName, keyName);
          boolean isFile = OzoneFSUtils.isFile(immediateChild);
          if (isFile) {
            if (!isKeyDeleted(entryInDb, keyTable)) {
              cacheKeyMap.put(entryInDb,
                  new OzoneFileStatus(omKeyInfo, scmBlockSize, !isFile));
              countEntries++;
            }
          } else {
            // the entry is a directory
            if (!isKeyDeleted(entryInDb, keyTable)) {
              if (!entryKeyName.equals(immediateChild)) {
                OmKeyInfo fakeDirEntry = createDirectoryKey(
                    omKeyInfo.getVolumeName(), omKeyInfo.getBucketName(),
                    immediateChild, omKeyInfo.getAcls());
                cacheKeyMap.put(entryInDb,
                    new OzoneFileStatus(fakeDirEntry, scmBlockSize, true));
              } else {
                // If entryKeyName matches the dir name, we already have the info
                cacheKeyMap.put(entryInDb, new OzoneFileStatus(omKeyInfo, 0, true));
              }
              countEntries++;
            }
            // skip the other descendants of this child directory.
            iterator.seek(getNextGreaterString(volumeName, bucketName, immediateChild));
          }
        }
      } else {
        break;
      }
    }
  }
  countEntries = 0;
  // Convert the results in cacheKeyMap to a List
  for (OzoneFileStatus fileStatus : cacheKeyMap.values()) {
    // No need to check whether a key is deleted here; this is handled
    // when adding entries to cacheKeyMap from the DB.
    fileStatusList.add(fileStatus);
    countEntries++;
    if (countEntries >= numEntries) {
      break;
    }
  }
  // Clean up the temporary map
  cacheKeyMap.clear();
  List<OmKeyInfo> keyInfoList = new ArrayList<>(fileStatusList.size());
  fileStatusList.stream().map(s -> s.getKeyInfo()).forEach(keyInfoList::add);
  if (args.getLatestVersionLocation()) {
    slimLocationVersion(keyInfoList.toArray(new OmKeyInfo[0]));
  }
  refreshPipeline(keyInfoList);
  if (args.getSortDatanodes()) {
    sortDatanodes(clientAddress, keyInfoList.toArray(new OmKeyInfo[0]));
  }
  return fileStatusList;
}
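A minimal caller sketch for the method above, assuming a KeyManagerImpl reference named keyManager and the standard OmKeyArgs.Builder; the volume, bucket, and key names are placeholders:

// Hypothetical caller: list the children of "dir1/" in vol1/bucket1.
OmKeyArgs listArgs = new OmKeyArgs.Builder()
    .setVolumeName("vol1")
    .setBucketName("bucket1")
    .setKeyName("dir1/")
    .build();
// Empty startKey means "start from the beginning of the directory";
// up to 1000 entries are returned, clientAddress is omitted.
List<OzoneFileStatus> statuses = keyManager.listStatus(listArgs, false, "", 1000, null);
for (OzoneFileStatus status : statuses) {
  System.out.println((status.isDirectory() ? "d " : "- ")
      + status.getKeyInfo().getKeyName());
}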
Use of org.apache.hadoop.hdds.utils.db.Table in project Ozone by Apache.
Class KeyManagerImpl, method getFilesFromDirectory.
@SuppressWarnings("parameternumber")
private int getFilesFromDirectory(TreeMap<String, OzoneFileStatus> cacheKeyMap, String seekKeyInDB, String prefixKeyPath, long prefixKeyInDB, int countEntries, long numEntries, Set<String> deletedKeySet, TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>> iterator) throws IOException {
iterator.seek(seekKeyInDB);
while (iterator.hasNext() && numEntries - countEntries > 0) {
Table.KeyValue<String, OmKeyInfo> entry = iterator.next();
OmKeyInfo keyInfo = entry.getValue();
if (deletedKeySet.contains(keyInfo.getPath())) {
// move to next entry in the table
iterator.next();
// entry is actually deleted in cache and can exists in DB
continue;
}
if (!OMFileRequest.isImmediateChild(keyInfo.getParentObjectID(), prefixKeyInDB)) {
break;
}
keyInfo.setFileName(keyInfo.getKeyName());
String fullKeyPath = OMFileRequest.getAbsolutePath(prefixKeyPath, keyInfo.getKeyName());
keyInfo.setKeyName(fullKeyPath);
cacheKeyMap.put(fullKeyPath, new OzoneFileStatus(keyInfo, scmBlockSize, false));
countEntries++;
}
return countEntries;
}
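getFilesFromDirectory is private, but the underlying seek-then-scan idiom applies to any Table. A minimal sketch of that pattern, assuming a Table<String, OmKeyInfo> named someKeyTable and a String prefix are in scope (and that the surrounding method declares IOException):

// Hypothetical fragment: count all entries whose key starts with prefix.
long matched = 0;
try (TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>> it =
         someKeyTable.iterator()) {
  it.seek(prefix);                        // position at the first key >= prefix
  while (it.hasNext()) {
    Table.KeyValue<String, OmKeyInfo> kv = it.next();
    if (!kv.getKey().startsWith(prefix)) {
      break;                              // keys are sorted, so we can stop early
    }
    matched++;
  }
}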
Use of org.apache.hadoop.hdds.utils.db.Table in project Ozone by Apache.
Class KeyValueContainerMetadataInspector, method countPendingDeletesSchemaV2.
private long countPendingDeletesSchemaV2(DatanodeStoreSchemaTwoImpl schemaTwoStore)
    throws IOException {
  long pendingDeleteBlockCountTotal = 0;
  Table<Long, DeletedBlocksTransaction> delTxTable =
      schemaTwoStore.getDeleteTransactionTable();
  try (TableIterator<Long, ? extends Table.KeyValue<Long, DeletedBlocksTransaction>>
           iterator = delTxTable.iterator()) {
    while (iterator.hasNext()) {
      DeletedBlocksTransaction txn = iterator.next().getValue();
      // In schema 2, pending delete blocks are stored in the
      // transaction object. Since the actual blocks still exist in the
      // block data table with no prefix, they have already been
      // counted towards bytes used and total block count above.
      pendingDeleteBlockCountTotal += txn.getLocalIDList().size();
    }
  }
  return pendingDeleteBlockCountTotal;
}
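A small variation on the same loop, assuming the same schemaTwoStore reference and that getTxID() and getLocalIDList() are the generated protobuf getters on DeletedBlocksTransaction, reports each pending delete transaction instead of only the total:

// Hypothetical fragment: print each delete transaction and its block count.
try (TableIterator<Long, ? extends Table.KeyValue<Long, DeletedBlocksTransaction>> it =
         schemaTwoStore.getDeleteTransactionTable().iterator()) {
  while (it.hasNext()) {
    DeletedBlocksTransaction txn = it.next().getValue();
    System.out.println("txID=" + txn.getTxID()
        + " pendingBlocks=" + txn.getLocalIDList().size());
  }
}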