Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite in project hbase by apache.
The class HFileSystem, method getStoragePolicyForOldHDFSVersion:
/**
 * Before Hadoop 2.8.0, there is no getStoragePolicy method in the FileSystem interface, and we
 * need to stay compatible with it. See HADOOP-12161 for more details.
 * @param path Path to get storage policy against
 * @return the storage policy name
 */
private String getStoragePolicyForOldHDFSVersion(Path path) {
  try {
    if (this.fs instanceof DistributedFileSystem) {
      DistributedFileSystem dfs = (DistributedFileSystem) this.fs;
      HdfsFileStatus status = dfs.getClient().getFileInfo(path.toUri().getPath());
      if (null != status) {
        if (unspecifiedStoragePolicyId < 0) {
          // Get the unspecified id field through reflection to avoid compilation error.
          // In later version BlockStoragePolicySuite#ID_UNSPECIFIED is moved to
          // HdfsConstants#BLOCK_STORAGE_POLICY_ID_UNSPECIFIED
          Field idUnspecified = BlockStoragePolicySuite.class.getField("ID_UNSPECIFIED");
          unspecifiedStoragePolicyId = idUnspecified.getByte(BlockStoragePolicySuite.class);
        }
        byte storagePolicyId = status.getStoragePolicy();
        if (storagePolicyId != unspecifiedStoragePolicyId) {
          BlockStoragePolicy[] policies = dfs.getStoragePolicies();
          for (BlockStoragePolicy policy : policies) {
            if (policy.getId() == storagePolicyId) {
              return policy.getName();
            }
          }
        }
      }
    }
  } catch (Throwable e) {
    LOG.warn("failed to get block storage policy of [" + path + "]", e);
  }
  return null;
}
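
For comparison, below is a minimal sketch of how a caller could prefer the FileSystem#getStoragePolicy API introduced by HADOOP-12161 in Hadoop 2.8.0 and fall back to the reflection-based method above on older clusters. It assumes the code is compiled against Hadoop 2.8.0 or later; the wrapper name getStoragePolicyNameCompat is hypothetical and not part of HFileSystem.

private String getStoragePolicyNameCompat(Path path) {
  try {
    // FileSystem#getStoragePolicy(Path) and BlockStoragePolicySpi were added in Hadoop 2.8.0
    // (HADOOP-12161); on a pre-2.8.0 client this call is missing at runtime and we fall through.
    org.apache.hadoop.fs.BlockStoragePolicySpi policy = this.fs.getStoragePolicy(path);
    if (policy != null) {
      return policy.getName();
    }
  } catch (Throwable t) {
    // Older Hadoop client on the classpath: ignore and use the legacy lookup instead.
  }
  // Fall back to the pre-2.8.0 reflection-based lookup shown above.
  return getStoragePolicyForOldHDFSVersion(path);
}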