Use of org.apache.hadoop.ozone.om.helpers.BucketLayout in project ozone by apache.
The class PrefixParser, method parse().
public void parse(String vol, String buck, String db, String file) throws Exception {
  if (!Files.exists(Paths.get(db))) {
    System.out.println("DB path does not exist: " + db);
    return;
  }
  System.out.println("FilePath is: " + file);
  System.out.println("Db Path is: " + db);

  OzoneConfiguration conf = new OzoneConfiguration();
  conf.set(OMConfigKeys.OZONE_OM_DB_DIRS, db);
  OmMetadataManagerImpl metadataManager = new OmMetadataManagerImpl(conf);
  metadataManager.start(conf);

  org.apache.hadoop.fs.Path effectivePath = new org.apache.hadoop.fs.Path("/");
  Path p = Paths.get(file);

  String volumeKey = metadataManager.getVolumeKey(vol);
  if (!metadataManager.getVolumeTable().isExist(volumeKey)) {
    System.out.println("Invalid Volume: " + vol);
    metadataManager.stop();
    return;
  }
  parserStats[Types.VOLUME.ordinal()]++;

  // First get the info about the bucket
  String bucketKey = metadataManager.getBucketKey(vol, buck);
  OmBucketInfo info = metadataManager.getBucketTable().get(bucketKey);
  if (info == null) {
    System.out.println("Invalid Bucket: " + buck);
    metadataManager.stop();
    return;
  }

  // Resolve link buckets so the layout of the ultimate source bucket is used.
  BucketLayout bucketLayout = OzoneManagerUtils
      .resolveLinkBucketLayout(info, metadataManager, new HashSet<>())
      .getBucketLayout();
  if (!bucketLayout.isFileSystemOptimized()) {
    System.out.println("Prefix tool only works for FileSystem Optimized "
        + "buckets. Bucket Layout is: " + bucketLayout);
    metadataManager.stop();
    return;
  }

  long lastObjectId = info.getObjectID();
  WithParentObjectId objectBucketId = new WithParentObjectId();
  objectBucketId.setObjectID(lastObjectId);
  dumpInfo(Types.BUCKET, effectivePath, objectBucketId, bucketKey);

  // Walk the requested path one element at a time through the directory table.
  Iterator<Path> pathIterator = p.iterator();
  while (pathIterator.hasNext()) {
    Path elem = pathIterator.next();
    String path = metadataManager.getOzonePathKey(lastObjectId, elem.toString());
    OmDirectoryInfo directoryInfo = metadataManager.getDirectoryTable().get(path);
    org.apache.hadoop.fs.Path tmpPath = getEffectivePath(effectivePath, elem.toString());
    if (directoryInfo == null) {
      System.out.println("Given path contains a non-existent directory at: " + tmpPath);
      System.out.println("Dumping files and dirs at level: " + tmpPath.getParent());
      System.out.println();
      parserStats[Types.NON_EXISTENT_DIRECTORY.ordinal()]++;
      break;
    }
    effectivePath = tmpPath;
    dumpInfo(Types.INTERMEDIATE_DIRECTORY, effectivePath, directoryInfo, path);
    lastObjectId = directoryInfo.getObjectID();
  }

  // At the last level, parse both the file and directory tables.
  dumpTableInfo(Types.DIRECTORY, effectivePath, metadataManager.getDirectoryTable(), lastObjectId);
  dumpTableInfo(Types.FILE, effectivePath, metadataManager.getKeyTable(getBucketLayout()), lastObjectId);
  metadataManager.stop();
}
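For orientation, a minimal sketch of how parse might be driven, assuming a PrefixParser instance exposing the method above; the volume, bucket, DB directory, and in-bucket path below are placeholders:

  // Hypothetical driver for the parser above. All argument values are
  // placeholders; the DB path must point at a copy of the OM RocksDB.
  public static void main(String[] args) throws Exception {
    PrefixParser parser = new PrefixParser();
    // Walk "dir1/dir2" inside the FSO bucket "bucket1" of volume "vol1",
    // reading OM metadata from "/tmp/om.db".
    parser.parse("vol1", "bucket1", "/tmp/om.db", "dir1/dir2");
  }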
Use of org.apache.hadoop.ozone.om.helpers.BucketLayout in project ozone by apache.
The class TestObjectStore, method testCreateLinkBucketWithBucketLayout().
/**
 * Ensure link buckets have the same BucketLayout as their source buckets.
 */
@Test
public void testCreateLinkBucketWithBucketLayout() throws Exception {
  String volumeName = UUID.randomUUID().toString();
  String sourceBucket1Name = UUID.randomUUID().toString();
  BucketLayout sourceBucket1Layout = BucketLayout.FILE_SYSTEM_OPTIMIZED;
  String sourceBucket2Name = UUID.randomUUID().toString();
  BucketLayout sourceBucket2Layout = BucketLayout.OBJECT_STORE;
  String linkBucket1Name = UUID.randomUUID().toString();
  String linkBucket2Name = UUID.randomUUID().toString();
  // Chained link bucket
  String linkBucket3Name = UUID.randomUUID().toString();

  OzoneClient client = cluster.getClient();
  ObjectStore store = client.getObjectStore();

  // Create volume
  store.createVolume(volumeName);
  OzoneVolume volume = store.getVolume(volumeName);

  // Create source buckets
  BucketArgs.Builder builder = BucketArgs.newBuilder();
  builder.setBucketLayout(sourceBucket1Layout);
  volume.createBucket(sourceBucket1Name, builder.build());
  builder.setBucketLayout(sourceBucket2Layout);
  volume.createBucket(sourceBucket2Name, builder.build());

  // Create link buckets
  createLinkBucket(volume, sourceBucket1Name, linkBucket1Name);
  createLinkBucket(volume, sourceBucket2Name, linkBucket2Name);
  // linkBucket3 is chained onto linkBucket1
  createLinkBucket(volume, linkBucket1Name, linkBucket3Name);

  // Check that link buckets' layouts match their source bucket layouts
  OzoneBucket bucket = volume.getBucket(linkBucket1Name);
  Assert.assertEquals(sourceBucket1Layout, bucket.getBucketLayout());
  bucket = volume.getBucket(linkBucket2Name);
  Assert.assertEquals(sourceBucket2Layout, bucket.getBucketLayout());

  // linkBucket3 is chained onto linkBucket1, hence its bucket layout matches
  // linkBucket1's source bucket.
  bucket = volume.getBucket(linkBucket3Name);
  Assert.assertEquals(sourceBucket1Layout, bucket.getBucketLayout());
  Assert.assertEquals(linkBucket1Name, bucket.getSourceBucket());
}
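The createLinkBucket helper is not part of this excerpt. A plausible sketch, assuming the BucketArgs builder's source-volume and source-bucket fields that Ozone uses for link buckets:

  // Sketch of the helper called above (not shown in this excerpt): creates a
  // link bucket in the given volume pointing at sourceBucket in the same volume.
  private void createLinkBucket(OzoneVolume volume, String sourceBucket,
      String linkBucket) throws IOException {
    BucketArgs args = BucketArgs.newBuilder()
        .setSourceVolume(volume.getName())
        .setSourceBucket(sourceBucket)
        .build();
    volume.createBucket(linkBucket, args);
  }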
Use of org.apache.hadoop.ozone.om.helpers.BucketLayout in project ozone by apache.
The class KeyManagerImpl, method checkAccess().
/**
 * Check access for the given Ozone object.
 *
 * @param ozObject object for which access needs to be checked.
 * @param context Context object encapsulating all user related information.
 * @return true if the user has access, false otherwise.
 */
@Override
public boolean checkAccess(OzoneObj ozObject, RequestContext context) throws OMException {
  Objects.requireNonNull(ozObject);
  Objects.requireNonNull(context);
  Objects.requireNonNull(context.getClientUgi());

  String volume = ozObject.getVolumeName();
  String bucket = ozObject.getBucketName();
  String keyName = ozObject.getKeyName();
  String objectKey = metadataManager.getOzoneKey(volume, bucket, keyName);
  OmKeyArgs args = new OmKeyArgs.Builder()
      .setVolumeName(volume)
      .setBucketName(bucket)
      .setKeyName(keyName)
      .setHeadOp(true)
      .build();

  BucketLayout bucketLayout = BucketLayout.DEFAULT;
  if (ozoneManager != null) {
    String buckKey = ozoneManager.getMetadataManager().getBucketKey(volume, bucket);
    OmBucketInfo buckInfo = null;
    try {
      buckInfo = ozoneManager.getMetadataManager().getBucketTable().get(buckKey);
      bucketLayout = buckInfo.getBucketLayout();
    } catch (IOException e) {
      LOG.error("Failed to get bucket for the key: " + buckKey, e);
    }
  }

  metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volume, bucket);
  try {
    OMFileRequest.validateBucket(metadataManager, volume, bucket);
    OmKeyInfo keyInfo;
    // For WRITE access the key can only live in the OpenKeyTable,
    // since appends to existing keys are not supported.
    if (context.getAclRights() == IAccessAuthorizer.ACLType.WRITE) {
      keyInfo = metadataManager.getOpenKeyTable(bucketLayout).get(objectKey);
    } else {
      // Rename and delete operations will send ACL_TYPE DELETE
      if (context.isRecursiveAccessCheck()
          && context.getAclRights() == IAccessAuthorizer.ACLType.DELETE) {
        return checkChildrenAcls(ozObject, context);
      }
      try {
        OzoneFileStatus fileStatus = getFileStatus(args);
        keyInfo = fileStatus.getKeyInfo();
      } catch (IOException e) {
        // Avoid surfacing KEY_NOT_FOUND for reads of non-existent keys.
        if (context.getAclRights() == IAccessAuthorizer.ACLType.READ) {
          return true;
        } else {
          throw new OMException("Key not found, checkAccess failed. Key: " + objectKey, KEY_NOT_FOUND);
        }
      }
    }
    if (keyInfo == null) {
      // The key does not exist, but it is a parent "dir" of some key;
      // let access be determined by the volume/bucket/prefix ACLs.
      LOG.debug("key:{} is non-existent parent, permit access to user:{}", keyName, context.getClientUgi());
      return true;
    }
    boolean hasAccess = OzoneAclUtil.checkAclRights(keyInfo.getAcls(), context);
    if (LOG.isDebugEnabled()) {
      LOG.debug("user:{} has access rights for key:{} :{} ", context.getClientUgi(), ozObject.getKeyName(), hasAccess);
    }
    return hasAccess;
  } catch (IOException ex) {
    if (ex instanceof OMException) {
      throw (OMException) ex;
    }
    LOG.error("CheckAccess operation failed for key:{}/{}/{}", volume, bucket, keyName, ex);
    throw new OMException("Check access operation failed for key: " + keyName, ex, INTERNAL_ERROR);
  } finally {
    metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volume, bucket);
  }
}
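As a usage illustration, a hedged sketch of how a caller might assemble the two arguments; the exact builder entry points (OzoneObjInfo.Builder, the RequestContext builder) and the available setters vary across Ozone versions, so treat these as assumptions:

  // Illustrative only: build an OzoneObj and RequestContext for a READ check.
  OzoneObj obj = OzoneObjInfo.Builder.newBuilder()
      .setResType(OzoneObj.ResourceType.KEY)
      .setStoreType(OzoneObj.StoreType.OZONE)
      .setVolumeName("vol1")       // placeholder names
      .setBucketName("bucket1")
      .setKeyName("dir1/file1")
      .build();
  RequestContext readContext = RequestContext.newBuilder()
      .setClientUgi(UserGroupInformation.getCurrentUser())
      .setAclType(IAccessAuthorizer.ACLIdentityType.USER)
      .setAclRights(IAccessAuthorizer.ACLType.READ)
      .build();
  boolean canRead = keyManager.checkAccess(obj, readContext);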
Use of org.apache.hadoop.ozone.om.helpers.BucketLayout in project ozone by apache.
The class KeyManagerImpl, method lookupKey().
@Override
public OmKeyInfo lookupKey(OmKeyArgs args, String clientAddress) throws IOException {
  Preconditions.checkNotNull(args);
  String volumeName = args.getVolumeName();
  String bucketName = args.getBucketName();
  String keyName = args.getKeyName();

  metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName, bucketName);
  BucketLayout bucketLayout = getBucketLayout(metadataManager, args.getVolumeName(), args.getBucketName());
  keyName = OMClientRequest.validateAndNormalizeKey(enableFileSystemPaths, keyName, bucketLayout);
  OmKeyInfo value = null;
  try {
    if (bucketLayout.isFileSystemOptimized()) {
      value = getOmKeyInfoFSO(volumeName, bucketName, keyName);
    } else {
      value = getOmKeyInfo(volumeName, bucketName, keyName);
    }
  } catch (IOException ex) {
    if (ex instanceof OMException) {
      throw ex;
    }
    if (LOG.isDebugEnabled()) {
      LOG.debug("Get key failed for volume:{} bucket:{} key:{}", volumeName, bucketName, keyName, ex);
    }
    throw new OMException(ex.getMessage(), KEY_NOT_FOUND);
  } finally {
    metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName, bucketName);
  }

  if (value == null) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("volume:{} bucket:{} Key:{} not found", volumeName, bucketName, keyName);
    }
    throw new OMException("Key:" + keyName + " not found", KEY_NOT_FOUND);
  }

  if (args.getLatestVersionLocation()) {
    slimLocationVersion(value);
  }
  // Head operations do not need block tokens or pipeline details.
  if (!args.isHeadOp()) {
    // add block token for read.
    addBlockToken4Read(value);
    // Refresh container pipeline info from SCM
    // based on OmKeyArgs.refreshPipeline flag.
    // value won't be null, as the null check is done above.
    refresh(value);
    if (args.getSortDatanodes()) {
      sortDatanodes(clientAddress, value);
    }
  }
  return value;
}
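Caller-side, the flags consulted above map onto OmKeyArgs builder setters; a minimal sketch with placeholder names:

  // Build lookup arguments; setLatestVersionLocation and setSortDatanodes
  // correspond to the getLatestVersionLocation/getSortDatanodes flags read above.
  OmKeyArgs keyArgs = new OmKeyArgs.Builder()
      .setVolumeName("vol1")       // placeholder names
      .setBucketName("bucket1")
      .setKeyName("dir1/file1")
      .setLatestVersionLocation(true)
      .setSortDatanodes(true)
      .build();
  OmKeyInfo keyInfo = keyManager.lookupKey(keyArgs, clientAddress);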
Use of org.apache.hadoop.ozone.om.helpers.BucketLayout in project ozone by apache.
The class TestOzoneClientMultipartUploadWithFSO, method verifyUploadedPart().
private String verifyUploadedPart(String volumeName, String bucketName,
    String keyName, String uploadID, String partName,
    OMMetadataManager metadataMgr) throws IOException {
  OzoneManager ozoneManager = cluster.getOzoneManager();
  String buckKey = ozoneManager.getMetadataManager().getBucketKey(volumeName, bucketName);
  OmBucketInfo buckInfo = ozoneManager.getMetadataManager().getBucketTable().get(buckKey);
  BucketLayout bucketLayout = buckInfo.getBucketLayout();

  String multipartOpenKey = getMultipartOpenKey(uploadID, volumeName, bucketName, keyName, metadataMgr);
  String multipartKey = metadataMgr.getMultipartKey(volumeName, bucketName, keyName, uploadID);
  OmKeyInfo omKeyInfo = metadataMgr.getOpenKeyTable(bucketLayout).get(multipartOpenKey);
  OmMultipartKeyInfo omMultipartKeyInfo = metadataMgr.getMultipartInfoTable().get(multipartKey);

  Assert.assertNotNull(omKeyInfo);
  Assert.assertNotNull(omMultipartKeyInfo);
  Assert.assertEquals(OzoneFSUtils.getFileName(keyName), omKeyInfo.getKeyName());
  Assert.assertEquals(uploadID, omMultipartKeyInfo.getUploadID());

  TreeMap<Integer, OzoneManagerProtocolProtos.PartKeyInfo> partKeyInfoMap =
      omMultipartKeyInfo.getPartKeyInfoMap();
  for (Map.Entry<Integer, OzoneManagerProtocolProtos.PartKeyInfo> entry : partKeyInfoMap.entrySet()) {
    OzoneManagerProtocolProtos.PartKeyInfo partKeyInfo = entry.getValue();
    OmKeyInfo currentKeyPartInfo = OmKeyInfo.getFromProtobuf(partKeyInfo.getPartKeyInfo());
    Assert.assertEquals(keyName, currentKeyPartInfo.getKeyName());
    // verify dbPartName
    Assert.assertEquals(partName, partKeyInfo.getPartName());
  }
  return multipartKey;
}
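For context, a sketch of the test setup that produces the uploadID and partName this helper verifies, using the OzoneBucket multipart API; the replication settings and part data are placeholders:

  // Initiate a multipart upload, write one part, and capture the values
  // that verifyUploadedPart checks against the OM tables.
  OzoneBucket bucket = store.getVolume(volumeName).getBucket(bucketName);
  OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName,
      ReplicationType.RATIS, ReplicationFactor.ONE);
  String uploadID = multipartInfo.getUploadID();

  byte[] data = "sample-part-data".getBytes(StandardCharsets.UTF_8);
  OzoneOutputStream partStream = bucket.createMultipartKey(keyName, data.length, 1, uploadID);
  partStream.write(data);
  partStream.close();
  String partName = partStream.getCommitUploadPartInfo().getPartName();

  String multipartKey = verifyUploadedPart(volumeName, bucketName, keyName,
      uploadID, partName, cluster.getOzoneManager().getMetadataManager());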