Use of org.apache.hadoop.ozone.om.helpers.BucketLayout in project ozone by apache.
From class TestOzoneClientMultipartUploadWithFSO, method testAbortUploadSuccessWithParts:
@Test
public void testAbortUploadSuccessWithParts() throws Exception {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
String parentDir = "a/b/c/d/";
String keyName = parentDir + UUID.randomUUID().toString();
store.createVolume(volumeName);
OzoneVolume volume = store.getVolume(volumeName);
volume.createBucket(bucketName);
OzoneBucket bucket = volume.getBucket(bucketName);
OzoneManager ozoneManager = cluster.getOzoneManager();
String buckKey = ozoneManager.getMetadataManager().getBucketKey(volume.getName(), bucket.getName());
OmBucketInfo buckInfo = ozoneManager.getMetadataManager().getBucketTable().get(buckKey);
BucketLayout bucketLayout = buckInfo.getBucketLayout();
String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, ONE);
String partName = uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8));
OMMetadataManager metadataMgr = cluster.getOzoneManager().getMetadataManager();
String multipartKey = verifyUploadedPart(volumeName, bucketName, keyName, uploadID, partName, metadataMgr);
bucket.abortMultipartUpload(keyName, uploadID);
String multipartOpenKey = getMultipartOpenKey(uploadID, volumeName, bucketName, keyName, metadataMgr);
OmKeyInfo omKeyInfo = metadataMgr.getOpenKeyTable(bucketLayout).get(multipartOpenKey);
OmMultipartKeyInfo omMultipartKeyInfo = metadataMgr.getMultipartInfoTable().get(multipartKey);
Assert.assertNull(omKeyInfo);
Assert.assertNull(omMultipartKeyInfo);
// The deleteTable update happens through a batch operation
// (Table.putWithBatch()), which is asynchronous, so no assertion
// is made on the deleted-key table here.
}
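The helpers initiateMultipartUpload, uploadPart, verifyUploadedPart and getMultipartOpenKey are defined elsewhere in the test class. A hedged sketch of the first two, assuming they simply wrap the public OzoneBucket multipart client API (the real helpers may differ in detail), could look like this:
// Sketch only: assumes OzoneBucket.initiateMultipartUpload(key, type, factor)
// and OzoneBucket.createMultipartKey(key, size, partNumber, uploadID).
private String initiateMultipartUpload(OzoneBucket bucket, String keyName,
    ReplicationType replicationType, ReplicationFactor factor) throws Exception {
  OmMultipartInfo multipartInfo =
      bucket.initiateMultipartUpload(keyName, replicationType, factor);
  Assert.assertNotNull(multipartInfo.getUploadID());
  return multipartInfo.getUploadID();
}
private String uploadPart(OzoneBucket bucket, String keyName, String uploadID,
    int partNumber, byte[] data) throws Exception {
  OzoneOutputStream out =
      bucket.createMultipartKey(keyName, data.length, partNumber, uploadID);
  out.write(data, 0, data.length);
  out.close();
  // The part name is reported by the commit-upload-part info after close().
  return out.getCommitUploadPartInfo().getPartName();
}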
Use of org.apache.hadoop.ozone.om.helpers.BucketLayout in project ozone by apache.
From class BasicRootedOzoneClientAdapterImpl, method getBucket:
/**
* Get OzoneBucket object to operate in.
* Optionally create volume and bucket if not found.
*
* @param createIfNotExist Set this to true if the caller is a write operation
* in order to create the volume and bucket.
* @throws IOException Exceptions other than OMException with result code
* VOLUME_NOT_FOUND or BUCKET_NOT_FOUND.
*/
private OzoneBucket getBucket(String volumeStr, String bucketStr, boolean createIfNotExist) throws IOException {
Preconditions.checkNotNull(volumeStr);
Preconditions.checkNotNull(bucketStr);
if (bucketStr.isEmpty()) {
// throw FileNotFoundException in this case to make Hadoop common happy
throw new FileNotFoundException("getBucket: Invalid argument: given bucket string is empty.");
}
OzoneBucket bucket;
try {
bucket = proxy.getBucketDetails(volumeStr, bucketStr);
// resolve the bucket layout in case of Link Bucket
BucketLayout resolvedBucketLayout = OzoneClientUtils.resolveLinkBucketLayout(bucket, objectStore, new HashSet<>());
OzoneFSUtils.validateBucketLayout(bucket.getName(), resolvedBucketLayout);
} catch (OMException ex) {
if (createIfNotExist) {
// getBucketDetails may have thrown VOLUME_NOT_FOUND or BUCKET_NOT_FOUND
// (which of the two depends on whether ACLs are enabled). Both need to be handled.
switch(ex.getResult()) {
case VOLUME_NOT_FOUND:
// Create the volume first when the volume doesn't exist
try {
objectStore.createVolume(volumeStr);
} catch (OMException newVolEx) {
// Ignore the case where another client created the volume
if (!newVolEx.getResult().equals(VOLUME_ALREADY_EXISTS)) {
throw newVolEx;
}
}
// No break here. Proceed to create the bucket
case BUCKET_NOT_FOUND:
// When BUCKET_NOT_FOUND is thrown, we expect the parent volume
// exists, so that we don't call create volume and incur
// unnecessary ACL checks which could lead to unwanted behavior.
OzoneVolume volume = proxy.getVolumeDetails(volumeStr);
// Create the bucket
try {
// Buckets created by OFS should be in FSO layout
volume.createBucket(bucketStr, BucketArgs.newBuilder().setBucketLayout(this.defaultOFSBucketLayout).build());
} catch (OMException newBucEx) {
// Ignore the case where another client created the bucket
if (!newBucEx.getResult().equals(BUCKET_ALREADY_EXISTS)) {
throw newBucEx;
}
}
break;
default:
// Throw unhandled exception
throw ex;
}
// Try get bucket again
bucket = proxy.getBucketDetails(volumeStr, bucketStr);
} else {
throw ex;
}
}
return bucket;
}
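resolveLinkBucketLayout() is what ties this method to BucketLayout: a link bucket is resolved to its source so the layout check applies to the actual data bucket. The following is only a conceptual sketch of that resolution, not the OzoneClientUtils implementation, and it assumes OzoneBucket exposes isLink(), getSourceVolume() and getSourceBucket() accessors:
// Conceptual sketch: follow link buckets to their source until a non-link
// bucket is reached, using the visited set to break cycles of links.
static BucketLayout resolveLayoutSketch(OzoneBucket bucket,
    ObjectStore objectStore, Set<String> visited) throws IOException {
  while (bucket.isLink()) {
    if (!visited.add(bucket.getVolumeName() + "/" + bucket.getName())) {
      // A loop among link buckets; fall back to the default layout.
      return BucketLayout.DEFAULT;
    }
    bucket = objectStore.getVolume(bucket.getSourceVolume())
        .getBucket(bucket.getSourceBucket());
  }
  return bucket.getBucketLayout();
}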
Use of org.apache.hadoop.ozone.om.helpers.BucketLayout in project ozone by apache.
From class NSSummaryAdmin, method isFileSystemOptimizedBucket:
public boolean isFileSystemOptimizedBucket(String path) throws IOException {
OFSPath ofsPath = new OFSPath(path);
OzoneClient ozoneClient = OzoneClientFactory.getRpcClient(getOzoneConfig());
ObjectStore objectStore = ozoneClient.getObjectStore();
try {
OzoneBucket bucket = objectStore.getVolume(ofsPath.getVolumeName()).getBucket(ofsPath.getBucketName());
// Resolve the bucket layout in case this is a Link Bucket.
BucketLayout resolvedBucketLayout = OzoneClientUtils.resolveLinkBucketLayout(bucket, objectStore, new HashSet<>());
return resolvedBucketLayout.isFileSystemOptimized();
} catch (IOException e) {
System.out.println("Bucket layout couldn't be verified for path: " + ofsPath + ". Exception: " + e);
return false;
}
}
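The snippet above never closes the OzoneClient it creates. A hedged variant of the same method that releases the RPC client with try-with-resources (behaviour otherwise unchanged) might look like:
public boolean isFileSystemOptimizedBucket(String path) throws IOException {
  OFSPath ofsPath = new OFSPath(path);
  // try-with-resources ensures the RPC client is closed even on failure.
  try (OzoneClient ozoneClient =
           OzoneClientFactory.getRpcClient(getOzoneConfig())) {
    ObjectStore objectStore = ozoneClient.getObjectStore();
    OzoneBucket bucket = objectStore.getVolume(ofsPath.getVolumeName())
        .getBucket(ofsPath.getBucketName());
    // Resolve the bucket layout in case this is a link bucket.
    BucketLayout resolvedBucketLayout =
        OzoneClientUtils.resolveLinkBucketLayout(bucket, objectStore,
            new HashSet<>());
    return resolvedBucketLayout.isFileSystemOptimized();
  } catch (IOException e) {
    System.out.println("Bucket layout couldn't be verified for path: "
        + ofsPath + ". Exception: " + e);
    return false;
  }
}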
Use of org.apache.hadoop.ozone.om.helpers.BucketLayout in project ozone by apache.
From class OMBucketCreateRequest, method validateAndUpdateCache:
@Override
public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, long transactionLogIndex, OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
OMMetrics omMetrics = ozoneManager.getMetrics();
omMetrics.incNumBucketCreates();
OMMetadataManager metadataManager = ozoneManager.getMetadataManager();
CreateBucketRequest createBucketRequest = getOmRequest().getCreateBucketRequest();
BucketInfo bucketInfo = createBucketRequest.getBucketInfo();
String volumeName = bucketInfo.getVolumeName();
String bucketName = bucketInfo.getBucketName();
OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(getOmRequest());
OmBucketInfo omBucketInfo = null;
if (bucketInfo.getBucketLayout() == null || bucketInfo.getBucketLayout().equals(BucketLayoutProto.LEGACY)) {
// Bucket layout was not explicitly set (or was LEGACY); fall back to the OM's configured default bucket layout.
String omDefaultBucketLayout = ozoneManager.getOMDefaultBucketLayout();
BucketLayout defaultType = BucketLayout.fromString(omDefaultBucketLayout);
omBucketInfo = OmBucketInfo.getFromProtobuf(bucketInfo, defaultType);
} else {
omBucketInfo = OmBucketInfo.getFromProtobuf(bucketInfo);
}
if (omBucketInfo.getBucketLayout().isFileSystemOptimized()) {
omMetrics.incNumFSOBucketCreates();
}
AuditLogger auditLogger = ozoneManager.getAuditLogger();
OzoneManagerProtocolProtos.UserInfo userInfo = getOmRequest().getUserInfo();
String volumeKey = metadataManager.getVolumeKey(volumeName);
String bucketKey = metadataManager.getBucketKey(volumeName, bucketName);
IOException exception = null;
boolean acquiredBucketLock = false;
boolean acquiredVolumeLock = false;
OMClientResponse omClientResponse = null;
try {
// check Acl
if (ozoneManager.getAclsEnabled()) {
checkAcls(ozoneManager, OzoneObj.ResourceType.BUCKET, OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.CREATE, volumeName, bucketName, null);
}
acquiredVolumeLock = metadataManager.getLock().acquireReadLock(VOLUME_LOCK, volumeName);
acquiredBucketLock = metadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volumeName, bucketName);
OmVolumeArgs omVolumeArgs = metadataManager.getVolumeTable().getReadCopy(volumeKey);
// Check if the volume exists
if (omVolumeArgs == null) {
LOG.debug("volume: {} not found ", volumeName);
throw new OMException("Volume doesn't exist", VOLUME_NOT_FOUND);
}
// Check if bucket already exists
if (metadataManager.getBucketTable().isExist(bucketKey)) {
LOG.debug("bucket: {} already exists ", bucketName);
throw new OMException("Bucket already exist", BUCKET_ALREADY_EXISTS);
}
// Check quotaInBytes to update
checkQuotaBytesValid(metadataManager, omVolumeArgs, omBucketInfo, volumeKey);
// Add objectID and updateID
omBucketInfo.setObjectID(ozoneManager.getObjectIdFromTxId(transactionLogIndex));
omBucketInfo.setUpdateID(transactionLogIndex, ozoneManager.isRatisEnabled());
// Add default acls from volume.
addDefaultAcls(omBucketInfo, omVolumeArgs);
// check namespace quota
checkQuotaInNamespace(omVolumeArgs, 1L);
// update used namespace for volume
omVolumeArgs.incrUsedNamespace(1L);
// Update table cache.
metadataManager.getVolumeTable().addCacheEntry(new CacheKey<>(volumeKey), new CacheValue<>(Optional.of(omVolumeArgs), transactionLogIndex));
metadataManager.getBucketTable().addCacheEntry(new CacheKey<>(bucketKey), new CacheValue<>(Optional.of(omBucketInfo), transactionLogIndex));
omResponse.setCreateBucketResponse(CreateBucketResponse.newBuilder().build());
omClientResponse = new OMBucketCreateResponse(omResponse.build(), omBucketInfo, omVolumeArgs.copyObject());
} catch (IOException ex) {
exception = ex;
omClientResponse = new OMBucketCreateResponse(createErrorOMResponse(omResponse, exception));
} finally {
addResponseToDoubleBuffer(transactionLogIndex, omClientResponse, ozoneManagerDoubleBufferHelper);
if (acquiredBucketLock) {
metadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, bucketName);
}
if (acquiredVolumeLock) {
metadataManager.getLock().releaseReadLock(VOLUME_LOCK, volumeName);
}
}
// Performing audit logging outside of the lock.
auditLog(auditLogger, buildAuditMessage(OMAction.CREATE_BUCKET, omBucketInfo.toAuditMap(), exception, userInfo));
// return response.
if (exception == null) {
LOG.debug("created bucket: {} in volume: {}", bucketName, volumeName);
omMetrics.incNumBuckets();
return omClientResponse;
} else {
omMetrics.incNumBucketCreateFails();
LOG.error("Bucket creation failed for bucket:{} in volume:{}", bucketName, volumeName, exception);
return omClientResponse;
}
}
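For context, the bucket layout this handler inspects originates from the client's BucketArgs. A minimal client-side sketch (with a hypothetical bucket name, assuming an existing OzoneVolume handle named volume) is:
// Client-side sketch: explicitly request an FSO layout so that
// validateAndUpdateCache() above sees a non-LEGACY layout and
// increments the FSO bucket-create metric.
BucketArgs fsoArgs = BucketArgs.newBuilder()
    .setBucketLayout(BucketLayout.FILE_SYSTEM_OPTIMIZED)
    .build();
volume.createBucket("fso-bucket", fsoArgs);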
Use of org.apache.hadoop.ozone.om.helpers.BucketLayout in project ozone by apache.
From class OzoneManagerRatisUtils, method createClientRequest:
/**
* Create OMClientRequest which encapsulates the OMRequest.
* @param omRequest
* @return OMClientRequest
* @throws IOException
*/
@SuppressWarnings("checkstyle:methodlength")
public static OMClientRequest createClientRequest(OMRequest omRequest, OzoneManager ozoneManager) throws IOException {
// Exception handling in createClientRequest(OMRequest, OzoneManager):
// whether the request takes the FSO or the non-FSO path, both request
// classes provide a validateAndUpdateCache() method, which calls
// validateBucketAndVolume() to validate the bucket and volume and throw
// the necessary exceptions. validateAndUpdateCache() also has a catch
// block that handles those exceptions appropriately.
Type cmdType = omRequest.getCmdType();
OzoneManagerProtocolProtos.KeyArgs keyArgs;
BucketLayout bucketLayout = BucketLayout.DEFAULT;
switch(cmdType) {
case CreateVolume:
return new OMVolumeCreateRequest(omRequest);
case SetVolumeProperty:
boolean hasQuota = omRequest.getSetVolumePropertyRequest().hasQuotaInBytes();
boolean hasOwner = omRequest.getSetVolumePropertyRequest().hasOwnerName();
Preconditions.checkState(hasOwner || hasQuota, "Either Quota or owner " + "should be set in the SetVolumeProperty request");
Preconditions.checkState(!(hasOwner && hasQuota), "Either Quota or " + "owner should be set in the SetVolumeProperty request. Should not " + "set both");
if (hasQuota) {
return new OMVolumeSetQuotaRequest(omRequest);
} else {
return new OMVolumeSetOwnerRequest(omRequest);
}
case DeleteVolume:
return new OMVolumeDeleteRequest(omRequest);
case CreateBucket:
return new OMBucketCreateRequest(omRequest);
case DeleteBucket:
return new OMBucketDeleteRequest(omRequest);
case SetBucketProperty:
boolean hasBucketOwner = omRequest.getSetBucketPropertyRequest().getBucketArgs().hasOwnerName();
if (hasBucketOwner) {
return new OMBucketSetOwnerRequest(omRequest);
} else {
return new OMBucketSetPropertyRequest(omRequest);
}
case AddAcl:
case RemoveAcl:
case SetAcl:
return getOMAclRequest(omRequest, ozoneManager);
case GetDelegationToken:
return new OMGetDelegationTokenRequest(omRequest);
case CancelDelegationToken:
return new OMCancelDelegationTokenRequest(omRequest);
case RenewDelegationToken:
return new OMRenewDelegationTokenRequest(omRequest);
case GetS3Secret:
return new S3GetSecretRequest(omRequest);
case RecoverTrash:
return new OMTrashRecoverRequest(omRequest);
case FinalizeUpgrade:
return new OMFinalizeUpgradeRequest(omRequest);
case Prepare:
return new OMPrepareRequest(omRequest);
case CancelPrepare:
return new OMCancelPrepareRequest(omRequest);
case RevokeS3Secret:
return new S3RevokeSecretRequest(omRequest);
/**
* Following key requests will be created in {@link OMKeyRequestFactory}.
*/
case CreateDirectory:
case CreateFile:
case CreateKey:
case AllocateBlock:
case CommitKey:
case DeleteKey:
case DeleteKeys:
case RenameKey:
case RenameKeys:
case PurgeKeys:
case PurgePaths:
case InitiateMultiPartUpload:
case CommitMultiPartUpload:
case AbortMultiPartUpload:
case CompleteMultiPartUpload:
return OMKeyRequestFactory.createRequest(omRequest, ozoneManager);
default:
throw new IllegalStateException("Unrecognized write command " + "type request" + cmdType);
}
}
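The key requests delegated to OMKeyRequestFactory are chosen per bucket layout. A hedged sketch of how such a layout lookup can be done against OM metadata, mirroring the bucket-table lookup in the first snippet above (the helper name is hypothetical), is:
// Hypothetical helper: resolve the layout of an existing bucket from the
// OM metadata manager, falling back to the default when it is not found.
private static BucketLayout getBucketLayout(OMMetadataManager metadataManager,
    String volumeName, String bucketName) throws IOException {
  String bucketKey = metadataManager.getBucketKey(volumeName, bucketName);
  OmBucketInfo bucketInfo = metadataManager.getBucketTable().get(bucketKey);
  if (bucketInfo == null) {
    // Bucket not found (or not yet committed); use the default layout.
    return BucketLayout.DEFAULT;
  }
  return bucketInfo.getBucketLayout();
}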