Use of org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs in project ozone by apache.
The class S3MultipartUploadAbortRequest, method preExecute.
@Override
public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
  KeyArgs keyArgs = getOmRequest().getAbortMultiPartUploadRequest().getKeyArgs();
  String keyPath = keyArgs.getKeyName();
  keyPath = validateAndNormalizeKey(ozoneManager.getEnableFileSystemPaths(),
      keyPath, getBucketLayout());
  return getOmRequest().toBuilder()
      .setAbortMultiPartUploadRequest(
          getOmRequest().getAbortMultiPartUploadRequest().toBuilder()
              .setKeyArgs(keyArgs.toBuilder()
                  .setModificationTime(Time.now()).setKeyName(keyPath)))
      .setUserInfo(getUserInfo())
      .build();
}
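The interesting step here is validateAndNormalizeKey: when file-system paths are enabled (ozoneManager.getEnableFileSystemPaths()), the key name is normalized before the request is replicated. The snippet below is only an illustrative sketch of that kind of normalization using plain java.nio.file, not the OM helper itself; the class name and behaviour details are assumptions for illustration.

// Hypothetical sketch: the flavour of normalization validateAndNormalizeKey is
// expected to perform on key paths; this is NOT the OM implementation.
import java.nio.file.Paths;

public final class KeyPathNormalizerSketch {

  static String normalize(String keyPath) {
    // Collapse duplicate separators and resolve "." segments.
    String normalized =
        Paths.get(keyPath).normalize().toString().replace('\\', '/');
    // Keep the key relative to the bucket root.
    return normalized.startsWith("/") ? normalized.substring(1) : normalized;
  }

  public static void main(String[] args) {
    // Prints "dir1/dir2/part-object".
    System.out.println(normalize("dir1//dir2/./part-object"));
  }
}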
Use of org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs in project ozone by apache.
The class S3MultipartUploadCommitPartRequest, method blockMPUCommitWithBucketLayoutFromOldClient.
/**
* Validates S3 MPU commit part requests.
* We do not want to allow older clients to commit MPU keys to buckets which
* use non-LEGACY layouts.
*
* @param req - the request to validate
* @param ctx - the validation context
* @return the validated request
* @throws OMException if the request is invalid
*/
@RequestFeatureValidator(
    conditions = ValidationCondition.OLDER_CLIENT_REQUESTS,
    processingPhase = RequestProcessingPhase.PRE_PROCESS,
    requestType = Type.CommitMultiPartUpload)
public static OMRequest blockMPUCommitWithBucketLayoutFromOldClient(
    OMRequest req, ValidationContext ctx) throws IOException {
  if (req.getCommitMultiPartUploadRequest().hasKeyArgs()) {
    KeyArgs keyArgs = req.getCommitMultiPartUploadRequest().getKeyArgs();
    if (keyArgs.hasVolumeName() && keyArgs.hasBucketName()) {
      BucketLayout bucketLayout = ctx.getBucketLayout(
          keyArgs.getVolumeName(), keyArgs.getBucketName());
      bucketLayout.validateSupportedOperation();
    }
  }
  return req;
}
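The layout check itself is delegated to BucketLayout.validateSupportedOperation(). As a rough, hedged sketch of the intent: an older client that predates bucket layouts may only write to LEGACY buckets, and anything else is rejected before the request is processed. The enum and exception below are simplified stand-ins, not the Ozone types.

// Simplified stand-in for the layout gate. The real check raises an OMException
// with result code NOT_SUPPORTED_OPERATION; names below mirror Ozone's layouts
// but the class is purely illustrative.
public final class LayoutGateSketch {

  enum Layout {
    LEGACY, OBJECT_STORE, FILE_SYSTEM_OPTIMIZED;

    void validateSupportedOperation() {
      if (this != LEGACY) {
        throw new UnsupportedOperationException(
            "Older clients may not modify a " + name() + " bucket");
      }
    }
  }

  public static void main(String[] args) {
    Layout.LEGACY.validateSupportedOperation();                // passes
    Layout.FILE_SYSTEM_OPTIMIZED.validateSupportedOperation(); // throws
  }
}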
Use of org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs in project ozone by apache.
The class S3MultipartUploadCommitPartRequest, method preExecute.
@Override
public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
  MultipartCommitUploadPartRequest multipartCommitUploadPartRequest =
      getOmRequest().getCommitMultiPartUploadRequest();
  KeyArgs keyArgs = multipartCommitUploadPartRequest.getKeyArgs();
  String keyPath = keyArgs.getKeyName();
  keyPath = validateAndNormalizeKey(ozoneManager.getEnableFileSystemPaths(),
      keyPath, getBucketLayout());
  return getOmRequest().toBuilder()
      .setCommitMultiPartUploadRequest(multipartCommitUploadPartRequest.toBuilder()
          .setKeyArgs(keyArgs.toBuilder()
              .setModificationTime(Time.now()).setKeyName(keyPath)))
      .setUserInfo(getUserInfo())
      .build();
}
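One detail worth calling out, as an assumption about intent rather than something the code states: the modification time is stamped with Time.now() here in preExecute, before the request is replicated, so that every OzoneManager later reads the same value back out of KeyArgs instead of consulting its own clock during apply. A minimal sketch of that pattern:

// Minimal sketch of "capture non-deterministic input once, then replicate it";
// not OM code, just the pattern the preExecute/apply split relies on.
public final class StampOnceSketch {
  public static void main(String[] args) {
    long stamped = System.currentTimeMillis(); // stand-in for Time.now() in preExecute
    // The stamped value travels inside the serialized request, so every replica
    // that applies the request sees the identical timestamp.
    long appliedOnReplicaA = stamped;
    long appliedOnReplicaB = stamped;
    System.out.println(appliedOnReplicaA == appliedOnReplicaB); // true
  }
}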
Use of org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs in project ozone by apache.
The class S3MultipartUploadCommitPartRequest, method validateAndUpdateCache.
@Override
@SuppressWarnings("methodlength")
public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
    long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
  MultipartCommitUploadPartRequest multipartCommitUploadPartRequest =
      getOmRequest().getCommitMultiPartUploadRequest();
  KeyArgs keyArgs = multipartCommitUploadPartRequest.getKeyArgs();
  Map<String, String> auditMap = buildKeyArgsAuditMap(keyArgs);
  String volumeName = keyArgs.getVolumeName();
  String bucketName = keyArgs.getBucketName();
  String keyName = keyArgs.getKeyName();
  OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
  ozoneManager.getMetrics().incNumCommitMultipartUploadParts();

  boolean acquiredLock = false;
  IOException exception = null;
  String partName = null;
  OMResponse.Builder omResponse =
      OmResponseUtil.getOMResponseBuilder(getOmRequest());
  OMClientResponse omClientResponse = null;
  OzoneManagerProtocolProtos.PartKeyInfo oldPartKeyInfo = null;
  String openKey = null;
  OmKeyInfo omKeyInfo = null;
  String multipartKey = null;
  OmMultipartKeyInfo multipartKeyInfo = null;
  Result result = null;
  OmBucketInfo omBucketInfo = null;
  OmBucketInfo copyBucketInfo = null;

  try {
    keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap);
    volumeName = keyArgs.getVolumeName();
    bucketName = keyArgs.getBucketName();

    // Check ACLs.
    checkKeyAcls(ozoneManager, volumeName, bucketName, keyName,
        IAccessAuthorizer.ACLType.WRITE, OzoneObj.ResourceType.KEY);

    acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK,
        volumeName, bucketName);
    validateBucketAndVolume(omMetadataManager, volumeName, bucketName);

    String uploadID = keyArgs.getMultipartUploadID();
    multipartKey = getMultipartKey(volumeName, bucketName, keyName,
        omMetadataManager, uploadID);
    multipartKeyInfo =
        omMetadataManager.getMultipartInfoTable().get(multipartKey);

    long clientID = multipartCommitUploadPartRequest.getClientID();
    openKey = getOpenKey(volumeName, bucketName, keyName, omMetadataManager,
        clientID);
    String ozoneKey =
        omMetadataManager.getOzoneKey(volumeName, bucketName, keyName);

    omKeyInfo = getOmKeyInfo(omMetadataManager, openKey, keyName);
    if (omKeyInfo == null) {
      throw new OMException("Failed to commit Multipart Upload key, as "
          + openKey + " entry is not found in the openKey table",
          KEY_NOT_FOUND);
    }

    // Set the data size and location info list.
    omKeyInfo.setDataSize(keyArgs.getDataSize());
    omKeyInfo.updateLocationInfoList(keyArgs.getKeyLocationsList().stream()
        .map(OmKeyLocationInfo::getFromProtobuf)
        .collect(Collectors.toList()), true);
    // Set the modification time.
    omKeyInfo.setModificationTime(keyArgs.getModificationTime());
    // Set the UpdateID to the current transactionLogIndex.
    omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());

    int partNumber = keyArgs.getMultipartNumber();
    partName = getPartName(ozoneKey, uploadID, partNumber);

    if (multipartKeyInfo == null) {
      // Move this part to the delete table.
      throw new OMException("No such Multipart upload is with specified "
          + "uploadId " + uploadID,
          OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR);
    }

    oldPartKeyInfo = multipartKeyInfo.getPartKeyInfo(partNumber);

    // Build this multipart upload part info.
    OzoneManagerProtocolProtos.PartKeyInfo.Builder partKeyInfo =
        OzoneManagerProtocolProtos.PartKeyInfo.newBuilder();
    partKeyInfo.setPartName(partName);
    partKeyInfo.setPartNumber(partNumber);
    partKeyInfo.setPartKeyInfo(omKeyInfo.getProtobuf(getOmRequest().getVersion()));

    // Add this part information to multipartKeyInfo.
    multipartKeyInfo.addPartKeyInfo(partNumber, partKeyInfo.build());
    // Set the UpdateID to the current transactionLogIndex.
    multipartKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());

    // oldPartKeyInfo will be deleted. Its updateID will be set in
    // S3MultipartUploadCommitPartResponse before it is added to the
    // DeletedKeyTable.

    // Add to cache: delete from the open key table and add to the multipart
    // info table. No need to add cache entries to the delete table, as no
    // read/write requests need that info for validation.
    omMetadataManager.getMultipartInfoTable().addCacheEntry(
        new CacheKey<>(multipartKey),
        new CacheValue<>(Optional.of(multipartKeyInfo), trxnLogIndex));
    omMetadataManager.getOpenKeyTable(getBucketLayout()).addCacheEntry(
        new CacheKey<>(openKey),
        new CacheValue<>(Optional.absent(), trxnLogIndex));

    omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName);
    long correctedSpace = omKeyInfo.getReplicatedSize();
    // TODO: S3MultipartUpload did not check quota and did not add nameSpace,
    // we need to fix these issues in HDDS-6650.
    omBucketInfo.incrUsedBytes(correctedSpace);

    omResponse.setCommitMultiPartUploadResponse(
        MultipartCommitUploadPartResponse.newBuilder().setPartName(partName));
    omClientResponse = getOmClientResponse(ozoneManager, oldPartKeyInfo,
        openKey, omKeyInfo, multipartKey, multipartKeyInfo, omResponse.build(),
        omBucketInfo.copyObject());

    result = Result.SUCCESS;
  } catch (IOException ex) {
    result = Result.FAILURE;
    exception = ex;
    omClientResponse = getOmClientResponse(ozoneManager, oldPartKeyInfo,
        openKey, omKeyInfo, multipartKey, multipartKeyInfo,
        createErrorOMResponse(omResponse, exception), copyBucketInfo);
  } finally {
    addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
        omDoubleBufferHelper);
    if (acquiredLock) {
      omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName,
          bucketName);
    }
  }

  logResult(ozoneManager, multipartCommitUploadPartRequest, keyArgs, auditMap,
      volumeName, bucketName, keyName, exception, partName, result);
  return omClientResponse;
}
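A point that is easy to miss in the block above: getPartKeyInfo followed by addPartKeyInfo means that committing the same part number again simply replaces the earlier PartKeyInfo, and the replaced entry (oldPartKeyInfo) is what the response class later queues for deletion so its blocks are reclaimed. A small sketch of that bookkeeping with plain collections (not the OM types):

// Minimal sketch of the part bookkeeping above, using plain Java collections:
// committing the same part number twice replaces the earlier entry, and the
// replaced entry is exactly what must be queued for deletion so its blocks
// are not leaked.
import java.util.TreeMap;

public final class PartReplacementSketch {
  public static void main(String[] args) {
    TreeMap<Integer, String> partKeyInfoMap = new TreeMap<>();
    partKeyInfoMap.put(1, "part1-firstAttempt");

    String oldPartKeyInfo = partKeyInfoMap.get(1);   // may be null on first commit
    partKeyInfoMap.put(1, "part1-retriedUpload");    // addPartKeyInfo(partNumber, ...)

    if (oldPartKeyInfo != null) {
      System.out.println("queue for deletion: " + oldPartKeyInfo);
    }
    System.out.println("current parts: " + partKeyInfoMap);
  }
}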
Use of org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs in project ozone by apache.
The class S3MultipartUploadCompleteRequest, method blockMPUCompleteWithBucketLayoutFromOldClient.
/**
* Validates S3 MPU complete requests.
* We do not want to allow older clients to complete MPU keys to buckets which
* use non-LEGACY layouts.
*
* @param req - the request to validate
* @param ctx - the validation context
* @return the validated request
* @throws OMException if the request is invalid
*/
@RequestFeatureValidator(
    conditions = ValidationCondition.OLDER_CLIENT_REQUESTS,
    processingPhase = RequestProcessingPhase.PRE_PROCESS,
    requestType = Type.CompleteMultiPartUpload)
public static OMRequest blockMPUCompleteWithBucketLayoutFromOldClient(
    OMRequest req, ValidationContext ctx) throws IOException {
  if (req.getCompleteMultiPartUploadRequest().hasKeyArgs()) {
    KeyArgs keyArgs = req.getCompleteMultiPartUploadRequest().getKeyArgs();
    if (keyArgs.hasVolumeName() && keyArgs.hasBucketName()) {
      BucketLayout bucketLayout = ctx.getBucketLayout(
          keyArgs.getVolumeName(), keyArgs.getBucketName());
      bucketLayout.validateSupportedOperation();
    }
  }
  return req;
}
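For context, these OM request classes sit behind Ozone's S3 Gateway, so the usual S3 multipart client flow is what ultimately produces them: initiating an upload, uploading each part (which ends in a commit-part request), and completing or aborting the upload. Below is a hedged usage sketch with the AWS SDK for Java v1; the endpoint, bucket, and file names are placeholders, credentials handling is omitted, and the mapping noted in the comments is an assumption about the gateway wiring rather than something shown in the snippets above.

// Hedged sketch: standard S3 multipart calls against an Ozone S3 Gateway
// endpoint (placeholder URL); credentials/region handling omitted for brevity.
import com.amazonaws.client.builder.AwsClientBuilder;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest;
import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
import com.amazonaws.services.s3.model.InitiateMultipartUploadResult;
import com.amazonaws.services.s3.model.UploadPartRequest;
import com.amazonaws.services.s3.model.UploadPartResult;
import java.io.File;
import java.util.Collections;

public final class MultipartFlowSketch {
  public static void main(String[] args) {
    AmazonS3 s3 = AmazonS3ClientBuilder.standard()
        .withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(
            "http://localhost:9878", "us-east-1"))
        .withPathStyleAccessEnabled(true)
        .build();

    String bucket = "bucket1";
    String key = "dir1/bigfile";
    File part1File = new File("/tmp/part1.bin");

    // Initiate the multipart upload (assumed to reach the OM as the MPU
    // initiate request).
    InitiateMultipartUploadResult init = s3.initiateMultipartUpload(
        new InitiateMultipartUploadRequest(bucket, key));

    // Upload one part (assumed to end in an S3MultipartUploadCommitPartRequest).
    UploadPartResult part1 = s3.uploadPart(new UploadPartRequest()
        .withBucketName(bucket).withKey(key)
        .withUploadId(init.getUploadId())
        .withPartNumber(1)
        .withFile(part1File)
        .withPartSize(part1File.length()));

    // Complete the upload (assumed to end in an S3MultipartUploadCompleteRequest);
    // abortMultipartUpload would correspond to the abort request instead.
    s3.completeMultipartUpload(new CompleteMultipartUploadRequest(
        bucket, key, init.getUploadId(),
        Collections.singletonList(part1.getPartETag())));
  }
}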