use of org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartCommitUploadPartRequest in project ozone by apache.
the class OMRequestTestUtils method createCommitPartMPURequest.
/**
 * Create an OMRequest which encapsulates a MultipartCommitUploadPart request.
 * @param volumeName
 * @param bucketName
 * @param keyName
 * @param clientID
 * @param size
 * @param multipartUploadID
 * @param partNumber
 */
public static OMRequest createCommitPartMPURequest(String volumeName,
    String bucketName, String keyName, long clientID, long size,
    String multipartUploadID, int partNumber) {
// The size is a dummy value and the key-location list is left empty, as this
// request is only used in unit tests.
KeyArgs.Builder keyArgs = KeyArgs.newBuilder()
    .setVolumeName(volumeName)
    .setKeyName(keyName)
    .setBucketName(bucketName)
    .setDataSize(size)
    .setMultipartNumber(partNumber)
    .setMultipartUploadID(multipartUploadID)
    .addAllKeyLocations(new ArrayList<>());
MultipartCommitUploadPartRequest multipartCommitUploadPartRequest =
    MultipartCommitUploadPartRequest.newBuilder()
        .setKeyArgs(keyArgs)
        .setClientID(clientID)
        .build();
return OMRequest.newBuilder()
    .setClientId(UUID.randomUUID().toString())
    .setCmdType(OzoneManagerProtocolProtos.Type.CommitMultiPartUpload)
    .setCommitMultiPartUploadRequest(multipartCommitUploadPartRequest)
    .build();
}
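For context, here is a hedged sketch of how this helper might be exercised in a unit test. The protobuf getters follow directly from the setters used above, but the test name, the literal values, and the JUnit 4 Assert style are illustrative assumptions, not taken from the Ozone test suite.
@Test
public void testCreateCommitPartMPURequest() {
  // Illustrative values only.
  OMRequest omRequest = OMRequestTestUtils.createCommitPartMPURequest(
      "vol1", "bucket1", "key1", 100L, 1024L, "upload-id-1", 1);
  Assert.assertEquals(OzoneManagerProtocolProtos.Type.CommitMultiPartUpload,
      omRequest.getCmdType());
  MultipartCommitUploadPartRequest commitPart =
      omRequest.getCommitMultiPartUploadRequest();
  Assert.assertEquals(100L, commitPart.getClientID());
  Assert.assertEquals("upload-id-1",
      commitPart.getKeyArgs().getMultipartUploadID());
  Assert.assertEquals(1, commitPart.getKeyArgs().getMultipartNumber());
}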
use of org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartCommitUploadPartRequest in project ozone by apache.
the class OzoneManagerProtocolClientSideTranslatorPB method commitMultipartUploadPart.
@Override
public OmMultipartCommitUploadPartInfo commitMultipartUploadPart(OmKeyArgs omKeyArgs, long clientId) throws IOException {
List<OmKeyLocationInfo> locationInfoList = omKeyArgs.getLocationInfoList();
Preconditions.checkNotNull(locationInfoList);
MultipartCommitUploadPartRequest.Builder multipartCommitUploadPartRequest =
    MultipartCommitUploadPartRequest.newBuilder();
KeyArgs.Builder keyArgs = KeyArgs.newBuilder()
    .setVolumeName(omKeyArgs.getVolumeName())
    .setBucketName(omKeyArgs.getBucketName())
    .setKeyName(omKeyArgs.getKeyName())
    .setMultipartUploadID(omKeyArgs.getMultipartUploadID())
    .setIsMultipartKey(omKeyArgs.getIsMultipartKey())
    .setMultipartNumber(omKeyArgs.getMultipartUploadPartNumber())
    .setDataSize(omKeyArgs.getDataSize())
    .addAllKeyLocations(locationInfoList.stream()
        .map(info -> info.getProtobuf(CURRENT_VERSION))
        .collect(Collectors.toList()));
multipartCommitUploadPartRequest.setClientID(clientId);
multipartCommitUploadPartRequest.setKeyArgs(keyArgs.build());
OMRequest omRequest = createOMRequest(Type.CommitMultiPartUpload)
    .setCommitMultiPartUploadRequest(multipartCommitUploadPartRequest.build())
    .build();
MultipartCommitUploadPartResponse response =
    handleError(submitRequest(omRequest)).getCommitMultiPartUploadResponse();
OmMultipartCommitUploadPartInfo info = new OmMultipartCommitUploadPartInfo(response.getPartName());
return info;
}
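A minimal sketch of what a caller of this translator method could look like. It assumes OmKeyArgs.Builder setters that mirror the getters read above, plus an already written list of OmKeyLocationInfo blocks for the part; treat it as illustrative rather than the actual Ozone client code path.
// Illustrative helper: builds OmKeyArgs for one part and commits it through the
// client-side translator. All parameters are assumed to be supplied by the caller.
static String commitPart(OzoneManagerProtocolClientSideTranslatorPB om,
    String volume, String bucket, String key, String uploadID, int partNumber,
    long bytesWritten, List<OmKeyLocationInfo> blocks, long clientID)
    throws IOException {
  OmKeyArgs partKeyArgs = new OmKeyArgs.Builder()
      .setVolumeName(volume)
      .setBucketName(bucket)
      .setKeyName(key)
      .setIsMultipartKey(true)
      .setMultipartUploadID(uploadID)
      .setMultipartUploadPartNumber(partNumber)
      .setDataSize(bytesWritten)
      .setLocationInfoList(blocks)
      .build();
  OmMultipartCommitUploadPartInfo commitInfo =
      om.commitMultipartUploadPart(partKeyArgs, clientID);
  // The returned part name is what the caller later supplies when completing
  // the multipart upload.
  return commitInfo.getPartName();
}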
use of org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartCommitUploadPartRequest in project ozone by apache.
the class S3MultipartUploadCommitPartRequest method validateAndUpdateCache.
@Override
@SuppressWarnings("methodlength")
public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
    long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
MultipartCommitUploadPartRequest multipartCommitUploadPartRequest =
    getOmRequest().getCommitMultiPartUploadRequest();
KeyArgs keyArgs = multipartCommitUploadPartRequest.getKeyArgs();
Map<String, String> auditMap = buildKeyArgsAuditMap(keyArgs);
String volumeName = keyArgs.getVolumeName();
String bucketName = keyArgs.getBucketName();
String keyName = keyArgs.getKeyName();
OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
ozoneManager.getMetrics().incNumCommitMultipartUploadParts();
boolean acquiredLock = false;
IOException exception = null;
String partName = null;
OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(getOmRequest());
OMClientResponse omClientResponse = null;
OzoneManagerProtocolProtos.PartKeyInfo oldPartKeyInfo = null;
String openKey = null;
OmKeyInfo omKeyInfo = null;
String multipartKey = null;
OmMultipartKeyInfo multipartKeyInfo = null;
Result result = null;
OmBucketInfo omBucketInfo = null;
OmBucketInfo copyBucketInfo = null;
try {
keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap);
volumeName = keyArgs.getVolumeName();
bucketName = keyArgs.getBucketName();
// check acl
checkKeyAcls(ozoneManager, volumeName, bucketName, keyName, IAccessAuthorizer.ACLType.WRITE, OzoneObj.ResourceType.KEY);
acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volumeName, bucketName);
validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
String uploadID = keyArgs.getMultipartUploadID();
multipartKey = getMultipartKey(volumeName, bucketName, keyName, omMetadataManager, uploadID);
multipartKeyInfo = omMetadataManager.getMultipartInfoTable().get(multipartKey);
long clientID = multipartCommitUploadPartRequest.getClientID();
openKey = getOpenKey(volumeName, bucketName, keyName, omMetadataManager, clientID);
String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName, keyName);
omKeyInfo = getOmKeyInfo(omMetadataManager, openKey, keyName);
if (omKeyInfo == null) {
throw new OMException("Failed to commit Multipart Upload key, as " + openKey + "entry is not found in the openKey table", KEY_NOT_FOUND);
}
// set the data size and location info list
omKeyInfo.setDataSize(keyArgs.getDataSize());
omKeyInfo.updateLocationInfoList(keyArgs.getKeyLocationsList().stream()
    .map(OmKeyLocationInfo::getFromProtobuf)
    .collect(Collectors.toList()), true);
// Set Modification time
omKeyInfo.setModificationTime(keyArgs.getModificationTime());
// Set the UpdateID to current transactionLogIndex
omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());
int partNumber = keyArgs.getMultipartNumber();
partName = getPartName(ozoneKey, uploadID, partNumber);
if (multipartKeyInfo == null) {
// The multipart upload has been aborted or never existed; the response
// handler moves this part's data to the delete table so it can be cleaned
// up, and the request fails.
throw new OMException("No such Multipart upload is with specified uploadId "
    + uploadID, OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR);
}
oldPartKeyInfo = multipartKeyInfo.getPartKeyInfo(partNumber);
// Build this multipart upload part info.
OzoneManagerProtocolProtos.PartKeyInfo.Builder partKeyInfo = OzoneManagerProtocolProtos.PartKeyInfo.newBuilder();
partKeyInfo.setPartName(partName);
partKeyInfo.setPartNumber(partNumber);
partKeyInfo.setPartKeyInfo(omKeyInfo.getProtobuf(getOmRequest().getVersion()));
// Add this part information in to multipartKeyInfo.
multipartKeyInfo.addPartKeyInfo(partNumber, partKeyInfo.build());
// Set the UpdateID to current transactionLogIndex
multipartKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());
// OldPartKeyInfo will be deleted. Its updateID will be set in
// S3MultipartUploadCommitPartResponse before being added to
// DeletedKeyTable.
// Add to cache.
// Delete from open key table and add it to multipart info table.
// No need to add cache entries to the delete table, as no read/write
// request needs that info for validation.
omMetadataManager.getMultipartInfoTable().addCacheEntry(
    new CacheKey<>(multipartKey),
    new CacheValue<>(Optional.of(multipartKeyInfo), trxnLogIndex));
omMetadataManager.getOpenKeyTable(getBucketLayout()).addCacheEntry(
    new CacheKey<>(openKey),
    new CacheValue<>(Optional.absent(), trxnLogIndex));
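// Note: a CacheValue wrapping Optional.of(...) makes the updated
// multipartKeyInfo visible to subsequent requests right away, while
// Optional.absent() marks the open key as deleted in the cache; the actual
// RocksDB updates happen later, when the double buffer flushes this response.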
long scmBlockSize = ozoneManager.getScmBlockSize();
int factor = omKeyInfo.getReplicationConfig().getRequiredNodes();
omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName);
// Blocks were pre-allocated and usedBytes was updated during createKey and
// allocateBlock. The space charged for the key should be based on its actual
// size, so subtract the total size of the blocks that were reserved earlier.
long correctedSpace = omKeyInfo.getDataSize() * factor
    - keyArgs.getKeyLocationsList().size() * scmBlockSize * factor;
omBucketInfo.incrUsedBytes(correctedSpace);
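// Worked example with illustrative numbers (not taken from the request): a
// part that actually wrote 1 MB, replicated 3 ways, with one pre-allocated
// 256 MB SCM block gives
//   correctedSpace = 1 MB * 3 - 1 * 256 MB * 3 = -765 MB,
// i.e. usedBytes drops by the reserved-but-unused space.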
omResponse.setCommitMultiPartUploadResponse(
    MultipartCommitUploadPartResponse.newBuilder().setPartName(partName));
omClientResponse = getOmClientResponse(ozoneManager, oldPartKeyInfo, openKey,
    omKeyInfo, multipartKey, multipartKeyInfo, omResponse.build(),
    omBucketInfo.copyObject());
result = Result.SUCCESS;
} catch (IOException ex) {
result = Result.FAILURE;
exception = ex;
omClientResponse = getOmClientResponse(ozoneManager, oldPartKeyInfo, openKey,
    omKeyInfo, multipartKey, multipartKeyInfo,
    createErrorOMResponse(omResponse, exception), copyBucketInfo);
} finally {
addResponseToDoubleBuffer(trxnLogIndex, omClientResponse, omDoubleBufferHelper);
if (acquiredLock) {
omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, bucketName);
}
}
logResult(ozoneManager, multipartCommitUploadPartRequest, keyArgs, auditMap,
    volumeName, bucketName, keyName, exception, partName, result);
return omClientResponse;
}
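The part bookkeeping above boils down to a map keyed by part number: adding a PartKeyInfo for a part number that was already committed replaces the previous entry (captured here as oldPartKeyInfo), whose blocks must then be reclaimed through the deleted-key table. Below is a small self-contained sketch of that invariant using a plain TreeMap as a stand-in for OmMultipartKeyInfo; the class, key names, and values are made up for illustration.
import java.util.TreeMap;

public class PartReplacementSketch {
  public static void main(String[] args) {
    // Stand-in for OmMultipartKeyInfo's part map: part number -> part name.
    TreeMap<Integer, String> parts = new TreeMap<>();
    parts.put(2, "/vol1/bucket1/key1-uploadId-attempt1");
    String replaced = parts.put(2, "/vol1/bucket1/key1-uploadId-attempt2");
    // 'replaced' is non-null: the earlier attempt was displaced. In the OM the
    // displaced PartKeyInfo is handed to the response so that its blocks end
    // up in the deleted-key table and are garbage collected.
    System.out.println("replaced part entry: " + replaced);
  }
}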
use of org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartCommitUploadPartRequest in project ozone by apache.
the class S3MultipartUploadCommitPartRequest method preExecute.
@Override
public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
MultipartCommitUploadPartRequest multipartCommitUploadPartRequest =
    getOmRequest().getCommitMultiPartUploadRequest();
KeyArgs keyArgs = multipartCommitUploadPartRequest.getKeyArgs();
String keyPath = keyArgs.getKeyName();
keyPath = validateAndNormalizeKey(ozoneManager.getEnableFileSystemPaths(),
    keyPath, getBucketLayout());
return getOmRequest().toBuilder()
    .setCommitMultiPartUploadRequest(multipartCommitUploadPartRequest.toBuilder()
        .setKeyArgs(keyArgs.toBuilder()
            .setModificationTime(Time.now())
            .setKeyName(keyPath)))
    .setUserInfo(getUserInfo())
    .build();
}
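preExecute rewrites the key path through validateAndNormalizeKey and stamps the modification time on the server side before the request is replicated. As an illustration of the kind of path normalization involved, here is a stand-in sketch using java.nio.file.Paths; it is not the actual Ozone utility, which additionally validates the key name.
import java.nio.file.Paths;

public class KeyPathNormalizeSketch {
  public static void main(String[] args) {
    // Redundant separators and '.' segments collapse, so equivalent spellings
    // map to the same key when file-system paths are enabled.
    String raw = "dir1//dir2/./part-1";
    String normalized = Paths.get(raw).normalize().toString();
    System.out.println(raw + " -> " + normalized); // dir1/dir2/part-1
  }
}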