Use of org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartInfoInitiateRequest in project ozone by apache.
The class OzoneManagerProtocolClientSideTranslatorPB, method initiateMultipartUpload.
/**
 * Initiate a multipart upload for the given key.
 *
 * @return the OmMultipartInfo holding the volume, bucket, key name and
 * the uploadID assigned by the OM.
 */
@Override
public OmMultipartInfo initiateMultipartUpload(OmKeyArgs omKeyArgs)
    throws IOException {
  MultipartInfoInitiateRequest.Builder multipartInfoInitiateRequest =
      MultipartInfoInitiateRequest.newBuilder();
  KeyArgs.Builder keyArgs = KeyArgs.newBuilder()
      .setVolumeName(omKeyArgs.getVolumeName())
      .setBucketName(omKeyArgs.getBucketName())
      .setKeyName(omKeyArgs.getKeyName())
      .addAllAcls(omKeyArgs.getAcls().stream()
          .map(OzoneAcl::toProtobuf).collect(Collectors.toList()));
  if (omKeyArgs.getReplicationConfig() != null) {
    keyArgs.setFactor(
        ReplicationConfig.getLegacyFactor(omKeyArgs.getReplicationConfig()));
    keyArgs.setType(omKeyArgs.getReplicationConfig().getReplicationType());
  }
  multipartInfoInitiateRequest.setKeyArgs(keyArgs.build());
  OMRequest omRequest = createOMRequest(Type.InitiateMultiPartUpload)
      .setInitiateMultiPartUploadRequest(multipartInfoInitiateRequest.build())
      .build();
  MultipartInfoInitiateResponse resp = handleError(submitRequest(omRequest))
      .getInitiateMultiPartUploadResponse();
  return new OmMultipartInfo(resp.getVolumeName(), resp.getBucketName(),
      resp.getKeyName(), resp.getMultipartUploadID());
}
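In practice this translator sits behind the Ozone client API rather than being called directly. The following is a minimal usage sketch; the volume, bucket, and key names are hypothetical, and the exact client factory overload may vary by Ozone version.
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientFactory;
import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;

public final class InitiateMpuExample {
  public static void main(String[] args) throws Exception {
    // The RPC client routes this call through
    // OzoneManagerProtocolClientSideTranslatorPB.initiateMultipartUpload().
    try (OzoneClient client =
        OzoneClientFactory.getRpcClient(new OzoneConfiguration())) {
      OzoneBucket bucket = client.getObjectStore()
          .getVolume("vol1")       // hypothetical volume
          .getBucket("bucket1");   // hypothetical bucket
      OmMultipartInfo info = bucket.initiateMultipartUpload("key1");
      System.out.println("uploadID = " + info.getUploadID());
    }
  }
}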
Use of org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartInfoInitiateRequest in project ozone by apache.
The class S3InitiateMultipartUploadRequestWithFSO, method validateAndUpdateCache.
@Override
@SuppressWarnings("methodlength")
public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
    long transactionLogIndex,
    OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
  MultipartInfoInitiateRequest multipartInfoInitiateRequest =
      getOmRequest().getInitiateMultiPartUploadRequest();
  KeyArgs keyArgs = multipartInfoInitiateRequest.getKeyArgs();
  Preconditions.checkNotNull(keyArgs.getMultipartUploadID());
  Map<String, String> auditMap = buildKeyArgsAuditMap(keyArgs);
  String volumeName = keyArgs.getVolumeName();
  String bucketName = keyArgs.getBucketName();
  final String requestedVolume = volumeName;
  final String requestedBucket = bucketName;
  String keyName = keyArgs.getKeyName();
  OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
  ozoneManager.getMetrics().incNumInitiateMultipartUploads();
  boolean acquiredBucketLock = false;
  IOException exception = null;
  OmMultipartKeyInfo multipartKeyInfo = null;
  OmKeyInfo omKeyInfo = null;
  List<OmDirectoryInfo> missingParentInfos;
  Result result = null;
  OMResponse.Builder omResponse =
      OmResponseUtil.getOMResponseBuilder(getOmRequest());
  OMClientResponse omClientResponse = null;
  try {
    keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap);
    volumeName = keyArgs.getVolumeName();
    bucketName = keyArgs.getBucketName();
    // TODO: support S3 ACLs later.
    acquiredBucketLock = omMetadataManager.getLock()
        .acquireWriteLock(BUCKET_LOCK, volumeName, bucketName);
    validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
    OMFileRequest.OMPathInfoWithFSO pathInfoFSO =
        OMFileRequest.verifyDirectoryKeysInPath(omMetadataManager,
            volumeName, bucketName, keyName, Paths.get(keyName));
    // Check whether the directory already exists in the OM.
    checkDirectoryResult(keyName, pathInfoFSO.getDirectoryResult());
    // Add all missing parent directories to the directory table.
    missingParentInfos = OMDirectoryCreateRequestWithFSO
        .getAllMissingParentDirInfo(ozoneManager, keyArgs, pathInfoFSO,
            transactionLogIndex);
    // The uploadId is appended to the key so that concurrent multipart
    // uploads on the same key stay isolated: each client uploads its own
    // parts, and whichever upload commits last is the one visible in
    // Ozone. Without the id, parts from different clients would all be
    // written to the same key /volume/bucket/key, and the final object
    // could be a mix of parts from multiple uploads.
    // Consequently, each initiation of a multipart upload on the same key
    // stores a separate entry in the openKey table.
    // This matches AWS S3 behavior: every initiate-multipart-upload
    // request returns a fresh uploadId, even when the key already exists.
    String multipartKey = omMetadataManager.getMultipartKey(volumeName,
        bucketName, keyName, keyArgs.getMultipartUploadID());
    String multipartOpenKey = omMetadataManager.getMultipartKey(
        pathInfoFSO.getLastKnownParentId(), pathInfoFSO.getLeafNodeName(),
        keyArgs.getMultipartUploadID());
    // Even if this key already exists in the KeyTable, that is handled by
    // the final complete-multipart-upload step. AWS S3 behaves the same
    // way: a user can initiate an MPU even when the key already exists in
    // the bucket.
    final ReplicationConfig replicationConfig = ReplicationConfig
        .fromProtoTypeAndFactor(keyArgs.getType(), keyArgs.getFactor());
    multipartKeyInfo = new OmMultipartKeyInfo.Builder()
        .setUploadID(keyArgs.getMultipartUploadID())
        .setCreationTime(keyArgs.getModificationTime())
        .setReplicationConfig(replicationConfig)
        .setObjectID(pathInfoFSO.getLeafNodeObjectId())
        .setUpdateID(transactionLogIndex)
        .setParentID(pathInfoFSO.getLastKnownParentId()).build();
    omKeyInfo = new OmKeyInfo.Builder()
        .setVolumeName(volumeName).setBucketName(bucketName)
        .setKeyName(keyArgs.getKeyName())
        .setCreationTime(keyArgs.getModificationTime())
        .setModificationTime(keyArgs.getModificationTime())
        .setReplicationConfig(replicationConfig)
        .setOmKeyLocationInfos(Collections.singletonList(
            new OmKeyLocationInfoGroup(0, new ArrayList<>())))
        .setAcls(OzoneAclUtil.fromProtobuf(keyArgs.getAclsList()))
        .setObjectID(pathInfoFSO.getLeafNodeObjectId())
        .setUpdateID(transactionLogIndex)
        .setFileEncryptionInfo(keyArgs.hasFileEncryptionInfo()
            ? OMPBHelper.convert(keyArgs.getFileEncryptionInfo()) : null)
        .setParentObjectID(pathInfoFSO.getLastKnownParentId()).build();
    // Add cache entries for the prefix directories; skip the file key
    // itself until the key commit.
    OMFileRequest.addDirectoryTableCacheEntries(omMetadataManager,
        Optional.absent(), Optional.of(missingParentInfos),
        transactionLogIndex);
    OMFileRequest.addOpenFileTableCacheEntry(omMetadataManager,
        multipartOpenKey, omKeyInfo, pathInfoFSO.getLeafNodeName(),
        transactionLogIndex);
    // Add to cache.
    omMetadataManager.getMultipartInfoTable().addCacheEntry(
        new CacheKey<>(multipartKey),
        new CacheValue<>(Optional.of(multipartKeyInfo), transactionLogIndex));
    omClientResponse = new S3InitiateMultipartUploadResponseWithFSO(
        omResponse.setInitiateMultiPartUploadResponse(
            MultipartInfoInitiateResponse.newBuilder()
                .setVolumeName(requestedVolume).setBucketName(requestedBucket)
                .setKeyName(keyName)
                .setMultipartUploadID(keyArgs.getMultipartUploadID())).build(),
        multipartKeyInfo, omKeyInfo, multipartKey, missingParentInfos,
        getBucketLayout());
    result = Result.SUCCESS;
  } catch (IOException ex) {
    result = Result.FAILURE;
    exception = ex;
    omClientResponse = new S3InitiateMultipartUploadResponseWithFSO(
        createErrorOMResponse(omResponse, exception), getBucketLayout());
  } finally {
    addResponseToDoubleBuffer(transactionLogIndex, omClientResponse,
        ozoneManagerDoubleBufferHelper);
    if (acquiredBucketLock) {
      omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName,
          bucketName);
    }
  }
  logResult(ozoneManager, multipartInfoInitiateRequest, auditMap, volumeName,
      bucketName, keyName, exception, result);
  return omClientResponse;
}
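Note the two different keys built above: multipartKey is path-based and indexes the multipartInfoTable, while multipartOpenKey is parent-id-based and indexes the openFileTable, which is what lets the FSO layout resolve the open file without walking the full path. The sketch below is illustration only; the actual separators and encoding are OMMetadataManager codec details, so every literal here is an assumption.
public final class FsoMpuKeyShapes {
  public static void main(String[] args) {
    String uploadId = "f3b1c2d4-example-42"; // hypothetical upload ID
    long parentObjectId = 1052L;             // hypothetical parent directory object ID
    // multipartInfoTable key: derived from the full key path plus upload ID.
    String multipartKey = "/vol1/bucket1/dir1/file1/" + uploadId;
    // openFileTable key (FSO): parent object ID + leaf file name + upload ID.
    String multipartOpenKey = parentObjectId + "/file1/" + uploadId;
    System.out.println(multipartKey);
    System.out.println(multipartOpenKey);
  }
}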
Use of org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartInfoInitiateRequest in project ozone by apache.
The class S3InitiateMultipartUploadRequest, method validateAndUpdateCache.
@Override
@SuppressWarnings("methodlength")
public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
    long transactionLogIndex,
    OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
  MultipartInfoInitiateRequest multipartInfoInitiateRequest =
      getOmRequest().getInitiateMultiPartUploadRequest();
  KeyArgs keyArgs = multipartInfoInitiateRequest.getKeyArgs();
  Preconditions.checkNotNull(keyArgs.getMultipartUploadID());
  Map<String, String> auditMap = buildKeyArgsAuditMap(keyArgs);
  String volumeName = keyArgs.getVolumeName();
  String bucketName = keyArgs.getBucketName();
  final String requestedVolume = volumeName;
  final String requestedBucket = bucketName;
  String keyName = keyArgs.getKeyName();
  OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
  ozoneManager.getMetrics().incNumInitiateMultipartUploads();
  boolean acquiredBucketLock = false;
  IOException exception = null;
  OmMultipartKeyInfo multipartKeyInfo = null;
  OmKeyInfo omKeyInfo = null;
  Result result = null;
  long objectID = ozoneManager.getObjectIdFromTxId(transactionLogIndex);
  OMResponse.Builder omResponse =
      OmResponseUtil.getOMResponseBuilder(getOmRequest());
  OMClientResponse omClientResponse = null;
  try {
    keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap);
    volumeName = keyArgs.getVolumeName();
    bucketName = keyArgs.getBucketName();
    // Check ACLs.
    checkKeyAcls(ozoneManager, volumeName, bucketName, keyName,
        IAccessAuthorizer.ACLType.CREATE, OzoneObj.ResourceType.KEY);
    acquiredBucketLock = omMetadataManager.getLock()
        .acquireWriteLock(BUCKET_LOCK, volumeName, bucketName);
    validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
    // The uploadId is appended to the key so that concurrent multipart
    // uploads on the same key stay isolated: each client uploads its own
    // parts, and whichever upload commits last is the one visible in
    // Ozone. Without the id, parts from different clients would all be
    // written to the same key /volume/bucket/key, and the final object
    // could be a mix of parts from multiple uploads.
    // Consequently, each initiation of a multipart upload on the same key
    // stores a separate entry in the openKey table.
    // This matches AWS S3 behavior: every initiate-multipart-upload
    // request returns a fresh uploadId, even when the key already exists.
    String multipartKey = omMetadataManager.getMultipartKey(volumeName,
        bucketName, keyName, keyArgs.getMultipartUploadID());
    // Even if this key already exists in the KeyTable, that is handled by
    // the final complete-multipart-upload step. AWS S3 behaves the same
    // way: a user can initiate an MPU even when the key already exists in
    // the bucket.
    final ReplicationConfig replicationConfig = ReplicationConfig
        .fromProtoTypeAndFactor(keyArgs.getType(), keyArgs.getFactor());
    multipartKeyInfo = new OmMultipartKeyInfo.Builder()
        .setUploadID(keyArgs.getMultipartUploadID())
        .setCreationTime(keyArgs.getModificationTime())
        .setReplicationConfig(replicationConfig)
        .setObjectID(objectID)
        .setUpdateID(transactionLogIndex).build();
    OmBucketInfo bucketInfo = omMetadataManager.getBucketTable()
        .get(omMetadataManager.getBucketKey(volumeName, bucketName));
    omKeyInfo = new OmKeyInfo.Builder()
        .setVolumeName(volumeName).setBucketName(bucketName)
        .setKeyName(keyArgs.getKeyName())
        .setCreationTime(keyArgs.getModificationTime())
        .setModificationTime(keyArgs.getModificationTime())
        .setReplicationConfig(replicationConfig)
        .setOmKeyLocationInfos(Collections.singletonList(
            new OmKeyLocationInfoGroup(0, new ArrayList<>())))
        .setAcls(getAclsForKey(keyArgs, bucketInfo,
            ozoneManager.getPrefixManager()))
        .setObjectID(objectID)
        .setUpdateID(transactionLogIndex)
        .setFileEncryptionInfo(keyArgs.hasFileEncryptionInfo()
            ? OMPBHelper.convert(keyArgs.getFileEncryptionInfo()) : null)
        .build();
    // Add to cache.
    omMetadataManager.getOpenKeyTable(getBucketLayout()).addCacheEntry(
        new CacheKey<>(multipartKey),
        new CacheValue<>(Optional.of(omKeyInfo), transactionLogIndex));
    omMetadataManager.getMultipartInfoTable().addCacheEntry(
        new CacheKey<>(multipartKey),
        new CacheValue<>(Optional.of(multipartKeyInfo), transactionLogIndex));
    omClientResponse = new S3InitiateMultipartUploadResponse(
        omResponse.setInitiateMultiPartUploadResponse(
            MultipartInfoInitiateResponse.newBuilder()
                .setVolumeName(requestedVolume).setBucketName(requestedBucket)
                .setKeyName(keyName)
                .setMultipartUploadID(keyArgs.getMultipartUploadID())).build(),
        multipartKeyInfo, omKeyInfo, getBucketLayout());
    result = Result.SUCCESS;
  } catch (IOException ex) {
    result = Result.FAILURE;
    exception = ex;
    omClientResponse = new S3InitiateMultipartUploadResponse(
        createErrorOMResponse(omResponse, exception), getBucketLayout());
  } finally {
    addResponseToDoubleBuffer(transactionLogIndex, omClientResponse,
        ozoneManagerDoubleBufferHelper);
    if (acquiredBucketLock) {
      omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName,
          bucketName);
    }
  }
  logResult(ozoneManager, multipartInfoInitiateRequest, auditMap, volumeName,
      bucketName, keyName, exception, result);
  return omClientResponse;
}
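The openKey-table reasoning in the comments above can be observed from the client side: initiating an MPU twice on the same key yields two independent upload sessions with distinct IDs. A sketch under the same assumptions as the earlier client example (hypothetical volume, bucket, and key names):
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientFactory;
import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;

public final class RepeatedInitiateExample {
  public static void main(String[] args) throws Exception {
    try (OzoneClient client =
        OzoneClientFactory.getRpcClient(new OzoneConfiguration())) {
      OzoneBucket bucket = client.getObjectStore()
          .getVolume("vol1").getBucket("bucket1"); // hypothetical names
      // Each initiation creates its own openKeyTable entry keyed by
      // uploadID, so the two IDs below should always differ.
      OmMultipartInfo first = bucket.initiateMultipartUpload("key1");
      OmMultipartInfo second = bucket.initiateMultipartUpload("key1");
      System.out.println(
          first.getUploadID().equals(second.getUploadID())); // false
    }
  }
}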
Use of org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartInfoInitiateRequest in project ozone by apache.
The class S3InitiateMultipartUploadRequest, method preExecute.
@Override
public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
  MultipartInfoInitiateRequest multipartInfoInitiateRequest =
      getOmRequest().getInitiateMultiPartUploadRequest();
  Preconditions.checkNotNull(multipartInfoInitiateRequest);
  KeyArgs keyArgs = multipartInfoInitiateRequest.getKeyArgs();
  String keyPath = keyArgs.getKeyName();
  keyPath = validateAndNormalizeKey(ozoneManager.getEnableFileSystemPaths(),
      keyPath, getBucketLayout());
  KeyArgs.Builder newKeyArgs = keyArgs.toBuilder()
      .setMultipartUploadID(
          UUID.randomUUID().toString() + "-" + UniqueId.next())
      .setModificationTime(Time.now())
      .setKeyName(keyPath);
  generateRequiredEncryptionInfo(keyArgs, newKeyArgs, ozoneManager);
  return getOmRequest().toBuilder()
      .setUserInfo(getUserInfo())
      .setInitiateMultiPartUploadRequest(
          multipartInfoInitiateRequest.toBuilder().setKeyArgs(newKeyArgs))
      .build();
}
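preExecute is where the uploadID is minted: a random UUID suffixed with a value from UniqueId.next(), so IDs stay distinct even for back-to-back initiations on the same key. Below is a minimal sketch of the same shape, with an AtomicLong standing in for Ozone's UniqueId helper (an assumption; the real helper's internals differ).
import java.util.UUID;
import java.util.concurrent.atomic.AtomicLong;

public final class UploadIdShape {
  // Stand-in for Ozone's UniqueId.next(): a process-wide increasing counter.
  private static final AtomicLong COUNTER = new AtomicLong();

  static String newUploadId() {
    // Same "<uuid>-<unique suffix>" shape that preExecute() builds.
    return UUID.randomUUID().toString() + "-" + COUNTER.incrementAndGet();
  }

  public static void main(String[] args) {
    System.out.println(newUploadId()); // e.g. 2f6c...-1
    System.out.println(newUploadId()); // e.g. 9a01...-2
  }
}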