Use of org.apache.hadoop.ozone.om.response.s3.multipart.S3InitiateMultipartUploadResponseWithFSO in project ozone by apache.
The response class is constructed in the validateAndUpdateCache method of S3InitiateMultipartUploadRequestWithFSO, which handles S3 initiate-multipart-upload requests for file-system-optimized (FSO) buckets.
@Override
@SuppressWarnings("methodlength")
public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
    long transactionLogIndex,
    OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
  MultipartInfoInitiateRequest multipartInfoInitiateRequest =
      getOmRequest().getInitiateMultiPartUploadRequest();
  KeyArgs keyArgs = multipartInfoInitiateRequest.getKeyArgs();
  Preconditions.checkNotNull(keyArgs.getMultipartUploadID());
  Map<String, String> auditMap = buildKeyArgsAuditMap(keyArgs);
  String volumeName = keyArgs.getVolumeName();
  String bucketName = keyArgs.getBucketName();
  final String requestedVolume = volumeName;
  final String requestedBucket = bucketName;
  String keyName = keyArgs.getKeyName();
  OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
  ozoneManager.getMetrics().incNumInitiateMultipartUploads();
  boolean acquiredBucketLock = false;
  IOException exception = null;
  OmMultipartKeyInfo multipartKeyInfo = null;
  OmKeyInfo omKeyInfo = null;
  List<OmDirectoryInfo> missingParentInfos;
  Result result = null;
  OMResponse.Builder omResponse =
      OmResponseUtil.getOMResponseBuilder(getOmRequest());
  OMClientResponse omClientResponse = null;
  try {
    keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap);
    volumeName = keyArgs.getVolumeName();
    bucketName = keyArgs.getBucketName();
    // TODO to support S3 ACL later.
    acquiredBucketLock = omMetadataManager.getLock()
        .acquireWriteLock(BUCKET_LOCK, volumeName, bucketName);
    validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
    OMFileRequest.OMPathInfoWithFSO pathInfoFSO =
        OMFileRequest.verifyDirectoryKeysInPath(omMetadataManager,
            volumeName, bucketName, keyName, Paths.get(keyName));
    // Check whether the key path already exists as a directory in OM.
    checkDirectoryResult(keyName, pathInfoFSO.getDirectoryResult());
    // Add all missing parent directories to the directory table.
    missingParentInfos =
        OMDirectoryCreateRequestWithFSO.getAllMissingParentDirInfo(
            ozoneManager, keyArgs, pathInfoFSO, transactionLogIndex);
    // The uploadId is appended to the key so that concurrent multipart
    // uploads on the same key stay isolated. Each client uploads its own
    // parts, and whichever upload is finally committed is the key that
    // becomes visible in Ozone. Without the id, parts uploaded by
    // different clients against the same /volume/bucket/key would be
    // applied to a single key, and the final key could be a mix of parts
    // from multiple uploads. So when multipart upload is initiated
    // multiple times for the same key, we store multiple entries in the
    // openKey table. This matches AWS S3 behavior: every
    // initiate-multipart-upload request returns a new uploadId, even
    // when the key already exists.
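    // For example, with uploadIds "idA" and "idB" (illustrative values),
    // the open-key table ends up with two independent entries along the
    // lines of:
    //   <parentId>/key1/idA
    //   <parentId>/key1/idB
    // so parts uploaded under idA can never mix with parts under idB.
    // The exact key format is produced by the getMultipartKey calls
    // below; the layout shown here is only a sketch.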
    String multipartKey = omMetadataManager.getMultipartKey(volumeName,
        bucketName, keyName, keyArgs.getMultipartUploadID());
    String multipartOpenKey = omMetadataManager.getMultipartKey(
        pathInfoFSO.getLastKnownParentId(), pathInfoFSO.getLeafNodeName(),
        keyArgs.getMultipartUploadID());
    // Even if this key already exists in the key table, it is handled
    // when the multipart upload is completed. AWS S3 behaves the same
    // way: a user can initiate a multipart upload even when the key
    // already exists in the bucket.
    final ReplicationConfig replicationConfig =
        ReplicationConfig.fromProtoTypeAndFactor(keyArgs.getType(),
            keyArgs.getFactor());
    multipartKeyInfo = new OmMultipartKeyInfo.Builder()
        .setUploadID(keyArgs.getMultipartUploadID())
        .setCreationTime(keyArgs.getModificationTime())
        .setReplicationConfig(replicationConfig)
        .setObjectID(pathInfoFSO.getLeafNodeObjectId())
        .setUpdateID(transactionLogIndex)
        .setParentID(pathInfoFSO.getLastKnownParentId())
        .build();
    omKeyInfo = new OmKeyInfo.Builder()
        .setVolumeName(volumeName)
        .setBucketName(bucketName)
        .setKeyName(keyArgs.getKeyName())
        .setCreationTime(keyArgs.getModificationTime())
        .setModificationTime(keyArgs.getModificationTime())
        .setReplicationConfig(replicationConfig)
        .setOmKeyLocationInfos(Collections.singletonList(
            new OmKeyLocationInfoGroup(0, new ArrayList<>())))
        .setAcls(OzoneAclUtil.fromProtobuf(keyArgs.getAclsList()))
        .setObjectID(pathInfoFSO.getLeafNodeObjectId())
        .setUpdateID(transactionLogIndex)
        .setFileEncryptionInfo(keyArgs.hasFileEncryptionInfo()
            ? OMPBHelper.convert(keyArgs.getFileEncryptionInfo()) : null)
        .setParentObjectID(pathInfoFSO.getLastKnownParentId())
        .build();
    // Add cache entries for the prefix directories.
    // Skip adding for the file key itself, until Key Commit.
    OMFileRequest.addDirectoryTableCacheEntries(omMetadataManager,
        Optional.absent(), Optional.of(missingParentInfos),
        transactionLogIndex);
    OMFileRequest.addOpenFileTableCacheEntry(omMetadataManager,
        multipartOpenKey, omKeyInfo, pathInfoFSO.getLeafNodeName(),
        transactionLogIndex);
    // Add the multipart info to the cache.
    omMetadataManager.getMultipartInfoTable().addCacheEntry(
        new CacheKey<>(multipartKey),
        new CacheValue<>(Optional.of(multipartKeyInfo),
            transactionLogIndex));
    omClientResponse = new S3InitiateMultipartUploadResponseWithFSO(
        omResponse.setInitiateMultiPartUploadResponse(
            MultipartInfoInitiateResponse.newBuilder()
                .setVolumeName(requestedVolume)
                .setBucketName(requestedBucket)
                .setKeyName(keyName)
                .setMultipartUploadID(keyArgs.getMultipartUploadID()))
            .build(),
        multipartKeyInfo, omKeyInfo, multipartKey, missingParentInfos,
        getBucketLayout());
    result = Result.SUCCESS;
  } catch (IOException ex) {
    result = Result.FAILURE;
    exception = ex;
    omClientResponse = new S3InitiateMultipartUploadResponseWithFSO(
        createErrorOMResponse(omResponse, exception), getBucketLayout());
  } finally {
    addResponseToDoubleBuffer(transactionLogIndex, omClientResponse,
        ozoneManagerDoubleBufferHelper);
    if (acquiredBucketLock) {
      omMetadataManager.getLock()
          .releaseWriteLock(BUCKET_LOCK, volumeName, bucketName);
    }
  }
  logResult(ozoneManager, multipartInfoInitiateRequest, auditMap,
      volumeName, bucketName, keyName, exception, result);
  return omClientResponse;
}
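For context, here is a minimal client-side sketch of the call that reaches this handler. The volume, bucket, and key names are illustrative, and the OzoneBucket multipart API used here (initiateMultipartUpload returning an OmMultipartInfo) is assumed to match the client version in use; treat it as a sketch rather than definitive usage.

import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientFactory;
import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;

public final class InitiateMpuSketch {
  public static void main(String[] args) throws Exception {
    OzoneConfiguration conf = new OzoneConfiguration();
    try (OzoneClient client = OzoneClientFactory.getRpcClient(conf)) {
      // "vol1" and "fso-bucket" are illustrative names.
      OzoneBucket bucket = client.getObjectStore()
          .getVolume("vol1").getBucket("fso-bucket");
      // Each initiate call returns a fresh uploadId, even if the key
      // already exists, matching the AWS S3 behavior noted above.
      OmMultipartInfo info = bucket.initiateMultipartUpload(
          "dir1/dir2/key1", ReplicationType.RATIS,
          ReplicationFactor.THREE);
      String uploadId = info.getUploadID();
      System.out.println("uploadId: " + uploadId);
    }
  }
}

On the Ozone Manager side this call lands in validateAndUpdateCache above, which records the uploadId-scoped entries in the multipart info table and open file table caches; parts are subsequently uploaded and committed against that uploadId.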