Use of org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PartKeyInfo in project ozone by apache.
The class S3MultipartUploadAbortRequest, method validateAndUpdateCache.
@Override
public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
MultipartUploadAbortRequest multipartUploadAbortRequest = getOmRequest().getAbortMultiPartUploadRequest();
OzoneManagerProtocolProtos.KeyArgs keyArgs = multipartUploadAbortRequest.getKeyArgs();
Map<String, String> auditMap = buildKeyArgsAuditMap(keyArgs);
String volumeName = keyArgs.getVolumeName();
String bucketName = keyArgs.getBucketName();
// Remember the requested names for error messages; resolveBucketLink below
// may rewrite keyArgs to point at the linked bucket.
final String requestedVolume = volumeName;
final String requestedBucket = bucketName;
String keyName = keyArgs.getKeyName();
ozoneManager.getMetrics().incNumAbortMultipartUploads();
OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
boolean acquiredLock = false;
IOException exception = null;
OmMultipartKeyInfo multipartKeyInfo = null;
String multipartKey = null;
OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(getOmRequest());
OMClientResponse omClientResponse = null;
Result result = null;
OmBucketInfo omBucketInfo = null;
try {
keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap);
volumeName = keyArgs.getVolumeName();
bucketName = keyArgs.getBucketName();
// Check ACLs.
checkKeyAcls(ozoneManager, volumeName, bucketName, keyName, IAccessAuthorizer.ACLType.WRITE, OzoneObj.ResourceType.KEY);
acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volumeName, bucketName);
validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
multipartKey = omMetadataManager.getMultipartKey(volumeName, bucketName, keyName, keyArgs.getMultipartUploadID());
String multipartOpenKey;
try {
multipartOpenKey = getMultipartOpenKey(keyArgs.getMultipartUploadID(), volumeName, bucketName, keyName, omMetadataManager);
} catch (OMException ome) {
throw new OMException("Abort Multipart Upload Failed: volume: " + requestedVolume + ", bucket: " + requestedBucket + ", key: " + keyName, ome, OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR);
}
OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable(getBucketLayout()).get(multipartOpenKey);
omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName);
// If there is no entry in the open key table, then no multipart
// upload was initiated for this key.
if (omKeyInfo == null) {
throw new OMException("Abort Multipart Upload Failed: volume: " + requestedVolume + ", bucket: " + requestedBucket + ", key: " + keyName, OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR);
}
multipartKeyInfo = omMetadataManager.getMultipartInfoTable().get(multipartKey);
multipartKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());
// When aborting an upload, subtract each part's data size (times the
// replication factor) from the bucket usedBytes.
long quotaReleased = 0;
int keyFactor = omKeyInfo.getReplicationConfig().getRequiredNodes();
for (PartKeyInfo iterPartKeyInfo : multipartKeyInfo.getPartKeyInfoMap().values()) {
quotaReleased += iterPartKeyInfo.getPartKeyInfo().getDataSize() * keyFactor;
}
omBucketInfo.incrUsedBytes(-quotaReleased);
// Update the cache of the open key table and the multipart info table.
// No need to add cache entries to the delete table, as delete table
// entries are not used by any read/write operations.
omMetadataManager.getOpenKeyTable(getBucketLayout()).addCacheEntry(new CacheKey<>(multipartOpenKey), new CacheValue<>(Optional.absent(), trxnLogIndex));
omMetadataManager.getMultipartInfoTable().addCacheEntry(new CacheKey<>(multipartKey), new CacheValue<>(Optional.absent(), trxnLogIndex));
omClientResponse = getOmClientResponse(ozoneManager, multipartKeyInfo, multipartKey, multipartOpenKey, omResponse, omBucketInfo);
result = Result.SUCCESS;
} catch (IOException ex) {
result = Result.FAILURE;
exception = ex;
omClientResponse = getOmClientResponse(exception, omResponse);
} finally {
addResponseToDoubleBuffer(trxnLogIndex, omClientResponse, omDoubleBufferHelper);
if (acquiredLock) {
omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, bucketName);
}
}
// audit log
auditLog(ozoneManager.getAuditLogger(), buildAuditMessage(OMAction.ABORT_MULTIPART_UPLOAD, auditMap, exception, getOmRequest().getUserInfo()));
switch(result) {
case SUCCESS:
LOG.debug("Abort Multipart request is successfully completed for " + "KeyName {} in VolumeName/Bucket {}/{}", keyName, volumeName, bucketName);
break;
case FAILURE:
ozoneManager.getMetrics().incNumAbortMultipartUploadFails();
LOG.error("Abort Multipart request is failed for KeyName {} in " + "VolumeName/Bucket {}/{}", keyName, volumeName, bucketName, exception);
break;
default:
LOG.error("Unrecognized Result for S3MultipartUploadAbortRequest: {}", multipartUploadAbortRequest);
}
return omClientResponse;
}
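Note how the quota release above is computed: each part's raw data size is multiplied by the replication factor (the required node count from the key's ReplicationConfig), and the products are summed over all parts. A minimal self-contained sketch of that arithmetic, with hypothetical class and variable names (this is not Ozone code):

import java.util.Map;
import java.util.TreeMap;

// Illustrative sketch, not Ozone code: quota released when aborting an upload.
public final class AbortQuotaSketch {

  // Quota released = sum of each part's data size times the replication factor.
  static long quotaReleased(Map<Integer, Long> partSizes, int replicationFactor) {
    long released = 0;
    for (long size : partSizes.values()) {
      released += size * replicationFactor;
    }
    return released;
  }

  public static void main(String[] args) {
    TreeMap<Integer, Long> parts = new TreeMap<>();
    parts.put(1, 5L * 1024 * 1024); // part 1: 5 MiB
    parts.put(2, 7L * 1024 * 1024); // part 2: 7 MiB
    // With a replication factor of 3, aborting 12 MiB of parts releases 36 MiB.
    System.out.println(quotaReleased(parts, 3)); // 37748736
  }
}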
Use of org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PartKeyInfo in project ozone by apache.
The class S3MultipartUploadCompleteRequest, method validateAndUpdateCache.
@Override
@SuppressWarnings("methodlength")
public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
MultipartUploadCompleteRequest multipartUploadCompleteRequest = getOmRequest().getCompleteMultiPartUploadRequest();
KeyArgs keyArgs = multipartUploadCompleteRequest.getKeyArgs();
List<OzoneManagerProtocolProtos.Part> partsList = multipartUploadCompleteRequest.getPartsListList();
Map<String, String> auditMap = buildKeyArgsAuditMap(keyArgs);
String volumeName = keyArgs.getVolumeName();
String bucketName = keyArgs.getBucketName();
// Remember the requested names for error messages; resolveBucketLink below
// may rewrite keyArgs to point at the linked bucket.
final String requestedVolume = volumeName;
final String requestedBucket = bucketName;
String keyName = keyArgs.getKeyName();
String uploadID = keyArgs.getMultipartUploadID();
String multipartKey = null;
ozoneManager.getMetrics().incNumCompleteMultipartUploads();
OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
boolean acquiredLock = false;
OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(getOmRequest());
OMClientResponse omClientResponse = null;
IOException exception = null;
Result result = null;
try {
keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap);
volumeName = keyArgs.getVolumeName();
bucketName = keyArgs.getBucketName();
multipartKey = omMetadataManager.getMultipartKey(volumeName, bucketName, keyName, uploadID);
// Check ACLs.
checkKeyAcls(ozoneManager, volumeName, bucketName, keyName, IAccessAuthorizer.ACLType.WRITE, OzoneObj.ResourceType.KEY);
acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volumeName, bucketName);
validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
OmBucketInfo omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName);
String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName, keyName);
String dbOzoneKey = getDBOzoneKey(omMetadataManager, volumeName, bucketName, keyName);
String dbMultipartOpenKey = getDBMultipartOpenKey(volumeName, bucketName, keyName, uploadID, omMetadataManager);
OmMultipartKeyInfo multipartKeyInfo = omMetadataManager.getMultipartInfoTable().get(multipartKey);
// If a directory already exists with the same name, throw an error.
checkDirectoryAlreadyExists(ozoneManager, volumeName, bucketName, keyName, omMetadataManager);
if (multipartKeyInfo == null) {
throw new OMException(failureMessage(requestedVolume, requestedBucket, keyName), OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR);
}
TreeMap<Integer, PartKeyInfo> partKeyInfoMap = multipartKeyInfo.getPartKeyInfoMap();
if (!partsList.isEmpty()) {
if (partKeyInfoMap.isEmpty()) {
LOG.error("Complete MultipartUpload failed for key {} , MPU Key has" + " no parts in OM, parts given to upload are {}", ozoneKey, partsList);
throw new OMException(failureMessage(requestedVolume, requestedBucket, keyName), OMException.ResultCodes.INVALID_PART);
}
// First, check for invalid part order.
List<Integer> partNumbers = new ArrayList<>();
int partsListSize = getPartsListSize(requestedVolume, requestedBucket, keyName, ozoneKey, partNumbers, partsList);
List<OmKeyLocationInfo> partLocationInfos = new ArrayList<>();
long dataSize = getMultipartDataSize(requestedVolume, requestedBucket, keyName, ozoneKey, partKeyInfoMap, partsListSize, partLocationInfos, partsList, ozoneManager);
// All parts share the same replication information; take it from the
// last part.
OmKeyInfo omKeyInfo = getOmKeyInfo(ozoneManager, trxnLogIndex, keyArgs, volumeName, bucketName, keyName, dbMultipartOpenKey, omMetadataManager, dbOzoneKey, partKeyInfoMap, partLocationInfos, dataSize);
// Find all unused parts.
List<OmKeyInfo> unUsedParts = new ArrayList<>();
for (Map.Entry<Integer, PartKeyInfo> partKeyInfo : partKeyInfoMap.entrySet()) {
if (!partNumbers.contains(partKeyInfo.getKey())) {
unUsedParts.add(OmKeyInfo.getFromProtobuf(partKeyInfo.getValue().getPartKeyInfo()));
}
}
// If bucket versioning is turned on during the update, between key
// creation and key commit, old versions are simply overwritten and
// not kept. Bucket versioning takes effect from the first key
// created after the knob is turned on.
RepeatedOmKeyInfo oldKeyVersionsToDelete = null;
OmKeyInfo keyToDelete = omMetadataManager.getKeyTable(getBucketLayout()).get(dbOzoneKey);
if (keyToDelete != null && !omBucketInfo.getIsVersionEnabled()) {
oldKeyVersionsToDelete = getOldVersionsToCleanUp(dbOzoneKey, keyToDelete, omMetadataManager, trxnLogIndex, ozoneManager.isRatisEnabled());
}
long usedBytesDiff = 0;
if (keyToDelete != null) {
long numCopy = keyToDelete.getReplicationConfig().getRequiredNodes();
usedBytesDiff -= keyToDelete.getDataSize() * numCopy;
}
String dbBucketKey = null;
if (usedBytesDiff != 0) {
omBucketInfo.incrUsedBytes(usedBytesDiff);
dbBucketKey = omMetadataManager.getBucketKey(omBucketInfo.getVolumeName(), omBucketInfo.getBucketName());
} else {
// If the bucket size did not change, skip updating the bucket object.
omBucketInfo = null;
}
updateCache(omMetadataManager, dbBucketKey, omBucketInfo, dbOzoneKey, dbMultipartOpenKey, multipartKey, omKeyInfo, trxnLogIndex);
if (oldKeyVersionsToDelete != null) {
OMFileRequest.addDeletedTableCacheEntry(omMetadataManager, dbOzoneKey, oldKeyVersionsToDelete, trxnLogIndex);
}
omResponse.setCompleteMultiPartUploadResponse(MultipartUploadCompleteResponse.newBuilder().setVolume(requestedVolume).setBucket(requestedBucket).setKey(keyName).setHash(DigestUtils.sha256Hex(keyName)));
omClientResponse = getOmClientResponse(multipartKey, omResponse, dbMultipartOpenKey, omKeyInfo, unUsedParts, omBucketInfo, oldKeyVersionsToDelete);
result = Result.SUCCESS;
} else {
throw new OMException(failureMessage(requestedVolume, requestedBucket, keyName) + " because of empty part list", OMException.ResultCodes.INVALID_REQUEST);
}
} catch (IOException ex) {
result = Result.FAILURE;
exception = ex;
omClientResponse = getOmClientResponse(omResponse, exception);
} finally {
addResponseToDoubleBuffer(trxnLogIndex, omClientResponse, omDoubleBufferHelper);
if (acquiredLock) {
omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, bucketName);
}
}
logResult(ozoneManager, multipartUploadCompleteRequest, partsList, auditMap, volumeName, bucketName, keyName, exception, result);
return omClientResponse;
}
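A subtle point in the complete path above: the parts were already charged against the bucket's usedBytes when they were committed, so completing the upload only needs to release the old key's replicated size when it overwrites an existing key in an unversioned bucket. A hedged sketch of that rule, with hypothetical names (not Ozone code):

// Illustrative sketch, not Ozone code: usedBytes adjustment on MPU complete.
final class CompleteUsedBytesSketch {

  // Returns the delta to apply to bucket usedBytes. oldKeySize is null when
  // no key with this name exists (or versioning keeps the old key alive).
  static long usedBytesDiff(Long oldKeySize, int oldReplicationFactor) {
    if (oldKeySize == null) {
      return 0; // nothing overwritten, nothing to release
    }
    return -(oldKeySize * oldReplicationFactor);
  }

  public static void main(String[] args) {
    System.out.println(usedBytesDiff(null, 3));              // 0
    System.out.println(usedBytesDiff(10L * 1024 * 1024, 3)); // -31457280
  }
}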
Use of org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PartKeyInfo in project ozone by apache.
The class S3MultipartUploadCompleteRequest, method getMultipartDataSize.
@SuppressWarnings("checkstyle:ParameterNumber")
private long getMultipartDataSize(String requestedVolume, String requestedBucket, String keyName, String ozoneKey, TreeMap<Integer, PartKeyInfo> partKeyInfoMap, int partsListSize, List<OmKeyLocationInfo> partLocationInfos, List<OzoneManagerProtocolProtos.Part> partsList, OzoneManager ozoneManager) throws OMException {
long dataSize = 0;
int currentPartCount = 0;
// Now do the actual work, checking for any invalid part along the way.
for (OzoneManagerProtocolProtos.Part part : partsList) {
currentPartCount++;
int partNumber = part.getPartNumber();
String partName = part.getPartName();
PartKeyInfo partKeyInfo = partKeyInfoMap.get(partNumber);
String dbPartName = null;
if (partKeyInfo != null) {
dbPartName = partKeyInfo.getPartName();
}
if (!StringUtils.equals(partName, dbPartName)) {
String omPartName = partKeyInfo == null ? null : dbPartName;
throw new OMException(failureMessage(requestedVolume, requestedBucket, keyName) + ". Provided Part info is { " + partName + ", " + partNumber + "}, whereas OM has partName " + omPartName, OMException.ResultCodes.INVALID_PART);
}
OmKeyInfo currentPartKeyInfo = OmKeyInfo.getFromProtobuf(partKeyInfo.getPartKeyInfo());
// Every part except the last must meet the minimum part size.
if (currentPartCount != partsListSize) {
if (currentPartKeyInfo.getDataSize() < ozoneManager.getMinMultipartUploadPartSize()) {
LOG.error("MultipartUpload: {} Part number: {} size {} is less" + " than minimum part size {}", ozoneKey, partKeyInfo.getPartNumber(), currentPartKeyInfo.getDataSize(), ozoneManager.getMinMultipartUploadPartSize());
throw new OMException(failureMessage(requestedVolume, requestedBucket, keyName) + ". Entity too small.", OMException.ResultCodes.ENTITY_TOO_SMALL);
}
}
// All part keys have only one key-location version.
OmKeyLocationInfoGroup currentKeyInfoGroup = currentPartKeyInfo.getKeyLocationVersions().get(0);
// Set partNumber in each block.
currentKeyInfoGroup.getLocationList().forEach(omKeyLocationInfo -> omKeyLocationInfo.setPartNumber(partNumber));
partLocationInfos.addAll(currentKeyInfoGroup.getLocationList());
dataSize += currentPartKeyInfo.getDataSize();
}
return dataSize;
}
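The checks above reduce to three rules: every requested part must exist in OM under exactly the same part name, every part except the last must meet the minimum part size, and the surviving sizes are summed into the final key size. A compact sketch of those rules, using a hypothetical Part record in place of the protobuf types and an assumed 5 MiB minimum (not Ozone code; requires Java 16+ for records):

import java.util.List;
import java.util.Map;

// Illustrative sketch, not Ozone code: the part checks behind getMultipartDataSize.
final class PartValidationSketch {

  static final long MIN_PART_SIZE = 5L * 1024 * 1024; // assumed 5 MiB minimum

  // Hypothetical stand-in for the protobuf Part / PartKeyInfo pair.
  record Part(int number, String name, long size) { }

  // Returns the total data size, or throws if any part is invalid.
  static long totalDataSize(List<Part> requested, Map<Integer, Part> storedByNumber) {
    long dataSize = 0;
    for (int i = 0; i < requested.size(); i++) {
      Part req = requested.get(i);
      Part stored = storedByNumber.get(req.number());
      // Rule 1: the part must exist in OM and the names must match exactly.
      if (stored == null || !stored.name().equals(req.name())) {
        throw new IllegalArgumentException("INVALID_PART: " + req.name());
      }
      // Rule 2: every part except the last must meet the minimum size.
      if (i < requested.size() - 1 && stored.size() < MIN_PART_SIZE) {
        throw new IllegalArgumentException("ENTITY_TOO_SMALL: part " + req.number());
      }
      // Rule 3: sum the stored sizes into the final key size.
      dataSize += stored.size();
    }
    return dataSize;
  }

  public static void main(String[] args) {
    Part p1 = new Part(1, "part-1", 6L * 1024 * 1024);
    Part p2 = new Part(2, "part-2", 1024); // last part may be small
    System.out.println(totalDataSize(List.of(p1, p2), Map.of(1, p1, 2, p2)));
  }
}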
Use of org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PartKeyInfo in project ozone by apache.
The class S3MultipartUploadAbortResponse, method addToDBBatch.
@Override
public void addToDBBatch(OMMetadataManager omMetadataManager, BatchOperation batchOperation) throws IOException {
// Delete from openKey table and multipart info table.
omMetadataManager.getOpenKeyTable(getBucketLayout()).deleteWithBatch(batchOperation, multipartOpenKey);
omMetadataManager.getMultipartInfoTable().deleteWithBatch(batchOperation, multipartKey);
// Move all the parts to the delete table.
TreeMap<Integer, PartKeyInfo> partKeyInfoMap = omMultipartKeyInfo.getPartKeyInfoMap();
for (Map.Entry<Integer, PartKeyInfo> partKeyInfoEntry : partKeyInfoMap.entrySet()) {
PartKeyInfo partKeyInfo = partKeyInfoEntry.getValue();
OmKeyInfo currentKeyPartInfo = OmKeyInfo.getFromProtobuf(partKeyInfo.getPartKeyInfo());
RepeatedOmKeyInfo repeatedOmKeyInfo = omMetadataManager.getDeletedTable().get(partKeyInfo.getPartName());
repeatedOmKeyInfo = OmUtils.prepareKeyForDelete(currentKeyPartInfo, repeatedOmKeyInfo, omMultipartKeyInfo.getUpdateID(), isRatisEnabled);
omMetadataManager.getDeletedTable().putWithBatch(batchOperation, partKeyInfo.getPartName(), repeatedOmKeyInfo);
}
// Update bucket usedBytes once, after all the parts have been moved.
omMetadataManager.getBucketTable().putWithBatch(batchOperation, omMetadataManager.getBucketKey(omBucketInfo.getVolumeName(), omBucketInfo.getBucketName()), omBucketInfo);
}
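The loop above follows a read-append-write pattern: the deleted table is keyed by part name, and prepareKeyForDelete appends the new key version to whatever RepeatedOmKeyInfo is already stored under that name. A toy sketch of the accumulation pattern, with plain collections standing in for the RocksDB table (hypothetical names, not Ozone code):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Illustrative sketch, not Ozone code: accumulating deleted key versions per part name.
final class DeletedTableSketch {

  // Stand-in for the deleted table: part name -> repeated key-info list.
  static final Map<String, List<String>> DELETED_TABLE = new HashMap<>();

  // Mirrors the get / prepareKeyForDelete / put sequence in addToDBBatch.
  static void moveToDeletedTable(String partName, String keyInfo) {
    DELETED_TABLE.computeIfAbsent(partName, k -> new ArrayList<>()).add(keyInfo);
  }

  public static void main(String[] args) {
    moveToDeletedTable("/vol/bucket/key-part1", "keyInfo-v1");
    moveToDeletedTable("/vol/bucket/key-part1", "keyInfo-v2");
    // Both versions accumulate under the same part name.
    System.out.println(DELETED_TABLE);
  }
}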
Use of org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PartKeyInfo in project ozone by apache.
The class TestS3MultipartUploadCompleteResponseWithFSO, method commitS3MultipartUpload.
@SuppressWarnings("parameterNumber")
private OmKeyInfo commitS3MultipartUpload(String volumeName, String bucketName, String keyName, String multipartUploadID, String fileName, String multipartKey, OmMultipartKeyInfo omMultipartKeyInfo, int deleteEntryCount) throws IOException {
PartKeyInfo part1 = createPartKeyInfoFSO(volumeName, bucketName, parentID, fileName, 1);
addPart(1, part1, omMultipartKeyInfo);
long clientId = Time.now();
String openKey = omMetadataManager.getOpenFileName(parentID, fileName, clientId);
S3MultipartUploadCommitPartResponse s3MultipartUploadCommitPartResponse = createS3CommitMPUResponseFSO(volumeName, bucketName, parentID, keyName, multipartUploadID, omMultipartKeyInfo.getPartKeyInfo(1), omMultipartKeyInfo, OzoneManagerProtocolProtos.Status.OK, openKey);
s3MultipartUploadCommitPartResponse.checkAndUpdateDB(omMetadataManager, batchOperation);
Assert.assertNull(omMetadataManager.getOpenKeyTable(getBucketLayout()).get(multipartKey));
Assert.assertNull(omMetadataManager.getMultipartInfoTable().get(multipartKey));
omMetadataManager.getStore().commitBatchOperation(batchOperation);
// As 1 part was created, 1 entry should be present in the delete table.
Assert.assertEquals(deleteEntryCount, omMetadataManager.countRowsInTable(omMetadataManager.getDeletedTable()));
String part1DeletedKeyName = omMultipartKeyInfo.getPartKeyInfo(1).getPartName();
RepeatedOmKeyInfo ro = omMetadataManager.getDeletedTable().get(part1DeletedKeyName);
Assert.assertNotNull(ro);
OmKeyInfo omPartKeyInfo = OmKeyInfo.getFromProtobuf(part1.getPartKeyInfo());
Assert.assertEquals(omPartKeyInfo, ro.getOmKeyInfoList().get(0));
return omPartKeyInfo;
}
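The test's assertion ordering leans on batch-write semantics: checkAndUpdateDB only stages mutations in the BatchOperation, and nothing becomes visible in the tables until commitBatchOperation is called. A toy sketch of that staging behavior, with plain maps standing in for RocksDB (hypothetical names, not Ozone code):

import java.util.HashMap;
import java.util.Map;

// Toy sketch, not Ozone/RocksDB code: batched writes are invisible until committed.
final class BatchOperationSketch {

  static final Map<String, String> TABLE = new HashMap<>();
  static final Map<String, String> BATCH = new HashMap<>();

  static void putWithBatch(String key, String value) {
    BATCH.put(key, value); // staged only, not yet visible in TABLE
  }

  static void commitBatchOperation() {
    TABLE.putAll(BATCH); // staged writes become visible together
    BATCH.clear();
  }

  public static void main(String[] args) {
    putWithBatch("deletedTable/part1", "repeatedKeyInfo");
    System.out.println(TABLE.get("deletedTable/part1")); // null before commit
    commitBatchOperation();
    System.out.println(TABLE.get("deletedTable/part1")); // visible after commit
  }
}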