Usage example of org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo in the Apache Ozone project: the testValidateAndUpdateCache method of the TestS3InitiateMultipartUploadRequestWithFSO class.
@Test
public void testValidateAndUpdateCache() throws Exception {
  // Arrange: a fresh volume/bucket and a key nested three directories deep.
  String volume = UUID.randomUUID().toString();
  String bucket = UUID.randomUUID().toString();
  String prefix = "a/b/c/";
  List<String> dirs = new ArrayList<>();
  dirs.add("a");
  dirs.add("b");
  dirs.add("c");
  String fileName = UUID.randomUUID().toString();
  String keyName = prefix + fileName;

  // Seed the volume and bucket in the metadata DB and capture the bucket id.
  OMRequestTestUtils.addVolumeAndBucketToDB(volume, bucket, omMetadataManager);
  OmBucketInfo omBucketInfo = omMetadataManager.getBucketTable()
      .get(omMetadataManager.getBucketKey(volume, bucket));
  long bucketID = omBucketInfo.getObjectID();

  // Act: pre-execute the initiate-MPU request, then apply it to the cache.
  OMRequest modifiedRequest =
      doPreExecuteInitiateMPUWithFSO(volume, bucket, keyName);
  S3InitiateMultipartUploadRequest initiateRequest =
      getS3InitiateMultipartUploadReq(modifiedRequest);
  OMClientResponse omClientResponse = initiateRequest.validateAndUpdateCache(
      ozoneManager, 100L, ozoneManagerDoubleBufferHelper);
  Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
      omClientResponse.getOMResponse().getStatus());

  // Assert: every parent directory exists and the open-file entry was added.
  long parentID = verifyDirectoriesInDB(dirs, bucketID);
  String uploadID = modifiedRequest.getInitiateMultiPartUploadRequest()
      .getKeyArgs().getMultipartUploadID();
  String multipartFileKey =
      omMetadataManager.getMultipartKey(volume, bucket, keyName, uploadID);
  String multipartOpenFileKey =
      omMetadataManager.getMultipartKey(parentID, fileName, uploadID);
  OmKeyInfo omKeyInfo = omMetadataManager
      .getOpenKeyTable(initiateRequest.getBucketLayout())
      .get(multipartOpenFileKey);
  Assert.assertNotNull("Failed to find the fileInfo", omKeyInfo);
  Assert.assertEquals("FileName mismatches!", fileName,
      omKeyInfo.getKeyName());
  Assert.assertEquals("ParentId mismatches!", parentID,
      omKeyInfo.getParentObjectID());

  // Assert: the multipart-info table entry matches the request metadata.
  OmMultipartKeyInfo omMultipartKeyInfo =
      omMetadataManager.getMultipartInfoTable().get(multipartFileKey);
  Assert.assertNotNull("Failed to find the multipartFileInfo",
      omMultipartKeyInfo);
  Assert.assertEquals("ParentId mismatches!", parentID,
      omMultipartKeyInfo.getParentID());
  Assert.assertEquals(uploadID, omMultipartKeyInfo.getUploadID());
  long modificationTime = modifiedRequest.getInitiateMultiPartUploadRequest()
      .getKeyArgs().getModificationTime();
  Assert.assertEquals(modificationTime, omKeyInfo.getModificationTime());
  // On initiate, the creation time is expected to equal the modification time.
  Assert.assertEquals(modificationTime, omKeyInfo.getCreationTime());
}
Usage example of org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo in the Apache Ozone project: the testOmMultipartKeyInfoCodec method of the TestOmMultipartKeyInfoCodec class.
@Test
public void testOmMultipartKeyInfoCodec() throws Exception {
  OmMultipartKeyInfoCodec codec = new OmMultipartKeyInfoCodec();
  OmMultipartKeyInfo omMultipartKeyInfo = new OmMultipartKeyInfo.Builder()
      .setUploadID(UUID.randomUUID().toString())
      .setCreationTime(Time.now())
      .setReplicationConfig(RatisReplicationConfig
          .getInstance(HddsProtos.ReplicationFactor.THREE))
      .build();

  // Round-trip through the persisted format. IOExceptions now propagate and
  // fail the test: previously they were swallowed with printStackTrace(),
  // which left `data` as a non-null empty array and let assertNotNull pass
  // even when serialization had failed.
  byte[] data = codec.toPersistedFormat(omMultipartKeyInfo);
  Assert.assertNotNull(data);
  OmMultipartKeyInfo multipartKeyInfo = codec.fromPersistedFormat(data);
  Assert.assertEquals(omMultipartKeyInfo, multipartKeyInfo);

  // Malformed byte data must be rejected with IllegalArgumentException.
  try {
    codec.fromPersistedFormat("random".getBytes(UTF_8));
    // Previously this branch fell through silently; now an absent exception
    // is an explicit test failure.
    Assert.fail("fromPersistedFormat should reject malformed byte data");
  } catch (IllegalArgumentException ex) {
    GenericTestUtils.assertExceptionContains("Can't encode the the raw "
        + "data from the byte array", ex);
  }
}
Usage example of org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo in the Apache Ozone project: the verifyUploadedPart helper method of the TestOzoneClientMultipartUploadWithFSO class.
/**
 * Verifies that an uploaded part is recorded consistently in the OM metadata:
 * the open-key and multipart-info table entries exist, the upload id matches,
 * and every recorded part references the expected key and part name.
 *
 * @return the multipart key under which the upload is tracked
 */
private String verifyUploadedPart(String volumeName, String bucketName, String keyName, String uploadID, String partName, OMMetadataManager metadataMgr) throws IOException {
  // Resolve the bucket layout from the bucket table.
  OzoneManager om = cluster.getOzoneManager();
  OmBucketInfo bucketInfo = om.getMetadataManager().getBucketTable()
      .get(om.getMetadataManager().getBucketKey(volumeName, bucketName));
  BucketLayout layout = bucketInfo.getBucketLayout();

  String multipartOpenKey = getMultipartOpenKey(uploadID, volumeName,
      bucketName, keyName, metadataMgr);
  String multipartKey = metadataMgr.getMultipartKey(volumeName, bucketName,
      keyName, uploadID);

  // Both the open-key entry and the multipart-info entry must be present.
  OmKeyInfo openKeyInfo = metadataMgr.getOpenKeyTable(layout)
      .get(multipartOpenKey);
  OmMultipartKeyInfo multipartInfo = metadataMgr.getMultipartInfoTable()
      .get(multipartKey);
  Assert.assertNotNull(openKeyInfo);
  Assert.assertNotNull(multipartInfo);
  Assert.assertEquals(OzoneFSUtils.getFileName(keyName),
      openKeyInfo.getKeyName());
  Assert.assertEquals(uploadID, multipartInfo.getUploadID());

  // Each recorded part must point at the full key and carry the DB part name.
  for (OzoneManagerProtocolProtos.PartKeyInfo partKeyInfo
      : multipartInfo.getPartKeyInfoMap().values()) {
    OmKeyInfo currentKeyPartInfo =
        OmKeyInfo.getFromProtobuf(partKeyInfo.getPartKeyInfo());
    Assert.assertEquals(keyName, currentKeyPartInfo.getKeyName());
    Assert.assertEquals(partName, partKeyInfo.getPartName());
  }
  return multipartKey;
}
Usage example of org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo in the Apache Ozone project: the testAbortUploadSuccessWithParts method of the TestOzoneClientMultipartUploadWithFSO class.
@Test
public void testAbortUploadSuccessWithParts() throws Exception {
  String volumeName = UUID.randomUUID().toString();
  String bucketName = UUID.randomUUID().toString();
  String parentDir = "a/b/c/d/";
  String keyName = parentDir + UUID.randomUUID().toString();

  // Create the volume and bucket that will hold the multipart upload.
  store.createVolume(volumeName);
  OzoneVolume volume = store.getVolume(volumeName);
  volume.createBucket(bucketName);
  OzoneBucket bucket = volume.getBucket(bucketName);

  // Resolve the bucket layout for later open-key table lookups.
  OzoneManager om = cluster.getOzoneManager();
  OmBucketInfo bucketInfo = om.getMetadataManager().getBucketTable()
      .get(om.getMetadataManager()
          .getBucketKey(volume.getName(), bucket.getName()));
  BucketLayout layout = bucketInfo.getBucketLayout();

  // Initiate an upload, add a single part, and confirm the DB entries exist.
  String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, ONE);
  String partName =
      uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8));
  OMMetadataManager metadataMgr =
      cluster.getOzoneManager().getMetadataManager();
  String multipartKey = verifyUploadedPart(volumeName, bucketName, keyName,
      uploadID, partName, metadataMgr);

  // Abort: both the open-key and multipart-info entries must be gone.
  bucket.abortMultipartUpload(keyName, uploadID);
  String multipartOpenKey = getMultipartOpenKey(uploadID, volumeName,
      bucketName, keyName, metadataMgr);
  Assert.assertNull(metadataMgr.getOpenKeyTable(layout).get(multipartOpenKey));
  Assert.assertNull(metadataMgr.getMultipartInfoTable().get(multipartKey));
  // The deleteTable write happens through an async batch operation
  // (Table.putWithBatch), so no assertion is made against deleteTable here.
}
Usage example of org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo in the Apache Ozone project: the validateAndUpdateCache method of the S3MultipartUploadAbortRequest class.
/**
 * Applies an S3 multipart-upload abort to the OM metadata cache: validates
 * the request, releases quota held by already-uploaded parts, and removes the
 * open-key and multipart-info entries for the upload.
 */
@Override
public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
  MultipartUploadAbortRequest multipartUploadAbortRequest =
      getOmRequest().getAbortMultiPartUploadRequest();
  OzoneManagerProtocolProtos.KeyArgs keyArgs =
      multipartUploadAbortRequest.getKeyArgs();
  Map<String, String> auditMap = buildKeyArgsAuditMap(keyArgs);
  String volumeName = keyArgs.getVolumeName();
  String bucketName = keyArgs.getBucketName();
  // Keep the names as requested by the client for error messages;
  // resolveBucketLink below may rewrite volumeName/bucketName.
  final String requestedVolume = volumeName;
  final String requestedBucket = bucketName;
  String keyName = keyArgs.getKeyName();
  ozoneManager.getMetrics().incNumAbortMultipartUploads();
  OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
  boolean acquiredLock = false;
  IOException exception = null;
  OmMultipartKeyInfo multipartKeyInfo = null;
  String multipartKey = null;
  OMResponse.Builder omResponse =
      OmResponseUtil.getOMResponseBuilder(getOmRequest());
  OMClientResponse omClientResponse = null;
  Result result = null;
  OmBucketInfo omBucketInfo = null;
  try {
    keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap);
    volumeName = keyArgs.getVolumeName();
    bucketName = keyArgs.getBucketName();
    // check acl
    checkKeyAcls(ozoneManager, volumeName, bucketName, keyName,
        IAccessAuthorizer.ACLType.WRITE, OzoneObj.ResourceType.KEY);
    acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK,
        volumeName, bucketName);
    validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
    multipartKey = omMetadataManager.getMultipartKey(volumeName, bucketName,
        keyName, keyArgs.getMultipartUploadID());
    String multipartOpenKey;
    try {
      multipartOpenKey = getMultipartOpenKey(keyArgs.getMultipartUploadID(),
          volumeName, bucketName, keyName, omMetadataManager);
    } catch (OMException ome) {
      throw new OMException("Abort Multipart Upload Failed: volume: "
          + requestedVolume + ", bucket: " + requestedBucket + ", key: "
          + keyName, ome,
          OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR);
    }
    OmKeyInfo omKeyInfo = omMetadataManager
        .getOpenKeyTable(getBucketLayout()).get(multipartOpenKey);
    omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName);
    // A missing open-key entry means no multipart upload was initiated
    // for this key. (Message separators fixed: the old message concatenated
    // "volume: Vbucket: Bkey: K" without delimiters.)
    if (omKeyInfo == null) {
      throw new OMException("Abort Multipart Upload Failed: volume: "
          + requestedVolume + ", bucket: " + requestedBucket + ", key: "
          + keyName, OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR);
    }
    multipartKeyInfo =
        omMetadataManager.getMultipartInfoTable().get(multipartKey);
    // BUGFIX: guard against an absent multipart-info entry; previously a
    // null result was dereferenced and surfaced as a NullPointerException
    // instead of the proper NO_SUCH_MULTIPART_UPLOAD_ERROR.
    if (multipartKeyInfo == null) {
      throw new OMException("Abort Multipart Upload Failed: volume: "
          + requestedVolume + ", bucket: " + requestedBucket + ", key: "
          + keyName, OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR);
    }
    multipartKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());
    // When aborting an upload, subtract every uploaded part's length
    // (scaled by the replication factor) from the bucket's usedBytes.
    long quotaReleased = 0;
    int keyFactor = omKeyInfo.getReplicationConfig().getRequiredNodes();
    for (PartKeyInfo iterPartKeyInfo
        : multipartKeyInfo.getPartKeyInfoMap().values()) {
      quotaReleased +=
          iterPartKeyInfo.getPartKeyInfo().getDataSize() * keyFactor;
    }
    omBucketInfo.incrUsedBytes(-quotaReleased);
    // Update cache of openKeyTable and multipartInfo table.
    // No need to add the cache entries to delete table, as the entries
    // in delete table are not used by any read/write operations.
    omMetadataManager.getOpenKeyTable(getBucketLayout()).addCacheEntry(
        new CacheKey<>(multipartOpenKey),
        new CacheValue<>(Optional.absent(), trxnLogIndex));
    omMetadataManager.getMultipartInfoTable().addCacheEntry(
        new CacheKey<>(multipartKey),
        new CacheValue<>(Optional.absent(), trxnLogIndex));
    omClientResponse = getOmClientResponse(ozoneManager, multipartKeyInfo,
        multipartKey, multipartOpenKey, omResponse, omBucketInfo);
    result = Result.SUCCESS;
  } catch (IOException ex) {
    result = Result.FAILURE;
    exception = ex;
    omClientResponse = getOmClientResponse(exception, omResponse);
  } finally {
    addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
        omDoubleBufferHelper);
    if (acquiredLock) {
      omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName,
          bucketName);
    }
  }
  // audit log
  auditLog(ozoneManager.getAuditLogger(),
      buildAuditMessage(OMAction.ABORT_MULTIPART_UPLOAD, auditMap, exception,
          getOmRequest().getUserInfo()));
  switch (result) {
  case SUCCESS:
    LOG.debug("Abort Multipart request is successfully completed for "
        + "KeyName {} in VolumeName/Bucket {}/{}", keyName, volumeName,
        bucketName);
    break;
  case FAILURE:
    ozoneManager.getMetrics().incNumAbortMultipartUploadFails();
    LOG.error("Abort Multipart request is failed for KeyName {} in "
        + "VolumeName/Bucket {}/{}", keyName, volumeName, bucketName,
        exception);
    break;
  default:
    LOG.error("Unrecognized Result for S3MultipartUploadAbortRequest: {}",
        multipartUploadAbortRequest);
  }
  return omClientResponse;
}
Aggregations