Use of org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo in project ozone by apache.
In class TestS3MultipartUploadCompleteResponseWithFSO, the method runAddDBToBatchWithParts:
private long runAddDBToBatchWithParts(String volumeName, String bucketName,
    String keyName, int deleteEntryCount) throws Exception {
String multipartUploadID = UUID.randomUUID().toString();
String fileName = OzoneFSUtils.getFileName(keyName);
String dbMultipartKey = omMetadataManager.getMultipartKey(volumeName, bucketName, keyName, multipartUploadID);
String dbMultipartOpenKey = omMetadataManager.getMultipartKey(parentID, fileName, multipartUploadID);
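// dbMultipartKey is used against the multipartInfoTable (built from
// volume/bucket/key/uploadId), while dbMultipartOpenKey is the FSO open-key
// table key (built from parentID/fileName/uploadId); both are asserted
// below after the batch commit.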
S3InitiateMultipartUploadResponse s3InitiateMultipartUploadResponseFSO =
    addS3InitiateMultipartUpload(volumeName, bucketName, keyName,
    multipartUploadID);
// Add some dummy parts for testing.
// No key locations are added, since this test only verifies whether
// entries are added to the delete table.
OmMultipartKeyInfo omMultipartKeyInfo = s3InitiateMultipartUploadResponseFSO.getOmMultipartKeyInfo();
// The commit adds an entry to the deleted table. Increment the expected
// count before the call, because the method also checks the entry count
// internally.
deleteEntryCount++;
OmKeyInfo omKeyInfoFSO = commitS3MultipartUpload(volumeName, bucketName,
    keyName, multipartUploadID, fileName, dbMultipartKey, omMultipartKeyInfo,
    deleteEntryCount);
String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
OmBucketInfo omBucketInfo = omMetadataManager.getBucketTable().get(bucketKey);
OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName,
    bucketName, keyName, HddsProtos.ReplicationType.RATIS,
    HddsProtos.ReplicationFactor.ONE, parentID + 9, parentID, 100, Time.now());
List<OmKeyInfo> unUsedParts = new ArrayList<>();
unUsedParts.add(omKeyInfo);
S3MultipartUploadCompleteResponse s3MultipartUploadCompleteResponse =
    createS3CompleteMPUResponseFSO(volumeName, bucketName, parentID, keyName,
    multipartUploadID, omKeyInfoFSO, OzoneManagerProtocolProtos.Status.OK,
    unUsedParts, omBucketInfo, null);
s3MultipartUploadCompleteResponse.addToDBBatch(omMetadataManager, batchOperation);
omMetadataManager.getStore().commitBatchOperation(batchOperation);
String dbKey = omMetadataManager.getOzonePathKey(parentID, omKeyInfoFSO.getFileName());
Assert.assertNotNull(omMetadataManager.getKeyTable(getBucketLayout()).get(dbKey));
Assert.assertNull(omMetadataManager.getMultipartInfoTable().get(dbMultipartKey));
Assert.assertNull(omMetadataManager.getOpenKeyTable(getBucketLayout()).get(dbMultipartOpenKey));
return omKeyInfoFSO.getObjectID();
}
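A minimal sketch of a hypothetical caller for this helper, reusing only names that appear in the snippets on this page; it assumes the fixture helpers (getKeyName, addVolumeAndBucketToDB, createParentPath) behave as in the commit-part tests below.
@Test
public void testMPUCompleteAddsDeleteEntries() throws Exception {
  String volumeName = UUID.randomUUID().toString();
  String bucketName = UUID.randomUUID().toString();
  String keyName = getKeyName();
  OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
      omMetadataManager);
  createParentPath(volumeName, bucketName);
  // Start from an empty deleted table; the helper increments and verifies
  // the expected delete-entry count internally.
  long objectID = runAddDBToBatchWithParts(volumeName, bucketName, keyName, 0);
  Assert.assertTrue(objectID > 0);
  Assert.assertTrue(omMetadataManager.countRowsInTable(
      omMetadataManager.getDeletedTable()) >= 1);
}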
Use of org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo in project ozone by apache.
In class TestS3MultipartUploadCommitPartResponseWithFSO, the method testAddDBToBatchWithParts:
@Test
public void testAddDBToBatchWithParts() throws Exception {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
String keyName = getKeyName();
OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager);
createParentPath(volumeName, bucketName);
String multipartUploadID = UUID.randomUUID().toString();
String fileName = OzoneFSUtils.getFileName(keyName);
String multipartKey = omMetadataManager.getMultipartKey(volumeName, bucketName, keyName, multipartUploadID);
S3InitiateMultipartUploadResponse s3InitiateMultipartUploadResponseFSO =
    createS3InitiateMPUResponseFSO(volumeName, bucketName, parentID, keyName,
    multipartUploadID, new ArrayList<>());
s3InitiateMultipartUploadResponseFSO.addToDBBatch(omMetadataManager, batchOperation);
// Add some dummy parts for testing.
// No key locations are added, since this test only verifies whether
// entries are added to the delete table.
OmMultipartKeyInfo omMultipartKeyInfo = s3InitiateMultipartUploadResponseFSO.getOmMultipartKeyInfo();
PartKeyInfo part1 = createPartKeyInfoFSO(volumeName, bucketName, parentID, fileName, 1);
addPart(1, part1, omMultipartKeyInfo);
long clientId = Time.now();
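// The open-file key addresses the in-flight part's entry in the open key
// table; it is built from parentID, fileName, and clientId.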
String openKey = omMetadataManager.getOpenFileName(parentID, fileName, clientId);
S3MultipartUploadCommitPartResponse s3MultipartUploadCommitPartResponse =
    createS3CommitMPUResponseFSO(volumeName, bucketName, parentID, keyName,
    multipartUploadID, omMultipartKeyInfo.getPartKeyInfo(1),
    omMultipartKeyInfo, OzoneManagerProtocolProtos.Status.OK, openKey);
s3MultipartUploadCommitPartResponse.checkAndUpdateDB(omMetadataManager, batchOperation);
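// Committing part 1 replaces the dummy part 1 added above, so the replaced
// part is expected to land in the deleted table (checked after the batch
// commit below).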
Assert.assertNull(omMetadataManager.getOpenKeyTable(getBucketLayout()).get(openKey));
Assert.assertNull(omMetadataManager.getMultipartInfoTable().get(multipartKey));
omMetadataManager.getStore().commitBatchOperation(batchOperation);
// As 1 part was created, 1 entry should be present in the delete table.
Assert.assertEquals(1, omMetadataManager.countRowsInTable(omMetadataManager.getDeletedTable()));
String part1DeletedKeyName = omMultipartKeyInfo.getPartKeyInfo(1).getPartName();
Assert.assertNotNull(omMetadataManager.getDeletedTable().get(part1DeletedKeyName));
RepeatedOmKeyInfo ro = omMetadataManager.getDeletedTable().get(part1DeletedKeyName);
Assert.assertEquals(OmKeyInfo.getFromProtobuf(part1.getPartKeyInfo()), ro.getOmKeyInfoList().get(0));
}
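The addPart helper used above is not shown on this page; the following is a plausible shape, assuming OmMultipartKeyInfo exposes an addPartKeyInfo(partNumber, partKeyInfo) mutator matching the getPartKeyInfo(partNumber) accessor these tests call (an assumption, not a confirmed signature).
private void addPart(int partNumber, PartKeyInfo partKeyInfo,
    OmMultipartKeyInfo omMultipartKeyInfo) {
  // Hypothetical: register the part so getPartKeyInfo(partNumber) returns it.
  omMultipartKeyInfo.addPartKeyInfo(partNumber, partKeyInfo);
}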
Use of org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo in project ozone by apache.
In class TestS3MultipartUploadCommitPartResponseWithFSO, the method testWithMultipartUploadError:
@Test
public void testWithMultipartUploadError() throws Exception {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
String keyName = getKeyName();
OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager);
createParentPath(volumeName, bucketName);
String multipartUploadID = UUID.randomUUID().toString();
String fileName = OzoneFSUtils.getFileName(keyName);
String multipartKey = omMetadataManager.getMultipartKey(parentID, fileName, multipartUploadID);
S3InitiateMultipartUploadResponse s3InitiateMultipartUploadResponseFSO =
    createS3InitiateMPUResponseFSO(volumeName, bucketName, parentID, keyName,
    multipartUploadID, new ArrayList<>());
s3InitiateMultipartUploadResponseFSO.addToDBBatch(omMetadataManager, batchOperation);
// Add some dummy parts for testing.
// No key locations are added, since this test only verifies whether
// entries are added to the delete table.
OmMultipartKeyInfo omMultipartKeyInfo = s3InitiateMultipartUploadResponseFSO.getOmMultipartKeyInfo();
PartKeyInfo part1 = createPartKeyInfoFSO(volumeName, bucketName, parentID, fileName, 1);
addPart(1, part1, omMultipartKeyInfo);
long clientId = Time.now();
String openKey = omMetadataManager.getOpenFileName(parentID, fileName, clientId);
S3MultipartUploadCommitPartResponse s3MultipartUploadCommitPartResponse =
    createS3CommitMPUResponseFSO(volumeName, bucketName, parentID,
    keyName + "invalid", multipartUploadID,
    omMultipartKeyInfo.getPartKeyInfo(1), omMultipartKeyInfo,
    OzoneManagerProtocolProtos.Status.NO_SUCH_MULTIPART_UPLOAD_ERROR, openKey);
s3MultipartUploadCommitPartResponse.checkAndUpdateDB(omMetadataManager, batchOperation);
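// With NO_SUCH_MULTIPART_UPLOAD_ERROR, the part cannot be recorded against
// the upload, so the response is expected to move the orphan open-file
// entry into the deleted table instead (verified below).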
Assert.assertNull(omMetadataManager.getOpenKeyTable(getBucketLayout()).get(openKey));
Assert.assertNull(omMetadataManager.getMultipartInfoTable().get(multipartKey));
omMetadataManager.getStore().commitBatchOperation(batchOperation);
// The open key entry should be present in the delete table.
Assert.assertEquals(1, omMetadataManager.countRowsInTable(omMetadataManager.getDeletedTable()));
Assert.assertNotNull(omMetadataManager.getDeletedTable().get(openKey));
}
Use of org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo in project ozone by apache.
In class TestOzoneClientMultipartUploadWithFSO, the method verifyPartNamesInDB:
private void verifyPartNamesInDB(String volumeName, String bucketName,
    String keyName, Map<Integer, String> partsMap,
    OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts,
    String uploadID) throws IOException {
List<String> listPartNames = new ArrayList<>();
String keyPartName = verifyPartNames(partsMap, 0, ozoneMultipartUploadPartListParts);
listPartNames.add(keyPartName);
keyPartName = verifyPartNames(partsMap, 1, ozoneMultipartUploadPartListParts);
listPartNames.add(keyPartName);
keyPartName = verifyPartNames(partsMap, 2, ozoneMultipartUploadPartListParts);
listPartNames.add(keyPartName);
OMMetadataManager metadataMgr = cluster.getOzoneManager().getMetadataManager();
String multipartKey = metadataMgr.getMultipartKey(volumeName, bucketName, keyName, uploadID);
OmMultipartKeyInfo omMultipartKeyInfo = metadataMgr.getMultipartInfoTable().get(multipartKey);
Assert.assertNotNull(omMultipartKeyInfo);
TreeMap<Integer, OzoneManagerProtocolProtos.PartKeyInfo> partKeyInfoMap = omMultipartKeyInfo.getPartKeyInfoMap();
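// partKeyInfoMap is keyed by part number, so iteration proceeds in
// ascending part order.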
for (Map.Entry<Integer, OzoneManagerProtocolProtos.PartKeyInfo> entry : partKeyInfoMap.entrySet()) {
OzoneManagerProtocolProtos.PartKeyInfo partKeyInfo = entry.getValue();
String partKeyName = partKeyInfo.getPartName();
// Reconstruct the full part name prefix from volume, bucket, and keyName.
String fullKeyPartName = metadataMgr.getOzoneKey(volumeName, bucketName, keyName);
// partKeyName format in DB: fullKeyPartName + clientID.
Assert.assertTrue("Invalid partKeyName format in DB: " + partKeyName
    + ", expected prefix: " + fullKeyPartName,
    partKeyName.startsWith(fullKeyPartName));
listPartNames.remove(partKeyName);
}
Assert.assertTrue("Wrong partKeyName format in DB!", listPartNames.isEmpty());
}
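The verifyPartNames helper called above is also not shown here; a minimal sketch, under the assumption that OzoneMultipartUploadPartListParts exposes getPartInfoList() with PartInfo.getPartNumber()/getPartName() as in the Ozone client API (the real helper may differ).
private String verifyPartNames(Map<Integer, String> partsMap, int index,
    OzoneMultipartUploadPartListParts parts) {
  // Assumed accessors: getPartInfoList() returns parts in listed order.
  OzoneMultipartUploadPartListParts.PartInfo partInfo =
      parts.getPartInfoList().get(index);
  Assert.assertEquals(partsMap.get(partInfo.getPartNumber()),
      partInfo.getPartName());
  return partInfo.getPartName();
}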
Use of org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo in project ozone by apache.
In class TestS3InitiateMultipartUploadResponseWithFSO, the method testAddDBToBatch:
@Test
public void testAddDBToBatch() throws Exception {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
String prefix = "a/b/c/d/";
String fileName = UUID.randomUUID().toString();
String keyName = prefix + fileName;
String multipartUploadID = UUID.randomUUID().toString();
// Assume the objectID of the dir path "a/b/c/d" is 1027.
long parentID = 1027;
List<OmDirectoryInfo> parentDirInfos = new ArrayList<>();
S3InitiateMultipartUploadResponse s3InitiateMultipartUploadResponseFSO =
    createS3InitiateMPUResponseFSO(volumeName, bucketName, parentID, keyName,
    multipartUploadID, parentDirInfos);
s3InitiateMultipartUploadResponseFSO.addToDBBatch(omMetadataManager, batchOperation);
// Commit manually and verify that addToDBBatch succeeded.
omMetadataManager.getStore().commitBatchOperation(batchOperation);
String multipartKey = omMetadataManager.getMultipartKey(volumeName, bucketName, keyName, multipartUploadID);
String multipartOpenKey = omMetadataManager.getMultipartKey(parentID, fileName, multipartUploadID);
OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable(getBucketLayout()).get(multipartOpenKey);
Assert.assertNotNull("Failed to find the fileInfo", omKeyInfo);
Assert.assertEquals("FileName mismatches!", fileName, omKeyInfo.getKeyName());
Assert.assertEquals("ParentId mismatches!", parentID, omKeyInfo.getParentObjectID());
OmMultipartKeyInfo omMultipartKeyInfo = omMetadataManager.getMultipartInfoTable().get(multipartKey);
Assert.assertNotNull("Failed to find the multipartFileInfo", omMultipartKeyInfo);
Assert.assertEquals("ParentId mismatches!", parentID, omMultipartKeyInfo.getParentID());
Assert.assertEquals("Upload Id mismatches!", multipartUploadID, omMultipartKeyInfo.getUploadID());
}
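For reference, a minimal construction sketch for OmMultipartKeyInfo itself. The builder setter names below mirror the getters exercised above (getUploadID, getParentID) but are assumptions rather than a confirmed API:
OmMultipartKeyInfo multipartKeyInfo = new OmMultipartKeyInfo.Builder()
    .setUploadID(multipartUploadID) // assumed counterpart of getUploadID()
    .setCreationTime(Time.now())
    .setParentID(parentID) // assumed counterpart of getParentID()
    .build();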