Example use of org.apache.hadoop.ozone.om.helpers.OmMultipartInfo in project ozone by apache.
From the class TestOzoneManagerHAWithData, method initiateMultipartUpload.
/**
 * Initiates a multipart upload for {@code keyName} in the given bucket,
 * using RATIS replication with factor ONE.
 *
 * @param ozoneBucket bucket in which the multipart upload is started
 * @param keyName     key for which the upload is initiated
 * @return the upload ID assigned by the Ozone Manager (asserted non-null)
 * @throws Exception if initiating the multipart upload fails
 */
private String initiateMultipartUpload(OzoneBucket ozoneBucket, String keyName) throws Exception {
  OmMultipartInfo omMultipartInfo = ozoneBucket.initiateMultipartUpload(keyName, ReplicationType.RATIS, ReplicationFactor.ONE);
  String uploadID = omMultipartInfo.getUploadID();
  // assertNotNull gives a clearer failure message than assertTrue(x != null).
  Assert.assertNotNull(uploadID);
  return uploadID;
}
Example use of org.apache.hadoop.ozone.om.helpers.OmMultipartInfo in project ozone by apache.
From the class TestKeyManagerUnit, method listMultipartUploadPartsWithZeroUpload.
@Test
public void listMultipartUploadPartsWithZeroUpload() throws IOException {
  // GIVEN: a bucket with a multipart upload that has no committed parts yet.
  createBucket(metadataManager, "vol1", "bucket1");
  OmMultipartInfo multipartInfo = initMultipartUpload(writeClient, "vol1", "bucket1", "dir/key1");

  // WHEN: the parts of that upload are listed.
  OmMultipartUploadListParts listedParts = keyManager.listParts("vol1", "bucket1", "dir/key1", multipartInfo.getUploadID(), 0, 10);

  // THEN: the returned part list is empty.
  Assert.assertEquals(0, listedParts.getPartInfoList().size());
}
Example use of org.apache.hadoop.ozone.om.helpers.OmMultipartInfo in project ozone by apache.
From the class TestKeyManagerUnit, method listMultipartUploadsWithFewEntriesInCache.
/**
 * Verifies that listMultipartUploads merges entries that live only in the
 * table cache with entries persisted in the DB, honors key prefixes, and
 * drops uploads after they are aborted (whether cached or persisted).
 */
@Test
public void listMultipartUploadsWithFewEntriesInCache() throws IOException {
  String volume = UUID.randomUUID().toString();
  String bucket = UUID.randomUUID().toString();

  // GIVEN: a single bucket (the original duplicated this call; one is enough).
  createBucket(metadataManager, volume, bucket);

  // Add a few uploads to the cache and a few to the DB.
  addinitMultipartUploadToCache(volume, bucket, "dir/key1");
  initMultipartUpload(writeClient, volume, bucket, "dir/key2");
  addinitMultipartUploadToCache(volume, bucket, "dir/key3");
  initMultipartUpload(writeClient, volume, bucket, "dir/key4");

  // WHEN: listing with an empty prefix.
  OmMultipartUploadList omMultipartUploadList = keyManager.listMultipartUploads(volume, bucket, "");

  // THEN: cache and DB entries are merged and returned in key order.
  List<OmMultipartUpload> uploads = omMultipartUploadList.getUploads();
  Assert.assertEquals(4, uploads.size());
  Assert.assertEquals("dir/key1", uploads.get(0).getKeyName());
  Assert.assertEquals("dir/key2", uploads.get(1).getKeyName());
  Assert.assertEquals("dir/key3", uploads.get(2).getKeyName());
  Assert.assertEquals("dir/key4", uploads.get(3).getKeyName());

  // Add more uploads (again mixing cache and DB) to exercise prefix matching.
  addinitMultipartUploadToCache(volume, bucket, "dir/ozonekey1");
  initMultipartUpload(writeClient, volume, bucket, "dir/ozonekey2");
  OmMultipartInfo omMultipartInfo3 = addinitMultipartUploadToCache(volume, bucket, "dir/ozonekey3");
  OmMultipartInfo omMultipartInfo4 = initMultipartUpload(writeClient, volume, bucket, "dir/ozonekey4");

  omMultipartUploadList = keyManager.listMultipartUploads(volume, bucket, "dir/ozone");

  // THEN: only keys matching the prefix are returned.
  uploads = omMultipartUploadList.getUploads();
  Assert.assertEquals(4, uploads.size());
  Assert.assertEquals("dir/ozonekey1", uploads.get(0).getKeyName());
  Assert.assertEquals("dir/ozonekey2", uploads.get(1).getKeyName());
  Assert.assertEquals("dir/ozonekey3", uploads.get(2).getKeyName());
  Assert.assertEquals("dir/ozonekey4", uploads.get(3).getKeyName());

  // Abort the multipart upload whose entry is persisted in the DB.
  abortMultipart(volume, bucket, "dir/ozonekey4", omMultipartInfo4.getUploadID());

  omMultipartUploadList = keyManager.listMultipartUploads(volume, bucket, "dir/ozone");

  // THEN: the aborted DB entry no longer appears.
  uploads = omMultipartUploadList.getUploads();
  Assert.assertEquals(3, uploads.size());
  Assert.assertEquals("dir/ozonekey1", uploads.get(0).getKeyName());
  Assert.assertEquals("dir/ozonekey2", uploads.get(1).getKeyName());
  Assert.assertEquals("dir/ozonekey3", uploads.get(2).getKeyName());

  // Abort the multipart upload whose entry lives only in the cache.
  abortMultipart(volume, bucket, "dir/ozonekey3", omMultipartInfo3.getUploadID());

  omMultipartUploadList = keyManager.listMultipartUploads(volume, bucket, "dir/ozone");

  // THEN: the aborted cached entry no longer appears either.
  uploads = omMultipartUploadList.getUploads();
  Assert.assertEquals(2, uploads.size());
  Assert.assertEquals("dir/ozonekey1", uploads.get(0).getKeyName());
  Assert.assertEquals("dir/ozonekey2", uploads.get(1).getKeyName());
}
Example use of org.apache.hadoop.ozone.om.helpers.OmMultipartInfo in project ozone by apache.
From the class TestOzoneFSWithObjectStoreCreate, method testMPUFailDuetoDirectoryCreationBeforeComplete.
/**
 * Verifies that completing a multipart upload fails with NOT_A_FILE when a
 * directory with the same name is created before completion, and that the
 * same completion succeeds once the conflicting directory is deleted.
 */
@Test
public void testMPUFailDuetoDirectoryCreationBeforeComplete() throws Exception {
  OzoneVolume ozoneVolume = cluster.getRpcClient().getObjectStore().getVolume(volumeName);
  OzoneBucket ozoneBucket = ozoneVolume.getBucket(bucketName);

  String keyName = "/dir1/dir2/mpukey";
  OmMultipartInfo omMultipartInfo = ozoneBucket.initiateMultipartUpload(keyName);
  Assert.assertNotNull(omMultipartInfo);

  OzoneOutputStream ozoneOutputStream = ozoneBucket.createMultipartKey(keyName, 10, 1, omMultipartInfo.getUploadID());
  byte[] b = new byte[10];
  Arrays.fill(b, (byte) 96);
  ozoneOutputStream.write(b);

  // Before close, create a directory with the same name as the key.
  o3fs.mkdirs(new Path(keyName));

  // This close should succeed: the name clash is only checked during part
  // creation or during complete MPU.
  ozoneOutputStream.close();

  Map<Integer, String> partsMap = new HashMap<>();
  partsMap.put(1, ozoneOutputStream.getCommitUploadPartInfo().getPartName());

  // Completion must fail because a directory with the same name exists.
  try {
    ozoneBucket.completeMultipartUpload(keyName, omMultipartInfo.getUploadID(), partsMap);
    fail("completeMultipartUpload should have failed with NOT_A_FILE");
  } catch (OMException ex) {
    // The caught type is already OMException; only the result code needs checking.
    Assert.assertEquals(NOT_A_FILE, ex.getResult());
  }

  // Delete the conflicting directory and retry: completion should now succeed.
  o3fs.delete(new Path(keyName), true);
  ozoneBucket.completeMultipartUpload(keyName, omMultipartInfo.getUploadID(), partsMap);

  // Read the key back and verify its contents match what was written.
  try (FSDataInputStream ozoneInputStream = o3fs.open(new Path(keyName))) {
    byte[] buffer = new byte[10];
    // Positional read: does not change the stream's internal offset.
    int readBytes = ozoneInputStream.read(0, buffer, 0, 10);
    String readData = new String(buffer, 0, readBytes, UTF_8);
    Assert.assertEquals(new String(b, 0, b.length, UTF_8), readData);
  }
}
Example use of org.apache.hadoop.ozone.om.helpers.OmMultipartInfo in project ozone by apache.
From the class ObjectEndpoint, method initializeMultipartUpload.
/**
 * Initialize MultiPartUpload request (S3 CreateMultipartUpload).
 * <p>
 * Note: the specific content type is set by the HeaderPreprocessor.
 *
 * @param bucket target bucket name from the request path
 * @param key    object key from the request path
 * @return 200 OK with an XML body carrying the generated upload ID
 * @throws IOException  on I/O failures talking to the Ozone Manager
 * @throws OS3Exception mapped S3 error (ACCESS_DENIED on permission failure)
 */
@POST
@Produces(MediaType.APPLICATION_XML)
@Consumes(HeaderPreprocessor.MULTIPART_UPLOAD_MARKER)
public Response initializeMultipartUpload(@PathParam("bucket") String bucket, @PathParam("path") String key) throws IOException, OS3Exception {
  try {
    OzoneBucket ozoneBucket = getBucket(bucket);

    // Map the S3 storage class header to Ozone replication settings,
    // falling back to the configured default when the header is absent/blank.
    String storageType = headers.getHeaderString(STORAGE_CLASS_HEADER);
    S3StorageType s3StorageType;
    if (storageType == null || storageType.isEmpty()) {
      s3StorageType = S3StorageType.getDefault(ozoneConfiguration);
    } else {
      s3StorageType = toS3StorageType(storageType);
    }
    ReplicationType replicationType = s3StorageType.getType();
    ReplicationFactor replicationFactor = s3StorageType.getFactor();

    OmMultipartInfo multipartInfo = ozoneBucket.initiateMultipartUpload(key, replicationType, replicationFactor);

    MultipartUploadInitiateResponse multipartUploadInitiateResponse = new MultipartUploadInitiateResponse();
    multipartUploadInitiateResponse.setBucket(bucket);
    multipartUploadInitiateResponse.setKey(key);
    multipartUploadInitiateResponse.setUploadID(multipartInfo.getUploadID());

    return Response.status(Status.OK).entity(multipartUploadInitiateResponse).build();
  } catch (OMException ex) {
    // Translate permission failures into the S3-visible ACCESS_DENIED error.
    if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
      throw newError(S3ErrorTable.ACCESS_DENIED, key, ex);
    }
    LOG.error("Error in Initiate Multipart Upload Request for bucket: {}, " + "key: {}", bucket, key, ex);
    throw ex;
  }
}
Aggregations