Use of org.apache.hadoop.ozone.om.helpers.OmMultipartInfo in project ozone by apache.
The class TestOzoneRpcClientAbstract, method testAbortUploadFailWithInProgressPartUpload.
@Test
public void testAbortUploadFailWithInProgressPartUpload() throws Exception {
  String volumeName = UUID.randomUUID().toString();
  String bucketName = UUID.randomUUID().toString();
  String keyName = UUID.randomUUID().toString();
  store.createVolume(volumeName);
  OzoneVolume volume = store.getVolume(volumeName);
  volume.createBucket(bucketName);
  OzoneBucket bucket = volume.getBucket(bucketName);
  OmMultipartInfo omMultipartInfo =
      bucket.initiateMultipartUpload(keyName, RATIS, ONE);
  Assert.assertNotNull(omMultipartInfo.getUploadID());
  // Do not close output stream.
  byte[] data = "data".getBytes(UTF_8);
  OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(
      keyName, data.length, 1, omMultipartInfo.getUploadID());
  ozoneOutputStream.write(data, 0, data.length);
  // Abort before completing part upload.
  bucket.abortMultipartUpload(keyName, omMultipartInfo.getUploadID());
  try {
    ozoneOutputStream.close();
    fail("testAbortUploadFailWithInProgressPartUpload failed");
  } catch (IOException ex) {
    assertTrue(ex instanceof OMException);
    assertEquals(NO_SUCH_MULTIPART_UPLOAD_ERROR,
        ((OMException) ex).getResult());
  }
}
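The try/catch/fail pattern above can also be written more compactly with assertThrows. The sketch below is not part of the original test and assumes JUnit 4.13+ (which provides Assert.assertThrows) is on the test classpath:

// Equivalent check with assertThrows (assumes JUnit 4.13+).
OMException omException = Assert.assertThrows(OMException.class,
    () -> ozoneOutputStream.close());
assertEquals(NO_SUCH_MULTIPART_UPLOAD_ERROR, omException.getResult());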
Use of org.apache.hadoop.ozone.om.helpers.OmMultipartInfo in project ozone by apache.
The class TestOzoneRpcClientAbstract, method initiateMultipartUpload.
private String initiateMultipartUpload(OzoneBucket bucket, String keyName,
    ReplicationType replicationType, ReplicationFactor replicationFactor)
    throws Exception {
  OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName,
      replicationType, replicationFactor);
  String uploadID = multipartInfo.getUploadID();
  Assert.assertNotNull(uploadID);
  return uploadID;
}
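A typical call site for this helper in the surrounding test class looks like the sketch below; the bucket, key name and replication settings are illustrative and taken from the other examples on this page.

// Hypothetical usage: start an upload and keep the uploadID for the
// subsequent createMultipartKey() calls.
String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, ONE);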
Use of org.apache.hadoop.ozone.om.helpers.OmMultipartInfo in project ozone by apache.
The class TestOzoneRpcClientAbstract, method testUploadPartOverrideWithStandAlone.
@Test
public void testUploadPartOverrideWithStandAlone() throws IOException {
  String volumeName = UUID.randomUUID().toString();
  String bucketName = UUID.randomUUID().toString();
  String keyName = UUID.randomUUID().toString();
  String sampleData = "sample Value";
  int partNumber = 1;
  store.createVolume(volumeName);
  OzoneVolume volume = store.getVolume(volumeName);
  volume.createBucket(bucketName);
  OzoneBucket bucket = volume.getBucket(bucketName);
  OmMultipartInfo multipartInfo =
      bucket.initiateMultipartUpload(keyName, RATIS, ONE);
  assertNotNull(multipartInfo);
  String uploadID = multipartInfo.getUploadID();
  Assert.assertEquals(volumeName, multipartInfo.getVolumeName());
  Assert.assertEquals(bucketName, multipartInfo.getBucketName());
  Assert.assertEquals(keyName, multipartInfo.getKeyName());
  assertNotNull(multipartInfo.getUploadID());
  OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(
      keyName, sampleData.length(), partNumber, uploadID);
  ozoneOutputStream.write(string2Bytes(sampleData), 0, sampleData.length());
  ozoneOutputStream.close();
  OmMultipartCommitUploadPartInfo commitUploadPartInfo =
      ozoneOutputStream.getCommitUploadPartInfo();
  assertNotNull(commitUploadPartInfo);
  String partName = commitUploadPartInfo.getPartName();
  assertNotNull(commitUploadPartInfo.getPartName());
  // Overwrite the part by creating a part key with the same part number
  // but different content.
  sampleData = "sample Data Changed";
  ozoneOutputStream = bucket.createMultipartKey(
      keyName, sampleData.length(), partNumber, uploadID);
  ozoneOutputStream.write(string2Bytes(sampleData), 0, "name".length());
  ozoneOutputStream.close();
  commitUploadPartInfo = ozoneOutputStream.getCommitUploadPartInfo();
  assertNotNull(commitUploadPartInfo);
  assertNotNull(commitUploadPartInfo.getPartName());
  // For the same content, AWS S3 generates the same part name (ETag) during
  // upload part, because the ETag is derived from the md5sum. Ozone does not
  // do this yet; instead, to make large file uploads via "aws s3 cp" work,
  // part names are generated in a predictable fashion. So when a part is
  // overridden, the part name stays the same regardless of the content,
  // which lets the S3 complete-MPU step pass its part-name comparison.
  assertEquals("Part names should be same", partName,
      commitUploadPartInfo.getPartName());
}
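The part name asserted above is what a caller later feeds into the complete step. Below is a minimal sketch of that step, assuming OzoneBucket exposes completeMultipartUpload(key, uploadID, partsMap) with a java.util part-number-to-part-name map, as used by other multipart tests; it is not part of the override test itself.

// Map of part number -> part name expected by complete multipart upload.
Map<Integer, String> partsMap = new TreeMap<>();
partsMap.put(partNumber, commitUploadPartInfo.getPartName());
// Completing the upload stitches the committed part(s) into the final key.
OmMultipartUploadCompleteInfo completeInfo =
    bucket.completeMultipartUpload(keyName, uploadID, partsMap);
assertNotNull(completeInfo);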
Use of org.apache.hadoop.ozone.om.helpers.OmMultipartInfo in project ozone by apache.
The class TestOzoneRpcClientAbstract, method testInitiateMultipartUploadWithDefaultReplication.
@Test
public void testInitiateMultipartUploadWithDefaultReplication() throws IOException {
  String volumeName = UUID.randomUUID().toString();
  String bucketName = UUID.randomUUID().toString();
  String keyName = UUID.randomUUID().toString();
  store.createVolume(volumeName);
  OzoneVolume volume = store.getVolume(volumeName);
  volume.createBucket(bucketName);
  OzoneBucket bucket = volume.getBucket(bucketName);
  OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName);
  assertNotNull(multipartInfo);
  String uploadID = multipartInfo.getUploadID();
  Assert.assertEquals(volumeName, multipartInfo.getVolumeName());
  Assert.assertEquals(bucketName, multipartInfo.getBucketName());
  Assert.assertEquals(keyName, multipartInfo.getKeyName());
  assertNotNull(multipartInfo.getUploadID());
  // Call initiate multipart upload for the same key again, this should
  // generate a new uploadID.
  multipartInfo = bucket.initiateMultipartUpload(keyName);
  assertNotNull(multipartInfo);
  Assert.assertEquals(volumeName, multipartInfo.getVolumeName());
  Assert.assertEquals(bucketName, multipartInfo.getBucketName());
  Assert.assertEquals(keyName, multipartInfo.getKeyName());
  assertNotEquals(multipartInfo.getUploadID(), uploadID);
  assertNotNull(multipartInfo.getUploadID());
}
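The test only asserts that the second initiate call returns a different uploadID; it does not say what happens to the first one. If the first upload is still pending, a caller that no longer needs it can abort it explicitly with the same call shown in the abort test above. A minimal sketch under that assumption:

// Abort the stale upload identified by the original uploadID so it does
// not linger as an incomplete multipart upload (assumes it still exists).
bucket.abortMultipartUpload(keyName, uploadID);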
Use of org.apache.hadoop.ozone.om.helpers.OmMultipartInfo in project ozone by apache.
The class OzoneManagerProtocolClientSideTranslatorPB, method initiateMultipartUpload.
/**
 * Initiate a multipart upload for the key described by the given arguments.
 *
 * @param omKeyArgs arguments identifying the volume, bucket and key.
 * @return OmMultipartInfo carrying the generated multipart upload ID.
 */
@Override
public OmMultipartInfo initiateMultipartUpload(OmKeyArgs omKeyArgs)
    throws IOException {
  MultipartInfoInitiateRequest.Builder multipartInfoInitiateRequest =
      MultipartInfoInitiateRequest.newBuilder();
  KeyArgs.Builder keyArgs = KeyArgs.newBuilder()
      .setVolumeName(omKeyArgs.getVolumeName())
      .setBucketName(omKeyArgs.getBucketName())
      .setKeyName(omKeyArgs.getKeyName())
      .addAllAcls(omKeyArgs.getAcls().stream()
          .map(a -> OzoneAcl.toProtobuf(a))
          .collect(Collectors.toList()));
  if (omKeyArgs.getReplicationConfig() != null) {
    keyArgs.setFactor(
        ReplicationConfig.getLegacyFactor(omKeyArgs.getReplicationConfig()));
    keyArgs.setType(omKeyArgs.getReplicationConfig().getReplicationType());
  }
  multipartInfoInitiateRequest.setKeyArgs(keyArgs.build());
  OMRequest omRequest = createOMRequest(Type.InitiateMultiPartUpload)
      .setInitiateMultiPartUploadRequest(multipartInfoInitiateRequest.build())
      .build();
  MultipartInfoInitiateResponse resp = handleError(submitRequest(omRequest))
      .getInitiateMultiPartUploadResponse();
  return new OmMultipartInfo(resp.getVolumeName(), resp.getBucketName(),
      resp.getKeyName(), resp.getMultipartUploadID());
}
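For reference, a caller builds the OmKeyArgs for this method roughly as sketched below. The builder method names are assumptions inferred from the getters used above (getVolumeName, getBucketName, getKeyName, getAcls, getReplicationConfig) and may differ slightly between Ozone versions. Note that getAcls() is dereferenced unconditionally, so the ACL list must be non-null, while a null replication config simply leaves the factor and type fields unset.

// Hypothetical construction of the key arguments (java.util.Collections
// supplies an empty ACL list so that getAcls() is non-null).
OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
    .setVolumeName(volumeName)
    .setBucketName(bucketName)
    .setKeyName(keyName)
    .setAcls(Collections.emptyList())
    .build();
// Without a replication config, the if-branch above is skipped and the
// Ozone Manager falls back to the default replication, as exercised by
// testInitiateMultipartUploadWithDefaultReplication earlier on this page.
OmMultipartInfo multipartInfo = initiateMultipartUpload(omKeyArgs);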