Uses of org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo in the Apache Ozone project.
Example: class RpcClient, method completeMultipartUpload.
@Override
public OmMultipartUploadCompleteInfo completeMultipartUpload(String volumeName, String bucketName, String keyName, String uploadID, Map<Integer, String> partsMap) throws IOException {
  // Validate naming rules and required arguments before contacting the OM.
  verifyVolumeName(volumeName);
  verifyBucketName(bucketName);
  HddsClientUtils.checkNotNull(keyName, uploadID);

  // Describe the key whose multipart upload is being finalized.
  final OmKeyArgs keyArgs = new OmKeyArgs.Builder()
      .setVolumeName(volumeName)
      .setBucketName(bucketName)
      .setKeyName(keyName)
      .setMultipartUploadID(uploadID)
      .setAcls(getAclList())
      .build();

  // Hand the client-supplied part-number -> part-name map to the Ozone
  // Manager, which stitches the parts into the final key.
  final OmMultipartUploadCompleteList completeList = new OmMultipartUploadCompleteList(partsMap);
  return ozoneManagerClient.completeMultipartUpload(keyArgs, completeList);
}
Uses of org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo in the Apache Ozone project.
Example: class ObjectEndpoint, method completeMultipartUpload.
/**
 * Complete a multipart upload: combines the previously uploaded parts
 * (identified by part number and ETag) into the final key and returns the
 * S3-style CompleteMultipartUpload XML response.
 *
 * @param bucket the bucket holding the in-progress upload
 * @param key the object key being assembled
 * @param uploadID the multipart upload ID returned at initiation
 * @param multipartUploadRequest the client-supplied list of (partNumber, eTag) pairs
 * @return 200 OK with a {@code CompleteMultipartUploadResponse} body
 * @throws IOException on transport failures talking to the Ozone Manager
 * @throws OS3Exception when the OM failure maps to a defined S3 error
 */
@POST
@Produces(MediaType.APPLICATION_XML)
public Response completeMultipartUpload(@PathParam("bucket") String bucket, @PathParam("path") String key, @QueryParam("uploadId") @DefaultValue("") String uploadID, CompleteMultipartUploadRequest multipartUploadRequest) throws IOException, OS3Exception {
  OzoneBucket ozoneBucket = getBucket(bucket);
  // Using LinkedHashMap to preserve ordering of parts list; the OM validates
  // that parts arrive in ascending part-number order.
  Map<Integer, String> partsMap = new LinkedHashMap<>();
  List<CompleteMultipartUploadRequest.Part> partList = multipartUploadRequest.getPartList();
  OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo;
  try {
    for (CompleteMultipartUploadRequest.Part part : partList) {
      partsMap.put(part.getPartNumber(), part.geteTag());
    }
    if (LOG.isDebugEnabled()) {
      LOG.debug("Parts map {}", partsMap);
    }
    omMultipartUploadCompleteInfo = ozoneBucket.completeMultipartUpload(key, uploadID, partsMap);
    CompleteMultipartUploadResponse completeMultipartUploadResponse = new CompleteMultipartUploadResponse();
    completeMultipartUploadResponse.setBucket(bucket);
    completeMultipartUploadResponse.setKey(key);
    completeMultipartUploadResponse.setETag(omMultipartUploadCompleteInfo.getHash());
    // Location also setting as bucket name.
    completeMultipartUploadResponse.setLocation(bucket);
    return Response.status(Status.OK).entity(completeMultipartUploadResponse).build();
  } catch (OMException ex) {
    // Translate each known OM result code into the matching S3 error so
    // standard S3 clients see the error they expect.
    if (ex.getResult() == ResultCodes.INVALID_PART) {
      throw newError(S3ErrorTable.INVALID_PART, key, ex);
    } else if (ex.getResult() == ResultCodes.INVALID_PART_ORDER) {
      throw newError(S3ErrorTable.INVALID_PART_ORDER, key, ex);
    } else if (ex.getResult() == ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR) {
      throw newError(NO_SUCH_UPLOAD, uploadID, ex);
    } else if (ex.getResult() == ResultCodes.ENTITY_TOO_SMALL) {
      throw newError(ENTITY_TOO_SMALL, key, ex);
    } else if (ex.getResult() == ResultCodes.INVALID_REQUEST) {
      OS3Exception os3Exception = newError(INVALID_REQUEST, key, ex);
      os3Exception.setErrorMessage("An error occurred (InvalidRequest) " + "when calling the CompleteMultipartUpload operation: You must " + "specify at least one part");
      throw os3Exception;
    } else if (ex.getResult() == ResultCodes.NOT_A_FILE) {
      OS3Exception os3Exception = newError(INVALID_REQUEST, key, ex);
      os3Exception.setErrorMessage("An error occurred (InvalidRequest) " + "when calling the CompleteMultipartUpload operation: " + OZONE_OM_ENABLE_FILESYSTEM_PATHS + " is enabled Keys are " + "considered as Unix Paths. A directory already exists with a " + "given KeyName caused failure for MPU");
      throw os3Exception;
    }
    // Fixed: the previous format string concatenated "bucket: {}, " + ", key: {}"
    // and rendered a duplicated separator ("bucket: b, , key: k").
    LOG.error("Error in Complete Multipart Upload Request for bucket: {}, key: {}", bucket, key, ex);
    throw ex;
  }
}
Uses of org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo in the Apache Ozone project.
Example: class TestOzoneRpcClientAbstract, method testCommitPartAfterCompleteUpload.
/**
 * Verifies that committing a part (closing its output stream) after the
 * multipart upload has already been completed fails with
 * NO_SUCH_MULTIPART_UPLOAD_ERROR, and that the completed key contains only
 * the parts that were included in the completion request.
 */
@Test
public void testCommitPartAfterCompleteUpload() throws Exception {
  String volumeName = UUID.randomUUID().toString();
  String bucketName = UUID.randomUUID().toString();
  String keyName = UUID.randomUUID().toString();
  store.createVolume(volumeName);
  OzoneVolume volume = store.getVolume(volumeName);
  volume.createBucket(bucketName);
  OzoneBucket bucket = volume.getBucket(bucketName);
  OmMultipartInfo omMultipartInfo = bucket.initiateMultipartUpload(keyName, RATIS, ONE);
  Assert.assertNotNull(omMultipartInfo.getUploadID());
  String uploadID = omMultipartInfo.getUploadID();
  // upload part 1.
  byte[] data = generateData(5 * 1024 * 1024, (byte) RandomUtils.nextLong());
  OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, data.length, 1, uploadID);
  ozoneOutputStream.write(data, 0, data.length);
  ozoneOutputStream.close();
  OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo = ozoneOutputStream.getCommitUploadPartInfo();
  // Do not close output stream for part 2 — it stays uncommitted so we can
  // try to commit it after the upload is completed below.
  ozoneOutputStream = bucket.createMultipartKey(keyName, data.length, 2, omMultipartInfo.getUploadID());
  ozoneOutputStream.write(data, 0, data.length);
  Map<Integer, String> partsMap = new LinkedHashMap<>();
  partsMap.put(1, omMultipartCommitUploadPartInfo.getPartName());
  OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo = bucket.completeMultipartUpload(keyName, uploadID, partsMap);
  // FIX: assert the completion result itself; previously this line re-checked
  // omMultipartCommitUploadPartInfo, leaving the complete call unverified.
  Assert.assertNotNull(omMultipartUploadCompleteInfo);
  Assert.assertNotNull(omMultipartCommitUploadPartInfo);
  byte[] fileContent = new byte[data.length];
  // FIX: close the input stream (was leaked in the original test).
  try (OzoneInputStream inputStream = bucket.readKey(keyName)) {
    inputStream.read(fileContent);
  }
  StringBuilder sb = new StringBuilder(data.length);
  // Combine all parts data, and check is it matching with get key data.
  String part1 = new String(data, UTF_8);
  sb.append(part1);
  Assert.assertEquals(sb.toString(), new String(fileContent, UTF_8));
  try {
    // Committing part 2 must fail: the upload no longer exists.
    ozoneOutputStream.close();
    fail("testCommitPartAfterCompleteUpload failed");
  } catch (IOException ex) {
    assertTrue(ex instanceof OMException);
    assertEquals(NO_SUCH_MULTIPART_UPLOAD_ERROR, ((OMException) ex).getResult());
  }
}
Uses of org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo in the Apache Ozone project.
Example: class TestOzoneClientMultipartUploadWithFSO, method testCommitPartAfterCompleteUpload.
/**
 * FSO-bucket variant: verifies that committing a part (closing its output
 * stream) after the multipart upload has already been completed fails with
 * NO_SUCH_MULTIPART_UPLOAD_ERROR, for a key nested under a directory path.
 */
@Test
public void testCommitPartAfterCompleteUpload() throws Exception {
  String volumeName = UUID.randomUUID().toString();
  String bucketName = UUID.randomUUID().toString();
  String parentDir = "a/b/c/d/";
  String keyName = parentDir + UUID.randomUUID().toString();
  store.createVolume(volumeName);
  OzoneVolume volume = store.getVolume(volumeName);
  volume.createBucket(bucketName);
  OzoneBucket bucket = volume.getBucket(bucketName);
  OmMultipartInfo omMultipartInfo = bucket.initiateMultipartUpload(keyName, RATIS, ONE);
  Assert.assertNotNull(omMultipartInfo.getUploadID());
  String uploadID = omMultipartInfo.getUploadID();
  // upload part 1.
  byte[] data = generateData(5 * 1024 * 1024, (byte) RandomUtils.nextLong());
  OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, data.length, 1, uploadID);
  ozoneOutputStream.write(data, 0, data.length);
  ozoneOutputStream.close();
  OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo = ozoneOutputStream.getCommitUploadPartInfo();
  // Do not close output stream for part 2 — it stays uncommitted so we can
  // try to commit it after the upload is completed below.
  ozoneOutputStream = bucket.createMultipartKey(keyName, data.length, 2, omMultipartInfo.getUploadID());
  ozoneOutputStream.write(data, 0, data.length);
  Map<Integer, String> partsMap = new LinkedHashMap<>();
  partsMap.put(1, omMultipartCommitUploadPartInfo.getPartName());
  OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo = bucket.completeMultipartUpload(keyName, uploadID, partsMap);
  Assert.assertNotNull(omMultipartUploadCompleteInfo);
  Assert.assertNotNull(omMultipartCommitUploadPartInfo);
  byte[] fileContent = new byte[data.length];
  // FIX: close the input stream (was leaked in the original test).
  try (OzoneInputStream inputStream = bucket.readKey(keyName)) {
    inputStream.read(fileContent);
  }
  StringBuilder sb = new StringBuilder(data.length);
  // Combine all parts data, and check is it matching with get key data.
  String part1 = new String(data, UTF_8);
  sb.append(part1);
  Assert.assertEquals(sb.toString(), new String(fileContent, UTF_8));
  try {
    // Committing part 2 must fail: the upload no longer exists.
    ozoneOutputStream.close();
    Assert.fail("testCommitPartAfterCompleteUpload failed");
  } catch (IOException ex) {
    Assert.assertTrue(ex instanceof OMException);
    Assert.assertEquals(NO_SUCH_MULTIPART_UPLOAD_ERROR, ((OMException) ex).getResult());
  }
}
Uses of org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo in the Apache Ozone project.
Example: class TestOzoneClientMultipartUploadWithFSO, helper method completeMultipartUpload.
/**
 * Completes the given multipart upload and sanity-checks that the returned
 * info refers to the same volume, bucket, and key that were completed, and
 * that an ETag-like hash was produced.
 */
private void completeMultipartUpload(OzoneBucket bucket, String keyName, String uploadID, Map<Integer, String> partsMap) throws Exception {
  OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo = bucket.completeMultipartUpload(keyName, uploadID, partsMap);
  Assert.assertNotNull(omMultipartUploadCompleteInfo);
  // FIX: JUnit's assertEquals takes (expected, actual); the previous order
  // was reversed, which produced misleading failure messages.
  Assert.assertEquals(bucket.getName(), omMultipartUploadCompleteInfo.getBucket());
  Assert.assertEquals(bucket.getVolumeName(), omMultipartUploadCompleteInfo.getVolume());
  Assert.assertEquals(keyName, omMultipartUploadCompleteInfo.getKey());
  Assert.assertNotNull(omMultipartUploadCompleteInfo.getHash());
}
Aggregations