Usage of org.apache.hadoop.ozone.s3.endpoint.CompleteMultipartUploadRequest.Part in the Apache Ozone project:
class ObjectEndpoint, method completeMultipartUpload.
/**
 * Completes a multipart upload for the given key, assembling the previously
 * uploaded parts identified in the request body.
 * <p>
 * OM-level failures are translated into the corresponding S3 error responses
 * so clients see standard AWS-style error codes.
 *
 * @param bucket bucket name taken from the request path
 * @param key object key taken from the request path
 * @param uploadID multipart upload identifier; empty string when absent
 * @param multipartUploadRequest parsed XML body listing the uploaded parts
 * @return 200 OK with a {@code CompleteMultipartUploadResponse} entity
 * @throws IOException on failure communicating with Ozone
 * @throws OS3Exception S3-mapped error for known OM failure codes
 */
@POST
@Produces(MediaType.APPLICATION_XML)
public Response completeMultipartUpload(@PathParam("bucket") String bucket, @PathParam("path") String key, @QueryParam("uploadId") @DefaultValue("") String uploadID, CompleteMultipartUploadRequest multipartUploadRequest) throws IOException, OS3Exception {
  OzoneBucket ozoneBucket = getBucket(bucket);
  // Using LinkedHashMap to preserve ordering of parts list.
  Map<Integer, String> partsMap = new LinkedHashMap<>();
  List<CompleteMultipartUploadRequest.Part> partList = multipartUploadRequest.getPartList();
  OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo;
  try {
    for (CompleteMultipartUploadRequest.Part part : partList) {
      partsMap.put(part.getPartNumber(), part.geteTag());
    }
    if (LOG.isDebugEnabled()) {
      LOG.debug("Parts map {}", partsMap);
    }
    omMultipartUploadCompleteInfo = ozoneBucket.completeMultipartUpload(key, uploadID, partsMap);
    CompleteMultipartUploadResponse completeMultipartUploadResponse = new CompleteMultipartUploadResponse();
    completeMultipartUploadResponse.setBucket(bucket);
    completeMultipartUploadResponse.setKey(key);
    completeMultipartUploadResponse.setETag(omMultipartUploadCompleteInfo.getHash());
    // Location also setting as bucket name.
    completeMultipartUploadResponse.setLocation(bucket);
    return Response.status(Status.OK).entity(completeMultipartUploadResponse).build();
  } catch (OMException ex) {
    // Map known OM result codes onto the S3 errors AWS clients expect.
    if (ex.getResult() == ResultCodes.INVALID_PART) {
      throw newError(S3ErrorTable.INVALID_PART, key, ex);
    } else if (ex.getResult() == ResultCodes.INVALID_PART_ORDER) {
      throw newError(S3ErrorTable.INVALID_PART_ORDER, key, ex);
    } else if (ex.getResult() == ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR) {
      // S3 reports the upload id, not the key, for an unknown upload.
      throw newError(NO_SUCH_UPLOAD, uploadID, ex);
    } else if (ex.getResult() == ResultCodes.ENTITY_TOO_SMALL) {
      throw newError(ENTITY_TOO_SMALL, key, ex);
    } else if (ex.getResult() == ResultCodes.INVALID_REQUEST) {
      OS3Exception os3Exception = newError(INVALID_REQUEST, key, ex);
      os3Exception.setErrorMessage("An error occurred (InvalidRequest) " + "when calling the CompleteMultipartUpload operation: You must " + "specify at least one part");
      throw os3Exception;
    } else if (ex.getResult() == ResultCodes.NOT_A_FILE) {
      OS3Exception os3Exception = newError(INVALID_REQUEST, key, ex);
      os3Exception.setErrorMessage("An error occurred (InvalidRequest) " + "when calling the CompleteMultipartUpload operation: " + OZONE_OM_ENABLE_FILESYSTEM_PATHS + " is enabled Keys are " + "considered as Unix Paths. A directory already exists with a " + "given KeyName caused failure for MPU");
      throw os3Exception;
    }
    // Fixed log format: previous message emitted a stray ", ," between
    // the bucket and key placeholders.
    LOG.error("Error in Complete Multipart Upload Request for bucket: {}, key: {}", bucket, key, ex);
    throw ex;
  }
}
Usage of org.apache.hadoop.ozone.s3.endpoint.CompleteMultipartUploadRequest.Part in the Apache Ozone project:
class ObjectEndpoint, method put.
/**
 * Rest endpoint to upload object to a bucket.
 * <p>
 * Handles three cases: an MPU part upload (when {@code uploadId} is present),
 * a server-side copy (when the copy-source header is present), and a normal
 * object put.
 * <p>
 * See: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html for
 * more details.
 *
 * @param bucketName destination bucket from the request path
 * @param keyPath destination key from the request path
 * @param length declared Content-Length of the body
 * @param partNumber part number, only meaningful for MPU part uploads
 * @param uploadID multipart upload id; empty string for a plain put
 * @param body request payload stream
 * @return 200 OK on success
 * @throws IOException on failure writing to Ozone
 * @throws OS3Exception S3-mapped error for known OM failure codes
 */
@PUT
public Response put(@PathParam("bucket") String bucketName, @PathParam("path") String keyPath, @HeaderParam("Content-Length") long length, @QueryParam("partNumber") int partNumber, @QueryParam("uploadId") @DefaultValue("") String uploadID, InputStream body) throws IOException, OS3Exception {
  OzoneOutputStream output = null;
  if (uploadID != null && !uploadID.isEmpty()) {
    // If uploadID is specified, it is a request for upload part
    return createMultipartKey(bucketName, keyPath, length, partNumber, uploadID, body);
  }
  try {
    String copyHeader = headers.getHeaderString(COPY_SOURCE_HEADER);
    String storageType = headers.getHeaderString(STORAGE_CLASS_HEADER);
    S3StorageType s3StorageType;
    boolean storageTypeDefault;
    if (storageType == null || storageType.isEmpty()) {
      s3StorageType = S3StorageType.getDefault(ozoneConfiguration);
      storageTypeDefault = true;
    } else {
      s3StorageType = toS3StorageType(storageType);
      storageTypeDefault = false;
    }
    ReplicationType replicationType = s3StorageType.getType();
    ReplicationFactor replicationFactor = s3StorageType.getFactor();
    if (copyHeader != null) {
      // Copy object, as copy source available.
      CopyObjectResponse copyObjectResponse = copyObject(copyHeader, bucketName, keyPath, replicationType, replicationFactor, storageTypeDefault);
      return Response.status(Status.OK).entity(copyObjectResponse).header("Connection", "close").build();
    }
    // Normal put object
    OzoneBucket bucket = getBucket(bucketName);
    output = bucket.createKey(keyPath, length, replicationType, replicationFactor, new HashMap<>());
    // AWS SigV4 chunked uploads wrap the payload in signed chunks that
    // must be decoded before writing the raw bytes.
    if ("STREAMING-AWS4-HMAC-SHA256-PAYLOAD".equals(headers.getHeaderString("x-amz-content-sha256"))) {
      body = new SignedChunksInputStream(body);
    }
    IOUtils.copy(body, output);
    return Response.ok().status(HttpStatus.SC_OK).build();
  } catch (OMException ex) {
    if (ex.getResult() == ResultCodes.NOT_A_FILE) {
      OS3Exception os3Exception = newError(INVALID_REQUEST, keyPath, ex);
      os3Exception.setErrorMessage("An error occurred (InvalidRequest) " + "when calling the PutObject/MPU PartUpload operation: " + OZONE_OM_ENABLE_FILESYSTEM_PATHS + " is enabled Keys are" + " considered as Unix Paths. Path has Violated FS Semantics " + "which caused put operation to fail.");
      throw os3Exception;
    } else if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
      throw newError(S3ErrorTable.ACCESS_DENIED, keyPath, ex);
    }
    LOG.error("Exception occurred in PutObject", ex);
    throw ex;
  } finally {
    // createKey may have opened the stream even if copy failed; always
    // close it so the key commit/abort happens.
    if (output != null) {
      output.close();
    }
  }
}
Usage of org.apache.hadoop.ozone.s3.endpoint.CompleteMultipartUploadRequest.Part in the Apache Ozone project:
class TestMultipartUploadWithCopy, method uploadPart.
/**
 * Uploads one MPU part via the REST endpoint and returns a Part record
 * (part number + ETag) suitable for a CompleteMultipartUpload request.
 */
private Part uploadPart(String key, String uploadID, int partNumber, String content) throws IOException, OS3Exception {
  setHeaders();
  ByteArrayInputStream requestBody = new ByteArrayInputStream(content.getBytes(UTF_8));
  Response putResponse = REST.put(OzoneConsts.S3_BUCKET, key, content.length(), partNumber, uploadID, requestBody);
  assertEquals(200, putResponse.getStatus());
  String eTag = putResponse.getHeaderString("ETag");
  assertNotNull(eTag);
  Part uploadedPart = new Part();
  uploadedPart.seteTag(eTag);
  uploadedPart.setPartNumber(partNumber);
  return uploadedPart;
}
Usage of org.apache.hadoop.ozone.s3.endpoint.CompleteMultipartUploadRequest.Part in the Apache Ozone project:
class TestMultipartUploadWithCopy, method testMultipart.
@Test
public void testMultipart() throws Exception {
  // Start a new multipart upload for KEY.
  String uploadID = initiateMultipartUpload(KEY);
  List<Part> parts = new ArrayList<>();

  // Part 1: plain upload of inline content.
  String content = "Multipart Upload 1";
  parts.add(uploadPart(KEY, uploadID, 1, content));

  // Part 2: full server-side copy of an existing key.
  parts.add(uploadPartWithCopy(KEY, uploadID, 2, OzoneConsts.S3_BUCKET + "/" + EXISTING_KEY, null));

  // Part 3: ranged copy (first four bytes) of the same source.
  parts.add(uploadPartWithCopy(KEY, uploadID, 3, OzoneConsts.S3_BUCKET + "/" + EXISTING_KEY, "bytes=0-3"));

  // Re-upload part number 3 with modification-time preconditions; this
  // intentionally reuses part number 3, so the completed object still
  // consists of three parts.
  parts.add(uploadPartWithCopy(KEY, uploadID, 3, OzoneConsts.S3_BUCKET + "/" + EXISTING_KEY, "bytes=0-3", beforeSourceKeyModificationTimeStr, afterSourceKeyModificationTimeStr));

  // Finish the upload with the collected part list.
  CompleteMultipartUploadRequest completeRequest = new CompleteMultipartUploadRequest();
  completeRequest.setPartList(parts);
  completeMultipartUpload(KEY, completeRequest, uploadID);

  // Verify the assembled object: part1 + full copy + first four bytes.
  OzoneBucket bucket = CLIENT.getObjectStore().getS3Bucket(OzoneConsts.S3_BUCKET);
  try (InputStream is = bucket.readKey(KEY)) {
    String keyContent = new Scanner(is, UTF_8.name()).useDelimiter("\\A").next();
    Assert.assertEquals(content + EXISTING_KEY_CONTENT + EXISTING_KEY_CONTENT.substring(0, 4), keyContent);
  }
}
Usage of org.apache.hadoop.ozone.s3.endpoint.CompleteMultipartUploadRequest.Part in the Apache Ozone project:
class TestMultipartUploadComplete, method testMultipartInvalidPartError.
/**
 * Verifies that completing a multipart upload with a corrupted part ETag
 * fails with the S3 InvalidPart error code.
 */
@Test
public void testMultipartInvalidPartError() throws Exception {
  // Initiate multipart upload
  String key = UUID.randomUUID().toString();
  String uploadID = initiateMultipartUpload(key);
  List<Part> partsList = new ArrayList<>();
  // Upload parts
  String content = "Multipart Upload 1";
  int partNumber = 1;
  Part part1 = uploadPart(key, uploadID, partNumber, content);
  // Corrupt the ETag so the server cannot match it to the stored part.
  part1.seteTag("random");
  partsList.add(part1);
  content = "Multipart Upload 2";
  partNumber = 2;
  Part part2 = uploadPart(key, uploadID, partNumber, content);
  partsList.add(part2);
  // complete multipart upload
  CompleteMultipartUploadRequest completeMultipartUploadRequest = new CompleteMultipartUploadRequest();
  completeMultipartUploadRequest.setPartList(partsList);
  try {
    completeMultipartUpload(key, completeMultipartUploadRequest, uploadID);
    fail("testMultipartInvalidPartError");
  } catch (OS3Exception ex) {
    // Fixed argument order: JUnit assertEquals takes (expected, actual).
    assertEquals(S3ErrorTable.INVALID_PART.getCode(), ex.getCode());
  }
}
Aggregations