Use of `org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER` in project ozone by Apache.
From class ObjectEndpoint, method initializeMultipartUpload:
/**
 * Initialize a MultiPartUpload request (S3 CreateMultipartUpload).
 * <p>
 * Note: the specific content type is set by the HeaderPreprocessor.
 *
 * @param bucket bucket name taken from the request path
 * @param key    object key taken from the request path
 * @return 200 OK whose XML entity carries bucket, key and the new upload id
 * @throws IOException  on communication failure with Ozone
 * @throws OS3Exception mapped S3 error, e.g. AccessDenied on
 *                      PERMISSION_DENIED from OM
 */
@POST
@Produces(MediaType.APPLICATION_XML)
@Consumes(HeaderPreprocessor.MULTIPART_UPLOAD_MARKER)
public Response initializeMultipartUpload(@PathParam("bucket") String bucket, @PathParam("path") String key) throws IOException, OS3Exception {
  try {
    OzoneBucket ozoneBucket = getBucket(bucket);
    String storageType = headers.getHeaderString(STORAGE_CLASS_HEADER);
    // Fall back to the cluster-configured default replication when the client
    // sent no x-amz-storage-class header (or sent an empty one).
    S3StorageType s3StorageType;
    if (storageType == null || storageType.isEmpty()) {
      s3StorageType = S3StorageType.getDefault(ozoneConfiguration);
    } else {
      s3StorageType = toS3StorageType(storageType);
    }
    ReplicationType replicationType = s3StorageType.getType();
    ReplicationFactor replicationFactor = s3StorageType.getFactor();
    OmMultipartInfo multipartInfo = ozoneBucket.initiateMultipartUpload(key, replicationType, replicationFactor);
    MultipartUploadInitiateResponse multipartUploadInitiateResponse = new MultipartUploadInitiateResponse();
    multipartUploadInitiateResponse.setBucket(bucket);
    multipartUploadInitiateResponse.setKey(key);
    multipartUploadInitiateResponse.setUploadID(multipartInfo.getUploadID());
    return Response.status(Status.OK).entity(multipartUploadInitiateResponse).build();
  } catch (OMException ex) {
    if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
      throw newError(S3ErrorTable.ACCESS_DENIED, key, ex);
    }
    LOG.error("Error in Initiate Multipart Upload Request for bucket: {}, " + "key: {}", bucket, key, ex);
    throw ex;
  }
}
Use of `org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER` in project ozone by Apache.
From class ObjectEndpoint, method put:
/**
 * Rest endpoint to upload an object to a bucket.
 * <p>
 * See: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html for
 * more details.
 * <p>
 * Also serves multipart part-upload (when {@code uploadId} is present) and
 * server-side copy (when the copy-source header is present).
 *
 * @param bucketName target bucket from the request path
 * @param keyPath    target key from the request path
 * @param length     declared Content-Length of the body
 * @param partNumber part number for a multipart part upload; ignored otherwise
 * @param uploadID   multipart upload id; empty/null means a plain PUT
 * @param body       request payload stream
 * @return 200 OK on success (copy responses carry the copy-result XML)
 * @throws IOException  on communication failure with Ozone
 * @throws OS3Exception mapped S3 error (InvalidRequest, AccessDenied, ...)
 */
@PUT
public Response put(@PathParam("bucket") String bucketName, @PathParam("path") String keyPath, @HeaderParam("Content-Length") long length, @QueryParam("partNumber") int partNumber, @QueryParam("uploadId") @DefaultValue("") String uploadID, InputStream body) throws IOException, OS3Exception {
  OzoneOutputStream output = null;
  // Keep the explicit null check: @DefaultValue only applies to JAX-RS
  // injection; tests (and other in-process callers) invoke this with null.
  if (uploadID != null && !uploadID.isEmpty()) {
    // If uploadID is specified, it is a request for upload part
    return createMultipartKey(bucketName, keyPath, length, partNumber, uploadID, body);
  }
  try {
    String copyHeader = headers.getHeaderString(COPY_SOURCE_HEADER);
    String storageType = headers.getHeaderString(STORAGE_CLASS_HEADER);
    // Resolve replication from x-amz-storage-class, remembering whether the
    // default was used (copyObject treats an explicit class differently).
    S3StorageType s3StorageType;
    boolean storageTypeDefault;
    if (storageType == null || storageType.isEmpty()) {
      s3StorageType = S3StorageType.getDefault(ozoneConfiguration);
      storageTypeDefault = true;
    } else {
      s3StorageType = toS3StorageType(storageType);
      storageTypeDefault = false;
    }
    ReplicationType replicationType = s3StorageType.getType();
    ReplicationFactor replicationFactor = s3StorageType.getFactor();
    if (copyHeader != null) {
      // Copy object, as copy source available.
      CopyObjectResponse copyObjectResponse = copyObject(copyHeader, bucketName, keyPath, replicationType, replicationFactor, storageTypeDefault);
      return Response.status(Status.OK).entity(copyObjectResponse).header("Connection", "close").build();
    }
    // Normal put object
    OzoneBucket bucket = getBucket(bucketName);
    output = bucket.createKey(keyPath, length, replicationType, replicationFactor, new HashMap<>());
    // AWS chunked (streaming) signatures interleave signature frames with the
    // payload; unwrap them before copying the data into the key.
    if ("STREAMING-AWS4-HMAC-SHA256-PAYLOAD".equals(headers.getHeaderString("x-amz-content-sha256"))) {
      body = new SignedChunksInputStream(body);
    }
    IOUtils.copy(body, output);
    return Response.ok().status(HttpStatus.SC_OK).build();
  } catch (OMException ex) {
    if (ex.getResult() == ResultCodes.NOT_A_FILE) {
      OS3Exception os3Exception = newError(INVALID_REQUEST, keyPath, ex);
      os3Exception.setErrorMessage("An error occurred (InvalidRequest) " + "when calling the PutObject/MPU PartUpload operation: " + OZONE_OM_ENABLE_FILESYSTEM_PATHS + " is enabled Keys are" + " considered as Unix Paths. Path has Violated FS Semantics " + "which caused put operation to fail.");
      throw os3Exception;
    } else if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
      throw newError(S3ErrorTable.ACCESS_DENIED, keyPath, ex);
    }
    LOG.error("Exception occurred in PutObject", ex);
    throw ex;
  } finally {
    // Close commits the key; output is null on the copy and error-before-create
    // paths, so guard explicitly rather than using try-with-resources.
    if (output != null) {
      output.close();
    }
  }
}
Use of `org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER` in project ozone by Apache.
From class TestObjectPut, method testInvalidStorageType:
@Test
public void testInvalidStorageType() throws IOException {
  // Arrange: a request whose x-amz-storage-class header carries an
  // unrecognized value.
  HttpHeaders mockHeaders = Mockito.mock(HttpHeaders.class);
  when(mockHeaders.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn("random");
  objectEndpoint.setHeaders(mockHeaders);
  keyName = "sourceKey";
  ByteArrayInputStream requestBody = new ByteArrayInputStream(CONTENT.getBytes(UTF_8));
  // Act + assert: the PUT must be rejected with InvalidArgument naming the
  // offending storage class.
  try {
    objectEndpoint.put(bucketName, keyName, CONTENT.length(), 1, null, requestBody);
    fail("testInvalidStorageType");
  } catch (OS3Exception ex) {
    assertEquals(S3ErrorTable.INVALID_ARGUMENT.getErrorMessage(), ex.getErrorMessage());
    assertEquals("random", ex.getResource());
  }
}
Use of `org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER` in project ozone by Apache.
From class TestAbortMultipartUpload, method testAbortMultipartUpload:
@Test
public void testAbortMultipartUpload() throws Exception {
  String bucket = OzoneConsts.S3_BUCKET;
  String key = OzoneConsts.KEY;
  OzoneClient client = new OzoneClientStub();
  client.getObjectStore().createS3Bucket(bucket);
  HttpHeaders headers = Mockito.mock(HttpHeaders.class);
  when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn("STANDARD");
  ObjectEndpoint rest = new ObjectEndpoint();
  rest.setHeaders(headers);
  rest.setClient(client);
  rest.setOzoneConfiguration(new OzoneConfiguration());
  Response response = rest.initializeMultipartUpload(bucket, key);
  assertEquals(200, response.getStatus());
  MultipartUploadInitiateResponse multipartUploadInitiateResponse = (MultipartUploadInitiateResponse) response.getEntity();
  assertNotNull(multipartUploadInitiateResponse.getUploadID());
  String uploadID = multipartUploadInitiateResponse.getUploadID();
  // Abort multipart upload
  response = rest.delete(bucket, key, uploadID);
  assertEquals(204, response.getStatus());
  // test with unknown upload Id.
  try {
    rest.delete(bucket, key, "random");
    // Fix: without this fail() the test silently passed when no exception
    // was thrown for an unknown upload id.
    fail("Abort with an unknown upload id should throw NO_SUCH_UPLOAD");
  } catch (OS3Exception ex) {
    assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getCode(), ex.getCode());
    assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getErrorMessage(), ex.getErrorMessage());
  }
}
Aggregations