
Example 1 with STORAGE_CLASS_HEADER

Use of org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER in project ozone by apache.

From the class ObjectEndpoint, the method initializeMultipartUpload:

/**
 * Initialize MultiPartUpload request.
 * <p>
 * Note: the specific content type is set by the HeaderPreprocessor.
 */
@POST
@Produces(MediaType.APPLICATION_XML)
@Consumes(HeaderPreprocessor.MULTIPART_UPLOAD_MARKER)
public Response initializeMultipartUpload(
        @PathParam("bucket") String bucket,
        @PathParam("path") String key) throws IOException, OS3Exception {
    try {
        OzoneBucket ozoneBucket = getBucket(bucket);
        String storageType = headers.getHeaderString(STORAGE_CLASS_HEADER);
        S3StorageType s3StorageType;
        if (storageType == null || storageType.equals("")) {
            s3StorageType = S3StorageType.getDefault(ozoneConfiguration);
        } else {
            s3StorageType = toS3StorageType(storageType);
        }
        ReplicationType replicationType = s3StorageType.getType();
        ReplicationFactor replicationFactor = s3StorageType.getFactor();
        OmMultipartInfo multipartInfo = ozoneBucket.initiateMultipartUpload(key, replicationType, replicationFactor);
        MultipartUploadInitiateResponse multipartUploadInitiateResponse = new MultipartUploadInitiateResponse();
        multipartUploadInitiateResponse.setBucket(bucket);
        multipartUploadInitiateResponse.setKey(key);
        multipartUploadInitiateResponse.setUploadID(multipartInfo.getUploadID());
        return Response.status(Status.OK).entity(multipartUploadInitiateResponse).build();
    } catch (OMException ex) {
        if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
            throw newError(S3ErrorTable.ACCESS_DENIED, key, ex);
        }
        LOG.error("Error in Initiate Multipart Upload Request for bucket: {}, " + "key: {}", bucket, key, ex);
        throw ex;
    }
}
Also used: OzoneBucket (org.apache.hadoop.ozone.client.OzoneBucket), S3StorageType (org.apache.hadoop.ozone.s3.util.S3StorageType), ReplicationType (org.apache.hadoop.hdds.client.ReplicationType), ReplicationFactor (org.apache.hadoop.hdds.client.ReplicationFactor), OmMultipartInfo (org.apache.hadoop.ozone.om.helpers.OmMultipartInfo), OMException (org.apache.hadoop.ozone.om.exceptions.OMException), POST (javax.ws.rs.POST), Produces (javax.ws.rs.Produces), Consumes (javax.ws.rs.Consumes)
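
Both endpoint methods in these examples delegate header parsing to a toS3StorageType helper that the excerpts do not show. Judging from the behaviour asserted in Example 3 below (an unrecognised value such as "random" surfaces as S3ErrorTable.INVALID_ARGUMENT with the raw header value as the resource), a plausible sketch is the following; the actual method body in Ozone may differ:

private S3StorageType toS3StorageType(String storageType) throws OS3Exception {
    try {
        // S3StorageType is an enum of the supported S3 storage classes
        // (Example 4 shows that "STANDARD" is one of them), so valueOf
        // rejects any value that is not a declared constant.
        return S3StorageType.valueOf(storageType);
    } catch (IllegalArgumentException ex) {
        // Surface the unrecognised header value as the S3 InvalidArgument
        // error, carrying the offending value as the resource, which is
        // exactly what the test in Example 3 asserts.
        throw newError(S3ErrorTable.INVALID_ARGUMENT, storageType, ex);
    }
}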

Example 2 with STORAGE_CLASS_HEADER

Use of org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER in project ozone by apache.

From the class ObjectEndpoint, the method put:

/**
 * Rest endpoint to upload object to a bucket.
 * <p>
 * See: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html for
 * more details.
 */
@PUT
public Response put(
        @PathParam("bucket") String bucketName,
        @PathParam("path") String keyPath,
        @HeaderParam("Content-Length") long length,
        @QueryParam("partNumber") int partNumber,
        @QueryParam("uploadId") @DefaultValue("") String uploadID,
        InputStream body) throws IOException, OS3Exception {
    OzoneOutputStream output = null;
    if (uploadID != null && !uploadID.equals("")) {
        // If uploadID is specified, it is a request for upload part
        return createMultipartKey(bucketName, keyPath, length, partNumber, uploadID, body);
    }
    try {
        String copyHeader = headers.getHeaderString(COPY_SOURCE_HEADER);
        String storageType = headers.getHeaderString(STORAGE_CLASS_HEADER);
        S3StorageType s3StorageType;
        boolean storageTypeDefault;
        if (storageType == null || storageType.equals("")) {
            s3StorageType = S3StorageType.getDefault(ozoneConfiguration);
            storageTypeDefault = true;
        } else {
            s3StorageType = toS3StorageType(storageType);
            storageTypeDefault = false;
        }
        ReplicationType replicationType = s3StorageType.getType();
        ReplicationFactor replicationFactor = s3StorageType.getFactor();
        if (copyHeader != null) {
            // Copy object, as copy source available.
            CopyObjectResponse copyObjectResponse = copyObject(copyHeader, bucketName, keyPath, replicationType, replicationFactor, storageTypeDefault);
            return Response.status(Status.OK).entity(copyObjectResponse).header("Connection", "close").build();
        }
        // Normal put object
        OzoneBucket bucket = getBucket(bucketName);
        output = bucket.createKey(keyPath, length, replicationType, replicationFactor, new HashMap<>());
        if ("STREAMING-AWS4-HMAC-SHA256-PAYLOAD".equals(headers.getHeaderString("x-amz-content-sha256"))) {
            body = new SignedChunksInputStream(body);
        }
        IOUtils.copy(body, output);
        return Response.ok().status(HttpStatus.SC_OK).build();
    } catch (OMException ex) {
        if (ex.getResult() == ResultCodes.NOT_A_FILE) {
            OS3Exception os3Exception = newError(INVALID_REQUEST, keyPath, ex);
            os3Exception.setErrorMessage("An error occurred (InvalidRequest) " + "when calling the PutObject/MPU PartUpload operation: " + OZONE_OM_ENABLE_FILESYSTEM_PATHS + " is enabled Keys are" + " considered as Unix Paths. Path has Violated FS Semantics " + "which caused put operation to fail.");
            throw os3Exception;
        } else if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
            throw newError(S3ErrorTable.ACCESS_DENIED, keyPath, ex);
        }
        LOG.error("Exception occurred in PutObject", ex);
        throw ex;
    } finally {
        if (output != null) {
            output.close();
        }
    }
}
Also used: S3StorageType (org.apache.hadoop.ozone.s3.util.S3StorageType), OzoneBucket (org.apache.hadoop.ozone.client.OzoneBucket), ReplicationType (org.apache.hadoop.hdds.client.ReplicationType), ReplicationFactor (org.apache.hadoop.hdds.client.ReplicationFactor), HashMap (java.util.HashMap), LinkedHashMap (java.util.LinkedHashMap), SignedChunksInputStream (org.apache.hadoop.ozone.s3.SignedChunksInputStream), OzoneOutputStream (org.apache.hadoop.ozone.client.io.OzoneOutputStream), OS3Exception (org.apache.hadoop.ozone.s3.exception.OS3Exception), OMException (org.apache.hadoop.ozone.om.exceptions.OMException), PUT (javax.ws.rs.PUT)
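
For contrast with the invalid-value test in Example 3 below, a minimal happy-path sketch, reusing Example 3's fixtures (objectEndpoint, bucketName, CONTENT; the key name here is arbitrary) and the "STANDARD" storage class that Example 4 shows is accepted, could look like this:

HttpHeaders headers = Mockito.mock(HttpHeaders.class);
when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn("STANDARD");
objectEndpoint.setHeaders(headers);
ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes(UTF_8));
// With a recognised storage class, toS3StorageType succeeds and the key is
// created with the replication settings that class maps to; put answers 200.
Response response = objectEndpoint.put(bucketName, "putKey", CONTENT.length(), 1, null, body);
assertEquals(200, response.getStatus());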

Example 3 with STORAGE_CLASS_HEADER

Use of org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER in project ozone by apache.

From the class TestObjectPut, the method testInvalidStorageType:

@Test
public void testInvalidStorageType() throws IOException {
    HttpHeaders headers = Mockito.mock(HttpHeaders.class);
    ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes(UTF_8));
    objectEndpoint.setHeaders(headers);
    keyName = "sourceKey";
    when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn("random");
    try {
        objectEndpoint.put(bucketName, keyName, CONTENT.length(), 1, null, body);
        fail("testInvalidStorageType");
    } catch (OS3Exception ex) {
        assertEquals(S3ErrorTable.INVALID_ARGUMENT.getErrorMessage(), ex.getErrorMessage());
        assertEquals("random", ex.getResource());
    }
}
Also used: HttpHeaders (javax.ws.rs.core.HttpHeaders), ByteArrayInputStream (java.io.ByteArrayInputStream), OS3Exception (org.apache.hadoop.ozone.s3.exception.OS3Exception), Test (org.junit.Test)
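
The newError helper used throughout these examples is also not excerpted. From the assertions above, which read getCode, getErrorMessage and getResource off the thrown OS3Exception, a plausible sketch, with the OS3Exception constructor signature assumed, is:

public static OS3Exception newError(OS3Exception template, String resource, Exception cause) {
    // Copy the static template from S3ErrorTable so the shared constant is
    // never mutated, then stamp the request-specific resource on the copy.
    // The cause would typically be logged; it is ignored in this sketch.
    OS3Exception err = new OS3Exception(template.getCode(), template.getErrorMessage(), template.getHttpCode());
    err.setResource(resource);
    return err;
}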

Example 4 with STORAGE_CLASS_HEADER

Use of org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER in project ozone by apache.

From the class TestAbortMultipartUpload, the method testAbortMultipartUpload:

@Test
public void testAbortMultipartUpload() throws Exception {
    String bucket = OzoneConsts.S3_BUCKET;
    String key = OzoneConsts.KEY;
    OzoneClient client = new OzoneClientStub();
    client.getObjectStore().createS3Bucket(bucket);
    HttpHeaders headers = Mockito.mock(HttpHeaders.class);
    when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn("STANDARD");
    ObjectEndpoint rest = new ObjectEndpoint();
    rest.setHeaders(headers);
    rest.setClient(client);
    rest.setOzoneConfiguration(new OzoneConfiguration());
    Response response = rest.initializeMultipartUpload(bucket, key);
    assertEquals(200, response.getStatus());
    MultipartUploadInitiateResponse multipartUploadInitiateResponse = (MultipartUploadInitiateResponse) response.getEntity();
    assertNotNull(multipartUploadInitiateResponse.getUploadID());
    String uploadID = multipartUploadInitiateResponse.getUploadID();
    // Abort multipart upload
    response = rest.delete(bucket, key, uploadID);
    assertEquals(204, response.getStatus());
    // test with unknown upload Id.
    try {
        rest.delete(bucket, key, "random");
        fail("Aborting with an unknown uploadId should throw NoSuchUpload");
    } catch (OS3Exception ex) {
        assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getCode(), ex.getCode());
        assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getErrorMessage(), ex.getErrorMessage());
    }
}
Also used: Response (javax.ws.rs.core.Response), HttpHeaders (javax.ws.rs.core.HttpHeaders), OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration), OzoneClientStub (org.apache.hadoop.ozone.client.OzoneClientStub), OzoneClient (org.apache.hadoop.ozone.client.OzoneClient), OS3Exception (org.apache.hadoop.ozone.s3.exception.OS3Exception), Test (org.junit.Test)
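
The delete endpoint that performs the abort is not excerpted either. Based on the NoSuchUpload assertion above, a minimal sketch of the abort branch, with the OM result-code name and the OzoneBucket.abortMultipartUpload call assumed, could be:

private Response abortMultipartUpload(OzoneBucket bucket, String key, String uploadId) throws IOException, OS3Exception {
    try {
        // Ask the Ozone Manager to discard all parts uploaded under this ID.
        bucket.abortMultipartUpload(key, uploadId);
    } catch (OMException ex) {
        if (ex.getResult() == ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR) {
            // An unknown upload ID surfaces as the S3 NoSuchUpload error,
            // which is what the test above asserts.
            throw newError(S3ErrorTable.NO_SUCH_UPLOAD, uploadId, ex);
        }
        throw ex;
    }
    // Successful aborts answer 204 No Content, matching the first assertion.
    return Response.status(Status.NO_CONTENT).build();
}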

Aggregations

OS3Exception (org.apache.hadoop.ozone.s3.exception.OS3Exception): 3 usages
HttpHeaders (javax.ws.rs.core.HttpHeaders): 2 usages
ReplicationFactor (org.apache.hadoop.hdds.client.ReplicationFactor): 2 usages
ReplicationType (org.apache.hadoop.hdds.client.ReplicationType): 2 usages
OzoneBucket (org.apache.hadoop.ozone.client.OzoneBucket): 2 usages
OMException (org.apache.hadoop.ozone.om.exceptions.OMException): 2 usages
S3StorageType (org.apache.hadoop.ozone.s3.util.S3StorageType): 2 usages
Test (org.junit.Test): 2 usages
ByteArrayInputStream (java.io.ByteArrayInputStream): 1 usage
HashMap (java.util.HashMap): 1 usage
LinkedHashMap (java.util.LinkedHashMap): 1 usage
Consumes (javax.ws.rs.Consumes): 1 usage
POST (javax.ws.rs.POST): 1 usage
PUT (javax.ws.rs.PUT): 1 usage
Produces (javax.ws.rs.Produces): 1 usage
Response (javax.ws.rs.core.Response): 1 usage
OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration): 1 usage
OzoneClient (org.apache.hadoop.ozone.client.OzoneClient): 1 usage
OzoneClientStub (org.apache.hadoop.ozone.client.OzoneClientStub): 1 usage
OzoneOutputStream (org.apache.hadoop.ozone.client.io.OzoneOutputStream): 1 usage