
Example 1 with S3GAction

Use of org.apache.hadoop.ozone.audit.S3GAction in the Apache Ozone project.

From class ObjectEndpoint, method head().

/**
 * Rest endpoint to check existence of an object in a bucket.
 * <p>
 * See: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectHEAD.html
 * for more details.
 */
@HEAD
public Response head(@PathParam("bucket") String bucketName, @PathParam("path") String keyPath) throws IOException, OS3Exception {
    S3GAction s3GAction = S3GAction.HEAD_KEY;
    OzoneKey key;
    try {
        key = getBucket(bucketName).headObject(keyPath);
        // TODO: return the specified range bytes of this object.
    } catch (OMException ex) {
        AUDIT.logReadFailure(buildAuditMessageForFailure(s3GAction, getAuditParameters(), ex));
        getMetrics().incHeadKeyFailure();
        if (ex.getResult() == ResultCodes.KEY_NOT_FOUND) {
            // Just return 404 with no content
            return Response.status(Status.NOT_FOUND).build();
        } else if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
            throw newError(S3ErrorTable.ACCESS_DENIED, keyPath, ex);
        } else {
            throw ex;
        }
    } catch (Exception ex) {
        AUDIT.logReadFailure(buildAuditMessageForFailure(s3GAction, getAuditParameters(), ex));
        throw ex;
    }
    ResponseBuilder response = Response.ok().status(HttpStatus.SC_OK)
        .header("ETag", "" + key.getModificationTime())
        .header("Content-Length", key.getDataSize())
        .header("Content-Type", "binary/octet-stream");
    addLastModifiedDate(response, key);
    getMetrics().incHeadKeySuccess();
    AUDIT.logReadSuccess(buildAuditMessageForSuccess(s3GAction, getAuditParameters()));
    return response.build();
}
Also used : S3GAction(org.apache.hadoop.ozone.audit.S3GAction) OzoneKey(org.apache.hadoop.ozone.client.OzoneKey) ResponseBuilder(javax.ws.rs.core.Response.ResponseBuilder) OMException(org.apache.hadoop.ozone.om.exceptions.OMException) OS3Exception(org.apache.hadoop.ozone.s3.exception.OS3Exception) ParseException(java.text.ParseException) EOFException(java.io.EOFException) UnsupportedEncodingException(java.io.UnsupportedEncodingException) IOException(java.io.IOException) HEAD(javax.ws.rs.HEAD)
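
The pattern that every handler on this page shares is the reason S3GAction exists: pick the action up front, log a failure from each catch block, and log success exactly once on the happy path. The sketch below only condenses the read-side shape of the head() example above; it is not a standalone class and reuses the AUDIT logger and builder methods already shown.

S3GAction s3GAction = S3GAction.HEAD_KEY;
try {
    // ... perform the read against the Ozone bucket ...
    AUDIT.logReadSuccess(
        buildAuditMessageForSuccess(s3GAction, getAuditParameters()));
} catch (Exception ex) {
    // every failure path emits an audit entry before rethrowing
    AUDIT.logReadFailure(
        buildAuditMessageForFailure(s3GAction, getAuditParameters(), ex));
    throw ex;
}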

Example 2 with S3GAction

Use of org.apache.hadoop.ozone.audit.S3GAction in the Apache Ozone project.

From class ObjectEndpoint, method initializeMultipartUpload().

/**
 * Initialize MultiPartUpload request.
 * <p>
 * Note: the specific content type is set by the HeaderPreprocessor.
 */
@POST
@Produces(MediaType.APPLICATION_XML)
@Consumes(HeaderPreprocessor.MULTIPART_UPLOAD_MARKER)
public Response initializeMultipartUpload(@PathParam("bucket") String bucket, @PathParam("path") String key) throws IOException, OS3Exception {
    S3GAction s3GAction = S3GAction.INIT_MULTIPART_UPLOAD;
    try {
        OzoneBucket ozoneBucket = getBucket(bucket);
        String storageType = headers.getHeaderString(STORAGE_CLASS_HEADER);
        ReplicationConfig replicationConfig = getReplicationConfig(ozoneBucket, storageType);
        OmMultipartInfo multipartInfo = ozoneBucket.initiateMultipartUpload(key, replicationConfig);
        MultipartUploadInitiateResponse multipartUploadInitiateResponse = new MultipartUploadInitiateResponse();
        multipartUploadInitiateResponse.setBucket(bucket);
        multipartUploadInitiateResponse.setKey(key);
        multipartUploadInitiateResponse.setUploadID(multipartInfo.getUploadID());
        AUDIT.logWriteSuccess(buildAuditMessageForSuccess(s3GAction, getAuditParameters()));
        getMetrics().incInitMultiPartUploadSuccess();
        return Response.status(Status.OK).entity(multipartUploadInitiateResponse).build();
    } catch (OMException ex) {
        AUDIT.logWriteFailure(buildAuditMessageForFailure(s3GAction, getAuditParameters(), ex));
        getMetrics().incInitMultiPartUploadFailure();
        if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
            throw newError(S3ErrorTable.ACCESS_DENIED, key, ex);
        }
        LOG.error("Error in Initiate Multipart Upload Request for bucket: {}, " + "key: {}", bucket, key, ex);
        throw ex;
    } catch (Exception ex) {
        AUDIT.logWriteFailure(buildAuditMessageForFailure(s3GAction, getAuditParameters(), ex));
        getMetrics().incInitMultiPartUploadFailure();
        throw ex;
    }
}
Also used : OzoneBucket(org.apache.hadoop.ozone.client.OzoneBucket) ReplicationConfig(org.apache.hadoop.hdds.client.ReplicationConfig) S3GAction(org.apache.hadoop.ozone.audit.S3GAction) OmMultipartInfo(org.apache.hadoop.ozone.om.helpers.OmMultipartInfo) OMException(org.apache.hadoop.ozone.om.exceptions.OMException) OS3Exception(org.apache.hadoop.ozone.s3.exception.OS3Exception) ParseException(java.text.ParseException) EOFException(java.io.EOFException) UnsupportedEncodingException(java.io.UnsupportedEncodingException) IOException(java.io.IOException) POST(javax.ws.rs.POST) Produces(javax.ws.rs.Produces) Consumes(javax.ws.rs.Consumes)
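
To see the INIT_MULTIPART_UPLOAD action audited end to end, the gateway's unit tests drive the endpoint against an in-memory client instead of a running cluster. A minimal sketch along those lines; OzoneClientStub, setClient, setHeaders and the Mockito-mocked HttpHeaders are assumptions taken from the s3gateway test sources and may differ between Ozone versions.

// assumed test doubles: OzoneClientStub (in-memory OzoneClient) and a mocked HttpHeaders
OzoneClient client = new OzoneClientStub();
client.getObjectStore().createS3Bucket("bucket");

ObjectEndpoint rest = new ObjectEndpoint();
rest.setClient(client);                            // assumed setter from the endpoint base class
rest.setHeaders(Mockito.mock(HttpHeaders.class));  // no storage-class header set

Response response = rest.initializeMultipartUpload("bucket", "key");
MultipartUploadInitiateResponse init =
    (MultipartUploadInitiateResponse) response.getEntity();
// a 200 response carries the uploadID used by the later part uploads
assert response.getStatus() == 200 && init.getUploadID() != null;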

Example 3 with S3GAction

Use of org.apache.hadoop.ozone.audit.S3GAction in the Apache Ozone project.

From class ObjectEndpoint, method put().

/**
 * Rest endpoint to upload object to a bucket.
 * <p>
 * See: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html for
 * more details.
 */
@PUT
public Response put(@PathParam("bucket") String bucketName,
    @PathParam("path") String keyPath,
    @HeaderParam("Content-Length") long length,
    @QueryParam("partNumber") int partNumber,
    @QueryParam("uploadId") @DefaultValue("") String uploadID,
    InputStream body) throws IOException, OS3Exception {
    S3GAction s3GAction = S3GAction.CREATE_KEY;
    boolean auditSuccess = true;
    OzoneOutputStream output = null;
    String copyHeader = null, storageType = null;
    try {
        if (uploadID != null && !uploadID.equals("")) {
            s3GAction = S3GAction.CREATE_MULTIPART_KEY;
            // If uploadID is specified, it is a request for upload part
            return createMultipartKey(bucketName, keyPath, length, partNumber, uploadID, body);
        }
        copyHeader = headers.getHeaderString(COPY_SOURCE_HEADER);
        storageType = headers.getHeaderString(STORAGE_CLASS_HEADER);
        boolean storageTypeDefault = StringUtils.isEmpty(storageType);
        // Normal put object
        OzoneBucket bucket = getBucket(bucketName);
        ReplicationConfig replicationConfig = getReplicationConfig(bucket, storageType);
        if (copyHeader != null) {
            // Copy object, as copy source available.
            s3GAction = S3GAction.COPY_OBJECT;
            CopyObjectResponse copyObjectResponse = copyObject(copyHeader, bucket, keyPath, replicationConfig, storageTypeDefault);
            return Response.status(Status.OK).entity(copyObjectResponse).header("Connection", "close").build();
        }
        output = bucket.createKey(keyPath, length, replicationConfig, new HashMap<>());
        if ("STREAMING-AWS4-HMAC-SHA256-PAYLOAD".equals(headers.getHeaderString("x-amz-content-sha256"))) {
            body = new SignedChunksInputStream(body);
        }
        IOUtils.copy(body, output);
        getMetrics().incCreateKeySuccess();
        return Response.ok().status(HttpStatus.SC_OK).build();
    } catch (OMException ex) {
        auditSuccess = false;
        AUDIT.logWriteFailure(buildAuditMessageForFailure(s3GAction, getAuditParameters(), ex));
        if (copyHeader != null) {
            getMetrics().incCopyObjectFailure();
        } else {
            getMetrics().incCreateKeyFailure();
        }
        if (ex.getResult() == ResultCodes.NOT_A_FILE) {
            OS3Exception os3Exception = newError(INVALID_REQUEST, keyPath, ex);
            os3Exception.setErrorMessage("An error occurred (InvalidRequest) "
                + "when calling the PutObject/MPU PartUpload operation: "
                + OZONE_OM_ENABLE_FILESYSTEM_PATHS + " is enabled Keys are"
                + " considered as Unix Paths. Path has Violated FS Semantics "
                + "which caused put operation to fail.");
            throw os3Exception;
        } else if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
            throw newError(S3ErrorTable.ACCESS_DENIED, keyPath, ex);
        }
        LOG.error("Exception occurred in PutObject", ex);
        throw ex;
    } catch (Exception ex) {
        auditSuccess = false;
        AUDIT.logWriteFailure(buildAuditMessageForFailure(s3GAction, getAuditParameters(), ex));
        if (copyHeader != null) {
            getMetrics().incCopyObjectFailure();
        } else {
            getMetrics().incCreateKeyFailure();
        }
        LOG.error("Exception occurred in PutObject", ex.getMessage());
        throw ex;
    } finally {
        if (auditSuccess) {
            AUDIT.logWriteSuccess(buildAuditMessageForSuccess(s3GAction, getAuditParameters()));
        }
        if (output != null) {
            output.close();
        }
    }
}
Also used : OzoneBucket(org.apache.hadoop.ozone.client.OzoneBucket) ReplicationConfig(org.apache.hadoop.hdds.client.ReplicationConfig) HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) S3GAction(org.apache.hadoop.ozone.audit.S3GAction) SignedChunksInputStream(org.apache.hadoop.ozone.s3.SignedChunksInputStream) OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream) OS3Exception(org.apache.hadoop.ozone.s3.exception.OS3Exception) OMException(org.apache.hadoop.ozone.om.exceptions.OMException) ParseException(java.text.ParseException) EOFException(java.io.EOFException) UnsupportedEncodingException(java.io.UnsupportedEncodingException) IOException(java.io.IOException) PUT(javax.ws.rs.PUT)
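
One PUT handler covers three audited actions, and which S3GAction ends up in the audit log depends entirely on the request. The snippet below only condenses the branching already shown above (COPY_SOURCE_HEADER is the x-amz-copy-source header constant the endpoint reads); it is a sketch, not separate logic.

S3GAction action;
if (uploadID != null && !uploadID.equals("")) {
    action = S3GAction.CREATE_MULTIPART_KEY;   // ?uploadId=... : upload one MPU part
} else if (headers.getHeaderString(COPY_SOURCE_HEADER) != null) {
    action = S3GAction.COPY_OBJECT;            // x-amz-copy-source present: server-side copy
} else {
    action = S3GAction.CREATE_KEY;             // plain PutObject
}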

Example 4 with S3GAction

Use of org.apache.hadoop.ozone.audit.S3GAction in the Apache Ozone project.

From class ObjectEndpoint, method completeMultipartUpload().

/**
 * Complete a multipart upload.
 */
@POST
@Produces(MediaType.APPLICATION_XML)
public Response completeMultipartUpload(@PathParam("bucket") String bucket,
    @PathParam("path") String key,
    @QueryParam("uploadId") @DefaultValue("") String uploadID,
    CompleteMultipartUploadRequest multipartUploadRequest)
    throws IOException, OS3Exception {
    S3GAction s3GAction = S3GAction.COMPLETE_MULTIPART_UPLOAD;
    OzoneBucket ozoneBucket = getBucket(bucket);
    // Using LinkedHashMap to preserve ordering of parts list.
    Map<Integer, String> partsMap = new LinkedHashMap<>();
    List<CompleteMultipartUploadRequest.Part> partList = multipartUploadRequest.getPartList();
    OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo;
    try {
        for (CompleteMultipartUploadRequest.Part part : partList) {
            partsMap.put(part.getPartNumber(), part.geteTag());
        }
        if (LOG.isDebugEnabled()) {
            LOG.debug("Parts map {}", partsMap);
        }
        omMultipartUploadCompleteInfo = ozoneBucket.completeMultipartUpload(key, uploadID, partsMap);
        CompleteMultipartUploadResponse completeMultipartUploadResponse = new CompleteMultipartUploadResponse();
        completeMultipartUploadResponse.setBucket(bucket);
        completeMultipartUploadResponse.setKey(key);
        completeMultipartUploadResponse.setETag(omMultipartUploadCompleteInfo.getHash());
        // Location also setting as bucket name.
        completeMultipartUploadResponse.setLocation(bucket);
        AUDIT.logWriteSuccess(buildAuditMessageForSuccess(s3GAction, getAuditParameters()));
        getMetrics().incCompleteMultiPartUploadSuccess();
        return Response.status(Status.OK).entity(completeMultipartUploadResponse).build();
    } catch (OMException ex) {
        AUDIT.logWriteFailure(buildAuditMessageForFailure(s3GAction, getAuditParameters(), ex));
        getMetrics().incCompleteMultiPartUploadFailure();
        if (ex.getResult() == ResultCodes.INVALID_PART) {
            throw newError(S3ErrorTable.INVALID_PART, key, ex);
        } else if (ex.getResult() == ResultCodes.INVALID_PART_ORDER) {
            throw newError(S3ErrorTable.INVALID_PART_ORDER, key, ex);
        } else if (ex.getResult() == ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR) {
            throw newError(NO_SUCH_UPLOAD, uploadID, ex);
        } else if (ex.getResult() == ResultCodes.ENTITY_TOO_SMALL) {
            throw newError(ENTITY_TOO_SMALL, key, ex);
        } else if (ex.getResult() == ResultCodes.INVALID_REQUEST) {
            OS3Exception os3Exception = newError(INVALID_REQUEST, key, ex);
            os3Exception.setErrorMessage("An error occurred (InvalidRequest) "
                + "when calling the CompleteMultipartUpload operation: You must "
                + "specify at least one part");
            throw os3Exception;
        } else if (ex.getResult() == ResultCodes.NOT_A_FILE) {
            OS3Exception os3Exception = newError(INVALID_REQUEST, key, ex);
            os3Exception.setErrorMessage("An error occurred (InvalidRequest) "
                + "when calling the CompleteMultipartUpload operation: "
                + OZONE_OM_ENABLE_FILESYSTEM_PATHS + " is enabled Keys are "
                + "considered as Unix Paths. A directory already exists with a "
                + "given KeyName caused failure for MPU");
            throw os3Exception;
        }
        LOG.error("Error in Complete Multipart Upload Request for bucket: {}, " + ", key: {}", bucket, key, ex);
        throw ex;
    } catch (Exception ex) {
        AUDIT.logWriteFailure(buildAuditMessageForFailure(s3GAction, getAuditParameters(), ex));
        throw ex;
    }
}
Also used : S3GAction(org.apache.hadoop.ozone.audit.S3GAction) OS3Exception(org.apache.hadoop.ozone.s3.exception.OS3Exception) ParseException(java.text.ParseException) EOFException(java.io.EOFException) OMException(org.apache.hadoop.ozone.om.exceptions.OMException) UnsupportedEncodingException(java.io.UnsupportedEncodingException) IOException(java.io.IOException) LinkedHashMap(java.util.LinkedHashMap) OzoneBucket(org.apache.hadoop.ozone.client.OzoneBucket) OmMultipartUploadCompleteInfo(org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo) POST(javax.ws.rs.POST) Produces(javax.ws.rs.Produces)
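
The "Using LinkedHashMap to preserve ordering of parts list" comment matters because the part numbers must reach the Ozone Manager in the order the client listed them in the request body. A tiny standalone illustration of that point; the ETag values are made up.

import java.util.LinkedHashMap;
import java.util.Map;

public class PartsOrderDemo {
  public static void main(String[] args) {
    // LinkedHashMap iterates in insertion order, so the order of the parts
    // list from the request body is preserved; a plain HashMap would not
    // guarantee this.
    Map<Integer, String> partsMap = new LinkedHashMap<>();
    partsMap.put(1, "etag-1");   // hypothetical ETags
    partsMap.put(3, "etag-3");
    partsMap.put(2, "etag-2");
    System.out.println(partsMap.keySet()); // prints [1, 3, 2]
  }
}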

Example 5 with S3GAction

Use of org.apache.hadoop.ozone.audit.S3GAction in the Apache Ozone project.

From class BucketEndpoint, method delete().

/**
 * Rest endpoint to delete specific bucket.
 * <p>
 * See: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketDELETE.html
 * for more details.
 */
@DELETE
public Response delete(@PathParam("bucket") String bucketName) throws IOException, OS3Exception {
    S3GAction s3GAction = S3GAction.DELETE_BUCKET;
    try {
        deleteS3Bucket(bucketName);
    } catch (OMException ex) {
        AUDIT.logWriteFailure(buildAuditMessageForFailure(s3GAction, getAuditParameters(), ex));
        getMetrics().incDeleteBucketFailure();
        if (ex.getResult() == ResultCodes.BUCKET_NOT_EMPTY) {
            throw newError(S3ErrorTable.BUCKET_NOT_EMPTY, bucketName, ex);
        } else if (ex.getResult() == ResultCodes.BUCKET_NOT_FOUND) {
            throw newError(S3ErrorTable.NO_SUCH_BUCKET, bucketName, ex);
        } else if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
            throw newError(S3ErrorTable.ACCESS_DENIED, bucketName, ex);
        } else {
            throw ex;
        }
    } catch (Exception ex) {
        AUDIT.logWriteFailure(buildAuditMessageForFailure(s3GAction, getAuditParameters(), ex));
        throw ex;
    }
    AUDIT.logWriteSuccess(buildAuditMessageForSuccess(s3GAction, getAuditParameters()));
    getMetrics().incDeleteBucketSuccess();
    return Response.status(HttpStatus.SC_NO_CONTENT).build();
}
Also used : S3GAction(org.apache.hadoop.ozone.audit.S3GAction) OMException(org.apache.hadoop.ozone.om.exceptions.OMException) OS3Exception(org.apache.hadoop.ozone.s3.exception.OS3Exception) IOException(java.io.IOException) DELETE(javax.ws.rs.DELETE)
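
Every example above ends its OMException catch block the same way: translate a small set of ResultCodes into S3 errors via S3ErrorTable.newError and rethrow everything else. The helper below is hypothetical (it does not exist in the Ozone sources) and only condenses the mapping used by this delete() handler; the ResultCodes constants and the newError calls are the real ones from the code above.

// hypothetical helper, for illustration only
private static OS3Exception toS3Error(OMException ex, String resource)
    throws OMException {
  switch (ex.getResult()) {
  case BUCKET_NOT_EMPTY:
    return newError(S3ErrorTable.BUCKET_NOT_EMPTY, resource, ex);
  case BUCKET_NOT_FOUND:
    return newError(S3ErrorTable.NO_SUCH_BUCKET, resource, ex);
  case PERMISSION_DENIED:
    return newError(S3ErrorTable.ACCESS_DENIED, resource, ex);
  default:
    throw ex; // anything unmapped propagates unchanged
  }
}

// usage inside the catch block: throw toS3Error(ex, bucketName);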

Aggregations

Type (fully qualified name): usage count

IOException (java.io.IOException): 12
S3GAction (org.apache.hadoop.ozone.audit.S3GAction): 12
OMException (org.apache.hadoop.ozone.om.exceptions.OMException): 12
OS3Exception (org.apache.hadoop.ozone.s3.exception.OS3Exception): 12
OzoneBucket (org.apache.hadoop.ozone.client.OzoneBucket): 8
EOFException (java.io.EOFException): 6
UnsupportedEncodingException (java.io.UnsupportedEncodingException): 6
ParseException (java.text.ParseException): 6
POST (javax.ws.rs.POST): 4
Produces (javax.ws.rs.Produces): 4
LinkedHashMap (java.util.LinkedHashMap): 3
DELETE (javax.ws.rs.DELETE): 3
HEAD (javax.ws.rs.HEAD): 3
PUT (javax.ws.rs.PUT): 3
HashMap (java.util.HashMap): 2
Consumes (javax.ws.rs.Consumes): 2
GET (javax.ws.rs.GET): 2
ReplicationConfig (org.apache.hadoop.hdds.client.ReplicationConfig): 2
OzoneKey (org.apache.hadoop.ozone.client.OzoneKey): 2
S3ErrorTable.newError (org.apache.hadoop.ozone.s3.exception.S3ErrorTable.newError): 2