Example 1 with Error

use of org.apache.hadoop.ozone.s3.endpoint.MultiDeleteResponse.Error in project ozone by apache.

In class ObjectEndpoint, method initializeMultipartUpload:

/**
 * Initialize MultiPartUpload request.
 * <p>
 * Note: the specific content type is set by the HeaderPreprocessor.
 */
@POST
@Produces(MediaType.APPLICATION_XML)
@Consumes(HeaderPreprocessor.MULTIPART_UPLOAD_MARKER)
public Response initializeMultipartUpload(@PathParam("bucket") String bucket, @PathParam("path") String key) throws IOException, OS3Exception {
    S3GAction s3GAction = S3GAction.INIT_MULTIPART_UPLOAD;
    try {
        OzoneBucket ozoneBucket = getBucket(bucket);
        String storageType = headers.getHeaderString(STORAGE_CLASS_HEADER);
        ReplicationConfig replicationConfig = getReplicationConfig(ozoneBucket, storageType);
        OmMultipartInfo multipartInfo = ozoneBucket.initiateMultipartUpload(key, replicationConfig);
        MultipartUploadInitiateResponse multipartUploadInitiateResponse = new MultipartUploadInitiateResponse();
        multipartUploadInitiateResponse.setBucket(bucket);
        multipartUploadInitiateResponse.setKey(key);
        multipartUploadInitiateResponse.setUploadID(multipartInfo.getUploadID());
        AUDIT.logWriteSuccess(buildAuditMessageForSuccess(s3GAction, getAuditParameters()));
        getMetrics().incInitMultiPartUploadSuccess();
        return Response.status(Status.OK).entity(multipartUploadInitiateResponse).build();
    } catch (OMException ex) {
        AUDIT.logWriteFailure(buildAuditMessageForFailure(s3GAction, getAuditParameters(), ex));
        getMetrics().incInitMultiPartUploadFailure();
        if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
            throw newError(S3ErrorTable.ACCESS_DENIED, key, ex);
        }
        LOG.error("Error in Initiate Multipart Upload Request for bucket: {}, " + "key: {}", bucket, key, ex);
        throw ex;
    } catch (Exception ex) {
        AUDIT.logWriteFailure(buildAuditMessageForFailure(s3GAction, getAuditParameters(), ex));
        getMetrics().incInitMultiPartUploadFailure();
        throw ex;
    }
}
Also used : OzoneBucket(org.apache.hadoop.ozone.client.OzoneBucket) ReplicationConfig(org.apache.hadoop.hdds.client.ReplicationConfig) S3GAction(org.apache.hadoop.ozone.audit.S3GAction) OmMultipartInfo(org.apache.hadoop.ozone.om.helpers.OmMultipartInfo) OMException(org.apache.hadoop.ozone.om.exceptions.OMException) OS3Exception(org.apache.hadoop.ozone.s3.exception.OS3Exception) ParseException(java.text.ParseException) EOFException(java.io.EOFException) UnsupportedEncodingException(java.io.UnsupportedEncodingException) IOException(java.io.IOException) POST(javax.ws.rs.POST) Produces(javax.ws.rs.Produces) Consumes(javax.ws.rs.Consumes)
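
To see this endpoint from the client side, here is a minimal sketch using the AWS SDK for Java v1 against an Ozone S3 gateway. The endpoint URL, region, credentials, and bucket/key names are placeholder assumptions (s3g listens on port 9878 by default); this is an illustrative sketch, not part of the Ozone sources.

import com.amazonaws.auth.AWSStaticCredentialsProvider;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.client.builder.AwsClientBuilder;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
import com.amazonaws.services.s3.model.InitiateMultipartUploadResult;

public class InitiateMpuExample {
    public static void main(String[] args) {
        // Placeholder endpoint and credentials; adjust for your deployment.
        AmazonS3 s3 = AmazonS3ClientBuilder.standard()
            .withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(
                "http://localhost:9878", "us-east-1"))
            .withPathStyleAccessEnabled(true)
            .withCredentials(new AWSStaticCredentialsProvider(
                new BasicAWSCredentials("accessKey", "secretKey")))
            .build();
        // POST /{bucket}/{key}?uploads, served by initializeMultipartUpload above.
        InitiateMultipartUploadResult result = s3.initiateMultipartUpload(
            new InitiateMultipartUploadRequest("bucket1", "dir/key1"));
        System.out.println("uploadId: " + result.getUploadId());
    }
}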

Example 2 with Error

use of org.apache.hadoop.ozone.s3.endpoint.MultiDeleteResponse.Error in project ozone by apache.

In class ObjectEndpoint, method put:

/**
 * Rest endpoint to upload object to a bucket.
 * <p>
 * See: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html for
 * more details.
 */
@PUT
public Response put(@PathParam("bucket") String bucketName, @PathParam("path") String keyPath, @HeaderParam("Content-Length") long length, @QueryParam("partNumber") int partNumber, @QueryParam("uploadId") @DefaultValue("") String uploadID, InputStream body) throws IOException, OS3Exception {
    S3GAction s3GAction = S3GAction.CREATE_KEY;
    boolean auditSuccess = true;
    OzoneOutputStream output = null;
    String copyHeader = null, storageType = null;
    try {
        if (uploadID != null && !uploadID.equals("")) {
            s3GAction = S3GAction.CREATE_MULTIPART_KEY;
            // If uploadID is specified, it is a request for upload part
            return createMultipartKey(bucketName, keyPath, length, partNumber, uploadID, body);
        }
        copyHeader = headers.getHeaderString(COPY_SOURCE_HEADER);
        storageType = headers.getHeaderString(STORAGE_CLASS_HEADER);
        boolean storageTypeDefault = StringUtils.isEmpty(storageType);
        // Normal put object
        OzoneBucket bucket = getBucket(bucketName);
        ReplicationConfig replicationConfig = getReplicationConfig(bucket, storageType);
        if (copyHeader != null) {
            // Copy object, since a copy source header is present.
            s3GAction = S3GAction.COPY_OBJECT;
            CopyObjectResponse copyObjectResponse = copyObject(copyHeader, bucket, keyPath, replicationConfig, storageTypeDefault);
            return Response.status(Status.OK).entity(copyObjectResponse).header("Connection", "close").build();
        }
        output = bucket.createKey(keyPath, length, replicationConfig, new HashMap<>());
        if ("STREAMING-AWS4-HMAC-SHA256-PAYLOAD".equals(headers.getHeaderString("x-amz-content-sha256"))) {
            body = new SignedChunksInputStream(body);
        }
        IOUtils.copy(body, output);
        getMetrics().incCreateKeySuccess();
        return Response.ok().status(HttpStatus.SC_OK).build();
    } catch (OMException ex) {
        auditSuccess = false;
        AUDIT.logWriteFailure(buildAuditMessageForFailure(s3GAction, getAuditParameters(), ex));
        if (copyHeader != null) {
            getMetrics().incCopyObjectFailure();
        } else {
            getMetrics().incCreateKeyFailure();
        }
        if (ex.getResult() == ResultCodes.NOT_A_FILE) {
            OS3Exception os3Exception = newError(INVALID_REQUEST, keyPath, ex);
            os3Exception.setErrorMessage("An error occurred (InvalidRequest) " + "when calling the PutObject/MPU PartUpload operation: " + OZONE_OM_ENABLE_FILESYSTEM_PATHS + " is enabled Keys are" + " considered as Unix Paths. Path has Violated FS Semantics " + "which caused put operation to fail.");
            throw os3Exception;
        } else if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
            throw newError(S3ErrorTable.ACCESS_DENIED, keyPath, ex);
        }
        LOG.error("Exception occurred in PutObject", ex);
        throw ex;
    } catch (Exception ex) {
        auditSuccess = false;
        AUDIT.logWriteFailure(buildAuditMessageForFailure(s3GAction, getAuditParameters(), ex));
        if (copyHeader != null) {
            getMetrics().incCopyObjectFailure();
        } else {
            getMetrics().incCreateKeyFailure();
        }
        LOG.error("Exception occurred in PutObject", ex.getMessage());
        throw ex;
    } finally {
        if (auditSuccess) {
            AUDIT.logWriteSuccess(buildAuditMessageForSuccess(s3GAction, getAuditParameters()));
        }
        if (output != null) {
            output.close();
        }
    }
}
Also used : OzoneBucket(org.apache.hadoop.ozone.client.OzoneBucket) ReplicationConfig(org.apache.hadoop.hdds.client.ReplicationConfig) HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) S3GAction(org.apache.hadoop.ozone.audit.S3GAction) SignedChunksInputStream(org.apache.hadoop.ozone.s3.SignedChunksInputStream) OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream) OS3Exception(org.apache.hadoop.ozone.s3.exception.OS3Exception) OMException(org.apache.hadoop.ozone.om.exceptions.OMException) ParseException(java.text.ParseException) EOFException(java.io.EOFException) UnsupportedEncodingException(java.io.UnsupportedEncodingException) IOException(java.io.IOException) PUT(javax.ws.rs.PUT)
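
A client-side sketch of the two paths this method handles: a plain PutObject, and an UploadPart carrying the partNumber/uploadId query params that put() forwards to createMultipartKey(). The 's3' client and 'uploadId' are assumed to come from the Example 1 sketch; bucket/key names remain placeholders.

import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.PartETag;
import com.amazonaws.services.s3.model.UploadPartRequest;

public class PutExample {
    static PartETag putAndUploadPart(AmazonS3 s3, String uploadId) {
        // Plain put: PUT /{bucket}/{key} with no uploadId, handled inline above.
        s3.putObject("bucket1", "key1", "hello ozone");

        // Part upload: PUT /{bucket}/{key}?partNumber=1&uploadId=...
        byte[] part = "part-1-data".getBytes(StandardCharsets.UTF_8);
        return s3.uploadPart(new UploadPartRequest()
            .withBucketName("bucket1")
            .withKey("key1")
            .withUploadId(uploadId)
            .withPartNumber(1)
            .withInputStream(new ByteArrayInputStream(part))
            .withPartSize(part.length))
            .getPartETag();
    }
}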

Example 3 with Error

use of org.apache.hadoop.ozone.s3.endpoint.MultiDeleteResponse.Error in project ozone by apache.

In class ObjectEndpoint, method completeMultipartUpload:

/**
 * Complete a multipart upload.
 */
@POST
@Produces(MediaType.APPLICATION_XML)
public Response completeMultipartUpload(@PathParam("bucket") String bucket, @PathParam("path") String key, @QueryParam("uploadId") @DefaultValue("") String uploadID, CompleteMultipartUploadRequest multipartUploadRequest) throws IOException, OS3Exception {
    S3GAction s3GAction = S3GAction.COMPLETE_MULTIPART_UPLOAD;
    OzoneBucket ozoneBucket = getBucket(bucket);
    // Using LinkedHashMap to preserve ordering of parts list.
    Map<Integer, String> partsMap = new LinkedHashMap<>();
    List<CompleteMultipartUploadRequest.Part> partList = multipartUploadRequest.getPartList();
    OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo;
    try {
        for (CompleteMultipartUploadRequest.Part part : partList) {
            partsMap.put(part.getPartNumber(), part.geteTag());
        }
        if (LOG.isDebugEnabled()) {
            LOG.debug("Parts map {}", partsMap);
        }
        omMultipartUploadCompleteInfo = ozoneBucket.completeMultipartUpload(key, uploadID, partsMap);
        CompleteMultipartUploadResponse completeMultipartUploadResponse = new CompleteMultipartUploadResponse();
        completeMultipartUploadResponse.setBucket(bucket);
        completeMultipartUploadResponse.setKey(key);
        completeMultipartUploadResponse.setETag(omMultipartUploadCompleteInfo.getHash());
        // Location is also set to the bucket name.
        completeMultipartUploadResponse.setLocation(bucket);
        AUDIT.logWriteSuccess(buildAuditMessageForSuccess(s3GAction, getAuditParameters()));
        getMetrics().incCompleteMultiPartUploadSuccess();
        return Response.status(Status.OK).entity(completeMultipartUploadResponse).build();
    } catch (OMException ex) {
        AUDIT.logWriteFailure(buildAuditMessageForFailure(s3GAction, getAuditParameters(), ex));
        getMetrics().incCompleteMultiPartUploadFailure();
        if (ex.getResult() == ResultCodes.INVALID_PART) {
            throw newError(S3ErrorTable.INVALID_PART, key, ex);
        } else if (ex.getResult() == ResultCodes.INVALID_PART_ORDER) {
            throw newError(S3ErrorTable.INVALID_PART_ORDER, key, ex);
        } else if (ex.getResult() == ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR) {
            throw newError(NO_SUCH_UPLOAD, uploadID, ex);
        } else if (ex.getResult() == ResultCodes.ENTITY_TOO_SMALL) {
            throw newError(ENTITY_TOO_SMALL, key, ex);
        } else if (ex.getResult() == ResultCodes.INVALID_REQUEST) {
            OS3Exception os3Exception = newError(INVALID_REQUEST, key, ex);
            os3Exception.setErrorMessage("An error occurred (InvalidRequest) " + "when calling the CompleteMultipartUpload operation: You must " + "specify at least one part");
            throw os3Exception;
        } else if (ex.getResult() == ResultCodes.NOT_A_FILE) {
            OS3Exception os3Exception = newError(INVALID_REQUEST, key, ex);
            os3Exception.setErrorMessage("An error occurred (InvalidRequest) " + "when calling the CompleteMultipartUpload operation: " + OZONE_OM_ENABLE_FILESYSTEM_PATHS + " is enabled Keys are " + "considered as Unix Paths. A directory already exists with a " + "given KeyName caused failure for MPU");
            throw os3Exception;
        }
        LOG.error("Error in Complete Multipart Upload Request for bucket: {}, " + ", key: {}", bucket, key, ex);
        throw ex;
    } catch (Exception ex) {
        AUDIT.logWriteFailure(buildAuditMessageForFailure(s3GAction, getAuditParameters(), ex));
        // Keep the failure metric consistent with the OMException path above.
        getMetrics().incCompleteMultiPartUploadFailure();
        throw ex;
    }
}
Also used : S3GAction(org.apache.hadoop.ozone.audit.S3GAction) OS3Exception(org.apache.hadoop.ozone.s3.exception.OS3Exception) ParseException(java.text.ParseException) EOFException(java.io.EOFException) OMException(org.apache.hadoop.ozone.om.exceptions.OMException) UnsupportedEncodingException(java.io.UnsupportedEncodingException) IOException(java.io.IOException) LinkedHashMap(java.util.LinkedHashMap) OzoneBucket(org.apache.hadoop.ozone.client.OzoneBucket) OmMultipartUploadCompleteInfo(org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo) POST(javax.ws.rs.POST) Produces(javax.ws.rs.Produces)
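
Completing the upload from the client side, again a hedged sketch: 's3', 'uploadId', and 'partETag' are assumed from the earlier sketches. The part ETags must be listed in ascending part-number order, which is why the server above preserves ordering with a LinkedHashMap.

import java.util.Arrays;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest;
import com.amazonaws.services.s3.model.CompleteMultipartUploadResult;
import com.amazonaws.services.s3.model.PartETag;

public class CompleteMpuExample {
    static void complete(AmazonS3 s3, String uploadId, PartETag partETag) {
        // POST /{bucket}/{key}?uploadId=... with the ordered part list,
        // served by completeMultipartUpload above.
        CompleteMultipartUploadResult result = s3.completeMultipartUpload(
            new CompleteMultipartUploadRequest(
                "bucket1", "key1", uploadId, Arrays.asList(partETag)));
        System.out.println("ETag: " + result.getETag());
    }
}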

Example 4 with Error

use of org.apache.hadoop.ozone.s3.endpoint.MultiDeleteResponse.Error in project ozone by apache.

In class OzoneClientProducer, method getSignature:

@Produces
public S3Auth getSignature() {
    try {
        SignatureInfo signatureInfo = signatureProcessor.parseSignature();
        String stringToSign = "";
        if (signatureInfo.getVersion() == Version.V4) {
            stringToSign = StringToSignProducer.createSignatureBase(signatureInfo, context);
        }
        String awsAccessId = signatureInfo.getAwsAccessId();
        // ONLY validate aws access id when needed.
        if (awsAccessId == null || awsAccessId.equals("")) {
            LOG.debug("Malformed s3 header. awsAccessID: {}", awsAccessId);
            throw ACCESS_DENIED;
        }
        // could be updated later in RpcClient#getS3Volume
        return new S3Auth(stringToSign, signatureInfo.getSignature(), awsAccessId, awsAccessId);
    } catch (OS3Exception ex) {
        LOG.debug("Error during Client Creation: ", ex);
        throw wrapOS3Exception(ex);
    } catch (Exception e) {
        // For any other critical errors during object creation throw Internal
        // error.
        LOG.debug("Error during Client Creation: ", e);
        throw wrapOS3Exception(S3ErrorTable.newError(INTERNAL_ERROR, null, e));
    }
}
Also used : SignatureInfo(org.apache.hadoop.ozone.s3.signature.SignatureInfo) S3Auth(org.apache.hadoop.ozone.om.protocol.S3Auth) OS3Exception(org.apache.hadoop.ozone.s3.exception.OS3Exception) IOException(java.io.IOException) WebApplicationException(javax.ws.rs.WebApplicationException) Produces(javax.enterprise.inject.Produces)
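
The string-to-sign that this producer rebuilds is the standard AWS Signature V4 construct, which the client signs and which is later verified against the stored secret. Below is a minimal sketch of the documented V4 signing chain as a client would perform it; the date, region, and secret are placeholder inputs, and this code is illustrative, not part of StringToSignProducer.

import java.nio.charset.StandardCharsets;
import javax.crypto.Mac;
import javax.crypto.spec.SecretKeySpec;

public class SigV4Sketch {
    static byte[] hmacSha256(byte[] key, String data) throws Exception {
        Mac mac = Mac.getInstance("HmacSHA256");
        mac.init(new SecretKeySpec(key, "HmacSHA256"));
        return mac.doFinal(data.getBytes(StandardCharsets.UTF_8));
    }

    // Derives the V4 signing key and signs the string-to-sign, per the
    // published AWS SigV4 specification.
    static String sign(String secretKey, String dateStamp, String region,
        String stringToSign) throws Exception {
        byte[] kDate = hmacSha256(
            ("AWS4" + secretKey).getBytes(StandardCharsets.UTF_8), dateStamp);
        byte[] kRegion = hmacSha256(kDate, region);
        byte[] kService = hmacSha256(kRegion, "s3");
        byte[] kSigning = hmacSha256(kService, "aws4_request");
        byte[] signature = hmacSha256(kSigning, stringToSign);
        StringBuilder hex = new StringBuilder();
        for (byte b : signature) {
            hex.append(String.format("%02x", b));
        }
        return hex.toString();
    }
}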

Example 5 with Error

use of org.apache.hadoop.ozone.s3.endpoint.MultiDeleteResponse.Error in project ozone by apache.

In class ObjectEndpoint, method delete:

/**
 * Delete a specific object from a bucket. If the query param uploadId is
 * specified, the request aborts a multipart upload instead.
 * <p>
 * See: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html
 * https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadAbort.html
 * for more details.
 */
@DELETE
@SuppressWarnings("emptyblock")
public Response delete(@PathParam("bucket") String bucketName, @PathParam("path") String keyPath, @QueryParam("uploadId") @DefaultValue("") String uploadId) throws IOException, OS3Exception {
    S3GAction s3GAction = S3GAction.DELETE_KEY;
    try {
        if (uploadId != null && !uploadId.equals("")) {
            s3GAction = S3GAction.ABORT_MULTIPART_UPLOAD;
            return abortMultipartUpload(bucketName, keyPath, uploadId);
        }
        OzoneBucket bucket = getBucket(bucketName);
        bucket.getKey(keyPath);
        bucket.deleteKey(keyPath);
    } catch (OMException ex) {
        AUDIT.logWriteFailure(buildAuditMessageForFailure(s3GAction, getAuditParameters(), ex));
        if (uploadId != null && !uploadId.equals("")) {
            getMetrics().incAbortMultiPartUploadFailure();
        } else {
            getMetrics().incDeleteKeyFailure();
        }
        if (ex.getResult() == ResultCodes.BUCKET_NOT_FOUND) {
            throw newError(S3ErrorTable.NO_SUCH_BUCKET, bucketName, ex);
        } else if (ex.getResult() == ResultCodes.KEY_NOT_FOUND) {
        // KEY_NOT_FOUND is not a problem: AWS doesn't throw an exception for
        // missing keys. Just return 204.
        } else if (ex.getResult() == ResultCodes.DIRECTORY_NOT_EMPTY) {
        // With the PREFIX metadata layout, deleting a non-empty directory
        // without the recursive flag set to true throws DIRECTORY_NOT_EMPTY.
        // Swallow it and return 204 here as well, mirroring the missing-key
        // behavior above.
        } else if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
            throw newError(S3ErrorTable.ACCESS_DENIED, keyPath, ex);
        } else {
            throw ex;
        }
    } catch (Exception ex) {
        AUDIT.logWriteFailure(buildAuditMessageForFailure(s3GAction, getAuditParameters(), ex));
        if (uploadId != null && !uploadId.equals("")) {
            getMetrics().incAbortMultiPartUploadFailure();
        } else {
            getMetrics().incDeleteKeyFailure();
        }
        throw ex;
    }
    getMetrics().incDeleteKeySuccess();
    AUDIT.logWriteSuccess(buildAuditMessageForSuccess(s3GAction, getAuditParameters()));
    return Response.status(Status.NO_CONTENT).build();
}
Also used : OzoneBucket(org.apache.hadoop.ozone.client.OzoneBucket) S3GAction(org.apache.hadoop.ozone.audit.S3GAction) OMException(org.apache.hadoop.ozone.om.exceptions.OMException) OS3Exception(org.apache.hadoop.ozone.s3.exception.OS3Exception) ParseException(java.text.ParseException) EOFException(java.io.EOFException) UnsupportedEncodingException(java.io.UnsupportedEncodingException) IOException(java.io.IOException) DELETE(javax.ws.rs.DELETE)
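
Client-side counterparts of both paths in this method: a plain delete and an abort of an in-flight multipart upload (the uploadId query param). As before, 's3' and 'uploadId' are assumed from the earlier sketches and the names are placeholders.

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.AbortMultipartUploadRequest;

public class DeleteExample {
    static void deleteAndAbort(AmazonS3 s3, String uploadId) {
        // DELETE /{bucket}/{key}, served by delete() above; returns 204 even
        // if the key is already gone, matching AWS behavior.
        s3.deleteObject("bucket1", "key1");

        // DELETE /{bucket}/{key}?uploadId=..., routed to abortMultipartUpload().
        s3.abortMultipartUpload(new AbortMultipartUploadRequest(
            "bucket1", "key1", uploadId));
    }
}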

Aggregations

OS3Exception (org.apache.hadoop.ozone.s3.exception.OS3Exception) 8
IOException (java.io.IOException) 7
OMException (org.apache.hadoop.ozone.om.exceptions.OMException) 7
S3GAction (org.apache.hadoop.ozone.audit.S3GAction) 6
OzoneBucket (org.apache.hadoop.ozone.client.OzoneBucket) 6
EOFException (java.io.EOFException) 4
UnsupportedEncodingException (java.io.UnsupportedEncodingException) 4
ParseException (java.text.ParseException) 4
POST (javax.ws.rs.POST) 3
Produces (javax.ws.rs.Produces) 3
LinkedHashMap (java.util.LinkedHashMap) 2
PUT (javax.ws.rs.PUT) 2
ReplicationConfig (org.apache.hadoop.hdds.client.ReplicationConfig) 2
ArrayList (java.util.ArrayList) 1
HashMap (java.util.HashMap) 1
Produces (javax.enterprise.inject.Produces) 1
Consumes (javax.ws.rs.Consumes) 1
DELETE (javax.ws.rs.DELETE) 1
WebApplicationException (javax.ws.rs.WebApplicationException) 1
OzoneAcl (org.apache.hadoop.ozone.OzoneAcl) 1