Example 1 with SignedChunksInputStream

Use of org.apache.hadoop.ozone.s3.SignedChunksInputStream in project ozone by apache.

From the class ObjectEndpoint, method createMultipartKey:

private Response createMultipartKey(String bucket, String key, long length,
        int partNumber, String uploadID, InputStream body)
        throws IOException, OS3Exception {
    try {
        OzoneBucket ozoneBucket = getBucket(bucket);
        String copyHeader;
        OzoneOutputStream ozoneOutputStream = null;
        if ("STREAMING-AWS4-HMAC-SHA256-PAYLOAD".equals(headers.getHeaderString("x-amz-content-sha256"))) {
            body = new SignedChunksInputStream(body);
        }
        try {
            ozoneOutputStream = ozoneBucket.createMultipartKey(key, length, partNumber, uploadID);
            copyHeader = headers.getHeaderString(COPY_SOURCE_HEADER);
            // x-amz-copy-source present: this is an upload-part-copy, so the
            // part content comes from an existing key, not the request body.
            if (copyHeader != null) {
                Pair<String, String> result = parseSourceHeader(copyHeader);
                String sourceBucket = result.getLeft();
                String sourceKey = result.getRight();
                Long sourceKeyModificationTime = getBucket(sourceBucket)
                        .getKey(sourceKey).getModificationTime().toEpochMilli();
                String copySourceIfModifiedSince = headers.getHeaderString(COPY_SOURCE_IF_MODIFIED_SINCE);
                String copySourceIfUnmodifiedSince = headers.getHeaderString(COPY_SOURCE_IF_UNMODIFIED_SINCE);
                // Enforce the copy-source If-Modified-Since and
                // If-Unmodified-Since preconditions before copying.
                if (!checkCopySourceModificationTime(sourceKeyModificationTime,
                        copySourceIfModifiedSince, copySourceIfUnmodifiedSince)) {
                    throw newError(PRECOND_FAILED, sourceBucket + "/" + sourceKey);
                }
                try (OzoneInputStream sourceObject = getBucket(sourceBucket).readKey(sourceKey)) {
                    String range = headers.getHeaderString(COPY_SOURCE_HEADER_RANGE);
                    if (range != null) {
                        // x-amz-copy-source-range: copy only the requested
                        // byte range of the source key.
                        RangeHeader rangeHeader =
                                RangeHeaderParserUtil.parseRangeHeader(range, 0);
                        final long skipped = sourceObject.skip(rangeHeader.getStartOffset());
                        if (skipped != rangeHeader.getStartOffset()) {
                            throw new EOFException("Bytes to skip: " + rangeHeader.getStartOffset() + " actual: " + skipped);
                        }
                        // The end offset is inclusive, hence the +1 on the
                        // number of bytes to copy.
                        IOUtils.copyLarge(sourceObject, ozoneOutputStream, 0,
                                rangeHeader.getEndOffset()
                                        - rangeHeader.getStartOffset() + 1);
                    } else {
                        IOUtils.copy(sourceObject, ozoneOutputStream);
                    }
                }
            } else {
                IOUtils.copy(body, ozoneOutputStream);
            }
        } finally {
            if (ozoneOutputStream != null) {
                ozoneOutputStream.close();
            }
        }
        // The output stream is closed at this point, but the commit info of
        // the written part is still available from it.
        assert ozoneOutputStream != null;
        OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo =
                ozoneOutputStream.getCommitUploadPartInfo();
        String eTag = omMultipartCommitUploadPartInfo.getPartName();
        if (copyHeader != null) {
            return Response.ok(new CopyPartResult(eTag)).build();
        } else {
            return Response.ok().header("ETag", eTag).build();
        }
    } catch (OMException ex) {
        if (ex.getResult() == ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR) {
            throw newError(NO_SUCH_UPLOAD, uploadID, ex);
        } else if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
            throw newError(S3ErrorTable.ACCESS_DENIED, bucket + "/" + key, ex);
        }
        throw ex;
    }
}
Also used:

OzoneInputStream (org.apache.hadoop.ozone.client.io.OzoneInputStream)
SignedChunksInputStream (org.apache.hadoop.ozone.s3.SignedChunksInputStream)
OzoneOutputStream (org.apache.hadoop.ozone.client.io.OzoneOutputStream)
RangeHeader (org.apache.hadoop.ozone.s3.util.RangeHeader)
OzoneBucket (org.apache.hadoop.ozone.client.OzoneBucket)
OmMultipartCommitUploadPartInfo (org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo)
OptionalLong (java.util.OptionalLong)
EOFException (java.io.EOFException)
OMException (org.apache.hadoop.ozone.om.exceptions.OMException)
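
For context on what the wrapper in this example actually does: a streaming signed payload ("aws-chunked" encoding) frames every chunk as <hex-size>;chunk-signature=<sig>\r\n<data>\r\n, and SignedChunksInputStream yields only the <data> parts. Below is a minimal sketch, modeled on Ozone's own TestSignedChunksInputStream; the chunk signatures are placeholder strings, not real HMACs.

import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;

import org.apache.commons.io.IOUtils;
import org.apache.hadoop.ozone.s3.SignedChunksInputStream;

public class SignedChunksDemo {
    public static void main(String[] args) throws Exception {
        // Two signed chunks: 0x0a = 10 bytes, then 0x05 = 5 bytes. Real
        // clients additionally send a terminating zero-length chunk.
        String signedBody = "0a;chunk-signature=signature\r\n"
                + "1234567890\r\n"
                + "05;chunk-signature=signature\r\n"
                + "abcde\r\n";
        InputStream raw = new ByteArrayInputStream(
                signedBody.getBytes(StandardCharsets.UTF_8));
        // The wrapper strips the size/signature envelopes and exposes only
        // the payload bytes.
        String decoded = IOUtils.toString(
                new SignedChunksInputStream(raw), StandardCharsets.UTF_8);
        System.out.println(decoded);  // expected: 1234567890abcde
    }
}

Note that the stream only removes the framing here; verifying the per-chunk signatures is not this class's job and happens, if at all, in the gateway's signature processing.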

Example 2 with SignedChunksInputStream

Use of org.apache.hadoop.ozone.s3.SignedChunksInputStream in project ozone by apache.

From the class ObjectEndpoint, method put:

/**
 * Rest endpoint to upload object to a bucket.
 * <p>
 * See: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html for
 * more details.
 */
@PUT
public Response put(@PathParam("bucket") String bucketName,
        @PathParam("path") String keyPath,
        @HeaderParam("Content-Length") long length,
        @QueryParam("partNumber") int partNumber,
        @QueryParam("uploadId") @DefaultValue("") String uploadID,
        InputStream body) throws IOException, OS3Exception {
    OzoneOutputStream output = null;
    if (uploadID != null && !uploadID.equals("")) {
        // A non-empty uploadId query parameter marks an upload-part request
        // for an existing multipart upload.
        return createMultipartKey(bucketName, keyPath, length, partNumber, uploadID, body);
    }
    try {
        String copyHeader = headers.getHeaderString(COPY_SOURCE_HEADER);
        String storageType = headers.getHeaderString(STORAGE_CLASS_HEADER);
        S3StorageType s3StorageType;
        boolean storageTypeDefault;
        if (storageType == null || storageType.equals("")) {
            s3StorageType = S3StorageType.getDefault(ozoneConfiguration);
            storageTypeDefault = true;
        } else {
            s3StorageType = toS3StorageType(storageType);
            storageTypeDefault = false;
        }
        ReplicationType replicationType = s3StorageType.getType();
        ReplicationFactor replicationFactor = s3StorageType.getFactor();
        if (copyHeader != null) {
            // x-amz-copy-source present: perform a server-side copy instead
            // of reading the request body.
            CopyObjectResponse copyObjectResponse = copyObject(copyHeader, bucketName, keyPath, replicationType, replicationFactor, storageTypeDefault);
            return Response.status(Status.OK).entity(copyObjectResponse).header("Connection", "close").build();
        }
        // Normal put object
        OzoneBucket bucket = getBucket(bucketName);
        output = bucket.createKey(keyPath, length, replicationType, replicationFactor, new HashMap<>());
        if ("STREAMING-AWS4-HMAC-SHA256-PAYLOAD".equals(headers.getHeaderString("x-amz-content-sha256"))) {
            body = new SignedChunksInputStream(body);
        }
        IOUtils.copy(body, output);
        return Response.ok().status(HttpStatus.SC_OK).build();
    } catch (OMException ex) {
        if (ex.getResult() == ResultCodes.NOT_A_FILE) {
            OS3Exception os3Exception = newError(INVALID_REQUEST, keyPath, ex);
            os3Exception.setErrorMessage("An error occurred (InvalidRequest) "
                    + "when calling the PutObject/MPU PartUpload operation: "
                    + OZONE_OM_ENABLE_FILESYSTEM_PATHS + " is enabled Keys are"
                    + " considered as Unix Paths. Path has Violated FS Semantics "
                    + "which caused put operation to fail.");
            throw os3Exception;
        } else if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
            throw newError(S3ErrorTable.ACCESS_DENIED, keyPath, ex);
        }
        LOG.error("Exception occurred in PutObject", ex);
        throw ex;
    } finally {
        if (output != null) {
            output.close();
        }
    }
}
Also used:

S3StorageType (org.apache.hadoop.ozone.s3.util.S3StorageType)
OzoneBucket (org.apache.hadoop.ozone.client.OzoneBucket)
ReplicationType (org.apache.hadoop.hdds.client.ReplicationType)
ReplicationFactor (org.apache.hadoop.hdds.client.ReplicationFactor)
HashMap (java.util.HashMap)
LinkedHashMap (java.util.LinkedHashMap)
SignedChunksInputStream (org.apache.hadoop.ozone.s3.SignedChunksInputStream)
OzoneOutputStream (org.apache.hadoop.ozone.client.io.OzoneOutputStream)
OS3Exception (org.apache.hadoop.ozone.s3.exception.OS3Exception)
OMException (org.apache.hadoop.ozone.om.exceptions.OMException)
PUT (javax.ws.rs.PUT)
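
To exercise the streaming path end to end, here is a JUnit-style sketch modeled on the s3gateway unit tests (compare TestObjectPut in the Ozone tree). OzoneClientStub and the endpoint's setClient/setHeaders/setOzoneConfiguration setters are test-scope helpers in the Ozone source; treat their exact names and availability as assumptions if you adapt this elsewhere.

import static java.nio.charset.StandardCharsets.UTF_8;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.io.ByteArrayInputStream;
import javax.ws.rs.core.HttpHeaders;
import javax.ws.rs.core.Response;

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.client.OzoneClientStub;
import org.apache.hadoop.ozone.s3.endpoint.ObjectEndpoint;
import org.junit.Assert;
import org.junit.Test;

public class PutSignedChunksSketch {

    @Test
    public void putObjectWithSignedChunks() throws Exception {
        // In-memory client stub with one bucket, as used by the s3g tests.
        OzoneClientStub client = new OzoneClientStub();
        client.getObjectStore().createS3Bucket("b1");

        ObjectEndpoint endpoint = new ObjectEndpoint();
        endpoint.setClient(client);
        endpoint.setOzoneConfiguration(new OzoneConfiguration());

        // Advertising a streaming signed payload makes put() wrap the body
        // in SignedChunksInputStream.
        HttpHeaders headers = mock(HttpHeaders.class);
        when(headers.getHeaderString("x-amz-content-sha256"))
                .thenReturn("STREAMING-AWS4-HMAC-SHA256-PAYLOAD");
        endpoint.setHeaders(headers);

        String chunkedBody = "0a;chunk-signature=signature\r\n"
                + "1234567890\r\n";

        // uploadId is null, so this takes the normal put-object path rather
        // than createMultipartKey.
        Response response = endpoint.put("b1", "key1", chunkedBody.length(),
                1, null, new ByteArrayInputStream(chunkedBody.getBytes(UTF_8)));

        Assert.assertEquals(200, response.getStatus());
    }
}

The mocked HttpHeaders returns null for every other header, so the copy-source and storage-class branches fall through to their defaults, which is exactly the "normal put object" path in the method above.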

Aggregations

OzoneBucket (org.apache.hadoop.ozone.client.OzoneBucket): 2
OzoneOutputStream (org.apache.hadoop.ozone.client.io.OzoneOutputStream): 2
OMException (org.apache.hadoop.ozone.om.exceptions.OMException): 2
SignedChunksInputStream (org.apache.hadoop.ozone.s3.SignedChunksInputStream): 2
EOFException (java.io.EOFException): 1
HashMap (java.util.HashMap): 1
LinkedHashMap (java.util.LinkedHashMap): 1
OptionalLong (java.util.OptionalLong): 1
PUT (javax.ws.rs.PUT): 1
ReplicationFactor (org.apache.hadoop.hdds.client.ReplicationFactor): 1
ReplicationType (org.apache.hadoop.hdds.client.ReplicationType): 1
OzoneInputStream (org.apache.hadoop.ozone.client.io.OzoneInputStream): 1
OmMultipartCommitUploadPartInfo (org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo): 1
OS3Exception (org.apache.hadoop.ozone.s3.exception.OS3Exception): 1
RangeHeader (org.apache.hadoop.ozone.s3.util.RangeHeader): 1
S3StorageType (org.apache.hadoop.ozone.s3.util.S3StorageType): 1