Search in sources :

Example 1 with RangeHeader

Use of org.apache.hadoop.ozone.s3.util.RangeHeader in the Apache Ozone project.

From the class ObjectEndpoint, method createMultipartKey.

/**
 * Writes a single part of a multipart upload. The part data comes either from
 * the request body or — when the {@code x-amz-copy-source} header is present —
 * from an existing key (S3 UploadPartCopy), optionally restricted to a byte
 * range via {@code x-amz-copy-source-range}.
 *
 * @param bucket     destination bucket name
 * @param key        destination key name
 * @param length     declared length of the part payload in bytes
 * @param partNumber 1-based part number within the upload
 * @param uploadID   identifier of the multipart upload being appended to
 * @param body       request body stream (ignored for copy-source requests)
 * @return 200 response carrying a CopyPartResult body for copy requests,
 *         or an ETag header for regular part uploads
 * @throws IOException  on read/write failure
 * @throws OS3Exception mapped S3 error (NoSuchUpload, AccessDenied,
 *                      PreconditionFailed, ...)
 */
private Response createMultipartKey(String bucket, String key, long length, int partNumber, String uploadID, InputStream body) throws IOException, OS3Exception {
    try {
        OzoneBucket ozoneBucket = getBucket(bucket);
        String copyHeader;
        OzoneOutputStream ozoneOutputStream = null;
        // Streaming v4-signed payloads arrive chunk-framed; unwrap the
        // signature framing so only the raw payload bytes are written.
        if ("STREAMING-AWS4-HMAC-SHA256-PAYLOAD".equals(headers.getHeaderString("x-amz-content-sha256"))) {
            body = new SignedChunksInputStream(body);
        }
        try {
            ozoneOutputStream = ozoneBucket.createMultipartKey(key, length, partNumber, uploadID);
            copyHeader = headers.getHeaderString(COPY_SOURCE_HEADER);
            if (copyHeader != null) {
                // UploadPartCopy: the part content is copied from another key.
                Pair<String, String> result = parseSourceHeader(copyHeader);
                String sourceBucket = result.getLeft();
                String sourceKey = result.getRight();
                Long sourceKeyModificationTime = getBucket(sourceBucket).getKey(sourceKey).getModificationTime().toEpochMilli();
                // Conditional-copy preconditions (If-Modified-Since /
                // If-Unmodified-Since) must hold before any data is copied.
                String copySourceIfModifiedSince = headers.getHeaderString(COPY_SOURCE_IF_MODIFIED_SINCE);
                String copySourceIfUnmodifiedSince = headers.getHeaderString(COPY_SOURCE_IF_UNMODIFIED_SINCE);
                if (!checkCopySourceModificationTime(sourceKeyModificationTime, copySourceIfModifiedSince, copySourceIfUnmodifiedSince)) {
                    throw newError(PRECOND_FAILED, sourceBucket + "/" + sourceKey);
                }
                try (OzoneInputStream sourceObject = getBucket(sourceBucket).readKey(sourceKey)) {
                    String range = headers.getHeaderString(COPY_SOURCE_HEADER_RANGE);
                    if (range != null) {
                        RangeHeader rangeHeader = RangeHeaderParserUtil.parseRangeHeader(range, 0);
                        final long skipped = sourceObject.skip(rangeHeader.getStartOffset());
                        if (skipped != rangeHeader.getStartOffset()) {
                            throw new EOFException("Bytes to skip: " + rangeHeader.getStartOffset() + " actual: " + skipped);
                        }
                        // The range end offset is inclusive, hence the +1.
                        IOUtils.copyLarge(sourceObject, ozoneOutputStream, 0, rangeHeader.getEndOffset() - rangeHeader.getStartOffset() + 1);
                    } else {
                        IOUtils.copy(sourceObject, ozoneOutputStream);
                    }
                }
            } else {
                // Plain UploadPart: stream the request body into the part.
                IOUtils.copy(body, ozoneOutputStream);
            }
        } finally {
            // Closing the stream commits the part; the commit info queried
            // below is only available after close.
            if (ozoneOutputStream != null) {
                ozoneOutputStream.close();
            }
        }
        // Reached only on normal completion of the try block, so
        // ozoneOutputStream was assigned above. (A redundant runtime-disabled
        // `assert` restating this was removed.)
        OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo = ozoneOutputStream.getCommitUploadPartInfo();
        String eTag = omMultipartCommitUploadPartInfo.getPartName();
        if (copyHeader != null) {
            return Response.ok(new CopyPartResult(eTag)).build();
        } else {
            return Response.ok().header("ETag", eTag).build();
        }
    } catch (OMException ex) {
        if (ex.getResult() == ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR) {
            throw newError(NO_SUCH_UPLOAD, uploadID, ex);
        } else if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
            throw newError(S3ErrorTable.ACCESS_DENIED, bucket + "/" + key, ex);
        }
        throw ex;
    }
}
Also used : OzoneInputStream(org.apache.hadoop.ozone.client.io.OzoneInputStream) SignedChunksInputStream(org.apache.hadoop.ozone.s3.SignedChunksInputStream) OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream) RangeHeader(org.apache.hadoop.ozone.s3.util.RangeHeader) OzoneBucket(org.apache.hadoop.ozone.client.OzoneBucket) OmMultipartCommitUploadPartInfo(org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo) OptionalLong(java.util.OptionalLong) EOFException(java.io.EOFException) OMException(org.apache.hadoop.ozone.om.exceptions.OMException)

Example 2 with RangeHeader

Use of org.apache.hadoop.ozone.s3.util.RangeHeader in the Apache Ozone project.

From the class ObjectEndpoint, method get.

/**
 * Rest endpoint to download an object from a bucket. If the query param
 * {@code uploadId} is present, the request is instead treated as a request to
 * list the parts of the multipart upload with that id.
 * <p>
 * See: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html
 * https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadListParts.html
 * for more details.
 */
@GET
public Response get(@PathParam("bucket") String bucketName, @PathParam("path") String keyPath, @QueryParam("uploadId") String uploadId, @QueryParam("max-parts") @DefaultValue("1000") int maxParts, @QueryParam("part-number-marker") String partNumberMarker, InputStream body) throws IOException, OS3Exception {
    try {
        // An uploadId turns this into a "list parts" request; delegate early.
        if (uploadId != null) {
            return listParts(bucketName, keyPath, uploadId, parsePartNumberMarker(partNumberMarker), maxParts);
        }
        OzoneBucket bucket = getBucket(bucketName);
        OzoneKeyDetails keyDetails = bucket.getKey(keyPath);
        long length = keyDetails.getDataSize();
        LOG.debug("Data length of the key {} is {}", keyPath, length);
        String rangeHeaderVal = headers.getHeaderString(RANGE_HEADER);
        LOG.debug("range Header provided value: {}", rangeHeaderVal);
        // Parse and validate the Range header, if the client sent one.
        RangeHeader rangeHeader = null;
        if (rangeHeaderVal != null) {
            rangeHeader = RangeHeaderParserUtil.parseRangeHeader(rangeHeaderVal, length);
            LOG.debug("range Header provided: {}", rangeHeader);
            if (rangeHeader.isInValidRange()) {
                throw newError(S3ErrorTable.INVALID_RANGE, rangeHeaderVal);
            }
        }
        ResponseBuilder responseBuilder;
        if (rangeHeaderVal == null || rangeHeader.isReadFull()) {
            // No range (or a range covering everything): stream the whole key.
            StreamingOutput fullBody = dest -> {
                try (OzoneInputStream in = bucket.readKey(keyPath)) {
                    IOUtils.copy(in, dest);
                }
            };
            responseBuilder = Response.ok(fullBody).header(CONTENT_LENGTH, length);
        } else {
            final long startOffset = rangeHeader.getStartOffset();
            final long endOffset = rangeHeader.getEndOffset();
            // Range offsets are inclusive: bytes=0-0 means one byte from the
            // start offset, hence the +1.
            final long copyLength = endOffset - startOffset + 1;
            StreamingOutput rangeBody = dest -> {
                try (OzoneInputStream in = bucket.readKey(keyPath)) {
                    in.seek(startOffset);
                    IOUtils.copyLarge(in, dest, 0, copyLength, new byte[bufferSize]);
                }
            };
            responseBuilder = Response.ok(rangeBody)
                .header(CONTENT_LENGTH, copyLength)
                .header(CONTENT_RANGE_HEADER, RANGE_HEADER_SUPPORTED_UNIT + " " + startOffset + "-" + endOffset + "/" + length);
        }
        responseBuilder.header(ACCEPT_RANGE_HEADER, RANGE_HEADER_SUPPORTED_UNIT);
        // Echo back any client-supplied headers that are configured as
        // customizable response headers.
        for (String responseHeader : customizableGetHeaders) {
            String headerValue = headers.getHeaderString(responseHeader);
            if (headerValue != null) {
                responseBuilder.header(responseHeader, headerValue);
            }
        }
        addLastModifiedDate(responseBuilder, keyDetails);
        return responseBuilder.build();
    } catch (OMException ex) {
        // Translate OM errors into their S3 equivalents where a mapping exists.
        switch (ex.getResult()) {
            case KEY_NOT_FOUND:
                throw newError(S3ErrorTable.NO_SUCH_KEY, keyPath, ex);
            case PERMISSION_DENIED:
                throw newError(S3ErrorTable.ACCESS_DENIED, keyPath, ex);
            default:
                throw ex;
        }
    }
}
Also used : OzoneBucket(org.apache.hadoop.ozone.client.OzoneBucket) S3StorageType(org.apache.hadoop.ozone.s3.util.S3StorageType) INVALID_ARGUMENT(org.apache.hadoop.ozone.s3.exception.S3ErrorTable.INVALID_ARGUMENT) Produces(javax.ws.rs.Produces) OmMultipartCommitUploadPartInfo(org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo) ZonedDateTime(java.time.ZonedDateTime) Path(javax.ws.rs.Path) HeaderPreprocessor(org.apache.hadoop.ozone.s3.HeaderPreprocessor) CONTENT_RANGE_HEADER(org.apache.hadoop.ozone.s3.util.S3Consts.CONTENT_RANGE_HEADER) LoggerFactory(org.slf4j.LoggerFactory) NO_SUCH_UPLOAD(org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NO_SUCH_UPLOAD) HttpStatus(org.apache.http.HttpStatus) MediaType(javax.ws.rs.core.MediaType) QueryParam(javax.ws.rs.QueryParam) Consumes(javax.ws.rs.Consumes) Pair(org.apache.commons.lang3.tuple.Pair) ENTITY_TOO_SMALL(org.apache.hadoop.ozone.s3.exception.S3ErrorTable.ENTITY_TOO_SMALL) STORAGE_CLASS_HEADER(org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER) Map(java.util.Map) DefaultValue(javax.ws.rs.DefaultValue) HeaderParam(javax.ws.rs.HeaderParam) OS3Exception(org.apache.hadoop.ozone.s3.exception.OS3Exception) OZONE_S3G_CLIENT_BUFFER_SIZE_DEFAULT(org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_CLIENT_BUFFER_SIZE_DEFAULT) ParseException(java.text.ParseException) CONTENT_LENGTH(javax.ws.rs.core.HttpHeaders.CONTENT_LENGTH) RANGE_HEADER(org.apache.hadoop.ozone.s3.util.S3Consts.RANGE_HEADER) DELETE(javax.ws.rs.DELETE) Context(javax.ws.rs.core.Context) S3Utils.urlDecode(org.apache.hadoop.ozone.s3.util.S3Utils.urlDecode) StreamingOutput(javax.ws.rs.core.StreamingOutput) Instant(java.time.Instant) EOFException(java.io.EOFException) ZoneId(java.time.ZoneId) ReplicationType(org.apache.hadoop.hdds.client.ReplicationType) OzoneUtils(org.apache.hadoop.ozone.web.utils.OzoneUtils) IOUtils(org.apache.commons.io.IOUtils) List(java.util.List) StorageUnit(org.apache.hadoop.hdds.conf.StorageUnit) 
PRECOND_FAILED(org.apache.hadoop.ozone.s3.exception.S3ErrorTable.PRECOND_FAILED) HttpHeaders(javax.ws.rs.core.HttpHeaders) RFC1123Util(org.apache.hadoop.ozone.s3.util.RFC1123Util) S3ErrorTable.newError(org.apache.hadoop.ozone.s3.exception.S3ErrorTable.newError) Response(javax.ws.rs.core.Response) OMException(org.apache.hadoop.ozone.om.exceptions.OMException) OmMultipartUploadCompleteInfo(org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo) COPY_SOURCE_IF_MODIFIED_SINCE(org.apache.hadoop.ozone.s3.util.S3Consts.COPY_SOURCE_IF_MODIFIED_SINCE) OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream) PostConstruct(javax.annotation.PostConstruct) ACCEPT_RANGE_HEADER(org.apache.hadoop.ozone.s3.util.S3Consts.ACCEPT_RANGE_HEADER) COPY_SOURCE_IF_UNMODIFIED_SINCE(org.apache.hadoop.ozone.s3.util.S3Consts.COPY_SOURCE_IF_UNMODIFIED_SINCE) UnsupportedEncodingException(java.io.UnsupportedEncodingException) HEAD(javax.ws.rs.HEAD) OzoneInputStream(org.apache.hadoop.ozone.client.io.OzoneInputStream) RangeHeaderParserUtil(org.apache.hadoop.ozone.s3.util.RangeHeaderParserUtil) OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration) OzoneKey(org.apache.hadoop.ozone.client.OzoneKey) PathParam(javax.ws.rs.PathParam) LAST_MODIFIED(javax.ws.rs.core.HttpHeaders.LAST_MODIFIED) INVALID_REQUEST(org.apache.hadoop.ozone.s3.exception.S3ErrorTable.INVALID_REQUEST) GET(javax.ws.rs.GET) RangeHeader(org.apache.hadoop.ozone.s3.util.RangeHeader) HashMap(java.util.HashMap) OZONE_S3G_CLIENT_BUFFER_SIZE_KEY(org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_CLIENT_BUFFER_SIZE_KEY) ResultCodes(org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes) ArrayList(java.util.ArrayList) Inject(javax.inject.Inject) LinkedHashMap(java.util.LinkedHashMap) OptionalLong(java.util.OptionalLong) S3ErrorTable(org.apache.hadoop.ozone.s3.exception.S3ErrorTable) OZONE_OM_ENABLE_FILESYSTEM_PATHS(org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS) 
OzoneBucket(org.apache.hadoop.ozone.client.OzoneBucket) OzoneKeyDetails(org.apache.hadoop.ozone.client.OzoneKeyDetails) Status(javax.ws.rs.core.Response.Status) ResponseBuilder(javax.ws.rs.core.Response.ResponseBuilder) COPY_SOURCE_HEADER(org.apache.hadoop.ozone.s3.util.S3Consts.COPY_SOURCE_HEADER) POST(javax.ws.rs.POST) Logger(org.slf4j.Logger) OmMultipartInfo(org.apache.hadoop.ozone.om.helpers.OmMultipartInfo) COPY_SOURCE_HEADER_RANGE(org.apache.hadoop.ozone.s3.util.S3Consts.COPY_SOURCE_HEADER_RANGE) IOException(java.io.IOException) OzoneConsts(org.apache.hadoop.ozone.OzoneConsts) ReplicationFactor(org.apache.hadoop.hdds.client.ReplicationFactor) SignedChunksInputStream(org.apache.hadoop.ozone.s3.SignedChunksInputStream) RANGE_HEADER_SUPPORTED_UNIT(org.apache.hadoop.ozone.s3.util.S3Consts.RANGE_HEADER_SUPPORTED_UNIT) OzoneMultipartUploadPartListParts(org.apache.hadoop.ozone.client.OzoneMultipartUploadPartListParts) Time(org.apache.hadoop.util.Time) PUT(javax.ws.rs.PUT) VisibleForTesting(com.google.common.annotations.VisibleForTesting) InputStream(java.io.InputStream) OzoneInputStream(org.apache.hadoop.ozone.client.io.OzoneInputStream) OzoneKeyDetails(org.apache.hadoop.ozone.client.OzoneKeyDetails) StreamingOutput(javax.ws.rs.core.StreamingOutput) ResponseBuilder(javax.ws.rs.core.Response.ResponseBuilder) OMException(org.apache.hadoop.ozone.om.exceptions.OMException) RangeHeader(org.apache.hadoop.ozone.s3.util.RangeHeader) GET(javax.ws.rs.GET)

Aggregations

EOFException (java.io.EOFException)2 OptionalLong (java.util.OptionalLong)2 VisibleForTesting (com.google.common.annotations.VisibleForTesting)1 IOException (java.io.IOException)1 InputStream (java.io.InputStream)1 UnsupportedEncodingException (java.io.UnsupportedEncodingException)1 ParseException (java.text.ParseException)1 Instant (java.time.Instant)1 ZoneId (java.time.ZoneId)1 ZonedDateTime (java.time.ZonedDateTime)1 ArrayList (java.util.ArrayList)1 HashMap (java.util.HashMap)1 LinkedHashMap (java.util.LinkedHashMap)1 List (java.util.List)1 Map (java.util.Map)1 PostConstruct (javax.annotation.PostConstruct)1 Inject (javax.inject.Inject)1 Consumes (javax.ws.rs.Consumes)1 DELETE (javax.ws.rs.DELETE)1 DefaultValue (javax.ws.rs.DefaultValue)1