Usage of org.apache.hadoop.ozone.s3.util.RangeHeader in the Apache Ozone project:
class ObjectEndpoint, method createMultipartKey.
/**
 * Writes a single part of a multipart upload, either from the request body
 * or (when the {@code x-amz-copy-source} header is present) by copying an
 * optional byte range of an existing key (S3 UploadPartCopy).
 *
 * @param bucket     destination bucket name
 * @param key        destination key name
 * @param length     declared content length of the part
 * @param partNumber 1-based part number within the upload
 * @param uploadID   identifier of the multipart upload session
 * @param body       request payload; may be AWS chunk-signed
 * @return 200 response carrying either a {@link CopyPartResult} body
 *         (copy case) or an {@code ETag} header (upload case)
 * @throws IOException  on read/write failure
 * @throws OS3Exception translated S3 error (NoSuchUpload, AccessDenied,
 *                      PreconditionFailed, ...)
 */
private Response createMultipartKey(String bucket, String key, long length, int partNumber, String uploadID, InputStream body) throws IOException, OS3Exception {
try {
OzoneBucket ozoneBucket = getBucket(bucket);
// Chunk-signed streaming uploads wrap the payload; unwrap the raw data.
if ("STREAMING-AWS4-HMAC-SHA256-PAYLOAD".equals(headers.getHeaderString("x-amz-content-sha256"))) {
body = new SignedChunksInputStream(body);
}
String copyHeader = headers.getHeaderString(COPY_SOURCE_HEADER);
// Create the part stream up front: if this throws, there is nothing to
// close, and afterwards the stream is provably non-null — no null guard
// or runtime assert needed around the close/commit sequence below.
OzoneOutputStream ozoneOutputStream = ozoneBucket.createMultipartKey(key, length, partNumber, uploadID);
try {
if (copyHeader != null) {
// UploadPartCopy: data comes from another key instead of the body.
Pair<String, String> result = parseSourceHeader(copyHeader);
String sourceBucket = result.getLeft();
String sourceKey = result.getRight();
Long sourceKeyModificationTime = getBucket(sourceBucket).getKey(sourceKey).getModificationTime().toEpochMilli();
String copySourceIfModifiedSince = headers.getHeaderString(COPY_SOURCE_IF_MODIFIED_SINCE);
String copySourceIfUnmodifiedSince = headers.getHeaderString(COPY_SOURCE_IF_UNMODIFIED_SINCE);
// Honor the conditional-copy headers before touching any data.
if (!checkCopySourceModificationTime(sourceKeyModificationTime, copySourceIfModifiedSince, copySourceIfUnmodifiedSince)) {
throw newError(PRECOND_FAILED, sourceBucket + "/" + sourceKey);
}
try (OzoneInputStream sourceObject = getBucket(sourceBucket).readKey(sourceKey)) {
String range = headers.getHeaderString(COPY_SOURCE_HEADER_RANGE);
if (range != null) {
RangeHeader rangeHeader = RangeHeaderParserUtil.parseRangeHeader(range, 0);
final long skipped = sourceObject.skip(rangeHeader.getStartOffset());
if (skipped != rangeHeader.getStartOffset()) {
throw new EOFException("Bytes to skip: " + rangeHeader.getStartOffset() + " actual: " + skipped);
}
// Range end offset is inclusive, hence the +1.
IOUtils.copyLarge(sourceObject, ozoneOutputStream, 0, rangeHeader.getEndOffset() - rangeHeader.getStartOffset() + 1);
} else {
IOUtils.copy(sourceObject, ozoneOutputStream);
}
}
} else {
// Plain UploadPart: stream the request body into the part.
IOUtils.copy(body, ozoneOutputStream);
}
} finally {
ozoneOutputStream.close();
}
// The part name (used as the ETag) is only available after close commits
// the part.
OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo = ozoneOutputStream.getCommitUploadPartInfo();
String eTag = omMultipartCommitUploadPartInfo.getPartName();
if (copyHeader != null) {
return Response.ok(new CopyPartResult(eTag)).build();
} else {
return Response.ok().header("ETag", eTag).build();
}
} catch (OMException ex) {
if (ex.getResult() == ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR) {
throw newError(NO_SUCH_UPLOAD, uploadID, ex);
} else if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
throw newError(S3ErrorTable.ACCESS_DENIED, bucket + "/" + key, ex);
}
throw ex;
}
}
Usage of org.apache.hadoop.ozone.s3.util.RangeHeader in the Apache Ozone project:
class ObjectEndpoint, method get.
/**
 * Rest endpoint to download an object from a bucket. If the query param
 * uploadId is present, the request is instead treated as ListParts for
 * that multipart upload.
 * <p>
 * See: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html
 * https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadListParts.html
 * for more details.
 */
@GET
public Response get(@PathParam("bucket") String bucketName, @PathParam("path") String keyPath, @QueryParam("uploadId") String uploadId, @QueryParam("max-parts") @DefaultValue("1000") int maxParts, @QueryParam("part-number-marker") String partNumberMarker, InputStream body) throws IOException, OS3Exception {
try {
// An uploadId query parameter turns this into a ListParts request.
if (uploadId != null) {
int partMarker = parsePartNumberMarker(partNumberMarker);
return listParts(bucketName, keyPath, uploadId, partMarker, maxParts);
}
OzoneBucket bucket = getBucket(bucketName);
OzoneKeyDetails keyDetails = bucket.getKey(keyPath);
long length = keyDetails.getDataSize();
LOG.debug("Data length of the key {} is {}", keyPath, length);
RangeHeader rangeHeader = null;
String rangeHeaderVal = headers.getHeaderString(RANGE_HEADER);
LOG.debug("range Header provided value: {}", rangeHeaderVal);
if (rangeHeaderVal != null) {
rangeHeader = RangeHeaderParserUtil.parseRangeHeader(rangeHeaderVal, length);
LOG.debug("range Header provided: {}", rangeHeader);
if (rangeHeader.isInValidRange()) {
throw newError(S3ErrorTable.INVALID_RANGE, rangeHeaderVal);
}
}
ResponseBuilder responseBuilder;
if (rangeHeaderVal != null && !rangeHeader.isReadFull()) {
// Partial content: stream only the requested byte range.
long startOffset = rangeHeader.getStartOffset();
long endOffset = rangeHeader.getEndOffset();
// The end offset is inclusive; e.g. bytes=0-0 means one byte from
// the start offset.
long copyLength = endOffset - startOffset + 1;
StreamingOutput partialOutput = dest -> {
try (OzoneInputStream rangeStream = bucket.readKey(keyPath)) {
rangeStream.seek(startOffset);
IOUtils.copyLarge(rangeStream, dest, 0, copyLength, new byte[bufferSize]);
}
};
String contentRangeVal = RANGE_HEADER_SUPPORTED_UNIT + " " + rangeHeader.getStartOffset() + "-" + rangeHeader.getEndOffset() + "/" + length;
responseBuilder = Response.ok(partialOutput).header(CONTENT_LENGTH, copyLength);
responseBuilder.header(CONTENT_RANGE_HEADER, contentRangeVal);
} else {
// No range header (or a range covering the whole key): stream it all.
StreamingOutput fullOutput = dest -> {
try (OzoneInputStream keyStream = bucket.readKey(keyPath)) {
IOUtils.copy(keyStream, dest);
}
};
responseBuilder = Response.ok(fullOutput).header(CONTENT_LENGTH, keyDetails.getDataSize());
}
responseBuilder.header(ACCEPT_RANGE_HEADER, RANGE_HEADER_SUPPORTED_UNIT);
// Echo back any client-customizable response headers that were supplied.
for (String responseHeader : customizableGetHeaders) {
String headerValue = headers.getHeaderString(responseHeader);
if (headerValue != null) {
responseBuilder.header(responseHeader, headerValue);
}
}
addLastModifiedDate(responseBuilder, keyDetails);
return responseBuilder.build();
} catch (OMException ex) {
if (ex.getResult() == ResultCodes.KEY_NOT_FOUND) {
throw newError(S3ErrorTable.NO_SUCH_KEY, keyPath, ex);
} else if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
throw newError(S3ErrorTable.ACCESS_DENIED, keyPath, ex);
} else {
throw ex;
}
}
}
Aggregations.