Use of org.apache.hadoop.ozone.s3.SignedChunksInputStream in project ozone by apache.
From the class ObjectEndpoint, method createMultipartKey.
private Response createMultipartKey(String bucket, String key, long length,
    int partNumber, String uploadID, InputStream body)
    throws IOException, OS3Exception {
  try {
    OzoneBucket ozoneBucket = getBucket(bucket);
    String copyHeader;
    OzoneOutputStream ozoneOutputStream = null;
    // Streaming uploads arrive in signed-chunk framing; unwrap them so
    // that only the payload bytes reach the output stream.
    if ("STREAMING-AWS4-HMAC-SHA256-PAYLOAD".equals(
        headers.getHeaderString("x-amz-content-sha256"))) {
      body = new SignedChunksInputStream(body);
    }
    try {
      ozoneOutputStream = ozoneBucket.createMultipartKey(
          key, length, partNumber, uploadID);
      copyHeader = headers.getHeaderString(COPY_SOURCE_HEADER);
      if (copyHeader != null) {
        // UploadPartCopy: the part content comes from an existing key
        // rather than from the request body.
        Pair<String, String> result = parseSourceHeader(copyHeader);
        String sourceBucket = result.getLeft();
        String sourceKey = result.getRight();
        Long sourceKeyModificationTime = getBucket(sourceBucket)
            .getKey(sourceKey).getModificationTime().toEpochMilli();
        String copySourceIfModifiedSince =
            headers.getHeaderString(COPY_SOURCE_IF_MODIFIED_SINCE);
        String copySourceIfUnmodifiedSince =
            headers.getHeaderString(COPY_SOURCE_IF_UNMODIFIED_SINCE);
        if (!checkCopySourceModificationTime(sourceKeyModificationTime,
            copySourceIfModifiedSince, copySourceIfUnmodifiedSince)) {
          throw newError(PRECOND_FAILED, sourceBucket + "/" + sourceKey);
        }
        try (OzoneInputStream sourceObject =
            getBucket(sourceBucket).readKey(sourceKey)) {
          String range = headers.getHeaderString(COPY_SOURCE_HEADER_RANGE);
          if (range != null) {
            // Copy only the requested byte range of the source object.
            RangeHeader rangeHeader =
                RangeHeaderParserUtil.parseRangeHeader(range, 0);
            final long skipped =
                sourceObject.skip(rangeHeader.getStartOffset());
            if (skipped != rangeHeader.getStartOffset()) {
              throw new EOFException(
                  "Bytes to skip: " + rangeHeader.getStartOffset()
                      + " actual: " + skipped);
            }
            IOUtils.copyLarge(sourceObject, ozoneOutputStream, 0,
                rangeHeader.getEndOffset()
                    - rangeHeader.getStartOffset() + 1);
          } else {
            IOUtils.copy(sourceObject, ozoneOutputStream);
          }
        }
      } else {
        IOUtils.copy(body, ozoneOutputStream);
      }
    } finally {
      if (ozoneOutputStream != null) {
        ozoneOutputStream.close();
      }
    }
    assert ozoneOutputStream != null;
    // The part name (returned as the ETag) only becomes available after
    // the stream has been closed and the part committed.
    OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo =
        ozoneOutputStream.getCommitUploadPartInfo();
    String eTag = omMultipartCommitUploadPartInfo.getPartName();
    if (copyHeader != null) {
      return Response.ok(new CopyPartResult(eTag)).build();
    } else {
      return Response.ok().header("ETag", eTag).build();
    }
  } catch (OMException ex) {
    if (ex.getResult() == ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR) {
      throw newError(NO_SUCH_UPLOAD, uploadID, ex);
    } else if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
      throw newError(S3ErrorTable.ACCESS_DENIED, bucket + "/" + key, ex);
    }
    throw ex;
  }
}
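The step this page documents is at the top of the method: when the client signs the payload as STREAMING-AWS4-HMAC-SHA256-PAYLOAD, the request body arrives in AWS's aws-chunked framing, and wrapping it in SignedChunksInputStream strips the per-chunk size/signature headers so that only payload bytes reach the multipart output stream. Below is a minimal sketch of that de-framing, modeled on the class's unit tests; the "signature" value is a placeholder (as used here the stream removes framing rather than performing a full signature check), and real streaming requests terminate with a zero-length chunk, omitted for brevity.

import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;

import org.apache.commons.io.IOUtils;
import org.apache.hadoop.ozone.s3.SignedChunksInputStream;

public class SignedChunksDemo {
  public static void main(String[] args) throws Exception {
    // aws-chunked framing: <hex-size>;chunk-signature=<sig>\r\n<data>\r\n
    String chunked = "5;chunk-signature=signature\r\nHello\r\n";
    InputStream raw = new ByteArrayInputStream(
        chunked.getBytes(StandardCharsets.UTF_8));
    try (InputStream decoded = new SignedChunksInputStream(raw)) {
      // Only the payload bytes survive the wrapper; prints "Hello".
      System.out.println(IOUtils.toString(decoded, StandardCharsets.UTF_8));
    }
  }
}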
Use of org.apache.hadoop.ozone.s3.SignedChunksInputStream in project ozone by apache.
From the class ObjectEndpoint, method put.
/**
 * REST endpoint to upload an object to a bucket.
 * <p>
 * See: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html for
 * more details.
 */
@PUT
public Response put(@PathParam("bucket") String bucketName,
    @PathParam("path") String keyPath,
    @HeaderParam("Content-Length") long length,
    @QueryParam("partNumber") int partNumber,
    @QueryParam("uploadId") @DefaultValue("") String uploadID,
    InputStream body) throws IOException, OS3Exception {
  OzoneOutputStream output = null;
  if (uploadID != null && !uploadID.equals("")) {
    // If uploadID is specified, it is a request for upload part.
    return createMultipartKey(bucketName, keyPath, length, partNumber,
        uploadID, body);
  }
  try {
    String copyHeader = headers.getHeaderString(COPY_SOURCE_HEADER);
    String storageType = headers.getHeaderString(STORAGE_CLASS_HEADER);
    S3StorageType s3StorageType;
    boolean storageTypeDefault;
    if (storageType == null || storageType.equals("")) {
      s3StorageType = S3StorageType.getDefault(ozoneConfiguration);
      storageTypeDefault = true;
    } else {
      s3StorageType = toS3StorageType(storageType);
      storageTypeDefault = false;
    }
    ReplicationType replicationType = s3StorageType.getType();
    ReplicationFactor replicationFactor = s3StorageType.getFactor();
    if (copyHeader != null) {
      // Copy object, as a copy source is available.
      CopyObjectResponse copyObjectResponse = copyObject(copyHeader,
          bucketName, keyPath, replicationType, replicationFactor,
          storageTypeDefault);
      return Response.status(Status.OK).entity(copyObjectResponse)
          .header("Connection", "close").build();
    }
    // Normal put object.
    OzoneBucket bucket = getBucket(bucketName);
    output = bucket.createKey(keyPath, length, replicationType,
        replicationFactor, new HashMap<>());
    // Streaming uploads arrive in signed-chunk framing; unwrap them so
    // that only the payload bytes are written to the key.
    if ("STREAMING-AWS4-HMAC-SHA256-PAYLOAD".equals(
        headers.getHeaderString("x-amz-content-sha256"))) {
      body = new SignedChunksInputStream(body);
    }
    IOUtils.copy(body, output);
    return Response.ok().status(HttpStatus.SC_OK).build();
  } catch (OMException ex) {
    if (ex.getResult() == ResultCodes.NOT_A_FILE) {
      OS3Exception os3Exception = newError(INVALID_REQUEST, keyPath, ex);
      os3Exception.setErrorMessage("An error occurred (InvalidRequest) "
          + "when calling the PutObject/MPU PartUpload operation: "
          + OZONE_OM_ENABLE_FILESYSTEM_PATHS + " is enabled Keys are"
          + " considered as Unix Paths. Path has Violated FS Semantics "
          + "which caused put operation to fail.");
      throw os3Exception;
    } else if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
      throw newError(S3ErrorTable.ACCESS_DENIED, keyPath, ex);
    }
    LOG.error("Exception occurred in PutObject", ex);
    throw ex;
  } finally {
    if (output != null) {
      output.close();
    }
  }
}
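put() and createMultipartKey() repeat the same header check before wrapping the body. A hypothetical consolidation is sketched below; SignedPayloadUtil and maybeDecodeSignedChunks are not part of the Ozone codebase, and HttpHeaders is the injected javax.ws.rs headers object both methods already read from.

import java.io.InputStream;
import javax.ws.rs.core.HttpHeaders;
import org.apache.hadoop.ozone.s3.SignedChunksInputStream;

final class SignedPayloadUtil {
  private static final String STREAMING_PAYLOAD =
      "STREAMING-AWS4-HMAC-SHA256-PAYLOAD";

  private SignedPayloadUtil() {
  }

  /**
   * Wrap the request body in a SignedChunksInputStream only when the client
   * declared a SigV4 streaming payload; otherwise return the body unchanged.
   */
  static InputStream maybeDecodeSignedChunks(HttpHeaders headers,
      InputStream body) {
    String contentSha256 = headers.getHeaderString("x-amz-content-sha256");
    return STREAMING_PAYLOAD.equals(contentSha256)
        ? new SignedChunksInputStream(body)
        : body;
  }
}

With such a helper, both endpoints would reduce their duplicated blocks to a single line: body = SignedPayloadUtil.maybeDecodeSignedChunks(headers, body);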