
Example 1 with OS3Exception

Use of org.apache.hadoop.ozone.s3.exception.OS3Exception in project ozone by apache.

The class ObjectEndpoint, method completeMultipartUpload.

/**
 * Complete a multipart upload.
 */
@POST
@Produces(MediaType.APPLICATION_XML)
public Response completeMultipartUpload(
        @PathParam("bucket") String bucket,
        @PathParam("path") String key,
        @QueryParam("uploadId") @DefaultValue("") String uploadID,
        CompleteMultipartUploadRequest multipartUploadRequest)
        throws IOException, OS3Exception {
    OzoneBucket ozoneBucket = getBucket(bucket);
    // Using LinkedHashMap to preserve ordering of parts list.
    Map<Integer, String> partsMap = new LinkedHashMap<>();
    List<CompleteMultipartUploadRequest.Part> partList = multipartUploadRequest.getPartList();
    OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo;
    try {
        for (CompleteMultipartUploadRequest.Part part : partList) {
            partsMap.put(part.getPartNumber(), part.geteTag());
        }
        if (LOG.isDebugEnabled()) {
            LOG.debug("Parts map {}", partsMap);
        }
        omMultipartUploadCompleteInfo = ozoneBucket.completeMultipartUpload(key, uploadID, partsMap);
        CompleteMultipartUploadResponse completeMultipartUploadResponse = new CompleteMultipartUploadResponse();
        completeMultipartUploadResponse.setBucket(bucket);
        completeMultipartUploadResponse.setKey(key);
        completeMultipartUploadResponse.setETag(omMultipartUploadCompleteInfo.getHash());
        // Location is also set to the bucket name.
        completeMultipartUploadResponse.setLocation(bucket);
        return Response.status(Status.OK).entity(completeMultipartUploadResponse).build();
    } catch (OMException ex) {
        if (ex.getResult() == ResultCodes.INVALID_PART) {
            throw newError(S3ErrorTable.INVALID_PART, key, ex);
        } else if (ex.getResult() == ResultCodes.INVALID_PART_ORDER) {
            throw newError(S3ErrorTable.INVALID_PART_ORDER, key, ex);
        } else if (ex.getResult() == ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR) {
            throw newError(NO_SUCH_UPLOAD, uploadID, ex);
        } else if (ex.getResult() == ResultCodes.ENTITY_TOO_SMALL) {
            throw newError(ENTITY_TOO_SMALL, key, ex);
        } else if (ex.getResult() == ResultCodes.INVALID_REQUEST) {
            OS3Exception os3Exception = newError(INVALID_REQUEST, key, ex);
            os3Exception.setErrorMessage("An error occurred (InvalidRequest) " + "when calling the CompleteMultipartUpload operation: You must " + "specify at least one part");
            throw os3Exception;
        } else if (ex.getResult() == ResultCodes.NOT_A_FILE) {
            OS3Exception os3Exception = newError(INVALID_REQUEST, key, ex);
            os3Exception.setErrorMessage("An error occurred (InvalidRequest) "
                + "when calling the CompleteMultipartUpload operation: "
                + OZONE_OM_ENABLE_FILESYSTEM_PATHS + " is enabled Keys are "
                + "considered as Unix Paths. A directory already exists with a "
                + "given KeyName caused failure for MPU");
            throw os3Exception;
        }
        LOG.error("Error in Complete Multipart Upload Request for bucket: {}, key: {}", bucket, key, ex);
        throw ex;
    }
}
Also used : LinkedHashMap(java.util.LinkedHashMap) OzoneBucket(org.apache.hadoop.ozone.client.OzoneBucket) OmMultipartUploadCompleteInfo(org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo) OS3Exception(org.apache.hadoop.ozone.s3.exception.OS3Exception) OMException(org.apache.hadoop.ozone.om.exceptions.OMException) POST(javax.ws.rs.POST) Produces(javax.ws.rs.Produces)
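
For context, this is roughly what a client-side call against this endpoint could look like with the AWS SDK for Java v1. It is a minimal sketch only: the gateway address assumes Ozone's default S3G HTTP port, and the bucket name, key, upload ID, ETags, and credentials are hypothetical placeholders. Note that CompleteMultipartUploadRequest below is the AWS SDK model class, not the Ozone endpoint class of the same name used above.

import com.amazonaws.auth.AWSStaticCredentialsProvider;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.client.builder.AwsClientBuilder;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest;
import com.amazonaws.services.s3.model.CompleteMultipartUploadResult;
import com.amazonaws.services.s3.model.PartETag;
import java.util.Arrays;
import java.util.List;

public class CompleteMpuClientSketch {

    public static void main(String[] args) {
        // Hypothetical gateway address and credentials; path-style access avoids
        // virtual-host style bucket resolution against a local endpoint.
        AmazonS3 s3 = AmazonS3ClientBuilder.standard()
                .withEndpointConfiguration(
                        new AwsClientBuilder.EndpointConfiguration("http://localhost:9878", "us-east-1"))
                .withCredentials(new AWSStaticCredentialsProvider(
                        new BasicAWSCredentials("accessKey", "secretKey")))
                .enablePathStyleAccess()
                .build();

        // Part numbers and ETags returned by earlier UploadPart calls (placeholder values).
        List<PartETag> parts = Arrays.asList(
                new PartETag(1, "etag-of-part-1"),
                new PartETag(2, "etag-of-part-2"));

        // This call produces the XML body that the endpoint above unmarshals into
        // its CompleteMultipartUploadRequest parameter.
        CompleteMultipartUploadResult result = s3.completeMultipartUpload(
                new CompleteMultipartUploadRequest("bucket1", "dir/key1", "upload-id-from-initiate", parts));
        System.out.println("Completed MPU, ETag: " + result.getETag());
    }
}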

Example 2 with OS3Exception

Use of org.apache.hadoop.ozone.s3.exception.OS3Exception in project ozone by apache.

The class ObjectEndpoint, method createMultipartKey.

private Response createMultipartKey(String bucket, String key, long length,
        int partNumber, String uploadID, InputStream body)
        throws IOException, OS3Exception {
    try {
        OzoneBucket ozoneBucket = getBucket(bucket);
        String copyHeader;
        OzoneOutputStream ozoneOutputStream = null;
        if ("STREAMING-AWS4-HMAC-SHA256-PAYLOAD".equals(headers.getHeaderString("x-amz-content-sha256"))) {
            body = new SignedChunksInputStream(body);
        }
        try {
            ozoneOutputStream = ozoneBucket.createMultipartKey(key, length, partNumber, uploadID);
            copyHeader = headers.getHeaderString(COPY_SOURCE_HEADER);
            if (copyHeader != null) {
                Pair<String, String> result = parseSourceHeader(copyHeader);
                String sourceBucket = result.getLeft();
                String sourceKey = result.getRight();
                Long sourceKeyModificationTime = getBucket(sourceBucket).getKey(sourceKey).getModificationTime().toEpochMilli();
                String copySourceIfModifiedSince = headers.getHeaderString(COPY_SOURCE_IF_MODIFIED_SINCE);
                String copySourceIfUnmodifiedSince = headers.getHeaderString(COPY_SOURCE_IF_UNMODIFIED_SINCE);
                if (!checkCopySourceModificationTime(sourceKeyModificationTime, copySourceIfModifiedSince, copySourceIfUnmodifiedSince)) {
                    throw newError(PRECOND_FAILED, sourceBucket + "/" + sourceKey);
                }
                try (OzoneInputStream sourceObject = getBucket(sourceBucket).readKey(sourceKey)) {
                    String range = headers.getHeaderString(COPY_SOURCE_HEADER_RANGE);
                    if (range != null) {
                        RangeHeader rangeHeader = RangeHeaderParserUtil.parseRangeHeader(range, 0);
                        final long skipped = sourceObject.skip(rangeHeader.getStartOffset());
                        if (skipped != rangeHeader.getStartOffset()) {
                            throw new EOFException("Bytes to skip: " + rangeHeader.getStartOffset() + " actual: " + skipped);
                        }
                        IOUtils.copyLarge(sourceObject, ozoneOutputStream, 0, rangeHeader.getEndOffset() - rangeHeader.getStartOffset() + 1);
                    } else {
                        IOUtils.copy(sourceObject, ozoneOutputStream);
                    }
                }
            } else {
                IOUtils.copy(body, ozoneOutputStream);
            }
        } finally {
            if (ozoneOutputStream != null) {
                ozoneOutputStream.close();
            }
        }
        assert ozoneOutputStream != null;
        OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo = ozoneOutputStream.getCommitUploadPartInfo();
        String eTag = omMultipartCommitUploadPartInfo.getPartName();
        if (copyHeader != null) {
            return Response.ok(new CopyPartResult(eTag)).build();
        } else {
            return Response.ok().header("ETag", eTag).build();
        }
    } catch (OMException ex) {
        if (ex.getResult() == ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR) {
            throw newError(NO_SUCH_UPLOAD, uploadID, ex);
        } else if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
            throw newError(S3ErrorTable.ACCESS_DENIED, bucket + "/" + key, ex);
        }
        throw ex;
    }
}
Also used : OzoneInputStream(org.apache.hadoop.ozone.client.io.OzoneInputStream) SignedChunksInputStream(org.apache.hadoop.ozone.s3.SignedChunksInputStream) OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream) RangeHeader(org.apache.hadoop.ozone.s3.util.RangeHeader) OzoneBucket(org.apache.hadoop.ozone.client.OzoneBucket) OmMultipartCommitUploadPartInfo(org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo) OptionalLong(java.util.OptionalLong) EOFException(java.io.EOFException) OMException(org.apache.hadoop.ozone.om.exceptions.OMException)
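
A matching client-side UploadPart sketch, assuming an AmazonS3 client configured as in the sketch after Example 1; bucket, key, and upload ID are again placeholders. The ETag returned here is what the later CompleteMultipartUpload request must carry.

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.PartETag;
import com.amazonaws.services.s3.model.UploadPartRequest;
import com.amazonaws.services.s3.model.UploadPartResult;
import java.io.ByteArrayInputStream;

public class UploadPartSketch {

    static PartETag uploadPart(AmazonS3 s3, byte[] data, int partNumber) {
        // Bucket, key and upload ID are placeholders; the request reaches the gateway
        // as PUT /bucket1/dir/key1?partNumber=N&uploadId=..., which ObjectEndpoint#put
        // forwards to createMultipartKey.
        UploadPartRequest request = new UploadPartRequest()
                .withBucketName("bucket1")
                .withKey("dir/key1")
                .withUploadId("upload-id-from-initiate")
                .withPartNumber(partNumber)
                .withInputStream(new ByteArrayInputStream(data))
                .withPartSize(data.length);
        UploadPartResult result = s3.uploadPart(request);
        // The ETag of the committed part is what CompleteMultipartUpload later expects.
        return result.getPartETag();
    }
}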

Example 3 with OS3Exception

Use of org.apache.hadoop.ozone.s3.exception.OS3Exception in project ozone by apache.

The class ObjectEndpoint, method initializeMultipartUpload.

/**
 * Initialize MultiPartUpload request.
 * <p>
 * Note: the specific content type is set by the HeaderPreprocessor.
 */
@POST
@Produces(MediaType.APPLICATION_XML)
@Consumes(HeaderPreprocessor.MULTIPART_UPLOAD_MARKER)
public Response initializeMultipartUpload(@PathParam("bucket") String bucket, @PathParam("path") String key) throws IOException, OS3Exception {
    try {
        OzoneBucket ozoneBucket = getBucket(bucket);
        String storageType = headers.getHeaderString(STORAGE_CLASS_HEADER);
        S3StorageType s3StorageType;
        if (storageType == null || storageType.equals("")) {
            s3StorageType = S3StorageType.getDefault(ozoneConfiguration);
        } else {
            s3StorageType = toS3StorageType(storageType);
        }
        ReplicationType replicationType = s3StorageType.getType();
        ReplicationFactor replicationFactor = s3StorageType.getFactor();
        OmMultipartInfo multipartInfo = ozoneBucket.initiateMultipartUpload(key, replicationType, replicationFactor);
        MultipartUploadInitiateResponse multipartUploadInitiateResponse = new MultipartUploadInitiateResponse();
        multipartUploadInitiateResponse.setBucket(bucket);
        multipartUploadInitiateResponse.setKey(key);
        multipartUploadInitiateResponse.setUploadID(multipartInfo.getUploadID());
        return Response.status(Status.OK).entity(multipartUploadInitiateResponse).build();
    } catch (OMException ex) {
        if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
            throw newError(S3ErrorTable.ACCESS_DENIED, key, ex);
        }
        LOG.error("Error in Initiate Multipart Upload Request for bucket: {}, " + "key: {}", bucket, key, ex);
        throw ex;
    }
}
Also used : OzoneBucket(org.apache.hadoop.ozone.client.OzoneBucket) S3StorageType(org.apache.hadoop.ozone.s3.util.S3StorageType) ReplicationType(org.apache.hadoop.hdds.client.ReplicationType) ReplicationFactor(org.apache.hadoop.hdds.client.ReplicationFactor) OmMultipartInfo(org.apache.hadoop.ozone.om.helpers.OmMultipartInfo) OMException(org.apache.hadoop.ozone.om.exceptions.OMException) POST(javax.ws.rs.POST) Produces(javax.ws.rs.Produces) Consumes(javax.ws.rs.Consumes)
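
A minimal client-side sketch of the initiate step with the AWS SDK for Java v1, assuming the same hypothetical client and names as in the earlier sketches. The storage class chosen here travels as the x-amz-storage-class header that this endpoint reads.

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
import com.amazonaws.services.s3.model.InitiateMultipartUploadResult;
import com.amazonaws.services.s3.model.StorageClass;

public class InitiateMpuSketch {

    static String initiate(AmazonS3 s3) {
        // Bucket and key are placeholders. The storage class is sent as the
        // x-amz-storage-class header, which initializeMultipartUpload maps to an
        // Ozone replication type and factor.
        InitiateMultipartUploadRequest request =
                new InitiateMultipartUploadRequest("bucket1", "dir/key1")
                        .withStorageClass(StorageClass.Standard);
        InitiateMultipartUploadResult result = s3.initiateMultipartUpload(request);
        // The upload ID must accompany every subsequent UploadPart and
        // CompleteMultipartUpload call.
        return result.getUploadId();
    }
}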

Example 4 with OS3Exception

Use of org.apache.hadoop.ozone.s3.exception.OS3Exception in project ozone by apache.

The class ObjectEndpoint, method put.

/**
 * Rest endpoint to upload object to a bucket.
 * <p>
 * See: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html for
 * more details.
 */
@PUT
public Response put(
        @PathParam("bucket") String bucketName,
        @PathParam("path") String keyPath,
        @HeaderParam("Content-Length") long length,
        @QueryParam("partNumber") int partNumber,
        @QueryParam("uploadId") @DefaultValue("") String uploadID,
        InputStream body) throws IOException, OS3Exception {
    OzoneOutputStream output = null;
    if (uploadID != null && !uploadID.equals("")) {
        // If uploadID is specified, it is a request for upload part
        return createMultipartKey(bucketName, keyPath, length, partNumber, uploadID, body);
    }
    try {
        String copyHeader = headers.getHeaderString(COPY_SOURCE_HEADER);
        String storageType = headers.getHeaderString(STORAGE_CLASS_HEADER);
        S3StorageType s3StorageType;
        boolean storageTypeDefault;
        if (storageType == null || storageType.equals("")) {
            s3StorageType = S3StorageType.getDefault(ozoneConfiguration);
            storageTypeDefault = true;
        } else {
            s3StorageType = toS3StorageType(storageType);
            storageTypeDefault = false;
        }
        ReplicationType replicationType = s3StorageType.getType();
        ReplicationFactor replicationFactor = s3StorageType.getFactor();
        if (copyHeader != null) {
            // Copy object, as a copy source is available.
            CopyObjectResponse copyObjectResponse = copyObject(copyHeader, bucketName, keyPath, replicationType, replicationFactor, storageTypeDefault);
            return Response.status(Status.OK).entity(copyObjectResponse).header("Connection", "close").build();
        }
        // Normal put object
        OzoneBucket bucket = getBucket(bucketName);
        output = bucket.createKey(keyPath, length, replicationType, replicationFactor, new HashMap<>());
        if ("STREAMING-AWS4-HMAC-SHA256-PAYLOAD".equals(headers.getHeaderString("x-amz-content-sha256"))) {
            body = new SignedChunksInputStream(body);
        }
        IOUtils.copy(body, output);
        return Response.ok().status(HttpStatus.SC_OK).build();
    } catch (OMException ex) {
        if (ex.getResult() == ResultCodes.NOT_A_FILE) {
            OS3Exception os3Exception = newError(INVALID_REQUEST, keyPath, ex);
            os3Exception.setErrorMessage("An error occurred (InvalidRequest) "
                + "when calling the PutObject/MPU PartUpload operation: "
                + OZONE_OM_ENABLE_FILESYSTEM_PATHS + " is enabled Keys are"
                + " considered as Unix Paths. Path has Violated FS Semantics "
                + "which caused put operation to fail.");
            throw os3Exception;
        } else if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
            throw newError(S3ErrorTable.ACCESS_DENIED, keyPath, ex);
        }
        LOG.error("Exception occurred in PutObject", ex);
        throw ex;
    } finally {
        if (output != null) {
            output.close();
        }
    }
}
Also used : S3StorageType(org.apache.hadoop.ozone.s3.util.S3StorageType) OzoneBucket(org.apache.hadoop.ozone.client.OzoneBucket) ReplicationType(org.apache.hadoop.hdds.client.ReplicationType) ReplicationFactor(org.apache.hadoop.hdds.client.ReplicationFactor) HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) SignedChunksInputStream(org.apache.hadoop.ozone.s3.SignedChunksInputStream) OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream) OS3Exception(org.apache.hadoop.ozone.s3.exception.OS3Exception) OMException(org.apache.hadoop.ozone.om.exceptions.OMException) PUT(javax.ws.rs.PUT)
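
A simple client-side PutObject sketch, again assuming the hypothetical AmazonS3 client from the first sketch; bucket and key names are placeholders. The Content-Length set on the metadata is what arrives in this endpoint's length parameter.

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectResult;
import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;

public class PutObjectSketch {

    static PutObjectResult put(AmazonS3 s3) {
        byte[] payload = "hello ozone".getBytes(StandardCharsets.UTF_8);
        ObjectMetadata metadata = new ObjectMetadata();
        // The Content-Length header derived from this metadata becomes the "length"
        // parameter that put() passes to bucket.createKey.
        metadata.setContentLength(payload.length);
        // Bucket and key names are placeholders.
        return s3.putObject("bucket1", "dir/key1",
                new ByteArrayInputStream(payload), metadata);
    }
}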

Example 5 with OS3Exception

Use of org.apache.hadoop.ozone.s3.exception.OS3Exception in project ozone by apache.

The class BucketEndpoint, method get.

/**
 * Rest endpoint to list objects in a specific bucket.
 * <p>
 * See: https://docs.aws.amazon.com/AmazonS3/latest/API/v2-RESTBucketGET.html
 * for more details.
 */
@GET
@SuppressFBWarnings
@SuppressWarnings("parameternumber")
public Response get(
        @PathParam("bucket") String bucketName,
        @QueryParam("delimiter") String delimiter,
        @QueryParam("encoding-type") String encodingType,
        @QueryParam("marker") String marker,
        @DefaultValue("1000") @QueryParam("max-keys") int maxKeys,
        @QueryParam("prefix") String prefix,
        @QueryParam("browser") String browser,
        @QueryParam("continuation-token") String continueToken,
        @QueryParam("start-after") String startAfter,
        @QueryParam("uploads") String uploads,
        @QueryParam("acl") String aclMarker,
        @Context HttpHeaders hh) throws OS3Exception, IOException {
    if (aclMarker != null) {
        S3BucketAcl result = getAcl(bucketName);
        return Response.ok(result, MediaType.APPLICATION_XML_TYPE).build();
    }
    if (browser != null) {
        InputStream browserPage = getClass().getResourceAsStream("/browser.html");
        return Response.ok(browserPage, MediaType.TEXT_HTML_TYPE).build();
    }
    if (uploads != null) {
        return listMultipartUploads(bucketName, prefix);
    }
    if (prefix == null) {
        prefix = "";
    }
    OzoneBucket bucket = getBucket(bucketName);
    Iterator<? extends OzoneKey> ozoneKeyIterator;
    ContinueToken decodedToken = ContinueToken.decodeFromString(continueToken);
    // Assign marker to startAfter, for compatibility with AWS API v1.
    if (startAfter == null && marker != null) {
        startAfter = marker;
    }
    try {
        if (startAfter != null && continueToken != null) {
            // If continuation token and startAfter are both provided, then we
            // ignore startAfter.
            ozoneKeyIterator = bucket.listKeys(prefix, decodedToken.getLastKey());
        } else if (startAfter != null && continueToken == null) {
            ozoneKeyIterator = bucket.listKeys(prefix, startAfter);
        } else if (startAfter == null && continueToken != null) {
            ozoneKeyIterator = bucket.listKeys(prefix, decodedToken.getLastKey());
        } else {
            ozoneKeyIterator = bucket.listKeys(prefix);
        }
    } catch (OMException ex) {
        if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
            throw newError(S3ErrorTable.ACCESS_DENIED, bucketName, ex);
        } else {
            throw ex;
        }
    }
    ListObjectResponse response = new ListObjectResponse();
    response.setDelimiter(delimiter);
    response.setName(bucketName);
    response.setPrefix(prefix);
    response.setMarker(marker == null ? "" : marker);
    response.setMaxKeys(maxKeys);
    response.setEncodingType(ENCODING_TYPE);
    response.setTruncated(false);
    response.setContinueToken(continueToken);
    String prevDir = null;
    if (continueToken != null) {
        prevDir = decodedToken.getLastDir();
    }
    String lastKey = null;
    int count = 0;
    while (ozoneKeyIterator.hasNext()) {
        OzoneKey next = ozoneKeyIterator.next();
        String relativeKeyName = next.getName().substring(prefix.length());
        int depth = StringUtils.countMatches(relativeKeyName, delimiter);
        if (delimiter != null) {
            if (depth > 0) {
                // means key has multiple delimiters in its value.
                // ex: dir/dir1/dir2, where delimiter is "/" and prefix is dir/
                String dirName = relativeKeyName.substring(0, relativeKeyName.indexOf(delimiter));
                if (!dirName.equals(prevDir)) {
                    response.addPrefix(prefix + dirName + delimiter);
                    prevDir = dirName;
                    count++;
                }
            } else if (relativeKeyName.endsWith(delimiter)) {
                // means our key ends with the delimiter.
                // ex: dir/, where prefix is dir and delimiter is /
                response.addPrefix(relativeKeyName);
                count++;
            } else {
                // means our key matches the prefix (if one is given) and does
                // not contain any further common prefix, so it is listed as a key.
                addKey(response, next);
                count++;
            }
        } else {
            addKey(response, next);
            count++;
        }
        if (count == maxKeys) {
            lastKey = next.getName();
            break;
        }
    }
    response.setKeyCount(count);
    if (count < maxKeys) {
        response.setTruncated(false);
    } else if (ozoneKeyIterator.hasNext()) {
        response.setTruncated(true);
        ContinueToken nextToken = new ContinueToken(lastKey, prevDir);
        response.setNextToken(nextToken.encodeToString());
        // Set nextMarker to lastKey, for compatibility with AWS API v1.
        response.setNextMarker(lastKey);
    } else {
        response.setTruncated(false);
    }
    response.setKeyCount(response.getCommonPrefixes().size() + response.getContents().size());
    return Response.ok(response).build();
}
Also used : OzoneBucket(org.apache.hadoop.ozone.client.OzoneBucket) ContinueToken(org.apache.hadoop.ozone.s3.util.ContinueToken) InputStream(java.io.InputStream) OzoneKey(org.apache.hadoop.ozone.client.OzoneKey) OMException(org.apache.hadoop.ozone.om.exceptions.OMException) GET(javax.ws.rs.GET) SuppressFBWarnings(edu.umd.cs.findbugs.annotations.SuppressFBWarnings)
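
A client-side paging sketch with ListObjectsV2 from the AWS SDK for Java v1, assuming the same hypothetical client as above; bucket name, prefix, and delimiter are placeholders. It follows the continuation token that this endpoint emits via setNextToken until the listing is no longer truncated.

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.ListObjectsV2Request;
import com.amazonaws.services.s3.model.ListObjectsV2Result;
import com.amazonaws.services.s3.model.S3ObjectSummary;

public class ListBucketSketch {

    static void listAll(AmazonS3 s3) {
        // Bucket name, prefix and delimiter are placeholders.
        ListObjectsV2Request request = new ListObjectsV2Request()
                .withBucketName("bucket1")
                .withPrefix("dir/")
                .withDelimiter("/")
                .withMaxKeys(1000);
        ListObjectsV2Result result;
        do {
            result = s3.listObjectsV2(request);
            // Common prefixes correspond to the "directories" the endpoint adds via addPrefix.
            for (String commonPrefix : result.getCommonPrefixes()) {
                System.out.println("PRE " + commonPrefix);
            }
            for (S3ObjectSummary summary : result.getObjectSummaries()) {
                System.out.println(summary.getKey() + " " + summary.getSize());
            }
            // The next continuation token in the response comes from setNextToken above;
            // passing it back continues the listing where the previous page stopped.
            request.setContinuationToken(result.getNextContinuationToken());
        } while (result.isTruncated());
    }
}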

Aggregations

OS3Exception (org.apache.hadoop.ozone.s3.exception.OS3Exception) 21
Test (org.junit.Test) 14
OzoneBucket (org.apache.hadoop.ozone.client.OzoneBucket) 12
OMException (org.apache.hadoop.ozone.om.exceptions.OMException) 11
Response (javax.ws.rs.core.Response) 7
ByteArrayInputStream (java.io.ByteArrayInputStream) 6
ArrayList (java.util.ArrayList) 6
IOException (java.io.IOException) 5
HashMap (java.util.HashMap) 5
Part (org.apache.hadoop.ozone.s3.endpoint.CompleteMultipartUploadRequest.Part) 5
LinkedHashMap (java.util.LinkedHashMap) 4
POST (javax.ws.rs.POST) 4
Produces (javax.ws.rs.Produces) 4
HttpHeaders (javax.ws.rs.core.HttpHeaders) 4
OzoneClient (org.apache.hadoop.ozone.client.OzoneClient) 4
OzoneInputStream (org.apache.hadoop.ozone.client.io.OzoneInputStream) 4
OzoneOutputStream (org.apache.hadoop.ozone.client.io.OzoneOutputStream) 4
GET (javax.ws.rs.GET) 3
OzoneAcl (org.apache.hadoop.ozone.OzoneAcl) 3
VisibleForTesting (com.google.common.annotations.VisibleForTesting) 2