Example 91 with ObjectMetadata

use of com.amazonaws.services.s3.model.ObjectMetadata in project crate by crate.

the class S3BlobContainer method executeMultipartUpload.

/**
 * Uploads a blob using multipart upload requests.
 */
void executeMultipartUpload(final S3BlobStore blobStore, final String blobName, final InputStream input, final long blobSize) throws IOException {
    if (blobSize > MAX_FILE_SIZE_USING_MULTIPART.getBytes()) {
        throw new IllegalArgumentException("Multipart upload request size [" + blobSize + "] can't be larger than " + MAX_FILE_SIZE_USING_MULTIPART);
    }
    if (blobSize < MIN_PART_SIZE_USING_MULTIPART.getBytes()) {
        throw new IllegalArgumentException("Multipart upload request size [" + blobSize + "] can't be smaller than " + MIN_PART_SIZE_USING_MULTIPART);
    }
    final long partSize = blobStore.bufferSizeInBytes();
    final Tuple<Long, Long> multiparts = numberOfMultiparts(blobSize, partSize);
    if (multiparts.v1() > Integer.MAX_VALUE) {
        throw new IllegalArgumentException("Too many multipart upload requests, maybe try a larger buffer size?");
    }
    final int nbParts = multiparts.v1().intValue();
    final long lastPartSize = multiparts.v2();
    assert blobSize == (((nbParts - 1) * partSize) + lastPartSize) : "blobSize does not match multipart sizes";
    final SetOnce<String> uploadId = new SetOnce<>();
    final String bucketName = blobStore.bucket();
    boolean success = false;
    final InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName, blobName);
    initRequest.setStorageClass(blobStore.getStorageClass());
    initRequest.setCannedACL(blobStore.getCannedACL());
    if (blobStore.serverSideEncryption()) {
        final ObjectMetadata md = new ObjectMetadata();
        md.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        initRequest.setObjectMetadata(md);
    }
    try (AmazonS3Reference clientReference = blobStore.clientReference()) {
        uploadId.set(clientReference.client().initiateMultipartUpload(initRequest).getUploadId());
        if (Strings.isEmpty(uploadId.get())) {
            throw new IOException("Failed to initialize multipart upload " + blobName);
        }
        final List<PartETag> parts = new ArrayList<>();
        long bytesCount = 0;
        for (int i = 1; i <= nbParts; i++) {
            final UploadPartRequest uploadRequest = new UploadPartRequest();
            uploadRequest.setBucketName(bucketName);
            uploadRequest.setKey(blobName);
            uploadRequest.setUploadId(uploadId.get());
            uploadRequest.setPartNumber(i);
            uploadRequest.setInputStream(input);
            if (i < nbParts) {
                uploadRequest.setPartSize(partSize);
                uploadRequest.setLastPart(false);
            } else {
                uploadRequest.setPartSize(lastPartSize);
                uploadRequest.setLastPart(true);
            }
            bytesCount += uploadRequest.getPartSize();
            final UploadPartResult uploadResponse = clientReference.client().uploadPart(uploadRequest);
            parts.add(uploadResponse.getPartETag());
        }
        if (bytesCount != blobSize) {
            throw new IOException("Failed to execute multipart upload for [" + blobName + "], expected " + blobSize + "bytes sent but got " + bytesCount);
        }
        final CompleteMultipartUploadRequest complRequest = new CompleteMultipartUploadRequest(bucketName, blobName, uploadId.get(), parts);
        clientReference.client().completeMultipartUpload(complRequest);
        success = true;
    } catch (final AmazonClientException e) {
        throw new IOException("Unable to upload object [" + blobName + "] using multipart upload", e);
    } finally {
        if ((success == false) && Strings.hasLength(uploadId.get())) {
            final AbortMultipartUploadRequest abortRequest = new AbortMultipartUploadRequest(bucketName, blobName, uploadId.get());
            try (AmazonS3Reference clientReference = blobStore.clientReference()) {
                clientReference.client().abortMultipartUpload(abortRequest);
            }
        }
    }
}
Also used : SetOnce(org.apache.lucene.util.SetOnce) AmazonClientException(com.amazonaws.AmazonClientException) InitiateMultipartUploadRequest(com.amazonaws.services.s3.model.InitiateMultipartUploadRequest) ArrayList(java.util.ArrayList) UploadPartRequest(com.amazonaws.services.s3.model.UploadPartRequest) AbortMultipartUploadRequest(com.amazonaws.services.s3.model.AbortMultipartUploadRequest) IOException(java.io.IOException) PartETag(com.amazonaws.services.s3.model.PartETag) UploadPartResult(com.amazonaws.services.s3.model.UploadPartResult) ObjectMetadata(com.amazonaws.services.s3.model.ObjectMetadata) CompleteMultipartUploadRequest(com.amazonaws.services.s3.model.CompleteMultipartUploadRequest)
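
For readers who want to try the same pattern outside Crate's S3BlobStore abstraction, the sketch below drives the AWS SDK v1 multipart API directly. The bucket name, key, part size, and AmazonS3ClientBuilder call are illustrative assumptions, and error handling is reduced to the abort-on-failure step shown above; this is a minimal sketch, not the Crate implementation.

// Minimal standalone sketch of the multipart pattern above, assuming a non-empty local
// file and a default-configured client; names are placeholders.
import com.amazonaws.AmazonClientException;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.*;
import java.io.File;
import java.util.ArrayList;
import java.util.List;

public class MultipartUploadSketch {
    public static void main(String[] args) {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        String bucket = "my-bucket";          // assumed bucket
        String key = "backups/blob-0001";     // assumed key
        File file = new File(args[0]);
        long partSize = 5 * 1024 * 1024;      // 5 MB, the S3 minimum part size
        String uploadId = s3.initiateMultipartUpload(
                new InitiateMultipartUploadRequest(bucket, key)).getUploadId();
        try {
            List<PartETag> parts = new ArrayList<>();
            long offset = 0;
            for (int partNumber = 1; offset < file.length(); partNumber++) {
                long size = Math.min(partSize, file.length() - offset);
                UploadPartRequest part = new UploadPartRequest()
                        .withBucketName(bucket)
                        .withKey(key)
                        .withUploadId(uploadId)
                        .withPartNumber(partNumber)
                        .withFile(file)
                        .withFileOffset(offset)
                        .withPartSize(size);
                parts.add(s3.uploadPart(part).getPartETag());
                offset += size;
            }
            s3.completeMultipartUpload(
                    new CompleteMultipartUploadRequest(bucket, key, uploadId, parts));
        } catch (AmazonClientException e) {
            // Mirror the finally block above: abort the upload so no orphaned parts remain.
            s3.abortMultipartUpload(new AbortMultipartUploadRequest(bucket, key, uploadId));
            throw e;
        }
    }
}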

Example 92 with ObjectMetadata

use of com.amazonaws.services.s3.model.ObjectMetadata in project crate by crate.

the class S3BlobContainer method executeSingleUpload.

/**
 * Uploads a blob using a single upload request
 */
void executeSingleUpload(final S3BlobStore blobStore, final String blobName, final InputStream input, final long blobSize) throws IOException {
    // Extra safety checks
    if (blobSize > MAX_FILE_SIZE.getBytes()) {
        throw new IllegalArgumentException("Upload request size [" + blobSize + "] can't be larger than " + MAX_FILE_SIZE);
    }
    if (blobSize > blobStore.bufferSizeInBytes()) {
        throw new IllegalArgumentException("Upload request size [" + blobSize + "] can't be larger than buffer size");
    }
    final ObjectMetadata md = new ObjectMetadata();
    md.setContentLength(blobSize);
    if (blobStore.serverSideEncryption()) {
        md.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
    }
    final PutObjectRequest putRequest = new PutObjectRequest(blobStore.bucket(), blobName, input, md);
    putRequest.setStorageClass(blobStore.getStorageClass());
    putRequest.setCannedAcl(blobStore.getCannedACL());
    try (AmazonS3Reference clientReference = blobStore.clientReference()) {
        clientReference.client().putObject(putRequest);
    } catch (final AmazonClientException e) {
        throw new IOException("Unable to upload object [" + blobName + "] using a single upload", e);
    }
}
Also used : AmazonClientException(com.amazonaws.AmazonClientException) IOException(java.io.IOException) ObjectMetadata(com.amazonaws.services.s3.model.ObjectMetadata) PutObjectRequest(com.amazonaws.services.s3.model.PutObjectRequest)
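
As a point of comparison, the same single-request upload can be written against a bare client. The builder call, bucket, key, and payload below are assumptions for illustration; only the ObjectMetadata handling mirrors the method above.

// Minimal sketch of a single PutObject with explicit metadata; names are illustrative.
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;
import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;

public class SingleUploadSketch {
    public static void main(String[] args) {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        byte[] payload = "hello".getBytes(StandardCharsets.UTF_8);
        ObjectMetadata md = new ObjectMetadata();
        // Setting the content length up front avoids the SDK buffering the whole
        // stream in memory to compute it.
        md.setContentLength(payload.length);
        md.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        s3.putObject(new PutObjectRequest("my-bucket", "greetings/hello.txt",
                new ByteArrayInputStream(payload), md));
    }
}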

Example 93 with ObjectMetadata

use of com.amazonaws.services.s3.model.ObjectMetadata in project alluxio by Alluxio.

the class S3AUnderFileSystem method copyObject.

@Override
protected boolean copyObject(String src, String dst) {
    LOG.debug("Copying {} to {}", src, dst);
    // Retry copy for a few times, in case some AWS internal errors happened during copy.
    int retries = 3;
    for (int i = 0; i < retries; i++) {
        try {
            CopyObjectRequest request = new CopyObjectRequest(mBucketName, src, mBucketName, dst);
            if (mUfsConf.getBoolean(PropertyKey.UNDERFS_S3_SERVER_SIDE_ENCRYPTION_ENABLED)) {
                ObjectMetadata meta = new ObjectMetadata();
                meta.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
                request.setNewObjectMetadata(meta);
            }
            mManager.copy(request).waitForCopyResult();
            return true;
        } catch (AmazonClientException | InterruptedException e) {
            LOG.error("Failed to copy file {} to {}", src, dst, e);
            if (i != retries - 1) {
                LOG.error("Retrying copying file {} to {}", src, dst);
            }
        }
    }
    LOG.error("Failed to copy file {} to {}, after {} retries", src, dst, retries);
    return false;
}
Also used : CopyObjectRequest(com.amazonaws.services.s3.model.CopyObjectRequest) AmazonClientException(com.amazonaws.AmazonClientException) ObjectMetadata(com.amazonaws.services.s3.model.ObjectMetadata)
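
The mManager used above is an AWS SDK TransferManager. A stripped-down version of the same copy call, without Alluxio's configuration lookup and retry wrapper, could look like the sketch below; the bucket and key names are placeholders.

// Minimal sketch of a server-side copy with metadata set on the destination object.
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.CopyObjectRequest;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.TransferManagerBuilder;

public class CopyObjectSketch {
    public static void main(String[] args) throws InterruptedException {
        TransferManager tm = TransferManagerBuilder.standard()
                .withS3Client(AmazonS3ClientBuilder.defaultClient())
                .build();
        CopyObjectRequest request =
                new CopyObjectRequest("my-bucket", "src/key", "my-bucket", "dst/key");
        // Re-apply server-side encryption on the destination, as in the example above.
        ObjectMetadata meta = new ObjectMetadata();
        meta.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        request.setNewObjectMetadata(meta);
        tm.copy(request).waitForCopyResult();
        tm.shutdownNow();
    }
}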

Example 94 with ObjectMetadata

use of com.amazonaws.services.s3.model.ObjectMetadata in project alluxio by Alluxio.

the class S3AUnderFileSystem method createEmptyObject.

@Override
public boolean createEmptyObject(String key) {
    try {
        ObjectMetadata meta = new ObjectMetadata();
        meta.setContentLength(0);
        meta.setContentMD5(DIR_HASH);
        meta.setContentType(Mimetypes.MIMETYPE_OCTET_STREAM);
        mClient.putObject(new PutObjectRequest(mBucketName, key, new ByteArrayInputStream(new byte[0]), meta));
        return true;
    } catch (AmazonClientException e) {
        LOG.error("Failed to create object: {}", key, e);
        return false;
    }
}
Also used : ByteArrayInputStream(java.io.ByteArrayInputStream) AmazonClientException(com.amazonaws.AmazonClientException) ObjectMetadata(com.amazonaws.services.s3.model.ObjectMetadata) PutObjectRequest(com.amazonaws.services.s3.model.PutObjectRequest)
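
Zero-byte objects like this are commonly used as directory markers. A self-contained version of the same call might look like the sketch below; the bucket and key are placeholders, and the MD5 of an empty body stands in for Alluxio's precomputed DIR_HASH constant.

// Minimal sketch: create a zero-byte marker object; names are illustrative.
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.util.Base64;
import java.io.ByteArrayInputStream;
import java.security.MessageDigest;

public class EmptyObjectSketch {
    public static void main(String[] args) throws Exception {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        ObjectMetadata meta = new ObjectMetadata();
        meta.setContentLength(0);
        // MD5 of the empty body, the value DIR_HASH holds in the example above.
        meta.setContentMD5(Base64.encodeAsString(
                MessageDigest.getInstance("MD5").digest(new byte[0])));
        meta.setContentType("application/octet-stream");
        s3.putObject(new PutObjectRequest("my-bucket", "some/dir/",
                new ByteArrayInputStream(new byte[0]), meta));
    }
}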

Example 95 with ObjectMetadata

use of com.amazonaws.services.s3.model.ObjectMetadata in project alluxio by Alluxio.

the class S3ALowLevelOutputStream method initMultiPartUpload.

/**
 * Initializes multipart upload.
 */
private void initMultiPartUpload(AmazonS3 s3Client) throws IOException {
    // Generate the object metadata by setting server side encryption, md5 checksum,
    // and encoding as octet stream since no assumptions are made about the file type
    ObjectMetadata meta = new ObjectMetadata();
    if (mSseEnabled) {
        meta.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
    }
    if (mHash != null) {
        meta.setContentMD5(Base64.encodeAsString(mHash.digest()));
    }
    meta.setContentType(Mimetypes.MIMETYPE_OCTET_STREAM);
    AmazonClientException lastException;
    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(mBucketName, mKey).withObjectMetadata(meta);
    do {
        try {
            mUploadId = s3Client.initiateMultipartUpload(initRequest).getUploadId();
            return;
        } catch (AmazonClientException e) {
            lastException = e;
        }
    } while (mRetryPolicy.attempt());
    // This point is only reached if the upload failed more times than the allowed retry count
    throw new IOException("Unable to init multipart upload to " + mKey, lastException);
}
Also used : AmazonClientException(com.amazonaws.AmazonClientException) InitiateMultipartUploadRequest(com.amazonaws.services.s3.model.InitiateMultipartUploadRequest) IOException(java.io.IOException) ObjectMetadata(com.amazonaws.services.s3.model.ObjectMetadata)
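
The retry-until-exhausted shape of this method can be reproduced without Alluxio's RetryPolicy. Below is a hedged sketch with a fixed attempt budget; the client, bucket, key, and maxAttempts parameter are assumptions, and only the last exception is preserved, as in the original.

// Sketch of the same init-with-retries pattern using a simple bounded loop.
import com.amazonaws.AmazonClientException;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
import com.amazonaws.services.s3.model.ObjectMetadata;
import java.io.IOException;

public class InitMultipartSketch {
    static String initWithRetries(AmazonS3 s3, String bucket, String key, int maxAttempts)
            throws IOException {
        ObjectMetadata meta = new ObjectMetadata();
        meta.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        meta.setContentType("application/octet-stream");
        InitiateMultipartUploadRequest init =
                new InitiateMultipartUploadRequest(bucket, key).withObjectMetadata(meta);
        AmazonClientException lastException = null;
        for (int attempt = 0; attempt < maxAttempts; attempt++) {
            try {
                return s3.initiateMultipartUpload(init).getUploadId();
            } catch (AmazonClientException e) {
                lastException = e;
            }
        }
        // Only reached when every attempt failed; surface the last error, as above.
        throw new IOException("Unable to init multipart upload to " + key, lastException);
    }
}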

Aggregations

ObjectMetadata (com.amazonaws.services.s3.model.ObjectMetadata) 566
PutObjectRequest (com.amazonaws.services.s3.model.PutObjectRequest) 191
ByteArrayInputStream (java.io.ByteArrayInputStream) 157
Test (org.junit.Test) 143
IOException (java.io.IOException) 101
InputStream (java.io.InputStream) 80
File (java.io.File) 62
AmazonClientException (com.amazonaws.AmazonClientException) 61
AmazonServiceException (com.amazonaws.AmazonServiceException) 61
S3Object (com.amazonaws.services.s3.model.S3Object) 59
AmazonS3 (com.amazonaws.services.s3.AmazonS3) 54
Date (java.util.Date) 46
S3FileTransferRequestParamsDto (org.finra.herd.model.dto.S3FileTransferRequestParamsDto) 34
GetObjectMetadataRequest (com.amazonaws.services.s3.model.GetObjectMetadataRequest) 33
PutObjectResult (com.amazonaws.services.s3.model.PutObjectResult) 32
GetObjectRequest (com.amazonaws.services.s3.model.GetObjectRequest) 30
AmazonS3Exception (com.amazonaws.services.s3.model.AmazonS3Exception) 29
Upload (com.amazonaws.services.s3.transfer.Upload) 26
SdkClientException (com.amazonaws.SdkClientException) 24
CopyObjectRequest (com.amazonaws.services.s3.model.CopyObjectRequest) 24