use of com.amazonaws.services.s3.model.ObjectMetadata in project crate by crate.
the class S3BlobContainer method executeMultipartUpload.
/**
* Uploads a blob using multipart upload requests.
*/
void executeMultipartUpload(final S3BlobStore blobStore, final String blobName, final InputStream input, final long blobSize) throws IOException {
    if (blobSize > MAX_FILE_SIZE_USING_MULTIPART.getBytes()) {
        throw new IllegalArgumentException("Multipart upload request size [" + blobSize + "] can't be larger than " + MAX_FILE_SIZE_USING_MULTIPART);
    }
    if (blobSize < MIN_PART_SIZE_USING_MULTIPART.getBytes()) {
        throw new IllegalArgumentException("Multipart upload request size [" + blobSize + "] can't be smaller than " + MIN_PART_SIZE_USING_MULTIPART);
    }
    final long partSize = blobStore.bufferSizeInBytes();
    final Tuple<Long, Long> multiparts = numberOfMultiparts(blobSize, partSize);
    if (multiparts.v1() > Integer.MAX_VALUE) {
        throw new IllegalArgumentException("Too many multipart upload requests, maybe try a larger buffer size?");
    }
    final int nbParts = multiparts.v1().intValue();
    final long lastPartSize = multiparts.v2();
    assert blobSize == (((nbParts - 1) * partSize) + lastPartSize) : "blobSize does not match multipart sizes";
    final SetOnce<String> uploadId = new SetOnce<>();
    final String bucketName = blobStore.bucket();
    boolean success = false;
    final InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName, blobName);
    initRequest.setStorageClass(blobStore.getStorageClass());
    initRequest.setCannedACL(blobStore.getCannedACL());
    if (blobStore.serverSideEncryption()) {
        final ObjectMetadata md = new ObjectMetadata();
        md.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        initRequest.setObjectMetadata(md);
    }
    try (AmazonS3Reference clientReference = blobStore.clientReference()) {
        uploadId.set(clientReference.client().initiateMultipartUpload(initRequest).getUploadId());
        if (Strings.isEmpty(uploadId.get())) {
            throw new IOException("Failed to initialize multipart upload " + blobName);
        }
        final List<PartETag> parts = new ArrayList<>();
        long bytesCount = 0;
        // Upload nbParts parts of partSize bytes each; the last part may be shorter.
        for (int i = 1; i <= nbParts; i++) {
            final UploadPartRequest uploadRequest = new UploadPartRequest();
            uploadRequest.setBucketName(bucketName);
            uploadRequest.setKey(blobName);
            uploadRequest.setUploadId(uploadId.get());
            uploadRequest.setPartNumber(i);
            uploadRequest.setInputStream(input);
            if (i < nbParts) {
                uploadRequest.setPartSize(partSize);
                uploadRequest.setLastPart(false);
            } else {
                uploadRequest.setPartSize(lastPartSize);
                uploadRequest.setLastPart(true);
            }
            bytesCount += uploadRequest.getPartSize();
            final UploadPartResult uploadResponse = clientReference.client().uploadPart(uploadRequest);
            parts.add(uploadResponse.getPartETag());
        }
        if (bytesCount != blobSize) {
            throw new IOException("Failed to execute multipart upload for [" + blobName + "], expected " + blobSize + " bytes sent but got " + bytesCount);
        }
        final CompleteMultipartUploadRequest complRequest = new CompleteMultipartUploadRequest(bucketName, blobName, uploadId.get(), parts);
        clientReference.client().completeMultipartUpload(complRequest);
        success = true;
    } catch (final AmazonClientException e) {
        throw new IOException("Unable to upload object [" + blobName + "] using multipart upload", e);
    } finally {
        // Abort an upload that was initiated but never completed, so S3 does not keep orphaned parts.
        if ((success == false) && Strings.hasLength(uploadId.get())) {
            final AbortMultipartUploadRequest abortRequest = new AbortMultipartUploadRequest(bucketName, blobName, uploadId.get());
            try (AmazonS3Reference clientReference = blobStore.clientReference()) {
                clientReference.client().abortMultipartUpload(abortRequest);
            }
        }
    }
}
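The numberOfMultiparts helper is not shown on this page. A minimal sketch consistent with the assertion above (blobSize == ((nbParts - 1) * partSize) + lastPartSize), assuming the same Tuple type and a Tuple.tuple factory, might look like this:

    // Sketch, not the verified crate implementation: split totalSize into parts of
    // partSize bytes; v1 is the number of parts, v2 the size of the last part.
    static Tuple<Long, Long> numberOfMultiparts(final long totalSize, final long partSize) {
        if (partSize <= 0) {
            throw new IllegalArgumentException("Part size must be greater than zero");
        }
        if (totalSize <= partSize) {
            // Everything fits in a single part, which is also the last part.
            return Tuple.tuple(1L, totalSize);
        }
        final long parts = totalSize / partSize;
        final long remaining = totalSize % partSize;
        // A zero remainder means the last part is a full part; otherwise it holds the leftover bytes.
        return remaining == 0 ? Tuple.tuple(parts, partSize) : Tuple.tuple(parts + 1, remaining);
    }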
use of com.amazonaws.services.s3.model.ObjectMetadata in project crate by crate.
the class S3BlobContainer method executeSingleUpload.
/**
* Uploads a blob using a single upload request
*/
void executeSingleUpload(final S3BlobStore blobStore, final String blobName, final InputStream input, final long blobSize) throws IOException {
    // Extra safety checks
    if (blobSize > MAX_FILE_SIZE.getBytes()) {
        throw new IllegalArgumentException("Upload request size [" + blobSize + "] can't be larger than " + MAX_FILE_SIZE);
    }
    if (blobSize > blobStore.bufferSizeInBytes()) {
        throw new IllegalArgumentException("Upload request size [" + blobSize + "] can't be larger than buffer size");
    }
    final ObjectMetadata md = new ObjectMetadata();
    md.setContentLength(blobSize);
    if (blobStore.serverSideEncryption()) {
        md.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
    }
    final PutObjectRequest putRequest = new PutObjectRequest(blobStore.bucket(), blobName, input, md);
    putRequest.setStorageClass(blobStore.getStorageClass());
    putRequest.setCannedAcl(blobStore.getCannedACL());
    try (AmazonS3Reference clientReference = blobStore.clientReference()) {
        clientReference.client().putObject(putRequest);
    } catch (final AmazonClientException e) {
        throw new IOException("Unable to upload object [" + blobName + "] using a single upload", e);
    }
}
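Given the size checks the two methods enforce (a single upload must fit in the buffer, a multipart upload must be at least one part), the caller presumably dispatches on blob size. A hedged sketch of such a dispatcher (the method name writeBlob is an assumption, not confirmed by this page):

    // Sketch: blobs that fit in one buffer take the single-request path,
    // anything larger is uploaded in parts.
    void writeBlob(final S3BlobStore blobStore, final String blobName, final InputStream input, final long blobSize) throws IOException {
        if (blobSize <= blobStore.bufferSizeInBytes()) {
            executeSingleUpload(blobStore, blobName, input, blobSize);
        } else {
            executeMultipartUpload(blobStore, blobName, input, blobSize);
        }
    }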
use of com.amazonaws.services.s3.model.ObjectMetadata in project alluxio by Alluxio.
the class S3AUnderFileSystem method copyObject.
@Override
protected boolean copyObject(String src, String dst) {
    LOG.debug("Copying {} to {}", src, dst);
    // Retry the copy a few times, in case transient AWS internal errors occur during the copy.
    int retries = 3;
    for (int i = 0; i < retries; i++) {
        try {
            CopyObjectRequest request = new CopyObjectRequest(mBucketName, src, mBucketName, dst);
            if (mUfsConf.getBoolean(PropertyKey.UNDERFS_S3_SERVER_SIDE_ENCRYPTION_ENABLED)) {
                ObjectMetadata meta = new ObjectMetadata();
                meta.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
                request.setNewObjectMetadata(meta);
            }
            mManager.copy(request).waitForCopyResult();
            return true;
        } catch (AmazonClientException | InterruptedException e) {
            LOG.error("Failed to copy file {} to {}", src, dst, e);
            if (i != retries - 1) {
                LOG.error("Retrying copy of file {} to {}", src, dst);
            }
        }
    }
    LOG.error("Failed to copy file {} to {}, after {} retries", src, dst, retries);
    return false;
}
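The loop above retries a fixed number of times with no delay between attempts. The same pattern can be factored into a small, self-contained helper; this is a generic sketch of the idea, not an Alluxio API:

    import java.util.concurrent.Callable;

    final class Retries {
        private Retries() {}

        // Run the action up to maxAttempts times; rethrow the last failure if all attempts fail.
        static <T> T withRetries(final Callable<T> action, final int maxAttempts) throws Exception {
            if (maxAttempts < 1) {
                throw new IllegalArgumentException("maxAttempts must be at least 1");
            }
            Exception last = null;
            for (int attempt = 1; attempt <= maxAttempts; attempt++) {
                try {
                    return action.call();
                } catch (Exception e) {
                    last = e;
                }
            }
            throw last;
        }
    }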
use of com.amazonaws.services.s3.model.ObjectMetadata in project alluxio by Alluxio.
the class S3AUnderFileSystem method createEmptyObject.
@Override
public boolean createEmptyObject(String key) {
    try {
        ObjectMetadata meta = new ObjectMetadata();
        meta.setContentLength(0);
        meta.setContentMD5(DIR_HASH);
        meta.setContentType(Mimetypes.MIMETYPE_OCTET_STREAM);
        mClient.putObject(new PutObjectRequest(mBucketName, key, new ByteArrayInputStream(new byte[0]), meta));
        return true;
    } catch (AmazonClientException e) {
        LOG.error("Failed to create object: {}", key, e);
        return false;
    }
}
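S3 expects the Content-MD5 header to be the Base64-encoded MD5 digest of the request body, so the setContentMD5 call above suggests DIR_HASH is a precomputed digest of a zero-byte body; that derivation is an assumption, not shown on this page. A sketch of how such a constant could be computed:

    import java.security.MessageDigest;
    import java.security.NoSuchAlgorithmException;
    import java.util.Base64;

    // Sketch: Content-MD5 for an empty body is the Base64-encoded MD5 of zero bytes.
    static String emptyBodyContentMd5() throws NoSuchAlgorithmException {
        final byte[] digest = MessageDigest.getInstance("MD5").digest(new byte[0]);
        return Base64.getEncoder().encodeToString(digest);
    }

For reference, the MD5 of an empty body is d41d8cd98f00b204e9800998ecf8427e, which encodes to "1B2M2Y8AsgTpgAmY7PhCfg==".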
use of com.amazonaws.services.s3.model.ObjectMetadata in project alluxio by Alluxio.
the class S3ALowLevelOutputStream method initMultiPartUpload.
/**
* Initializes multipart upload.
*/
private void initMultiPartUpload(AmazonS3 s3Client) throws IOException {
    // Generate the object metadata by setting server side encryption, md5 checksum,
    // and encoding as octet stream since no assumptions are made about the file type
    ObjectMetadata meta = new ObjectMetadata();
    if (mSseEnabled) {
        meta.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
    }
    if (mHash != null) {
        meta.setContentMD5(Base64.encodeAsString(mHash.digest()));
    }
    meta.setContentType(Mimetypes.MIMETYPE_OCTET_STREAM);
    AmazonClientException lastException;
    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(mBucketName, mKey).withObjectMetadata(meta);
    do {
        try {
            mUploadId = s3Client.initiateMultipartUpload(initRequest).getUploadId();
            return;
        } catch (AmazonClientException e) {
            lastException = e;
        }
    } while (mRetryPolicy.attempt());
    // This point is only reached if the operation failed more than the allowed retry count
    throw new IOException("Unable to init multipart upload to " + mKey, lastException);
}
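mRetryPolicy is consumed only through attempt(), which presumably returns true while retries remain. A minimal count-based sketch of that contract (not Alluxio's actual RetryPolicy implementation):

    // Sketch: attempt() consumes one retry and reports whether the caller may try again.
    // With the do/while loop above, this allows one initial try plus maxRetries retries.
    final class CountingRetry {
        private final int mMaxRetries;
        private int mRetriesUsed;

        CountingRetry(final int maxRetries) {
            mMaxRetries = maxRetries;
        }

        boolean attempt() {
            if (mRetriesUsed < mMaxRetries) {
                mRetriesUsed++;
                return true;
            }
            return false;
        }
    }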