use of com.amazonaws.services.s3.model.UploadPartRequest (AWS SDK for Java v1) in project beam by apache.
the class S3WritableByteChannel method flush.
/**
 * Uploads the buffered bytes as the next part of the in-progress multipart upload
 * and records the part's ETag for the eventual CompleteMultipartUpload call.
 *
 * @throws IOException if the S3 part upload fails
 */
private void flush() throws IOException {
  // Flip the buffer from write mode to read mode: limit becomes the byte count.
  uploadBuffer.flip();
  int partLength = uploadBuffer.limit();
  ByteArrayInputStream partContents =
      new ByteArrayInputStream(uploadBuffer.array(), 0, partLength);

  // md5.digest() also resets the digest, but we reset explicitly below for clarity.
  String contentMd5 = Base64.encodeAsString(md5.digest());

  UploadPartRequest uploadPartRequest =
      new UploadPartRequest()
          .withBucketName(path.getBucket())
          .withKey(path.getKey())
          .withUploadId(uploadId)
          .withPartNumber(partNumber++)
          .withPartSize(partLength)
          .withMD5Digest(contentMd5)
          .withInputStream(partContents);
  uploadPartRequest.setSSECustomerKey(config.getSSECustomerKey());

  UploadPartResult uploadResult;
  try {
    uploadResult = amazonS3.uploadPart(uploadPartRequest);
  } catch (AmazonClientException e) {
    // Wrap the SDK exception so callers only deal with IOException.
    throw new IOException(e);
  }

  // Prepare the buffer and digest for the next part.
  uploadBuffer.clear();
  md5.reset();
  eTags.add(uploadResult.getPartETag());
}
use of com.amazonaws.services.s3.model.UploadPartRequest (AWS SDK for Java v1) in project beam by apache.
the class S3WritableByteChannelTest method write.
/**
 * Drives a complete multipart upload through an {@link S3WritableByteChannel} backed by a
 * mocked {@link AmazonS3} client, then verifies the SSE settings echoed by the mock and the
 * exact number of initiate/uploadPart/complete calls made by the channel.
 *
 * <p>Fix: the {@code channelSupplier} parameter was a raw {@code Supplier}, so
 * {@code channelSupplier.get()} returned {@code Object} and could not be assigned to
 * {@code S3WritableByteChannel}; it is now parameterized.
 *
 * @param mockAmazonS3 Mockito mock of the S3 client the channel talks to
 * @param channelSupplier creates the channel under test (must use {@code mockAmazonS3})
 * @param path destination bucket/key for the upload
 * @param sseAlgorithm expected SSE algorithm, or null for none
 * @param sseCustomerKeyMd5 expected SSE-C key MD5, or null for none
 * @param sseAwsKeyManagementParams if non-null, forces the algorithm to "aws:kms"
 * @param s3UploadBufferSizeBytes channel part-buffer size; drives the expected part count
 * @param bucketKeyEnabled expected S3 bucket-key flag
 * @param writeReadOnlyBuffer whether to exercise the read-only-buffer write path
 * @throws IOException if the channel write/close fails
 */
private void write(AmazonS3 mockAmazonS3, Supplier<S3WritableByteChannel> channelSupplier, S3ResourceId path, String sseAlgorithm, String sseCustomerKeyMd5, SSEAwsKeyManagementParams sseAwsKeyManagementParams, long s3UploadBufferSizeBytes, boolean bucketKeyEnabled, boolean writeReadOnlyBuffer) throws IOException {
// Stub initiateMultipartUpload to echo back the requested SSE configuration.
InitiateMultipartUploadResult initiateMultipartUploadResult = new InitiateMultipartUploadResult();
initiateMultipartUploadResult.setUploadId("upload-id");
if (sseAlgorithm != null) {
initiateMultipartUploadResult.setSSEAlgorithm(sseAlgorithm);
}
if (sseCustomerKeyMd5 != null) {
initiateMultipartUploadResult.setSSECustomerKeyMd5(sseCustomerKeyMd5);
}
if (sseAwsKeyManagementParams != null) {
// KMS-managed keys always report the "aws:kms" algorithm.
sseAlgorithm = "aws:kms";
initiateMultipartUploadResult.setSSEAlgorithm(sseAlgorithm);
}
initiateMultipartUploadResult.setBucketKeyEnabled(bucketKeyEnabled);
doReturn(initiateMultipartUploadResult).when(mockAmazonS3).initiateMultipartUpload(any(InitiateMultipartUploadRequest.class));
// Sanity-check the stub before exercising the channel (this counts as one
// initiateMultipartUpload interaction in the verify(times(2)) below).
InitiateMultipartUploadResult mockInitiateMultipartUploadResult = mockAmazonS3.initiateMultipartUpload(new InitiateMultipartUploadRequest(path.getBucket(), path.getKey()));
assertEquals(sseAlgorithm, mockInitiateMultipartUploadResult.getSSEAlgorithm());
assertEquals(bucketKeyEnabled, mockInitiateMultipartUploadResult.getBucketKeyEnabled());
assertEquals(sseCustomerKeyMd5, mockInitiateMultipartUploadResult.getSSECustomerKeyMd5());
// Stub uploadPart; again sanity-check it directly (one uploadPart interaction).
UploadPartResult result = new UploadPartResult();
result.setETag("etag");
if (sseCustomerKeyMd5 != null) {
result.setSSECustomerKeyMd5(sseCustomerKeyMd5);
}
doReturn(result).when(mockAmazonS3).uploadPart(any(UploadPartRequest.class));
UploadPartResult mockUploadPartResult = mockAmazonS3.uploadPart(new UploadPartRequest());
assertEquals(sseCustomerKeyMd5, mockUploadPartResult.getSSECustomerKeyMd5());
// Build ~32.5 MiB of content (buffer is over-allocated; only contentSize bytes are written).
int contentSize = 34_078_720;
ByteBuffer uploadContent = ByteBuffer.allocate((int) (contentSize * 2.5));
for (int i = 0; i < contentSize; i++) {
uploadContent.put((byte) 0xff);
}
uploadContent.flip();
S3WritableByteChannel channel = channelSupplier.get();
int uploadedSize = channel.write(writeReadOnlyBuffer ? uploadContent.asReadOnlyBuffer() : uploadContent);
assertEquals(contentSize, uploadedSize);
CompleteMultipartUploadResult completeMultipartUploadResult = new CompleteMultipartUploadResult();
doReturn(completeMultipartUploadResult).when(mockAmazonS3).completeMultipartUpload(any(CompleteMultipartUploadRequest.class));
channel.close();
// Two initiates: one sanity-check above, one from the channel itself.
verify(mockAmazonS3, times(2)).initiateMultipartUpload(notNull(InitiateMultipartUploadRequest.class));
// Parts from the channel, plus the one sanity-check uploadPart call above.
int partQuantity = (int) Math.ceil((double) contentSize / s3UploadBufferSizeBytes) + 1;
verify(mockAmazonS3, times(partQuantity)).uploadPart(notNull(UploadPartRequest.class));
verify(mockAmazonS3, times(1)).completeMultipartUpload(notNull(CompleteMultipartUploadRequest.class));
verifyNoMoreInteractions(mockAmazonS3);
}
use of software.amazon.awssdk.services.s3.model.UploadPartRequest in project aws-doc-sdk-examples by awsdocs.
the class S3ObjectOperations method multipartUpload.
/**
 * Uploads an object to S3 in two parts (5 MB of random bytes, then 3 MB)
 * using the multipart upload API, then completes the upload so S3 merges
 * the parts into a single object.
 *
 * @param bucketName destination bucket
 * @param key destination object key
 * @throws IOException if generating the random part content fails
 */
private static void multipartUpload(String bucketName, String key) throws IOException {
    int mB = 1024 * 1024;
    // snippet-start:[s3.java2.s3_object_operations.upload_multi_part]
    // First create a multipart upload and get the upload id
    CreateMultipartUploadRequest createRequest =
            CreateMultipartUploadRequest.builder().bucket(bucketName).key(key).build();
    String uploadId = s3.createMultipartUpload(createRequest).uploadId();
    System.out.println(uploadId);

    // Upload all the different parts of the object
    UploadPartRequest partRequest1 = UploadPartRequest.builder()
            .bucket(bucketName)
            .key(key)
            .uploadId(uploadId)
            .partNumber(1)
            .build();
    String etag1 =
            s3.uploadPart(partRequest1, RequestBody.fromByteBuffer(getRandomByteBuffer(5 * mB))).eTag();
    CompletedPart part1 = CompletedPart.builder().partNumber(1).eTag(etag1).build();

    UploadPartRequest partRequest2 = UploadPartRequest.builder()
            .bucket(bucketName)
            .key(key)
            .uploadId(uploadId)
            .partNumber(2)
            .build();
    String etag2 =
            s3.uploadPart(partRequest2, RequestBody.fromByteBuffer(getRandomByteBuffer(3 * mB))).eTag();
    CompletedPart part2 = CompletedPart.builder().partNumber(2).eTag(etag2).build();

    // Finally call completeMultipartUpload operation to tell S3 to merge all uploaded
    // parts and finish the multipart operation.
    CompletedMultipartUpload completedUpload =
            CompletedMultipartUpload.builder().parts(part1, part2).build();
    CompleteMultipartUploadRequest completeRequest = CompleteMultipartUploadRequest.builder()
            .bucket(bucketName)
            .key(key)
            .uploadId(uploadId)
            .multipartUpload(completedUpload)
            .build();
    s3.completeMultipartUpload(completeRequest);
    // snippet-end:[s3.java2.s3_object_operations.upload_multi_part]
}
use of com.amazonaws.services.s3.model.UploadPartRequest (AWS SDK for Java v1) in project android-simpl3r by jgilfelt.
the class Uploader method start.
/**
 * Initiate (or resume) a multipart file upload to Amazon S3.
 *
 * <p>If a cached upload id exists, the upload resumes after the last cached part;
 * otherwise a new multipart upload is initiated. Progress is reported through the
 * registered {@code progressListener}, and per-part ETags are cached so an
 * interrupted upload can be resumed later.
 *
 * <p>Fixes: the progress percentage was computed with integer division
 * ({@code (bytesUploaded * 100) / contentLength}) before widening to float, which
 * truncated the value (and threw ArithmeticException on a zero-length file); the
 * resume file position {@code (startPartNumber - 1) * partSize} is now computed in
 * {@code long} arithmetic to avoid int overflow for large files.
 *
 * @return the URL of a successfully uploaded file
 */
public String start() {
// initialize
List<PartETag> partETags = new ArrayList<PartETag>();
final long contentLength = file.length();
long filePosition = 0;
int startPartNumber = 1;
userInterrupted = false;
userAborted = false;
bytesUploaded = 0;
// check if we can resume an incomplete download
String uploadId = getCachedUploadId();
if (uploadId != null) {
// we can resume the download
Log.i(TAG, "resuming upload for " + uploadId);
// get the cached etags
List<PartETag> cachedEtags = getCachedPartEtags();
partETags.addAll(cachedEtags);
// calculate the start position for resume (in long arithmetic to avoid
// int overflow when many parts have already been uploaded)
startPartNumber = cachedEtags.size() + 1;
filePosition = (startPartNumber - 1) * (long) partSize;
bytesUploaded = filePosition;
Log.i(TAG, "resuming at part " + startPartNumber + " position " + filePosition);
} else {
// initiate a new multi part upload
Log.i(TAG, "initiating new upload");
InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(s3bucketName, s3key);
configureInitiateRequest(initRequest);
InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest);
uploadId = initResponse.getUploadId();
}
final AbortMultipartUploadRequest abortRequest = new AbortMultipartUploadRequest(s3bucketName, s3key, uploadId);
for (int k = startPartNumber; filePosition < contentLength; k++) {
// the final part may be smaller than partSize
long thisPartSize = Math.min(partSize, (contentLength - filePosition));
Log.i(TAG, "starting file part " + k + " with size " + thisPartSize);
UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(s3bucketName).withKey(s3key).withUploadId(uploadId).withPartNumber(k).withFileOffset(filePosition).withFile(file).withPartSize(thisPartSize);
ProgressListener s3progressListener = new ProgressListener() {
public void progressChanged(ProgressEvent progressEvent) {
// TODO calling shutdown too brute force?
if (userInterrupted) {
s3Client.shutdown();
throw new UploadIterruptedException("User interrupted");
} else if (userAborted) {
// aborted requests cannot be resumed, so clear any cached etags
clearProgressCache();
s3Client.abortMultipartUpload(abortRequest);
s3Client.shutdown();
}
bytesUploaded += progressEvent.getBytesTransfered();
// Log.d(TAG, "bytesUploaded=" + bytesUploaded);
// broadcast progress; multiply by 100f so the division happens in
// float (integer division truncated the percentage and divided by
// zero for an empty file)
float fpercent = (bytesUploaded * 100f) / contentLength;
int percent = Math.round(fpercent);
if (progressListener != null) {
progressListener.progressChanged(progressEvent, bytesUploaded, percent);
}
}
};
uploadRequest.setProgressListener(s3progressListener);
UploadPartResult result = s3Client.uploadPart(uploadRequest);
partETags.add(result.getPartETag());
// cache the part progress for this upload
if (k == 1) {
initProgressCache(uploadId);
}
// store part etag
cachePartEtag(result);
filePosition += thisPartSize;
}
CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(s3bucketName, s3key, uploadId, partETags);
CompleteMultipartUploadResult result = s3Client.completeMultipartUpload(compRequest);
bytesUploaded = 0;
Log.i(TAG, "upload complete for " + uploadId);
clearProgressCache();
return result.getLocation();
}
Aggregations