use of software.amazon.awssdk.services.s3.model.CompletedPart in project beam by apache.
the class S3FileSystem method multipartCopy.
@VisibleForTesting
CompleteMultipartUploadResponse multipartCopy(S3ResourceId sourcePath, S3ResourceId destinationPath, HeadObjectResponse sourceObjectHead) throws SdkServiceException {
CreateMultipartUploadRequest initiateUploadRequest = CreateMultipartUploadRequest.builder()
    .bucket(destinationPath.getBucket())
    .key(destinationPath.getKey())
    .storageClass(config.getS3StorageClass())
    .metadata(sourceObjectHead.metadata())
    .serverSideEncryption(config.getSSEAlgorithm())
    .ssekmsKeyId(config.getSSEKMSKeyId())
    .sseCustomerKey(config.getSSECustomerKey().getKey())
    .sseCustomerAlgorithm(config.getSSECustomerKey().getAlgorithm())
    .build();
CreateMultipartUploadResponse createMultipartUploadResponse = s3Client.get().createMultipartUpload(initiateUploadRequest);
final String uploadId = createMultipartUploadResponse.uploadId();
List<CompletedPart> completedParts = new ArrayList<>();
final long objectSize = sourceObjectHead.contentLength();
CopyPartResult copyPartResult;
CompletedPart completedPart;
// An empty source object cannot be copied with a ranged UploadPartCopy, so it is copied as a single part without a copySourceRange.
if (objectSize == 0) {
final UploadPartCopyRequest uploadPartCopyRequest = UploadPartCopyRequest.builder()
    .bucket(destinationPath.getBucket())
    .key(destinationPath.getKey())
    .copySource(sourcePath.getBucket() + "/" + sourcePath.getKey())
    .uploadId(uploadId)
    .partNumber(1)
    .sseCustomerKey(config.getSSECustomerKey().getKey())
    .sseCustomerAlgorithm(config.getSSECustomerKey().getAlgorithm())
    .copySourceSSECustomerKey(config.getSSECustomerKey().getKey())
    .copySourceSSECustomerAlgorithm(config.getSSECustomerKey().getAlgorithm())
    .build();
copyPartResult = s3Client.get().uploadPartCopy(uploadPartCopyRequest).copyPartResult();
completedPart = CompletedPart.builder().partNumber(1).eTag(copyPartResult.eTag()).build();
completedParts.add(completedPart);
} else {
long bytePosition = 0;
Integer uploadBufferSizeBytes = config.getS3UploadBufferSizeBytes();
// Amazon parts are 1-indexed, not zero-indexed.
for (int partNumber = 1; bytePosition < objectSize; partNumber++) {
final UploadPartCopyRequest uploadPartCopyRequest = UploadPartCopyRequest.builder()
    .bucket(destinationPath.getBucket())
    .key(destinationPath.getKey())
    .copySource(sourcePath.getBucket() + "/" + sourcePath.getKey())
    .uploadId(uploadId)
    .partNumber(partNumber)
    .copySourceRange(String.format("bytes=%s-%s", bytePosition, Math.min(objectSize - 1, bytePosition + uploadBufferSizeBytes - 1)))
    .sseCustomerKey(config.getSSECustomerKey().getKey())
    .sseCustomerAlgorithm(config.getSSECustomerKey().getAlgorithm())
    .copySourceSSECustomerKey(config.getSSECustomerKey().getKey())
    .copySourceSSECustomerAlgorithm(config.getSSECustomerKey().getAlgorithm())
    .build();
copyPartResult = s3Client.get().uploadPartCopy(uploadPartCopyRequest).copyPartResult();
completedPart = CompletedPart.builder().partNumber(partNumber).eTag(copyPartResult.eTag()).build();
completedParts.add(completedPart);
bytePosition += uploadBufferSizeBytes;
}
}
CompletedMultipartUpload completedMultipartUpload = CompletedMultipartUpload.builder().parts(completedParts).build();
CompleteMultipartUploadRequest completeUploadRequest = CompleteMultipartUploadRequest.builder().bucket(destinationPath.getBucket()).key(destinationPath.getKey()).uploadId(uploadId).multipartUpload(completedMultipartUpload).build();
return s3Client.get().completeMultipartUpload(completeUploadRequest);
}
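The ranged-copy loop above slices the source object into uploadBufferSizeBytes-sized pieces and relies on copySourceRange being inclusive at both ends. A minimal, self-contained sketch of just that partitioning (a hypothetical helper, not part of the Beam code):

import java.util.ArrayList;
import java.util.List;

class CopyRanges {
    // Inclusive "bytes=start-end" ranges for a multipart copy of objectSize bytes
    // using partSize-byte parts; the parts themselves are 1-indexed on the S3 side.
    static List<String> ranges(long objectSize, long partSize) {
        List<String> result = new ArrayList<>();
        for (long pos = 0; pos < objectSize; pos += partSize) {
            long end = Math.min(objectSize - 1, pos + partSize - 1);
            result.add(String.format("bytes=%d-%d", pos, end));
        }
        return result;
    }

    public static void main(String[] args) {
        // A 10 MiB object copied with 4 MiB parts yields three ranges, the last one shorter.
        ranges(10L * 1024 * 1024, 4L * 1024 * 1024).forEach(System.out::println);
    }
}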
use of software.amazon.awssdk.services.s3.model.CompletedPart in project beam by apache.
the class S3WritableByteChannel method flush.
private void flush() throws IOException {
uploadBuffer.flip();
ByteArrayInputStream inputStream = new ByteArrayInputStream(uploadBuffer.array(), 0, uploadBuffer.limit());
UploadPartRequest request = UploadPartRequest.builder()
    .bucket(path.getBucket())
    .key(path.getKey())
    .uploadId(uploadId)
    .partNumber(partNumber++)
    .contentLength((long) uploadBuffer.limit())
    .sseCustomerKey(config.getSSECustomerKey().getKey())
    .sseCustomerAlgorithm(config.getSSECustomerKey().getAlgorithm())
    .sseCustomerKeyMD5(config.getSSECustomerKey().getMD5())
    .contentMD5(Base64.getEncoder().encodeToString(md5.digest()))
    .build();
UploadPartResponse response;
try {
response = s3Client.uploadPart(request, RequestBody.fromInputStream(inputStream, request.contentLength()));
} catch (SdkClientException e) {
throw new IOException(e);
}
CompletedPart part = CompletedPart.builder().partNumber(request.partNumber()).eTag(response.eTag()).build();
uploadBuffer.clear();
md5.reset();
completedParts.add(part);
}
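flush() sends a Content-MD5 value for each part, so the MessageDigest has to track exactly the bytes currently in uploadBuffer. A rough sketch of how the digest is kept in step with the buffer (hypothetical write path, not the actual S3WritableByteChannel code):

import java.nio.ByteBuffer;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Base64;

class PartDigestSketch {
    public static void main(String[] args) throws NoSuchAlgorithmException {
        ByteBuffer uploadBuffer = ByteBuffer.allocate(1024);
        MessageDigest md5 = MessageDigest.getInstance("MD5");

        byte[] chunk = "part payload".getBytes();
        uploadBuffer.put(chunk);
        md5.update(chunk); // update the digest on every write so it matches the buffer contents

        // At flush time the digest becomes the Content-MD5 request value...
        String contentMD5 = Base64.getEncoder().encodeToString(md5.digest());
        System.out.println(contentMD5);

        // ...and both the buffer and the digest are reset for the next part, as flush() does.
        uploadBuffer.clear();
        md5.reset();
    }
}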
use of software.amazon.awssdk.services.s3.model.CompletedPart in project pravega by pravega.
the class S3ChunkStorage method doConcat.
@Override
public int doConcat(ConcatArgument[] chunks) throws ChunkStorageException {
int totalBytesConcatenated = 0;
String targetPath = getObjectPath(chunks[0].getName());
String uploadId = null;
boolean isCompleted = false;
try {
int partNumber = 1;
val response = client.createMultipartUpload(CreateMultipartUploadRequest.builder().bucket(config.getBucket()).key(targetPath).build());
uploadId = response.uploadId();
// check whether the target exists
if (!checkExists(chunks[0].getName())) {
throw new ChunkNotFoundException(chunks[0].getName(), "doConcat - Target segment does not exist");
}
CompletedPart[] completedParts = new CompletedPart[chunks.length];
// Copy the parts
for (int i = 0; i < chunks.length; i++) {
if (0 != chunks[i].getLength()) {
val sourceHandle = chunks[i];
long objectSize = client.headObject(HeadObjectRequest.builder().bucket(this.config.getBucket()).key(getObjectPath(sourceHandle.getName())).build()).contentLength();
Preconditions.checkState(objectSize >= chunks[i].getLength(), "Length of object should be equal or greater. Length on LTS=%s provided=%s", objectSize, chunks[i].getLength());
UploadPartCopyRequest copyRequest = UploadPartCopyRequest.builder()
    .destinationBucket(config.getBucket())
    .destinationKey(targetPath)
    .sourceBucket(config.getBucket())
    .sourceKey(getObjectPath(sourceHandle.getName()))
    .uploadId(uploadId)
    .partNumber(partNumber)
    .copySourceRange(getRangeWithLength(0, chunks[i].getLength()))
    .build();
val copyResult = client.uploadPartCopy(copyRequest);
val eTag = copyResult.copyPartResult().eTag();
completedParts[i] = CompletedPart.builder().partNumber(partNumber).eTag(eTag).build();
partNumber++;
totalBytesConcatenated += chunks[i].getLength();
}
}
// Close the upload
CompletedMultipartUpload completedRequest = CompletedMultipartUpload.builder().parts(completedParts).build();
client.completeMultipartUpload(CompleteMultipartUploadRequest.builder().bucket(config.getBucket()).key(targetPath).multipartUpload(completedRequest).uploadId(uploadId).build());
isCompleted = true;
} catch (RuntimeException e) {
// Catching RuntimeException separately keeps SpotBugs from reporting REC_CATCH_EXCEPTION ("Exception is caught when Exception is not thrown").
throw convertException(chunks[0].getName(), "doConcat", e);
} catch (Exception e) {
throw convertException(chunks[0].getName(), "doConcat", e);
} finally {
if (!isCompleted && null != uploadId) {
try {
client.abortMultipartUpload(AbortMultipartUploadRequest.builder().bucket(config.getBucket()).key(targetPath).uploadId(uploadId).build());
} catch (Exception e) {
throw convertException(chunks[0].getName(), "doConcat", e);
}
}
}
return totalBytesConcatenated;
}
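getRangeWithLength is not shown in this snippet. Assuming it produces the inclusive HTTP range string that copySourceRange expects, a plausible implementation would be:

// Hypothetical helper: inclusive HTTP byte range ("bytes=start-end") covering
// `length` bytes starting at `fromOffset`.
private String getRangeWithLength(long fromOffset, long length) {
    return String.format("bytes=%d-%d", fromOffset, fromOffset + length - 1);
}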
use of software.amazon.awssdk.services.s3.model.CompletedPart in project data-transfer-project by google.
the class BackblazeDataTransferClient method uploadFileUsingMultipartUpload.
private String uploadFileUsingMultipartUpload(String fileKey, File file, long contentLength) throws IOException, AwsServiceException, SdkClientException {
List<CompletedPart> completedParts = new ArrayList<>();
CreateMultipartUploadRequest createMultipartUploadRequest = CreateMultipartUploadRequest.builder().bucket(bucketName).key(fileKey).build();
CreateMultipartUploadResponse createMultipartUploadResponse = s3Client.createMultipartUpload(createMultipartUploadRequest);
long filePosition = 0;
try (InputStream fileInputStream = new FileInputStream(file)) {
for (int i = 1; filePosition < contentLength; i++) {
// Because the last part could be smaller than others, adjust the part size as needed
long partSize = Math.min(partSizeForMultiPartUpload, (contentLength - filePosition));
UploadPartRequest uploadRequest = UploadPartRequest.builder().bucket(bucketName).key(fileKey).uploadId(createMultipartUploadResponse.uploadId()).partNumber(i).build();
RequestBody requestBody = RequestBody.fromInputStream(fileInputStream, partSize);
UploadPartResponse uploadPartResponse = s3Client.uploadPart(uploadRequest, requestBody);
completedParts.add(CompletedPart.builder().partNumber(i).eTag(uploadPartResponse.eTag()).build());
filePosition += partSize;
}
}
CompleteMultipartUploadRequest completeMultipartUploadRequest = CompleteMultipartUploadRequest.builder()
    .bucket(bucketName)
    .key(fileKey)
    .uploadId(createMultipartUploadResponse.uploadId())
    .multipartUpload(CompletedMultipartUpload.builder().parts(completedParts).build())
    .build();
CompleteMultipartUploadResponse completeMultipartUploadResponse = s3Client.completeMultipartUpload(completeMultipartUploadRequest);
return completeMultipartUploadResponse.versionId();
}
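partSizeForMultiPartUpload is a field of BackblazeDataTransferClient that is not shown here. S3-compatible services such as Backblaze B2 require every part except the last to be at least 5 MiB, so an assumed, purely illustrative definition could be:

// Assumed value for illustration: anything >= 5 MiB satisfies the minimum part
// size for all parts except the last one.
private static final long partSizeForMultiPartUpload = 20L * 1024 * 1024; // 20 MiB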
use of software.amazon.awssdk.services.s3.model.CompletedPart in project aws-doc-sdk-examples by awsdocs.
the class S3ObjectOperations method multipartUpload.
/**
* Upload an object in parts
*/
private static void multipartUpload(String bucketName, String key) throws IOException {
int mB = 1024 * 1024;
// snippet-start:[s3.java2.s3_object_operations.upload_multi_part]
// First create a multipart upload and get the upload id
CreateMultipartUploadRequest createMultipartUploadRequest = CreateMultipartUploadRequest.builder().bucket(bucketName).key(key).build();
CreateMultipartUploadResponse response = s3.createMultipartUpload(createMultipartUploadRequest);
String uploadId = response.uploadId();
System.out.println(uploadId);
// Upload all the different parts of the object
UploadPartRequest uploadPartRequest1 = UploadPartRequest.builder().bucket(bucketName).key(key).uploadId(uploadId).partNumber(1).build();
String etag1 = s3.uploadPart(uploadPartRequest1, RequestBody.fromByteBuffer(getRandomByteBuffer(5 * mB))).eTag();
CompletedPart part1 = CompletedPart.builder().partNumber(1).eTag(etag1).build();
UploadPartRequest uploadPartRequest2 = UploadPartRequest.builder().bucket(bucketName).key(key).uploadId(uploadId).partNumber(2).build();
String etag2 = s3.uploadPart(uploadPartRequest2, RequestBody.fromByteBuffer(getRandomByteBuffer(3 * mB))).eTag();
CompletedPart part2 = CompletedPart.builder().partNumber(2).eTag(etag2).build();
// Finally call completeMultipartUpload operation to tell S3 to merge all uploaded
// parts and finish the multipart operation.
CompletedMultipartUpload completedMultipartUpload = CompletedMultipartUpload.builder().parts(part1, part2).build();
CompleteMultipartUploadRequest completeMultipartUploadRequest = CompleteMultipartUploadRequest.builder().bucket(bucketName).key(key).uploadId(uploadId).multipartUpload(completedMultipartUpload).build();
s3.completeMultipartUpload(completeMultipartUploadRequest);
// snippet-end:[s3.java2.s3_object_operations.upload_multi_part]
}
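getRandomByteBuffer is a small helper defined elsewhere in the same example class; it simply wraps size random bytes in a ByteBuffer, likely along these lines (requires java.nio.ByteBuffer and java.util.Random):

private static ByteBuffer getRandomByteBuffer(int size) throws IOException {
    byte[] b = new byte[size];
    new Random().nextBytes(b);
    return ByteBuffer.wrap(b);
}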