Example usage of software.amazon.awssdk.services.s3.model.Bucket in the Apache Beam project: class S3FileSystemTest, method testMultipartCopy.
// Exercises S3FileSystem#multipartCopy end to end against a mocked S3 client:
// stubs the CreateMultipartUpload -> HeadObject -> UploadPartCopy sequence and
// verifies the copy is finished with exactly one CompleteMultipartUpload call.
// sseCustomerKey may carry no key, in which case toMd5 yields null and the
// SSE-C MD5 header is omitted from every stubbed response (hence the null checks).
private void testMultipartCopy(S3FileSystem s3FileSystem, SSECustomerKey sseCustomerKey, long s3UploadBufferSizeBytes) throws IOException {
S3ResourceId sourcePath = S3ResourceId.fromUri(s3FileSystem.getScheme() + "://bucket/from");
S3ResourceId destinationPath = S3ResourceId.fromUri(s3FileSystem.getScheme() + "://bucket/to");
// Stub the multipart-upload initiation with a fixed upload id, propagating the
// customer-key MD5 only when SSE-C is in play.
CreateMultipartUploadResponse.Builder builder = CreateMultipartUploadResponse.builder().uploadId("upload-id");
String sseCustomerKeyMd5 = toMd5(sseCustomerKey);
if (sseCustomerKeyMd5 != null) {
builder.sseCustomerKeyMD5(sseCustomerKeyMd5);
}
CreateMultipartUploadResponse createMultipartUploadResponse = builder.build();
when(s3FileSystem.getS3Client().createMultipartUpload(any(CreateMultipartUploadRequest.class))).thenReturn(createMultipartUploadResponse);
// Sanity-check the stub before driving multipartCopy: the mocked client must
// echo the expected MD5 (or null) back.
assertEquals(sseCustomerKeyMd5, s3FileSystem.getS3Client().createMultipartUpload(CreateMultipartUploadRequest.builder().bucket(destinationPath.getBucket()).key(destinationPath.getKey()).build()).sseCustomerKeyMD5());
// Report a source object 1.5x the upload buffer so the copy spans more than one
// part — which is why two UploadPartCopy responses are stubbed below.
HeadObjectResponse.Builder headObjectResponseBuilder = HeadObjectResponse.builder().contentLength((long) (s3UploadBufferSizeBytes * 1.5)).contentEncoding("read-seek-efficient");
if (sseCustomerKeyMd5 != null) {
headObjectResponseBuilder.sseCustomerKeyMD5(sseCustomerKeyMd5);
}
HeadObjectResponse headObjectResponse = headObjectResponseBuilder.build();
assertGetObjectHead(s3FileSystem, createObjectHeadRequest(sourcePath, sseCustomerKey), sseCustomerKeyMd5, headObjectResponse);
// One CopyPartResult per expected part; distinct etags let the complete request
// reference each part.
CopyPartResult copyPartResult1 = CopyPartResult.builder().eTag("etag-1").build();
CopyPartResult copyPartResult2 = CopyPartResult.builder().eTag("etag-2").build();
UploadPartCopyResponse.Builder uploadPartCopyResponseBuilder1 = UploadPartCopyResponse.builder().copyPartResult(copyPartResult1);
UploadPartCopyResponse.Builder uploadPartCopyResponseBuilder2 = UploadPartCopyResponse.builder().copyPartResult(copyPartResult2);
if (sseCustomerKeyMd5 != null) {
uploadPartCopyResponseBuilder1.sseCustomerKeyMD5(sseCustomerKeyMd5);
uploadPartCopyResponseBuilder2.sseCustomerKeyMD5(sseCustomerKeyMd5);
}
UploadPartCopyResponse uploadPartCopyResponse1 = uploadPartCopyResponseBuilder1.build();
UploadPartCopyResponse uploadPartCopyResponse2 = uploadPartCopyResponseBuilder2.build();
UploadPartCopyRequest uploadPartCopyRequest = UploadPartCopyRequest.builder().sseCustomerKey(sseCustomerKey.getKey()).build();
// Consecutive stubbing: first call yields part 1's response, second call part 2's.
// NOTE: the assertEquals below consumes the first stubbed response before
// multipartCopy runs — preserve this call order when modifying the test.
when(s3FileSystem.getS3Client().uploadPartCopy(any(UploadPartCopyRequest.class))).thenReturn(uploadPartCopyResponse1).thenReturn(uploadPartCopyResponse2);
assertEquals(sseCustomerKeyMd5, s3FileSystem.getS3Client().uploadPartCopy(uploadPartCopyRequest).sseCustomerKeyMD5());
// Drive the code under test and confirm the upload is completed exactly once.
s3FileSystem.multipartCopy(sourcePath, destinationPath, headObjectResponse);
verify(s3FileSystem.getS3Client(), times(1)).completeMultipartUpload(any(CompleteMultipartUploadRequest.class));
}
Example usage of software.amazon.awssdk.services.s3.model.Bucket in the Apache Beam project: class S3FileSystemTest, method deleteThousandsOfObjectsInMultipleBucketsWithS3Options.
@Test
public void deleteThousandsOfObjectsInMultipleBucketsWithS3Options() throws IOException {
  S3FileSystem s3FileSystem = buildMockedS3FileSystem(s3Options());
  // Build 2500 paths in each of two buckets (bucket1 first, keys in ascending
  // order) and delete them all in a single call.
  List<S3ResourceId> pathsToDelete = new ArrayList<>();
  for (String bucketName : ImmutableList.of("bucket1", "bucket2")) {
    for (int keyIndex = 0; keyIndex < 2500; keyIndex++) {
      pathsToDelete.add(
          S3ResourceId.fromComponents("s3", bucketName, String.format("key-%d", keyIndex)));
    }
  }
  s3FileSystem.delete(pathsToDelete);
  // Should require 6 calls to delete 2500 objects in each of 2 buckets.
  verify(s3FileSystem.getS3Client(), times(6)).deleteObjects(any(DeleteObjectsRequest.class));
}
Example usage of software.amazon.awssdk.services.s3.model.Bucket in the Apache Beam project: class S3WritableByteChannelTest, method writeFromConfig.
/**
 * Creates an {@link S3WritableByteChannel} from the given configuration and drives the shared
 * {@code write} helper, forwarding the config's SSE algorithm, customer-key MD5, KMS key id,
 * upload buffer size and bucket-key flag for verification.
 *
 * @param config the S3 filesystem configuration under test
 * @param writeReadOnlyBuffer whether the helper writes read-only byte buffers
 * @throws IOException if the channel write fails
 */
private void writeFromConfig(S3FileSystemConfiguration config, boolean writeReadOnlyBuffer) throws IOException {
S3Client mockS3Client = mock(S3Client.class, withSettings().defaultAnswer(RETURNS_SMART_NULLS));
S3ResourceId path = S3ResourceId.fromUri("s3://bucket/dir/file");
// Parameterized Supplier instead of the raw type, avoiding an unchecked conversion.
Supplier<S3WritableByteChannel> channel = () -> new S3WritableByteChannel(mockS3Client, path, "text/plain", config);
write(mockS3Client, channel, path, config.getSSEAlgorithm(), toMd5(config.getSSECustomerKey()), config.getSSEKMSKeyId(), config.getS3UploadBufferSizeBytes(), config.getBucketKeyEnabled(), writeReadOnlyBuffer);
}
Example usage of software.amazon.awssdk.services.s3.model.Bucket in the Apache Beam project: class S3WritableByteChannelTest, method writeFromOptions.
/**
 * Creates an {@link S3WritableByteChannel} from the given pipeline options (adapted through
 * {@link S3FileSystemConfiguration#fromS3Options}) and drives the shared {@code write} helper,
 * forwarding the options' SSE algorithm, customer-key MD5, KMS key id, upload buffer size and
 * bucket-key flag for verification.
 *
 * @param options the S3 pipeline options under test
 * @param writeReadOnlyBuffer whether the helper writes read-only byte buffers
 * @throws IOException if the channel write fails
 */
private void writeFromOptions(S3Options options, boolean writeReadOnlyBuffer) throws IOException {
S3Client mockS3Client = mock(S3Client.class, withSettings().defaultAnswer(RETURNS_SMART_NULLS));
S3ResourceId path = S3ResourceId.fromUri("s3://bucket/dir/file");
// Parameterized Supplier instead of the raw type, avoiding an unchecked conversion;
// keeps this helper consistent with writeFromConfig.
Supplier<S3WritableByteChannel> channel = () -> new S3WritableByteChannel(mockS3Client, path, "text/plain", S3FileSystemConfiguration.fromS3Options(options));
write(mockS3Client, channel, path, options.getSSEAlgorithm(), toMd5(options.getSSECustomerKey()), options.getSSEKMSKeyId(), options.getS3UploadBufferSizeBytes(), options.getBucketKeyEnabled(), writeReadOnlyBuffer);
}
Example usage of software.amazon.awssdk.services.s3.model.Bucket in the Hazelcast project: class S3MockTest, method generateAndUploadObjects.
// Uploads objectCount objects named prefix+0 .. prefix+(objectCount-1) to the given
// bucket; each object's body is lineCount lines of textPrefix followed by the line
// number. The StringBuilder is reset between objects so every body is identical.
private void generateAndUploadObjects(String bucketName, String prefix, int objectCount, int lineCount, String textPrefix) {
    StringBuilder content = new StringBuilder();
    for (int objectIndex = 0; objectIndex < objectCount; objectIndex++) {
        for (int lineNumber = 0; lineNumber < lineCount; lineNumber++) {
            content.append(textPrefix).append(lineNumber).append(lineSeparator());
        }
        s3Client.putObject(
                PutObjectRequest.builder().bucket(bucketName).key(prefix + objectIndex).build(),
                RequestBody.fromString(content.toString()));
        content.setLength(0);
    }
}
Aggregations