Use of software.amazon.awssdk.services.s3.model.Bucket in the Apache Flink project.
From the class MinioTestContainerTest, method testBucketCreation.
@Test
public void testBucketCreation() {
final String newBucketName = "other-bucket";

// Creating a bucket should yield a non-null Bucket carrying the requested name.
final Bucket createdBucket = getClient().createBucket(newBucketName);
assertThat(createdBucket).isNotNull();
assertThat(createdBucket.getName()).isEqualTo(newBucketName);

// The test container pre-provisions a default bucket, so listing must now
// report exactly that bucket plus the one created above, in any order.
assertThat(getClient().listBuckets())
        .map(Bucket::getName)
        .containsExactlyInAnyOrder(getTestContainer().getDefaultBucketName(), newBucketName);
}
Use of software.amazon.awssdk.services.s3.model.Bucket in the Apache Flink project.
From the class AWSServicesTestUtils, method listBucketObjects.
/**
 * Lists all objects in the given bucket.
 *
 * <p>S3 returns at most 1000 objects per {@code ListObjects} call, so this method follows
 * the truncation marker across pages until the full listing has been collected. The original
 * single-request version silently dropped everything past the first page.
 *
 * @param s3 the asynchronous S3 client to issue requests with
 * @param bucketName the bucket whose objects should be listed
 * @return every object in the bucket, in listing order
 * @throws ExecutionException if a listing request fails
 * @throws InterruptedException if the calling thread is interrupted while waiting
 */
public static List<S3Object> listBucketObjects(S3AsyncClient s3, String bucketName) throws ExecutionException, InterruptedException {
final List<S3Object> objects = new java.util.ArrayList<>();
String marker = null;
ListObjectsResponse response;
do {
ListObjectsRequest listObjects = ListObjectsRequest.builder().bucket(bucketName).marker(marker).build();
CompletableFuture<ListObjectsResponse> res = s3.listObjects(listObjects);
response = res.get();
List<S3Object> contents = response.contents();
objects.addAll(contents);
// nextMarker is only populated when a delimiter was supplied; otherwise the marker
// for the next page is the key of the last object in the current page.
marker = response.nextMarker() != null
        ? response.nextMarker()
        : (contents.isEmpty() ? null : contents.get(contents.size() - 1).key());
} while (Boolean.TRUE.equals(response.isTruncated()) && marker != null);
return objects;
}
Use of software.amazon.awssdk.services.s3.model.Bucket in the Apache Beam project.
From the class S3FileSystem, method expandGlob.
// Expands a wildcard S3 path into the concrete objects it matches.
// S3 can only filter server-side by literal prefix, so the strategy is:
// list by the longest non-wildcard prefix, then filter client-side with a
// regex derived from the glob. Returns an ExpandedGlob that carries either
// the matched paths or the IOException that aborted the listing.
private ExpandedGlob expandGlob(S3ResourceId glob) {
// The S3 API can list objects, filtered by prefix, but not by wildcard.
// Here, we find the longest prefix without wildcard "*",
// then filter the results with a regex.
checkArgument(glob.isWildcard(), "isWildcard");
String keyPrefix = glob.getKeyNonWildcardPrefix();
Pattern wildcardRegexp = Pattern.compile(wildcardToRegexp(glob.getKey()));
LOG.debug("expanding bucket {}, prefix {}, against pattern {}", glob.getBucket(), keyPrefix, wildcardRegexp);
ImmutableList.Builder<S3ResourceId> expandedPaths = ImmutableList.builder();
String continuationToken = null;
do {
// ListObjectsV2 pages at 1000 keys; the continuation token walks the pages.
ListObjectsV2Request request = ListObjectsV2Request.builder().bucket(glob.getBucket()).prefix(keyPrefix).continuationToken(continuationToken).build();
ListObjectsV2Response response;
try {
response = s3Client.get().listObjectsV2(request);
} catch (SdkServiceException e) {
// Listing failures are reported through the ExpandedGlob rather than thrown,
// so callers can surface them per-glob.
return ExpandedGlob.create(glob, new IOException(e));
}
continuationToken = response.nextContinuationToken();
List<S3Object> contents = response.contents();
// Keep only the keys whose full key matches the glob-derived regex, and
// record size/last-modified so downstream match results are fully populated.
contents.stream().filter(s3Object -> wildcardRegexp.matcher(s3Object.key()).matches()).forEach(s3Object -> {
S3ResourceId expandedPath = S3ResourceId.fromComponents(glob.getScheme(), glob.getBucket(), s3Object.key()).withSize(s3Object.size()).withLastModified(Date.from(s3Object.lastModified()));
LOG.debug("Expanded S3 object path {}", expandedPath);
expandedPaths.add(expandedPath);
});
// A null continuation token means the final page has been consumed.
} while (continuationToken != null);
return ExpandedGlob.create(glob, expandedPaths.build());
}
Use of software.amazon.awssdk.services.s3.model.Bucket in the Apache Beam project.
From the class S3FileSystem, method multipartCopy.
// Copies an S3 object server-side via the multipart-upload API, preserving the
// source's metadata and applying the configured encryption settings.
//
// Fixes over the previous version:
//  - Each CompletedPart in the multi-part branch now carries its own part number;
//    previously every part was registered as partNumber(1), so CompleteMultipartUpload
//    would fail (or silently keep only one part) for any object larger than one part.
//  - copySource in the multi-part branch now references the SOURCE bucket; previously
//    it used the destination bucket, copying the wrong object when buckets differ.
@VisibleForTesting
CompleteMultipartUploadResponse multipartCopy(S3ResourceId sourcePath, S3ResourceId destinationPath, HeadObjectResponse sourceObjectHead) throws SdkServiceException {
CreateMultipartUploadRequest initiateUploadRequest = CreateMultipartUploadRequest.builder().bucket(destinationPath.getBucket()).key(destinationPath.getKey()).storageClass(config.getS3StorageClass()).metadata(sourceObjectHead.metadata()).serverSideEncryption(config.getSSEAlgorithm()).ssekmsKeyId(config.getSSEKMSKeyId()).sseCustomerKey(config.getSSECustomerKey().getKey()).sseCustomerAlgorithm(config.getSSECustomerKey().getAlgorithm()).build();
CreateMultipartUploadResponse createMultipartUploadResponse = s3Client.get().createMultipartUpload(initiateUploadRequest);
final String uploadId = createMultipartUploadResponse.uploadId();
List<CompletedPart> completedParts = new ArrayList<>();
final long objectSize = sourceObjectHead.contentLength();
CopyPartResult copyPartResult;
CompletedPart completedPart;
// without using S3FileSystem.copy in the future
if (objectSize == 0) {
// Zero-byte objects still require one (empty) part for the upload to complete.
final UploadPartCopyRequest uploadPartCopyRequest = UploadPartCopyRequest.builder().bucket(sourcePath.getBucket()).key(sourcePath.getKey()).copySource(sourcePath.getBucket() + "/" + sourcePath.getKey()).uploadId(uploadId).partNumber(1).sseCustomerKey(config.getSSECustomerKey().getKey()).sseCustomerAlgorithm(config.getSSECustomerKey().getAlgorithm()).copySourceSSECustomerKey(config.getSSECustomerKey().getKey()).copySourceSSECustomerAlgorithm(config.getSSECustomerKey().getAlgorithm()).build();
copyPartResult = s3Client.get().uploadPartCopy(uploadPartCopyRequest).copyPartResult();
completedPart = CompletedPart.builder().partNumber(1).eTag(copyPartResult.eTag()).build();
completedParts.add(completedPart);
} else {
long bytePosition = 0;
Integer uploadBufferSizeBytes = config.getS3UploadBufferSizeBytes();
// Amazon parts are 1-indexed, not zero-indexed.
for (int partNumber = 1; bytePosition < objectSize; partNumber++) {
// copySource must point at the SOURCE object (bucket/key), not the destination.
final UploadPartCopyRequest uploadPartCopyRequest = UploadPartCopyRequest.builder().bucket(sourcePath.getBucket()).key(sourcePath.getKey()).copySource(sourcePath.getBucket() + "/" + sourcePath.getKey()).uploadId(uploadId).partNumber(partNumber).copySourceRange(String.format("bytes=%s-%s", bytePosition, Math.min(objectSize - 1, bytePosition + uploadBufferSizeBytes - 1))).sseCustomerKey(config.getSSECustomerKey().getKey()).sseCustomerAlgorithm(config.getSSECustomerKey().getAlgorithm()).copySourceSSECustomerKey(config.getSSECustomerKey().getKey()).copySourceSSECustomerAlgorithm(config.getSSECustomerKey().getAlgorithm()).build();
copyPartResult = s3Client.get().uploadPartCopy(uploadPartCopyRequest).copyPartResult();
// Register each part under its own 1-based part number.
completedPart = CompletedPart.builder().partNumber(partNumber).eTag(copyPartResult.eTag()).build();
completedParts.add(completedPart);
bytePosition += uploadBufferSizeBytes;
}
}
CompletedMultipartUpload completedMultipartUpload = CompletedMultipartUpload.builder().parts(completedParts).build();
CompleteMultipartUploadRequest completeUploadRequest = CompleteMultipartUploadRequest.builder().bucket(destinationPath.getBucket()).key(destinationPath.getKey()).uploadId(uploadId).multipartUpload(completedMultipartUpload).build();
return s3Client.get().completeMultipartUpload(completeUploadRequest);
}
Use of software.amazon.awssdk.services.s3.model.Bucket in the Apache Beam project.
From the class S3FileSystem, method delete.
/**
 * Deletes the given keys from {@code bucket} with a single batch request.
 *
 * @param bucket the bucket containing the keys
 * @param keys the object keys to delete; must not exceed the per-request S3 limit
 * @throws IOException if the delete request is rejected by the service
 */
private void delete(String bucket, Collection<String> keys) throws IOException {
checkArgument(
    keys.size() <= MAX_DELETE_OBJECTS_PER_REQUEST,
    "only %s keys can be deleted per request, but got %s",
    MAX_DELETE_OBJECTS_PER_REQUEST,
    keys.size());
// Wrap every key in an ObjectIdentifier and bundle them all into one batch request.
List<ObjectIdentifier> toDelete =
    keys.stream().map(key -> ObjectIdentifier.builder().key(key).build()).collect(Collectors.toList());
DeleteObjectsRequest request =
    DeleteObjectsRequest.builder()
        .bucket(bucket)
        .delete(Delete.builder().objects(toDelete).quiet(true).build())
        .build();
try {
s3Client.get().deleteObjects(request);
} catch (SdkServiceException e) {
// Preserve the service exception as the cause so callers can diagnose the failure.
throw new IOException(e);
}
}
Aggregations