Example usage of software.amazon.awssdk.services.s3.model.Bucket in the Apache Beam project:
class S3ReadableSeekableByteChannel, method read.
/**
 * Reads bytes from the S3 object into {@code destinationBuffer}, lazily opening the
 * underlying GetObject stream on first use (resuming from {@code position} via a Range
 * header when a prior seek advanced it).
 *
 * @param destinationBuffer buffer to fill; read stops when it is full or the stream ends
 * @return number of bytes read, 0 if the buffer had no remaining space, or -1 at EOF
 * @throws IOException if the channel is closed or the S3 request fails
 */
@Override
public int read(ByteBuffer destinationBuffer) throws IOException {
  if (!isOpen()) {
    throw new ClosedChannelException();
  }
  if (!destinationBuffer.hasRemaining()) {
    return 0;
  }
  if (position == contentLength) {
    return -1;
  }
  if (s3ResponseInputStream == null) {
    GetObjectRequest.Builder builder =
        GetObjectRequest.builder()
            .bucket(path.getBucket())
            .key(path.getKey())
            .sseCustomerKey(config.getSSECustomerKey().getKey())
            .sseCustomerAlgorithm(config.getSSECustomerKey().getAlgorithm());
    if (position > 0) {
      // HTTP Range last-byte-pos is inclusive (RFC 7233), so the final readable
      // offset is contentLength - 1. The previous "bytes=position-contentLength"
      // form asked for one byte past the end; servers clamp it, but it was malformed.
      builder.range(String.format("bytes=%s-%s", position, contentLength - 1));
    }
    GetObjectRequest request = builder.build();
    try {
      s3ResponseInputStream = s3Client.getObject(request);
    } catch (SdkClientException e) {
      throw new IOException(e);
    }
    // 1 MiB buffer to amortize the per-call cost of reading from the HTTP stream.
    s3ObjectContentChannel =
        Channels.newChannel(new BufferedInputStream(s3ResponseInputStream, 1024 * 1024));
  }
  int totalBytesRead = 0;
  int bytesRead = 0;
  // Accumulate reads until the buffer is full or the stream is exhausted.
  // Note: bytesRead is added at the top of the loop so a terminal -1 is never summed.
  do {
    totalBytesRead += bytesRead;
    try {
      bytesRead = s3ObjectContentChannel.read(destinationBuffer);
    } catch (SdkServiceException e) {
      throw new IOException(e);
    }
  } while (bytesRead > 0);
  position += totalBytesRead;
  return totalBytesRead;
}
Example usage of software.amazon.awssdk.services.s3.model.Bucket in the Apache Beam project:
class S3WritableByteChannel, method flush.
/**
 * Uploads the currently buffered bytes as the next part of the in-progress multipart
 * upload, records the resulting {@link CompletedPart}, and resets the buffer and MD5
 * digest for the next part.
 *
 * @throws IOException if the UploadPart call fails on the client side
 */
private void flush() throws IOException {
  uploadBuffer.flip();
  int partLength = uploadBuffer.limit();
  ByteArrayInputStream partStream =
      new ByteArrayInputStream(uploadBuffer.array(), 0, partLength);
  // digest() also resets the digest internally, but md5.reset() below keeps intent explicit.
  String partMd5 = Base64.getEncoder().encodeToString(md5.digest());
  UploadPartRequest uploadRequest =
      UploadPartRequest.builder()
          .bucket(path.getBucket())
          .key(path.getKey())
          .uploadId(uploadId)
          .partNumber(partNumber++)
          .contentLength((long) partLength)
          .sseCustomerKey(config.getSSECustomerKey().getKey())
          .sseCustomerAlgorithm(config.getSSECustomerKey().getAlgorithm())
          .sseCustomerKeyMD5(config.getSSECustomerKey().getMD5())
          .contentMD5(partMd5)
          .build();
  UploadPartResponse uploadResponse;
  try {
    uploadResponse =
        s3Client.uploadPart(
            uploadRequest, RequestBody.fromInputStream(partStream, uploadRequest.contentLength()));
  } catch (SdkClientException e) {
    throw new IOException(e);
  }
  completedParts.add(
      CompletedPart.builder()
          .partNumber(uploadRequest.partNumber())
          .eTag(uploadResponse.eTag())
          .build());
  uploadBuffer.clear();
  md5.reset();
}
Example usage of software.amazon.awssdk.services.s3.model.Bucket in the Apache Beam project:
class S3FileSystemTest, method matchNonGlobNotReadSeekEfficient.
/**
 * Verifies that a gzip-encoded object is reported as NOT read-seek efficient:
 * ranged reads into gzip content cannot be decoded, so seeking is inefficient.
 */
@Test
public void matchNonGlobNotReadSeekEfficient() {
  S3FileSystem s3FileSystem = buildMockedS3FileSystem(s3Config("s3"));
  S3ResourceId resourceId = S3ResourceId.fromUri("s3://testbucket/testdirectory/filethatexists");
  long modifiedEpochMillis = 1540000000000L;

  HeadObjectResponse stubbedHead =
      HeadObjectResponse.builder()
          .contentLength(100L)
          .lastModified(Instant.ofEpochMilli(modifiedEpochMillis))
          .contentEncoding("gzip")
          .build();
  HeadObjectRequest expectedHead =
      HeadObjectRequest.builder().bucket(resourceId.getBucket()).key(resourceId.getKey()).build();
  when(s3FileSystem
          .getS3Client()
          .headObject(argThat(new GetHeadObjectRequestMatcher(expectedHead))))
      .thenReturn(stubbedHead);

  MatchResult matchResult = s3FileSystem.matchNonGlobPath(resourceId);

  MatchResult.Metadata expectedMetadata =
      MatchResult.Metadata.builder()
          .setSizeBytes(100)
          .setLastModifiedMillis(modifiedEpochMillis)
          .setResourceId(resourceId)
          .setIsReadSeekEfficient(false)
          .build();
  assertThat(matchResult, MatchResultMatcher.create(ImmutableList.of(expectedMetadata)));
}
Example usage of software.amazon.awssdk.services.s3.model.Bucket in the Apache Beam project:
class S3FileSystemTest, method testAtomicCopy.
/**
 * Helper exercising {@code S3FileSystem.atomicCopy}: stubs the mocked S3 client's
 * copyObject to echo back the expected SSE-C key MD5, then verifies the copy is
 * issued (twice total: once directly here, once inside atomicCopy).
 *
 * @param s3FileSystem filesystem under test with a mocked S3 client
 * @param sseCustomerKey customer-provided encryption key (its MD5 may be null)
 * @throws IOException if atomicCopy fails
 */
private void testAtomicCopy(S3FileSystem s3FileSystem, SSECustomerKey sseCustomerKey)
    throws IOException {
  S3ResourceId sourcePath = S3ResourceId.fromUri(s3FileSystem.getScheme() + "://bucket/from");
  S3ResourceId destinationPath = S3ResourceId.fromUri(s3FileSystem.getScheme() + "://bucket/to");

  CopyObjectResponse.Builder builder = CopyObjectResponse.builder();
  String sseCustomerKeyMd5 = toMd5(sseCustomerKey);
  if (sseCustomerKeyMd5 != null) {
    builder.sseCustomerKeyMD5(sseCustomerKeyMd5);
  }
  CopyObjectResponse copyObjectResponse = builder.build();

  // BUG FIX: the original called .destinationBucket(...) twice — the second call
  // overwrote the bucket with the object key and left destinationKey unset.
  CopyObjectRequest copyObjectRequest =
      CopyObjectRequest.builder()
          .copySource(sourcePath.getBucket() + "/" + sourcePath.getKey())
          .destinationBucket(destinationPath.getBucket())
          .destinationKey(destinationPath.getKey())
          .sseCustomerKey(sseCustomerKey.getKey())
          .copySourceSSECustomerAlgorithm(sseCustomerKey.getAlgorithm())
          .build();
  when(s3FileSystem.getS3Client().copyObject(any(CopyObjectRequest.class)))
      .thenReturn(copyObjectResponse);
  assertEquals(
      sseCustomerKeyMd5,
      s3FileSystem.getS3Client().copyObject(copyObjectRequest).sseCustomerKeyMD5());

  HeadObjectResponse headObjectResponse = HeadObjectResponse.builder().build();
  s3FileSystem.atomicCopy(sourcePath, destinationPath, headObjectResponse);

  // One copyObject above plus one inside atomicCopy.
  verify(s3FileSystem.getS3Client(), times(2)).copyObject(any(CopyObjectRequest.class));
}
Example usage of software.amazon.awssdk.services.s3.model.Bucket in the Apache Beam project:
class S3FileSystemTest, method matchNonGlobNullContentEncodingWithOptions.
/**
 * Verifies that an object with a null Content-Encoding is reported as read-seek
 * efficient: no transfer encoding means ranged reads are usable.
 */
@Test
public void matchNonGlobNullContentEncodingWithOptions() {
  S3FileSystem s3FileSystem = buildMockedS3FileSystem(s3Options());
  S3ResourceId resourceId = S3ResourceId.fromUri("s3://testbucket/testdirectory/filethatexists");
  long modifiedEpochMillis = 1540000000000L;

  HeadObjectResponse stubbedHead =
      HeadObjectResponse.builder()
          .contentLength(100L)
          .lastModified(Instant.ofEpochMilli(modifiedEpochMillis))
          .contentEncoding(null)
          .build();
  HeadObjectRequest expectedHead =
      HeadObjectRequest.builder().bucket(resourceId.getBucket()).key(resourceId.getKey()).build();
  when(s3FileSystem
          .getS3Client()
          .headObject(argThat(new GetHeadObjectRequestMatcher(expectedHead))))
      .thenReturn(stubbedHead);

  MatchResult matchResult = s3FileSystem.matchNonGlobPath(resourceId);

  MatchResult.Metadata expectedMetadata =
      MatchResult.Metadata.builder()
          .setSizeBytes(100)
          .setLastModifiedMillis(modifiedEpochMillis)
          .setResourceId(resourceId)
          .setIsReadSeekEfficient(true)
          .build();
  assertThat(matchResult, MatchResultMatcher.create(ImmutableList.of(expectedMetadata)));
}
Aggregations