Use of software.amazon.awssdk.services.s3.model.UploadPartRequest in project Singularity by HubSpot: class SingularityS3Uploader, method multipartUpload. (Note: despite the v2 package name in the heading, the snippets below use AWS SDK for Java v1 client classes.)
/**
 * Uploads {@code file} to S3 under {@code key} using the S3 multipart upload API.
 *
 * <p>The file is split into parts of at most {@code configuration.getUploadPartSize()}
 * bytes; the final part may be smaller. On any failure the multipart upload is
 * aborted so S3 does not retain (and bill for) orphaned parts.
 *
 * @param key               destination object key in {@code bucketName}
 * @param file              local file to upload
 * @param objectMetadata    metadata to attach to the object
 * @param maybeStorageClass optional storage class for the object
 * @throws Exception if initiating, uploading a part, or completing the upload fails;
 *                   the upload is aborted before the exception is rethrown
 */
private void multipartUpload(String key, File file, ObjectMetadata objectMetadata, Optional<StorageClass> maybeStorageClass) throws Exception {
  List<PartETag> partETags = new ArrayList<>();
  InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName, key, objectMetadata);
  if (maybeStorageClass.isPresent()) {
    initRequest.setStorageClass(maybeStorageClass.get());
  }
  InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest);
  long contentLength = file.length();
  long configuredPartSize = configuration.getUploadPartSize();
  try {
    long filePosition = 0;
    for (int partNumber = 1; filePosition < contentLength; partNumber++) {
      // Recompute the part size from the configured value each iteration
      // (rather than mutating it) — only the final part may be short.
      long partSize = Math.min(configuredPartSize, contentLength - filePosition);
      UploadPartRequest uploadRequest = new UploadPartRequest()
          .withBucketName(bucketName)
          .withKey(key)
          .withUploadId(initResponse.getUploadId())
          .withPartNumber(partNumber)
          .withFileOffset(filePosition)
          .withFile(file)
          .withPartSize(partSize);
      partETags.add(s3Client.uploadPart(uploadRequest).getPartETag());
      filePosition += partSize;
    }
    s3Client.completeMultipartUpload(
        new CompleteMultipartUploadRequest(bucketName, key, initResponse.getUploadId(), partETags));
  } catch (Exception e) {
    // Abort so S3 discards the already-uploaded parts, then rethrow the original
    // exception directly. (Throwables.propagate is deprecated and was needlessly
    // wrapping checked exceptions even though this method declares `throws Exception`.)
    s3Client.abortMultipartUpload(new AbortMultipartUploadRequest(bucketName, key, initResponse.getUploadId()));
    throw e;
  }
}
Use of software.amazon.awssdk.services.s3.model.UploadPartRequest in project bender by Nextdoor: class S3Transport, method sendStream.
/**
 * Writes {@code input} to S3 as one part of a (possibly ongoing) multipart upload
 * keyed by {@code key}.
 *
 * <p>If no multipart upload exists yet for {@code key}, one is initiated and
 * tracked in {@code multiPartUploads}; otherwise the existing upload is appended
 * to. On an {@link AmazonClientException} the upload is aborted on S3 and the
 * stale tracking entry is removed so a subsequent call for the same key starts
 * a fresh upload. The input stream is always closed before returning.
 *
 * @param input      stream of bytes to upload as the next part; closed on exit
 * @param key        destination object key
 * @param streamSize exact size in bytes of {@code input}, used as the part size
 * @throws TransportException if the part upload fails
 */
protected void sendStream(InputStream input, String key, long streamSize) throws TransportException {
  /*
   * Create metadata
   */
  ObjectMetadata metadata = new ObjectMetadata();
  /*
   * Find if a multipart upload has already begun or start a new one.
   */
  MultiPartUpload upload;
  synchronized (multiPartUploads) {
    if (!multiPartUploads.containsKey(key)) {
      InitiateMultipartUploadRequest uploadRequest = new InitiateMultipartUploadRequest(bucketName, key);
      uploadRequest.setObjectMetadata(metadata);
      InitiateMultipartUploadResult res = client.initiateMultipartUpload(uploadRequest);
      upload = new MultiPartUpload(bucketName, key, res.getUploadId());
      multiPartUploads.put(key, upload);
    } else {
      upload = multiPartUploads.get(key);
    }
  }
  /*
   * Write out to S3. Note that the S3 client auto closes the input stream.
   */
  UploadPartRequest req = upload.getUploadPartRequest().withInputStream(input).withPartSize(streamSize);
  try {
    UploadPartResult res = client.uploadPart(req);
    upload.addPartETag(res.getPartETag());
  } catch (AmazonClientException e) {
    client.abortMultipartUpload(upload.getAbortMultipartUploadRequest());
    /*
     * The upload was aborted on S3, so drop our tracking entry too; otherwise a
     * later sendStream for this key would try to append to the aborted upload id.
     */
    synchronized (multiPartUploads) {
      multiPartUploads.remove(key);
    }
    throw new TransportException("unable to put file" + e, e);
  } finally {
    try {
      input.close();
    } catch (IOException e) {
      logger.warn("error encountered while closing input stream", e);
    }
  }
}
Use of software.amazon.awssdk.services.s3.model.UploadPartRequest in project bender by Nextdoor: class S3TransporterTest, method testGzFilename.
@Test
public void testGzFilename() throws TransportException, IllegalStateException, IOException {
  /*
   * Create mock client, requests, and replies
   */
  AmazonS3Client mockClient = getMockClient();
  /*
   * Fill buffer with mock data
   */
  S3TransportBuffer buffer = new S3TransportBuffer(1000, true, new S3TransportSerializer());
  InternalEvent mockIevent = mock(InternalEvent.class);
  doReturn("foo").when(mockIevent).getSerialized();
  /*
   * Create transport
   */
  Map<String, MultiPartUpload> multiPartUploads = new HashMap<String, MultiPartUpload>(0);
  S3Transport transport = new S3Transport(mockClient, "bucket", "basepath/", true, multiPartUploads);
  /*
   * Do actual test
   */
  buffer.add(mockIevent);
  LinkedHashMap<String, String> partitions = new LinkedHashMap<String, String>();
  partitions.put(S3Transport.FILENAME_KEY, "a_filename.gz");
  ArgumentCaptor<UploadPartRequest> argument = ArgumentCaptor.forClass(UploadPartRequest.class);
  transport.sendBatch(buffer, partitions, new TestContext());
  verify(mockClient).uploadPart(argument.capture());
  /*
   * Check results: the .gz filename from the partition must be preserved in the
   * uploaded key. (The previous expected value "a_filename.bz2" contradicted the
   * "a_filename.gz" partition set above — an apparent copy-paste from a bz2 test.)
   */
  assertEquals("basepath/a_filename.gz", argument.getValue().getKey());
}
Use of software.amazon.awssdk.services.s3.model.UploadPartRequest in project bender by Nextdoor: class S3TransporterTest, method testAmazonClientException.
@Test(expected = TransportException.class)
public void testAmazonClientException() throws TransportException, IllegalStateException, IOException {
  /*
   * Create mock client, requests, and replies: uploadPart always throws.
   */
  AmazonS3Client mockClient = mock(AmazonS3Client.class);
  UploadPartResult uploadResult = new UploadPartResult();
  uploadResult.setETag("foo");
  doThrow(new AmazonClientException("expected")).when(mockClient).uploadPart(any(UploadPartRequest.class));
  InitiateMultipartUploadResult initUploadResult = new InitiateMultipartUploadResult();
  initUploadResult.setUploadId("123");
  doReturn(initUploadResult).when(mockClient).initiateMultipartUpload(any(InitiateMultipartUploadRequest.class));
  /*
   * Fill buffer with mock data
   */
  S3TransportBuffer buffer = new S3TransportBuffer(1000, false, new S3TransportSerializer());
  InternalEvent mockIevent = mock(InternalEvent.class);
  doReturn("foo").when(mockIevent).getSerialized();
  /*
   * Create transport
   */
  Map<String, MultiPartUpload> multiPartUploads = new HashMap<String, MultiPartUpload>(0);
  S3Transport transport = new S3Transport(mockClient, "bucket", "basepath", false, multiPartUploads);
  /*
   * Do actual test: sendBatch must wrap the AmazonClientException in a
   * TransportException (verified by the @Test(expected=...) annotation) and
   * keep the original exception as the cause.
   */
  buffer.add(mockIevent);
  LinkedHashMap<String, String> partitions = new LinkedHashMap<String, String>();
  partitions.put(S3Transport.FILENAME_KEY, "a_filename");
  ArgumentCaptor<UploadPartRequest> argument = ArgumentCaptor.forClass(UploadPartRequest.class);
  try {
    transport.sendBatch(buffer, partitions, new TestContext());
  } catch (Exception e) {
    // JUnit convention: expected value first, actual second (was reversed,
    // which produces a misleading message on failure).
    assertEquals(AmazonClientException.class, e.getCause().getClass());
    throw e;
  }
}
Use of software.amazon.awssdk.services.s3.model.UploadPartRequest in project bender by Nextdoor: class S3TransporterTest, method testMultipleUploads.
@Test
public void testMultipleUploads() throws TransportException, IllegalStateException, IOException {
  // Mock S3 client plus two buffers, each holding one serialized event.
  AmazonS3Client mockClient = getMockClient();

  S3TransportBuffer firstBuffer = new S3TransportBuffer(1000, false, new S3TransportSerializer());
  S3TransportBuffer secondBuffer = new S3TransportBuffer(1000, false, new S3TransportSerializer());
  InternalEvent mockIevent = mock(InternalEvent.class);
  doReturn("foo").doReturn("bar1").when(mockIevent).getSerialized();
  firstBuffer.add(mockIevent);
  secondBuffer.add(mockIevent);

  // Transport under test, sharing one (initially empty) multipart-upload map.
  Map<String, MultiPartUpload> multiPartUploads = new HashMap<String, MultiPartUpload>(0);
  S3Transport transport = new S3Transport(mockClient, "bucket", "basepath", false, multiPartUploads);

  LinkedHashMap<String, String> partitions = new LinkedHashMap<String, String>();
  partitions.put(S3Transport.FILENAME_KEY, "a_filename");

  // Send both batches against the same key and capture each UploadPartRequest.
  ArgumentCaptor<UploadPartRequest> captor = ArgumentCaptor.forClass(UploadPartRequest.class);
  transport.sendBatch(firstBuffer, partitions, new TestContext());
  transport.sendBatch(secondBuffer, partitions, new TestContext());
  verify(mockClient, times(2)).uploadPart(captor.capture());

  List<UploadPartRequest> captured = captor.getAllValues();
  UploadPartRequest firstPart = captured.get(0);
  UploadPartRequest secondPart = captured.get(1);

  // Both parts belong to the same upload id and are numbered sequentially.
  assertEquals(1, firstPart.getPartNumber());
  assertEquals(4, firstPart.getPartSize()); // "foo\n"
  assertEquals("123", firstPart.getUploadId());

  assertEquals(2, secondPart.getPartNumber());
  assertEquals(5, secondPart.getPartSize()); // "bar1\n"
  assertEquals("123", secondPart.getUploadId());
}
Aggregations