Use of com.amazonaws.services.s3.model.MultipartUpload in project nifi by apache.
The class PutS3Object, method getS3AgeoffListAndAgeoffLocalState:
protected MultipartUploadListing getS3AgeoffListAndAgeoffLocalState(final ProcessContext context, final AmazonS3Client s3, final long now) {
    final long ageoff_interval = context.getProperty(MULTIPART_S3_AGEOFF_INTERVAL).asTimePeriod(TimeUnit.MILLISECONDS);
    final String bucket = context.getProperty(BUCKET).evaluateAttributeExpressions().getValue();
    final Long maxAge = context.getProperty(MULTIPART_S3_MAX_AGE).asTimePeriod(TimeUnit.MILLISECONDS);
    final long ageCutoff = now - maxAge;
    final List<MultipartUpload> ageoffList = new ArrayList<>();
    // Only query S3 if the ageoff interval has elapsed and no other thread holds the bucket lock.
    if ((lastS3AgeOff.get() < now - ageoff_interval) && s3BucketLock.tryLock()) {
        try {
            ListMultipartUploadsRequest listRequest = new ListMultipartUploadsRequest(bucket);
            MultipartUploadListing listing = s3.listMultipartUploads(listRequest);
            // Collect every in-progress multipart upload initiated before the age cutoff.
            for (MultipartUpload upload : listing.getMultipartUploads()) {
                long uploadTime = upload.getInitiated().getTime();
                if (uploadTime < ageCutoff) {
                    ageoffList.add(upload);
                }
            }
            // ageoff any local state
            ageoffLocalState(ageCutoff);
            lastS3AgeOff.set(System.currentTimeMillis());
        } catch (AmazonClientException e) {
            if (e instanceof AmazonS3Exception && ((AmazonS3Exception) e).getStatusCode() == 403 && ((AmazonS3Exception) e).getErrorCode().equals("AccessDenied")) {
                getLogger().warn("AccessDenied checking S3 Multipart Upload list for {}: {} " + "** The configured user does not have the s3:ListBucketMultipartUploads permission " + "for this bucket, S3 ageoff cannot occur without this permission. Next ageoff check " + "time is being advanced by interval to prevent checking on every upload **", new Object[] { bucket, e.getMessage() });
                lastS3AgeOff.set(System.currentTimeMillis());
            } else {
                getLogger().error("Error checking S3 Multipart Upload list for {}: {}", new Object[] { bucket, e.getMessage() });
            }
        } finally {
            s3BucketLock.unlock();
        }
    }
    MultipartUploadListing result = new MultipartUploadListing();
    result.setBucketName(bucket);
    result.setMultipartUploads(ageoffList);
    return result;
}
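The listing call above examines only the first page of results; ListMultipartUploadsRequest supports key and upload-id markers for walking a truncated listing. A minimal sketch of a paginated variant (the helper name collectAgedUploads is hypothetical, not part of NiFi):

// Hypothetical helper: gathers uploads initiated before ageCutoff across all result pages.
protected List<MultipartUpload> collectAgedUploads(final AmazonS3Client s3, final String bucket, final long ageCutoff) {
    final List<MultipartUpload> aged = new ArrayList<>();
    final ListMultipartUploadsRequest request = new ListMultipartUploadsRequest(bucket);
    MultipartUploadListing listing;
    do {
        listing = s3.listMultipartUploads(request);
        for (MultipartUpload upload : listing.getMultipartUploads()) {
            if (upload.getInitiated().getTime() < ageCutoff) {
                aged.add(upload);
            }
        }
        // Advance the markers so the next iteration fetches the following page.
        request.setKeyMarker(listing.getNextKeyMarker());
        request.setUploadIdMarker(listing.getNextUploadIdMarker());
    } while (listing.isTruncated());
    return aged;
}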
Use of com.amazonaws.services.s3.model.MultipartUpload in project nifi by apache.
The class PutS3Object, method abortS3MultipartUpload:
protected void abortS3MultipartUpload(final AmazonS3Client s3, final String bucket, final MultipartUpload upload) {
    final String uploadKey = upload.getKey();
    final String uploadId = upload.getUploadId();
    final AbortMultipartUploadRequest abortRequest = new AbortMultipartUploadRequest(bucket, uploadKey, uploadId);
    try {
        s3.abortMultipartUpload(abortRequest);
        getLogger().info("Aborting out of date multipart upload, bucket {} key {} ID {}, initiated {}", new Object[] { bucket, uploadKey, uploadId, logFormat.format(upload.getInitiated()) });
    } catch (AmazonClientException ace) {
        getLogger().info("Error trying to abort multipart upload from bucket {} with key {} and ID {}: {}", new Object[] { bucket, uploadKey, uploadId, ace.getMessage() });
    }
}
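Together with getS3AgeoffListAndAgeoffLocalState above, this supports a periodic sweep along these lines (a minimal sketch of the calling pattern; the surrounding processor plumbing is assumed):

// Sketch: list aged-off multipart uploads, then abort each one.
final MultipartUploadListing aged = getS3AgeoffListAndAgeoffLocalState(context, s3, System.currentTimeMillis());
for (MultipartUpload upload : aged.getMultipartUploads()) {
    abortS3MultipartUpload(s3, aged.getBucketName(), upload);
}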
Use of com.amazonaws.services.s3.model.MultipartUpload in project bender by Nextdoor.
The class S3TransportFactory, method close:
@Override
public void close() {
    Exception e = null;
    for (MultiPartUpload upload : this.pendingMultiPartUploads.values()) {
        if (e == null) {
            // No failure yet: attempt to complete each pending upload.
            CompleteMultipartUploadRequest req = upload.getCompleteMultipartUploadRequest();
            try {
                this.client.completeMultipartUpload(req);
            } catch (AmazonS3Exception e1) {
                logger.error("failed to complete multi-part upload for " + upload.getKey() + " parts " + upload.getPartCount(), e1);
                e = e1;
            }
        } else {
            // A completion already failed: abort the remaining uploads instead.
            logger.warn("aborting upload for: " + upload.getKey());
            AbortMultipartUploadRequest req = upload.getAbortMultipartUploadRequest();
            try {
                this.client.abortMultipartUpload(req);
            } catch (AmazonS3Exception e1) {
                logger.error("failed to abort multi-part upload", e1);
            }
        }
    }
    this.pendingMultiPartUploads.clear();
    if (e != null) {
        throw new RuntimeException("failed while closing transport", e);
    }
}
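Note the fail-fast design: after the first failed completion, the remaining pending uploads are aborted rather than completed, and the original exception is rethrown once the map has been drained. MultiPartUpload is bender's own wrapper and its request builders are not shown in this excerpt; a plausible sketch of getCompleteMultipartUploadRequest, assuming the wrapper tracks its part ETags (field names are hypothetical):

// Hypothetical: assemble the completion request from the accumulated part ETags.
public CompleteMultipartUploadRequest getCompleteMultipartUploadRequest() {
    return new CompleteMultipartUploadRequest(this.bucketName, this.key, this.uploadId, this.partETags);
}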
Use of com.amazonaws.services.s3.model.MultipartUpload in project bender by Nextdoor.
The class S3TransporterTest, method testUnpartitioned:
@Test
public void testUnpartitioned() throws TransportException, IllegalStateException, IOException {
    /*
     * Create mock client, requests, and replies
     */
    AmazonS3Client mockClient = getMockClient();
    /*
     * Fill buffer with mock data
     */
    S3TransportBuffer buffer = new S3TransportBuffer(1000, false, new S3TransportSerializer());
    InternalEvent mockIevent = mock(InternalEvent.class);
    doReturn("foo").when(mockIevent).getSerialized();
    /*
     * Create transport
     */
    Map<String, MultiPartUpload> multiPartUploads = new HashMap<String, MultiPartUpload>(0);
    S3Transport transport = new S3Transport(mockClient, "bucket", "basepath", false, multiPartUploads);
    /*
     * Do actual test
     */
    buffer.add(mockIevent);
    LinkedHashMap<String, String> partitions = new LinkedHashMap<String, String>();
    partitions.put(S3Transport.FILENAME_KEY, "a_filename");
    ArgumentCaptor<UploadPartRequest> argument = ArgumentCaptor.forClass(UploadPartRequest.class);
    transport.sendBatch(buffer, partitions, new TestContext());
    verify(mockClient).uploadPart(argument.capture());
    /*
     * Check results
     */
    assertEquals("bucket", argument.getValue().getBucketName());
    assertEquals("basepath/a_filename", argument.getValue().getKey());
    assertEquals(1, argument.getValue().getPartNumber());
    // the serialized payload "foo" plus a trailing newline is 4 bytes
    assertEquals(4, argument.getValue().getPartSize());
    assertEquals("123", argument.getValue().getUploadId());
}
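The getMockClient() helper is not shown in this excerpt. The assertion on upload ID "123" implies a stub roughly like the following (a sketch, not bender's actual helper):

// Hypothetical mock: initiateMultipartUpload hands back upload ID "123",
// and uploadPart returns a result with a dummy ETag.
private AmazonS3Client getMockClient() {
    AmazonS3Client mockClient = mock(AmazonS3Client.class);
    InitiateMultipartUploadResult initResult = new InitiateMultipartUploadResult();
    initResult.setUploadId("123");
    doReturn(initResult).when(mockClient).initiateMultipartUpload(any(InitiateMultipartUploadRequest.class));
    UploadPartResult partResult = new UploadPartResult();
    partResult.setETag("etag");
    doReturn(partResult).when(mockClient).uploadPart(any(UploadPartRequest.class));
    return mockClient;
}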
Use of com.amazonaws.services.s3.model.MultipartUpload in project bender by Nextdoor.
The class S3TransporterTest, method testCompressed:
@Test
public void testCompressed() throws TransportException, IllegalStateException, IOException {
    /*
     * Create mock client, requests, and replies
     */
    AmazonS3Client mockClient = getMockClient();
    /*
     * Capture the InputStream into a ByteArrayOutputStream before the Transport thread closes the
     * InputStream and makes it unavailable for reading.
     */
    ByteArrayOutputStream captured = new ByteArrayOutputStream();
    Answer answer = new Answer() {
        @Override
        public Object answer(InvocationOnMock invocation) throws Throwable {
            UploadPartRequest req = invocation.getArgumentAt(0, UploadPartRequest.class);
            // Copy the request body so it can be inspected after the stream is closed
            // (IOUtils.copy from commons-io, assumed on the test classpath).
            IOUtils.copy(req.getInputStream(), captured);
            return new UploadPartResult();
        }
    };
    Mockito.doAnswer(answer).when(mockClient).uploadPart(any(UploadPartRequest.class));
    /*
     * Fill buffer with mock data
     */
    S3TransportBuffer buffer = new S3TransportBuffer(1000, false, new S3TransportSerializer());
    InternalEvent mockIevent = mock(InternalEvent.class);
    doReturn("foo").when(mockIevent).getSerialized();
    /*
     * Create transport
     */
    Map<String, MultiPartUpload> multiPartUploads = new HashMap<String, MultiPartUpload>(0);
    S3Transport transport = new S3Transport(mockClient, "bucket", "basepath", true, multiPartUploads);
    /*
     * Do actual test
     */
    buffer.add(mockIevent);
    LinkedHashMap<String, String> partitions = new LinkedHashMap<String, String>();
    partitions.put(S3Transport.FILENAME_KEY, "a_filename");
    ArgumentCaptor<UploadPartRequest> argument = ArgumentCaptor.forClass(UploadPartRequest.class);
    buffer.close();
    transport.sendBatch(buffer, partitions, new TestContext());
    verify(mockClient).uploadPart(argument.capture());
    /*
     * Check results
     */
    assertEquals("bucket", argument.getValue().getBucketName());
    assertEquals("basepath/a_filename.bz2", argument.getValue().getKey());
    assertEquals(1, argument.getValue().getPartNumber());
    assertEquals(40, argument.getValue().getPartSize());
    assertEquals("123", argument.getValue().getUploadId());
    /*
     * Compare the bytes captured from the client's InputStream against the expected
     * bzip2-compressed payload.
     */
    byte[] actualBytes = captured.toByteArray();
    byte[] expectedBytes = { 66, 90, 104, 57, 49, 65, 89, 38, 83, 89, 118, -10, -77, -27, 0, 0, 0, -63, 0, 0, 16, 1, 0, -96, 0, 48, -52, 12, -62, 12, 46, -28, -118, 112, -95, 32, -19, -19, 103, -54 };
    assertArrayEquals(expectedBytes, actualBytes);
}
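The expected byte array is a bzip2 stream (note the leading magic bytes 66, 90, 104, i.e. "BZh"). One way to sanity-check it is to decompress the captured bytes and compare them against the serialized payload; a sketch using commons-compress, assumed to be on the test classpath:

// Decompress the captured bzip2 bytes and confirm they contain "foo\n".
try (BZip2CompressorInputStream bzIn = new BZip2CompressorInputStream(new ByteArrayInputStream(actualBytes));
     ByteArrayOutputStream out = new ByteArrayOutputStream()) {
    byte[] buf = new byte[256];
    int n;
    while ((n = bzIn.read(buf)) != -1) {
        out.write(buf, 0, n);
    }
    assertEquals("foo\n", out.toString("UTF-8"));
}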