use of com.amazonaws.services.s3.model.MultipartUpload in project herd by FINRAOS.
the class MockS3OperationsImpl method getMultipartUpload.
/**
* Creates and returns a mock {@link MultipartUpload} with the given initiated timestamp.
*
* @param initiated the initiated timestamp to set on the mock upload
*
* @return the mock object
*/
private MultipartUpload getMultipartUpload(Date initiated) {
    MultipartUpload multipartUpload = new MultipartUpload();
    multipartUpload.setInitiated(initiated);
    return multipartUpload;
}
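A minimal usage sketch of the helper above, assuming it is paired with a listing for a mocked listMultipartUploads call; the wrapping method is illustrative and not taken from MockS3OperationsImpl:
// Illustrative only: builds a listing whose uploads fall on both sides of a
// threshold date, reusing the getMultipartUpload helper above.
private MultipartUploadListing getMultipartUploadListing(Date beforeThreshold, Date afterThreshold) {
    MultipartUploadListing multipartUploadListing = new MultipartUploadListing();
    multipartUploadListing.getMultipartUploads().add(getMultipartUpload(beforeThreshold));
    multipartUploadListing.getMultipartUploads().add(getMultipartUpload(afterThreshold));
    return multipartUploadListing;
}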
use of com.amazonaws.services.s3.model.MultipartUpload in project herd by FINRAOS.
the class S3DaoTest method testAbortMultipartUploadsAssertAbortOnlyBeforeThreshold.
@Test
public void testAbortMultipartUploadsAssertAbortOnlyBeforeThreshold() {
    S3Operations originalS3Operations = (S3Operations) ReflectionTestUtils.getField(s3Dao, "s3Operations");
    S3Operations mockS3Operations = mock(S3Operations.class);
    ReflectionTestUtils.setField(s3Dao, "s3Operations", mockS3Operations);
    try {
        String s3BucketName = "s3BucketName";
        String uploadKey = "uploadKey1";
        String uploadId = "uploadId1";
        Date uploadInitiated = new Date(0);
        S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto = new S3FileTransferRequestParamsDto();
        s3FileTransferRequestParamsDto.setS3BucketName(s3BucketName);
        Date thresholdDate = new Date(1);
        when(mockS3Operations.listMultipartUploads(any(), any())).then(new Answer<MultipartUploadListing>() {

            @Override
            public MultipartUploadListing answer(InvocationOnMock invocation) throws Throwable {
                ListMultipartUploadsRequest listMultipartUploadsRequest = invocation.getArgument(0);
                assertEquals(s3BucketName, listMultipartUploadsRequest.getBucketName());
                MultipartUploadListing multipartUploadListing = new MultipartUploadListing();
                // This upload is aborted since its initiated date is before the threshold
                {
                    MultipartUpload multipartUpload = new MultipartUpload();
                    multipartUpload.setUploadId(uploadId);
                    multipartUpload.setKey(uploadKey);
                    multipartUpload.setInitiated(uploadInitiated);
                    multipartUploadListing.getMultipartUploads().add(multipartUpload);
                }
                // This upload is not aborted since its initiated date is after the threshold
                {
                    MultipartUpload multipartUpload = new MultipartUpload();
                    multipartUpload.setUploadId("uploadId2");
                    multipartUpload.setKey("uploadKey2");
                    multipartUpload.setInitiated(new Date(2));
                    multipartUploadListing.getMultipartUploads().add(multipartUpload);
                }
                return multipartUploadListing;
            }
        });
        assertEquals(1, s3Dao.abortMultipartUploads(s3FileTransferRequestParamsDto, thresholdDate));
        verify(mockS3Operations).listMultipartUploads(any(), any());
        // Assert that S3Operations.abortMultipartUpload is called exactly once with arguments matching the given ArgumentMatcher
        verify(mockS3Operations).abortMultipartUpload(argThat(
            argument -> Objects.equal(s3BucketName, argument.getBucketName()) && Objects.equal(uploadKey, argument.getKey()) &&
                Objects.equal(uploadId, argument.getUploadId())), any());
        // Assert that no other interactions occur with the mock
        verifyNoMoreInteractions(mockS3Operations);
    } finally {
        ReflectionTestUtils.setField(s3Dao, "s3Operations", originalS3Operations);
    }
}
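For reference, a hedged sketch of the abort logic this test exercises; the method below is an assumption about the S3Dao implementation (only the two-argument S3Operations call shapes are confirmed by the mock), not the actual herd code:
// Assumed shape of the logic under test: list the bucket's in-progress multipart
// uploads and abort only those initiated strictly before the threshold date.
// The second argument to each S3Operations call is assumed to be the AmazonS3 client.
private int abortUploadsBeforeThreshold(S3Operations s3Operations, AmazonS3 s3Client, String bucketName, Date thresholdDate) {
    int abortedCount = 0;
    MultipartUploadListing listing = s3Operations.listMultipartUploads(new ListMultipartUploadsRequest(bucketName), s3Client);
    for (MultipartUpload upload : listing.getMultipartUploads()) {
        if (upload.getInitiated().before(thresholdDate)) {
            s3Operations.abortMultipartUpload(new AbortMultipartUploadRequest(bucketName, upload.getKey(), upload.getUploadId()), s3Client);
            abortedCount++;
        }
    }
    return abortedCount;
}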
use of com.amazonaws.services.s3.model.MultipartUpload in project herd by FINRAOS.
the class S3DaoTest method testAbortMultipartUploadsAssertTruncatedResult.
@Test
public void testAbortMultipartUploadsAssertTruncatedResult() {
    S3Operations originalS3Operations = (S3Operations) ReflectionTestUtils.getField(s3Dao, "s3Operations");
    S3Operations mockS3Operations = mock(S3Operations.class);
    ReflectionTestUtils.setField(s3Dao, "s3Operations", mockS3Operations);
    try {
        String s3BucketName = "s3BucketName";
        String uploadKey = "uploadKey";
        String uploadId = "uploadId";
        Date uploadInitiated = new Date(0);
        S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto = new S3FileTransferRequestParamsDto();
        s3FileTransferRequestParamsDto.setS3BucketName(s3BucketName);
        Date thresholdDate = new Date(1);
        when(mockS3Operations.listMultipartUploads(any(), any())).then(new Answer<MultipartUploadListing>() {

            @Override
            public MultipartUploadListing answer(InvocationOnMock invocation) throws Throwable {
                ListMultipartUploadsRequest listMultipartUploadsRequest = invocation.getArgument(0);
                String keyMarker = listMultipartUploadsRequest.getKeyMarker();
                String uploadIdMarker = listMultipartUploadsRequest.getUploadIdMarker();
                MultipartUploadListing multipartUploadListing = new MultipartUploadListing();
                if (keyMarker == null || uploadIdMarker == null) {
                    // First call: return a truncated listing that points at the next page
                    multipartUploadListing.setNextKeyMarker("nextKeyMarker");
                    multipartUploadListing.setNextUploadIdMarker("nextUploadIdMarker");
                    multipartUploadListing.setTruncated(true);
                } else {
                    // Second call: assert the markers were carried over, then return the upload
                    assertEquals("nextKeyMarker", keyMarker);
                    assertEquals("nextUploadIdMarker", uploadIdMarker);
                    MultipartUpload multipartUpload = new MultipartUpload();
                    multipartUpload.setUploadId(uploadId);
                    multipartUpload.setKey(uploadKey);
                    multipartUpload.setInitiated(uploadInitiated);
                    multipartUploadListing.getMultipartUploads().add(multipartUpload);
                }
                return multipartUploadListing;
            }
        });
        assertEquals(1, s3Dao.abortMultipartUploads(s3FileTransferRequestParamsDto, thresholdDate));
        // Assert listMultipartUploads() is called twice due to truncation
        verify(mockS3Operations, times(2)).listMultipartUploads(any(), any());
        // Assert that S3Operations.abortMultipartUpload is called exactly once with arguments matching the given ArgumentMatcher
        verify(mockS3Operations).abortMultipartUpload(argThat(
            argument -> Objects.equal(s3BucketName, argument.getBucketName()) && Objects.equal(uploadKey, argument.getKey()) &&
                Objects.equal(uploadId, argument.getUploadId())), any());
        // Assert that no other interactions occur with the mock
        verifyNoMoreInteractions(mockS3Operations);
    } finally {
        ReflectionTestUtils.setField(s3Dao, "s3Operations", originalS3Operations);
    }
}
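The truncation handling implied by this test follows the standard S3 pagination pattern; a hedged sketch of that loop, with names assumed rather than taken from S3Dao:
// Assumed pagination loop: follow the key/upload-id markers returned by each
// truncated listing until S3 reports the listing is complete.
ListMultipartUploadsRequest request = new ListMultipartUploadsRequest(bucketName);
MultipartUploadListing listing;
do {
    listing = s3Operations.listMultipartUploads(request, s3Client);
    for (MultipartUpload upload : listing.getMultipartUploads()) {
        // abort uploads initiated before the threshold, as in the previous sketch
    }
    request.setKeyMarker(listing.getNextKeyMarker());
    request.setUploadIdMarker(listing.getNextUploadIdMarker());
} while (listing.isTruncated());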
use of com.amazonaws.services.s3.model.MultipartUpload in project nifi by apache.
the class ITPutS3Object method testStateRemove.
@Test
public void testStateRemove() throws IOException {
    final PutS3Object processor = new PutS3Object();
    final TestRunner runner = TestRunners.newTestRunner(processor);
    final String bucket = runner.getProcessContext().getProperty(PutS3Object.BUCKET).getValue();
    final String key = runner.getProcessContext().getProperty(PutS3Object.KEY).getValue();
    final String cacheKey = runner.getProcessor().getIdentifier() + "/" + bucket + "/" + key + "-sr";
    final List<MultipartUpload> uploadList = new ArrayList<>();
    final MultipartUpload upload1 = new MultipartUpload();
    upload1.setKey(key);
    upload1.setUploadId("1234");
    uploadList.add(upload1);
    final MultipartUploadListing uploadListing = new MultipartUploadListing();
    uploadListing.setMultipartUploads(uploadList);
    final MockAmazonS3Client mockClient = new MockAmazonS3Client();
    mockClient.setListing(uploadListing);
    // store state, retrieve and validate, remove and validate
    PutS3Object.MultipartState stateOrig = new PutS3Object.MultipartState();
    stateOrig.setUploadId("1234");
    stateOrig.setContentLength(1234L);
    processor.persistLocalState(cacheKey, stateOrig);
    PutS3Object.MultipartState state1 = processor.getLocalStateIfInS3(mockClient, bucket, cacheKey);
    Assert.assertEquals("1234", state1.getUploadId());
    Assert.assertEquals(1234L, state1.getContentLength().longValue());
    processor.persistLocalState(cacheKey, null);
    PutS3Object.MultipartState state2 = processor.getLocalStateIfInS3(mockClient, bucket, cacheKey);
    Assert.assertNull(state2);
}
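The MockAmazonS3Client used above is defined elsewhere in the NiFi test sources; a minimal sketch of such a stub, assuming the test only needs listMultipartUploads to return a canned listing:
// Illustrative stub only; the real MockAmazonS3Client in ITPutS3Object may do more.
private static class StubAmazonS3Client extends AmazonS3Client {
    private MultipartUploadListing listing;

    public void setListing(MultipartUploadListing newListing) {
        listing = newListing;
    }

    @Override
    public MultipartUploadListing listMultipartUploads(ListMultipartUploadsRequest request) {
        // Always return the canned listing so getLocalStateIfInS3 sees the upload as in progress
        return listing;
    }
}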
use of com.amazonaws.services.s3.model.MultipartUpload in project Singularity by HubSpot.
the class SingularityS3Uploader method uploadSingle.
protected void uploadSingle(int sequence, Path file) throws Exception {
    Retryer<Boolean> retryer = RetryerBuilder.<Boolean>newBuilder()
        .retryIfExceptionOfType(AmazonS3Exception.class)
        .retryIfRuntimeException()
        .withWaitStrategy(WaitStrategies.fixedWait(configuration.getRetryWaitMs(), TimeUnit.MILLISECONDS))
        .withStopStrategy(StopStrategies.stopAfterAttempt(configuration.getRetryCount()))
        .build();
    retryer.call(() -> {
        final long start = System.currentTimeMillis();
        final String key = SingularityS3FormatHelper.getKey(uploadMetadata.getS3KeyFormat(), sequence, Files.getLastModifiedTime(file).toMillis(), Objects.toString(file.getFileName()), hostname);
        long fileSizeBytes = Files.size(file);
        LOG.info("{} Uploading {} to {}/{} (size {})", logIdentifier, file, bucketName, key, fileSizeBytes);
        try {
            ObjectMetadata objectMetadata = new ObjectMetadata();
            UploaderFileAttributes fileAttributes = getFileAttributes(file);
            if (fileAttributes.getStartTime().isPresent()) {
                objectMetadata.addUserMetadata(SingularityS3Log.LOG_START_S3_ATTR, fileAttributes.getStartTime().get().toString());
                LOG.debug("Added extra metadata for object ({}:{})", SingularityS3Log.LOG_START_S3_ATTR, fileAttributes.getStartTime().get());
            }
            if (fileAttributes.getEndTime().isPresent()) {
                objectMetadata.addUserMetadata(SingularityS3Log.LOG_END_S3_ATTR, fileAttributes.getEndTime().get().toString());
                LOG.debug("Added extra metadata for object ({}:{})", SingularityS3Log.LOG_END_S3_ATTR, fileAttributes.getEndTime().get());
            }
            for (SingularityS3UploaderContentHeaders contentHeaders : configuration.getS3ContentHeaders()) {
                if (file.toString().endsWith(contentHeaders.getFilenameEndsWith())) {
                    LOG.debug("{} Using content headers {} for file {}", logIdentifier, contentHeaders, file);
                    if (contentHeaders.getContentType().isPresent()) {
                        objectMetadata.setContentType(contentHeaders.getContentType().get());
                    }
                    if (contentHeaders.getContentEncoding().isPresent()) {
                        objectMetadata.setContentEncoding(contentHeaders.getContentEncoding().get());
                    }
                    break;
                }
            }
            Optional<StorageClass> maybeStorageClass = Optional.absent();
            if (shouldApplyStorageClass(fileSizeBytes, uploadMetadata.getS3StorageClass())) {
                LOG.debug("{} adding storage class {} to {}", logIdentifier, uploadMetadata.getS3StorageClass().get(), file);
                maybeStorageClass = Optional.of(StorageClass.fromValue(uploadMetadata.getS3StorageClass().get()));
            }
            LOG.debug("Uploading object with metadata {}", objectMetadata);
            if (fileSizeBytes > configuration.getMaxSingleUploadSizeBytes()) {
                multipartUpload(key, file.toFile(), objectMetadata, maybeStorageClass);
            } else {
                PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName, key, file.toFile()).withMetadata(objectMetadata);
                if (maybeStorageClass.isPresent()) {
                    putObjectRequest.setStorageClass(maybeStorageClass.get());
                }
                if (uploadMetadata.getEncryptionKey().isPresent()) {
                    putObjectRequest.withSSEAwsKeyManagementParams(new SSEAwsKeyManagementParams(uploadMetadata.getEncryptionKey().get()));
                }
                s3Client.putObject(putObjectRequest);
            }
        } catch (AmazonS3Exception se) {
            LOG.warn("{} Couldn't upload {} due to {} - {}", logIdentifier, file, se.getErrorCode(), se.getErrorMessage(), se);
            throw se;
        } catch (Exception e) {
            LOG.warn("Exception uploading {}", file, e);
            throw e;
        }
        LOG.info("{} Uploaded {} in {}", logIdentifier, key, JavaUtils.duration(start));
        return true;
    });
}
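The multipartUpload helper called for oversized files is not shown in this snippet; a hedged sketch of one way it could be implemented with the SDK's low-level multipart API (the part size and error handling are assumptions, not Singularity's actual code):
// Illustrative sketch of a multipart upload helper: initiate the upload, send
// fixed-size parts, then complete; abort on failure so partial parts are not leaked.
private void multipartUpload(String key, File file, ObjectMetadata objectMetadata, Optional<StorageClass> maybeStorageClass) throws Exception {
    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName, key).withObjectMetadata(objectMetadata);
    if (maybeStorageClass.isPresent()) {
        initRequest.setStorageClass(maybeStorageClass.get());
    }
    String uploadId = s3Client.initiateMultipartUpload(initRequest).getUploadId();
    try {
        List<PartETag> partETags = new ArrayList<>();
        // 5 MB is the S3 minimum part size for every part except the last
        long partSize = 5L * 1024 * 1024;
        long filePosition = 0;
        for (int partNumber = 1; filePosition < file.length(); partNumber++) {
            long size = Math.min(partSize, file.length() - filePosition);
            UploadPartRequest partRequest = new UploadPartRequest()
                .withBucketName(bucketName)
                .withKey(key)
                .withUploadId(uploadId)
                .withPartNumber(partNumber)
                .withFile(file)
                .withFileOffset(filePosition)
                .withPartSize(size);
            partETags.add(s3Client.uploadPart(partRequest).getPartETag());
            filePosition += size;
        }
        s3Client.completeMultipartUpload(new CompleteMultipartUploadRequest(bucketName, key, uploadId, partETags));
    } catch (Exception e) {
        s3Client.abortMultipartUpload(new AbortMultipartUploadRequest(bucketName, key, uploadId));
        throw e;
    }
}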