Use of com.amazonaws.AmazonClientException in project herd by FINRAOS.
In class S3DaoTest, method testPerformTransferAssertHandleFailedWithAmazonClientException:
@Test
public void testPerformTransferAssertHandleFailedWithAmazonClientException() throws Exception {
    // Replace the S3 operations dependency with a mock and remember the original so it can be restored.
    S3Operations originalS3Operations = (S3Operations) ReflectionTestUtils.getField(s3Dao, "s3Operations");
    S3Operations mockS3Operations = mock(S3Operations.class);
    ReflectionTestUtils.setField(s3Dao, "s3Operations", mockS3Operations);
    // Shorten the sleep interval for faster tests.
    long originalSleepIntervalsMillis = (long) ReflectionTestUtils.getField(s3Dao, "sleepIntervalsMillis");
    ReflectionTestUtils.setField(s3Dao, "sleepIntervalsMillis", 1L);
    try {
        S3FileCopyRequestParamsDto s3FileCopyRequestParamsDto = new S3FileCopyRequestParamsDto();
        s3FileCopyRequestParamsDto.setSourceBucketName("sourceBucketName");
        s3FileCopyRequestParamsDto.setSourceObjectKey("sourceObjectKey");
        s3FileCopyRequestParamsDto.setTargetBucketName("targetBucketName");
        s3FileCopyRequestParamsDto.setTargetObjectKey("targetObjectKey");
        s3FileCopyRequestParamsDto.setKmsKeyId("kmsKeyId");
        // Return a mock transfer that is done, reports a Failed state, and surfaces an AmazonClientException.
        when(mockS3Operations.copyFile(any(), any())).then(new Answer<Copy>() {
            @Override
            public Copy answer(InvocationOnMock invocation) throws Throwable {
                Copy mockTransfer = mock(Copy.class);
                when(mockTransfer.getProgress()).thenReturn(new TransferProgress());
                when(mockTransfer.getState()).thenReturn(TransferState.Failed);
                when(mockTransfer.isDone()).thenReturn(true);
                when(mockTransfer.waitForException()).thenReturn(new AmazonClientException("message"));
                return mockTransfer;
            }
        });
        try {
            s3Dao.copyFile(s3FileCopyRequestParamsDto);
            fail();
        } catch (Exception e) {
            // The exception returned by waitForException() is expected to be propagated unchanged.
            assertEquals(AmazonClientException.class, e.getClass());
            assertEquals("message", e.getMessage());
        }
    } finally {
        // Restore the original dependency and sleep interval.
        ReflectionTestUtils.setField(s3Dao, "s3Operations", originalS3Operations);
        ReflectionTestUtils.setField(s3Dao, "sleepIntervalsMillis", originalSleepIntervalsMillis);
    }
}
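For context, the failure path this test exercises is the S3Dao polling an AWS SDK Transfer and rethrowing the exception reported by waitForException(). Below is a minimal sketch of that pattern, assuming the standard AWS SDK v1 Transfer interface; the helper class and method names are illustrative, not herd's actual internals.

import com.amazonaws.AmazonClientException;
import com.amazonaws.services.s3.transfer.Transfer;

// Illustrative helper only; not herd's S3DaoImpl code.
public final class TransferFailureSketch {

    private TransferFailureSketch() {
    }

    // Poll the transfer until it is done; if it ended in the Failed state, rethrow the underlying exception.
    public static void waitAndRethrowOnFailure(Transfer transfer, long sleepIntervalMillis) throws InterruptedException {
        while (!transfer.isDone()) {
            Thread.sleep(sleepIntervalMillis);
        }
        if (Transfer.TransferState.Failed.equals(transfer.getState())) {
            AmazonClientException cause = transfer.waitForException();
            if (cause != null) {
                // Propagated as-is, which is what the test above asserts.
                throw cause;
            }
        }
    }
}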
Use of com.amazonaws.AmazonClientException in project herd by FINRAOS.
In class S3DaoTest, method testDeleteDirectoryAssertHandleAmazonClientException:
@Test
public void testDeleteDirectoryAssertHandleAmazonClientException() {
    // Replace the S3 operations dependency with a mock and remember the original so it can be restored.
    S3Operations originalS3Operations = (S3Operations) ReflectionTestUtils.getField(s3Dao, "s3Operations");
    S3Operations mockS3Operations = mock(S3Operations.class);
    ReflectionTestUtils.setField(s3Dao, "s3Operations", mockS3Operations);
    try {
        S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto = new S3FileTransferRequestParamsDto();
        s3FileTransferRequestParamsDto.setS3BucketName("s3BucketName");
        s3FileTransferRequestParamsDto.setS3KeyPrefix("s3KeyPrefix");
        // Return one version summary so that a delete request is actually issued, then fail the delete.
        VersionListing versionListing = new VersionListing();
        versionListing.getVersionSummaries().add(new S3VersionSummary());
        when(mockS3Operations.listVersions(any(), any())).thenReturn(versionListing);
        when(mockS3Operations.deleteObjects(any(), any())).thenThrow(new AmazonClientException("message"));
        try {
            s3Dao.deleteDirectory(s3FileTransferRequestParamsDto);
            fail();
        } catch (Exception e) {
            // The AmazonClientException is expected to be wrapped in an IllegalStateException.
            assertEquals(IllegalStateException.class, e.getClass());
            assertEquals("Failed to delete keys/key versions with prefix \"s3KeyPrefix\" from bucket \"s3BucketName\". Reason: message", e.getMessage());
        }
    } finally {
        ReflectionTestUtils.setField(s3Dao, "s3Operations", originalS3Operations);
    }
}
Use of com.amazonaws.AmazonClientException in project herd by FINRAOS.
In class SimpleExponentialBackoffStrategyTest, method assertExponentialBackoff:
@Test
public void assertExponentialBackoff() {
    long delayBeforeNextRetry = simpleExponentialBackoffStrategy.delayBeforeNextRetry(null, new AmazonClientException("test"), 2);
    Assert.assertEquals(4000L, delayBeforeNextRetry);
}
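The expected value follows a base-times-2^retries pattern: with a one-second base, retry attempt 2 backs off 1000 * 2^2 = 4000 ms. A minimal sketch of such a strategy against the AWS SDK v1 RetryPolicy.BackoffStrategy interface is shown below; the one-second base is inferred from the test expectation, not taken from herd's implementation.

import com.amazonaws.AmazonClientException;
import com.amazonaws.AmazonWebServiceRequest;
import com.amazonaws.retry.RetryPolicy;

// Sketch of a simple exponential backoff strategy; the base delay is an assumption.
public class SimpleExponentialBackoffSketch implements RetryPolicy.BackoffStrategy {

    private static final long BASE_DELAY_MILLIS = 1000L;

    @Override
    public long delayBeforeNextRetry(AmazonWebServiceRequest originalRequest, AmazonClientException exception, int retriesAttempted) {
        // Double the delay on each retry attempt: 1s, 2s, 4s, 8s, ...
        return BASE_DELAY_MILLIS * (1L << retriesAttempted);
    }
}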
Use of com.amazonaws.AmazonClientException in project herd by FINRAOS.
In class S3DaoImpl, method deleteDirectory:
@Override
public void deleteDirectory(final S3FileTransferRequestParamsDto params) {
    LOGGER.info("Deleting keys/key versions from S3... s3KeyPrefix=\"{}\" s3BucketName=\"{}\"", params.getS3KeyPrefix(), params.getS3BucketName());
    Assert.isTrue(!isRootKeyPrefix(params.getS3KeyPrefix()), "Deleting from root directory is not allowed.");
    try {
        // List the S3 versions under the key prefix.
        List<DeleteObjectsRequest.KeyVersion> keyVersions = listVersions(params);
        LOGGER.info("Found keys/key versions in S3 for deletion. s3KeyCount={} s3KeyPrefix=\"{}\" s3BucketName=\"{}\"", keyVersions.size(), params.getS3KeyPrefix(), params.getS3BucketName());
        // To avoid a MalformedXML AWS exception, send the delete request only when there are key versions to delete.
        if (!keyVersions.isEmpty()) {
            // Create an S3 client.
            AmazonS3Client s3Client = getAmazonS3(params);
            try {
                // Delete the key versions.
                deleteKeyVersions(s3Client, params.getS3BucketName(), keyVersions);
            } finally {
                s3Client.shutdown();
            }
        }
    } catch (AmazonClientException e) {
        throw new IllegalStateException(String.format("Failed to delete keys/key versions with prefix \"%s\" from bucket \"%s\". Reason: %s", params.getS3KeyPrefix(), params.getS3BucketName(), e.getMessage()), e);
    }
}
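The deleteKeyVersions helper is not shown in this snippet. A hedged sketch of how such a helper might batch the multi-object delete calls follows, since S3 caps a single DeleteObjects request at 1000 keys; the class name, method shape, and chunk-size constant are illustrative assumptions, not herd's actual code.

import java.util.List;

import com.amazonaws.services.s3.AmazonS3Client;
import com.amazonaws.services.s3.model.DeleteObjectsRequest;
import com.amazonaws.services.s3.model.DeleteObjectsRequest.KeyVersion;

// Hypothetical sketch of a deleteKeyVersions helper; not herd's actual implementation.
final class DeleteKeyVersionsSketch {

    // S3 accepts at most 1000 keys in a single multi-object delete request.
    private static final int MAX_KEYS_PER_DELETE_REQUEST = 1000;

    static void deleteKeyVersions(AmazonS3Client s3Client, String bucketName, List<KeyVersion> keyVersions) {
        // Delete the key versions in chunks so each request stays under the S3 limit.
        for (int i = 0; i < keyVersions.size(); i += MAX_KEYS_PER_DELETE_REQUEST) {
            List<KeyVersion> chunk = keyVersions.subList(i, Math.min(i + MAX_KEYS_PER_DELETE_REQUEST, keyVersions.size()));
            s3Client.deleteObjects(new DeleteObjectsRequest(bucketName).withKeys(chunk));
        }
    }
}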
Use of com.amazonaws.AmazonClientException in project kafka-connect-storage-cloud by confluentinc.
In class S3SinkTask, method start:
public void start(Map<String, String> props) {
    try {
        connectorConfig = new S3SinkConnectorConfig(props);
        url = connectorConfig.getString(StorageCommonConfig.STORE_URL_CONFIG);
        @SuppressWarnings("unchecked")
        Class<? extends S3Storage> storageClass = (Class<? extends S3Storage>) connectorConfig.getClass(StorageCommonConfig.STORAGE_CLASS_CONFIG);
        storage = StorageFactory.createStorage(storageClass, S3SinkConnectorConfig.class, connectorConfig, url);
        if (!storage.bucketExists()) {
            throw new DataException("Non-existent S3 bucket: " + connectorConfig.getBucketName());
        }
        writerProvider = newFormat().getRecordWriterProvider();
        partitioner = newPartitioner(connectorConfig);
        open(context.assignment());
        log.info("Started S3 connector task with assigned partitions: {}", assignment);
    } catch (ClassNotFoundException | IllegalAccessException | InstantiationException | InvocationTargetException | NoSuchMethodException e) {
        throw new ConnectException("Reflection exception: ", e);
    } catch (AmazonClientException e) {
        // Any AWS client failure during startup is surfaced to the Connect framework as a ConnectException.
        throw new ConnectException(e);
    }
}
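A minimal, illustrative configuration for exercising start() is sketched below. The property names follow the Confluent S3 sink connector's documented settings; the bucket, region, format, and flush-size values are placeholders.

import java.util.HashMap;
import java.util.Map;

// Illustrative minimal properties for S3SinkTask.start(); values are placeholders.
public class S3SinkTaskConfigExample {

    public static Map<String, String> minimalProps() {
        Map<String, String> props = new HashMap<>();
        props.put("storage.class", "io.confluent.connect.s3.storage.S3Storage");
        props.put("format.class", "io.confluent.connect.s3.format.json.JsonFormat");
        props.put("s3.bucket.name", "my-example-bucket");
        props.put("s3.region", "us-east-1");
        props.put("flush.size", "3");
        return props;
    }
}

If the configured bucket does not exist, start() fails fast with a DataException; an AmazonClientException raised while talking to S3 is wrapped in a ConnectException and handed back to the Connect framework, as the catch blocks above show.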