use of com.amazonaws.services.s3.model.AmazonS3Exception in project herd by FINRAOS.
the class S3DaoImpl method listDirectory.
@Override
public List<S3ObjectSummary> listDirectory(final S3FileTransferRequestParamsDto params, boolean ignoreZeroByteDirectoryMarkers) {
    Assert.isTrue(!isRootKeyPrefix(params.getS3KeyPrefix()), "Listing of S3 objects from root directory is not allowed.");
    AmazonS3Client s3Client = getAmazonS3(params);
    List<S3ObjectSummary> s3ObjectSummaries = new ArrayList<>();
    try {
        ListObjectsRequest listObjectsRequest = new ListObjectsRequest().withBucketName(params.getS3BucketName()).withPrefix(params.getS3KeyPrefix());
        ObjectListing objectListing;
        do {
            objectListing = s3Operations.listObjects(listObjectsRequest, s3Client);
            for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
                // Ignore 0 byte objects that represent S3 directories.
                if (!(ignoreZeroByteDirectoryMarkers && objectSummary.getKey().endsWith("/") && objectSummary.getSize() == 0L)) {
                    s3ObjectSummaries.add(objectSummary);
                }
            }
            listObjectsRequest.setMarker(objectListing.getNextMarker());
        } while (objectListing.isTruncated());
    } catch (AmazonS3Exception amazonS3Exception) {
        if (S3Operations.ERROR_CODE_NO_SUCH_BUCKET.equals(amazonS3Exception.getErrorCode())) {
            throw new IllegalArgumentException("The specified bucket '" + params.getS3BucketName() + "' does not exist.", amazonS3Exception);
        }
        throw new IllegalStateException("Error accessing S3", amazonS3Exception);
    } catch (AmazonClientException e) {
        throw new IllegalStateException(String.format("Failed to list keys with prefix \"%s\" from bucket \"%s\". Reason: %s", params.getS3KeyPrefix(), params.getS3BucketName(), e.getMessage()), e);
    } finally {
        // Shutdown the AmazonS3Client instance to release resources.
        s3Client.shutdown();
    }
    return s3ObjectSummaries;
}
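For reference, a caller sees the translated exception types rather than the raw AmazonS3Exception. The following is a minimal sketch of consuming listDirectory; the bucket name and key prefix are placeholders, and the s3Dao field is assumed to be wired as in the test below.

// Hypothetical caller of the DAO method above; bucket name and prefix are illustrative only.
S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto();
params.setS3BucketName("my-example-bucket");
params.setS3KeyPrefix("reports/2018/"); // must be a non-root prefix per the assertion above
try {
    List<S3ObjectSummary> summaries = s3Dao.listDirectory(params, true);
    for (S3ObjectSummary summary : summaries) {
        System.out.println(summary.getKey() + " (" + summary.getSize() + " bytes)");
    }
} catch (IllegalArgumentException e) {
    // Raised when the underlying AmazonS3Exception carried the "NoSuchBucket" error code.
    System.err.println(e.getMessage());
} catch (IllegalStateException e) {
    // All other S3 and client errors are translated to IllegalStateException.
    System.err.println(e.getMessage());
}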
use of com.amazonaws.services.s3.model.AmazonS3Exception in project herd by FINRAOS.
the class S3DaoTest method testListVersionsAssertHandleGenericAmazonS3Exception.
@Test
public void testListVersionsAssertHandleGenericAmazonS3Exception() {
    S3Operations originalS3Operations = (S3Operations) ReflectionTestUtils.getField(s3Dao, "s3Operations");
    S3Operations mockS3Operations = mock(S3Operations.class);
    ReflectionTestUtils.setField(s3Dao, "s3Operations", mockS3Operations);
    try {
        String s3BucketName = "s3BucketName";
        String s3KeyPrefix = "s3KeyPrefix";
        S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto = new S3FileTransferRequestParamsDto();
        s3FileTransferRequestParamsDto.setS3BucketName(s3BucketName);
        s3FileTransferRequestParamsDto.setS3KeyPrefix(s3KeyPrefix);
        when(mockS3Operations.listVersions(any(), any())).thenThrow(new AmazonS3Exception("message"));
        try {
            s3Dao.listVersions(s3FileTransferRequestParamsDto);
            fail();
        } catch (Exception e) {
            assertEquals(IllegalStateException.class, e.getClass());
            assertEquals("Error accessing S3", e.getMessage());
        }
    } finally {
        ReflectionTestUtils.setField(s3Dao, "s3Operations", originalS3Operations);
    }
}
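A companion test for the NoSuchBucket branch of listDirectory could follow the same mocking pattern. This is a sketch, assuming the same test fixture and using setErrorCode(String), which AmazonS3Exception inherits from AmazonServiceException:

// Hypothetical test body for the NoSuchBucket translation, reusing the fixture above.
AmazonS3Exception noSuchBucketException = new AmazonS3Exception("message");
noSuchBucketException.setErrorCode(S3Operations.ERROR_CODE_NO_SUCH_BUCKET);
when(mockS3Operations.listObjects(any(), any())).thenThrow(noSuchBucketException);
try {
    s3Dao.listDirectory(s3FileTransferRequestParamsDto, false);
    fail();
} catch (Exception e) {
    // The DAO translates the NoSuchBucket error code into IllegalArgumentException.
    assertEquals(IllegalArgumentException.class, e.getClass());
}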
use of com.amazonaws.services.s3.model.AmazonS3Exception in project photon-model by vmware.
the class AWSS3StorageEnumerationAdapterService method enumerateTags.
/**
 * Calls getBucketTaggingConfiguration() on every bucket to enumerate bucket tags.
 * The getBucketTaggingConfiguration() method is region aware and only returns valid results when
 * called with a client whose region matches the S3 bucket's region. Since the bucket region is not
 * available through the API, when a new bucket is discovered we try to get its tags by calling
 * getBucketTaggingConfiguration() with a client in every AWS region. We then store the client
 * region for which we received a successful response as the region in the DiskState for that S3
 * bucket. This region in the DiskState is then used for any subsequent tag enumeration calls.
 */
private void enumerateTags(S3StorageEnumerationContext aws, S3StorageEnumerationSubStage next) {
    OperationContext operationContext = OperationContext.getOperationContext();
    this.executorService.submit(() -> {
        OperationContext.restoreOperationContext(operationContext);
        List<DeferredResult<S3ClientHandler>> s3ClientsEnumeratePreviousBuckets = new ArrayList<>();
        // For buckets with a known region, get a client for that region and enumerate tags.
        for (Map.Entry<String, DiskState> entry : aws.diskStatesToBeUpdatedByBucketName.entrySet()) {
            // If the region is already known, reuse it. Otherwise, delete the diskState so the
            // bucket is re-enumerated with a valid region in subsequent enumeration runs.
            if (entry.getValue().regionId != null) {
                aws.regionsByBucketName.put(entry.getValue().id, entry.getValue().regionId);
            } else {
                logWarning("Null region found in S3 diskState");
                Operation.createDelete(aws.service.getHost(), entry.getValue().documentSelfLink)
                        .setReferer(aws.service.getUri())
                        .setBody(getDeletionState(Utils.getNowMicrosUtc()))
                        .setCompletion((o, e) -> {
                            if (e != null) {
                                logWarning("Exception deleting diskState with null region [ex=%s]", e.getMessage());
                                return;
                            }
                            logWarning("Deleted diskState with null region [diskState=%s]", Utils.toJsonHtml(entry.getValue()));
                        }).sendWith(aws.service);
                continue;
            }
            s3ClientsEnumeratePreviousBuckets.add(aws.clientManager
                    .getOrCreateS3ClientAsync(aws.endpointAuth, entry.getValue().regionId, aws.service)
                    .thenApply(s3Client -> {
                        S3ClientHandler s3ClientHandler = new S3ClientHandler();
                        s3ClientHandler.s3Client = s3Client;
                        s3ClientHandler.bucketName = entry.getKey();
                        s3ClientHandler.diskState = entry.getValue();
                        return s3ClientHandler;
                    }));
        }
        // Handler to enumerate new buckets not previously enumerated.
        BiConsumer<List<S3ClientHandler>, Throwable> enumerateNewBuckets = (s3ClientHandlers, throwable) -> {
            OperationContext.restoreOperationContext(operationContext);
            for (Bucket bucket : aws.bucketsToBeCreated) {
                for (S3ClientHandler s3ClientHandler : s3ClientHandlers) {
                    try {
                        BucketTaggingConfiguration bucketTaggingConfiguration = s3ClientHandler.s3Client.getBucketTaggingConfiguration(bucket.getName());
                        aws.regionsByBucketName.put(bucket.getName(), s3ClientHandler.region.getName());
                        if (bucketTaggingConfiguration != null) {
                            aws.tagsByBucketName.put(bucket.getName(), new ConcurrentHashMap<>());
                            bucketTaggingConfiguration.getAllTagSets().forEach(tagSet -> aws.tagsByBucketName.get(bucket.getName()).putAll(tagSet.getAllTags()));
                        }
                        break;
                    } catch (Exception e) {
                        // A 301, 403 or 400 status from getBucketTaggingConfiguration() indicates
                        // the client region does not match the bucket region; try the next region.
                        if (e instanceof AmazonS3Exception && (((AmazonS3Exception) e).getStatusCode() == Operation.STATUS_CODE_MOVED_PERM
                                || ((AmazonS3Exception) e).getStatusCode() == Operation.STATUS_CODE_FORBIDDEN
                                || ((AmazonS3Exception) e).getStatusCode() == Operation.STATUS_CODE_BAD_REQUEST)) {
                            continue;
                        } else {
                            logSevere("Exception enumerating tags for S3 bucket with unknown region [endpoint=%s] [region=%s] [ex=%s]",
                                    aws.request.original.endpointLink, s3ClientHandler.region.getName(), e.getMessage());
                            continue;
                        }
                    }
                }
            }
            // Once completed, move to the next stage.
            aws.subStage = next;
            handleReceivedEnumerationData(aws);
        };
        // Handler to enumerate tags for buckets already previously enumerated.
        BiConsumer<List<S3ClientHandler>, Throwable> enumeratePreviousBuckets = (s3ClientHandlers, ignored) -> {
            OperationContext.restoreOperationContext(operationContext);
            for (S3ClientHandler s3ClientHandler : s3ClientHandlers) {
                try {
                    BucketTaggingConfiguration bucketTaggingConfiguration = s3ClientHandler.s3Client.getBucketTaggingConfiguration(s3ClientHandler.bucketName);
                    if (bucketTaggingConfiguration != null) {
                        aws.tagsByBucketName.put(s3ClientHandler.bucketName, new ConcurrentHashMap<>());
                        bucketTaggingConfiguration.getAllTagSets().forEach(tagSet -> aws.tagsByBucketName.get(s3ClientHandler.bucketName).putAll(tagSet.getAllTags()));
                    }
                } catch (Exception e) {
                    logSevere("Exception enumerating tags for S3 bucket with known region [endpoint=%s] [bucketName=%s - %s] [region=%s] [ex=%s]",
                            aws.request.original.endpointLink, s3ClientHandler.bucketName, s3ClientHandler.diskState.id, s3ClientHandler.diskState.regionId, e.getMessage());
                }
            }
            // The remaining buckets have yet to be enumerated. Brute force and try to
            // enumerate tags for these buckets over every region until we find the correct
            // region, then store it in DiskState for future reference.
            List<DeferredResult<S3ClientHandler>> s3ClientBucketsToEnumerate = new ArrayList<>();
            for (Regions region : Regions.values()) {
                // Get an S3 client in the region asynchronously. Once all have completed,
                // these clients will be used to test each bucket.
                s3ClientBucketsToEnumerate.add(aws.clientManager
                        .getOrCreateS3ClientAsync(aws.endpointAuth, region.getName(), aws.service)
                        .thenApply(s3Client -> {
                            S3ClientHandler s3ClientHandler = new S3ClientHandler();
                            s3ClientHandler.s3Client = s3Client;
                            s3ClientHandler.region = region;
                            return s3ClientHandler;
                        }));
            }
            // Once finished, attempt to enumerate each of the "new" buckets.
            DeferredResult.allOf(s3ClientBucketsToEnumerate).whenComplete(enumerateNewBuckets);
        };
        // Enumerate tags of previously enumerated buckets first.
        DeferredResult.allOf(s3ClientsEnumeratePreviousBuckets).whenComplete(enumeratePreviousBuckets);
    });
}
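The region-probing idea can be illustrated in isolation with the plain AWS SDK v1 client builder. A minimal sketch, assuming a hypothetical bucket name and synchronous clients instead of the service's async client manager:

// Probe every region until getBucketTaggingConfiguration() succeeds; "example-bucket" is a placeholder.
BucketTaggingConfiguration tagging = null;
for (Regions region : Regions.values()) {
    AmazonS3 s3 = AmazonS3ClientBuilder.standard().withRegion(region).build();
    try {
        // Succeeds only when the client region matches the bucket region;
        // returns null if the bucket exists there but has no tags.
        tagging = s3.getBucketTaggingConfiguration("example-bucket");
        System.out.println("Bucket region resolved to " + region.getName());
        break;
    } catch (AmazonS3Exception e) {
        // 301, 400 and 403 responses indicate a region mismatch; try the next region.
    } finally {
        s3.shutdown();
    }
}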
use of com.amazonaws.services.s3.model.AmazonS3Exception in project photon-model by vmware.
the class AWSS3StorageEnumerationAdapterService method enumerateS3Buckets.
/**
 * Calls listBuckets() to enumerate S3 buckets.
 * The AWS SDK does not have an async method for listing buckets, so we call the synchronous
 * method from a fixed thread pool dedicated to the S3 enumeration service.
 * If the listBuckets() call fails due to an unsupported region, we mark the S3 client invalid,
 * stop the enumeration flow, and patch back to the parent.
 */
private void enumerateS3Buckets(S3StorageEnumerationContext aws) {
    logInfo(() -> String.format("Running creation enumeration in refresh mode for %s", aws.request.original.resourceReference));
    OperationContext operationContext = OperationContext.getOperationContext();
    this.executorService.submit(() -> {
        try {
            List<Bucket> bucketList = aws.amazonS3Client.listBuckets();
            for (Bucket bucket : bucketList) {
                aws.remoteBucketsByBucketName.put(bucket.getName(), bucket);
            }
            OperationContext.restoreOperationContext(operationContext);
            if (aws.remoteBucketsByBucketName.isEmpty()) {
                aws.subStage = S3StorageEnumerationSubStage.DELETE_DISKS;
            }
            handleReceivedEnumerationData(aws);
        } catch (Exception e) {
            if (e instanceof AmazonS3Exception && ((AmazonS3Exception) e).getStatusCode() == Operation.STATUS_CODE_FORBIDDEN) {
                markClientInvalid(aws);
            } else {
                logSevere("Exception enumerating S3 buckets [ex=%s]", e.getMessage());
                aws.error = e;
                aws.stage = S3StorageEnumerationStages.ERROR;
                handleEnumerationRequest(aws);
            }
        }
    });
}
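The same pattern can be shown outside the service, assuming default credentials: the synchronous listBuckets() call is submitted to a fixed thread pool, and a 403 response marks the client as unusable. Names here are illustrative.

// Standalone sketch of the synchronous-listBuckets-in-a-thread-pool pattern.
ExecutorService executorService = Executors.newFixedThreadPool(1);
AmazonS3 amazonS3Client = AmazonS3ClientBuilder.defaultClient();
executorService.submit(() -> {
    try {
        for (Bucket bucket : amazonS3Client.listBuckets()) {
            System.out.println(bucket.getName());
        }
    } catch (AmazonS3Exception e) {
        if (e.getStatusCode() == 403) {
            // Analogous to markClientInvalid(aws) above: stop using this client.
            System.err.println("S3 client rejected with 403: " + e.getErrorMessage());
        } else {
            throw e;
        }
    }
});
executorService.shutdown();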
use of com.amazonaws.services.s3.model.AmazonS3Exception in project photon-model by vmware.
the class AWSCostStatsService method downloadParseAndCreateStats.
private void downloadParseAndCreateStats(AWSCostStatsCreationContext statsData, String awsBucketName) throws IOException {
    try {
        // Create a working directory for downloading and processing the bill.
        final Path workingDirPath = Paths.get(System.getProperty(TEMP_DIR_LOCATION), UUID.randomUUID().toString());
        Files.createDirectories(workingDirPath);
        AWSCsvBillParser parser = new AWSCsvBillParser();
        final String csvBillZipFileName = parser.getCsvBillFileName(statsData.billMonthToDownload, statsData.accountId, true);
        Path csvBillZipFilePath = Paths.get(workingDirPath.toString(), csvBillZipFileName);
        ProgressListener listener = new ProgressListener() {

            @Override
            public void progressChanged(ProgressEvent progressEvent) {
                try {
                    ProgressEventType eventType = progressEvent.getEventType();
                    if (ProgressEventType.TRANSFER_COMPLETED_EVENT.equals(eventType)) {
                        OperationContext.restoreOperationContext(statsData.opContext);
                        LocalDate billMonth = new LocalDate(statsData.billMonthToDownload.getYear(), statsData.billMonthToDownload.getMonthOfYear(), 1);
                        logWithContext(statsData, Level.INFO, () -> String.format("Processing bill for the month: %s.", billMonth));
                        parser.parseDetailedCsvBill(statsData.ignorableInvoiceCharge, csvBillZipFilePath, statsData.awsAccountIdToComputeStates.keySet(), getHourlyStatsConsumer(billMonth, statsData), getMonthlyStatsConsumer(billMonth, statsData));
                        deleteTempFiles();
                        // Continue downloading and processing bills for the following months,
                        // up to and including the current month.
                        statsData.billMonthToDownload = statsData.billMonthToDownload.plusMonths(1);
                        handleCostStatsCreationRequest(statsData);
                    } else if (ProgressEventType.TRANSFER_FAILED_EVENT.equals(eventType)) {
                        deleteTempFiles();
                        billDownloadFailureHandler(statsData, awsBucketName, new IOException("Download of AWS CSV Bill '" + csvBillZipFileName + "' failed."));
                    }
                } catch (Exception exception) {
                    deleteTempFiles();
                    billDownloadFailureHandler(statsData, awsBucketName, exception);
                }
            }

            private void deleteTempFiles() {
                try {
                    Files.deleteIfExists(csvBillZipFilePath);
                    Files.deleteIfExists(workingDirPath);
                } catch (IOException e) {
                    // Ignore IO exceptions while cleaning up temporary files.
                }
            }
        };
        GetObjectRequest getObjectRequest = new GetObjectRequest(awsBucketName, csvBillZipFileName).withGeneralProgressListener(listener);
        statsData.s3Client.download(getObjectRequest, csvBillZipFilePath.toFile());
    } catch (AmazonS3Exception s3Exception) {
        billDownloadFailureHandler(statsData, awsBucketName, s3Exception);
    }
}
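The concrete type of statsData.s3Client is not shown in this snippet; its download(GetObjectRequest, File) signature matches the AWS SDK v1 TransferManager. A minimal sketch of that download-with-progress-listener pattern, with an illustrative bucket, key, and target path:

// Hypothetical standalone version of the download above; bucket, key and path are placeholders.
TransferManager transferManager = TransferManagerBuilder.standard().build();
File targetFile = new File("/tmp/bill.csv.zip");
GetObjectRequest request = new GetObjectRequest("example-billing-bucket", "bill.csv.zip")
        .withGeneralProgressListener(progressEvent -> {
            ProgressEventType eventType = progressEvent.getEventType();
            if (ProgressEventType.TRANSFER_COMPLETED_EVENT.equals(eventType)) {
                System.out.println("Download complete: " + targetFile);
            } else if (ProgressEventType.TRANSFER_FAILED_EVENT.equals(eventType)) {
                System.err.println("Download failed");
            }
        });
Download download = transferManager.download(request, targetFile);
try {
    // Blocks until done; rethrows AmazonS3Exception on S3 service errors.
    download.waitForCompletion();
} catch (InterruptedException e) {
    Thread.currentThread().interrupt();
} finally {
    transferManager.shutdownNow(false);
}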