Use of com.amazonaws.AmazonClientException in project nifi by apache.
In the class PutS3Object, the method getS3AgeoffListAndAgeoffLocalState:
protected MultipartUploadListing getS3AgeoffListAndAgeoffLocalState(final ProcessContext context, final AmazonS3Client s3, final long now) {
    final long ageoffInterval = context.getProperty(MULTIPART_S3_AGEOFF_INTERVAL).asTimePeriod(TimeUnit.MILLISECONDS);
    final String bucket = context.getProperty(BUCKET).evaluateAttributeExpressions().getValue();
    final Long maxAge = context.getProperty(MULTIPART_S3_MAX_AGE).asTimePeriod(TimeUnit.MILLISECONDS);
    final long ageCutoff = now - maxAge;
    final List<MultipartUpload> ageoffList = new ArrayList<>();
    if ((lastS3AgeOff.get() < now - ageoffInterval) && s3BucketLock.tryLock()) {
        try {
            final ListMultipartUploadsRequest listRequest = new ListMultipartUploadsRequest(bucket);
            final MultipartUploadListing listing = s3.listMultipartUploads(listRequest);
            for (MultipartUpload upload : listing.getMultipartUploads()) {
                long uploadTime = upload.getInitiated().getTime();
                if (uploadTime < ageCutoff) {
                    ageoffList.add(upload);
                }
            }
            // ageoff any local state
            ageoffLocalState(ageCutoff);
            lastS3AgeOff.set(System.currentTimeMillis());
        } catch (AmazonClientException e) {
            if (e instanceof AmazonS3Exception && ((AmazonS3Exception) e).getStatusCode() == 403 && "AccessDenied".equals(((AmazonS3Exception) e).getErrorCode())) {
                getLogger().warn("AccessDenied checking S3 Multipart Upload list for {}: {} " + "** The configured user does not have the s3:ListBucketMultipartUploads permission " + "for this bucket, S3 ageoff cannot occur without this permission. Next ageoff check " + "time is being advanced by interval to prevent checking on every upload **", new Object[] { bucket, e.getMessage() });
                lastS3AgeOff.set(System.currentTimeMillis());
            } else {
                getLogger().error("Error checking S3 Multipart Upload list for {}: {}", new Object[] { bucket, e.getMessage() });
            }
        } finally {
            s3BucketLock.unlock();
        }
    }
    final MultipartUploadListing result = new MultipartUploadListing();
    result.setBucketName(bucket);
    result.setMultipartUploads(ageoffList);
    return result;
}
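The catch block narrows the generic AmazonClientException to AmazonS3Exception so a missing s3:ListBucketMultipartUploads permission (HTTP 403, error code AccessDenied) can be told apart from other failures. A minimal sketch of that narrowing pattern; the helper name isAccessDenied is hypothetical, not part of NiFi:

import com.amazonaws.AmazonClientException;
import com.amazonaws.services.s3.model.AmazonS3Exception;

final class S3Errors {
    // True when the failure is an S3 403 AccessDenied, i.e. the credentials
    // lack a permission, as opposed to some other client-side error.
    static boolean isAccessDenied(final AmazonClientException e) {
        if (!(e instanceof AmazonS3Exception)) {
            return false;
        }
        final AmazonS3Exception s3e = (AmazonS3Exception) e;
        // "AccessDenied".equals(...) is null-safe; getErrorCode() can be null
        // when no well-formed error response was received.
        return s3e.getStatusCode() == 403 && "AccessDenied".equals(s3e.getErrorCode());
    }
}

Both getStatusCode() and getErrorCode() come from AmazonServiceException, which AmazonS3Exception extends.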
Use of com.amazonaws.AmazonClientException in project nifi by apache.
In the class PutS3Object, the method abortS3MultipartUpload:
protected void abortS3MultipartUpload(final AmazonS3Client s3, final String bucket, final MultipartUpload upload) {
    final String uploadKey = upload.getKey();
    final String uploadId = upload.getUploadId();
    final AbortMultipartUploadRequest abortRequest = new AbortMultipartUploadRequest(bucket, uploadKey, uploadId);
    try {
        s3.abortMultipartUpload(abortRequest);
        getLogger().info("Aborting out of date multipart upload, bucket {} key {} ID {}, initiated {}", new Object[] { bucket, uploadKey, uploadId, logFormat.format(upload.getInitiated()) });
    } catch (AmazonClientException ace) {
        getLogger().info("Error trying to abort multipart upload from bucket {} with key {} and ID {}: {}", new Object[] { bucket, uploadKey, uploadId, ace.getMessage() });
    }
}
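Together, the two methods form a sweep: list the bucket's in-progress multipart uploads, keep those initiated before the cutoff, and abort each one. A minimal standalone sketch of that flow, assuming an already-configured AmazonS3Client; the class and method names here are illustrative, and a production sweep would also follow truncated listings via the listing's key markers:

import com.amazonaws.AmazonClientException;
import com.amazonaws.services.s3.AmazonS3Client;
import com.amazonaws.services.s3.model.AbortMultipartUploadRequest;
import com.amazonaws.services.s3.model.ListMultipartUploadsRequest;
import com.amazonaws.services.s3.model.MultipartUpload;

final class MultipartSweep {
    // Abort every multipart upload in the bucket initiated before ageCutoff (epoch millis).
    static void abortStaleUploads(final AmazonS3Client s3, final String bucket, final long ageCutoff) {
        final ListMultipartUploadsRequest listRequest = new ListMultipartUploadsRequest(bucket);
        for (MultipartUpload upload : s3.listMultipartUploads(listRequest).getMultipartUploads()) {
            if (upload.getInitiated().getTime() < ageCutoff) {
                try {
                    s3.abortMultipartUpload(new AbortMultipartUploadRequest(bucket, upload.getKey(), upload.getUploadId()));
                } catch (AmazonClientException ace) {
                    // One failed abort should not stop the sweep; a real caller would log and continue.
                }
            }
        }
    }
}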
Use of com.amazonaws.AmazonClientException in project nifi by apache.
In the class DeleteDynamoDBTest, the method testStringHashStringRangeDeleteThrowsClientException:
@Test
public void testStringHashStringRangeDeleteThrowsClientException() {
    final DynamoDB mockDynamoDB = new DynamoDB(Regions.AP_NORTHEAST_1) {
        @Override
        public BatchWriteItemOutcome batchWriteItem(TableWriteItems... tableWriteItems) {
            throw new AmazonClientException("clientException");
        }
    };
    deleteDynamoDB = new DeleteDynamoDB() {
        @Override
        protected DynamoDB getDynamoDB() {
            return mockDynamoDB;
        }
    };
    final TestRunner deleteRunner = TestRunners.newTestRunner(deleteDynamoDB);
    deleteRunner.setProperty(AbstractDynamoDBProcessor.ACCESS_KEY, "abcd");
    deleteRunner.setProperty(AbstractDynamoDBProcessor.SECRET_KEY, "cdef");
    deleteRunner.setProperty(AbstractDynamoDBProcessor.REGION, REGION);
    deleteRunner.setProperty(AbstractDynamoDBProcessor.TABLE, stringHashStringRangeTableName);
    deleteRunner.setProperty(AbstractDynamoDBProcessor.HASH_KEY_NAME, "hashS");
    deleteRunner.setProperty(AbstractDynamoDBProcessor.HASH_KEY_VALUE, "h1");
    deleteRunner.setProperty(AbstractDynamoDBProcessor.RANGE_KEY_NAME, "rangeS");
    deleteRunner.setProperty(AbstractDynamoDBProcessor.RANGE_KEY_VALUE, "r1");
    deleteRunner.enqueue(new byte[] {});
    deleteRunner.run(1);
    deleteRunner.assertAllFlowFilesTransferred(AbstractDynamoDBProcessor.REL_FAILURE, 1);
    final List<MockFlowFile> flowFiles = deleteRunner.getFlowFilesForRelationship(AbstractDynamoDBProcessor.REL_FAILURE);
    for (MockFlowFile flowFile : flowFiles) {
        assertEquals("clientException", flowFile.getAttribute(AbstractDynamoDBProcessor.DYNAMODB_ERROR_EXCEPTION_MESSAGE));
    }
}
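The test forces the failure by overriding the protected getDynamoDB() factory method, so no network call is ever made. Worth noting is the SDK hierarchy the processor relies on: AmazonServiceException (the request reached the service and was rejected) extends AmazonClientException (any failure in the client), so a single catch covers both. A small illustrative helper, not part of the NiFi code:

import com.amazonaws.AmazonClientException;
import com.amazonaws.AmazonServiceException;

final class AwsErrors {
    // Summarize an AWS SDK v1 failure. Service-side rejections carry an HTTP
    // status and error message; pure client-side failures only have a message.
    static String describe(final AmazonClientException e) {
        if (e instanceof AmazonServiceException) {
            final AmazonServiceException ase = (AmazonServiceException) e;
            return "service error " + ase.getStatusCode() + ": " + ase.getErrorMessage();
        }
        return "client error: " + e.getMessage();
    }
}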
Use of com.amazonaws.AmazonClientException in project ORCID-Source by ORCID.
In the class S3MessageProcessor, the method update_2_0_API:
private void update_2_0_API(BaseMessage message) {
    String orcid = message.getOrcid();
    if (is20IndexingEnabled) {
        // Update API 2.0
        try {
            Record record = orcid20ApiClient.fetchPublicRecord(message);
            if (record != null) {
                s3Updater.updateS3(orcid, record);
                recordStatusManager.markAsSent(orcid, AvailableBroker.DUMP_STATUS_2_0_API);
            }
        } catch (LockedRecordException | DeprecatedRecordException e) {
            try {
                OrcidError error = null;
                if (e instanceof LockedRecordException) {
                    LOG.error("Record " + orcid + " is locked");
                    error = ((LockedRecordException) e).getOrcidError();
                } else {
                    LOG.error("Record " + orcid + " is deprecated");
                    error = ((DeprecatedRecordException) e).getOrcidError();
                }
                exceptionHandler.handle20Exception(orcid, error);
                recordStatusManager.markAsSent(orcid, AvailableBroker.DUMP_STATUS_2_0_API);
            } catch (Exception e1) {
                LOG.error("Unable to handle LockedRecordException for record " + orcid, e1);
                recordStatusManager.markAsFailed(orcid, AvailableBroker.DUMP_STATUS_2_0_API);
            }
        } catch (AmazonClientException e) {
            LOG.error("Unable to fetch record " + orcid + " for 2.0 API: " + e.getMessage(), e);
            recordStatusManager.markAsFailed(orcid, AvailableBroker.DUMP_STATUS_2_0_API);
        } catch (Exception e) {
            // Something else went wrong fetching the record from ORCID and threw a runtime exception
            LOG.error("Unable to fetch record " + orcid + " for 2.0 API: " + e.getMessage(), e);
            recordStatusManager.markAsFailed(orcid, AvailableBroker.DUMP_STATUS_2_0_API);
        }
    }
}
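Here AmazonClientException marks the record as failed so the update can be retried later, while locked and deprecated records are handled and marked as sent, since retrying them cannot succeed. When deciding whether an AWS failure is worth retrying at all, the SDK's own hint can help; a minimal sketch, with the method name shouldRetry being illustrative:

import com.amazonaws.AmazonClientException;

final class RetryPolicy {
    // AmazonClientException.isRetryable() reports whether the SDK considers
    // the failure transient (e.g. an I/O error) rather than permanent
    // (e.g. invalid credentials), which would fail again on every attempt.
    static boolean shouldRetry(final AmazonClientException e) {
        return e.isRetryable();
    }
}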
Use of com.amazonaws.AmazonClientException in project jackrabbit-oak by apache.
In the class S3Backend, the method init:
public void init() throws DataStoreException {
    ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    try {
        startTime = new Date();
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        LOG.debug("init");
        s3ReqDecorator = new S3RequestDecorator(properties);
        s3service = Utils.openService(properties);
        if (bucket == null || "".equals(bucket.trim())) {
            bucket = properties.getProperty(S3Constants.S3_BUCKET);
            // Alternately check if the 'container' property is set
            if (Strings.isNullOrEmpty(bucket)) {
                bucket = properties.getProperty(S3Constants.S3_CONTAINER);
            }
        }
        String region = properties.getProperty(S3Constants.S3_REGION);
        Region s3Region;
        if (StringUtils.isNullOrEmpty(region)) {
            com.amazonaws.regions.Region ec2Region = Regions.getCurrentRegion();
            if (ec2Region != null) {
                s3Region = Region.fromValue(ec2Region.getName());
            } else {
                throw new AmazonClientException("parameter [" + S3Constants.S3_REGION + "] not configured and cannot be derived from environment");
            }
        } else {
            if (Utils.DEFAULT_AWS_BUCKET_REGION.equals(region)) {
                s3Region = Region.US_Standard;
            } else if (Region.EU_Ireland.toString().equals(region)) {
                s3Region = Region.EU_Ireland;
            } else {
                s3Region = Region.fromValue(region);
            }
        }
        if (!s3service.doesBucketExist(bucket)) {
            s3service.createBucket(bucket, s3Region);
            LOG.info("Created bucket [{}] in [{}] ", bucket, region);
        } else {
            LOG.info("Using bucket [{}] in [{}] ", bucket, region);
        }
        int writeThreads = 10;
        String writeThreadsStr = properties.getProperty(S3Constants.S3_WRITE_THREADS);
        if (writeThreadsStr != null) {
            writeThreads = Integer.parseInt(writeThreadsStr);
        }
        LOG.info("Using thread pool of [{}] threads in S3 transfer manager.", writeThreads);
        tmx = new TransferManager(s3service, Executors.newFixedThreadPool(writeThreads, new NamedThreadFactory("s3-transfer-manager-worker")));
        String renameKeyProp = properties.getProperty(S3Constants.S3_RENAME_KEYS);
        boolean renameKeyBool = (renameKeyProp == null || "".equals(renameKeyProp)) ? false : Boolean.parseBoolean(renameKeyProp);
        LOG.info("Rename keys [{}]", renameKeyBool);
        if (renameKeyBool) {
            renameKeys();
        }
        LOG.debug("S3 Backend initialized in [{}] ms", System.currentTimeMillis() - startTime.getTime());
    } catch (Exception e) {
        LOG.error("Error ", e);
        Map<String, Object> filteredMap = Maps.newHashMap();
        if (properties != null) {
            filteredMap = Maps.filterKeys(Utils.asMap(properties), new Predicate<String>() {
                @Override
                public boolean apply(String input) {
                    return !input.equals(S3Constants.ACCESS_KEY) && !input.equals(S3Constants.SECRET_KEY);
                }
            });
        }
        throw new DataStoreException("Could not initialize S3 from " + filteredMap, e);
    } finally {
        if (contextClassLoader != null) {
            Thread.currentThread().setContextClassLoader(contextClassLoader);
        }
    }
}
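The region handling above prefers the configured S3_REGION property, falls back to the EC2 instance metadata via Regions.getCurrentRegion(), and throws AmazonClientException when neither yields a region. The same fallback as a standalone helper; resolveRegion is an illustrative name, not part of the Oak code:

import com.amazonaws.AmazonClientException;
import com.amazonaws.regions.Region;
import com.amazonaws.regions.Regions;

final class RegionResolver {
    // Prefer the configured region; otherwise ask the EC2 instance metadata
    // service; fail fast if neither is available, because bucket creation
    // needs an explicit region.
    static String resolveRegion(final String configured) {
        if (configured != null && !configured.trim().isEmpty()) {
            return configured;
        }
        final Region current = Regions.getCurrentRegion();
        if (current != null) {
            return current.getName();
        }
        throw new AmazonClientException("region not configured and cannot be derived from the environment");
    }
}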