Use of com.amazonaws.services.s3.model.AmazonS3Exception in project nifi by apache:
the class PutS3Object, method getS3AgeoffListAndAgeoffLocalState.
/**
 * Returns the multipart uploads in the configured bucket that were initiated before the
 * max-age cutoff, and ages off matching local state. The S3 listing is refreshed at most
 * once per configured age-off interval, and only by the thread that wins the bucket lock;
 * otherwise an empty listing is returned.
 *
 * @param context supplies the age-off interval, bucket name and max-age properties
 * @param s3 the client used to list in-progress multipart uploads
 * @param now current time in milliseconds, used for both the interval and age checks
 * @return a listing containing only uploads older than the cutoff (possibly empty)
 */
protected MultipartUploadListing getS3AgeoffListAndAgeoffLocalState(final ProcessContext context, final AmazonS3Client s3, final long now) {
    final long ageoffInterval = context.getProperty(MULTIPART_S3_AGEOFF_INTERVAL).asTimePeriod(TimeUnit.MILLISECONDS);
    final String bucket = context.getProperty(BUCKET).evaluateAttributeExpressions().getValue();
    final Long maxAge = context.getProperty(MULTIPART_S3_MAX_AGE).asTimePeriod(TimeUnit.MILLISECONDS);
    final long ageCutoff = now - maxAge;
    final List<MultipartUpload> ageoffList = new ArrayList<>();
    // Only query S3 when the interval has elapsed AND we win the per-bucket lock, so at
    // most one thread performs the listing per interval.
    if ((lastS3AgeOff.get() < now - ageoffInterval) && s3BucketLock.tryLock()) {
        try {
            final ListMultipartUploadsRequest listRequest = new ListMultipartUploadsRequest(bucket);
            final MultipartUploadListing listing = s3.listMultipartUploads(listRequest);
            for (final MultipartUpload upload : listing.getMultipartUploads()) {
                if (upload.getInitiated().getTime() < ageCutoff) {
                    ageoffList.add(upload);
                }
            }
            // ageoff any local state
            ageoffLocalState(ageCutoff);
            lastS3AgeOff.set(System.currentTimeMillis());
        } catch (AmazonClientException e) {
            // Cast once instead of three times; constant-first equals() so a null error
            // code cannot throw a NullPointerException while we are handling an exception.
            final AmazonS3Exception s3e = (e instanceof AmazonS3Exception) ? (AmazonS3Exception) e : null;
            if (s3e != null && s3e.getStatusCode() == 403 && "AccessDenied".equals(s3e.getErrorCode())) {
                getLogger().warn("AccessDenied checking S3 Multipart Upload list for {}: {} " + "** The configured user does not have the s3:ListBucketMultipartUploads permission " + "for this bucket, S3 ageoff cannot occur without this permission. Next ageoff check " + "time is being advanced by interval to prevent checking on every upload **", new Object[] { bucket, e.getMessage() });
                // Advance the timestamp so we do not repeat a listing that will keep failing.
                lastS3AgeOff.set(System.currentTimeMillis());
            } else {
                getLogger().error("Error checking S3 Multipart Upload list for {}: {}", new Object[] { bucket, e.getMessage() });
            }
        } finally {
            s3BucketLock.unlock();
        }
    }
    // Always return a (possibly empty) listing so callers can iterate unconditionally.
    final MultipartUploadListing result = new MultipartUploadListing();
    result.setBucketName(bucket);
    result.setMultipartUploads(ageoffList);
    return result;
}
Use of com.amazonaws.services.s3.model.AmazonS3Exception in project herd by FINRAOS:
the class S3DaoTest, method testListDirectoryAssertHandleGenericAmazonS3Exception.
@Test
public void testListDirectoryAssertHandleGenericAmazonS3Exception() {
    // Swap the DAO's S3Operations for a mock so listObjects can be forced to fail,
    // restoring the original in the finally block below.
    S3Operations savedOperations = (S3Operations) ReflectionTestUtils.getField(s3Dao, "s3Operations");
    S3Operations failingOperations = mock(S3Operations.class);
    ReflectionTestUtils.setField(s3Dao, "s3Operations", failingOperations);
    try {
        when(failingOperations.listObjects(any(), any())).thenThrow(new AmazonS3Exception("message"));
        S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto();
        params.setS3BucketName("s3BucketName");
        params.setS3KeyPrefix("s3KeyPrefix");
        try {
            s3Dao.listDirectory(params, true);
            fail();
        } catch (Exception e) {
            // The raw AmazonS3Exception must be translated into an IllegalStateException.
            assertEquals(IllegalStateException.class, e.getClass());
            assertEquals("Error accessing S3", e.getMessage());
        }
    } finally {
        ReflectionTestUtils.setField(s3Dao, "s3Operations", savedOperations);
    }
}
Use of com.amazonaws.services.s3.model.AmazonS3Exception in project bender by Nextdoor:
the class S3TransportFactory, method close.
@Override
public void close() {
    // Complete pending uploads in order until one fails; once a completion has
    // failed, every remaining upload is aborted instead. The first failure is
    // rethrown (wrapped) after all uploads have been processed and state cleared.
    Exception firstFailure = null;
    for (MultiPartUpload upload : this.pendingMultiPartUploads.values()) {
        if (firstFailure != null) {
            logger.warn("aborting upload for: " + upload.getKey());
            try {
                this.client.abortMultipartUpload(upload.getAbortMultipartUploadRequest());
            } catch (AmazonS3Exception abortError) {
                logger.error("failed to abort multi-part upload", abortError);
            }
            continue;
        }
        try {
            this.client.completeMultipartUpload(upload.getCompleteMultipartUploadRequest());
        } catch (AmazonS3Exception completeError) {
            logger.error("failed to complete multi-part upload for " + upload.getKey() + " parts " + upload.getPartCount(), completeError);
            firstFailure = completeError;
        }
    }
    this.pendingMultiPartUploads.clear();
    if (firstFailure != null) {
        throw new RuntimeException("failed while closing transport", firstFailure);
    }
}
Use of com.amazonaws.services.s3.model.AmazonS3Exception in project stocator by CODAIT:
the class COSAPIClient, method getFileStatus.
/**
 * Resolves the FileStatus for {@code path}, consulting the in-memory cache first and
 * then probing the object store under several candidate keys in order: the exact key,
 * the key with a trailing slash (directory marker), and finally a one-entry prefix
 * listing to detect pseudo-directories (a/b/key/d exists without a/b/key/ itself).
 * Note: the {@code msg} parameter is not used within this method.
 *
 * @throws FileNotFoundException if no object, marker or pseudo-directory matches,
 *         or the path refers to a Hadoop temporary object
 * @throws IOException on non-404 errors from the store (most likely auth failures)
 */
@Override
public FileStatus getFileStatus(String hostName, Path path, String msg) throws IOException, FileNotFoundException {
    FileStatus res = null;
    // Fast path: return a previously computed status if cached.
    FileStatus cached = memoryCache.getFileStatus(path.toString());
    if (cached != null) {
        return cached;
    }
    LOG.trace("getFileStatus(start) for {}, hostname: {}", path, hostName);
    /*
     * The requested path is equal to hostName. HostName is equal to
     * hostNameScheme, thus the container. Therefore we have no object to look
     * for, and we return the FileStatus as a directory. Containers have no
     * lastModified timestamp, so 0L is used.
     */
    if (path.toString().equals(hostName) || (path.toString().length() + 1 == hostName.length())) {
        LOG.trace("getFileStatus(completed) {}", path);
        res = new FileStatus(0L, true, 1, mBlockSize, 0L, path);
        memoryCache.putFileStatus(path.toString(), res);
        return res;
    }
    // Hadoop temporary objects are deliberately reported as not found.
    if (path.toString().contains(HADOOP_TEMPORARY)) {
        LOG.debug("getFileStatus on temp object {}. Return not found", path.toString());
        throw new FileNotFoundException("Not found " + path.toString());
    }
    String key = pathToKey(path);
    LOG.debug("getFileStatus: on original key {}", key);
    FileStatus fileStatus = null;
    try {
        // First attempt: look up the object under the exact key.
        fileStatus = getFileStatusKeyBased(key, path);
    } catch (AmazonS3Exception e) {
        LOG.warn("file status {} returned {}", key, e.getStatusCode());
        // 404 just means "fall through to the alternative key forms below";
        // anything else (e.g. 403) is surfaced to the caller.
        if (e.getStatusCode() != 404) {
            LOG.warn("Throw IOException for {}. Most likely authentication failed", key);
            throw new IOException(e);
        }
    }
    if (fileStatus != null) {
        LOG.trace("getFileStatus(completed) {}", path);
        memoryCache.putFileStatus(path.toString(), fileStatus);
        return fileStatus;
    }
    // probably not needed this call
    if (!key.endsWith("/")) {
        // Second attempt: the path may be stored as a directory marker object ("key/").
        String newKey = key + "/";
        try {
            LOG.debug("getFileStatus: original key not found. Alternative key {}", newKey);
            fileStatus = getFileStatusKeyBased(newKey, path);
        } catch (AmazonS3Exception e) {
            if (e.getStatusCode() != 404) {
                throw new IOException(e);
            }
        }
        if (fileStatus != null) {
            LOG.trace("getFileStatus(completed) {}", path);
            memoryCache.putFileStatus(path.toString(), fileStatus);
            return fileStatus;
        } else {
            // if here: both key and key/ returned not found.
            // trying to see if pseudo directory of the form
            // a/b/key/d/e (a/b/key/ doesn't exists by itself)
            // perform listing on the key
            LOG.debug("getFileStatus: Modifined key {} not found. Trying to list", key);
            key = maybeAddTrailingSlash(key);
            // A single result under the prefix is enough to prove the pseudo-directory exists.
            ListObjectsRequest request = new ListObjectsRequest();
            request.setBucketName(mBucket);
            request.setPrefix(key);
            request.setDelimiter("/");
            request.setMaxKeys(1);
            ObjectListing objects = mClient.listObjects(request);
            if (!objects.getCommonPrefixes().isEmpty() || !objects.getObjectSummaries().isEmpty()) {
                LOG.debug("getFileStatus(completed) {}", path);
                res = new FileStatus(0, true, 1, 0, 0, path);
                memoryCache.putFileStatus(path.toString(), res);
                return res;
            } else if (key.isEmpty()) {
                LOG.trace("Found root directory");
                LOG.debug("getFileStatus(completed) {}", path);
                res = new FileStatus(0, true, 1, 0, 0, path);
                memoryCache.putFileStatus(path.toString(), res);
                return res;
            }
        }
    }
    // Every candidate (exact key, marker key, prefix listing) came up empty.
    LOG.debug("Not found {}. Throw FNF exception", path.toString());
    throw new FileNotFoundException("Not found " + path.toString());
}
Use of com.amazonaws.services.s3.model.AmazonS3Exception in project presto by prestodb:
the class S3SelectLineRecordReader, method readLine.
/**
 * Reads the next line from the S3 Select records stream into {@code value}, opening
 * (or re-opening after a failed read) the content stream lazily, and retrying with
 * exponential backoff. Status codes 403/404/400 are treated as unrecoverable and
 * stop the retry loop immediately.
 *
 * @param value destination buffer for the line that was read
 * @return the value returned by {@code reader.readLine(value)}
 * @throws IOException if the read ultimately fails with an I/O error
 */
private int readLine(Text value) throws IOException {
    try {
        return retry().maxAttempts(maxAttempts).exponentialBackoff(BACKOFF_MIN_SLEEP, maxBackoffTime, maxRetryTime, 2.0).stopOn(InterruptedException.class, UnrecoverableS3OperationException.class).run("readRecordsContentStream", () -> {
            if (isFirstLine) {
                // Lazily open the content stream and line reader on the first read,
                // or re-open after a previous attempt failed (see catch block below).
                recordsFromS3 = 0;
                selectObjectContent = selectClient.getRecordsContent(selectObjectContentRequest);
                closer.register(selectObjectContent);
                reader = new LineReader(selectObjectContent, lineDelimiter.getBytes(StandardCharsets.UTF_8));
                closer.register(reader);
                isFirstLine = false;
            }
            try {
                return reader.readLine(value);
            } catch (RuntimeException e) {
                // Reset state so the next retry attempt re-opens the stream from scratch.
                isFirstLine = true;
                recordsFromS3 = 0;
                if (e instanceof AmazonS3Exception) {
                    switch(((AmazonS3Exception) e).getStatusCode()) {
                        case HTTP_FORBIDDEN:
                        case HTTP_NOT_FOUND:
                        case HTTP_BAD_REQUEST:
                            // Client-side errors will not succeed on retry; abort the loop.
                            throw new UnrecoverableS3OperationException(selectClient.getBucketName(), selectClient.getKeyName(), e);
                    }
                }
                throw e;
            }
        });
    } catch (Exception e) {
        // Unwrap to the exception types callers of this method expect.
        throwIfInstanceOf(e, IOException.class);
        throwIfUnchecked(e);
        throw new RuntimeException(e);
    }
}
Aggregations