Use of com.amazonaws.AmazonClientException in project crate by crate.
The class AwsEc2SeedHostsProvider, method fetchDynamicNodes.
private List<TransportAddress> fetchDynamicNodes() {
    final List<TransportAddress> dynamicHosts = new ArrayList<>();
    final DescribeInstancesResult descInstances;
    try (AmazonEc2Reference clientReference = awsEc2Service.client()) {
        // Query the EC2 API based on availability zone, instance state, and tag.
        // NOTE: we don't filter by security group in the DescribeInstances request for two reasons:
        // 1. differences in VPCs require different query parameters (ID vs. name)
        // 2. we want to support two different strategies: all security groups vs. any security group
        descInstances = clientReference.client().describeInstances(buildDescribeInstancesRequest());
    } catch (final AmazonClientException e) {
        LOGGER.info("Exception while retrieving instance list from AWS API: {}", e.getMessage());
        LOGGER.debug("Full exception:", e);
        return dynamicHosts;
    }
    LOGGER.trace("finding seed nodes...");
    for (final Reservation reservation : descInstances.getReservations()) {
        for (final Instance instance : reservation.getInstances()) {
            // let's see if we can filter based on groups
            if (!groups.isEmpty()) {
                final List<GroupIdentifier> instanceSecurityGroups = instance.getSecurityGroups();
                final List<String> securityGroupNames = new ArrayList<>(instanceSecurityGroups.size());
                final List<String> securityGroupIds = new ArrayList<>(instanceSecurityGroups.size());
                for (final GroupIdentifier sg : instanceSecurityGroups) {
                    securityGroupNames.add(sg.getGroupName());
                    securityGroupIds.add(sg.getGroupId());
                }
                if (bindAnyGroup) {
                    // We check if we can find at least one group name or one group id in groups.
                    if (disjoint(securityGroupNames, groups) && disjoint(securityGroupIds, groups)) {
                        LOGGER.trace("filtering out instance {} based on groups {}, not part of {}", instance.getInstanceId(), instanceSecurityGroups, groups);
                        // continue to the next instance
                        continue;
                    }
                } else {
                    // We need to match all group names or group ids, otherwise we ignore this instance.
                    if (!(securityGroupNames.containsAll(groups) || securityGroupIds.containsAll(groups))) {
                        LOGGER.trace("filtering out instance {} based on groups {}, does not include all of {}", instance.getInstanceId(), instanceSecurityGroups, groups);
                        // continue to the next instance
                        continue;
                    }
                }
            }
            String address = null;
            if (hostType.equals(PRIVATE_DNS)) {
                address = instance.getPrivateDnsName();
            } else if (hostType.equals(PRIVATE_IP)) {
                address = instance.getPrivateIpAddress();
            } else if (hostType.equals(PUBLIC_DNS)) {
                address = instance.getPublicDnsName();
            } else if (hostType.equals(PUBLIC_IP)) {
                address = instance.getPublicIpAddress();
            } else if (hostType.startsWith(TAG_PREFIX)) {
                // Read the node address from one of the instance's tags.
                final String tagName = hostType.substring(TAG_PREFIX.length());
                LOGGER.debug("reading hostname from [{}] instance tag", tagName);
                final List<Tag> tags = instance.getTags();
                for (final Tag tag : tags) {
                    if (tag.getKey().equals(tagName)) {
                        address = tag.getValue();
                        LOGGER.debug("using [{}] as the instance address", address);
                    }
                }
            } else {
                throw new IllegalArgumentException(hostType + " is unknown for discovery.ec2.host_type");
            }
            if (address != null) {
                try {
                    final TransportAddress[] addresses = transportService.addressesFromString(address);
                    for (int i = 0; i < addresses.length; i++) {
                        LOGGER.trace("adding {}, address {}, transport_address {}", instance.getInstanceId(), address, addresses[i]);
                        dynamicHosts.add(addresses[i]);
                    }
                } catch (final Exception e) {
                    final String finalAddress = address;
                    LOGGER.warn((Supplier<?>) () -> new ParameterizedMessage("failed to add {}, address {}", instance.getInstanceId(), finalAddress), e);
                }
            } else {
                LOGGER.trace("not adding {}, address is null, host_type {}", instance.getInstanceId(), hostType);
            }
        }
    }
    LOGGER.debug("using dynamic transport addresses {}", dynamicHosts);
    return dynamicHosts;
}
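The buildDescribeInstancesRequest() helper called at the top of the method is not part of this excerpt. A minimal sketch of what such a request builder might look like, assuming hypothetical tags and availabilityZones fields on the provider (neither name is taken from the snippet above):

// Hypothetical sketch: describe only running/pending instances, with optional
// tag and availability-zone filters. Field names `tags` and `availabilityZones`
// are assumptions for illustration.
private DescribeInstancesRequest buildDescribeInstancesRequest() {
    final DescribeInstancesRequest request = new DescribeInstancesRequest()
        .withFilters(new Filter("instance-state-name").withValues("running", "pending"));
    for (final Map.Entry<String, List<String>> tagFilter : tags.entrySet()) {
        // EC2 tag filters use the filter name "tag:<key>".
        request.getFilters().add(new Filter("tag:" + tagFilter.getKey()).withValues(tagFilter.getValue()));
    }
    if (!availabilityZones.isEmpty()) {
        request.getFilters().add(new Filter("availability-zone").withValues(availabilityZones));
    }
    return request;
}

Filtering server-side like this keeps the DescribeInstances response small, while the security-group matching above is done client-side for the reasons given in the NOTE comment.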
Use of com.amazonaws.AmazonClientException in project crate by crate.
The class S3BlobContainer, method executeSingleUpload.
/**
 * Uploads a blob using a single upload request.
*/
void executeSingleUpload(final S3BlobStore blobStore, final String blobName, final InputStream input, final long blobSize) throws IOException {
    // Extra safety checks
    if (blobSize > MAX_FILE_SIZE.getBytes()) {
        throw new IllegalArgumentException("Upload request size [" + blobSize + "] can't be larger than " + MAX_FILE_SIZE);
    }
    if (blobSize > blobStore.bufferSizeInBytes()) {
        throw new IllegalArgumentException("Upload request size [" + blobSize + "] can't be larger than buffer size");
    }
    final ObjectMetadata md = new ObjectMetadata();
    md.setContentLength(blobSize);
    if (blobStore.serverSideEncryption()) {
        md.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
    }
    final PutObjectRequest putRequest = new PutObjectRequest(blobStore.bucket(), blobName, input, md);
    putRequest.setStorageClass(blobStore.getStorageClass());
    putRequest.setCannedAcl(blobStore.getCannedACL());
    try (AmazonS3Reference clientReference = blobStore.clientReference()) {
        clientReference.client().putObject(putRequest);
    } catch (final AmazonClientException e) {
        throw new IOException("Unable to upload object [" + blobName + "] using a single upload", e);
    }
}
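The buffer-size check implies that callers route small blobs here and large blobs to the multipart path below. A minimal sketch of such a dispatch (the writeBlob signature is an assumption, not shown in these excerpts):

// Hypothetical caller: blobs that fit into the configured buffer go through a
// single PUT; anything larger is split into a multipart upload.
public void writeBlob(String blobName, InputStream inputStream, long blobSize) throws IOException {
    if (blobSize <= blobStore.bufferSizeInBytes()) {
        executeSingleUpload(blobStore, blobName, inputStream, blobSize);
    } else {
        executeMultipartUpload(blobStore, blobName, inputStream, blobSize);
    }
}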
Use of com.amazonaws.AmazonClientException in project crate by crate.
The class S3BlobContainer, method executeMultipartUpload.
/**
* Uploads a blob using multipart upload requests.
*/
void executeMultipartUpload(final S3BlobStore blobStore, final String blobName, final InputStream input, final long blobSize) throws IOException {
    if (blobSize > MAX_FILE_SIZE_USING_MULTIPART.getBytes()) {
        throw new IllegalArgumentException("Multipart upload request size [" + blobSize + "] can't be larger than " + MAX_FILE_SIZE_USING_MULTIPART);
    }
    if (blobSize < MIN_PART_SIZE_USING_MULTIPART.getBytes()) {
        throw new IllegalArgumentException("Multipart upload request size [" + blobSize + "] can't be smaller than " + MIN_PART_SIZE_USING_MULTIPART);
    }
    final long partSize = blobStore.bufferSizeInBytes();
    final Tuple<Long, Long> multiparts = numberOfMultiparts(blobSize, partSize);
    if (multiparts.v1() > Integer.MAX_VALUE) {
        throw new IllegalArgumentException("Too many multipart upload requests, maybe try a larger buffer size?");
    }
    final int nbParts = multiparts.v1().intValue();
    final long lastPartSize = multiparts.v2();
    assert blobSize == (((nbParts - 1) * partSize) + lastPartSize) : "blobSize does not match multipart sizes";
    final SetOnce<String> uploadId = new SetOnce<>();
    final String bucketName = blobStore.bucket();
    boolean success = false;
    final InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName, blobName);
    initRequest.setStorageClass(blobStore.getStorageClass());
    initRequest.setCannedACL(blobStore.getCannedACL());
    if (blobStore.serverSideEncryption()) {
        final ObjectMetadata md = new ObjectMetadata();
        md.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        initRequest.setObjectMetadata(md);
    }
    try (AmazonS3Reference clientReference = blobStore.clientReference()) {
        uploadId.set(clientReference.client().initiateMultipartUpload(initRequest).getUploadId());
        if (Strings.isEmpty(uploadId.get())) {
            throw new IOException("Failed to initialize multipart upload " + blobName);
        }
        final List<PartETag> parts = new ArrayList<>();
        long bytesCount = 0;
        for (int i = 1; i <= nbParts; i++) {
            final UploadPartRequest uploadRequest = new UploadPartRequest();
            uploadRequest.setBucketName(bucketName);
            uploadRequest.setKey(blobName);
            uploadRequest.setUploadId(uploadId.get());
            uploadRequest.setPartNumber(i);
            uploadRequest.setInputStream(input);
            if (i < nbParts) {
                uploadRequest.setPartSize(partSize);
                uploadRequest.setLastPart(false);
            } else {
                uploadRequest.setPartSize(lastPartSize);
                uploadRequest.setLastPart(true);
            }
            bytesCount += uploadRequest.getPartSize();
            final UploadPartResult uploadResponse = clientReference.client().uploadPart(uploadRequest);
            parts.add(uploadResponse.getPartETag());
        }
        if (bytesCount != blobSize) {
            throw new IOException("Failed to execute multipart upload for [" + blobName + "], expected " + blobSize + " bytes sent but got " + bytesCount);
        }
        final CompleteMultipartUploadRequest complRequest = new CompleteMultipartUploadRequest(bucketName, blobName, uploadId.get(), parts);
        clientReference.client().completeMultipartUpload(complRequest);
        success = true;
    } catch (final AmazonClientException e) {
        throw new IOException("Unable to upload object [" + blobName + "] using multipart upload", e);
    } finally {
        if ((success == false) && Strings.hasLength(uploadId.get())) {
            final AbortMultipartUploadRequest abortRequest = new AbortMultipartUploadRequest(bucketName, blobName, uploadId.get());
            try (AmazonS3Reference clientReference = blobStore.clientReference()) {
                clientReference.client().abortMultipartUpload(abortRequest);
            }
        }
    }
}
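The numberOfMultiparts(blobSize, partSize) helper is used above but not shown. From the assertion blobSize == ((nbParts - 1) * partSize) + lastPartSize, a sketch consistent with that contract could be:

// Hypothetical sketch: returns (number of parts, size of the last part) such
// that totalSize == (parts - 1) * partSize + lastPartSize always holds.
static Tuple<Long, Long> numberOfMultiparts(final long totalSize, final long partSize) {
    if (partSize <= 0) {
        throw new IllegalArgumentException("Part size must be greater than zero");
    }
    if (totalSize == 0L || totalSize <= partSize) {
        // Everything fits into a single part.
        return Tuple.tuple(1L, totalSize);
    }
    final long parts = totalSize / partSize;
    final long remaining = totalSize % partSize;
    if (remaining == 0) {
        // The blob divides evenly: the last part is a full part.
        return Tuple.tuple(parts, partSize);
    }
    return Tuple.tuple(parts + 1, remaining);
}

For example, a 10 MiB blob with a 4 MiB part size yields (3, 2 MiB): two full parts plus a 2 MiB final part, which satisfies the assertion above.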
Use of com.amazonaws.AmazonClientException in project crate by crate.
The class S3BlobContainer, method doDeleteBlobs.
private void doDeleteBlobs(List<String> blobNames, boolean relative) throws IOException {
    if (blobNames.isEmpty()) {
        return;
    }
    final Set<String> outstanding;
    if (relative) {
        outstanding = blobNames.stream().map(this::buildKey).collect(Collectors.toSet());
    } else {
        outstanding = new HashSet<>(blobNames);
    }
    try (AmazonS3Reference clientReference = blobStore.clientReference()) {
        // The S3 API only allows 1k blobs per delete, so we split the given blobs into requests of at most 1k keys each.
        final List<DeleteObjectsRequest> deleteRequests = new ArrayList<>();
        final List<String> partition = new ArrayList<>();
        for (String key : outstanding) {
            partition.add(key);
            if (partition.size() == MAX_BULK_DELETES) {
                deleteRequests.add(bulkDelete(blobStore.bucket(), partition));
                partition.clear();
            }
        }
        if (partition.isEmpty() == false) {
            deleteRequests.add(bulkDelete(blobStore.bucket(), partition));
        }
        AmazonClientException aex = null;
        for (DeleteObjectsRequest deleteRequest : deleteRequests) {
            List<String> keysInRequest = deleteRequest.getKeys().stream().map(DeleteObjectsRequest.KeyVersion::getKey).collect(Collectors.toList());
            try {
                clientReference.client().deleteObjects(deleteRequest);
                outstanding.removeAll(keysInRequest);
            } catch (MultiObjectDeleteException e) {
                // We are sending quiet-mode requests, so we can't use the deleted-keys entry on the exception. Instead we
                // first remove all keys that were sent in the request and then add back those that ran into an exception.
                outstanding.removeAll(keysInRequest);
                outstanding.addAll(e.getErrors().stream().map(MultiObjectDeleteException.DeleteError::getKey).collect(Collectors.toSet()));
                aex = ExceptionsHelper.useOrSuppress(aex, e);
            } catch (AmazonClientException e) {
                // The AWS client threw an unexpected exception and did not execute the request at all, so we do not
                // remove any keys from the outstanding deletes set.
                aex = ExceptionsHelper.useOrSuppress(aex, e);
            }
        }
        if (aex != null) {
            throw aex;
        }
    } catch (Exception e) {
        throw new IOException("Failed to delete blobs [" + outstanding + "]", e);
    }
    assert outstanding.isEmpty();
}
Use of com.amazonaws.AmazonClientException in project crate by crate.
The class S3BlobContainer, method deleteBlobsIgnoringIfNotExists.
@Override
public void deleteBlobsIgnoringIfNotExists(List<String> blobNames) throws IOException {
    if (blobNames.isEmpty()) {
        return;
    }
    final Set<String> outstanding = blobNames.stream().map(this::buildKey).collect(Collectors.toSet());
    try (AmazonS3Reference clientReference = blobStore.clientReference()) {
        // The S3 API only allows 1k blobs per delete, so we split the given blobs into requests of at most 1k keys each.
        final List<DeleteObjectsRequest> deleteRequests = new ArrayList<>();
        final List<String> partition = new ArrayList<>();
        for (String key : outstanding) {
            partition.add(key);
            if (partition.size() == MAX_BULK_DELETES) {
                deleteRequests.add(bulkDelete(blobStore.bucket(), partition));
                partition.clear();
            }
        }
        if (partition.isEmpty() == false) {
            deleteRequests.add(bulkDelete(blobStore.bucket(), partition));
        }
        AmazonClientException aex = null;
        for (DeleteObjectsRequest deleteRequest : deleteRequests) {
            List<String> keysInRequest = deleteRequest.getKeys().stream().map(DeleteObjectsRequest.KeyVersion::getKey).collect(Collectors.toList());
            try {
                clientReference.client().deleteObjects(deleteRequest);
                outstanding.removeAll(keysInRequest);
            } catch (MultiObjectDeleteException e) {
                // We are sending quiet-mode requests, so we can't use the deleted-keys entry on the exception. Instead we
                // first remove all keys that were sent in the request and then add back those that ran into an exception.
                outstanding.removeAll(keysInRequest);
                outstanding.addAll(e.getErrors().stream().map(MultiObjectDeleteException.DeleteError::getKey).collect(Collectors.toSet()));
                aex = ExceptionsHelper.useOrSuppress(aex, e);
            } catch (AmazonClientException e) {
                // The AWS client threw an unexpected exception and did not execute the request at all, so we do not
                // remove any keys from the outstanding deletes set.
                aex = ExceptionsHelper.useOrSuppress(aex, e);
            }
        }
        if (aex != null) {
            throw aex;
        }
    } catch (Exception e) {
        throw new IOException("Failed to delete blobs [" + outstanding + "]", e);
    }
    assert outstanding.isEmpty();
}