Use of com.amazonaws.SdkBaseException in project aws-appflow-custom-connector-java by awslabs.
In the class EntitlementUtil, the method checkEntitlement.
/**
 * Checks whether the connector subscribed through AWS Marketplace is entitled for use by an AWS account.
 *
 * @param productSKU - Product SKU generated by Marketplace at the time of connector registration.
 * @return - True if the entitlement check succeeds, otherwise false.
 */
public boolean checkEntitlement(final String productSKU) {
    final CheckoutLicenseRequest request = new CheckoutLicenseRequest()
            .withCheckoutType(CheckoutType.PROVISIONAL)
            .withProductSKU(productSKU)
            .withKeyFingerprint(KEY_FINGERPRINT)
            .withEntitlements(Collections.singleton(getMarketplaceUsageEntitlement()))
            .withClientToken(UUID.randomUUID().toString());
    try {
        final CheckoutLicenseResult result = client.checkoutLicense(request);
        final List<String> entitlements = result.getEntitlementsAllowed().stream()
                .map(EntitlementData::getName)
                .collect(Collectors.toList());
        if (entitlements.contains(AWS_MARKETPLACE_USAGE_ENTITLEMENT_NAME)) {
            return true;
        }
    } catch (SdkBaseException ex) {
        LOGGER.error("Entitlement check failed with exception: " + ExceptionUtils.getStackTrace(ex));
    }
    return false;
}
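For context, a minimal caller sketch is shown below. The no-argument EntitlementUtil constructor and the product SKU string are assumptions used only for illustration; they are not part of the project source.
// Hypothetical caller: gate connector traffic on the Marketplace entitlement check.
// The no-arg constructor and the SKU value below are illustrative assumptions.
final EntitlementUtil entitlementUtil = new EntitlementUtil();
final String productSKU = "example-product-sku"; // placeholder value
if (!entitlementUtil.checkEntitlement(productSKU)) {
    throw new IllegalStateException("AWS Marketplace entitlement check failed for SKU " + productSKU);
}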
Use of com.amazonaws.SdkBaseException in project nexus-public by sonatype.
In the class ProducerConsumerUploader, the method upload.
@Override
@Guarded(by = STARTED)
@Timed
public void upload(final AmazonS3 s3, final String bucket, final String key, final InputStream contents) {
    try (InputStream input = new BufferedInputStream(contents, chunkSize)) {
        log.debug("Starting upload to key {} in bucket {}", key, bucket);
        input.mark(chunkSize);
        ChunkReader firstReader = new ChunkReader(input, readChunk);
        Chunk firstChunk = firstReader.readChunk(chunkSize).orElse(EMPTY_CHUNK);
        input.reset();
        if (firstChunk.dataLength < chunkSize) {
            ObjectMetadata metadata = new ObjectMetadata();
            metadata.setContentLength(firstChunk.dataLength);
            s3.putObject(bucket, key, new ByteArrayInputStream(firstChunk.data, 0, firstChunk.dataLength), metadata);
        } else {
            InitiateMultipartUploadRequest initiateRequest = new InitiateMultipartUploadRequest(bucket, key);
            final String uploadId = s3.initiateMultipartUpload(initiateRequest).getUploadId();
            try (Timer.Context uploadContext = multipartUpload.time()) {
                List<PartETag> partETags = submitPartUploads(input, bucket, key, uploadId, s3);
                s3.completeMultipartUpload(new CompleteMultipartUploadRequest()
                        .withBucketName(bucket)
                        .withKey(key)
                        .withUploadId(uploadId)
                        .withPartETags(partETags));
            } catch (InterruptedException interrupted) {
                s3.abortMultipartUpload(new AbortMultipartUploadRequest(bucket, key, uploadId));
                Thread.currentThread().interrupt();
            } catch (CancellationException | SdkBaseException ex) {
                s3.abortMultipartUpload(new AbortMultipartUploadRequest(bucket, key, uploadId));
                throw new BlobStoreException(format("Error executing parallel requests for bucket:%s key:%s with uploadId:%s", bucket, key, uploadId), ex, null);
            }
        }
        log.debug("Finished upload to key {} in bucket {}", key, bucket);
    } catch (IOException | SdkClientException e) { // NOSONAR
        throw new BlobStoreException(format("Error uploading blob to bucket:%s key:%s", bucket, key), e, null);
    }
}
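A minimal caller sketch, assuming an already-constructed ProducerConsumerUploader (referred to here as uploader) and default AWS credentials; the bucket name, key, and local file path are placeholders, not values from the project.
// Hypothetical caller: stream a local file into S3 through the uploader.
// The uploader instance, bucket, key, and path are illustrative assumptions.
AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
try (InputStream in = Files.newInputStream(Paths.get("/tmp/blob.bin"))) {
    uploader.upload(s3, "example-bucket", "content/blob.bin", in);
}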