Use of com.talend.shaded.com.amazonaws.services.s3.AmazonS3 in project halyard by spinnaker.
From the class S3Validator, method validate.
/**
 * Validates the S3 persistent store configuration by building an S3 client from the
 * supplied credentials and constructing the storage service, which ensures the
 * configured bucket exists. Any failure is reported as an ERROR problem.
 *
 * @param ps collector for validation problems
 * @param n  the S3 persistent store configuration to validate
 */
@Override
public void validate(ConfigProblemSetBuilder ps, S3PersistentStore n) {
// Custom endpoints (e.g. S3-compatible stores) are skipped; the checks below
// only apply to real AWS endpoints.
if (!StringUtils.isEmpty(n.getEndpoint())) {
return;
}
try {
AWSCredentialsProvider credentialsProvider = AwsAccountValidator.getAwsCredentialsProvider(n.getAccessKeyId(), n.getSecretAccessKey());
S3Config s3Config = new S3Config();
S3Properties s3Properties = new S3Properties();
s3Properties.setBucket(n.getBucket());
s3Properties.setRootFolder(n.getRootFolder());
s3Properties.setRegion(n.getRegion());
AmazonS3 s3Client = s3Config.awsS3Client(credentialsProvider, s3Properties);
// Reuse the S3Config created above rather than allocating a second instance;
// constructing the storage service is what verifies the bucket exists.
s3Config.s3StorageService(s3Client, s3Properties);
} catch (Exception e) {
ps.addProblem(Problem.Severity.ERROR, "Failed to ensure the required bucket \"" + n.getBucket() + "\" exists: " + e.getMessage());
}
}
Use of com.talend.shaded.com.amazonaws.services.s3.AmazonS3 in project opentest by mcdcorp.
From the class GetS3Metadata, method run.
/**
 * Fetches S3 object metadata for the given bucket/key using the specified AWS
 * credentials profile, and writes the metadata fields as test-action outputs.
 */
@Override
public void run() {
super.run();
// "awsProfile" falls back to the "default" credentials profile when not supplied.
String awsCredentialsProfile = this.readStringArgument("awsProfile", "default");
String bucket = this.readStringArgument("bucket");
String objectKey = this.readStringArgument("objectKey");
AmazonS3 s3Client = new AmazonS3Client(new ProfileCredentialsProvider(awsCredentialsProfile));
try {
// Fetch inside the try so a failed metadata request is also reported with
// bucket/key context instead of escaping as a bare SDK exception.
ObjectMetadata metadata = s3Client.getObjectMetadata(new GetObjectMetadataRequest(bucket, objectKey));
// Expiration is only present when a lifecycle rule applies to the object.
Date expirationTime = metadata.getExpirationTime();
if (expirationTime != null) {
this.writeOutput("expirationTime", expirationTime.getTime());
} else {
this.writeOutput("expirationTime", null);
}
this.writeOutput("lastModified", metadata.getLastModified().getTime());
this.writeOutput("userMetadata", metadata.getUserMetadata());
this.writeOutput("size", metadata.getContentLength());
this.writeOutput("storageClass", metadata.getStorageClass());
this.writeOutput("versionId", metadata.getVersionId());
} catch (Exception ex) {
throw new RuntimeException(String.format("Failed to get object metadata for object key %s in bucket %s", objectKey, bucket), ex);
}
}
Use of com.talend.shaded.com.amazonaws.services.s3.AmazonS3 in project SimpleContentPortlet by Jasig.
From the class AmazonS3PersistenceStrategy, method persistAttachmentBinary.
/**
 * Uploads the attachment's binary content to Amazon S3 and returns the public URL
 * under which it was stored. Returns null when there is no binary payload to persist
 * (the data-import case, which only updates attachment metadata in the database).
 *
 * @throws PersistenceException if the S3 upload fails
 */
@Override
public String persistAttachmentBinary(HttpServletRequest request, Attachment attachment) throws PersistenceException {
// No request and no payload: data import is only updating metadata in the database.
if (request == null && attachment.getData() == null) {
return null;
}
AmazonS3 s3Client = new AmazonS3Client();
String objectKey = PATH_FORMAT.format(new Object[] { s3BucketPath, attachment.getGuid(), attachment.getFilename() });
ObjectMetadata objectMetadata = new ObjectMetadata();
objectMetadata.setContentType(attachment.getContentType());
objectMetadata.setContentLength(attachment.getData().length);
objectMetadata.setCacheControl(s3CacheControlString);
// S3 expects the MD5 as a base64-encoded digest rather than the usual
// 32-character hex string, so convert the stored hex checksum accordingly.
objectMetadata.setContentMD5(Base64.encodeBase64String(DatatypeConverter.parseHexBinary(attachment.getChecksum())));
try {
s3Client.putObject(new PutObjectRequest(s3BucketName, objectKey, new ByteArrayInputStream(attachment.getData()), objectMetadata));
log.debug("Successfully sent {} to S3 bucket {} under key {}", attachment.getFilename(), s3BucketName, objectKey);
} catch (AmazonClientException e) {
String message = String.format("Unable to persist attachment %1s to S3 bucket %2s, key %3s", attachment.getFilename(), s3BucketName, objectKey);
throw new PersistenceException(message, e);
}
String baseUrl = s3BucketBaseUrl.endsWith("/") ? s3BucketBaseUrl : s3BucketBaseUrl + "/";
return baseUrl + objectKey;
}
Use of com.talend.shaded.com.amazonaws.services.s3.AmazonS3 in project tutorials by eugenp.
From the class MultipartUpload, method main.
/**
 * Demonstrates a multipart upload to S3 via TransferManager: files larger than the
 * 5 MB threshold are split into parts and uploaded in parallel by a fixed thread pool.
 */
public static void main(String[] args) throws Exception {
String existingBucketName = "baeldung-bucket";
String keyName = "my-picture.jpg";
String filePath = "documents/my-picture.jpg";
AmazonS3 amazonS3 = AmazonS3ClientBuilder.standard().withCredentials(new DefaultAWSCredentialsProviderChain()).withRegion(Regions.DEFAULT_REGION).build();
int maxUploadThreads = 5;
// Uploads above 5 MB are transparently split into parts by the TransferManager.
TransferManager tm = TransferManagerBuilder.standard().withS3Client(amazonS3).withMultipartUploadThreshold((long) (5 * 1024 * 1024)).withExecutorFactory(() -> Executors.newFixedThreadPool(maxUploadThreads)).build();
try {
ProgressListener progressListener = progressEvent -> System.out.println("Transferred bytes: " + progressEvent.getBytesTransferred());
PutObjectRequest request = new PutObjectRequest(existingBucketName, keyName, new File(filePath));
request.setGeneralProgressListener(progressListener);
Upload upload = tm.upload(request);
upload.waitForCompletion();
System.out.println("Upload complete.");
} catch (AmazonClientException e) {
System.out.println("Error occurred while uploading file");
e.printStackTrace();
} finally {
// Shut down the TransferManager's thread pool so the JVM can exit;
// passing false leaves the underlying S3 client open.
tm.shutdownNow(false);
}
}
Use of com.talend.shaded.com.amazonaws.services.s3.AmazonS3 in project components by Talend.
From the class S3DatasetRuntimeTestIT, method listBuckets.
/**
 * Integration test: for each testable region, creates a uniquely named bucket,
 * verifies the runtime's listBuckets() reports it, then deletes it.
 */
@Test
@Ignore("It's slow (10 or more mins), our account doesn't allow to create this amount of buckets")
public void listBuckets() {
// Random suffix so concurrently running builds don't collide on bucket names.
String suffix = UUID.randomUUID().toString().substring(0, 8);
String bucketNameTemplate = "tcomp-s3-dataset-test-%s-" + suffix;
S3DatasetProperties datasetProperties = s3.createS3DatasetProperties();
runtime.initialize(null, datasetProperties);
AmazonS3 s3Client = S3Connection.createClient(s3.createS3DatastoreProperties());
for (S3Region region : getTestableS3Regions()) {
s3Client.setEndpoint(region.toEndpoint());
String bucketName = String.format(bucketNameTemplate, region.getValue());
// us-east-1 is the default region and takes no explicit location constraint.
if (region.equals(S3Region.US_EAST_1)) {
s3Client.createBucket(bucketName);
} else {
s3Client.createBucket(bucketName, region.getValue());
}
datasetProperties.region.setValue(region);
Set<String> bucketNames = runtime.listBuckets();
assertTrue(bucketNames.size() > 0);
assertThat(bucketNames, hasItems(bucketName));
s3Client.setEndpoint(region.toEndpoint());
s3Client.deleteBucket(bucketName);
}
}
Aggregations