Use of com.talend.shaded.com.amazonaws.services.s3.AmazonS3 in the project aws-doc-sdk-examples by awsdocs.
Example: the main method of the PutObject class.
/**
 * Uploads a local file to an S3 bucket.
 *
 * <p>Usage: {@code PutObject <bucketname> <filename>} — the object key is the
 * file's base name. Exits with status 1 on bad arguments or on an S3 error.
 */
public static void main(String[] args) {
    final String USAGE = "\n" + "To run this example, supply the name of an S3 bucket and a file to\n" + "upload to it.\n" + "\n" + "Ex: PutObject <bucketname> <filename>\n";
    if (args.length < 2) {
        System.out.println(USAGE);
        System.exit(1);
    }
    String bucketName = args[0];
    String filePath = args[1];
    // The object key is the file's base name, e.g. "/tmp/a.txt" -> "a.txt".
    String keyName = Paths.get(filePath).getFileName().toString();
    System.out.format("Uploading %s to S3 bucket %s...\n", filePath, bucketName);
    final AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
    try {
        // BUGFIX: putObject(String, String, String) uploads the *string itself*
        // as the object content; to upload the file's bytes we must pass a File.
        s3.putObject(bucketName, keyName, Paths.get(filePath).toFile());
    } catch (AmazonServiceException e) {
        System.err.println(e.getErrorMessage());
        System.exit(1);
    }
    System.out.println("Done!");
}
Use of com.talend.shaded.com.amazonaws.services.s3.AmazonS3 in the project aws-xray-sdk-java by aws.
Example: the testS3PutObjectSubsegmentContainsBucketName method of the TracingHandlerTest class.
/**
 * Verifies that an S3 PutObject traced via {@code TracingHandler} produces
 * exactly one subsegment whose AWS metadata records the operation name,
 * the bucket name, and the object key.
 */
@Test
public void testS3PutObjectSubsegmentContainsBucketName() {
    // Setup test: client with the tracing request handler and fake static
    // credentials; the HTTP layer is mocked so no real call leaves the JVM.
    AmazonS3 s3 = AmazonS3ClientBuilder.standard().withRequestHandlers(new TracingHandler()).withRegion(Regions.US_EAST_1).withCredentials(new AWSStaticCredentialsProvider(new BasicAWSCredentials("fake", "fake"))).build();
    mockHttpClient(s3, null);
    final String BUCKET = "test-bucket", KEY = "test-key";
    // Test logic
    Segment segment = AWSXRay.beginSegment("test");
    s3.putObject(BUCKET, KEY, "This is a test from java");
    Assert.assertEquals(1, segment.getSubsegments().size());
    Assert.assertEquals("PutObject", segment.getSubsegments().get(0).getAws().get("operation"));
    // FIX: removed leftover debug System.out.println of the AWS metadata map;
    // the assertions below already pin the values we care about.
    Assert.assertEquals(BUCKET, segment.getSubsegments().get(0).getAws().get("bucket_name"));
    Assert.assertEquals(KEY, segment.getSubsegments().get(0).getAws().get("key"));
}
Use of com.talend.shaded.com.amazonaws.services.s3.AmazonS3 in the project gradle-s3-build-cache by myniva.
Example: the createBuildCacheService method of the AwsS3BuildCacheService­Factory class.
/**
 * Builds the S3-backed Gradle build-cache service from the given configuration.
 *
 * <p>Reports the effective settings through {@code describer}, validates the
 * configuration, then wires a client into a new {@code AwsS3BuildCacheService}.
 */
@Override
public BuildCacheService createBuildCacheService(AwsS3BuildCache config, Describer describer) {
    logger.debug("Start creating S3 build cache service");
    // Mandatory settings are always reported.
    describer.type("AWS S3")
            .config("Region", config.getRegion())
            .config("Bucket", config.getBucket())
            .config("Reduced Redundancy", String.valueOf(config.isReducedRedundancy()));
    // Optional settings are reported only when present.
    final String cachePath = config.getPath();
    if (cachePath != null) {
        describer.config("Path", cachePath);
    }
    final String endpoint = config.getEndpoint();
    if (endpoint != null) {
        describer.config("Endpoint", endpoint);
    }
    verifyConfig(config);
    final AmazonS3 s3Client = createS3Client(config);
    return new AwsS3BuildCacheService(s3Client, config.getBucket(), config.getPath(), config.isReducedRedundancy());
}
Use of com.talend.shaded.com.amazonaws.services.s3.AmazonS3 in the project athenz by yahoo.
Example: the getUpdatedSignedDomains method of the S3ChangeLogStore class.
/**
 * Retrieves all signed domains modified since the store's last-known
 * modification time.
 *
 * @param lastModTimeBuffer receives the timestamp (millis) captured at the
 *        start of the scan, so a later call will not miss objects that S3
 *        receives while paged results are still being processed
 * @return the signed domains that changed; never null (the list may be empty)
 */
@Override
public SignedDomains getUpdatedSignedDomains(StringBuilder lastModTimeBuffer) {
    if (LOGGER.isDebugEnabled()) {
        LOGGER.debug("getUpdatedSignedDomains: Retrieving updating signed domains from S3...");
    }
    // We need save the timestamp at the beginning just in case we end up getting
    // paged results and while processing the last page, S3 gets pushed
    // updated domains from the earlier pages
    lastModTimeBuffer.append(System.currentTimeMillis());
    // AWS S3 API does not provide support for listing objects filtered
    // based on its last modification timestamp so we need to get
    // the full list and filter ourselves
    // instead of using our fetched s3 client, we're going to
    // obtain a new one to get the changes
    AmazonS3 s3 = getS3Client();
    ArrayList<String> domains = new ArrayList<>();
    listObjects(s3, domains, lastModTime);
    if (LOGGER.isInfoEnabled()) {
        LOGGER.info("getUpdatedSignedDomains: {} updated domains", domains.size());
    }
    ArrayList<SignedDomain> signedDomainList = new ArrayList<>();
    for (String domain : domains) {
        // IDIOM: signedDomain was previously declared (null-initialized) outside
        // the loop; it is only used per-iteration, so scope it to the loop body.
        SignedDomain signedDomain = getSignedDomain(s3, domain);
        if (signedDomain != null) {
            signedDomainList.add(signedDomain);
        }
    }
    SignedDomains signedDomains = new SignedDomains();
    signedDomains.setDomains(signedDomainList);
    return signedDomains;
}
Use of com.talend.shaded.com.amazonaws.services.s3.AmazonS3 in the project nifi by apache.
Example: the onTrigger method of the DeleteS3Object class.
/**
 * Deletes one S3 object (or a specific version of it) per incoming FlowFile.
 *
 * <p>Bucket, key, and optional version id come from expression-language-enabled
 * properties evaluated against the FlowFile. On an S3 service error the
 * FlowFile is penalized and routed to failure; otherwise it is routed to
 * success and the elapsed time is logged.
 */
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        // Nothing queued for this trigger.
        return;
    }
    final long begin = System.nanoTime();
    // Resolve the target coordinates from the FlowFile's attributes.
    final String bucket = context.getProperty(BUCKET).evaluateAttributeExpressions(flowFile).getValue();
    final String key = context.getProperty(KEY).evaluateAttributeExpressions(flowFile).getValue();
    final String versionId = context.getProperty(VERSION_ID).evaluateAttributeExpressions(flowFile).getValue();
    final AmazonS3 s3 = getClient();
    try {
        if (versionId != null) {
            // A specific version was requested — delete just that version.
            s3.deleteVersion(new DeleteVersionRequest(bucket, key, versionId));
        } else {
            // No version given — delete the key itself.
            // (S3 reports success even when the object does not exist.)
            s3.deleteObject(new DeleteObjectRequest(bucket, key));
        }
    } catch (final AmazonServiceException ase) {
        getLogger().error("Failed to delete S3 Object for {}; routing to failure", new Object[] { flowFile, ase });
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_FAILURE);
        return;
    }
    session.transfer(flowFile, REL_SUCCESS);
    final long elapsedMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - begin);
    getLogger().info("Successfully delete S3 Object for {} in {} millis; routing to success", new Object[] { flowFile, elapsedMillis });
}
Aggregations