Usage of com.amazonaws.services.s3.model.Bucket in the Apache Camel project: the listBuckets method of the AmazonS3ClientMock class.
@Override
public List<Bucket> listBuckets() throws AmazonClientException, AmazonServiceException {
    // Mock implementation: always reports exactly one pre-canned bucket.
    Bucket camelBucket = new Bucket("camel-bucket");
    camelBucket.setOwner(new Owner("Camel", "camel"));
    camelBucket.setCreationDate(new Date());

    List<Bucket> buckets = new ArrayList<Bucket>();
    buckets.add(camelBucket);
    return buckets;
}
Usage of com.amazonaws.services.s3.model.Bucket in the Apache Camel project: the sendCustomHeaderValues method of the S3ComponentNonExistingBucketTest class.
@Test
public void sendCustomHeaderValues() throws Exception {
    result.expectedMessageCount(1);

    final Date lastModified = new Date();
    final Map<String, String> extraHeaders = new HashMap<String, String>();
    extraHeaders.put("x-aws-s3-header", "extra");

    // Produce a message carrying every supported S3 metadata header.
    Exchange exchange = template.send("direct:start", ExchangePattern.InOnly, new Processor() {
        public void process(Exchange exchange) throws Exception {
            exchange.getIn().setHeader(S3Constants.STORAGE_CLASS, "STANDARD");
            exchange.getIn().setHeader(S3Constants.KEY, "CamelUnitTest");
            exchange.getIn().setHeader(S3Constants.CONTENT_LENGTH, 2L);
            exchange.getIn().setHeader(S3Constants.CONTENT_TYPE, "text/html");
            exchange.getIn().setHeader(S3Constants.CACHE_CONTROL, "no-cache");
            exchange.getIn().setHeader(S3Constants.CONTENT_DISPOSITION, "attachment;");
            exchange.getIn().setHeader(S3Constants.CONTENT_ENCODING, "gzip");
            exchange.getIn().setHeader(S3Constants.CONTENT_MD5, "TWF");
            exchange.getIn().setHeader(S3Constants.LAST_MODIFIED, lastModified);
            exchange.getIn().setHeader(S3Constants.S3_HEADERS, extraHeaders);
            exchange.getIn().setBody("This is my bucket content.");
        }
    });

    assertMockEndpointsSatisfied();
    assertResultExchange(result.getExchanges().get(0));

    // Verify every header value made it onto the outgoing PutObjectRequest.
    PutObjectRequest request = client.putObjectRequests.get(0);
    assertEquals("STANDARD", request.getStorageClass());
    assertEquals("nonExistingBucket", request.getBucketName());
    assertEquals(2L, request.getMetadata().getContentLength());
    assertEquals("text/html", request.getMetadata().getContentType());
    assertEquals("no-cache", request.getMetadata().getCacheControl());
    assertEquals("attachment;", request.getMetadata().getContentDisposition());
    assertEquals("gzip", request.getMetadata().getContentEncoding());
    assertEquals("TWF", request.getMetadata().getContentMD5());
    assertEquals(lastModified, request.getMetadata().getLastModified());
    assertEquals("extra", request.getMetadata().getRawMetadataValue("x-aws-s3-header"));

    assertResponseMessage(exchange.getIn());
}
Usage of com.amazonaws.services.s3.model.Bucket in the Apache Camel project: the sendIn method of the S3ComponentExistingBucketTest class.
@Test
public void sendIn() throws Exception {
    result.expectedMessageCount(1);

    // Send a minimal message: only the object key plus a text body.
    Exchange exchange = template.send("direct:start", ExchangePattern.InOnly, new Processor() {
        public void process(Exchange exchange) throws Exception {
            exchange.getIn().setHeader(S3Constants.KEY, "CamelUnitTest");
            exchange.getIn().setBody("This is my bucket content.");
        }
    });

    assertMockEndpointsSatisfied();
    assertResultExchange(result.getExchanges().get(0));

    // Endpoint defaults should supply storage class and bucket name.
    PutObjectRequest request = client.putObjectRequests.get(0);
    assertEquals("REDUCED_REDUNDANCY", request.getStorageClass());
    assertEquals("mycamelbucket", request.getBucketName());

    assertResponseMessage(exchange.getIn());
}
Usage of com.amazonaws.services.s3.model.Bucket in the Apache Camel project: the copyObject method of the S3Producer class.
/**
 * Performs an S3 copy-object operation driven by exchange headers.
 * Reads source bucket/key, destination bucket/key and an optional version id
 * from the IN message, issues the copy, and stores the resulting ETag (and
 * version id, when present) on the response message.
 */
private void copyObject(AmazonS3 s3Client, Exchange exchange) {
    // Source bucket: header value wins, endpoint configuration is the fallback.
    String bucketName = exchange.getIn().getHeader(S3Constants.BUCKET_NAME, String.class);
    if (ObjectHelper.isEmpty(bucketName)) {
        bucketName = getConfiguration().getBucketName();
    }
    String sourceKey = exchange.getIn().getHeader(S3Constants.KEY, String.class);
    String destinationKey = exchange.getIn().getHeader(S3Constants.DESTINATION_KEY, String.class);
    String bucketNameDestination = exchange.getIn().getHeader(S3Constants.BUCKET_DESTINATION_NAME, String.class);
    String versionId = exchange.getIn().getHeader(S3Constants.VERSION_ID, String.class);

    // All four pieces are mandatory; fail fast with a descriptive message.
    if (ObjectHelper.isEmpty(bucketName)) {
        throw new IllegalArgumentException("Bucket Name must be specified for copyObject Operation");
    }
    if (ObjectHelper.isEmpty(bucketNameDestination)) {
        throw new IllegalArgumentException("Bucket Name Destination must be specified for copyObject Operation");
    }
    if (ObjectHelper.isEmpty(sourceKey)) {
        throw new IllegalArgumentException("Source Key must be specified for copyObject Operation");
    }
    if (ObjectHelper.isEmpty(destinationKey)) {
        throw new IllegalArgumentException("Destination Key must be specified for copyObject Operation");
    }

    // Only the version-id-aware constructor when a version was requested.
    CopyObjectRequest copyObjectRequest = ObjectHelper.isEmpty(versionId)
        ? new CopyObjectRequest(bucketName, sourceKey, bucketNameDestination, destinationKey)
        : new CopyObjectRequest(bucketName, sourceKey, versionId, bucketNameDestination, destinationKey);

    CopyObjectResult copyResult = s3Client.copyObject(copyObjectRequest);

    // Surface the copy result on the out-bound message.
    Message message = getMessageForResponse(exchange);
    message.setHeader(S3Constants.E_TAG, copyResult.getETag());
    if (copyResult.getVersionId() != null) {
        message.setHeader(S3Constants.VERSION_ID, copyResult.getVersionId());
    }
}
Usage of com.amazonaws.services.s3.model.Bucket in the Apache Hadoop project: the getObjectMetadata method of the S3AFileSystem class.
/**
 * Requests the metadata of the object stored under {@code key}, updating the
 * metadata-request statistic before the call and the read-operation counter
 * after it.
 * @param key object key within the bucket
 * @return the object's metadata as returned by S3
 */
protected ObjectMetadata getObjectMetadata(String key) {
    incrementStatistic(OBJECT_METADATA_REQUESTS);
    GetObjectMetadataRequest request = new GetObjectMetadataRequest(bucket, key);
    // SSE-C: the customer-provided key must accompany metadata requests too,
    // but only when the algorithm is SSE-C and a key is actually configured.
    boolean sseCInUse = S3AEncryptionMethods.SSE_C.equals(serverSideEncryptionAlgorithm)
        && StringUtils.isNotBlank(getServerSideEncryptionKey(getConf()));
    if (sseCInUse) {
        request.setSSECustomerKey(generateSSECustomerKey());
    }
    ObjectMetadata metadata = s3.getObjectMetadata(request);
    incrementReadOperations();
    return metadata;
}
Aggregations