Use of com.amazonaws.services.s3.model.ObjectMetadata in project camel by apache.
The class S3Producer, method processSingleOp:
public void processSingleOp(final Exchange exchange) throws Exception {
    ObjectMetadata objectMetadata = determineMetadata(exchange);
    File filePayload = null;
    InputStream is = null;
    Object obj = exchange.getIn().getMandatoryBody();
    PutObjectRequest putObjectRequest = null;
    // Need to check if the message body is WrappedFile
    if (obj instanceof WrappedFile) {
        obj = ((WrappedFile<?>) obj).getFile();
    }
    if (obj instanceof File) {
        filePayload = (File) obj;
        is = new FileInputStream(filePayload);
    } else {
        is = exchange.getIn().getMandatoryBody(InputStream.class);
    }
    putObjectRequest = new PutObjectRequest(getConfiguration().getBucketName(), determineKey(exchange), is, objectMetadata);
    String storageClass = determineStorageClass(exchange);
    if (storageClass != null) {
        putObjectRequest.setStorageClass(storageClass);
    }
    String cannedAcl = exchange.getIn().getHeader(S3Constants.CANNED_ACL, String.class);
    if (cannedAcl != null) {
        CannedAccessControlList objectAcl = CannedAccessControlList.valueOf(cannedAcl);
        putObjectRequest.setCannedAcl(objectAcl);
    }
    AccessControlList acl = exchange.getIn().getHeader(S3Constants.ACL, AccessControlList.class);
    if (acl != null) {
        // note: if cannedAcl and acl are both specified, the one set last wins; refer to
        // PutObjectRequest#setAccessControlList for more details
        putObjectRequest.setAccessControlList(acl);
    }
    LOG.trace("Put object [{}] from exchange [{}]...", putObjectRequest, exchange);
    PutObjectResult putObjectResult = getEndpoint().getS3Client().putObject(putObjectRequest);
    LOG.trace("Received result [{}]", putObjectResult);
    Message message = getMessageForResponse(exchange);
    message.setHeader(S3Constants.E_TAG, putObjectResult.getETag());
    if (putObjectResult.getVersionId() != null) {
        message.setHeader(S3Constants.VERSION_ID, putObjectResult.getVersionId());
    }
    if (getConfiguration().isDeleteAfterWrite() && filePayload != null) {
        // close streams before deleting the local file
        IOHelper.close(putObjectRequest.getInputStream());
        IOHelper.close(is);
        FileUtil.deleteFile(filePayload);
    }
}
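The determineMetadata helper (not shown) assembles the ObjectMetadata from exchange headers. For readers unfamiliar with the underlying SDK call, here is a minimal standalone sketch of the same put-with-metadata pattern; the client setup, bucket name, and key are hypothetical placeholders, not part of the Camel code above.

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.services.s3.model.PutObjectResult;
import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;

public class PutWithMetadataSketch {
    public static void main(String[] args) {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        byte[] body = "hello".getBytes(StandardCharsets.UTF_8);
        ObjectMetadata metadata = new ObjectMetadata();
        // With a raw InputStream the SDK cannot discover the length, so set it
        // explicitly; otherwise the client buffers the whole stream in memory.
        metadata.setContentLength(body.length);
        metadata.setContentType("text/plain");
        // "my-bucket" and "my-key" are placeholders.
        PutObjectRequest request = new PutObjectRequest("my-bucket", "my-key",
                new ByteArrayInputStream(body), metadata);
        PutObjectResult result = s3.putObject(request);
        System.out.println("ETag: " + result.getETag());
    }
}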
Use of com.amazonaws.services.s3.model.ObjectMetadata in project hadoop by apache.
The class S3AFileSystem, method getObjectMetadata:
/**
 * Request object metadata; increments counters in the process.
 * @param key key
 * @return the metadata
 */
protected ObjectMetadata getObjectMetadata(String key) {
    incrementStatistic(OBJECT_METADATA_REQUESTS);
    GetObjectMetadataRequest request = new GetObjectMetadataRequest(bucket, key);
    // SSE-C: if enabled, the customer key must be supplied on metadata requests too
    if (S3AEncryptionMethods.SSE_C.equals(serverSideEncryptionAlgorithm) && StringUtils.isNotBlank(getServerSideEncryptionKey(getConf()))) {
        request.setSSECustomerKey(generateSSECustomerKey());
    }
    ObjectMetadata meta = s3.getObjectMetadata(request);
    incrementReadOperations();
    return meta;
}
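generateSSECustomerKey is an S3AFileSystem helper; with the plain AWS SDK, the equivalent is wrapping the base64-encoded key material in an SSECustomerKey. A minimal sketch under that assumption (the bucket, key, and base64 string are placeholders):

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.GetObjectMetadataRequest;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.SSECustomerKey;

public class SseCMetadataSketch {
    static ObjectMetadata headWithSseC(AmazonS3 s3, String bucket, String key, String base64Key) {
        GetObjectMetadataRequest request = new GetObjectMetadataRequest(bucket, key);
        // SSE-C: the same customer-provided key used at write time must accompany
        // every subsequent HEAD/GET of the object, or S3 rejects the request.
        request.setSSECustomerKey(new SSECustomerKey(base64Key));
        return s3.getObjectMetadata(request);
    }
}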
Use of com.amazonaws.services.s3.model.ObjectMetadata in project hadoop by apache.
The class S3AFileSystem, method copyFile:
/**
 * Copy a single object in the bucket via a COPY operation.
 * @param srcKey source object path
 * @param dstKey destination object path
 * @param size object size
 * @throws AmazonClientException on failures inside the AWS SDK
 * @throws InterruptedIOException if the operation was interrupted
 * @throws IOException other IO problems
 */
private void copyFile(String srcKey, String dstKey, long size) throws IOException, InterruptedIOException, AmazonClientException {
    LOG.debug("copyFile {} -> {} ", srcKey, dstKey);
    try {
        ObjectMetadata srcom = getObjectMetadata(srcKey);
        ObjectMetadata dstom = cloneObjectMetadata(srcom);
        setOptionalObjectMetadata(dstom);
        CopyObjectRequest copyObjectRequest = new CopyObjectRequest(bucket, srcKey, bucket, dstKey);
        setOptionalCopyObjectRequestParameters(copyObjectRequest);
        copyObjectRequest.setCannedAccessControlList(cannedACL);
        copyObjectRequest.setNewObjectMetadata(dstom);
        ProgressListener progressListener = new ProgressListener() {

            public void progressChanged(ProgressEvent progressEvent) {
                switch (progressEvent.getEventType()) {
                    case TRANSFER_PART_COMPLETED_EVENT:
                        incrementWriteOperations();
                        break;
                    default:
                        break;
                }
            }
        };
        Copy copy = transfers.copy(copyObjectRequest);
        copy.addProgressListener(progressListener);
        try {
            copy.waitForCopyResult();
            incrementWriteOperations();
            instrumentation.filesCopied(1, size);
        } catch (InterruptedException e) {
            throw new InterruptedIOException("Interrupted copying " + srcKey + " to " + dstKey + ", cancelling");
        }
    } catch (AmazonClientException e) {
        throw translateException("copyFile(" + srcKey + ", " + dstKey + ")", srcKey, e);
    }
}
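cloneObjectMetadata, another S3AFileSystem helper, exists because ObjectMetadata mixes caller-settable attributes with system ones (ETag, Last-Modified) that cannot be set on a new object, so the fields are copied individually. A much-simplified sketch of the idea; the real method copies many more attributes:

import com.amazonaws.services.s3.model.ObjectMetadata;
import java.util.HashMap;

public class CloneMetadataSketch {
    static ObjectMetadata cloneForCopy(ObjectMetadata source) {
        ObjectMetadata copy = new ObjectMetadata();
        // Copy only the attributes a client is allowed to set on a new object.
        copy.setContentType(source.getContentType());
        copy.setContentEncoding(source.getContentEncoding());
        copy.setCacheControl(source.getCacheControl());
        copy.setContentDisposition(source.getContentDisposition());
        copy.setUserMetadata(new HashMap<>(source.getUserMetadata()));
        return copy;
    }
}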
Use of com.amazonaws.services.s3.model.ObjectMetadata in project hadoop by apache.
The class S3AFileSystem, method newPutObjectRequest:
/**
 * Create a {@link PutObjectRequest} request.
 * The metadata is assumed to have been configured with the size of the
 * operation.
 * @param key key of object
 * @param metadata metadata header
 * @param inputStream source data.
 * @return the request
 */
private PutObjectRequest newPutObjectRequest(String key, ObjectMetadata metadata, InputStream inputStream) {
    Preconditions.checkNotNull(inputStream);
    PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, inputStream, metadata);
    setOptionalPutRequestParameters(putObjectRequest);
    putObjectRequest.setCannedAcl(cannedACL);
    return putObjectRequest;
}
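As the javadoc stresses, the caller must set the content length on the metadata beforehand. A hypothetical call site inside the same class (transfers is the TransferManager field already used in copyFile above, Upload is com.amazonaws.services.s3.transfer.Upload, and the names and length are placeholders) might look like:

// Hypothetical caller, not part of S3AFileSystem:
void uploadStream(String key, InputStream in, long len) throws InterruptedException {
    ObjectMetadata meta = new ObjectMetadata();
    // newPutObjectRequest assumes the size has already been configured.
    meta.setContentLength(len);
    PutObjectRequest request = newPutObjectRequest(key, meta, in);
    Upload upload = transfers.upload(request);
    upload.waitForUploadResult();
}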
Use of com.amazonaws.services.s3.model.ObjectMetadata in project hadoop by apache.
The class ITestS3AEncryptionSSEKMSDefaultKey, method assertEncrypted:
@Override
protected void assertEncrypted(Path path) throws IOException {
    ObjectMetadata md = getFileSystem().getObjectMetadata(path);
    assertEquals("aws:kms", md.getSSEAlgorithm());
    assertThat(md.getSSEAwsKmsKeyId(), containsString("arn:aws:kms:"));
}
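On the write side, this assertion only holds if the object was stored with SSE-KMS. With the plain SDK that is one extra parameter on the put request; a minimal sketch (bucket, key, and file are placeholders; an empty SSEAwsKeyManagementParams selects the account's default aws/s3 KMS key):

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.services.s3.model.SSEAwsKeyManagementParams;
import java.io.File;

public class KmsPutSketch {
    static void putWithKms(AmazonS3 s3, String bucket, String key, File file) {
        PutObjectRequest request = new PutObjectRequest(bucket, key, file);
        // No key ARN supplied: S3 falls back to the default aws/s3 KMS key,
        // and a later getObjectMetadata reports getSSEAlgorithm() == "aws:kms".
        request.setSSEAwsKeyManagementParams(new SSEAwsKeyManagementParams());
        s3.putObject(request);
    }
}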