use of com.amazonaws.services.s3.model.PutObjectResult in project camel by apache.
the class AmazonS3ClientMock method putObject.
@SuppressWarnings("resource")
@Override
public PutObjectResult putObject(PutObjectRequest putObjectRequest) throws AmazonClientException, AmazonServiceException {
    // record the request so tests can assert on what was sent
    putObjectRequests.add(putObjectRequest);
    // mirror the request into an in-memory S3Object
    S3Object s3Object = new S3Object();
    s3Object.setBucketName(putObjectRequest.getBucketName());
    s3Object.setKey(putObjectRequest.getKey());
    if (putObjectRequest.getFile() != null) {
        try {
            s3Object.setObjectContent(new FileInputStream(putObjectRequest.getFile()));
        } catch (FileNotFoundException e) {
            throw new AmazonServiceException("Cannot store the file object.", e);
        }
    } else {
        s3Object.setObjectContent(putObjectRequest.getInputStream());
    }
    objects.add(s3Object);
    // always answer with the same hard-coded ETag
    PutObjectResult putObjectResult = new PutObjectResult();
    putObjectResult.setETag("3a5c8b1ad448bca04584ecb55b836264");
    return putObjectResult;
}
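A minimal sketch of how a test might exercise this mock; the no-argument constructor is an assumption here, as the Camel test suite may wire the mock up differently.

import java.io.ByteArrayInputStream;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.services.s3.model.PutObjectResult;

public class AmazonS3ClientMockExample {
    public static void main(String[] args) {
        AmazonS3ClientMock client = new AmazonS3ClientMock(); // assumed constructor
        byte[] payload = "hello".getBytes();
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(payload.length);
        PutObjectResult result = client.putObject(
                new PutObjectRequest("mybucket", "mykey", new ByteArrayInputStream(payload), metadata));
        // the mock always returns the same fixed ETag
        System.out.println(result.getETag()); // 3a5c8b1ad448bca04584ecb55b836264
    }
}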
use of com.amazonaws.services.s3.model.PutObjectResult in project camel by apache.
the class S3Producer method processSingleOp.
public void processSingleOp(final Exchange exchange) throws Exception {
    ObjectMetadata objectMetadata = determineMetadata(exchange);
    File filePayload = null;
    InputStream is = null;
    Object obj = exchange.getIn().getMandatoryBody();
    PutObjectRequest putObjectRequest = null;
    // unwrap WrappedFile payloads so the underlying File can be streamed directly
    if (obj instanceof WrappedFile) {
        obj = ((WrappedFile<?>) obj).getFile();
    }
    if (obj instanceof File) {
        filePayload = (File) obj;
        is = new FileInputStream(filePayload);
    } else {
        is = exchange.getIn().getMandatoryBody(InputStream.class);
    }
    putObjectRequest = new PutObjectRequest(getConfiguration().getBucketName(), determineKey(exchange), is, objectMetadata);
    String storageClass = determineStorageClass(exchange);
    if (storageClass != null) {
        putObjectRequest.setStorageClass(storageClass);
    }
    String cannedAcl = exchange.getIn().getHeader(S3Constants.CANNED_ACL, String.class);
    if (cannedAcl != null) {
        CannedAccessControlList objectAcl = CannedAccessControlList.valueOf(cannedAcl);
        putObjectRequest.setCannedAcl(objectAcl);
    }
    AccessControlList acl = exchange.getIn().getHeader(S3Constants.ACL, AccessControlList.class);
    if (acl != null) {
        // note: if a canned ACL and an ACL are both specified, the one set last wins;
        // see PutObjectRequest#setAccessControlList for details
        putObjectRequest.setAccessControlList(acl);
    }
    LOG.trace("Put object [{}] from exchange [{}]...", putObjectRequest, exchange);
    PutObjectResult putObjectResult = getEndpoint().getS3Client().putObject(putObjectRequest);
    LOG.trace("Received result [{}]", putObjectResult);
    Message message = getMessageForResponse(exchange);
    message.setHeader(S3Constants.E_TAG, putObjectResult.getETag());
    if (putObjectResult.getVersionId() != null) {
        message.setHeader(S3Constants.VERSION_ID, putObjectResult.getVersionId());
    }
    if (getConfiguration().isDeleteAfterWrite() && filePayload != null) {
        // close streams before deleting the uploaded file
        IOHelper.close(putObjectRequest.getInputStream());
        IOHelper.close(is);
        FileUtil.deleteFile(filePayload);
    }
}
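processSingleOp reads the object key, storage class, and ACL from exchange headers. A sketch of a caller setting those headers through a ProducerTemplate; the S3Constants header names match the camel-aws-s3 component, but the endpoint URI and its credential query parameters are illustrative only.

import java.util.HashMap;
import java.util.Map;
import org.apache.camel.CamelContext;
import org.apache.camel.ProducerTemplate;
import org.apache.camel.component.aws.s3.S3Constants;
import org.apache.camel.impl.DefaultCamelContext;

public class S3ProducerExample {
    public static void main(String[] args) throws Exception {
        CamelContext context = new DefaultCamelContext();
        context.start();
        ProducerTemplate template = context.createProducerTemplate();
        Map<String, Object> headers = new HashMap<>();
        headers.put(S3Constants.KEY, "reports/2017/summary.txt");
        headers.put(S3Constants.STORAGE_CLASS, "REDUCED_REDUNDANCY");
        // must match a CannedAccessControlList enum constant name
        headers.put(S3Constants.CANNED_ACL, "PublicRead");
        // endpoint URI is illustrative; real credentials and options vary
        template.sendBodyAndHeaders("aws-s3://mybucket?accessKey=xxx&secretKey=yyy",
                "hello from camel", headers);
        context.stop();
    }
}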
use of com.amazonaws.services.s3.model.PutObjectResult in project hadoop by apache.
the class S3ABlockOutputStream method putObject.
/**
 * Upload the current block as a single PUT request; if the buffer
 * is empty a 0-byte PUT will be invoked, as it is needed to create an
 * entry at the far end.
 * @throws IOException any problem.
 */
private void putObject() throws IOException {
    LOG.debug("Executing regular upload for {}", writeOperationHelper);
    final S3ADataBlocks.DataBlock block = getActiveBlock();
    int size = block.dataSize();
    final S3ADataBlocks.BlockUploadData uploadData = block.startUpload();
    final PutObjectRequest putObjectRequest = uploadData.hasFile()
        ? writeOperationHelper.newPutRequest(uploadData.getFile())
        : writeOperationHelper.newPutRequest(uploadData.getUploadStream(), size);
    fs.setOptionalPutRequestParameters(putObjectRequest);
    long transferQueueTime = now();
    BlockUploadProgress callback = new BlockUploadProgress(block, progressListener, transferQueueTime);
    putObjectRequest.setGeneralProgressListener(callback);
    statistics.blockUploadQueued(size);
    ListenableFuture<PutObjectResult> putObjectResult = executorService.submit(new Callable<PutObjectResult>() {

        @Override
        public PutObjectResult call() throws Exception {
            PutObjectResult result;
            try {
                // the putObject call automatically closes the input
                // stream afterwards.
                result = writeOperationHelper.putObject(putObjectRequest);
            } finally {
                closeAll(LOG, uploadData, block);
            }
            return result;
        }
    });
    clearActiveBlock();
    // wait for completion
    try {
        putObjectResult.get();
    } catch (InterruptedException ie) {
        LOG.warn("Interrupted object upload", ie);
        Thread.currentThread().interrupt();
    } catch (ExecutionException ee) {
        throw extractException("regular upload", key, ee);
    }
}
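The method above submits the PUT to a thread pool, blocks on the future, and unwraps any failure. A self-contained sketch of that await-and-unwrap pattern with plain java.util.concurrent types; note the original only re-interrupts on InterruptedException, whereas this sketch converts it to an InterruptedIOException, and the ExecutionException branch approximates what extractException does in S3A.

import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;

public final class UploadAwait {

    // Block on an asynchronous upload and surface its failure as an IOException.
    static <T> T awaitUpload(Future<T> upload, String key) throws IOException {
        try {
            return upload.get();
        } catch (InterruptedException ie) {
            Thread.currentThread().interrupt(); // restore the interrupt flag
            throw new InterruptedIOException("interrupted while uploading " + key);
        } catch (ExecutionException ee) {
            Throwable cause = ee.getCause();
            if (cause instanceof IOException) {
                throw (IOException) cause; // rethrow I/O failures as-is
            }
            throw new IOException("upload of " + key + " failed", cause);
        }
    }
}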
use of com.amazonaws.services.s3.model.PutObjectResult in project hadoop by apache.
the class S3AFileSystem method putObjectDirect.
/**
 * PUT an object directly (i.e. not via the transfer manager).
 * Byte length is calculated from the file length, or, if there is no
 * file, from the content length of the header.
 * <i>Important: this call will close any input stream in the request.</i>
 * @param putObjectRequest the request
 * @return the upload initiated
 * @throws AmazonClientException on problems
 */
public PutObjectResult putObjectDirect(PutObjectRequest putObjectRequest) throws AmazonClientException {
    long len;
    if (putObjectRequest.getFile() != null) {
        len = putObjectRequest.getFile().length();
    } else {
        len = putObjectRequest.getMetadata().getContentLength();
    }
    incrementPutStartStatistics(len);
    try {
        PutObjectResult result = s3.putObject(putObjectRequest);
        incrementPutCompletedStatistics(true, len);
        return result;
    } catch (AmazonClientException e) {
        incrementPutCompletedStatistics(false, len);
        throw e;
    }
}
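Because putObjectDirect falls back to the metadata's content length when no file is attached, a stream-based caller must set that length explicitly. A hypothetical helper illustrating this against the plain SDK client (putBytes is not part of S3AFileSystem):

import java.io.ByteArrayInputStream;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.services.s3.model.PutObjectResult;

public final class DirectPut {

    // hypothetical helper: PUT a byte array with an explicit Content-Length
    static PutObjectResult putBytes(AmazonS3 s3, String bucket, String key, byte[] data) {
        ObjectMetadata metadata = new ObjectMetadata();
        // without this, the metadata's content length reads as 0
        metadata.setContentLength(data.length);
        PutObjectRequest request =
                new PutObjectRequest(bucket, key, new ByteArrayInputStream(data), metadata);
        return s3.putObject(request);
    }
}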
use of com.amazonaws.services.s3.model.PutObjectResult in project elasticsearch by elastic.
the class TestAmazonS3 method putObject.
@Override
public PutObjectResult putObject(String bucketName, String key, InputStream input, ObjectMetadata metadata) throws AmazonClientException, AmazonServiceException {
    if (shouldFail(bucketName, key, writeFailureRate)) {
        long length = metadata.getContentLength();
        long partToRead = (long) (length * randomDouble());
        byte[] buffer = new byte[1024];
        // consume a random prefix of the stream to simulate a partial upload
        // before the injected failure
        for (long cur = 0; cur < partToRead; cur += buffer.length) {
            try {
                input.read(buffer, 0, (int) (partToRead - cur > buffer.length ? buffer.length : partToRead - cur));
            } catch (IOException ex) {
                throw new ElasticsearchException("cannot read input stream", ex);
            }
        }
        logger.info("--> random write failure on putObject method: throwing an exception for [bucket={}, key={}]", bucketName, key);
        AmazonS3Exception ex = new AmazonS3Exception("Random S3 exception");
        // 400/RequestTimeout is a retryable error, so a robust caller should recover
        ex.setStatusCode(400);
        ex.setErrorCode("RequestTimeout");
        throw ex;
    } else {
        return super.putObject(bucketName, key, input, metadata);
    }
}
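The injected failure is deliberately a 400/RequestTimeout, which S3 clients treat as retryable. A minimal sketch of the kind of retry loop that survives it; this is illustrative, not how the Elasticsearch repository actually retries, and it recreates the stream on each attempt because the mock may have partially consumed it.

import java.io.ByteArrayInputStream;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.AmazonS3Exception;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectResult;

public final class RetryingPut {

    static PutObjectResult putWithRetry(AmazonS3 client, String bucket, String key,
                                        byte[] data, int maxAttempts) {
        for (int attempt = 1; ; attempt++) {
            ObjectMetadata metadata = new ObjectMetadata();
            metadata.setContentLength(data.length);
            try {
                // fresh stream per attempt: a partially consumed stream cannot be resent
                return client.putObject(bucket, key, new ByteArrayInputStream(data), metadata);
            } catch (AmazonS3Exception e) {
                boolean retryable = "RequestTimeout".equals(e.getErrorCode());
                if (!retryable || attempt >= maxAttempts) {
                    throw e;
                }
            }
        }
    }
}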