use of com.amazonaws.services.s3.model.PutObjectRequest in project elasticsearch by elastic.
the class DefaultS3OutputStream, method doUpload.
protected void doUpload(S3BlobStore blobStore, String bucketName, String blobName, InputStream is, int length, boolean serverSideEncryption) throws AmazonS3Exception {
    ObjectMetadata md = new ObjectMetadata();
    if (serverSideEncryption) {
        md.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
    }
    md.setContentLength(length);
    InputStream inputStream = is;
    // We try to compute an MD5 digest while reading the stream
    MessageDigest messageDigest;
    try {
        messageDigest = MessageDigest.getInstance("MD5");
        inputStream = new DigestInputStream(is, messageDigest);
    } catch (NoSuchAlgorithmException impossible) {
        // Every implementation of the Java platform is required to support MD5 (see MessageDigest)
        throw new RuntimeException(impossible);
    }
    PutObjectRequest putRequest = new PutObjectRequest(bucketName, blobName, inputStream, md)
        .withStorageClass(blobStore.getStorageClass())
        .withCannedAcl(blobStore.getCannedACL());
    PutObjectResult putObjectResult = blobStore.client().putObject(putRequest);
    String localMd5 = Base64.encodeAsString(messageDigest.digest());
    String remoteMd5 = putObjectResult.getContentMd5();
    if (!localMd5.equals(remoteMd5)) {
        logger.debug("MD5 local [{}], remote [{}] are not equal...", localMd5, remoteMd5);
        throw new AmazonS3Exception("MD5 local [" + localMd5 + "], remote [" + remoteMd5 + "] are not equal...");
    }
}
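For context, here is a minimal, self-contained sketch of the same verify-on-upload pattern with the v1 SDK; the bucket name, key, and payload are placeholders, and AmazonS3ClientBuilder.defaultClient() is assumed to pick up credentials from the environment.

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.services.s3.model.PutObjectResult;
import com.amazonaws.util.Base64;
import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;
import java.security.DigestInputStream;
import java.security.MessageDigest;

public class VerifiedPut {
    public static void main(String[] args) throws Exception {
        byte[] data = "hello".getBytes(StandardCharsets.UTF_8);
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        ObjectMetadata md = new ObjectMetadata();
        md.setContentLength(data.length);
        md.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        // wrap the source so the MD5 digest is computed as S3 consumes the stream
        MessageDigest digest = MessageDigest.getInstance("MD5");
        DigestInputStream in = new DigestInputStream(new ByteArrayInputStream(data), digest);
        PutObjectResult result = s3.putObject(new PutObjectRequest("my-bucket", "my-key", in, md));
        // compare our digest with the one S3 reports to detect corruption in transit
        String localMd5 = Base64.encodeAsString(digest.digest());
        String remoteMd5 = result.getContentMd5();
        if (!localMd5.equals(remoteMd5)) {
            throw new IllegalStateException("MD5 mismatch: local=" + localMd5 + ", remote=" + remoteMd5);
        }
    }
}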
use of com.amazonaws.services.s3.model.PutObjectRequest in project hadoop by apache.
the class S3AFileSystem, method newPutObjectRequest.
/**
* Create a {@link PutObjectRequest} request.
* The metadata is assumed to have been configured with the size of the
* operation.
* @param key key of object
* @param metadata metadata header
* @param inputStream source data.
* @return the request
*/
private PutObjectRequest newPutObjectRequest(String key, ObjectMetadata metadata, InputStream inputStream) {
    Preconditions.checkNotNull(inputStream);
    PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, inputStream, metadata);
    setOptionalPutRequestParameters(putObjectRequest);
    putObjectRequest.setCannedAcl(cannedACL);
    return putObjectRequest;
}
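Such a factory only works well when callers size the metadata up front: with the v1 SDK, a PUT from a stream of unknown length forces the client to buffer the contents in memory to compute it. A hypothetical caller (serialize(), the s3 client field, and the key are illustrative, not part of S3AFileSystem):

// metadata must carry the exact byte count before the request is built
byte[] payload = serialize(record);
ObjectMetadata meta = new ObjectMetadata();
meta.setContentLength(payload.length);
PutObjectRequest req = newPutObjectRequest("table/part-0000", meta, new ByteArrayInputStream(payload));
s3.putObject(req);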
use of com.amazonaws.services.s3.model.PutObjectRequest in project hadoop by apache.
the class S3ABlockOutputStream, method putObject.
/**
* Upload the current block as a single PUT request; if the buffer
* is empty a 0-byte PUT will be invoked, as it is needed to create an
* entry at the far end.
* @throws IOException any problem.
*/
private void putObject() throws IOException {
    LOG.debug("Executing regular upload for {}", writeOperationHelper);
    final S3ADataBlocks.DataBlock block = getActiveBlock();
    int size = block.dataSize();
    final S3ADataBlocks.BlockUploadData uploadData = block.startUpload();
    final PutObjectRequest putObjectRequest = uploadData.hasFile()
        ? writeOperationHelper.newPutRequest(uploadData.getFile())
        : writeOperationHelper.newPutRequest(uploadData.getUploadStream(), size);
    fs.setOptionalPutRequestParameters(putObjectRequest);
    long transferQueueTime = now();
    BlockUploadProgress callback = new BlockUploadProgress(block, progressListener, transferQueueTime);
    putObjectRequest.setGeneralProgressListener(callback);
    statistics.blockUploadQueued(size);
    ListenableFuture<PutObjectResult> putObjectResult = executorService.submit(new Callable<PutObjectResult>() {

        @Override
        public PutObjectResult call() throws Exception {
            PutObjectResult result;
            try {
                // the putObject call automatically closes the input
                // stream afterwards.
                result = writeOperationHelper.putObject(putObjectRequest);
            } finally {
                closeAll(LOG, uploadData, block);
            }
            return result;
        }
    });
    clearActiveBlock();
    // wait for completion
    try {
        putObjectResult.get();
    } catch (InterruptedException ie) {
        LOG.warn("Interrupted object upload", ie);
        Thread.currentThread().interrupt();
    } catch (ExecutionException ee) {
        throw extractException("regular upload", key, ee);
    }
}
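The submit-then-wait shape above is plain java.util.concurrent rather than anything S3-specific. A standalone sketch of the same error handling, with the task body standing in for the PUT:

import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class SubmitAndWait {
    public static void main(String[] args) {
        ExecutorService pool = Executors.newSingleThreadExecutor();
        // any exception thrown by the task is wrapped in an ExecutionException
        Future<String> future = pool.submit(() -> "uploaded");
        try {
            System.out.println(future.get()); // block until the task finishes
        } catch (InterruptedException ie) {
            Thread.currentThread().interrupt(); // restore the interrupt flag, as the snippet does
        } catch (ExecutionException ee) {
            throw new RuntimeException("upload failed", ee.getCause()); // unwrap the real cause
        } finally {
            pool.shutdown();
        }
    }
}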
use of com.amazonaws.services.s3.model.PutObjectRequest in project jackrabbit-oak by apache.
the class S3Backend, method write.
private void write(DataIdentifier identifier, File file, boolean asyncUpload, AsyncUploadCallback callback) throws DataStoreException {
    String key = getKeyName(identifier);
    ObjectMetadata objectMetaData = null;
    long start = System.currentTimeMillis();
    ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    try {
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        // check if the same record already exists
        try {
            objectMetaData = s3service.getObjectMetadata(bucket, key);
        } catch (AmazonServiceException ase) {
            // 404/403 simply mean the record is absent; anything else is a real failure
            if (!(ase.getStatusCode() == 404 || ase.getStatusCode() == 403)) {
                throw ase;
            }
        }
        if (objectMetaData != null) {
            long l = objectMetaData.getContentLength();
            if (l != file.length()) {
                throw new DataStoreException("Collision: " + key + " new length: " + file.length() + " old length: " + l);
            }
            LOG.debug("[{}] exists, lastModified = [{}]", key, objectMetaData.getLastModified().getTime());
            // copy the object onto itself to refresh its lastModified timestamp
            CopyObjectRequest copReq = new CopyObjectRequest(bucket, key, bucket, key);
            copReq.setNewObjectMetadata(objectMetaData);
            Copy copy = tmx.copy(s3ReqDecorator.decorate(copReq));
            try {
                copy.waitForCopyResult();
                LOG.debug("lastModified of [{}] updated successfully.", identifier);
                if (callback != null) {
                    callback.onSuccess(new AsyncUploadResult(identifier, file));
                }
            } catch (Exception e2) {
                AsyncUploadResult asyncUpRes = new AsyncUploadResult(identifier, file);
                asyncUpRes.setException(e2);
                if (callback != null) {
                    callback.onAbort(asyncUpRes);
                }
                throw new DataStoreException("Could not upload " + key, e2);
            }
        }
        if (objectMetaData == null) {
            try {
                // start multipart parallel upload using the Amazon SDK's TransferManager
                Upload up = tmx.upload(s3ReqDecorator.decorate(new PutObjectRequest(bucket, key, file)));
                if (asyncUpload) {
                    up.addProgressListener(new S3UploadProgressListener(up, identifier, file, callback));
                    LOG.debug("added upload progress listener to identifier [{}]", identifier);
                } else {
                    // wait for upload to finish
                    up.waitForUploadResult();
                    LOG.debug("synchronous upload to identifier [{}] completed.", identifier);
                    if (callback != null) {
                        callback.onSuccess(new AsyncUploadResult(identifier, file));
                    }
                }
            } catch (Exception e2) {
                AsyncUploadResult asyncUpRes = new AsyncUploadResult(identifier, file);
                asyncUpRes.setException(e2);
                if (callback != null) {
                    callback.onAbort(asyncUpRes);
                }
                throw new DataStoreException("Could not upload " + key, e2);
            }
        }
    } finally {
        if (contextClassLoader != null) {
            Thread.currentThread().setContextClassLoader(contextClassLoader);
        }
    }
    LOG.debug("write of [{}], length=[{}], in async mode [{}], in [{}]ms", new Object[] { identifier, file.length(), asyncUpload, (System.currentTimeMillis() - start) });
}
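A minimal sketch of the TransferManager flow used above, outside Jackrabbit; the bucket, key, and file path are placeholders, and TransferManagerBuilder is assumed to be available in the v1 SDK version on the classpath:

import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.TransferManagerBuilder;
import com.amazonaws.services.s3.transfer.Upload;
import java.io.File;

public class TransferManagerPut {
    public static void main(String[] args) throws Exception {
        // TransferManager picks single PUT vs. multipart upload based on the file size
        TransferManager tmx = TransferManagerBuilder.standard()
                .withS3Client(AmazonS3ClientBuilder.defaultClient())
                .build();
        try {
            Upload up = tmx.upload("my-bucket", "my-key", new File("/tmp/data.bin"));
            up.waitForUploadResult(); // blocks, like the synchronous branch above
        } finally {
            tmx.shutdownNow(); // also shuts down the wrapped S3 client by default
        }
    }
}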
use of com.amazonaws.services.s3.model.PutObjectRequest in project jackrabbit-oak by apache.
the class S3Backend, method addMetadataRecord.
@Override
public void addMetadataRecord(File input, String name) throws DataStoreException {
    ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    try {
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        Upload upload = tmx.upload(s3ReqDecorator.decorate(new PutObjectRequest(bucket, addMetaKeyPrefix(name), input)));
        upload.waitForUploadResult();
    } catch (InterruptedException e) {
        LOG.error("Exception in uploading metadata file {}", new Object[] { input, e });
        throw new DataStoreException("Error in uploading metadata file", e);
    } finally {
        if (contextClassLoader != null) {
            Thread.currentThread().setContextClassLoader(contextClassLoader);
        }
    }
}
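Both jackrabbit-oak snippets bracket the SDK call with a context-classloader swap so the AWS SDK's service discovery resolves against the backend's own classloader rather than the caller's. The pattern in isolation, with doWork() as a placeholder for the SDK call:

ClassLoader previous = Thread.currentThread().getContextClassLoader();
try {
    Thread.currentThread().setContextClassLoader(S3Backend.class.getClassLoader());
    doWork(); // placeholder for the upload
} finally {
    // always restore the caller's classloader, even on failure
    Thread.currentThread().setContextClassLoader(previous);
}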