Use of com.amazonaws.services.s3.model.PutObjectRequest (AWS SDK for Java 1.x) in project alluxio by Alluxio.
The class S3AOutputStream, method close.
@Override
public void close() throws IOException {
  if (mClosed) {
    return;
  }
  mLocalOutputStream.close();
  String uploadKey = getUploadPath();
  try {
    // Describe the payload: exact content length and a generic octet-stream
    // content type (no assumptions about the data), plus optional server-side
    // encryption and MD5 checksum when they are configured.
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(mFile.length());
    metadata.setContentType(Mimetypes.MIMETYPE_OCTET_STREAM);
    if (mSseEnabled) {
      metadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
    }
    if (mHash != null) {
      metadata.setContentMD5(new String(Base64.encode(mHash.digest())));
    }
    // Submit the upload through the transfer manager and block until it completes.
    PutObjectRequest request =
        new PutObjectRequest(mBucketName, uploadKey, mFile).withMetadata(metadata);
    getTransferManager().upload(request).waitForUploadResult();
  } catch (Exception e) {
    LOG.error("Failed to upload {}", uploadKey, e);
    throw new IOException(e);
  } finally {
    // Always remove the local temporary file, whether or not the upload succeeded.
    if (!mFile.delete()) {
      LOG.error("Failed to delete temporary file @ {}", mFile.getPath());
    }
    // Mark the stream closed so a repeated close() is a no-op.
    mClosed = true;
  }
}
Use of com.amazonaws.services.s3.model.PutObjectRequest (AWS SDK for Java 1.x) in project apex-malhar by apache.
The class S3Reconciler, method processCommittedData.
/**
 * Uploads the committed local file to Amazon S3 using the putObject API of the S3 client.
 *
 * @param outputMetaData path, file name and size of the finalized local file to upload
 */
@Override
protected void processCommittedData(FSRecordCompactionOperator.OutputMetaData outputMetaData) {
  try {
    Path path = new Path(outputMetaData.getPath());
    if (!fs.exists(path)) {
      // During replay the file may already have been uploaded and removed.
      logger.debug("Ignoring non-existent path assuming replay : {}", path);
      return;
    }
    // The SDK's retry read limit is an int, so reject files that cannot fit.
    if (outputMetaData.getSize() >= Integer.MAX_VALUE) {
      throw new RuntimeException("PutRequestSize greater than Integer.MAX_VALUE");
    }
    // try-with-resources: the input stream was previously leaked on every path.
    try (FSDataInputStream fsinput = fs.open(path)) {
      ObjectMetadata omd = new ObjectMetadata();
      omd.setContentLength(outputMetaData.getSize());
      String keyName = directoryName + Path.SEPARATOR + outputMetaData.getFileName();
      PutObjectRequest request = new PutObjectRequest(bucketName, keyName, fsinput, omd);
      // Allow the SDK to mark/reset the whole stream so a failed PUT can be retried.
      request.getRequestClientOptions().setReadLimit((int) outputMetaData.getSize());
      PutObjectResult result = s3client.putObject(request);
      logger.debug("File {} Uploaded at {}", keyName, result.getETag());
    }
  } catch (FileNotFoundException e) {
    // The file disappeared between the existence check and fs.open — treat as replay.
    logger.debug("Ignoring non-existent path assuming replay : {}", outputMetaData.getPath());
  } catch (IOException e) {
    // Log the full stack trace; previously only the message was recorded.
    logger.error("Unable to create Stream: {}", e.getMessage(), e);
  }
}
Use of com.amazonaws.services.s3.model.PutObjectRequest (AWS SDK for Java 1.x) in project apex-malhar by apache.
The class S3InputModuleAppTest, method setup.
@Before
public void setup() throws Exception {
  // S3 client and the bucket the test reads from.
  client = new AmazonS3Client(new BasicAWSCredentials(accessKey, secretKey));
  client.createBucket(testMeta.bucketKey);
  inputDir = testMeta.baseDirectory + File.separator + "input";
  outputDir = testMeta.baseDirectory + File.separator + "output";
  // Create each local fixture file and mirror it into the bucket's input/ prefix.
  File firstFile = new File(inputDir + File.separator + FILE_1);
  FileUtils.writeStringToFile(firstFile, FILE_1_DATA);
  client.putObject(new PutObjectRequest(testMeta.bucketKey, "input/" + FILE_1, firstFile));
  File secondFile = new File(inputDir + File.separator + FILE_2);
  FileUtils.writeStringToFile(secondFile, FILE_2_DATA);
  client.putObject(new PutObjectRequest(testMeta.bucketKey, "input/" + FILE_2, secondFile));
  // URI pointing the input module at the bucket's input directory.
  files = SCHEME + "://" + accessKey + ":" + secretKey + "@" + testMeta.bucketKey + "/input";
}
Use of com.amazonaws.services.s3.model.PutObjectRequest (AWS SDK for Java 1.x) in project stocator by SparkTC.
The class COSAPIClient, method createObject.
/**
 * Creates an output stream that writes an object of the given name and content type.
 * Directory objects ({@code Constants.APPLICATION_DIRECTORY}) are created eagerly as
 * zero-length objects; regular objects are written through a block or plain output stream.
 *
 * @param objName object name, possibly prefixed with the bucket name
 * @param contentType MIME type of the object
 * @param metadata user metadata to attach to the object
 * @param statistics filesystem statistics to update, may be null
 * @param overwrite whether an existing object may be replaced
 * @return stream the caller writes the object contents to
 * @throws IOException if the object cannot be created
 */
@Override
public FSDataOutputStream createObject(String objName, String contentType, Map<String, String> metadata, Statistics statistics, boolean overwrite) throws IOException {
  LOG.debug("Create object {}", objName);
  try {
    // Keys handed to the SDK must be relative to the bucket.
    String objNameWithoutBucket = objName;
    if (objName.startsWith(mBucket + "/")) {
      objNameWithoutBucket = objName.substring(mBucket.length() + 1);
    }
    // write will fail in case of a concurrent write operation
    if (!overwrite && !atomicWriteEnabled) {
      LOG.warn("overwrite == false and atomic write mode is not enabled " + "the object will be overwritten if already exists");
    }
    // Primitive boolean — no boxing needed for a local flag.
    boolean avoidOverwrite = atomicWriteEnabled && !overwrite;
    if (blockUploadEnabled) {
      return new FSDataOutputStream(new COSBlockOutputStream(this, objNameWithoutBucket, new SemaphoredDelegatingExecutor(threadPoolExecutor, blockOutputActiveBlocks, true), partSize, blockFactory, contentType, new WriteOperationHelper(objNameWithoutBucket), metadata, avoidOverwrite), null);
    }
    if (!contentType.equals(Constants.APPLICATION_DIRECTORY)) {
      return new FSDataOutputStream(new COSOutputStream(mBucket, objName, mClient, contentType, metadata, transfers, this, avoidOverwrite), statistics);
    } else {
      // Directory objects are zero-length: upload an always-empty stream now.
      final InputStream im = new InputStream() {
        @Override
        public int read() throws IOException {
          return -1;
        }
      };
      final ObjectMetadata om = new ObjectMetadata();
      om.setContentLength(0L);
      om.setContentType(contentType);
      om.setUserMetadata(metadata);
      // Note - no need for atomic write in case of directory
      LOG.debug("bucket: {}, key {}", mBucket, objNameWithoutBucket);
      PutObjectRequest putObjectRequest = new PutObjectRequest(mBucket, objNameWithoutBucket, im, om);
      Upload upload = transfers.upload(putObjectRequest);
      upload.waitForUploadResult();
      // The directory already exists remotely; give the caller a no-op stream.
      OutputStream fakeStream = new OutputStream() {
        @Override
        public void write(int b) throws IOException {
        }
        @Override
        public void close() throws IOException {
          super.close();
        }
      };
      return new FSDataOutputStream(fakeStream, statistics);
    }
  } catch (InterruptedException e) {
    // Restore the thread's interrupt status before translating to an I/O failure.
    Thread.currentThread().interrupt();
    throw new InterruptedIOException("Interrupted creating " + objName);
  } catch (IOException e) {
    // Log with the full stack trace (previously only the message), then propagate.
    LOG.error(e.getMessage(), e);
    throw e;
  }
}
Use of com.amazonaws.services.s3.model.PutObjectRequest (AWS SDK for Java 1.x) in project stocator by SparkTC.
The class COSOutputStream, method close.
/**
 * Closes the backing local stream and uploads the buffered file to COS.
 * Idempotent: only the first call performs the upload; the temporary backup
 * file is deleted whether or not the upload succeeds.
 *
 * @throws IOException if the upload fails or is interrupted
 */
@Override
public void close() throws IOException {
  // getAndSet makes close() idempotent: only the first caller uploads.
  if (closed.getAndSet(true)) {
    return;
  }
  mBackupOutputStream.close();
  LOG.debug("OutputStream for key '{}' closed. Now beginning upload", mKey);
  try {
    final ObjectMetadata om = new ObjectMetadata();
    om.setContentLength(mBackupFile.length());
    om.setContentType(mContentType);
    om.setUserMetadata(mMetadata);
    // "If-None-Match: *" makes the PUT fail if the object already exists,
    // to ensure the write is atomic.
    if (mAvoidOverwrite) {
      LOG.debug("Avoid Overwrite - setting If-None-Match header");
      om.setHeader("If-None-Match", "*");
    }
    PutObjectRequest putObjectRequest = new PutObjectRequest(mBucketName, mKey, mBackupFile);
    putObjectRequest.setMetadata(om);
    Upload upload = transfers.upload(putObjectRequest);
    upload.waitForUploadResult();
  } catch (InterruptedException e) {
    // Restore the thread's interrupt status before translating to an I/O failure.
    Thread.currentThread().interrupt();
    throw (InterruptedIOException) new InterruptedIOException(e.toString()).initCause(e);
  } catch (AmazonClientException e) {
    // Preserve the original exception as the cause so the stack trace survives.
    throw new IOException(String.format("saving output %s", mKey), e);
  } finally {
    if (!mBackupFile.delete()) {
      // Fixed: previously logged the stream object instead of the backup file.
      LOG.warn("Could not delete temporary cos file: {}", mBackupFile);
    }
    super.close();
  }
  LOG.debug("OutputStream for key '{}' upload complete", mKey);
}
Aggregations