use of com.amazonaws.services.s3.model.PutObjectRequest (AWS SDK for Java v1) in project stocator by SparkTC.
the class COSAPIClient method createObject.
@Override
public FSDataOutputStream createObject(String objName, String contentType, Map<String, String> metadata, Statistics statistics, boolean overwrite) throws IOException {
LOG.debug("Create object {}", objName);
try {
String objNameWithoutBucket = objName;
if (objName.startsWith(mBucket + "/")) {
objNameWithoutBucket = objName.substring(mBucket.length() + 1);
}
// write will fail in case of a concurrent write operation
if (overwrite == false && !atomicWriteEnabled) {
LOG.warn("overwrite == false and atomic write mode is not enabled " + "the object will be overwritten if already exists");
}
Boolean avoidOverwrite = atomicWriteEnabled && !overwrite;
if (blockUploadEnabled) {
return new FSDataOutputStream(new COSBlockOutputStream(this, objNameWithoutBucket, new SemaphoredDelegatingExecutor(threadPoolExecutor, blockOutputActiveBlocks, true), partSize, blockFactory, contentType, new WriteOperationHelper(objNameWithoutBucket), metadata, avoidOverwrite), null);
}
if (!contentType.equals(Constants.APPLICATION_DIRECTORY)) {
return new FSDataOutputStream(new COSOutputStream(mBucket, objName, mClient, contentType, metadata, transfers, this, avoidOverwrite), statistics);
} else {
// Note - no need for atomic write in case of directory
final InputStream im = new InputStream() {
@Override
public int read() throws IOException {
return -1;
}
};
final ObjectMetadata om = new ObjectMetadata();
om.setContentLength(0L);
om.setContentType(contentType);
om.setUserMetadata(metadata);
// Remove the bucket name prefix from key path
if (objName.startsWith(mBucket + "/")) {
objName = objName.substring(mBucket.length() + 1);
}
/*
if (!objName.endsWith("/")) {
objName = objName + "/";
}*/
LOG.debug("bucket: {}, key {}", mBucket, objName);
PutObjectRequest putObjectRequest = new PutObjectRequest(mBucket, objName, im, om);
Upload upload = transfers.upload(putObjectRequest);
upload.waitForUploadResult();
OutputStream fakeStream = new OutputStream() {
@Override
public void write(int b) throws IOException {
}
@Override
public void close() throws IOException {
super.close();
}
};
return new FSDataOutputStream(fakeStream, statistics);
}
} catch (InterruptedException e) {
throw new InterruptedIOException("Interrupted creating " + objName);
} catch (IOException e) {
LOG.error(e.getMessage());
throw e;
}
}
use of com.amazonaws.services.s3.model.PutObjectRequest (AWS SDK for Java v1) in project stocator by SparkTC.
the class COSOutputStream method close.
@Override
public void close() throws IOException {
if (closed.getAndSet(true)) {
return;
}
mBackupOutputStream.close();
LOG.debug("OutputStream for key '{}' closed. Now beginning upload", mKey);
try {
final ObjectMetadata om = new ObjectMetadata();
om.setContentLength(mBackupFile.length());
om.setContentType(mContentType);
om.setUserMetadata(mMetadata);
// to ensure the write is atomic
if (mAvoidOverwrite) {
LOG.debug("Avoid Overwrite - setting If-None-Match header");
om.setHeader("If-None-Match", "*");
}
PutObjectRequest putObjectRequest = new PutObjectRequest(mBucketName, mKey, mBackupFile);
putObjectRequest.setMetadata(om);
Upload upload = transfers.upload(putObjectRequest);
upload.waitForUploadResult();
} catch (InterruptedException e) {
throw (InterruptedIOException) new InterruptedIOException(e.toString()).initCause(e);
} catch (AmazonClientException e) {
throw new IOException(String.format("saving output %s %s", mKey, e));
} finally {
if (!mBackupFile.delete()) {
LOG.warn("Could not delete temporary cos file: {}", mBackupOutputStream);
}
super.close();
}
LOG.debug("OutputStream for key '{}' upload complete", mKey);
}
use of com.amazonaws.services.s3.model.PutObjectRequest (AWS SDK for Java v1) in project gradle-s3-build-cache by myniva.
the class AwsS3BuildCacheService method store.
/**
 * Writes a build cache entry into the configured S3 bucket.
 *
 * <p>The entry is first buffered in memory so its content length can be set on
 * the object metadata, then uploaded under the key derived from the cache key.
 *
 * @param key cache key identifying the entry
 * @param writer produces the entry's bytes
 * @throws BuildCacheException if buffering or the S3 upload fails with an I/O error
 */
@Override
public void store(BuildCacheKey key, BuildCacheEntryWriter writer) {
    final String bucketPath = getBucketPath(key);
    logger.info("Start storing cache entry '{}' in S3 bucket", bucketPath);
    final ObjectMetadata meta = new ObjectMetadata();
    meta.setContentType(BUILD_CACHE_CONTENT_TYPE);
    try (ByteArrayOutputStream buffer = new ByteArrayOutputStream()) {
        writer.writeTo(buffer);
        // Content length must be known up front for the PUT request.
        meta.setContentLength(buffer.size());
        try (InputStream payload = new ByteArrayInputStream(buffer.toByteArray())) {
            final PutObjectRequest request = getPutObjectRequest(bucketPath, meta, payload);
            if (this.reducedRedundancy) {
                request.setStorageClass(StorageClass.ReducedRedundancy);
            }
            s3.putObject(request);
        }
    } catch (IOException e) {
        throw new BuildCacheException("Error while storing cache object in S3 bucket", e);
    }
}
use of com.amazonaws.services.s3.model.PutObjectRequest (AWS SDK for Java v1) in project Synapse-Stack-Builder by Sage-Bionetworks.
the class EC2SecuritySetup method createOrGetKeyPair.
/**
 * Creates the stack key pair if it does not already exist.
 *
 * <p>On first creation, the private key material is written to a temporary file
 * and uploaded to the stack configuration S3 bucket before the temp file is
 * deleted.
 *
 * @return the key pair description (freshly created or pre-existing)
 * @throws RuntimeException if writing or uploading the key material fails
 */
public KeyPairInfo createOrGetKeyPair() {
    String name = config.getStackKeyPairName();
    KeyPairInfo info = describeKeyPair();
    if (info != null) {
        log.debug("Stack KeyPair: " + name + " already exists");
        return info;
    }
    log.debug("Creating the Stack KeyPair: " + name + " for the first time");
    CreateKeyPairResult kpResult = ec2Client.createKeyPair(new CreateKeyPairRequest(name));
    File temp = null;
    try {
        temp = File.createTempFile("Temp", ".tmp");
        // try-with-resources replaces the manual close + swallowed re-close in finally
        try (FileOutputStream fos = new FileOutputStream(temp)) {
            // Write the material to the file; StandardCharsets avoids the checked
            // UnsupportedEncodingException of the String-based overload.
            fos.write(kpResult.getKeyPair().getKeyMaterial().getBytes(java.nio.charset.StandardCharsets.UTF_8));
        }
        // Now write the file to S3
        s3Client.putObject(new PutObjectRequest(config.getStackConfigS3BucketName(), config.getStackKeyPairS3File(), temp));
    } catch (IOException e) {
        // convert to runtime
        throw new RuntimeException(e);
    } finally {
        // Best-effort cleanup of the key material on disk.
        if (temp != null) {
            temp.delete();
        }
    }
    return describeKeyPair();
}
use of com.amazonaws.services.s3.model.PutObjectRequest (AWS SDK for Java v1) in project sic by belluccifranco.
the class AmazonServiceImpl method saveFileIntoS3Bucket.
/**
 * Uploads a stream to the configured S3 bucket under the given key with a
 * public-read ACL and returns the resulting public URL.
 *
 * @param key object key within the bucket
 * @param file content to upload
 * @param contentType MIME type recorded on the object
 * @return public URL of the stored object
 * @throws ServiceException if the S3 client fails with an SdkClientException
 */
@Override
public String saveFileIntoS3Bucket(String key, InputStream file, String contentType) {
    final ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentType(contentType);
    // Build the request first; only the actual S3 call can throw SdkClientException.
    final PutObjectRequest request = new PutObjectRequest(bucketName, key, file, metadata).withCannedAcl(CannedAccessControlList.PublicRead);
    try {
        this.getAmazonS3Client().putObject(request);
    } catch (SdkClientException ex) {
        LOGGER.error(ex.getMessage());
        throw new ServiceException(ResourceBundle.getBundle("Mensajes").getString("mensaje_acceso_S3_error"), ex);
    }
    return amazonS3url + bucketName + "/" + key;
}
Aggregations