Use of com.talend.shaded.com.amazonaws.services.s3.model.ObjectMetadata in project gradle-s3-build-cache by myniva — class AwsS3BuildCacheService, method store:
/**
 * Stores a build-cache entry in the configured S3 bucket.
 *
 * The entry is first buffered in memory so its exact content length can be
 * declared on the object metadata before upload. Any I/O failure is wrapped
 * in a BuildCacheException for the Gradle build-cache infrastructure.
 */
@Override
public void store(BuildCacheKey key, BuildCacheEntryWriter writer) {
    final String bucketPath = getBucketPath(key);
    logger.info("Start storing cache entry '{}' in S3 bucket", bucketPath);

    final ObjectMetadata objectMeta = new ObjectMetadata();
    objectMeta.setContentType(BUILD_CACHE_CONTENT_TYPE);

    try (ByteArrayOutputStream buffer = new ByteArrayOutputStream()) {
        // Buffer the whole entry so the content length is known up front.
        writer.writeTo(buffer);
        objectMeta.setContentLength(buffer.size());

        try (InputStream payload = new ByteArrayInputStream(buffer.toByteArray())) {
            final PutObjectRequest request = getPutObjectRequest(bucketPath, objectMeta, payload);
            if (this.reducedRedundancy) {
                // Cheaper storage class for cache data that can be regenerated.
                request.withStorageClass(StorageClass.ReducedRedundancy);
            }
            s3.putObject(request);
        }
    } catch (IOException e) {
        throw new BuildCacheException("Error while storing cache object in S3 bucket", e);
    }
}
Use of com.talend.shaded.com.amazonaws.services.s3.model.ObjectMetadata in project nifi by apache — class AbstractS3IT, method putTestFileEncrypted:
/**
 * Uploads the given file to the test bucket under the given key, requesting
 * SSE-S3 (AES-256) server-side encryption.
 *
 * @param key  object key to store the file under
 * @param file local file whose contents are uploaded
 * @throws AmazonS3Exception if the upload is rejected by S3
 * @throws FileNotFoundException retained for signature compatibility with callers
 */
protected void putTestFileEncrypted(String key, File file) throws AmazonS3Exception, FileNotFoundException {
    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
    // Use the File-based request so the SDK opens and closes the stream itself:
    // the previous FileInputStream was never closed (resource leak) and carried
    // no content length, forcing the SDK to buffer or chunk the upload.
    PutObjectRequest putRequest = new PutObjectRequest(BUCKET_NAME, key, file)
            .withMetadata(objectMetadata);
    client.putObject(putRequest);
}
Use of com.talend.shaded.com.amazonaws.services.s3.model.ObjectMetadata in project nifi by apache — class FetchS3Object, method onTrigger:
/**
 * Fetches one object from S3 and replaces the incoming FlowFile's content
 * with the object's content, then routes to success. Any I/O or AWS client
 * failure penalizes the FlowFile and routes it to failure.
 *
 * Bucket, key, and optional version id are resolved from the FlowFile via
 * expression language. Selected S3 metadata fields are copied onto the
 * FlowFile as attributes (s3.bucket, s3.etag, mime.type, s3.version, ...),
 * and a provenance FETCH event is recorded with the transfer duration.
 */
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        // Nothing queued; yield this trigger.
        return;
    }
    final long startNanos = System.nanoTime();
    // Resolve the target object's coordinates from FlowFile attributes.
    final String bucket = context.getProperty(BUCKET).evaluateAttributeExpressions(flowFile).getValue();
    final String key = context.getProperty(KEY).evaluateAttributeExpressions(flowFile).getValue();
    final String versionId = context.getProperty(VERSION_ID).evaluateAttributeExpressions(flowFile).getValue();
    final AmazonS3 client = getClient();
    final GetObjectRequest request;
    if (versionId == null) {
        request = new GetObjectRequest(bucket, key);
    } else {
        // Fetch a specific version of the object.
        request = new GetObjectRequest(bucket, key, versionId);
    }
    final Map<String, String> attributes = new HashMap<>();
    // try-with-resources closes the S3Object (and its underlying HTTP stream).
    try (final S3Object s3Object = client.getObject(request)) {
        // Stream the object content directly into the FlowFile's content claim.
        flowFile = session.importFrom(s3Object.getObjectContent(), flowFile);
        attributes.put("s3.bucket", s3Object.getBucketName());
        final ObjectMetadata metadata = s3Object.getObjectMetadata();
        if (metadata.getContentDisposition() != null) {
            // Content-Disposition may carry a path; split it into NiFi's
            // path / absolute.path / filename core attributes.
            final String fullyQualified = metadata.getContentDisposition();
            final int lastSlash = fullyQualified.lastIndexOf("/");
            if (lastSlash > -1 && lastSlash < fullyQualified.length() - 1) {
                attributes.put(CoreAttributes.PATH.key(), fullyQualified.substring(0, lastSlash));
                attributes.put(CoreAttributes.ABSOLUTE_PATH.key(), fullyQualified);
                attributes.put(CoreAttributes.FILENAME.key(), fullyQualified.substring(lastSlash + 1));
            } else {
                // No usable slash: treat the whole value as the filename.
                attributes.put(CoreAttributes.FILENAME.key(), metadata.getContentDisposition());
            }
        }
        if (metadata.getContentMD5() != null) {
            // NOTE(review): getContentMD5 returns the base64-encoded digest from
            // S3 — confirm downstream consumers of hash.value expect base64.
            attributes.put("hash.value", metadata.getContentMD5());
            attributes.put("hash.algorithm", "MD5");
        }
        if (metadata.getContentType() != null) {
            attributes.put(CoreAttributes.MIME_TYPE.key(), metadata.getContentType());
        }
        if (metadata.getETag() != null) {
            attributes.put("s3.etag", metadata.getETag());
        }
        if (metadata.getExpirationTime() != null) {
            // Stored as epoch milliseconds.
            attributes.put("s3.expirationTime", String.valueOf(metadata.getExpirationTime().getTime()));
        }
        if (metadata.getExpirationTimeRuleId() != null) {
            attributes.put("s3.expirationTimeRuleId", metadata.getExpirationTimeRuleId());
        }
        if (metadata.getUserMetadata() != null) {
            // User metadata keys are copied verbatim; they may shadow the
            // attributes set above if the keys collide.
            attributes.putAll(metadata.getUserMetadata());
        }
        if (metadata.getSSEAlgorithm() != null) {
            attributes.put("s3.sseAlgorithm", metadata.getSSEAlgorithm());
        }
        if (metadata.getVersionId() != null) {
            attributes.put("s3.version", metadata.getVersionId());
        }
    } catch (final IOException | AmazonClientException ioe) {
        // Penalize so the FlowFile is not immediately retried, then route to failure.
        getLogger().error("Failed to retrieve S3 Object for {}; routing to failure", new Object[] { flowFile, ioe });
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_FAILURE);
        return;
    }
    if (!attributes.isEmpty()) {
        flowFile = session.putAllAttributes(flowFile, attributes);
    }
    session.transfer(flowFile, REL_SUCCESS);
    final long transferMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos);
    getLogger().info("Successfully retrieved S3 Object for {} in {} millis; routing to success", new Object[] { flowFile, transferMillis });
    // Record provenance with the virtual-hosted-style URL of the fetched object.
    session.getProvenanceReporter().fetch(flowFile, "http://" + bucket + ".amazonaws.com/" + key, transferMillis);
}
Use of com.talend.shaded.com.amazonaws.services.s3.model.ObjectMetadata in project uPortal by Jasig — class AwsS3DynamicSkinService, method saveContentToAwsS3Bucket:
/**
 * Saves the given skin content string to the configured AWS S3 bucket under
 * the given object key, logging before and after the upload.
 *
 * @param objectKey destination key within the configured bucket
 * @param content   skin content to persist
 * @param data      per-instance skin data used to build the object metadata
 */
private void saveContentToAwsS3Bucket(final String objectKey, final String content, final DynamicSkinInstanceData data) {
    // Encode explicitly as UTF-8: the single-argument IOUtils.toInputStream
    // uses the platform default charset, which could corrupt non-ASCII skin
    // content depending on the JVM's file.encoding.
    final InputStream inputStream = IOUtils.toInputStream(content, "UTF-8");
    final ObjectMetadata objectMetadata = this.createObjectMetadata(content, data);
    final PutObjectRequest putObjectRequest = this.createPutObjectRequest(objectKey, inputStream, objectMetadata);
    log.info(ATTEMPTING_TO_SAVE_FILE_TO_AWS_S3_LOG_MSG, this.awsS3BucketConfig.getBucketName(), objectKey);
    this.saveContentToAwsS3Bucket(putObjectRequest);
    log.info(FILE_SAVED_TO_AWS_S3_LOG_MSG, this.awsS3BucketConfig.getBucketName(), objectKey);
}
Use of com.talend.shaded.com.amazonaws.services.s3.model.ObjectMetadata in project hippo by NHS-digital-website — class S3ConnectorImpl, method uploadFile:
/**
 * Uploads the given stream to S3 as a multipart upload under a freshly
 * generated object key, and returns the stored object's metadata.
 *
 * On any failure while uploading parts, the multipart upload is aborted
 * (best-effort) and the original failure is rethrown as a RuntimeException.
 *
 * @param fileStream  content to upload; closing it remains the caller's responsibility
 * @param fileName    original file name used to derive the object key
 * @param contentType MIME type recorded on the stored object
 * @return metadata of the completed object, fetched fresh from S3
 */
public S3ObjectMetadata uploadFile(InputStream fileStream, String fileName, String contentType) {
    String objectKey = s3ObjectKeyGenerator.generateObjectKey(fileName);
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentType(contentType);
    // Initialise the multipart upload; the returned upload id ties all parts together.
    InitiateMultipartUploadResult initResult = s3.initiateMultipartUpload(
            new InitiateMultipartUploadRequest(bucketName, objectKey, metadata));
    List<PartETag> partETags;
    try {
        partETags = uploadParts(fileStream, bucketName, objectKey, initResult.getUploadId());
    } catch (Exception ex) {
        final String errorMessage = "Failed to upload file " + objectKey;
        log.error(errorMessage, ex);
        // Abort best-effort: if the abort itself fails, log it but still rethrow
        // the ORIGINAL upload failure rather than masking it with the abort error.
        try {
            s3.abortMultipartUpload(new AbortMultipartUploadRequest(bucketName, objectKey, initResult.getUploadId()));
        } catch (Exception abortEx) {
            log.error("Failed to abort multipart upload for " + objectKey, abortEx);
        }
        throw new RuntimeException(errorMessage, ex);
    }
    // Finalise the multipart upload, stitching the parts into one object.
    s3.completeMultipartUpload(
            new CompleteMultipartUploadRequest(bucketName, objectKey, initResult.getUploadId(), partETags));
    // The complete call returns an empty metadata object, hence the separate
    // getObjectMetadata round-trip to fetch the actual stored metadata.
    ObjectMetadata resultMetadata = s3.getObjectMetadata(bucketName, objectKey);
    return new S3ObjectMetadataImpl(resultMetadata, bucketName, objectKey);
}
Aggregations