Use of com.formkiq.aws.s3.S3ObjectMetadata in project hippo by NHS-digital-website.
In class S3StorageManager, method uploadFileToS3.
public S3ObjectMetadata uploadFileToS3(String docbase, String sourceFilePath) {
    FilePathData sourceFilePathData = new FilePathData(docbase, sourceFilePath);
    String targetFileName = sourceFilePathData.getFilename();
    S3ObjectMetadata metaData = null;
    // Only s3:// source paths are supported; anything else returns null.
    if (sourceFilePathData.isS3Protocol()) {
        String sourceBucketName = sourceFilePathData.getS3Bucketname();
        String noBucketSourceFilePath = sourceFilePathData.getFilePathNoBucket();
        // Copy the object across buckets only if the source actually exists.
        if (getS3Connector().doesObjectExist(sourceBucketName, noBucketSourceFilePath)) {
            metaData = getS3Connector().copyFileFromOtherBucket(
                    noBucketSourceFilePath, sourceBucketName, targetFileName);
        }
    }
    return metaData;
}
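For context, a minimal usage sketch; the docbase and source path values below are illustrative assumptions, not taken from the project:

// Assumes an already-constructed S3StorageManager; all argument values are hypothetical.
S3ObjectMetadata meta = storageManager.uploadFileToS3(
        "/content/documents",                          // hypothetical docbase
        "s3://source-bucket/attachments/report.pdf");  // hypothetical s3:// source path
if (meta == null) {
    // Null means the path was not an s3:// URL, or the source object does not exist.
    log.warn("Attachment could not be copied to S3");  // `log` is a hypothetical logger
}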
Use of com.formkiq.aws.s3.S3ObjectMetadata in project hippo by NHS-digital-website.
In class AbstractTransformer, method populateAndCreateExternalAttachmentNode.
/**
 * External files are stored in S3, and this method creates the node structure to support that.
 * @param contentNode is the {@link ContentNode} to which we will add this information
 * @param nodeTypeName is the node type, as known by the JCR; it is part of the PUBLICATION_SYSTEM namespace
 * @param displayName is the display name used for the node
 * @param resource is the location, as an S3 URL, of the source file that is being uploaded
 * @param childNodeName is the name of the subordinate node holding the resource details
 */
protected void populateAndCreateExternalAttachmentNode(ContentNode contentNode, String nodeTypeName,
        String displayName, String resource, String childNodeName) {
    ContentNode attachmentNode = new ContentNode(PUBLICATION_SYSTEM + nodeTypeName, PUBLICATIONSYSTEM_EXTATTACHMENT);
    attachmentNode.setProperty(PUBLICATIONSYSTEM_DISPLAYNAME, displayName);
    contentNode.addNode(attachmentNode);
    // Copy the source object into the target bucket; the returned metadata describes the stored file.
    S3ObjectMetadata s3meta = storageManager.uploadFileToS3(docbase, resource);
    ContentNode resourceNode = new ContentNode(childNodeName, EXTERNALSTORAGE_RESOURCE);
    // The size is set to a fixed placeholder value rather than the real object size.
    resourceNode.setProperty(EXTERNALSTORAGE_SIZE, ContentPropertyType.LONG, "10000");
    resourceNode.setProperty(EXTERNALSTORAGE_URL, s3meta.getUrl());
    resourceNode.setProperty(EXTERNALSTORAGE_REFERENCE, s3meta.getReference());
    // The binary content is empty because the actual file lives in S3, not in the JCR.
    addFileRelatedProperties(resourceNode, new BinaryValue(new byte[0]), s3meta.getMimeType(), s3meta.getFileName());
    attachmentNode.addNode(resourceNode);
}
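A minimal sketch of how a transformer subclass might call this; every argument value below is an illustrative assumption:

// All values are hypothetical; contentNode is the parent node being populated.
populateAndCreateExternalAttachmentNode(
        contentNode,
        "attachments-v3",                               // hypothetical nodeTypeName
        "Annual report (PDF)",                          // displayName shown for the node
        "s3://source-bucket/reports/annual-report.pdf", // S3 URL of the source file
        "publicationsystem:attachmentResource");        // hypothetical childNodeName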
Use of com.formkiq.aws.s3.S3ObjectMetadata in project pravega by pravega.
In class ExtendedS3Storage, method doConcatWithMultipartUpload.
private void doConcatWithMultipartUpload(String targetPath, String sourceSegment, long offset) {
    String uploadId = client.initiateMultipartUpload(config.getBucket(), targetPath);
    SortedSet<MultipartPartETag> partEtags = new TreeSet<>();
    // Part 1: the first `offset` bytes of the target itself, i.e. its existing content.
    CopyPartRequest copyRequest = new CopyPartRequest(config.getBucket(), targetPath,
            config.getBucket(), targetPath, uploadId, 1)
            .withSourceRange(Range.fromOffsetLength(0, offset));
    CopyPartResult copyResult = client.copyPart(copyRequest);
    partEtags.add(new MultipartPartETag(copyResult.getPartNumber(), copyResult.getETag()));
    // Part 2: the whole source segment, appended after the target's existing content.
    S3ObjectMetadata metadataResult = client.getObjectMetadata(config.getBucket(),
            config.getPrefix() + sourceSegment);
    long objectSize = metadataResult.getContentLength(); // in bytes
    copyRequest = new CopyPartRequest(config.getBucket(), config.getPrefix() + sourceSegment,
            config.getBucket(), targetPath, uploadId, 2)
            .withSourceRange(Range.fromOffsetLength(0, objectSize));
    copyResult = client.copyPart(copyRequest);
    partEtags.add(new MultipartPartETag(copyResult.getPartNumber(), copyResult.getETag()));
    // Complete the upload, replacing the target with the concatenated object.
    client.completeMultipartUpload(
            new CompleteMultipartUploadRequest(config.getBucket(), targetPath, uploadId)
                    .withParts(partEtags));
}
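To make the two source ranges concrete, a worked example with illustrative numbers:

// Suppose the target currently holds offset = 4096 bytes and the source object is 1024 bytes long.
// Part 1: Range.fromOffsetLength(0, 4096) -> bytes 0..4095 of targetPath (its current content).
// Part 2: Range.fromOffsetLength(0, 1024) -> bytes 0..1023 of the source object.
// After completeMultipartUpload, targetPath holds 5120 bytes: its original content followed by the source.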
Use of com.formkiq.aws.s3.S3ObjectMetadata in project pravega by pravega.
In class ExtendedS3ChunkStorage, method doConcat.
@Override
public int doConcat(ConcatArgument[] chunks) throws ChunkStorageException {
    int totalBytesConcatenated = 0;
    String targetPath = getObjectPath(chunks[0].getName());
    String uploadId = null;
    boolean isCompleted = false;
    try {
        int partNumber = 1;
        SortedSet<MultipartPartETag> partEtags = new TreeSet<>();
        uploadId = client.initiateMultipartUpload(config.getBucket(), targetPath);
        // Check whether the target (the first chunk) exists.
        if (!checkExists(chunks[0].getName())) {
            throw new ChunkNotFoundException(chunks[0].getName(), "doConcat - Target segment does not exist");
        }
        // Copy each non-empty chunk as one part; chunks[0] contributes the target's existing content.
        for (int i = 0; i < chunks.length; i++) {
            if (0 != chunks[i].getLength()) {
                val sourceHandle = chunks[i];
                S3ObjectMetadata metadataResult = client.getObjectMetadata(config.getBucket(),
                        getObjectPath(sourceHandle.getName()));
                long objectSize = metadataResult.getContentLength(); // in bytes
                Preconditions.checkState(objectSize >= chunks[i].getLength());
                CopyPartRequest copyRequest = new CopyPartRequest(config.getBucket(),
                        getObjectPath(sourceHandle.getName()), config.getBucket(), targetPath,
                        uploadId, partNumber++)
                        .withSourceRange(Range.fromOffsetLength(0, chunks[i].getLength()));
                CopyPartResult copyResult = client.copyPart(copyRequest);
                partEtags.add(new MultipartPartETag(copyResult.getPartNumber(), copyResult.getETag()));
                totalBytesConcatenated += chunks[i].getLength();
            }
        }
        // Complete the upload, replacing the target with the concatenated object.
        client.completeMultipartUpload(new CompleteMultipartUploadRequest(config.getBucket(),
                targetPath, uploadId).withParts(partEtags));
        isCompleted = true;
    } catch (RuntimeException e) {
        // Caught separately to avoid the SpotBugs REC_CATCH_EXCEPTION warning
        // ("Exception is caught when Exception is not thrown").
        throw convertException(chunks[0].getName(), "doConcat", e);
    } catch (Exception e) {
        throw convertException(chunks[0].getName(), "doConcat", e);
    } finally {
        // Abort the multipart upload on any failure so no orphaned upload is left behind.
        if (!isCompleted && null != uploadId) {
            client.abortMultipartUpload(new AbortMultipartUploadRequest(config.getBucket(), targetPath, uploadId));
        }
    }
    return totalBytesConcatenated;
}
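The part numbering and byte accounting, traced with illustrative values:

// chunks = [target (length 4096), A (length 1024), B (length 0), C (length 2048)]
// part 1: first 4096 bytes of the target object itself
// part 2: first 1024 bytes of A
// B contributes no part (zero-length chunks are skipped)
// part 3: first 2048 bytes of C
// totalBytesConcatenated = 4096 + 1024 + 2048 = 7168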
Use of com.formkiq.aws.s3.S3ObjectMetadata in project pravega by pravega.
In class ExtendedS3Storage, method doCreate.
private SegmentProperties doCreate(String streamSegmentName) throws StreamSegmentExistsException {
    long traceId = LoggerHelpers.traceEnter(log, "create", streamSegmentName);
    if (!client.listObjects(config.getBucket(), config.getRoot() + streamSegmentName).getObjects().isEmpty()) {
        throw new StreamSegmentExistsException(streamSegmentName);
    }
    S3ObjectMetadata metadata = new S3ObjectMetadata();
    metadata.setContentLength(0L);
    PutObjectRequest request = new PutObjectRequest(config.getBucket(), config.getRoot() + streamSegmentName, null);
    AccessControlList acl = new AccessControlList();
    acl.addGrants(new Grant(new CanonicalUser(config.getAccessKey(), config.getAccessKey()), READ_WRITE_PERMISSION));
    request.setAcl(acl);
    /* The default behavior of putObject is to overwrite an existing object, which can cause data loss.
     * Here is one scenario in which data loss is observed:
     * 1. Host A owns the container and gets a create operation. It has not executed the putObject operation yet.
     * 2. Ownership changes and host B becomes the owner of the container. It picks up putObject from the queue and executes it.
     * 3. Host B gets a write operation, which executes successfully.
     * 4. Now host A schedules the putObject. This overwrites the write by host B.
     *
     * The solution is to implement put-if-absent behavior by using the If-None-Match header, as described here:
     * http://www.emc.com/techpubs/api/ecs/v3-0-0-0/S3ObjectOperations_createOrUpdateObject_7916bd6f789d0ae0ff39961c0e660d00_ba672412ac371bb6cf4e69291344510e_detail.htm
     * However, in some versions of the extended S3 API, all calls to the putObject API fail if made with
     * request.setIfNoneMatch("*"). See: https://github.com/pravega/pravega/issues/1564
     *
     * Because the issue is fixed in other versions of the extended S3 implementation, the following code
     * sets the If-None-Match header based on configuration.
     */
    if (config.isUseNoneMatch()) {
        request.setIfNoneMatch("*");
    }
    client.putObject(request);
    LoggerHelpers.traceLeave(log, "create", traceId);
    return doGetStreamSegmentInfo(streamSegmentName);
}
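Spelled out, the put-if-absent contract that the If-None-Match branch enables looks roughly like this; `bucket` and `key` are placeholders, and surfacing the failed precondition as a RuntimeException is an assumption about the ECS client, not something confirmed by this excerpt:

// Conceptual sketch only; `bucket` and `key` are hypothetical placeholders.
PutObjectRequest createRequest = new PutObjectRequest(bucket, key, null);
createRequest.setIfNoneMatch("*"); // ask the server to reject the PUT if `key` already exists
try {
    client.putObject(createRequest); // succeeds only for a brand-new object
} catch (RuntimeException e) {
    // A failed precondition means another host created the object first;
    // report "already exists" instead of silently overwriting (the data-loss scenario above).
    throw new StreamSegmentExistsException(streamSegmentName);
}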