use of uk.nhs.digital.externalstorage.s3.S3ObjectMetadata in project hippo by NHS-digital-website.
the class ResourceUploadPlugin method handleUpload.
/**
 * Handles the file upload from the form.
 *
 * @param upload the {@link FileUpload} containing the upload information
 * @throws FileUploadViolationException if the file cannot be uploaded to S3
 */
private void handleUpload(FileUpload upload) throws FileUploadViolationException {
    S3Connector s3Connector = HippoServiceRegistry.getService(S3Connector.class);
    String fileName = upload.getClientFileName();
    String mimeType = upload.getContentType();
    JcrNodeModel nodeModel = (JcrNodeModel) this.getDefaultModel();
    Node node = nodeModel.getNode();
    try {
        // Stream the uploaded file to S3 and record the returned metadata on the JCR node.
        S3ObjectMetadata metadata = s3Connector.uploadFile(upload.getInputStream(), fileName, mimeType);
        setResourceProperties(node, metadata);
    } catch (Exception ex) {
        log.error("Cannot upload resource", ex);
        throw new FileUploadViolationException(ex.getMessage());
    }
}
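setResourceProperties is not shown on this page. A minimal sketch of what it plausibly does follows; the JCR property names and the S3ObjectMetadata accessors (getReference, getFileName, getSize, getMimeType) are assumptions for illustration, not confirmed by the snippet above. Node and RepositoryException are the javax.jcr types already used above.

private void setResourceProperties(Node node, S3ObjectMetadata metadata) throws RepositoryException {
    // Hypothetical property names; the real plugin may use a different namespace.
    node.setProperty("externalstorage:reference", metadata.getReference());
    node.setProperty("externalstorage:fileName", metadata.getFileName());
    node.setProperty("externalstorage:size", metadata.getSize());
    node.setProperty("externalstorage:mimeType", metadata.getMimeType());
}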
use of com.emc.object.s3.S3ObjectMetadata in project pravega by pravega (note: this is the EMC extended S3 client's S3ObjectMetadata, not the uk.nhs.digital class above).
the class ExtendedS3Storage method doCreate.
private SegmentProperties doCreate(String streamSegmentName) throws StreamSegmentExistsException {
    long traceId = LoggerHelpers.traceEnter(log, "create", streamSegmentName);
    if (!client.listObjects(config.getBucket(), config.getRoot() + streamSegmentName).getObjects().isEmpty()) {
        throw new StreamSegmentExistsException(streamSegmentName);
    }
    // Note: this zero-length metadata object is created but never attached to the request below.
    S3ObjectMetadata metadata = new S3ObjectMetadata();
    metadata.setContentLength(0L);
    PutObjectRequest request = new PutObjectRequest(config.getBucket(), config.getRoot() + streamSegmentName, null);
    // Grant the configured account read/write access to the new object.
    AccessControlList acl = new AccessControlList();
    acl.addGrants(new Grant(new CanonicalUser(config.getAccessKey(), config.getAccessKey()), READ_WRITE_PERMISSION));
    request.setAcl(acl);
    /* The default behavior of putObject is to overwrite an existing object, which can cause data loss.
     * Here is one scenario in which data loss is observed:
     * 1. Host A owns the container and gets a create operation. It has not executed the putObject operation yet.
     * 2. Ownership changes and host B becomes the owner of the container. It picks up the putObject from the queue and executes it.
     * 3. Host B gets a write operation, which executes successfully.
     * 4. Now host A schedules its putObject. This overwrites the write by host B.
     *
     * The solution is to implement put-if-absent behavior using the Set-If-None-Match header, as described here:
     * http://www.emc.com/techpubs/api/ecs/v3-0-0-0/S3ObjectOperations_createOrUpdateObject_7916bd6f789d0ae0ff39961c0e660d00_ba672412ac371bb6cf4e69291344510e_detail.htm
     * However, on some extended S3 versions this does not work: all calls to the putObject API fail if made with
     * request.setIfNoneMatch("*"). Once that issue is fixed, this one line ensures put-if-absent semantics.
     * See: https://github.com/pravega/pravega/issues/1564
     *
     * The issue is fixed in some versions of the extended S3 implementation, so the following code sets the
     * IfNoneMatch flag based on configuration.
     */
    if (config.isUseNoneMatch()) {
        request.setIfNoneMatch("*");
    }
    client.putObject(request);
    LoggerHelpers.traceLeave(log, "create", traceId);
    return doGetStreamSegmentInfo(streamSegmentName);
}
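When config.isUseNoneMatch() is enabled and the key already exists, the put is expected to be rejected instead of silently overwriting. A hedged sketch of how the putObject call above could surface that rejection as a segment-exists error; the HTTP 412 (Precondition Failed) status and the getHttpCode() accessor on the EMC client's S3Exception are assumptions here, not confirmed by this page.

try {
    client.putObject(request);
} catch (S3Exception e) {
    // Assumed: a failed If-None-Match precondition comes back as HTTP 412.
    if (e.getHttpCode() == 412) {
        throw new StreamSegmentExistsException(streamSegmentName);
    }
    throw e;
}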
use of com.emc.object.s3.S3ObjectMetadata in project pravega by pravega.
the class ExtendedS3Storage method doConcat.
/**
 * Concat is implemented using the extended S3 implementation of the multipart copy API. Please see here for
 * more detail on multipart copy:
 * http://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingLLJavaMPUapi.html
 *
 * The multipart copy is an atomic operation: we schedule two parts and commit them atomically using a single
 * completeMultipartUpload call. Specifically, to concatenate, we copy the target segment T and the
 * source segment S onto T, so essentially we are doing T <- T + S.
 */
private Void doConcat(SegmentHandle targetHandle, long offset, String sourceSegment) throws StreamSegmentNotExistsException {
    Preconditions.checkArgument(!targetHandle.isReadOnly(), "target handle must not be read-only.");
    long traceId = LoggerHelpers.traceEnter(log, "concat", targetHandle.getSegmentName(), offset, sourceSegment);
    SortedSet<MultipartPartETag> partEtags = new TreeSet<>();
    String targetPath = config.getRoot() + targetHandle.getSegmentName();
    String uploadId = client.initiateMultipartUpload(config.getBucket(), targetPath);
    // Check whether the target exists.
    if (!doExists(targetHandle.getSegmentName())) {
        throw new StreamSegmentNotExistsException(targetHandle.getSegmentName());
    }
    // Check whether the source is sealed.
    SegmentProperties si = doGetStreamSegmentInfo(sourceSegment);
    Preconditions.checkState(si.isSealed(), "Cannot concat segment '%s' into '%s' because it is not sealed.",
            sourceSegment, targetHandle.getSegmentName());
    // Copy the first part: the existing target content, [0, offset).
    CopyPartRequest copyRequest = new CopyPartRequest(config.getBucket(), targetPath, config.getBucket(), targetPath, uploadId, 1)
            .withSourceRange(Range.fromOffsetLength(0, offset));
    CopyPartResult copyResult = client.copyPart(copyRequest);
    partEtags.add(new MultipartPartETag(copyResult.getPartNumber(), copyResult.getETag()));
    // Copy the second part: the entire source segment.
    S3ObjectMetadata metadataResult = client.getObjectMetadata(config.getBucket(), config.getRoot() + sourceSegment);
    long objectSize = metadataResult.getContentLength(); // in bytes
    copyRequest = new CopyPartRequest(config.getBucket(), config.getRoot() + sourceSegment, config.getBucket(), targetPath, uploadId, 2)
            .withSourceRange(Range.fromOffsetLength(0, objectSize));
    copyResult = client.copyPart(copyRequest);
    partEtags.add(new MultipartPartETag(copyResult.getPartNumber(), copyResult.getETag()));
    // Commit both parts atomically, then delete the now-concatenated source object.
    client.completeMultipartUpload(new CompleteMultipartUploadRequest(config.getBucket(), targetPath, uploadId).withParts(partEtags));
    client.deleteObject(config.getBucket(), config.getRoot() + sourceSegment);
    LoggerHelpers.traceLeave(log, "concat", traceId);
    return null;
}
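The two-part pattern above generalizes to concatenating any number of sealed sources in one atomic commit. A sketch under the same context (config, client, targetPath, offset), using only the client calls already shown; 'sources' is a hypothetical List<String> of fully qualified source keys, and note that S3 multipart rules typically require every part except the last to be at least 5 MB.

SortedSet<MultipartPartETag> parts = new TreeSet<>();
String uploadId = client.initiateMultipartUpload(config.getBucket(), targetPath);
int partNumber = 1;
// Part 1: the existing target content up to 'offset'.
CopyPartResult r = client.copyPart(new CopyPartRequest(config.getBucket(), targetPath, config.getBucket(), targetPath, uploadId, partNumber++)
        .withSourceRange(Range.fromOffsetLength(0, offset)));
parts.add(new MultipartPartETag(r.getPartNumber(), r.getETag()));
// One part per source, each copied in full.
for (String sourceKey : sources) {
    long size = client.getObjectMetadata(config.getBucket(), sourceKey).getContentLength();
    r = client.copyPart(new CopyPartRequest(config.getBucket(), sourceKey, config.getBucket(), targetPath, uploadId, partNumber++)
            .withSourceRange(Range.fromOffsetLength(0, size)));
    parts.add(new MultipartPartETag(r.getPartNumber(), r.getETag()));
}
// All parts become visible atomically on commit.
client.completeMultipartUpload(new CompleteMultipartUploadRequest(config.getBucket(), targetPath, uploadId).withParts(parts));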
use of com.emc.object.s3.S3ObjectMetadata in project pravega by pravega.
the class ExtendedS3Storage method doGetStreamSegmentInfo.
private StreamSegmentInformation doGetStreamSegmentInfo(String streamSegmentName) {
    long traceId = LoggerHelpers.traceEnter(log, "getStreamSegmentInfo", streamSegmentName);
    S3ObjectMetadata result = client.getObjectMetadata(config.getBucket(), config.getRoot() + streamSegmentName);
    AccessControlList acls = client.getObjectAcl(config.getBucket(), config.getRoot() + streamSegmentName);
    // A segment reads as sealed when no ACL grant carries WRITE (or stronger) permission.
    boolean canWrite = acls.getGrants().stream().anyMatch(grant -> grant.getPermission().compareTo(Permission.WRITE) >= 0);
    StreamSegmentInformation information = StreamSegmentInformation.builder()
            .name(streamSegmentName)
            .length(result.getContentLength())
            .sealed(!canWrite)
            .lastModified(new ImmutableDate(result.getLastModified().toInstant().toEpochMilli()))
            .build();
    LoggerHelpers.traceLeave(log, "getStreamSegmentInfo", traceId, streamSegmentName);
    return information;
}
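doSeal is not shown on this page, but the sealed flag above implies how it works: a segment is sealed by removing write permission from the object's ACL. A minimal sketch of sealing expressed that way, in the same context; the three-argument setObjectAcl overload and Permission.READ are assumptions about the EMC client API, not confirmed here.

AccessControlList acl = client.getObjectAcl(config.getBucket(), config.getRoot() + streamSegmentName);
// Drop all write-capable grants, leaving the object readable only,
// so that doGetStreamSegmentInfo reports sealed == true.
acl.getGrants().removeIf(grant -> grant.getPermission().compareTo(Permission.WRITE) >= 0);
acl.addGrants(new Grant(new CanonicalUser(config.getAccessKey(), config.getAccessKey()), Permission.READ));
client.setObjectAcl(config.getBucket(), config.getRoot() + streamSegmentName, acl);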
use of com.emc.object.s3.S3ObjectMetadata in project pravega by pravega.
the class S3FileSystemImpl method getObjectMetadata.
@Override
public S3ObjectMetadata getObjectMetadata(String bucketName, String key) {
    S3ObjectMetadata metadata = new S3ObjectMetadata();
    AclSize data = aclMap.get(key);
    if (data == null) {
        throw new S3Exception("NoSuchKey", HttpStatus.SC_NOT_FOUND, "NoSuchKey", "");
    }
    // Content length comes from the in-memory ACL/size map, not from the backing file;
    // only the last-modified timestamp is read from the filesystem.
    metadata.setContentLength(data.getSize());
    Path path = Paths.get(this.baseDir, bucketName, key);
    metadata.setLastModified(new Date(path.toFile().lastModified()));
    return metadata;
}
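Because this test double resolves metadata from its in-memory aclMap, a key that was never written through the mock fails the same way the real client does. A short usage sketch; 'fileSystem' and the bucket/key names are illustrative only.

try {
    fileSystem.getObjectMetadata("test-bucket", "missing-key");
} catch (S3Exception e) {
    // Expected: errorCode "NoSuchKey" with HTTP 404, matching the throw above.
}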