Use of com.emc.object.s3.S3ObjectMetadata in project hippo by NHS-digital-website: class ExternalFileCopyTaskTest, method newResourceNode.
private Node newResourceNode(final Node copiedDocUnpublishedVariant, final String pathSuffix, final String oldS3ObjectReference, final String newS3ObjectReference, final String newS3ObjectUrl, final String fileName) throws RepositoryException {
    // Build the JCR fixture: an external-attachment node under the unpublished
    // variant, containing a single external-storage resource node that still
    // points at the old S3 object.
    final Node attachmentNode = copiedDocUnpublishedVariant.addNode(
        "copied-doc-ext-attachment" + pathSuffix, "publicationsystem:extattachment");
    final Node resourceNode = attachmentNode.addNode(
        "copied-doc-ext-resource", "externalstorage:resource");
    resourceNode.setProperty(PROPERTY_EXTERNAL_STORAGE_FILE_NAME, fileName);
    resourceNode.setProperty(PROPERTY_EXTERNAL_STORAGE_REFERENCE, oldS3ObjectReference);

    // Stub the metadata the connector reports after copying the old object,
    // and make the connector return it for this reference/file-name pair.
    final S3ObjectMetadata copiedObjectMetadata = mock(S3ObjectMetadata.class);
    given(copiedObjectMetadata.getReference()).willReturn(newS3ObjectReference);
    given(copiedObjectMetadata.getUrl()).willReturn(newS3ObjectUrl);
    given(s3Connector.copyFile(oldS3ObjectReference, fileName)).willReturn(copiedObjectMetadata);

    return resourceNode;
}
Use of com.emc.object.s3.S3ObjectMetadata in project hippo by NHS-digital-website: class ResourceUploadPlugin, method handleUpload.
/**
 * Handles a file upload from the form: streams the upload to S3 via the pooled
 * connector, then records the resulting object metadata on the resource node
 * backing this plugin.
 *
 * @param upload the {@link FileUpload} containing the upload information
 * @throws FileUploadViolationException if the S3 upload or the JCR update fails;
 *         the original exception is logged in full and its message is propagated
 */
private void handleUpload(FileUpload upload) throws FileUploadViolationException {
    final PooledS3Connector s3Connector = HippoServiceRegistry.getService(PooledS3Connector.class);
    String fileName = upload.getClientFileName();
    String mimeType = upload.getContentType();
    try {
        // wrapCheckedException adapts the checked upload::getInputStream call
        // to the connector's supplier contract.
        final S3ObjectMetadata s3ObjectMetadata =
            s3Connector.upload(wrapCheckedException(upload::getInputStream), fileName, mimeType);
        JcrNodeModel nodeModel = (JcrNodeModel) this.getDefaultModel();
        Node node = nodeModel.getNode();
        // Fix: setResourceProperties used to be wrapped in its own try/catch that
        // re-threw a RuntimeException, so the message surfaced to the user was the
        // wrapper's toString() rather than the real cause's message. The outer
        // catch already handles Exception, so let it see the original exception.
        setResourceProperties(node, s3ObjectMetadata);
    } catch (Exception ex) {
        log.error("Cannot upload resource", ex);
        throw new FileUploadViolationException(ex.getMessage());
    }
}
Use of com.emc.object.s3.S3ObjectMetadata in project pravega by pravega: class ExtendedS3Storage, method doCreate.
/**
 * Creates an empty segment object in the configured bucket and returns a write
 * handle for it, failing if an object with that key already exists.
 *
 * @param streamSegmentName name of the segment to create
 * @return a write {@link SegmentHandle} for the new segment
 * @throws StreamSegmentExistsException if an object for the segment already exists
 */
private SegmentHandle doCreate(String streamSegmentName) throws StreamSegmentExistsException {
    long traceId = LoggerHelpers.traceEnter(log, "create", streamSegmentName);
    Timer timer = new Timer();
    // Existence pre-check. Note this check-then-put is not atomic on its own;
    // see the IfNoneMatch discussion below.
    if (!client.listObjects(config.getBucket(), config.getPrefix() + streamSegmentName).getObjects().isEmpty()) {
        throw new StreamSegmentExistsException(streamSegmentName);
    }
    // Fix: a zero-length S3ObjectMetadata used to be built here but was never
    // attached to the request, so it was dead code and has been removed.
    // NOTE(review): ExtendedS3ChunkStorage.doCreate attaches such metadata via
    // withObjectMetadata(...) — confirm whether that was the intent here too.
    PutObjectRequest request = new PutObjectRequest(config.getBucket(), config.getPrefix() + streamSegmentName, null);
    AccessControlList acl = new AccessControlList();
    acl.addGrants(new Grant(new CanonicalUser(config.getAccessKey(), config.getAccessKey()), READ_WRITE_PERMISSION));
    request.setAcl(acl);
    /* Default behavior of putObject is to overwrite an existing object. This behavior can cause data loss.
     * Here is one of the scenarios in which data loss is observed:
     * 1. Host A owns the container and gets a create operation. It has not executed the putObject operation yet.
     * 2. Ownership changes and host B becomes the owner of the container. It picks up putObject from the queue, executes it.
     * 3. Host B gets a write operation which executes successfully.
     * 4. Now host A schedules the putObject. This will overwrite the write by host B.
     *
     * The solution for this issue is to implement put-if-absent behavior by using Set-If-None-Match header as described here:
     * http://www.emc.com/techpubs/api/ecs/v3-0-0-0/S3ObjectOperations_createOrUpdateObject_7916bd6f789d0ae0ff39961c0e660d00_ba672412ac371bb6cf4e69291344510e_detail.htm
     * But this does not work. Currently all the calls to putObject API fail if made with request.setIfNoneMatch("*").
     * Once the issue with extended S3 API is fixed, addition of this one line will ensure put-if-absent semantics.
     * See: https://github.com/pravega/pravega/issues/1564
     *
     * This issue is fixed in some versions of extended S3 implementation. The following code sets the IfNoneMatch
     * flag based on configuration.
     */
    if (config.isUseNoneMatch()) {
        request.setIfNoneMatch("*");
    }
    client.putObject(request);
    // Metrics: record latency and count for successful creates.
    Duration elapsed = timer.getElapsed();
    ExtendedS3Metrics.CREATE_LATENCY.reportSuccessEvent(elapsed);
    ExtendedS3Metrics.CREATE_COUNT.inc();
    log.debug("Create segment={} latency={}.", streamSegmentName, elapsed.toMillis());
    LoggerHelpers.traceLeave(log, "create", traceId);
    return ExtendedS3SegmentHandle.getWriteHandle(streamSegmentName);
}
Use of com.emc.object.s3.S3ObjectMetadata in project pravega by pravega: class ExtendedS3Storage, method doGetStreamSegmentInfo.
private StreamSegmentInformation doGetStreamSegmentInfo(String streamSegmentName) {
    long traceId = LoggerHelpers.traceEnter(log, "getStreamSegmentInfo", streamSegmentName);
    final String objectKey = config.getPrefix() + streamSegmentName;

    // Fetch object metadata (length, last-modified) and its ACL from the store.
    final S3ObjectMetadata objectMetadata = client.getObjectMetadata(config.getBucket(), objectKey);
    final AccessControlList objectAcl = client.getObjectAcl(config.getBucket(), objectKey);

    // The segment is considered sealed when no grant carries write permission.
    final boolean writable = objectAcl.getGrants()
        .stream()
        .anyMatch(grant -> grant.getPermission().compareTo(Permission.WRITE) >= 0);

    final StreamSegmentInformation info = StreamSegmentInformation.builder()
        .name(streamSegmentName)
        .length(objectMetadata.getContentLength())
        .sealed(!writable)
        .lastModified(new ImmutableDate(objectMetadata.getLastModified().toInstant().toEpochMilli()))
        .build();
    LoggerHelpers.traceLeave(log, "getStreamSegmentInfo", traceId, streamSegmentName);
    return info;
}
Use of com.emc.object.s3.S3ObjectMetadata in project pravega by pravega: class ExtendedS3ChunkStorage, method doCreate.
@Override
protected ChunkHandle doCreate(String chunkName) throws ChunkStorageException {
    Preconditions.checkState(supportsAppend, "supportsAppend is false.");
    try {
        final String objectPath = getObjectPath(chunkName);

        // Refuse to create a chunk whose backing object already exists.
        final boolean alreadyExists =
            !client.listObjects(config.getBucket(), objectPath).getObjects().isEmpty();
        if (alreadyExists) {
            throw new ChunkAlreadyExistsException(chunkName, "Chunk already exists");
        }

        // Create the object empty: zero content length, no payload stream.
        final S3ObjectMetadata emptyObjectMetadata = new S3ObjectMetadata();
        emptyObjectMetadata.setContentLength(0L);
        final PutObjectRequest putRequest =
            new PutObjectRequest(config.getBucket(), objectPath, null)
                .withObjectMetadata(emptyObjectMetadata);

        // Put-if-absent semantics, where the extended S3 implementation supports it.
        if (config.isUseNoneMatch()) {
            putRequest.setIfNoneMatch("*");
        }
        client.putObject(putRequest);

        return ChunkHandle.writeHandle(chunkName);
    } catch (Exception e) {
        throw convertException(chunkName, "doCreate", e);
    }
}
Aggregations