Use of com.amazonaws.services.s3.model.PutObjectRequest in project esop by instaclustr: class BaseS3Backuper, method uploadText.
/**
 * Uploads the given text as an S3 object at the reference's canonical path and
 * blocks until the transfer completes.
 *
 * @param text            content to upload; encoded as UTF-8 bytes
 * @param objectReference destination; must be an {@code S3RemoteObjectReference}
 * @throws Exception if the transfer fails or the wait for completion is interrupted
 */
@Override
public void uploadText(final String text, final RemoteObjectReference objectReference) throws Exception {
    final S3RemoteObjectReference s3RemoteObjectReference = (S3RemoteObjectReference) objectReference;

    // Encode once with an explicit charset. The original called text.getBytes()
    // twice with the platform-default charset, which is both wasteful and
    // non-portable (content length could disagree with the uploaded bytes on a
    // platform whose default charset differs).
    final byte[] bytes = text.getBytes(java.nio.charset.StandardCharsets.UTF_8);

    // Plain ObjectMetadata instead of double-brace initialization, which creates
    // an anonymous subclass holding a hidden reference to the enclosing instance.
    final ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(bytes.length);

    final PutObjectRequest putObjectRequest = new PutObjectRequest(request.storageLocation.bucket,
                                                                   s3RemoteObjectReference.canonicalPath,
                                                                   new ByteArrayInputStream(bytes),
                                                                   metadata);

    transferManager.upload(putObjectRequest, new UploadProgressListener(s3RemoteObjectReference)).waitForCompletion();
}
Use of com.amazonaws.services.s3.model.PutObjectRequest in project esop by instaclustr: class BaseS3Backuper, method uploadFile.
/**
 * Uploads the given stream as an S3 object at the reference's canonical path and
 * blocks until the transfer completes.
 *
 * @param size            exact number of bytes the stream will produce; sent as the
 *                        Content-Length so the SDK need not buffer the stream
 * @param localFileStream source of the object's bytes; not closed by this method
 * @param objectReference destination; must be an {@code S3RemoteObjectReference}
 * @throws Exception if the transfer fails or the wait for completion is interrupted
 */
@Override
public void uploadFile(final long size, final InputStream localFileStream, final RemoteObjectReference objectReference) throws Exception {
    final S3RemoteObjectReference s3RemoteObjectReference = (S3RemoteObjectReference) objectReference;

    // Plain ObjectMetadata instead of double-brace initialization, which creates
    // an anonymous subclass holding a hidden reference to the enclosing instance.
    final ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(size);

    final PutObjectRequest putObjectRequest = new PutObjectRequest(request.storageLocation.bucket,
                                                                   s3RemoteObjectReference.canonicalPath,
                                                                   localFileStream,
                                                                   metadata);

    transferManager.upload(putObjectRequest, new UploadProgressListener(s3RemoteObjectReference)).waitForCompletion();
}
Use of com.amazonaws.services.s3.model.PutObjectRequest in project solarnetwork-central by SolarNetwork: class S3DatumExportDestinationService, method export.
/**
 * Exports the given resources to S3, one object per resource, reporting overall
 * progress through the supplied listener.
 *
 * Destination settings (bucket URI, storage class, path template) come from the
 * service properties of the configuration's destination configuration.
 */
@Override
public void export(Configuration config, Iterable<DatumExportResource> resources, Map<String, ?> runtimeProperties, ProgressListener<DatumExportService> progressListener) throws IOException {
// Validate required inputs up front; all failures surface as IOException per this
// method's declared contract.
if (config == null) {
throw new IOException("No configuration provided.");
}
if (resources == null) {
throw new IOException("No export resource provided.");
}
DestinationConfiguration destConfig = config.getDestinationConfiguration();
if (destConfig == null) {
throw new IOException("No destination configuration provided.");
}
// Bind the loosely-typed service properties onto a typed S3 properties bean.
S3DestinationProperties props = new S3DestinationProperties();
ClassUtils.setBeanProperties(props, destConfig.getServiceProperties(), true);
if (!props.isValid()) {
throw new IOException("Service configuration is not valid.");
}
// Materialize the iterable so the resource count is known; the count is needed to
// weight each resource's contribution to overall progress.
List<DatumExportResource> resourceList = StreamSupport.stream(resources.spliterator(), false).collect(Collectors.toList());
final int resourceCount = resourceList.size();
// Adapter from the AWS SDK's byte-level progress events to the export service's
// fractional progress. Each resource contributes 1/resourceCount of the total;
// overallProgress accumulates across all uploads.
// NOTE(review): progressChanged may be invoked from SDK threads while
// overallProgress is plain mutable state — assumes uploads here are sequential
// (putObject below is synchronous), so events do not interleave; confirm.
final com.amazonaws.event.ProgressListener s3ProgressListener = new com.amazonaws.event.ProgressListener() {
private double overallProgress = 0;
@Override
public void progressChanged(ProgressEvent progressEvent) {
ProgressEventType type = progressEvent.getEventType();
if (type == ProgressEventType.REQUEST_BYTE_TRANSFER_EVENT) {
// Fraction of this resource transferred by this event, scaled by the
// resource's share of the whole export.
double resourceProgress = (double) progressEvent.getBytesTransferred() / (double) progressEvent.getBytes();
overallProgress += (resourceProgress / resourceCount);
progressListener.progressChanged(S3DatumExportDestinationService.this, overallProgress);
}
}
};
AmazonS3 client = getClient(props);
AmazonS3URI uri = props.getUri();
for (ListIterator<DatumExportResource> itr = resourceList.listIterator(); itr.hasNext(); ) {
// if we have >1 resource to upload, then we'll insert an index suffix in the key name, like -1, -2, etc.
// idx == 0 signals "no suffix" to getDestinationPath.
int idx = (resourceCount > 1 ? itr.nextIndex() + 1 : 0);
DatumExportResource resource = itr.next();
String key = getDestinationPath(props, runtimeProperties, idx);
// Content type is optional; length and last-modified are always propagated.
ObjectMetadata objectMetadata = new ObjectMetadata();
if (resource.getContentType() != null) {
objectMetadata.setContentType(resource.getContentType());
}
objectMetadata.setContentLength(resource.contentLength());
objectMetadata.setLastModified(new Date(resource.lastModified()));
try (InputStream in = resource.getInputStream()) {
PutObjectRequest req = new PutObjectRequest(uri.getBucket(), key, in, objectMetadata);
if (props.getStorageClass() != null) {
req.setStorageClass(props.getStorageClass());
}
// Only wire up the SDK listener when the caller asked for progress.
if (progressListener != null) {
req.withGeneralProgressListener(s3ProgressListener);
}
client.putObject(req);
} catch (AmazonServiceException e) {
// Service-side rejection: log diagnostic detail, rethrow as a remote-service
// failure (with the cause preserved).
log.warn("AWS error: {}; HTTP code {}; AWS code {}; type {}; request ID {}", e.getMessage(), e.getStatusCode(), e.getErrorCode(), e.getErrorType(), e.getRequestId());
throw new RemoteServiceException("Error putting S3 object at " + key, e);
} catch (AmazonClientException e) {
// Client/communication failure: surface as IOException per method contract.
log.debug("Error communicating with AWS: {}", e.getMessage());
throw new IOException("Error communicating with AWS", e);
}
}
}
Use of com.amazonaws.services.s3.model.PutObjectRequest in project trellis-extensions by trellis-ldp: class S3BinaryService, method bufferUpload.
/**
 * Spools the given stream to a local file and then uploads that file to S3.
 *
 * Writing to disk first lets the SDK parallelize the PUT for large objects,
 * which it cannot do with a one-shot stream of unknown length.
 *
 * @param metadata binary metadata; any MIME type present becomes the object's content type
 * @param stream   source bytes; not closed by this method
 * @param path     local buffer file location; always deleted before returning
 * @throws IOException if buffering the stream locally fails
 */
private void bufferUpload(final BinaryMetadata metadata, final InputStream stream, final Path path) throws IOException {
    try {
        try (final OutputStream out = Files.newOutputStream(path, WRITE)) {
            IOUtils.copy(stream, out);
        }
        final ObjectMetadata objectMetadata = new ObjectMetadata();
        metadata.getMimeType().ifPresent(objectMetadata::setContentType);
        client.putObject(new PutObjectRequest(bucketName, getKey(metadata.getIdentifier()), path.toFile())
                .withMetadata(objectMetadata));
    } finally {
        // Remove the local buffer whether or not the upload succeeded.
        Files.delete(path);
    }
}
Use of com.amazonaws.services.s3.model.PutObjectRequest in project trellis-extensions by trellis-ldp: class S3MementoService, method put.
/**
 * Asynchronously persists a memento of the given resource to S3.
 *
 * The resource's quads are serialized as N-Quads into a local temp file, its
 * descriptive properties are captured as S3 user metadata, and the file is
 * uploaded under a key derived from the resource identifier and its modification
 * time (truncated to seconds). The temp file is deleted after a successful upload;
 * deleteOnExit covers the failure path.
 *
 * @param resource the resource to snapshot
 * @return a stage that completes when the memento has been stored
 */
@Override
public CompletionStage<Void> put(final Resource resource) {
    return runAsync(() -> {
        try {
            final File file = createTempFile("trellis-memento-", ".nq");
            file.deleteOnExit();

            // Capture resource properties as S3 user metadata; optional values are
            // only added when present.
            final Map<String, String> metadata = new HashMap<>();
            metadata.put(S3Resource.INTERACTION_MODEL, resource.getInteractionModel().getIRIString());
            metadata.put(S3Resource.MODIFIED, resource.getModified().toString());
            resource.getContainer().map(IRI::getIRIString).ifPresent(c -> metadata.put(S3Resource.CONTAINER, c));
            resource.getBinaryMetadata().ifPresent(b -> {
                metadata.put(S3Resource.BINARY_LOCATION, b.getIdentifier().getIRIString());
                b.getMimeType().ifPresent(m -> metadata.put(S3Resource.BINARY_TYPE, m));
            });
            resource.getMembershipResource().map(IRI::getIRIString).ifPresent(m -> metadata.put(S3Resource.MEMBERSHIP_RESOURCE, m));
            resource.getMemberRelation().map(IRI::getIRIString).ifPresent(m -> metadata.put(S3Resource.MEMBER_RELATION, m));
            resource.getMemberOfRelation().map(IRI::getIRIString).ifPresent(m -> metadata.put(S3Resource.MEMBER_OF_RELATION, m));
            resource.getInsertedContentRelation().map(IRI::getIRIString).ifPresent(m -> metadata.put(S3Resource.INSERTED_CONTENT_RELATION, m));

            // Serialize the resource's quads to the temp file and record which
            // non-ignored named graphs it contains.
            try (final Dataset dataset = rdf.createDataset();
                    final OutputStream output = Files.newOutputStream(file.toPath());
                    final Stream<? extends Quad> quads = resource.stream()) {
                quads.forEachOrdered(dataset::add);
                metadata.put(S3Resource.METADATA_GRAPHS, dataset.getGraphNames().filter(IRI.class::isInstance).map(IRI.class::cast).filter(graph -> !IGNORE.contains(graph)).map(IRI::getIRIString).collect(joining(",")));
                RDFDataMgr.write(output, toJena(dataset), NQUADS);
            }

            final ObjectMetadata md = new ObjectMetadata();
            md.setContentType("application/n-quads");
            md.setUserMetadata(metadata);
            final PutObjectRequest req = new PutObjectRequest(bucketName, getKey(resource.getIdentifier(), resource.getModified().truncatedTo(SECONDS)), file);
            client.putObject(req.withMetadata(md));
            Files.delete(file.toPath());
        } catch (final Exception ex) {
            // Bug fix: the original message ("Error deleting locally buffered file")
            // misdescribed most failures — this block also covers serialization and
            // the S3 upload, not just the final delete.
            throw new TrellisRuntimeException("Error persisting memento resource", ex);
        }
    });
}
Aggregations