use of com.amazonaws.services.s3.model.ObjectMetadata in project mapfish-print by mapfish.
The class S3ReportStorage, method createPutRequest:
private PutObjectRequest createPutRequest(final String ref, final String filename, final String extension, final String mimeType, final File file) {
    final PutObjectRequest request = new PutObjectRequest(bucket, getKey(ref, filename, extension), file);
    request.withCannedAcl(CannedAccessControlList.PublicRead);
    final ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentType(mimeType);
    request.withMetadata(metadata);
    return request;
}
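For context, a request built this way is normally handed straight to an AmazonS3 client. A minimal sketch of that call, assuming default credentials and a hypothetical bucket, key, file, and MIME type (none of which come from the original class):

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.CannedAccessControlList;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;
import java.io.File;

public class PutRequestSketch {
    public static void main(String[] args) {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentType("application/pdf"); // illustrative MIME type
        PutObjectRequest request = new PutObjectRequest("my-bucket", "reports/report.pdf", new File("report.pdf"))
                .withCannedAcl(CannedAccessControlList.PublicRead)
                .withMetadata(metadata);
        s3.putObject(request); // synchronous single-request upload
    }
}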
use of com.amazonaws.services.s3.model.ObjectMetadata in project Synapse-Repository-Services by Sage-Bionetworks.
The class DataUploaderMultipartImpl, method uploadDataMultiPart:
@Override
public void uploadDataMultiPart(S3Token s3Token, File dataFile) throws SynapseException {
    // Formulate the request; note that S3 does not verify that the entire
    // upload matches this MD5, unlike the single-part upload
    String base64Md5;
    try {
        byte[] encoded = Base64.encodeBase64(Hex.decodeHex(s3Token.getMd5().toCharArray()));
        base64Md5 = new String(encoded, "ASCII");
    } catch (DecoderException ex) {
        throw new SynapseException(ex);
    } catch (UnsupportedEncodingException ex) {
        throw new SynapseException(ex);
    }
    ObjectMetadata s3Metadata = new ObjectMetadata();
    s3Metadata.setContentType(s3Token.getContentType());
    s3Metadata.setContentMD5(base64Md5);
    // S3 keys do not start with a slash, but sometimes we store them
    // that way in Synapse
    String s3Key = (s3Token.getPath().startsWith("/")) ? s3Token.getPath().substring(1) : s3Token.getPath();
    PutObjectRequest request = new PutObjectRequest(s3Token.getBucket(), s3Key, dataFile).withMetadata(s3Metadata);
    if (null != progressListener) {
        request.setProgressListener(progressListener);
    }
    request.setCannedAcl(CannedAccessControlList.BucketOwnerFullControl);
    // Initiate the multipart upload
    AWSCredentials credentials = new BasicSessionCredentials(s3Token.getAccessKeyId(), s3Token.getSecretAccessKey(), s3Token.getSessionToken());
    TransferManager tx = new TransferManager(credentials);
    Upload upload = tx.upload(request);
    if (null != progressListener) {
        progressListener.setUpload(upload);
    }
    // Synchronous; we can change this later if we want asynchronous behavior
    try {
        upload.waitForUploadResult();
    } catch (Exception e) {
        throw new SynapseException("AWS S3 multipart upload of " + dataFile + " failed", e);
    }
    tx.shutdownNow();
}
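The hex-to-base64 step above exists because the Content-MD5 header expects the base64 encoding of the raw 16 digest bytes, not of the hex string. A standalone sketch of the same conversion, assuming commons-codec on the classpath and an illustrative digest value:

import org.apache.commons.codec.DecoderException;
import org.apache.commons.codec.binary.Base64;
import org.apache.commons.codec.binary.Hex;
import java.nio.charset.StandardCharsets;

public class Md5HeaderSketch {
    public static void main(String[] args) throws DecoderException {
        String hexMd5 = "9e107d9d372bb6826bd81d3542a419d6"; // illustrative MD5 hex digest
        byte[] raw = Hex.decodeHex(hexMd5.toCharArray());   // the 16 raw digest bytes
        String base64Md5 = new String(Base64.encodeBase64(raw), StandardCharsets.US_ASCII);
        System.out.println(base64Md5); // the value S3 expects in the Content-MD5 header
    }
}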
use of com.amazonaws.services.s3.model.ObjectMetadata in project amazon-neptune-tools by awslabs.
The class ExportToS3NeptuneExportEventHandler, method uploadCompletionFileToS3:
private void uploadCompletionFileToS3(TransferManager transferManager, File directory, S3ObjectInfo outputS3ObjectInfo, ExportStats stats, GraphSchema graphSchema) throws IOException {
    if (StringUtils.isEmpty(completionFileS3Path)) {
        return;
    }
    if (directory == null || !directory.exists()) {
        logger.warn("Ignoring request to upload completion file to S3 because directory from which to upload files does not exist");
        return;
    }
    String completionFilename = s3UploadParams.createExportSubdirectory() ? directory.getName() : String.valueOf(System.currentTimeMillis());
    File completionFile = new File(localOutputPath, completionFilename + ".json");
    ObjectNode neptuneExportNode = JsonNodeFactory.instance.objectNode();
    completionFilePayload.set("neptuneExport", neptuneExportNode);
    neptuneExportNode.put("outputS3Path", outputS3ObjectInfo.toString());
    stats.addTo(neptuneExportNode, graphSchema);
    for (CompletionFileWriter completionFileWriter : completionFileWriters) {
        completionFileWriter.updateCompletionFile(completionFilePayload);
    }
    try (Writer writer = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(completionFile), UTF_8))) {
        ObjectWriter objectWriter = new ObjectMapper().writer().withDefaultPrettyPrinter();
        writer.write(objectWriter.writeValueAsString(completionFilePayload));
    }
    S3ObjectInfo completionFileS3ObjectInfo = new S3ObjectInfo(completionFileS3Path).replaceOrAppendKey("_COMPLETION_ID_", FilenameUtils.getBaseName(completionFile.getName()), completionFile.getName());
    logger.info("Uploading completion file to {}", completionFileS3ObjectInfo.key());
    try (InputStream inputStream = new FileInputStream(completionFile)) {
        ObjectMetadata objectMetadata = new ObjectMetadata();
        objectMetadata.setContentLength(completionFile.length());
        objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        PutObjectRequest putObjectRequest = new PutObjectRequest(completionFileS3ObjectInfo.bucket(), completionFileS3ObjectInfo.key(), inputStream, objectMetadata).withTagging(createObjectTags(profiles));
        Upload upload = transferManager.upload(putObjectRequest);
        upload.waitForUploadResult();
    } catch (InterruptedException e) {
        logger.warn(e.getMessage());
        Thread.currentThread().interrupt();
    }
}
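The setContentLength call matters here because the upload source is an InputStream rather than a File: the SDK cannot infer a stream's size, and without an explicit length it buffers the contents in memory to compute one. A minimal sketch of the same stream-upload pattern in isolation, with a hypothetical bucket, key, and local file:

import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.TransferManagerBuilder;
import com.amazonaws.services.s3.transfer.Upload;
import java.io.File;
import java.io.FileInputStream;
import java.io.InputStream;

public class StreamUploadSketch {
    public static void main(String[] args) throws Exception {
        File file = new File("completion.json"); // hypothetical local file
        TransferManager transferManager = TransferManagerBuilder.defaultTransferManager();
        try (InputStream in = new FileInputStream(file)) {
            ObjectMetadata metadata = new ObjectMetadata();
            metadata.setContentLength(file.length()); // required for streams: the SDK cannot infer the size
            metadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
            Upload upload = transferManager.upload(
                    new PutObjectRequest("my-bucket", "exports/completion.json", in, metadata));
            upload.waitForUploadResult(); // blocks until the upload finishes
        } finally {
            transferManager.shutdownNow();
        }
    }
}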
use of com.amazonaws.services.s3.model.ObjectMetadata in project amazon-neptune-tools by awslabs.
The class NeptuneMachineLearningExportEventHandlerV1, method uploadTrainingJobConfigurationFileToS3:
private void uploadTrainingJobConfigurationFileToS3(String filename, TransferManager transferManager, File trainingJobConfigurationFile, S3ObjectInfo outputS3ObjectInfo) throws IOException {
    S3ObjectInfo s3ObjectInfo = outputS3ObjectInfo.withNewKeySuffix(filename);
    try (InputStream inputStream = new FileInputStream(trainingJobConfigurationFile)) {
        ObjectMetadata objectMetadata = new ObjectMetadata();
        objectMetadata.setContentLength(trainingJobConfigurationFile.length());
        objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        PutObjectRequest putObjectRequest = new PutObjectRequest(s3ObjectInfo.bucket(), s3ObjectInfo.key(), inputStream, objectMetadata).withTagging(ExportToS3NeptuneExportEventHandler.createObjectTags(profiles));
        Upload upload = transferManager.upload(putObjectRequest);
        upload.waitForUploadResult();
    } catch (InterruptedException e) {
        logger.warn(e.getMessage());
        Thread.currentThread().interrupt();
    }
}
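Both Neptune-export handlers attach S3 object tags at upload time via withTagging; PutObjectRequest.withTagging takes the SDK's ObjectTagging type, so that is what the project's createObjectTags helper must build. A sketch of constructing one directly, with an illustrative tag key and value:

import com.amazonaws.services.s3.model.ObjectTagging;
import com.amazonaws.services.s3.model.Tag;
import java.util.Arrays;
import java.util.List;

public class TaggingSketch {
    public static void main(String[] args) {
        // Illustrative tag; the handlers derive their tags from export profiles
        List<Tag> tags = Arrays.asList(new Tag("application", "neptune-export"));
        ObjectTagging tagging = new ObjectTagging(tags);
        System.out.println(tagging.getTagSet()); // would be passed to PutObjectRequest.withTagging(...)
    }
}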
use of com.amazonaws.services.s3.model.ObjectMetadata in project knime-cloud by knime.
The class S3RemoteFile, method write:
/**
 * Write the given remote file into this file's location.
 *
 * This method will overwrite the old file if it exists.
 *
 * @param file Source remote file
 * @param exec Execution context for <code>checkCanceled()</code> and
 *            <code>setProgress()</code>
 * @throws Exception If the operation could not be executed
 */
@SuppressWarnings("rawtypes")
@Override
public void write(final RemoteFile file, final ExecutionContext exec) throws Exception {
    // Default implementation using just remote file methods
    try (final InputStream in = file.openInputStream()) {
        final String uri = getURI().toString();
        final ObjectMetadata metadata = new ObjectMetadata();
        // Add SSEncryption --> See AP-8823
        if (getConnection().useSSEncryption()) {
            metadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        }
        long fileSize = file.getSize();
        metadata.setContentLength(fileSize);
        final PutObjectRequest putRequest = new PutObjectRequest(getContainerName(), getBlobName(), in, metadata);
        Upload upload = getTransferManager().upload(putRequest);
        S3ProgressListener progressListener = new S3ProgressListener() {
            long totalTransferred = 0;

            @Override
            public void progressChanged(final ProgressEvent progressEvent) {
                totalTransferred += progressEvent.getBytesTransferred();
                // Use floating-point division; integer division by (fileSize / 100) would lose
                // precision and divide by zero for files smaller than 100 bytes
                double fraction = fileSize > 0 ? (double) totalTransferred / fileSize : 0d;
                exec.setProgress(fraction, () -> "Written: " + FileUtils.byteCountToDisplaySize(totalTransferred) + " to file " + uri);
            }

            @Override
            public void onPersistableTransfer(final PersistableTransfer persistableTransfer) {
                // Not used since we are not going to pause/unpause the upload
            }
        };
        upload.addProgressListener(progressListener);
        upload.waitForCompletion();
    } catch (InterruptedException e) {
        // Removes uploaded parts of failed uploads on the given bucket uploaded before now
        final Date now = new Date(System.currentTimeMillis());
        getTransferManager().abortMultipartUploads(getContainerName(), now);
        // Check if canceled, otherwise rethrow the exception
        exec.checkCanceled();
        throw e;
    } catch (AmazonS3Exception amazonException) {
        throw new KnimeS3Exception(amazonException);
    }
}
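The KNIME listener implements S3ProgressListener because that interface adds the pause/resume hook (onPersistableTransfer). When only byte counts are needed, the SDK's generic ProgressListener is enough. A sketch of that simpler wiring, with a hypothetical bucket, key, and file:

import com.amazonaws.event.ProgressEvent;
import com.amazonaws.event.ProgressListener;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.TransferManagerBuilder;
import com.amazonaws.services.s3.transfer.Upload;
import java.io.File;

public class ProgressSketch {
    public static void main(String[] args) throws InterruptedException {
        File file = new File("data.bin"); // hypothetical local file
        TransferManager tx = TransferManagerBuilder.defaultTransferManager();
        Upload upload = tx.upload("my-bucket", "data.bin", file);
        upload.addProgressListener(new ProgressListener() {
            private long transferred = 0;

            @Override
            public void progressChanged(ProgressEvent event) {
                transferred += event.getBytesTransferred();
                System.out.printf("transferred %d of %d bytes%n", transferred, file.length());
            }
        });
        upload.waitForCompletion(); // blocks until done; throws on failure
        tx.shutdownNow();
    }
}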