use of com.amazonaws.services.s3.model.ObjectMetadata in project stocator by SparkTC.
the class COSOutputStream method close.
@Override
public void close() throws IOException {
  if (closed.getAndSet(true)) {
    return;
  }
  mBackupOutputStream.close();
  LOG.debug("OutputStream for key '{}' closed. Now beginning upload", mKey);
  try {
    // Describe the object explicitly, then hand the local backup file to the TransferManager.
    final ObjectMetadata om = new ObjectMetadata();
    om.setContentLength(mBackupFile.length());
    om.setContentType(mContentType);
    om.setUserMetadata(mMetadata);
    PutObjectRequest putObjectRequest = new PutObjectRequest(mBucketName, mKey, mBackupFile);
    putObjectRequest.setMetadata(om);
    Upload upload = transfers.upload(putObjectRequest);
    upload.waitForUploadResult();
  } catch (InterruptedException e) {
    throw (InterruptedIOException) new InterruptedIOException(e.toString()).initCause(e);
  } catch (AmazonClientException e) {
    throw new IOException(String.format("saving output %s %s", mKey, e));
  } finally {
    if (!mBackupFile.delete()) {
      LOG.warn("Could not delete temporary cos file: {}", mBackupFile);
    }
    super.close();
  }
  LOG.debug("OutputStream for key '{}' upload complete", mKey);
}
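Stripped of the stream bookkeeping, the pattern above boils down to handing a local file plus explicit ObjectMetadata to a TransferManager. A minimal self-contained sketch of that pattern follows; the client setup, bucket, key, and file path are placeholders, not taken from the stocator code.

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.TransferManagerBuilder;
import com.amazonaws.services.s3.transfer.Upload;
import java.io.File;

public class TransferManagerUploadSketch {
  public static void main(String[] args) throws InterruptedException {
    AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
    TransferManager transfers = TransferManagerBuilder.standard().withS3Client(s3).build();
    File backupFile = new File("/tmp/part-00000");   // placeholder local file
    ObjectMetadata om = new ObjectMetadata();
    om.setContentLength(backupFile.length());        // explicit length; mandatory for stream uploads, harmless for files
    om.setContentType("application/octet-stream");
    PutObjectRequest put = new PutObjectRequest("my-bucket", "data/part-00000", backupFile);
    put.setMetadata(om);
    Upload upload = transfers.upload(put);           // asynchronous; returns immediately
    upload.waitForUploadResult();                    // block until the upload completes or fails
    transfers.shutdownNow(false);                    // shut down transfer threads, keep the client open
  }
}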
use of com.amazonaws.services.s3.model.ObjectMetadata in project apex-malhar by apache.
the class S3Reconciler method processCommittedData.
/**
 * Uploads the file to Amazon S3 using the putObject API of the S3 client.
 */
@Override
protected void processCommittedData(FSRecordCompactionOperator.OutputMetaData outputMetaData) {
  try {
    Path path = new Path(outputMetaData.getPath());
    if (!fs.exists(path)) {
      logger.debug("Ignoring non-existent path assuming replay : {}", path);
      return;
    }
    FSDataInputStream fsinput = fs.open(path);
    ObjectMetadata omd = new ObjectMetadata();
    omd.setContentLength(outputMetaData.getSize());
    String keyName = directoryName + Path.SEPARATOR + outputMetaData.getFileName();
    PutObjectRequest request = new PutObjectRequest(bucketName, keyName, fsinput, omd);
    if (outputMetaData.getSize() < Integer.MAX_VALUE) {
      // Size the SDK's mark/reset buffer so the stream upload can be retried from the start.
      request.getRequestClientOptions().setReadLimit((int) outputMetaData.getSize());
    } else {
      throw new RuntimeException("PutRequestSize greater than Integer.MAX_VALUE");
    }
    if (fs.exists(path)) {
      PutObjectResult result = s3client.putObject(request);
      logger.debug("File {} Uploaded at {}", keyName, result.getETag());
    }
  } catch (FileNotFoundException e) {
    logger.debug("Ignoring non-existent path assuming replay : {}", outputMetaData.getPath());
  } catch (IOException e) {
    logger.error("Unable to create Stream: {}", e.getMessage());
  }
}
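The setReadLimit call above sizes the SDK's internal mark/reset buffer so a failed stream upload can be retried, which is also why the method rejects objects larger than Integer.MAX_VALUE. A minimal sketch of the same stream-based put, assuming an already constructed AmazonS3 client and placeholder names:

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;
import java.io.InputStream;

class StreamPutSketch {
  static void putStream(AmazonS3 s3client, String bucket, String key, InputStream in, long size) {
    ObjectMetadata omd = new ObjectMetadata();
    omd.setContentLength(size);                              // mandatory when uploading from a stream
    PutObjectRequest request = new PutObjectRequest(bucket, key, in, omd);
    // Let the SDK mark/reset the stream so a failed attempt can be retried;
    // assumes size fits in an int, as in the snippet above.
    request.getRequestClientOptions().setReadLimit((int) size);
    s3client.putObject(request);
  }
}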
use of com.amazonaws.services.s3.model.ObjectMetadata in project elasticsearch by elastic.
the class DefaultS3OutputStream method doInitialize.
protected String doInitialize(S3BlobStore blobStore, String bucketName, String blobName, boolean serverSideEncryption) {
  InitiateMultipartUploadRequest request = new InitiateMultipartUploadRequest(bucketName, blobName)
      .withCannedACL(blobStore.getCannedACL())
      .withStorageClass(blobStore.getStorageClass());
  if (serverSideEncryption) {
    ObjectMetadata md = new ObjectMetadata();
    md.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
    request.setObjectMetadata(md);
  }
  return blobStore.client().initiateMultipartUpload(request).getUploadId();
}
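doInitialize only opens the multipart upload; the uploadId it returns is what later part uploads and the completion call are keyed on. The sketch below shows that typical continuation for a single part. It is an illustration with placeholder data, not code from the elasticsearch repository:

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest;
import com.amazonaws.services.s3.model.PartETag;
import com.amazonaws.services.s3.model.UploadPartRequest;
import java.io.ByteArrayInputStream;
import java.util.ArrayList;
import java.util.List;

class MultipartContinuationSketch {
  static void uploadSinglePartAndComplete(AmazonS3 client, String bucket, String key, String uploadId, byte[] data) {
    List<PartETag> partETags = new ArrayList<>();
    UploadPartRequest part = new UploadPartRequest()
        .withBucketName(bucket)
        .withKey(key)
        .withUploadId(uploadId)
        .withPartNumber(1)                                  // part numbers start at 1
        .withInputStream(new ByteArrayInputStream(data))
        .withPartSize(data.length);
    partETags.add(client.uploadPart(part).getPartETag());   // S3 returns an ETag per uploaded part
    client.completeMultipartUpload(
        new CompleteMultipartUploadRequest(bucket, key, uploadId, partETags));
  }
}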
use of com.amazonaws.services.s3.model.ObjectMetadata in project zeppelin by apache.
the class S3NotebookRepo method save.
@Override
public void save(Note note, AuthenticationInfo subject) throws IOException {
  GsonBuilder gsonBuilder = new GsonBuilder();
  gsonBuilder.setPrettyPrinting();
  Gson gson = gsonBuilder.create();
  String json = gson.toJson(note);
  String key = user + "/" + "notebook" + "/" + note.getId() + "/" + "note.json";
  File file = File.createTempFile("note", "json");
  try {
    // Write the serialized note to a temporary file before uploading it.
    try (Writer writer = new OutputStreamWriter(new FileOutputStream(file))) {
      writer.write(json);
    }
    PutObjectRequest putRequest = new PutObjectRequest(bucketName, key, file);
    if (useServerSideEncryption) {
      // Request server-side encryption.
      ObjectMetadata objectMetadata = new ObjectMetadata();
      objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
      putRequest.setMetadata(objectMetadata);
    }
    s3client.putObject(putRequest);
  } catch (AmazonClientException ace) {
    throw new IOException("Unable to store note in S3: " + ace, ace);
  } finally {
    FileUtils.deleteQuietly(file);
  }
}
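A note stored this way can be checked for encryption by reading its metadata back: getSSEAlgorithm() returns "AES256" (the value of ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION) for SSE-S3 encrypted objects. A minimal sketch, with bucket and key as placeholders:

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.ObjectMetadata;

class SseCheckSketch {
  static boolean isSseS3Encrypted(AmazonS3 s3client, String bucketName, String key) {
    // Issues a HEAD request: only the metadata is fetched, not the object body.
    ObjectMetadata md = s3client.getObjectMetadata(bucketName, key);
    return ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION.equals(md.getSSEAlgorithm());
  }
}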
use of com.amazonaws.services.s3.model.ObjectMetadata in project YCSB by brianfrankcooper.
the class S3Client method writeToStorage.
/**
 * Uploads a new object to S3 or updates an existing object.
 *
 * @param bucket
 *          The name of the bucket.
 * @param key
 *          The file key of the object to upload/update.
 * @param values
 *          The data to be written to the object.
 * @param updateMarker
 *          If true, a new object is uploaded to S3; if false, an existing
 *          object is re-uploaded.
 * @param sseLocal
 *          "true" to request SSE-S3 server-side encryption for the object.
 * @param ssecLocal
 *          Customer-provided key for SSE-C encryption, or null if unused.
 * @return Status.OK on success, Status.ERROR otherwise.
 */
protected Status writeToStorage(String bucket, String key, HashMap<String, ByteIterator> values, Boolean updateMarker, String sseLocal, SSECustomerKey ssecLocal) {
  int totalSize = 0;
  // number of fields to concatenate
  int fieldCount = values.size();
  // getting the first field in the values
  Object keyToSearch = values.keySet().toArray()[0];
  // getting the content of just one field
  byte[] sourceArray = values.get(keyToSearch).toArray();
  // size of each array
  int sizeArray = sourceArray.length;
  if (updateMarker) {
    totalSize = sizeArray * fieldCount;
  } else {
    // Re-upload case: match the size of the object already stored in S3.
    try {
      Map.Entry<S3Object, ObjectMetadata> objectAndMetadata = getS3ObjectAndMetadata(bucket, key, ssecLocal);
      int sizeOfFile = (int) objectAndMetadata.getValue().getContentLength();
      fieldCount = sizeOfFile / sizeArray;
      totalSize = sizeOfFile;
      objectAndMetadata.getKey().close();
    } catch (Exception e) {
      System.err.println("Not possible to get the object: " + key);
      e.printStackTrace();
      return Status.ERROR;
    }
  }
  byte[] destinationArray = new byte[totalSize];
  int offset = 0;
  for (int i = 0; i < fieldCount; i++) {
    System.arraycopy(sourceArray, 0, destinationArray, offset, sizeArray);
    offset += sizeArray;
  }
  try (InputStream input = new ByteArrayInputStream(destinationArray)) {
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(totalSize);
    PutObjectRequest putObjectRequest = null;
    if (sseLocal.equals("true")) {
      // SSE-S3: S3 manages the encryption keys.
      metadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
      putObjectRequest = new PutObjectRequest(bucket, key, input, metadata);
    } else if (ssecLocal != null) {
      // SSE-C: encrypt with a customer-provided key.
      putObjectRequest = new PutObjectRequest(bucket, key, input, metadata).withSSECustomerKey(ssecLocal);
    } else {
      putObjectRequest = new PutObjectRequest(bucket, key, input, metadata);
    }
    try {
      PutObjectResult res = s3Client.putObject(putObjectRequest);
      if (res.getETag() == null) {
        return Status.ERROR;
      } else {
        if (sseLocal.equals("true")) {
          System.out.println("Uploaded object encryption status is " + res.getSSEAlgorithm());
        } else if (ssecLocal != null) {
          System.out.println("Uploaded object encryption status is " + res.getSSEAlgorithm());
        }
      }
    } catch (Exception e) {
      System.err.println("Not possible to write object: " + key);
      e.printStackTrace();
      return Status.ERROR;
    }
  } catch (Exception e) {
    System.err.println("Error in the creation of the stream: " + e.toString());
    e.printStackTrace();
    return Status.ERROR;
  }
  return Status.OK;
}
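The getS3ObjectAndMetadata helper called above is not shown in this excerpt. One plausible shape for such a helper, returning the object together with its metadata and honoring an optional SSE-C key, is sketched below; this is an illustration, not the YCSB implementation:

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.GetObjectRequest;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.S3Object;
import com.amazonaws.services.s3.model.SSECustomerKey;
import java.util.AbstractMap;
import java.util.Map;

class GetObjectSketch {
  static Map.Entry<S3Object, ObjectMetadata> getS3ObjectAndMetadata(
      AmazonS3 s3Client, String bucket, String key, SSECustomerKey ssecKey) {
    GetObjectRequest request = new GetObjectRequest(bucket, key);
    if (ssecKey != null) {
      request.setSSECustomerKey(ssecKey);   // required to read back SSE-C encrypted objects
    }
    S3Object object = s3Client.getObject(request);
    // The response carries the object metadata (including content length) alongside the body stream.
    return new AbstractMap.SimpleEntry<>(object, object.getObjectMetadata());
  }
}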