Use of com.amazonaws.services.s3.model.ObjectMetadata in project bayou by capergroup:
the method putToS3 of class S3LoggerBase.
/**
* Performs an S3 Put Object operation storing the UTF-8 bytes of logMsg under the given key
* using construction provided AWS credentials.
*
* @param objectKey the S3 object key. may not be null or whitespace only.
* @param logMsg the message to store
* @throws IllegalArgumentException if objectKey is whitespace only.
*/
/**
 * Performs an S3 Put Object operation storing the UTF-8 bytes of logMsg under the given key
 * using construction provided AWS credentials.
 *
 * @param objectKey the S3 object key. may not be null or whitespace only.
 * @param logMsg the message to store. may not be null.
 * @throws NullPointerException if objectKey or logMsg is null.
 * @throws IllegalArgumentException if objectKey is whitespace only.
 */
void putToS3(String objectKey, String logMsg) {
    if (objectKey == null)
        throw new NullPointerException("objectKey");
    if (objectKey.trim().isEmpty())
        throw new IllegalArgumentException("objectKey may not be only whitespace.");
    if (logMsg == null) // fail fast with a named NPE instead of an anonymous one at getBytes below
        throw new NullPointerException("logMsg");
    /*
     * Make the client used to send the log msg to S3.
     */
    AmazonS3 client;
    {
        Regions regions = Regions.US_EAST_1;
        if (_credentials == null) {
            // no explicit credentials provided at construction: fall back to the
            // default provider chain (environment variables, instance profile, ...)
            client = AmazonS3ClientBuilder.standard().withRegion(regions).build();
        } else {
            client = AmazonS3ClientBuilder.standard()
                                          .withCredentials(new AWSStaticCredentialsProvider(_credentials))
                                          .withRegion(regions)
                                          .build();
        }
    }
    /*
     * Store the log msg in S3. Content length must be set explicitly because we
     * upload from a stream, so the SDK cannot determine it on its own.
     */
    byte[] logMsgBytes = logMsg.getBytes(StandardCharsets.UTF_8);
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(logMsgBytes.length);
    client.putObject(_bucketName, objectKey, new ByteArrayInputStream(logMsgBytes), metadata);
    _logger.debug("exiting");
}
Use of com.amazonaws.services.s3.model.ObjectMetadata in project apex-malhar by apache:
the method uploadBlockIntoS3 of class S3BlockUploadOperator.
/**
* Upload the block into S3 bucket.
* @param tuple block data
*/
/**
 * Upload the block into S3 bucket.
 * A block that is the sole block of its file is stored with a plain Put Object call;
 * any other block is uploaded as one part of a multipart upload. Blocks whose file
 * path or file metadata has not arrived yet are parked in waitingTuples to be retried.
 * @param tuple block data
 */
protected void uploadBlockIntoS3(AbstractBlockReader.ReaderRecord<Slice> tuple) {
// Skip blocks belonging to a window that was already fully committed (idempotent replay).
if (currentWindowId <= windowDataManager.getLargestCompletedWindow()) {
return;
}
// Check whether the block metadata is present for this block
if (blockIdToFilePath.get(tuple.getBlockId()) == null) {
if (!waitingTuples.contains(tuple)) {
waitingTuples.add(tuple);
}
return;
}
String uniqueBlockId = getUniqueBlockIdFromFile(tuple.getBlockId(), blockIdToFilePath.get(tuple.getBlockId()));
S3BlockMetaData metaData = blockInfo.get(uniqueBlockId);
// Check whether the file metadata is received
if (metaData == null) {
if (!waitingTuples.contains(tuple)) {
waitingTuples.add(tuple);
}
return;
}
long partSize = tuple.getRecord().length;
PartETag partETag = null;
ByteArrayInputStream bis = new ByteArrayInputStream(tuple.getRecord().buffer);
// Check if it is a Single block of a file
if (metaData.isLastBlock && metaData.partNo == 1) {
// Single-block file: a simple Put Object is cheaper than a one-part multipart upload.
ObjectMetadata omd = createObjectMetadata();
omd.setContentLength(partSize);
PutObjectResult result = s3Client.putObject(new PutObjectRequest(bucketName, metaData.getKeyName(), bis, omd));
// Synthesize a part tag (part number 1) so downstream handling is uniform for both paths.
partETag = new PartETag(1, result.getETag());
} else {
// Else upload use multi-part feature
try {
// Create request to upload a part.
UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(bucketName).withKey(metaData.getKeyName()).withUploadId(metaData.getUploadId()).withPartNumber(metaData.getPartNo()).withInputStream(bis).withPartSize(partSize);
partETag = s3Client.uploadPart(uploadRequest).getPartETag();
} catch (Exception e) {
throw new RuntimeException(e);
}
}
// Emit the result and record it in this window's recovery state for replay.
UploadBlockMetadata uploadmetadata = new UploadBlockMetadata(partETag, metaData.getKeyName());
output.emit(uploadmetadata);
currentWindowRecoveryState.put(uniqueBlockId, uploadmetadata);
try {
bis.close();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
Use of com.amazonaws.services.s3.model.ObjectMetadata in project stocator by SparkTC:
the method newPutObjectRequest of class COSAPIClient.
/**
* Create a putObject request.
* Adds the ACL and metadata
* @param key key of object
* @param metadata metadata header
* @param srcfile source file
* @return the request
*/
/**
 * Builds a Put Object request targeting the configured bucket.
 * The supplied metadata (including any ACL headers it carries) is attached
 * to the request before it is returned.
 *
 * @param key object key within the bucket
 * @param metadata metadata header to send with the put
 * @param srcfile local file whose contents will be uploaded
 * @return the fully configured request
 */
public PutObjectRequest newPutObjectRequest(String key, ObjectMetadata metadata, File srcfile) {
    final PutObjectRequest request = new PutObjectRequest(mBucket, key, srcfile);
    request.setMetadata(metadata);
    return request;
}
Use of com.amazonaws.services.s3.model.ObjectMetadata in project stocator by SparkTC:
the method createObject of class COSAPIClient.
@Override
public FSDataOutputStream createObject(String objName, String contentType, Map<String, String> metadata, Statistics statistics) throws IOException {
LOG.debug("Create object {}", objName);
try {
String objNameWithoutBuket = objName;
if (objName.startsWith(mBucket + "/")) {
objNameWithoutBuket = objName.substring(mBucket.length() + 1);
}
if (blockUploadEnabled) {
return new FSDataOutputStream(new COSBlockOutputStream(this, objNameWithoutBuket, new SemaphoredDelegatingExecutor(threadPoolExecutor, blockOutputActiveBlocks, true), partSize, blockFactory, contentType, new WriteOperationHelper(objNameWithoutBuket), metadata), null);
}
if (!contentType.equals(Constants.APPLICATION_DIRECTORY)) {
return new FSDataOutputStream(new COSOutputStream(mBucket, objName, mClient, contentType, metadata, transfers, this), statistics);
} else {
final InputStream im = new InputStream() {
@Override
public int read() throws IOException {
return -1;
}
};
final ObjectMetadata om = new ObjectMetadata();
om.setContentLength(0L);
om.setContentType(contentType);
om.setUserMetadata(metadata);
// Remove the bucket name prefix from key path
if (objName.startsWith(mBucket + "/")) {
objName = objName.substring(mBucket.length() + 1);
}
/*
if (!objName.endsWith("/")) {
objName = objName + "/";
}*/
LOG.debug("bucket: {}, key {}", mBucket, objName);
PutObjectRequest putObjectRequest = new PutObjectRequest(mBucket, objName, im, om);
Upload upload = transfers.upload(putObjectRequest);
upload.waitForUploadResult();
OutputStream fakeStream = new OutputStream() {
@Override
public void write(int b) throws IOException {
}
@Override
public void close() throws IOException {
super.close();
}
};
return new FSDataOutputStream(fakeStream, statistics);
}
} catch (InterruptedException e) {
throw new InterruptedIOException("Interrupted creating " + objName);
} catch (IOException e) {
LOG.error(e.getMessage());
throw e;
}
}
Aggregations