use of com.amazonaws.services.s3.transfer.Upload in project YCSB by brianfrankcooper.
the class S3Client method writeToStorage.
/**
 * Upload a new object to S3 or update an existing object on S3.
 *
 * @param bucket
 *          The name of the bucket.
 * @param key
 *          The file key of the object to upload/update.
 * @param values
 *          The data to be written on the object.
 * @param updateMarker
 *          A boolean value. If true, a new object is uploaded to S3.
 *          If false, an existing object is re-uploaded.
 * @param sseLocal
 *          If "true", server-side encryption with S3-managed keys (SSE-S3) is requested.
 * @param ssecLocal
 *          The customer-provided key for SSE-C, or null if unused.
 * @return Status.OK on success, Status.ERROR otherwise.
 */
protected Status writeToStorage(String bucket, String key, HashMap<String, ByteIterator> values,
        Boolean updateMarker, String sseLocal, SSECustomerKey ssecLocal) {
    int totalSize = 0;
    // number of fields to concatenate
    int fieldCount = values.size();
    // getting the first field in the values
    Object keyToSearch = values.keySet().toArray()[0];
    // getting the content of just one field
    byte[] sourceArray = values.get(keyToSearch).toArray();
    // size of each array
    int sizeArray = sourceArray.length;
    if (updateMarker) {
        totalSize = sizeArray * fieldCount;
    } else {
        try {
            Map.Entry<S3Object, ObjectMetadata> objectAndMetadata = getS3ObjectAndMetadata(bucket, key, ssecLocal);
            int sizeOfFile = (int) objectAndMetadata.getValue().getContentLength();
            fieldCount = sizeOfFile / sizeArray;
            totalSize = sizeOfFile;
            objectAndMetadata.getKey().close();
        } catch (Exception e) {
            System.err.println("Not possible to get the object: " + key);
            e.printStackTrace();
            return Status.ERROR;
        }
    }
    byte[] destinationArray = new byte[totalSize];
    int offset = 0;
    for (int i = 0; i < fieldCount; i++) {
        System.arraycopy(sourceArray, 0, destinationArray, offset, sizeArray);
        offset += sizeArray;
    }
    try (InputStream input = new ByteArrayInputStream(destinationArray)) {
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(totalSize);
        PutObjectRequest putObjectRequest = null;
        if (sseLocal.equals("true")) {
            metadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
            putObjectRequest = new PutObjectRequest(bucket, key, input, metadata);
        } else if (ssecLocal != null) {
            putObjectRequest = new PutObjectRequest(bucket, key, input, metadata).withSSECustomerKey(ssecLocal);
        } else {
            putObjectRequest = new PutObjectRequest(bucket, key, input, metadata);
        }
        try {
            PutObjectResult res = s3Client.putObject(putObjectRequest);
            if (res.getETag() == null) {
                return Status.ERROR;
            } else if (sseLocal.equals("true") || ssecLocal != null) {
                System.out.println("Uploaded object encryption status is " + res.getSSEAlgorithm());
            }
        } catch (Exception e) {
            System.err.println("Not possible to write object: " + key);
            e.printStackTrace();
            return Status.ERROR;
        }
    } catch (Exception e) {
        System.err.println("Error in the creation of the stream: " + e.toString());
        e.printStackTrace();
        return Status.ERROR;
    }
    return Status.OK;
}
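For orientation, a minimal standalone sketch of the same single-call PutObject path follows (AWS SDK for Java v1); the region, bucket name, key, and payload are placeholders, not YCSB configuration:

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.services.s3.model.PutObjectResult;
import java.io.ByteArrayInputStream;

public class PutObjectSketch {
    public static void main(String[] args) {
        // Placeholder client setup; YCSB builds its client from workload properties instead.
        AmazonS3 s3 = AmazonS3ClientBuilder.standard().withRegion("us-east-1").build();
        byte[] payload = new byte[4096]; // stands in for the concatenated field data
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(payload.length);
        // Same SSE-S3 request as the sseLocal.equals("true") branch above.
        metadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        PutObjectResult res = s3.putObject(
                new PutObjectRequest("example-bucket", "example-key",
                        new ByteArrayInputStream(payload), metadata));
        System.out.println("ETag: " + res.getETag() + ", SSE: " + res.getSSEAlgorithm());
    }
}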
use of com.amazonaws.services.s3.transfer.Upload in project android-simpl3r by jgilfelt.
the class Uploader method start.
/**
 * Initiate a multipart file upload to Amazon S3, resuming a previously
 * interrupted upload if cached part ETags are available.
 *
 * @return the URL of the successfully uploaded file
 */
public String start() {
    // initialize
    List<PartETag> partETags = new ArrayList<PartETag>();
    final long contentLength = file.length();
    long filePosition = 0;
    int startPartNumber = 1;
    userInterrupted = false;
    userAborted = false;
    bytesUploaded = 0;
    // check if we can resume an incomplete upload
    String uploadId = getCachedUploadId();
    if (uploadId != null) {
        // we can resume the upload
        Log.i(TAG, "resuming upload for " + uploadId);
        // get the cached etags
        List<PartETag> cachedEtags = getCachedPartEtags();
        partETags.addAll(cachedEtags);
        // calculate the start position for resume
        startPartNumber = cachedEtags.size() + 1;
        filePosition = (startPartNumber - 1) * partSize;
        bytesUploaded = filePosition;
        Log.i(TAG, "resuming at part " + startPartNumber + " position " + filePosition);
    } else {
        // initiate a new multipart upload
        Log.i(TAG, "initiating new upload");
        InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(s3bucketName, s3key);
        configureInitiateRequest(initRequest);
        InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest);
        uploadId = initResponse.getUploadId();
    }
    final AbortMultipartUploadRequest abortRequest = new AbortMultipartUploadRequest(s3bucketName, s3key, uploadId);
    for (int k = startPartNumber; filePosition < contentLength; k++) {
        long thisPartSize = Math.min(partSize, (contentLength - filePosition));
        Log.i(TAG, "starting file part " + k + " with size " + thisPartSize);
        UploadPartRequest uploadRequest = new UploadPartRequest()
                .withBucketName(s3bucketName).withKey(s3key)
                .withUploadId(uploadId).withPartNumber(k)
                .withFileOffset(filePosition).withFile(file)
                .withPartSize(thisPartSize);
        ProgressListener s3progressListener = new ProgressListener() {
            public void progressChanged(ProgressEvent progressEvent) {
                // TODO calling shutdown too brute force?
                if (userInterrupted) {
                    s3Client.shutdown();
                    throw new UploadIterruptedException("User interrupted");
                } else if (userAborted) {
                    // aborted requests cannot be resumed, so clear any cached etags
                    clearProgressCache();
                    s3Client.abortMultipartUpload(abortRequest);
                    s3Client.shutdown();
                }
                bytesUploaded += progressEvent.getBytesTransfered();
                //Log.d(TAG, "bytesUploaded=" + bytesUploaded);
                // broadcast progress (note: the long division truncates before Math.round)
                float fpercent = ((bytesUploaded * 100) / contentLength);
                int percent = Math.round(fpercent);
                if (progressListener != null) {
                    progressListener.progressChanged(progressEvent, bytesUploaded, percent);
                }
            }
        };
        uploadRequest.setProgressListener(s3progressListener);
        UploadPartResult result = s3Client.uploadPart(uploadRequest);
        partETags.add(result.getPartETag());
        // cache the part progress for this upload
        if (k == 1) {
            initProgressCache(uploadId);
        }
        // store part etag
        cachePartEtag(result);
        filePosition += thisPartSize;
    }
    CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(s3bucketName, s3key, uploadId, partETags);
    CompleteMultipartUploadResult result = s3Client.completeMultipartUpload(compRequest);
    bytesUploaded = 0;
    Log.i(TAG, "upload complete for " + uploadId);
    clearProgressCache();
    return result.getLocation();
}
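A hedged usage sketch for this class follows; the constructor arguments mirror the UploadService example below, while the nested Uploader.UploadProgressListener type, the worker-thread wrapper, and the in-scope context, s3Client, and TAG variables are assumptions:

// Hypothetical caller; start() blocks, so it must run off the main thread.
final Uploader uploader = new Uploader(context, s3Client, "example-bucket",
        "example-key", new File("/sdcard/example.bin")); // placeholder bucket/key/path
uploader.setProgressListener(new Uploader.UploadProgressListener() {
    @Override
    public void progressChanged(ProgressEvent progressEvent, long bytesUploaded, int percentUploaded) {
        Log.d(TAG, "uploaded " + percentUploaded + "%");
    }
});
new Thread(new Runnable() {
    @Override
    public void run() {
        String url = uploader.start(); // resumes automatically if cached part ETags exist
        Log.i(TAG, "upload finished: " + url);
    }
}).start();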
use of com.amazonaws.services.s3.transfer.Upload in project android-simpl3r by jgilfelt.
the class UploadService method onHandleIntent.
@Override
protected void onHandleIntent(Intent intent) {
    String filePath = intent.getStringExtra(ARG_FILE_PATH);
    File fileToUpload = new File(filePath);
    final String s3ObjectKey = md5(filePath);
    String s3BucketName = getString(R.string.s3_bucket);
    final String msg = "Uploading " + s3ObjectKey + "...";
    // create a new uploader for this file
    uploader = new Uploader(this, s3Client, s3BucketName, s3ObjectKey, fileToUpload);
    // listen for progress updates and broadcast/notify them appropriately
    uploader.setProgressListener(new UploadProgressListener() {
        @Override
        public void progressChanged(ProgressEvent progressEvent, long bytesUploaded, int percentUploaded) {
            Notification notification = buildNotification(msg, percentUploaded);
            nm.notify(NOTIFY_ID_UPLOAD, notification);
            broadcastState(s3ObjectKey, percentUploaded, msg);
        }
    });
    // broadcast/notify that our upload is starting
    Notification notification = buildNotification(msg, 0);
    nm.notify(NOTIFY_ID_UPLOAD, notification);
    broadcastState(s3ObjectKey, 0, msg);
    try {
        // initiate the upload
        String s3Location = uploader.start();
        broadcastState(s3ObjectKey, -1, "File successfully uploaded to " + s3Location);
    } catch (UploadIterruptedException uie) {
        broadcastState(s3ObjectKey, -1, "User interrupted");
    } catch (Exception e) {
        e.printStackTrace();
        broadcastState(s3ObjectKey, -1, "Error: " + e.getMessage());
    }
}
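A hypothetical client call that hands a file to this service might look as follows, assuming ARG_FILE_PATH is exposed as a public constant on UploadService; the file path is a placeholder:

// Start the IntentService with the path of the file to upload.
Intent intent = new Intent(context, UploadService.class);
intent.putExtra(UploadService.ARG_FILE_PATH, "/sdcard/example.bin"); // placeholder path
context.startService(intent);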
use of com.amazonaws.services.s3.transfer.Upload in project camel by apache.
the class S3Producer method processMultiPart.
public void processMultiPart(final Exchange exchange) throws Exception {
    File filePayload = null;
    Object obj = exchange.getIn().getMandatoryBody();
    // Need to check if the message body is WrappedFile
    if (obj instanceof WrappedFile) {
        obj = ((WrappedFile<?>) obj).getFile();
    }
    if (obj instanceof File) {
        filePayload = (File) obj;
    } else {
        throw new InvalidArgumentException("aws-s3: MultiPart upload requires a File input.");
    }
    ObjectMetadata objectMetadata = determineMetadata(exchange);
    if (objectMetadata.getContentLength() == 0) {
        objectMetadata.setContentLength(filePayload.length());
    }
    final String keyName = determineKey(exchange);
    final InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(getConfiguration().getBucketName(), keyName, objectMetadata);
    String storageClass = determineStorageClass(exchange);
    if (storageClass != null) {
        initRequest.setStorageClass(StorageClass.fromValue(storageClass));
    }
    String cannedAcl = exchange.getIn().getHeader(S3Constants.CANNED_ACL, String.class);
    if (cannedAcl != null) {
        CannedAccessControlList objectAcl = CannedAccessControlList.valueOf(cannedAcl);
        initRequest.setCannedACL(objectAcl);
    }
    AccessControlList acl = exchange.getIn().getHeader(S3Constants.ACL, AccessControlList.class);
    if (acl != null) {
        // note: if the canned ACL and the ACL are both specified, the last one set wins;
        // refer to PutObjectRequest#setAccessControlList for more details
        initRequest.setAccessControlList(acl);
    }
    LOG.trace("Initiating multipart upload [{}] from exchange [{}]...", initRequest, exchange);
    final InitiateMultipartUploadResult initResponse = getEndpoint().getS3Client().initiateMultipartUpload(initRequest);
    final long contentLength = objectMetadata.getContentLength();
    final List<PartETag> partETags = new ArrayList<PartETag>();
    long partSize = getConfiguration().getPartSize();
    CompleteMultipartUploadResult uploadResult = null;
    long filePosition = 0;
    try {
        for (int part = 1; filePosition < contentLength; part++) {
            partSize = Math.min(partSize, contentLength - filePosition);
            UploadPartRequest uploadRequest = new UploadPartRequest()
                    .withBucketName(getConfiguration().getBucketName()).withKey(keyName)
                    .withUploadId(initResponse.getUploadId()).withPartNumber(part)
                    .withFileOffset(filePosition).withFile(filePayload)
                    .withPartSize(partSize);
            LOG.trace("Uploading part [{}] for {}", part, keyName);
            partETags.add(getEndpoint().getS3Client().uploadPart(uploadRequest).getPartETag());
            filePosition += partSize;
        }
        CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(getConfiguration().getBucketName(), keyName, initResponse.getUploadId(), partETags);
        uploadResult = getEndpoint().getS3Client().completeMultipartUpload(compRequest);
    } catch (Exception e) {
        getEndpoint().getS3Client().abortMultipartUpload(new AbortMultipartUploadRequest(getConfiguration().getBucketName(), keyName, initResponse.getUploadId()));
        throw e;
    }
    Message message = getMessageForResponse(exchange);
    message.setHeader(S3Constants.E_TAG, uploadResult.getETag());
    if (uploadResult.getVersionId() != null) {
        message.setHeader(S3Constants.VERSION_ID, uploadResult.getVersionId());
    }
    if (getConfiguration().isDeleteAfterWrite() && filePayload != null) {
        FileUtil.deleteFile(filePayload);
    }
}
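For context, a sketch of a Camel route (declared inside a RouteBuilder.configure()) that would exercise this multipart path; the endpoint URI options shown are assumptions based on a typical aws-s3 producer configuration, and "example-bucket" plus the registry bean name "#s3Client" are placeholders:

// Pick up files from a directory and push them to S3 as multipart uploads
// (parts of 5 MB, the S3 minimum part size).
from("file:/tmp/outbox")
        .setHeader(S3Constants.KEY, simple("${file:name}"))
        .to("aws-s3://example-bucket?amazonS3Client=#s3Client&multiPartUpload=true&partSize=5242880");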
use of com.amazonaws.services.s3.transfer.Upload in project hadoop by apache.
the class S3AFileSystem method createEmptyObject.
// Used to create an empty file that represents an empty directory
private void createEmptyObject(final String objectName) throws AmazonClientException, AmazonServiceException, InterruptedIOException {
    final InputStream im = new InputStream() {
        @Override
        public int read() throws IOException {
            return -1;
        }
    };
    PutObjectRequest putObjectRequest = newPutObjectRequest(objectName, newObjectMetadata(0L), im);
    Upload upload = putObject(putObjectRequest);
    try {
        upload.waitForUploadResult();
    } catch (InterruptedException e) {
        throw new InterruptedIOException("Interrupted creating " + objectName);
    }
    incrementPutProgressStatistics(objectName, 0);
    instrumentation.directoryCreated();
}
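Since this is the only snippet above that handles the Upload type directly, here is a minimal standalone sketch of obtaining an Upload handle from TransferManager (the Hadoop code wraps this in its newPutObjectRequest/putObject helpers instead); the region, bucket, key, and file path are placeholders:

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.TransferManagerBuilder;
import com.amazonaws.services.s3.transfer.Upload;
import java.io.File;

public class UploadSketch {
    public static void main(String[] args) throws InterruptedException {
        AmazonS3 s3 = AmazonS3ClientBuilder.standard().withRegion("us-east-1").build();
        TransferManager tm = TransferManagerBuilder.standard().withS3Client(s3).build();
        // upload() returns immediately with an Upload handle; waitForCompletion() blocks.
        Upload upload = tm.upload("example-bucket", "example-key", new File("/tmp/example.bin"));
        upload.waitForCompletion();
        tm.shutdownNow(true); // also shuts down the underlying S3 client
    }
}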