Use of com.amazonaws.services.s3.transfer.internal.S3ProgressListener in the project android-simpl3r by jgilfelt.
Shown below: the class Uploader, method start().
/**
 * Initiate a multipart upload of {@code file} to Amazon S3, resuming an
 * earlier interrupted upload when cached part ETags are available.
 * <p>
 * Progress is reported through {@code progressListener} (if set) after each
 * transfer event. A user interrupt shuts the client down mid-part and throws
 * {@link UploadIterruptedException}; a user abort additionally cancels the
 * multipart upload on the S3 side and clears the local progress cache.
 *
 * @return the URL of the successfully uploaded file
 */
public String start() {
    // reset per-run state
    List<PartETag> partETags = new ArrayList<PartETag>();
    final long contentLength = file.length();
    long filePosition = 0;
    int startPartNumber = 1;
    userInterrupted = false;
    userAborted = false;
    bytesUploaded = 0;

    // check if we can resume an incomplete upload
    String uploadId = getCachedUploadId();
    if (uploadId != null) {
        // resume: replay the cached part ETags and skip past the parts
        // that were already transferred
        Log.i(TAG, "resuming upload for " + uploadId);
        List<PartETag> cachedEtags = getCachedPartEtags();
        partETags.addAll(cachedEtags);
        startPartNumber = cachedEtags.size() + 1;
        // NOTE(review): assumes every completed part except the last was
        // exactly partSize bytes, which holds because parts go up in order
        filePosition = (startPartNumber - 1) * partSize;
        bytesUploaded = filePosition;
        Log.i(TAG, "resuming at part " + startPartNumber + " position " + filePosition);
    } else {
        // fresh upload: ask S3 for a new multipart upload id
        Log.i(TAG, "initiating new upload");
        InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(s3bucketName, s3key);
        configureInitiateRequest(initRequest);
        InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest);
        uploadId = initResponse.getUploadId();
    }

    final AbortMultipartUploadRequest abortRequest = new AbortMultipartUploadRequest(s3bucketName, s3key, uploadId);

    // upload the remaining parts sequentially
    for (int k = startPartNumber; filePosition < contentLength; k++) {
        long thisPartSize = Math.min(partSize, (contentLength - filePosition));
        Log.i(TAG, "starting file part " + k + " with size " + thisPartSize);

        UploadPartRequest uploadRequest = new UploadPartRequest()
                .withBucketName(s3bucketName)
                .withKey(s3key)
                .withUploadId(uploadId)
                .withPartNumber(k)
                .withFileOffset(filePosition)
                .withFile(file)
                .withPartSize(thisPartSize);

        ProgressListener s3progressListener = new ProgressListener() {
            public void progressChanged(ProgressEvent progressEvent) {
                // TODO calling shutdown too brute force?
                if (userInterrupted) {
                    s3Client.shutdown();
                    throw new UploadIterruptedException("User interrupted");
                } else if (userAborted) {
                    // aborted requests cannot be resumed, so clear any cached etags
                    clearProgressCache();
                    s3Client.abortMultipartUpload(abortRequest);
                    s3Client.shutdown();
                    // FIX: the client is gone; stop accumulating and reporting
                    // progress for this event (original fell through)
                    return;
                }
                bytesUploaded += progressEvent.getBytesTransfered();
                // FIX: the original divided two longs before converting to
                // float, truncating the percentage (and Math.round was a
                // no-op); the float form also avoids long overflow
                float fpercent = (float) bytesUploaded * 100f / contentLength;
                int percent = Math.round(fpercent);
                if (progressListener != null) {
                    progressListener.progressChanged(progressEvent, bytesUploaded, percent);
                }
            }
        };
        uploadRequest.setProgressListener(s3progressListener);

        UploadPartResult result = s3Client.uploadPart(uploadRequest);
        partETags.add(result.getPartETag());

        // a fresh upload becomes resumable once its first part succeeds
        if (k == 1) {
            initProgressCache(uploadId);
        }
        // store part etag
        cachePartEtag(result);
        filePosition += thisPartSize;
    }

    // all parts uploaded: stitch them together on the S3 side
    CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(s3bucketName, s3key, uploadId, partETags);
    CompleteMultipartUploadResult result = s3Client.completeMultipartUpload(compRequest);

    bytesUploaded = 0;
    Log.i(TAG, "upload complete for " + uploadId);
    clearProgressCache();
    return result.getLocation();
}
Use of com.amazonaws.services.s3.transfer.internal.S3ProgressListener in the project spring-integration-aws by spring-projects.
Shown below: the class S3MessageHandler, method upload().
/**
 * Build and submit an S3 upload for the given request message.
 * <p>
 * A {@link File} payload that points at a directory is handed off recursively
 * to {@code TransferManager#uploadDirectory}. Any other supported payload
 * ({@link File}, {@link InputStream} or {@code byte[]}) is wrapped in a
 * {@link PutObjectRequest} — with MD5, content length and content type filled
 * in where missing — and submitted via {@code TransferManager#upload}.
 *
 * @param requestMessage the message whose payload is uploaded
 * @return the in-flight {@link Transfer} handle
 * @throws IllegalArgumentException for unsupported payload types
 * @throws IllegalStateException when no object key can be determined
 */
private Transfer upload(Message<?> requestMessage) {
    Object payload = requestMessage.getPayload();
    String bucketName = obtainBucket(requestMessage);

    String key = null;
    if (this.keyExpression != null) {
        key = this.keyExpression.getValue(this.evaluationContext, requestMessage, String.class);
    }

    // Directory payloads bypass the single-object path entirely.
    if (payload instanceof File && ((File) payload).isDirectory()) {
        File directory = (File) payload;
        return this.transferManager.uploadDirectory(bucketName,
                key == null ? directory.getName() : key,
                directory, true,
                new MessageHeadersObjectMetadataProvider(requestMessage.getHeaders()));
    }

    ObjectMetadata metadata = new ObjectMetadata();
    if (this.uploadMetadataProvider != null) {
        this.uploadMetadataProvider.populateMetadata(metadata, requestMessage);
    }

    PutObjectRequest putObjectRequest;
    try {
        if (payload instanceof InputStream) {
            InputStream inputStream = (InputStream) payload;
            if (metadata.getContentMD5() == null) {
                // The stream is consumed to compute the digest, so it must be resettable.
                Assert.state(inputStream.markSupported(),
                        "For an upload InputStream with no MD5 digest metadata, the "
                                + "markSupported() method must evaluate to true. ");
                metadata.setContentMD5(Md5Utils.md5AsBase64(inputStream));
                inputStream.reset();
            }
            putObjectRequest = new PutObjectRequest(bucketName, key, inputStream, metadata);
        }
        else if (payload instanceof File) {
            File fileToUpload = (File) payload;
            if (key == null) {
                key = fileToUpload.getName();
            }
            if (metadata.getContentMD5() == null) {
                metadata.setContentMD5(Md5Utils.md5AsBase64(fileToUpload));
            }
            if (metadata.getContentLength() == 0) {
                metadata.setContentLength(fileToUpload.length());
            }
            if (metadata.getContentType() == null) {
                metadata.setContentType(Mimetypes.getInstance().getMimetype(fileToUpload));
            }
            putObjectRequest = new PutObjectRequest(bucketName, key, fileToUpload).withMetadata(metadata);
        }
        else if (payload instanceof byte[]) {
            byte[] payloadBytes = (byte[]) payload;
            InputStream inputStream = new ByteArrayInputStream(payloadBytes);
            if (metadata.getContentMD5() == null) {
                metadata.setContentMD5(Md5Utils.md5AsBase64(inputStream));
                inputStream.reset();
            }
            if (metadata.getContentLength() == 0) {
                metadata.setContentLength(payloadBytes.length);
            }
            putObjectRequest = new PutObjectRequest(bucketName, key, inputStream, metadata);
        }
        else {
            throw new IllegalArgumentException("Unsupported payload type: ["
                    + payload.getClass()
                    + "]. The only supported payloads for the upload request are "
                    + "java.io.File, java.io.InputStream, byte[] and PutObjectRequest.");
        }
    }
    catch (IOException e) {
        throw new MessageHandlingException(requestMessage, e);
    }

    if (key == null) {
        if (this.keyExpression != null) {
            throw new IllegalStateException("The 'keyExpression' ["
                    + this.keyExpression.getExpressionString()
                    + "] must not evaluate to null. Root object is: " + requestMessage);
        }
        throw new IllegalStateException("Specify a 'keyExpression' for non-java.io.File payloads");
    }

    S3ProgressListener progressListener = this.s3ProgressListener;

    if (this.objectAclExpression != null) {
        Object acl = this.objectAclExpression.getValue(this.evaluationContext, requestMessage);
        Assert.state(acl instanceof AccessControlList || acl instanceof CannedAccessControlList,
                "The 'objectAclExpression' [" + this.objectAclExpression.getExpressionString()
                        + "] must evaluate to com.amazonaws.services.s3.model.AccessControlList "
                        + "or must evaluate to com.amazonaws.services.s3.model.CannedAccessControlList. "
                        + "Gotten: [" + acl + "]");

        final SetObjectAclRequest theAclRequest = acl instanceof AccessControlList
                ? new SetObjectAclRequest(bucketName, key, (AccessControlList) acl)
                : new SetObjectAclRequest(bucketName, key, (CannedAccessControlList) acl);

        // Apply the ACL only once the transfer has fully completed.
        progressListener = new S3ProgressListener() {

            @Override
            public void onPersistableTransfer(PersistableTransfer persistableTransfer) {
            }

            @Override
            public void progressChanged(ProgressEvent progressEvent) {
                if (ProgressEventType.TRANSFER_COMPLETED_EVENT.equals(progressEvent.getEventType())) {
                    S3MessageHandler.this.transferManager.getAmazonS3Client().setObjectAcl(theAclRequest);
                }
            }

        };

        if (this.s3ProgressListener != null) {
            // Keep the user-supplied listener running alongside the ACL hook.
            progressListener = new S3ProgressListenerChain(this.s3ProgressListener, progressListener);
        }
    }

    if (progressListener == null) {
        return this.transferManager.upload(putObjectRequest);
    }
    return this.transferManager.upload(putObjectRequest, progressListener);
}
Aggregations