use of com.amazonaws.services.s3.transfer.Upload in project hadoop by apache.
the class S3AFileSystem method uploadPart.
/**
 * Upload part of a multi-partition file.
 * Increments the write and put counters.
 * <i>Important: this call does not close any input stream in the request.</i>
 * @param request request
 * @return the result of the operation.
 * @throws AmazonClientException on problems
 */
public UploadPartResult uploadPart(UploadPartRequest request)
    throws AmazonClientException {
  long len = request.getPartSize();
  incrementPutStartStatistics(len);
  try {
    UploadPartResult uploadPartResult = s3.uploadPart(request);
    incrementPutCompletedStatistics(true, len);
    return uploadPartResult;
  } catch (AmazonClientException e) {
    incrementPutCompletedStatistics(false, len);
    throw e;
  }
}
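For context, a caller would build an UploadPartRequest as part of an in-progress multipart upload before handing it to this method. A minimal sketch, assuming an S3AFileSystem instance fs and an upload ID from a previously initiated multipart upload; the bucket, key, and sizes are illustrative assumptions, not Hadoop code:

// Sketch: driving uploadPart for one 5 MB part of a multipart upload.
UploadPartRequest part = new UploadPartRequest()
    .withBucketName("example-bucket")
    .withKey("example/key")
    .withUploadId(uploadId)           // from InitiateMultipartUploadResult
    .withPartNumber(1)
    .withFile(sourceFile)
    .withFileOffset(0)
    .withPartSize(5 * 1024 * 1024);   // every part but the last must be >= 5 MB
UploadPartResult result = fs.uploadPart(part);
PartETag etag = result.getPartETag();  // collected later to complete the upload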
use of com.amazonaws.services.s3.transfer.Upload in project hadoop by apache.
the class S3ABlockOutputStream method putObject.
/**
 * Upload the current block as a single PUT request; if the buffer
 * is empty a 0-byte PUT will be invoked, as it is needed to create an
 * entry at the far end.
 * @throws IOException any problem.
 */
private void putObject() throws IOException {
  LOG.debug("Executing regular upload for {}", writeOperationHelper);
  final S3ADataBlocks.DataBlock block = getActiveBlock();
  int size = block.dataSize();
  final S3ADataBlocks.BlockUploadData uploadData = block.startUpload();
  final PutObjectRequest putObjectRequest = uploadData.hasFile()
      ? writeOperationHelper.newPutRequest(uploadData.getFile())
      : writeOperationHelper.newPutRequest(uploadData.getUploadStream(), size);
  fs.setOptionalPutRequestParameters(putObjectRequest);
  long transferQueueTime = now();
  BlockUploadProgress callback =
      new BlockUploadProgress(block, progressListener, transferQueueTime);
  putObjectRequest.setGeneralProgressListener(callback);
  statistics.blockUploadQueued(size);
  ListenableFuture<PutObjectResult> putObjectResult =
      executorService.submit(new Callable<PutObjectResult>() {
        @Override
        public PutObjectResult call() throws Exception {
          PutObjectResult result;
          try {
            // the putObject call automatically closes the input
            // stream afterwards.
            result = writeOperationHelper.putObject(putObjectRequest);
          } finally {
            closeAll(LOG, uploadData, block);
          }
          return result;
        }
      });
  clearActiveBlock();
  // wait for completion
  try {
    putObjectResult.get();
  } catch (InterruptedException ie) {
    LOG.warn("Interrupted object upload", ie);
    Thread.currentThread().interrupt();
  } catch (ExecutionException ee) {
    throw extractException("regular upload", key, ee);
  }
}
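Note the shape of the pattern above: the PUT is submitted to the stream's executor and then immediately waited on, which routes every upload through the same bounded thread pool while keeping synchronous semantics for the caller. A standalone sketch of that submit-then-wait idiom, with illustrative names that are not from the Hadoop source:

// Sketch: run the upload on a shared bounded pool, block for the result,
// and unwrap ExecutionException to surface the real failure.
ExecutorService pool = Executors.newFixedThreadPool(4);
Future<String> future = pool.submit(() -> {
  // perform the PUT here, closing any streams in a finally block
  return "etag";
});
try {
  String etag = future.get();
} catch (InterruptedException ie) {
  Thread.currentThread().interrupt();  // restore the interrupt flag
} catch (ExecutionException ee) {
  throw new IOException("upload failed", ee.getCause());
}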
use of com.amazonaws.services.s3.transfer.Upload in project hadoop by apache.
the class S3AFileSystem method putObjectDirect.
/**
 * PUT an object directly (i.e. not via the transfer manager).
 * Byte length is calculated from the file length, or, if there is no
 * file, from the content length of the header.
 * <i>Important: this call will close any input stream in the request.</i>
 * @param putObjectRequest the request
 * @return the upload initiated
 * @throws AmazonClientException on problems
 */
public PutObjectResult putObjectDirect(PutObjectRequest putObjectRequest)
    throws AmazonClientException {
  long len;
  if (putObjectRequest.getFile() != null) {
    len = putObjectRequest.getFile().length();
  } else {
    len = putObjectRequest.getMetadata().getContentLength();
  }
  incrementPutStartStatistics(len);
  try {
    PutObjectResult result = s3.putObject(putObjectRequest);
    incrementPutCompletedStatistics(true, len);
    return result;
  } catch (AmazonClientException e) {
    incrementPutCompletedStatistics(false, len);
    throw e;
  }
}
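The javadoc's point about byte length matters most for stream-backed requests: with no file to measure, the content length must be set explicitly in the metadata, or the len computed above will be wrong. A minimal sketch, with the bucket and key names as assumptions:

// Sketch: a stream-backed PutObjectRequest must declare its content
// length in the metadata, since there is no file to measure.
byte[] data = "hello".getBytes(StandardCharsets.UTF_8);
ObjectMetadata meta = new ObjectMetadata();
meta.setContentLength(data.length);
PutObjectRequest req = new PutObjectRequest(
    "example-bucket", "example/key",
    new ByteArrayInputStream(data), meta);
PutObjectResult result = fs.putObjectDirect(req);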
use of com.amazonaws.services.s3.transfer.Upload in project deeplearning4j by deeplearning4j.
the class S3Uploader method multiPartUpload.
/**
 * Multi-part upload for big files.
 * @param file the file to upload
 * @param bucketName the name of the bucket to upload to
 */
public void multiPartUpload(File file, String bucketName) {
  AmazonS3 client = new AmazonS3Client(creds);
  bucketName = ensureValidBucketName(bucketName);
  List<Bucket> buckets = client.listBuckets();
  for (Bucket b : buckets) {
    if (b.getName().equals(bucketName)) {
      doMultiPart(client, bucketName, file);
      return;
    }
  }
  // bucket didn't exist: create it
  client.createBucket(bucketName);
  doMultiPart(client, bucketName, file);
}
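The doMultiPart helper is not shown on this page. One common way to implement such a helper with the class this page indexes, com.amazonaws.services.s3.transfer.Upload, is via TransferManager, which splits large files into parts automatically. A sketch under that assumption, not the actual deeplearning4j implementation:

// Sketch only: a TransferManager-based doMultiPart. The real
// deeplearning4j helper may differ.
private void doMultiPart(AmazonS3 client, String bucketName, File file) {
  TransferManager tm = TransferManagerBuilder.standard()
      .withS3Client(client)
      .build();
  try {
    Upload upload = tm.upload(bucketName, file.getName(), file);
    upload.waitForCompletion();  // blocks until every part has been sent
  } catch (InterruptedException e) {
    Thread.currentThread().interrupt();
  } finally {
    tm.shutdownNow(false);       // shut the manager down, keep the client open
  }
}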
use of com.amazonaws.services.s3.transfer.Upload in project ice by Netflix.
the class MapDb method upload.
void upload() {
  AmazonS3Client s3Client = AwsUtils.getAmazonS3Client();
  File dir = new File(config.localDir);
  // pick up every local MapDb file belonging to this database
  File[] files = dir.listFiles(new FilenameFilter() {
    public boolean accept(File file, String fileName) {
      return fileName.startsWith(dbName);
    }
  });
  // upload each file under its own name...
  for (File file : files) {
    s3Client.putObject(config.workS3BucketName,
        config.workS3BucketPrefix + file.getName(), file);
  }
  // ...and again under a "copy"-prefixed key as a second copy
  for (File file : files) {
    s3Client.putObject(config.workS3BucketName,
        config.workS3BucketPrefix + "copy" + file.getName(), file);
  }
}
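One robustness note on the scan above: File.listFiles returns null when the directory is missing or unreadable, which would make the loops throw a NullPointerException. A defensive variant of the same scan, as an illustrative rewrite rather than the ice code (the LOG name is hypothetical):

// Sketch: lambda filter plus a guard against a missing local directory.
File[] files = dir.listFiles((d, name) -> name.startsWith(dbName));
if (files == null) {
  LOG.warn("Local MapDb directory {} not found; nothing to upload", dir);
  return;
}
for (File file : files) {
  s3Client.putObject(config.workS3BucketName,
      config.workS3BucketPrefix + file.getName(), file);
}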