use of com.amazonaws.services.s3.transfer.Upload in project hadoop by apache.
The class S3AFileSystem, method putObject:
/**
 * Start a transfer-manager managed async PUT of an object,
 * incrementing the put requests and put bytes
 * counters.
 * It does not update the other counters,
 * as existing code does that as progress callbacks come in.
 * Byte length is calculated from the file length, or, if there is no
 * file, from the content length of the header.
 * Because the operation is async, any stream supplied in the request
 * must reference data (files, buffers) which stay valid until the upload
 * completes.
 * @param putObjectRequest the request
 * @return the upload initiated
 */
public Upload putObject(PutObjectRequest putObjectRequest) {
  long len;
  if (putObjectRequest.getFile() != null) {
    len = putObjectRequest.getFile().length();
  } else {
    len = putObjectRequest.getMetadata().getContentLength();
  }
  incrementPutStartStatistics(len);
  try {
    Upload upload = transfers.upload(putObjectRequest);
    incrementPutCompletedStatistics(true, len);
    return upload;
  } catch (AmazonClientException e) {
    incrementPutCompletedStatistics(false, len);
    throw e;
  }
}
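Since putObject() only starts the transfer, a caller that needs the object to exist before proceeding has to wait on the returned Upload, and must keep the source file intact in the meantime. A minimal sketch of that pattern against the plain SDK TransferManager (the bucket, key, and file names here are invented for illustration):

import java.io.File;

import com.amazonaws.AmazonClientException;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.TransferManagerBuilder;
import com.amazonaws.services.s3.transfer.Upload;

public class AsyncPutSketch {
    public static void main(String[] args) throws InterruptedException {
        // Invented names for illustration only.
        String bucket = "my-bucket";
        String key = "data/sample.bin";
        File source = new File("/tmp/sample.bin");

        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        TransferManager transfers = TransferManagerBuilder.standard()
                .withS3Client(s3)
                .build();
        try {
            // upload() returns immediately; the file must remain readable
            // and unchanged until the transfer completes.
            Upload upload = transfers.upload(new PutObjectRequest(bucket, key, source));
            upload.waitForUploadResult();
        } catch (AmazonClientException e) {
            System.err.println("Upload failed: " + e.getMessage());
        } finally {
            // false: leave the wrapped AmazonS3 client open
            transfers.shutdownNow(false);
        }
    }
}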
use of com.amazonaws.services.s3.transfer.Upload in project hadoop by apache.
The class S3AOutputStream, method close:
@Override
public void close() throws IOException {
  if (closed.getAndSet(true)) {
    return;
  }
  backupStream.close();
  LOG.debug("OutputStream for key '{}' closed. Now beginning upload", key);
  try {
    final ObjectMetadata om = fs.newObjectMetadata(backupFile.length());
    Upload upload = fs.putObject(
        fs.newPutObjectRequest(key, om, backupFile));
    ProgressableProgressListener listener =
        new ProgressableProgressListener(fs, key, upload, progress);
    upload.addProgressListener(listener);
    upload.waitForUploadResult();
    listener.uploadCompleted();
    // This will delete unnecessary fake parent directories
    fs.finishedWrite(key);
  } catch (InterruptedException e) {
    throw (InterruptedIOException) new InterruptedIOException(e.toString())
        .initCause(e);
  } catch (AmazonClientException e) {
    throw translateException("saving output", key, e);
  } finally {
    if (!backupFile.delete()) {
      LOG.warn("Could not delete temporary s3a file: {}", backupFile);
    }
    super.close();
  }
  LOG.debug("OutputStream for key '{}' upload complete", key);
}
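ProgressableProgressListener is a Hadoop-internal bridge from the SDK's progress events to Hadoop's Progressable; with the bare SDK the equivalent hook is com.amazonaws.event.ProgressListener. A rough sketch of the same attach-then-wait pattern, assuming an already-started Upload (the class and method names below are made up):

import java.util.concurrent.atomic.AtomicLong;

import com.amazonaws.event.ProgressEvent;
import com.amazonaws.event.ProgressListener;
import com.amazonaws.services.s3.transfer.Upload;

public class UploadProgressSketch {
    /** Attach a listener that logs cumulative bytes, then block until done. */
    public static void waitLoudly(Upload upload) throws InterruptedException {
        final AtomicLong bytesSoFar = new AtomicLong();
        upload.addProgressListener(new ProgressListener() {
            @Override
            public void progressChanged(ProgressEvent event) {
                long total = bytesSoFar.addAndGet(event.getBytesTransferred());
                System.out.println("Transferred " + total + " bytes so far");
            }
        });
        upload.waitForUploadResult();
    }
}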
use of com.amazonaws.services.s3.transfer.Upload in project hadoop by apache.
The class S3AFileSystem, method createEmptyObject:
// Used to create an empty file that represents an empty directory
private void createEmptyObject(final String objectName)
    throws AmazonClientException, AmazonServiceException, InterruptedIOException {
  final InputStream im = new InputStream() {
    @Override
    public int read() throws IOException {
      return -1;
    }
  };
  PutObjectRequest putObjectRequest =
      newPutObjectRequest(objectName, newObjectMetadata(0L), im);
  Upload upload = putObject(putObjectRequest);
  try {
    upload.waitForUploadResult();
  } catch (InterruptedException e) {
    throw new InterruptedIOException("Interrupted creating " + objectName);
  }
  incrementPutProgressStatistics(objectName, 0);
  instrumentation.directoryCreated();
}
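The same zero-byte marker trick works with the plain SDK: supply an empty stream and metadata whose content length is 0, since the SDK cannot infer the length of a raw stream. A minimal sketch with invented bucket and key names (S3A's own directory-marker key convention is internal to the filesystem):

import java.io.ByteArrayInputStream;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;

public class EmptyMarkerSketch {
    public static void main(String[] args) {
        // Invented names; the trailing "/" is a common directory-marker convention.
        String bucket = "my-bucket";
        String dirKey = "logs/2024/";

        ObjectMetadata md = new ObjectMetadata();
        md.setContentLength(0); // required when uploading a raw stream

        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        s3.putObject(new PutObjectRequest(bucket, dirKey,
                new ByteArrayInputStream(new byte[0]), md));
    }
}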
use of com.amazonaws.services.s3.transfer.Upload in project aws-doc-sdk-examples by awsdocs.
The class XferMgrUpload, method uploadFile:
public static void uploadFile(String file_path, String bucket_name,
        String key_prefix, boolean pause) {
    System.out.println("file: " + file_path + (pause ? " (pause)" : ""));
    String key_name = null;
    if (key_prefix != null) {
        key_name = key_prefix + '/' + file_path;
    } else {
        key_name = file_path;
    }
    File f = new File(file_path);
    TransferManager xfer_mgr = new TransferManager();
    try {
        Upload xfer = xfer_mgr.upload(bucket_name, key_name, f);
        // loop with Transfer.isDone()
        XferMgrProgress.showTransferProgress(xfer);
        // or block with Transfer.waitForCompletion()
        XferMgrProgress.waitForCompletion(xfer);
    } catch (AmazonServiceException e) {
        System.err.println(e.getErrorMessage());
        System.exit(1);
    }
    xfer_mgr.shutdownNow();
}
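The two commented alternatives differ in that Transfer.waitForCompletion() blocks the calling thread, while Transfer.isDone() lets the caller poll and report progress itself. A sketch of the polling variant (the class and method names are made up; only the Transfer/TransferProgress calls come from the SDK):

import com.amazonaws.services.s3.transfer.Transfer;
import com.amazonaws.services.s3.transfer.TransferProgress;

public class PollingProgressSketch {
    /** Poll a transfer until it finishes, printing percent complete. */
    public static void showProgress(Transfer xfer) throws InterruptedException {
        while (!xfer.isDone()) {
            TransferProgress progress = xfer.getProgress();
            System.out.printf("  %.0f%% (%d of %d bytes)%n",
                    progress.getPercentTransferred(),
                    progress.getBytesTransferred(),
                    progress.getTotalBytesToTransfer());
            Thread.sleep(200);
        }
        System.out.println("Final state: " + xfer.getState());
    }
}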
use of com.amazonaws.services.s3.transfer.Upload in project aws-doc-sdk-examples by awsdocs.
The class PutObject, method main:
public static void main(String[] args) {
    final String USAGE = "\n" +
            "To run this example, supply the name of an S3 bucket and a file to\n" +
            "upload to it.\n" +
            "\n" +
            "Ex: PutObject <bucketname> <filename>\n";
    if (args.length < 2) {
        System.out.println(USAGE);
        System.exit(1);
    }
    String bucket_name = args[0];
    String file_path = args[1];
    String key_name = Paths.get(file_path).getFileName().toString();
    System.out.format("Uploading %s to S3 bucket %s...\n", file_path, bucket_name);
    final AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
    try {
        // Upload the file's contents, not the literal path string.
        s3.putObject(bucket_name, key_name, new File(file_path));
    } catch (AmazonServiceException e) {
        System.err.println(e.getErrorMessage());
        System.exit(1);
    }
    System.out.println("Done!");
}
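When the data is not already on disk, the client also accepts a stream plus ObjectMetadata; setting the content length up front keeps the SDK from buffering the whole stream just to compute it. A small sketch with invented bucket and key names:

import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;

import com.amazonaws.AmazonServiceException;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;

public class PutObjectFromStreamSketch {
    public static void main(String[] args) {
        byte[] body = "hello from a stream".getBytes(StandardCharsets.UTF_8);

        ObjectMetadata md = new ObjectMetadata();
        md.setContentLength(body.length);
        md.setContentType("text/plain");

        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        try {
            // Invented bucket and key names.
            s3.putObject("my-bucket", "notes/hello.txt",
                    new ByteArrayInputStream(body), md);
        } catch (AmazonServiceException e) {
            System.err.println(e.getErrorMessage());
        }
    }
}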