Use of com.amazonaws.services.s3.transfer.Transfer in project spring-integration-aws by spring-projects.
The class S3MessageHandler, method download.
private Transfer download(Message<?> requestMessage) {
    Object payload = requestMessage.getPayload();
    Assert.state(payload instanceof File,
            "For the 'DOWNLOAD' operation the 'payload' must be of 'java.io.File' type, "
                    + "but received: [" + payload.getClass() + ']');
    File targetFile = (File) payload;
    String bucket = obtainBucket(requestMessage);
    String key;
    if (this.keyExpression != null) {
        key = this.keyExpression.getValue(this.evaluationContext, requestMessage, String.class);
    } else {
        key = targetFile.getName();
    }
    Assert.state(key != null,
            "The 'keyExpression' must not be null for non-File payloads and can't evaluate to null. "
                    + "Root object is: " + requestMessage);
    if (targetFile.isDirectory()) {
        return this.transferManager.downloadDirectory(bucket, key, targetFile);
    } else if (this.s3ProgressListener != null) {
        return this.transferManager.download(new GetObjectRequest(bucket, key), targetFile,
                this.s3ProgressListener);
    } else {
        return this.transferManager.download(bucket, key, targetFile);
    }
}
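For context, a minimal standalone sketch of the same single-object download, outside the Spring Integration handler. The bucket, key, and target file are placeholder values, and TransferManagerBuilder (AWS SDK for Java 1.11+) is assumed as the way to obtain the TransferManager the handler delegates to.

import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.transfer.Download;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.TransferManagerBuilder;
import java.io.File;

public class DownloadSketch {
    public static void main(String[] args) throws Exception {
        TransferManager tm = TransferManagerBuilder.standard()
                .withS3Client(AmazonS3ClientBuilder.defaultClient())
                .build();
        // Same call the handler issues for a non-directory payload.
        Download download = tm.download("my-bucket", "my-key", new File("/tmp/my-key"));
        // Transfers are asynchronous; block until this one finishes.
        download.waitForCompletion();
        tm.shutdownNow();
    }
}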
Use of com.amazonaws.services.s3.transfer.Transfer in project jackrabbit by apache.
The class S3Backend, method init.
public void init(CachingDataStore store, String homeDir, Properties prop) throws DataStoreException {
    ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    try {
        startTime = new Date();
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        LOG.debug("init");
        setDataStore(store);
        s3ReqDecorator = new S3RequestDecorator(prop);
        s3service = Utils.openService(prop);
        if (bucket == null || "".equals(bucket.trim())) {
            bucket = prop.getProperty(S3Constants.S3_BUCKET);
        }
        String region = prop.getProperty(S3Constants.S3_REGION);
        Region s3Region = null;
        if (StringUtils.isNullOrEmpty(region)) {
            com.amazonaws.regions.Region ec2Region = Regions.getCurrentRegion();
            if (ec2Region != null) {
                s3Region = Region.fromValue(ec2Region.getName());
            } else {
                throw new AmazonClientException("parameter [" + S3Constants.S3_REGION
                        + "] not configured and cannot be derived from environment");
            }
        } else {
            if (Utils.DEFAULT_AWS_BUCKET_REGION.equals(region)) {
                s3Region = Region.US_Standard;
            } else if (Region.EU_Ireland.toString().equals(region)) {
                s3Region = Region.EU_Ireland;
            } else {
                s3Region = Region.fromValue(region);
            }
        }
        if (!s3service.doesBucketExist(bucket)) {
            s3service.createBucket(bucket, s3Region);
            LOG.info("Created bucket [{}] in [{}]", bucket, region);
        } else {
            LOG.info("Using bucket [{}] in [{}]", bucket, region);
        }
        int writeThreads = 10;
        String writeThreadsStr = prop.getProperty(S3Constants.S3_WRITE_THREADS);
        if (writeThreadsStr != null) {
            writeThreads = Integer.parseInt(writeThreadsStr);
        }
        LOG.info("Using thread pool of [{}] threads in S3 transfer manager.", writeThreads);
        tmx = new TransferManager(s3service, (ThreadPoolExecutor) Executors.newFixedThreadPool(
                writeThreads, new NamedThreadFactory("s3-transfer-manager-worker")));
        int asyncWritePoolSize = 10;
        String maxConnsStr = prop.getProperty(S3Constants.S3_MAX_CONNS);
        if (maxConnsStr != null) {
            asyncWritePoolSize = Integer.parseInt(maxConnsStr) - writeThreads;
        }
        setAsyncWritePoolSize(asyncWritePoolSize);
        String renameKeyProp = prop.getProperty(S3Constants.S3_RENAME_KEYS);
        boolean renameKeyBool = (renameKeyProp == null || "".equals(renameKeyProp))
                ? false
                : Boolean.parseBoolean(renameKeyProp);
        LOG.info("Rename keys [{}]", renameKeyBool);
        if (renameKeyBool) {
            renameKeys();
        }
        LOG.debug("S3 Backend initialized in [{}] ms", System.currentTimeMillis() - startTime.getTime());
    } catch (Exception e) {
        LOG.debug("error", e);
        throw new DataStoreException("Could not initialize S3 from " + prop, e);
    } finally {
        if (contextClassLoader != null) {
            Thread.currentThread().setContextClassLoader(contextClassLoader);
        }
    }
}
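A hedged sketch of the same worker-pool sizing using the non-deprecated builder API; the property name "writeThreads" and the default of 10 mirror the S3Constants.S3_WRITE_THREADS handling above but are placeholders here, not Jackrabbit's configuration keys.

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.TransferManagerBuilder;
import java.util.Properties;
import java.util.concurrent.Executors;

public class TransferManagerPoolSketch {
    public static TransferManager build(AmazonS3 s3, Properties prop) {
        // Default of 10 write threads, overridable via a (hypothetical) property.
        int writeThreads = Integer.parseInt(prop.getProperty("writeThreads", "10"));
        return TransferManagerBuilder.standard()
                .withS3Client(s3)
                .withExecutorFactory(() -> Executors.newFixedThreadPool(writeThreads))
                .build();
    }
}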
Use of com.amazonaws.services.s3.transfer.Transfer in project hadoop by apache.
The class S3AFileSystem, method putObject.
/**
* Start a transfer-manager managed async PUT of an object,
* incrementing the put requests and put bytes
* counters.
* It does not update the other counters,
* as existing code does that as progress callbacks come in.
* Byte length is calculated from the file length, or, if there is no
* file, from the content length of the header.
* Because the operation is async, any stream supplied in the request
* must reference data (files, buffers) which stay valid until the upload
* completes.
* @param putObjectRequest the request
* @return the upload initiated
*/
public Upload putObject(PutObjectRequest putObjectRequest) {
    long len;
    if (putObjectRequest.getFile() != null) {
        len = putObjectRequest.getFile().length();
    } else {
        len = putObjectRequest.getMetadata().getContentLength();
    }
    incrementPutStartStatistics(len);
    try {
        Upload upload = transfers.upload(putObjectRequest);
        incrementPutCompletedStatistics(true, len);
        return upload;
    } catch (AmazonClientException e) {
        incrementPutCompletedStatistics(false, len);
        throw e;
    }
}
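A caller-side sketch of the async contract described in the javadoc; S3AFileSystemLike is a hypothetical stand-in for the object exposing putObject, and the bucket and key are placeholders.

import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.services.s3.transfer.Upload;
import java.io.File;

public class PutObjectCallerSketch {
    // Hypothetical stand-in for the method shown above.
    interface S3AFileSystemLike {
        Upload putObject(PutObjectRequest putObjectRequest);
    }

    static void putAndWait(S3AFileSystemLike fs, File source) throws InterruptedException {
        PutObjectRequest req = new PutObjectRequest("my-bucket", "my-key", source);
        // Per the javadoc, the source file must stay valid until the upload completes.
        Upload upload = fs.putObject(req);
        // The PUT runs in the background; block only if the result is needed now.
        upload.waitForCompletion();
    }
}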
Use of com.amazonaws.services.s3.transfer.Transfer in project aws-doc-sdk-examples by awsdocs.
The class XferMgrProgress, method showTransferProgress.
// Prints progress while waiting for the transfer to finish.
public static void showTransferProgress(Transfer xfer) {
    // Print the transfer's human-readable description.
    System.out.println(xfer.getDescription());
    // Print an empty progress bar...
    printProgressBar(0.0);
    // ...and update it while the transfer is ongoing.
    do {
        try {
            Thread.sleep(100);
        } catch (InterruptedException e) {
            return;
        }
        // Note: so_far and total aren't used; they're just here for
        // documentation purposes.
        TransferProgress progress = xfer.getProgress();
        long so_far = progress.getBytesTransferred();
        long total = progress.getTotalBytesToTransfer();
        double pct = progress.getPercentTransferred();
        eraseProgressBar();
        printProgressBar(pct);
    } while (!xfer.isDone());
    // Print the final state of the transfer.
    TransferState xfer_state = xfer.getState();
    System.out.println(": " + xfer_state);
}
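A usage sketch for the method above: any in-flight Transfer works, here an Upload with placeholder bucket, key, and file.

import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.TransferManagerBuilder;
import com.amazonaws.services.s3.transfer.Upload;
import java.io.File;

public class XferMgrProgressUsage {
    public static void main(String[] args) {
        TransferManager tm = TransferManagerBuilder.defaultTransferManager();
        Upload upload = tm.upload("my-bucket", "my-key", new File("data.bin"));
        // Loops until upload.isDone(), printing the progress bar.
        XferMgrProgress.showTransferProgress(upload);
        tm.shutdownNow();
    }
}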
Use of com.amazonaws.services.s3.transfer.Transfer in project stocator by SparkTC.
The class COSAPIClient, method initiate.
@Override
public void initiate(String scheme) throws IOException, ConfigurationParseException {
    mCachedSparkOriginated = new HashMap<String, Boolean>();
    mCachedSparkJobsStatus = new HashMap<String, Boolean>();
    schemaProvided = scheme;
    Properties props = ConfigurationHandler.initialize(filesystemURI, conf, scheme);
    // Set bucket name property
    int cacheSize = conf.getInt(CACHE_SIZE, GUAVA_CACHE_SIZE_DEFAULT);
    memoryCache = MemoryCache.getInstance(cacheSize);
    mBucket = props.getProperty(COS_BUCKET_PROPERTY);
    workingDir = new Path("/user", System.getProperty("user.name"))
            .makeQualified(filesystemURI, getWorkingDirectory());
    LOG.trace("Working directory set to {}", workingDir);
    fModeAutomaticDelete = "true".equals(props.getProperty(FMODE_AUTOMATIC_DELETE_COS_PROPERTY, "false"));
    mIsV2Signer = "true".equals(props.getProperty(V2_SIGNER_TYPE_COS_PROPERTY, "false"));
    // Define COS client
    String accessKey = props.getProperty(ACCESS_KEY_COS_PROPERTY);
    String secretKey = props.getProperty(SECRET_KEY_COS_PROPERTY);
    if (accessKey == null) {
        throw new ConfigurationParseException("Access KEY is empty. Please provide valid access key");
    }
    if (secretKey == null) {
        throw new ConfigurationParseException("Secret KEY is empty. Please provide valid secret key");
    }
    BasicAWSCredentials creds = new BasicAWSCredentials(accessKey, secretKey);
    ClientConfiguration clientConf = new ClientConfiguration();
    int maxThreads = Utils.getInt(conf, FS_COS, FS_ALT_KEYS, MAX_THREADS, DEFAULT_MAX_THREADS);
    if (maxThreads < 2) {
        LOG.warn(MAX_THREADS + " must be at least 2: forcing to 2.");
        maxThreads = 2;
    }
    int totalTasks = Utils.getInt(conf, FS_COS, FS_ALT_KEYS, MAX_TOTAL_TASKS, DEFAULT_MAX_TOTAL_TASKS);
    long keepAliveTime = Utils.getLong(conf, FS_COS, FS_ALT_KEYS, KEEPALIVE_TIME, DEFAULT_KEEPALIVE_TIME);
    threadPoolExecutor = BlockingThreadPoolExecutorService.newInstance(maxThreads,
            maxThreads + totalTasks, keepAliveTime, TimeUnit.SECONDS, "s3a-transfer-shared");
    unboundedThreadPool = new ThreadPoolExecutor(maxThreads, Integer.MAX_VALUE,
            keepAliveTime, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(),
            BlockingThreadPoolExecutorService.newDaemonThreadFactory("s3a-transfer-unbounded"));
    boolean secureConnections = Utils.getBoolean(conf, FS_COS, FS_ALT_KEYS,
            SECURE_CONNECTIONS, DEFAULT_SECURE_CONNECTIONS);
    clientConf.setProtocol(secureConnections ? Protocol.HTTPS : Protocol.HTTP);
    String proxyHost = Utils.getTrimmed(conf, FS_COS, FS_ALT_KEYS, PROXY_HOST, "");
    int proxyPort = Utils.getInt(conf, FS_COS, FS_ALT_KEYS, PROXY_PORT, -1);
    if (!proxyHost.isEmpty()) {
        clientConf.setProxyHost(proxyHost);
        if (proxyPort >= 0) {
            clientConf.setProxyPort(proxyPort);
        } else if (secureConnections) {
            LOG.warn("Proxy host set without port. Using HTTPS default 443");
            clientConf.setProxyPort(443);
        } else {
            LOG.warn("Proxy host set without port. Using HTTP default 80");
            clientConf.setProxyPort(80);
        }
        String proxyUsername = Utils.getTrimmed(conf, FS_COS, FS_ALT_KEYS, PROXY_USERNAME);
        String proxyPassword = Utils.getTrimmed(conf, FS_COS, FS_ALT_KEYS, PROXY_PASSWORD);
        if ((proxyUsername == null) != (proxyPassword == null)) {
            String msg = "Proxy error: " + PROXY_USERNAME + " or " + PROXY_PASSWORD
                    + " set without the other.";
            LOG.error(msg);
            throw new IllegalArgumentException(msg);
        }
        clientConf.setProxyUsername(proxyUsername);
        clientConf.setProxyPassword(proxyPassword);
        clientConf.setProxyDomain(Utils.getTrimmed(conf, FS_COS, FS_ALT_KEYS, PROXY_DOMAIN));
        clientConf.setProxyWorkstation(Utils.getTrimmed(conf, FS_COS, FS_ALT_KEYS, PROXY_WORKSTATION));
        if (LOG.isDebugEnabled()) {
            LOG.debug("Using proxy server {}:{} as user {} on domain {} as workstation {}",
                    clientConf.getProxyHost(), clientConf.getProxyPort(),
                    String.valueOf(clientConf.getProxyUsername()),
                    clientConf.getProxyDomain(), clientConf.getProxyWorkstation());
        }
    } else if (proxyPort >= 0) {
        String msg = "Proxy error: " + PROXY_PORT + " set without " + PROXY_HOST;
        LOG.error(msg);
        throw new IllegalArgumentException(msg);
    }
    initConnectionSettings(conf, clientConf);
    if (mIsV2Signer) {
        clientConf.withSignerOverride("S3SignerType");
    }
    mClient = new AmazonS3Client(creds, clientConf);
    final String serviceUrl = props.getProperty(ENDPOINT_URL_COS_PROPERTY);
    if (serviceUrl != null && !serviceUrl.equals(amazonDefaultEndpoint)) {
        mClient.setEndpoint(serviceUrl);
    }
    mClient.setS3ClientOptions(S3ClientOptions.builder().setPathStyleAccess(true).build());
    // Set block size property (in MB; default 128)
    String mBlockSizeString = props.getProperty(BLOCK_SIZE_COS_PROPERTY, "128");
    mBlockSize = Long.parseLong(mBlockSizeString) * 1024 * 1024L;
    bufferDirectory = Utils.getTrimmed(conf, FS_COS, FS_ALT_KEYS, BUFFER_DIR);
    bufferDirectoryKey = Utils.getConfigKey(conf, FS_COS, FS_ALT_KEYS, BUFFER_DIR);
    LOG.trace("Buffer directory is set to {} for the key {}", bufferDirectory, bufferDirectoryKey);
    boolean autoCreateBucket = "true".equalsIgnoreCase(props.getProperty(AUTO_BUCKET_CREATE_COS_PROPERTY, "false"));
    partSize = Utils.getLong(conf, FS_COS, FS_ALT_KEYS, MULTIPART_SIZE, DEFAULT_MULTIPART_SIZE);
    multiPartThreshold = Utils.getLong(conf, FS_COS, FS_ALT_KEYS,
            MIN_MULTIPART_THRESHOLD, DEFAULT_MIN_MULTIPART_THRESHOLD);
    readAhead = Utils.getLong(conf, FS_COS, FS_ALT_KEYS, READAHEAD_RANGE, DEFAULT_READAHEAD_RANGE);
    LOG.debug(READAHEAD_RANGE + ":" + readAhead);
    inputPolicy = COSInputPolicy.getPolicy(
            Utils.getTrimmed(conf, FS_COS, FS_ALT_KEYS, INPUT_FADVISE, INPUT_FADV_NORMAL));
    initTransferManager();
    maxKeys = Utils.getInt(conf, FS_COS, FS_ALT_KEYS, MAX_PAGING_KEYS, DEFAULT_MAX_PAGING_KEYS);
    flatListingFlag = Utils.getBoolean(conf, FS_COS, FS_ALT_KEYS, FLAT_LISTING, DEFAULT_FLAT_LISTING);
    if (autoCreateBucket) {
        try {
            boolean bucketExist = mClient.doesBucketExist(mBucket);
            if (bucketExist) {
                LOG.trace("Bucket {} exists", mBucket);
            } else {
                LOG.trace("Bucket {} doesn't exist; auto-creating it", mBucket);
                String mRegion = props.getProperty(REGION_COS_PROPERTY);
                if (mRegion == null) {
                    mClient.createBucket(mBucket);
                } else {
                    LOG.trace("Creating bucket {} in region {}", mBucket, mRegion);
                    mClient.createBucket(mBucket, mRegion);
                }
            }
        } catch (AmazonServiceException ase) {
            /*
             * Ignore the BucketAlreadyExists exception: multiple processes or threads
             * might try to create the bucket in parallel, so it is expected that
             * some will fail to create it.
             */
            if (!ase.getErrorCode().equals("BucketAlreadyExists")) {
                LOG.error(ase.getMessage());
                throw ase;
            }
        } catch (Exception e) {
            LOG.error(e.getMessage());
            throw e;
        }
    }
    initMultipartUploads(conf);
    enableMultiObjectsDelete = Utils.getBoolean(conf, FS_COS, FS_ALT_KEYS, ENABLE_MULTI_DELETE, true);
    blockUploadEnabled = Utils.getBoolean(conf, FS_COS, FS_ALT_KEYS, FAST_UPLOAD, DEFAULT_FAST_UPLOAD);
    if (blockUploadEnabled) {
        blockOutputBuffer = Utils.getTrimmed(conf, FS_COS, FS_ALT_KEYS,
                FAST_UPLOAD_BUFFER, DEFAULT_FAST_UPLOAD_BUFFER);
        partSize = COSUtils.ensureOutputParameterInRange(MULTIPART_SIZE, partSize);
        blockFactory = COSDataBlocks.createFactory(this, blockOutputBuffer);
        blockOutputActiveBlocks = Utils.getInt(conf, FS_COS, FS_ALT_KEYS,
                FAST_UPLOAD_ACTIVE_BLOCKS, DEFAULT_FAST_UPLOAD_ACTIVE_BLOCKS);
        LOG.debug("Using COSBlockOutputStream with buffer = {}; block={}; queue limit={}",
                blockOutputBuffer, partSize, blockOutputActiveBlocks);
    } else {
        LOG.debug("Using COSOutputStream");
    }
}
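A hedged, standalone distillation of the client setup above (protocol, proxy, custom endpoint, path-style access). The credentials, proxy, and endpoint values are placeholders, and the deprecated-but-functional SDK v1 constructor is the same one the snippet uses; stocator's Utils/conf plumbing is omitted.

import com.amazonaws.ClientConfiguration;
import com.amazonaws.Protocol;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.services.s3.AmazonS3Client;
import com.amazonaws.services.s3.S3ClientOptions;

public class CosClientSketch {
    public static AmazonS3Client build() {
        ClientConfiguration clientConf = new ClientConfiguration();
        clientConf.setProtocol(Protocol.HTTPS);
        // Placeholder proxy settings, mirroring the PROXY_* handling above.
        clientConf.setProxyHost("proxy.example.com");
        clientConf.setProxyPort(443);
        AmazonS3Client client = new AmazonS3Client(
                new BasicAWSCredentials("ACCESS_KEY", "SECRET_KEY"), clientConf);
        // Placeholder S3-compatible endpoint.
        client.setEndpoint("https://s3.example.com");
        // S3-compatible object stores typically need path-style addressing.
        client.setS3ClientOptions(S3ClientOptions.builder().setPathStyleAccess(true).build());
        return client;
    }
}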