Use of com.talend.shaded.com.amazonaws.services.s3.AmazonS3Client in project exhibitor by soabase.
The class S3ClientImpl, method listObjects.
@Override
public ObjectListing listObjects(ListObjectsRequest request) throws Exception {
    RefCountedClient holder = client.get();
    AmazonS3Client amazonS3Client = holder.useClient();
    try {
        return amazonS3Client.listObjects(request);
    } finally {
        holder.release();
    }
}
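The method above returns a single page of results. Below is a minimal usage sketch (not from the exhibitor source) of paging through every key with the same SDK v1 calls; the bucket name and prefix are placeholders, and s3 stands for any AmazonS3 instance:

ListObjectsRequest request = new ListObjectsRequest()
        .withBucketName("my-bucket")   // placeholder bucket name
        .withPrefix("backups/");       // placeholder prefix
ObjectListing listing = s3.listObjects(request);
while (true) {
    for (S3ObjectSummary summary : listing.getObjectSummaries()) {
        System.out.println(summary.getKey());
    }
    if (!listing.isTruncated()) {
        break;
    }
    // fetch the next page of results
    listing = s3.listNextBatchOfObjects(listing);
}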
Use of com.talend.shaded.com.amazonaws.services.s3.AmazonS3Client in project deeplearning4j by deeplearning4j.
The class S3Uploader, method upload.
/**
 * Upload the file to the bucket.
 * Will create the bucket if it hasn't already been created.
 * @param file the file to upload
 * @param bucketName the name of the bucket
 */
public void upload(File file, String bucketName) {
    AmazonS3 client = new AmazonS3Client(creds);
    bucketName = ensureValidBucketName(bucketName);
    List<Bucket> buckets = client.listBuckets();
    for (Bucket b : buckets) {
        if (b.getName().equals(bucketName)) {
            client.putObject(bucketName, file.getName(), file);
            return;
        }
    }
    // bucket didn't exist: create it
    client.createBucket(bucketName);
    client.putObject(bucketName, file.getName(), file);
}
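The existence check above enumerates every bucket in the account. For comparison, here is a minimal alternative sketch (not from the deeplearning4j source) that uses the SDK's doesBucketExist call instead; creds and file are assumed to be an AWSCredentials instance and the file to upload, and the bucket name is a placeholder:

AmazonS3 client = new AmazonS3Client(creds);                // creds assumed to be AWSCredentials
String name = ensureValidBucketName("my-dl4j-bucket");      // placeholder name, same helper as above
if (!client.doesBucketExist(name)) {
    client.createBucket(name);
}
client.putObject(name, file.getName(), file);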
Use of com.talend.shaded.com.amazonaws.services.s3.AmazonS3Client in project cloudstack by apache.
The class S3Utils, method getTransferManager.
public static TransferManager getTransferManager(final ClientOptions clientOptions) {
    if (TRANSFERMANAGER_ACCESSKEY_MAP.containsKey(clientOptions.getAccessKey())) {
        return TRANSFERMANAGER_ACCESSKEY_MAP.get(clientOptions.getAccessKey());
    }
    final AWSCredentials basicAWSCredentials = new BasicAWSCredentials(clientOptions.getAccessKey(), clientOptions.getSecretKey());
    final ClientConfiguration configuration = new ClientConfiguration();
    if (clientOptions.isHttps() != null) {
        configuration.setProtocol(clientOptions.isHttps() ? HTTPS : HTTP);
    }
    if (clientOptions.getConnectionTimeout() != null) {
        configuration.setConnectionTimeout(clientOptions.getConnectionTimeout());
    }
    if (clientOptions.getMaxErrorRetry() != null) {
        configuration.setMaxErrorRetry(clientOptions.getMaxErrorRetry());
    }
    if (clientOptions.getSocketTimeout() != null) {
        configuration.setSocketTimeout(clientOptions.getSocketTimeout());
    }
    if (clientOptions.getUseTCPKeepAlive() != null) {
        configuration.setUseTcpKeepAlive(clientOptions.getUseTCPKeepAlive());
    }
    if (clientOptions.getConnectionTtl() != null) {
        configuration.setConnectionTTL(clientOptions.getConnectionTtl());
    }
    if (clientOptions.getSigner() != null) {
        configuration.setSignerOverride(clientOptions.getSigner());
    }
    LOGGER.debug(format("Creating S3 client with configuration: [protocol: %1$s, signer: %2$s, connectionTimeOut: %3$s, maxErrorRetry: %4$s, socketTimeout: %5$s, useTCPKeepAlive: %6$s, connectionTtl: %7$s]",
            configuration.getProtocol(), configuration.getSignerOverride(), configuration.getConnectionTimeout(), configuration.getMaxErrorRetry(),
            configuration.getSocketTimeout(), clientOptions.getUseTCPKeepAlive(), clientOptions.getConnectionTtl()));
    final AmazonS3Client client = new AmazonS3Client(basicAWSCredentials, configuration);
    if (isNotBlank(clientOptions.getEndPoint())) {
        LOGGER.debug(format("Setting the end point for S3 client with access key %1$s to %2$s.", clientOptions.getAccessKey(), clientOptions.getEndPoint()));
        client.setEndpoint(clientOptions.getEndPoint());
    }
    TRANSFERMANAGER_ACCESSKEY_MAP.put(clientOptions.getAccessKey(), new TransferManager(client));
    return TRANSFERMANAGER_ACCESSKEY_MAP.get(clientOptions.getAccessKey());
}
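A minimal usage sketch (not from the cloudstack source) of how the cached TransferManager returned above might be used; the ClientOptions instance, bucket, key, and local path are placeholders:

TransferManager tm = S3Utils.getTransferManager(options);   // options assumed to be a configured ClientOptions
Upload upload = tm.upload("my-bucket", "backups/volume.tar.gz", new File("/tmp/volume.tar.gz"));
upload.waitForCompletion();   // blocks until the (possibly multipart) upload succeeds or throws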
Use of com.talend.shaded.com.amazonaws.services.s3.AmazonS3Client in project symmetric-ds by JumpMind.
The class RedshiftBulkDatabaseWriter, method flush.
protected void flush() {
    if (loadedRows > 0) {
        stagedInputFile.close();
        statistics.get(batch).startTimer(DataWriterStatisticConstants.DATABASEMILLIS);
        AmazonS3 s3client = new AmazonS3Client(new BasicAWSCredentials(accessKey, secretKey));
        if (isNotBlank(s3Endpoint)) {
            s3client.setEndpoint(s3Endpoint);
        }
        String objectKey = stagedInputFile.getFile().getName();
        try {
            s3client.putObject(bucket, objectKey, stagedInputFile.getFile());
        } catch (AmazonServiceException ase) {
            log.error("Exception from AWS service: " + ase.getMessage());
        } catch (AmazonClientException ace) {
            log.error("Exception from AWS client: " + ace.getMessage());
        }
        try {
            JdbcSqlTransaction jdbcTransaction = (JdbcSqlTransaction) transaction;
            Connection c = jdbcTransaction.getConnection();
            String sql = "COPY " + getTargetTable().getFullyQualifiedTableName() + " (" + Table.getCommaDeliminatedColumns(table.getColumns())
                    + ") FROM 's3://" + bucket + "/" + objectKey + "' CREDENTIALS 'aws_access_key_id=" + accessKey
                    + ";aws_secret_access_key=" + secretKey + "' CSV DATEFORMAT 'YYYY-MM-DD HH:MI:SS' "
                    + (needsExplicitIds ? "EXPLICIT_IDS" : "") + (isNotBlank(appendToCopyCommand) ? (" " + appendToCopyCommand) : "");
            Statement stmt = c.createStatement();
            log.debug(sql);
            stmt.execute(sql);
            stmt.close();
            transaction.commit();
        } catch (SQLException ex) {
            throw platform.getSqlTemplate().translate(ex);
        } finally {
            statistics.get(batch).stopTimer(DataWriterStatisticConstants.DATABASEMILLIS);
        }
        stagedInputFile.delete();
        try {
            s3client.deleteObject(bucket, objectKey);
        } catch (AmazonServiceException ase) {
            log.error("Exception from AWS service: " + ase.getMessage());
        } catch (AmazonClientException ace) {
            log.error("Exception from AWS client: " + ace.getMessage());
        }
        createStagingFile();
        loadedRows = 0;
        loadedBytes = 0;
    }
}
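For clarity, the Redshift COPY statement assembled by the code above has roughly the following shape (the table, column list, bucket, key, and credentials shown here are placeholders; the real values come from the target table metadata and the writer's configuration):

COPY my_schema.my_table (id, name, created_at) FROM 's3://my-bucket/staged-file.csv' CREDENTIALS 'aws_access_key_id=<access-key>;aws_secret_access_key=<secret-key>' CSV DATEFORMAT 'YYYY-MM-DD HH:MI:SS' EXPLICIT_IDS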
Use of com.talend.shaded.com.amazonaws.services.s3.AmazonS3Client in project jackrabbit-oak by apache.
The class S3DataStoreUtils, method deleteBucket.
public static void deleteBucket(String bucket, Date date) throws Exception {
    log.info("cleaning bucket [" + bucket + "]");
    Properties props = getS3Config();
    AmazonS3Client s3service = Utils.openService(props);
    TransferManager tmx = new TransferManager(s3service);
    if (s3service.doesBucketExist(bucket)) {
        for (int i = 0; i < 4; i++) {
            tmx.abortMultipartUploads(bucket, date);
            ObjectListing prevObjectListing = s3service.listObjects(bucket);
            while (prevObjectListing != null) {
                List<DeleteObjectsRequest.KeyVersion> deleteList = new ArrayList<DeleteObjectsRequest.KeyVersion>();
                for (S3ObjectSummary s3ObjSumm : prevObjectListing.getObjectSummaries()) {
                    deleteList.add(new DeleteObjectsRequest.KeyVersion(s3ObjSumm.getKey()));
                }
                if (deleteList.size() > 0) {
                    DeleteObjectsRequest delObjsReq = new DeleteObjectsRequest(bucket);
                    delObjsReq.setKeys(deleteList);
                    s3service.deleteObjects(delObjsReq);
                }
                if (!prevObjectListing.isTruncated()) {
                    break;
                }
                prevObjectListing = s3service.listNextBatchOfObjects(prevObjectListing);
            }
        }
        s3service.deleteBucket(bucket);
        log.info("bucket [ " + bucket + "] cleaned");
    } else {
        log.info("bucket [" + bucket + "] doesn't exists");
    }
    tmx.shutdownNow();
    s3service.shutdown();
}
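A minimal usage sketch (not from the jackrabbit-oak test utilities) of calling the helper above; the bucket name is a placeholder, and the Date argument is the cutoff passed to TransferManager.abortMultipartUploads, so multipart uploads initiated before it are aborted before the objects and the bucket are deleted:

// abort stale multipart uploads, delete all objects in pages, then delete the bucket itself
S3DataStoreUtils.deleteBucket("my-test-bucket", new Date());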