use of com.talend.shaded.com.amazonaws.services.s3.AmazonS3 in project deeplearning4j by deeplearning4j.
the class S3Uploader method upload.
/**
 * Upload the file to the bucket.
 * Will create the bucket if it hasn't already been created.
 * @param file the file to upload
 * @param bucketName the name of the bucket
 */
public void upload(File file, String bucketName) {
    AmazonS3 client = new AmazonS3Client(creds);
    bucketName = ensureValidBucketName(bucketName);
    List<Bucket> buckets = client.listBuckets();
    for (Bucket b : buckets) {
        if (b.getName().equals(bucketName)) {
            client.putObject(bucketName, file.getName(), file);
            return;
        }
    }
    // bucket didn't exist: create it
    client.createBucket(bucketName);
    client.putObject(bucketName, file.getName(), file);
}
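The existence check above lists the buckets the credentials own and scans for a name match; SDK v1 also offers doesBucketExist, which avoids the full listing (and, unlike listBuckets, can report buckets owned by other accounts). A minimal standalone sketch of the same create-if-missing upload pattern, assuming credentials come from the default provider chain and using placeholder bucket and file names:

import java.io.File;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;

public class SimpleUpload {
    public static void main(String[] args) {
        // Credentials and region are resolved from the default provider chain.
        AmazonS3 client = AmazonS3ClientBuilder.defaultClient();
        String bucketName = "example-bucket"; // placeholder
        if (!client.doesBucketExist(bucketName)) {
            client.createBucket(bucketName);
        }
        client.putObject(bucketName, "model.bin", new File("model.bin")); // placeholder file
    }
}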
use of com.talend.shaded.com.amazonaws.services.s3.AmazonS3 in project symmetric-ds by JumpMind.
the class RedshiftBulkDatabaseWriter method flush.
protected void flush() {
    if (loadedRows > 0) {
        stagedInputFile.close();
        statistics.get(batch).startTimer(DataWriterStatisticConstants.DATABASEMILLIS);
        AmazonS3 s3client = new AmazonS3Client(new BasicAWSCredentials(accessKey, secretKey));
        if (isNotBlank(s3Endpoint)) {
            s3client.setEndpoint(s3Endpoint);
        }
        String objectKey = stagedInputFile.getFile().getName();
        try {
            s3client.putObject(bucket, objectKey, stagedInputFile.getFile());
        } catch (AmazonServiceException ase) {
            log.error("Exception from AWS service: " + ase.getMessage());
        } catch (AmazonClientException ace) {
            log.error("Exception from AWS client: " + ace.getMessage());
        }
        try {
            JdbcSqlTransaction jdbcTransaction = (JdbcSqlTransaction) transaction;
            Connection c = jdbcTransaction.getConnection();
            String sql = "COPY " + getTargetTable().getFullyQualifiedTableName()
                    + " (" + Table.getCommaDeliminatedColumns(table.getColumns()) + ")"
                    + " FROM 's3://" + bucket + "/" + objectKey + "'"
                    + " CREDENTIALS 'aws_access_key_id=" + accessKey
                    + ";aws_secret_access_key=" + secretKey + "'"
                    + " CSV DATEFORMAT 'YYYY-MM-DD HH:MI:SS' "
                    + (needsExplicitIds ? "EXPLICIT_IDS" : "")
                    + (isNotBlank(appendToCopyCommand) ? (" " + appendToCopyCommand) : "");
            Statement stmt = c.createStatement();
            log.debug(sql);
            stmt.execute(sql);
            stmt.close();
            transaction.commit();
        } catch (SQLException ex) {
            throw platform.getSqlTemplate().translate(ex);
        } finally {
            statistics.get(batch).stopTimer(DataWriterStatisticConstants.DATABASEMILLIS);
        }
        stagedInputFile.delete();
        try {
            s3client.deleteObject(bucket, objectKey);
        } catch (AmazonServiceException ase) {
            log.error("Exception from AWS service: " + ase.getMessage());
        } catch (AmazonClientException ace) {
            log.error("Exception from AWS client: " + ace.getMessage());
        }
        createStagingFile();
        loadedRows = 0;
        loadedBytes = 0;
    }
}
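Filled in with placeholder values, the COPY statement assembled above comes out roughly as follows (shown as a Java string for readability; the schema, table, columns, bucket, and keys are all hypothetical). Note that this approach interpolates the AWS secret key into the SQL text, which is also handed to log.debug:

// Hypothetical rendering of the generated Redshift COPY command.
String example = "COPY my_schema.my_table (id, name)"
        + " FROM 's3://my-bucket/staged-batch-0001.csv'"
        + " CREDENTIALS 'aws_access_key_id=AKIAEXAMPLE;aws_secret_access_key=wJalrEXAMPLE'"
        + " CSV DATEFORMAT 'YYYY-MM-DD HH:MI:SS' EXPLICIT_IDS";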
use of com.talend.shaded.com.amazonaws.services.s3.AmazonS3 in project hadoop by apache.
the class ITestS3AConfiguration method testEndpoint.
/**
 * Test if a custom endpoint is picked up.
 * <p>
 * The test expects {@link S3ATestConstants#CONFIGURATION_TEST_ENDPOINT}
 * to be defined in the Configuration, describing the endpoint of the bucket
 * to which TEST_FS_S3A_NAME points (i.e. "s3-eu-west-1.amazonaws.com" if the
 * bucket is located in Ireland). Evidently, the bucket has to be hosted in
 * the region denoted by the endpoint for the test to succeed.
 * <p>
 * More info and the list of endpoint identifiers:
 * @see <a href="http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region">endpoint list</a>
 *
 * @throws Exception
 */
@Test
public void testEndpoint() throws Exception {
    conf = new Configuration();
    String endpoint = conf.getTrimmed(S3ATestConstants.CONFIGURATION_TEST_ENDPOINT, "");
    if (endpoint.isEmpty()) {
        LOG.warn("Custom endpoint test skipped as " + S3ATestConstants.CONFIGURATION_TEST_ENDPOINT
                + " config setting was not detected");
    } else {
        conf.set(Constants.ENDPOINT, endpoint);
        fs = S3ATestUtils.createTestFileSystem(conf);
        AmazonS3 s3 = fs.getAmazonS3Client();
        String endPointRegion = "";
        // Differentiate handling of "s3-" and "s3." based endpoint identifiers
        String[] endpointParts = StringUtils.split(endpoint, '.');
        if (endpointParts.length == 3) {
            endPointRegion = endpointParts[0].substring(3);
        } else if (endpointParts.length == 4) {
            endPointRegion = endpointParts[1];
        } else {
            fail("Unexpected endpoint");
        }
        assertEquals("Endpoint config setting and bucket location differ: ",
                endPointRegion, s3.getBucketLocation(fs.getUri().getHost()));
    }
}
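To make the parsing branch concrete: "s3-eu-west-1.amazonaws.com" splits on '.' into three parts, so the region is recovered by stripping the "s3-" prefix from the first part, while "s3.eu-west-1.amazonaws.com" splits into four parts and the region is simply the second one. A small standalone sketch of the same extraction (class and method names here are hypothetical):

public class EndpointRegion {
    // Mirrors the test's parsing: handles both "s3-REGION.amazonaws.com"
    // and "s3.REGION.amazonaws.com" endpoint forms.
    static String regionOf(String endpoint) {
        String[] parts = endpoint.split("\\.");
        if (parts.length == 3) {
            return parts[0].substring(3); // drop the leading "s3-"
        } else if (parts.length == 4) {
            return parts[1];
        }
        throw new IllegalArgumentException("Unexpected endpoint: " + endpoint);
    }

    public static void main(String[] args) {
        System.out.println(regionOf("s3-eu-west-1.amazonaws.com")); // eu-west-1
        System.out.println(regionOf("s3.eu-west-1.amazonaws.com")); // eu-west-1
    }
}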
use of com.talend.shaded.com.amazonaws.services.s3.AmazonS3 in project hadoop by apache.
the class ITestS3AConfiguration method shouldBeAbleToSwitchOnS3PathStyleAccessViaConfigProperty.
@Test
public void shouldBeAbleToSwitchOnS3PathStyleAccessViaConfigProperty() throws Exception {
    conf = new Configuration();
    conf.set(Constants.PATH_STYLE_ACCESS, Boolean.toString(true));
    assertTrue(conf.getBoolean(Constants.PATH_STYLE_ACCESS, false));
    try {
        fs = S3ATestUtils.createTestFileSystem(conf);
        assertNotNull(fs);
        AmazonS3 s3 = fs.getAmazonS3Client();
        assertNotNull(s3);
        S3ClientOptions clientOptions = getField(s3, S3ClientOptions.class, "clientOptions");
        assertTrue("Expected to find path style access to be switched on!",
                clientOptions.isPathStyleAccess());
        byte[] file = ContractTestUtils.toAsciiByteArray("test file");
        ContractTestUtils.writeAndRead(fs, new Path("/path/style/access/testFile"), file, file.length,
                (int) conf.getLongBytes(Constants.FS_S3A_BLOCK_SIZE, file.length), false, true);
    } catch (final AWSS3IOException e) {
        LOG.error("Caught exception: ", e);
        // Expected path-style-access behaviour when the live bucket isn't in the
        // same region as the S3 client default. See
        // http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html
        assertEquals(e.getStatusCode(), HttpStatus.SC_MOVED_PERMANENTLY);
    }
}
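Outside the S3A layer, the same switch can be set directly on an SDK v1 client at build time; a minimal sketch (the region is a placeholder):

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;

public class PathStyleClient {
    public static void main(String[] args) {
        // Path-style access requests https://s3.REGION.amazonaws.com/bucket/key
        // instead of the virtual-hosted https://bucket.s3.REGION.amazonaws.com/key.
        AmazonS3 s3 = AmazonS3ClientBuilder.standard()
                .withRegion("eu-west-1") // placeholder region
                .enablePathStyleAccess()
                .build();
        System.out.println(s3.getRegionName());
    }
}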
use of com.talend.shaded.com.amazonaws.services.s3.AmazonS3 in project aws-doc-sdk-examples by awsdocs.
the class CreateBucket method createBucket.
public static Bucket createBucket(String bucket_name) {
    final AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
    Bucket b = null;
    if (s3.doesBucketExist(bucket_name)) {
        System.out.format("Bucket %s already exists.\n", bucket_name);
        b = getBucket(bucket_name);
    } else {
        try {
            b = s3.createBucket(bucket_name);
        } catch (AmazonS3Exception e) {
            System.err.println(e.getErrorMessage());
        }
    }
    return b;
}
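The getBucket helper referenced above is not shown in this snippet; a plausible implementation scans the account's bucket list for a name match (a sketch under that assumption, not necessarily the helper from the example repository):

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.Bucket;

public class BucketLookup {
    // Returns the named bucket from the caller's bucket list, or null if absent.
    public static Bucket getBucket(String bucket_name) {
        final AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        for (Bucket b : s3.listBuckets()) {
            if (b.getName().equals(bucket_name)) {
                return b;
            }
        }
        return null;
    }
}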