
Example 26 with Region

use of com.amazonaws.services.s3.model.Region in project h2o-3 by h2oai.

the class PersistS3 method configureClient.

static AmazonS3Client configureClient(AmazonS3Client s3Client) {
    if (System.getProperty(S3_REGION) != null) {
        String region = System.getProperty(S3_REGION);
        Log.debug("S3 region specified: ", region);
        s3Client.setRegion(RegionUtils.getRegion(region));
    }
    // Region overrides end-point settings
    if (System.getProperty(S3_END_POINT) != null) {
        String endPoint = System.getProperty(S3_END_POINT);
        Log.debug("S3 endpoint specified: ", endPoint);
        s3Client.setEndpoint(endPoint);
    }
    if (System.getProperty(S3_ENABLE_PATH_STYLE) != null && Boolean.valueOf(System.getProperty(S3_ENABLE_PATH_STYLE))) {
        Log.debug("S3 path style access enabled");
        S3ClientOptions sco = new S3ClientOptions();
        sco.setPathStyleAccess(true);
        s3Client.setS3ClientOptions(sco);
    }
    return s3Client;
}
Also used : S3ClientOptions(com.amazonaws.services.s3.S3ClientOptions)
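
For comparison, the same region / endpoint / path-style setup can be expressed with AmazonS3ClientBuilder, the builder API that replaces the mutator calls above in later 1.x SDK versions. This is a minimal sketch, not h2o-3 code; buildClient and its parameters are illustrative names.

import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;

public class S3BuilderSketch {

    // Sketch only: mirrors configureClient above using the builder API.
    public static AmazonS3 buildClient(String region, String endpoint, boolean pathStyleAccess) {
        AmazonS3ClientBuilder builder = AmazonS3ClientBuilder.standard()
                .withPathStyleAccessEnabled(pathStyleAccess);
        if (endpoint != null) {
            // An explicit endpoint is paired with a signing region in the builder API.
            builder.withEndpointConfiguration(new EndpointConfiguration(endpoint, region));
        } else if (region != null) {
            builder.withRegion(region);
        }
        return builder.build();
    }
}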

Example 27 with Region

use of com.amazonaws.services.s3.model.Region in project camel by apache.

the class S3Endpoint method doStart.

@Override
public void doStart() throws Exception {
    super.doStart();
    s3Client = configuration.getAmazonS3Client() != null ? configuration.getAmazonS3Client() : createS3Client();
    if (ObjectHelper.isNotEmpty(configuration.getAmazonS3Endpoint())) {
        s3Client.setEndpoint(configuration.getAmazonS3Endpoint());
    }
    String fileName = getConfiguration().getFileName();
    if (fileName != null) {
        LOG.trace("File name [{}] requested, so skipping bucket check...", fileName);
        return;
    }
    String bucketName = getConfiguration().getBucketName();
    LOG.trace("Querying whether bucket [{}] already exists...", bucketName);
    String prefix = getConfiguration().getPrefix();
    try {
        s3Client.listObjects(new ListObjectsRequest(bucketName, prefix, null, null, 0));
        LOG.trace("Bucket [{}] already exists", bucketName);
        return;
    } catch (AmazonServiceException ase) {
        /* 404 means the bucket doesn't exist */
        if (ase.getStatusCode() != 404) {
            throw ase;
        }
    }
    LOG.trace("Bucket [{}] doesn't exist yet", bucketName);
    // creates the new bucket because it doesn't exist yet
    CreateBucketRequest createBucketRequest = new CreateBucketRequest(getConfiguration().getBucketName());
    if (getConfiguration().getRegion() != null) {
        createBucketRequest.setRegion(getConfiguration().getRegion());
    }
    LOG.trace("Creating bucket [{}] in region [{}] with request [{}]...", configuration.getBucketName(), configuration.getRegion(), createBucketRequest);
    s3Client.createBucket(createBucketRequest);
    LOG.trace("Bucket created");
    if (configuration.getPolicy() != null) {
        LOG.trace("Updating bucket [{}] with policy [{}]", bucketName, configuration.getPolicy());
        s3Client.setBucketPolicy(bucketName, configuration.getPolicy());
        LOG.trace("Bucket policy updated");
    }
}
Also used : ListObjectsRequest(com.amazonaws.services.s3.model.ListObjectsRequest) CreateBucketRequest(com.amazonaws.services.s3.model.CreateBucketRequest) AmazonServiceException(com.amazonaws.AmazonServiceException)
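
The existence probe used here (a zero-key listObjects call, treating a 404 as "bucket does not exist") is easy to lift into a standalone helper. A minimal sketch of that pattern; ensureBucket is an illustrative name, not Camel API.

import com.amazonaws.AmazonServiceException;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.CreateBucketRequest;
import com.amazonaws.services.s3.model.ListObjectsRequest;

public class EnsureBucketSketch {

    // Sketch only: create the bucket if it does not already exist.
    public static void ensureBucket(AmazonS3 s3, String bucketName, String region) {
        try {
            // Listing with maxKeys = 0 is a cheap probe for existence and access.
            s3.listObjects(new ListObjectsRequest(bucketName, null, null, null, 0));
            return; // bucket already exists
        } catch (AmazonServiceException ase) {
            if (ase.getStatusCode() != 404) {
                throw ase; // anything other than "not found" is a real failure
            }
        }
        CreateBucketRequest request = new CreateBucketRequest(bucketName);
        if (region != null) {
            request.setRegion(region);
        }
        s3.createBucket(request);
    }
}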

Example 28 with Region

use of com.amazonaws.services.s3.model.Region in project hadoop by apache.

the class ITestS3AConfiguration method testEndpoint.

/**
   * Test if custom endpoint is picked up.
   * <p>
   * The test expects {@link S3ATestConstants#CONFIGURATION_TEST_ENDPOINT}
   * to be defined in the Configuration
   * describing the endpoint of the bucket to which TEST_FS_S3A_NAME points
   * (e.g. "s3-eu-west-1.amazonaws.com" if the bucket is located in Ireland).
   * Evidently, the bucket has to be hosted in the region denoted by the
   * endpoint for the test to succeed.
   * <p>
   * More info and the list of endpoint identifiers:
   * @see <a href="http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region">endpoint list</a>.
   *
   * @throws Exception
   */
@Test
public void testEndpoint() throws Exception {
    conf = new Configuration();
    String endpoint = conf.getTrimmed(S3ATestConstants.CONFIGURATION_TEST_ENDPOINT, "");
    if (endpoint.isEmpty()) {
        LOG.warn("Custom endpoint test skipped as " + S3ATestConstants.CONFIGURATION_TEST_ENDPOINT + "config " + "setting was not detected");
    } else {
        conf.set(Constants.ENDPOINT, endpoint);
        fs = S3ATestUtils.createTestFileSystem(conf);
        AmazonS3 s3 = fs.getAmazonS3Client();
        String endPointRegion = "";
        // Differentiate handling of "s3-" and "s3." based endpoint identifiers
        String[] endpointParts = StringUtils.split(endpoint, '.');
        if (endpointParts.length == 3) {
            endPointRegion = endpointParts[0].substring(3);
        } else if (endpointParts.length == 4) {
            endPointRegion = endpointParts[1];
        } else {
            fail("Unexpected endpoint");
        }
        assertEquals("Endpoint config setting and bucket location differ: ", endPointRegion, s3.getBucketLocation(fs.getUri().getHost()));
    }
}
Also used : AmazonS3(com.amazonaws.services.s3.AmazonS3) Configuration(org.apache.hadoop.conf.Configuration) ClientConfiguration(com.amazonaws.ClientConfiguration) Test(org.junit.Test)

Example 29 with Region

use of com.amazonaws.services.s3.model.Region in project hadoop by apache.

the class ITestS3AConfiguration method shouldBeAbleToSwitchOnS3PathStyleAccessViaConfigProperty.

@Test
public void shouldBeAbleToSwitchOnS3PathStyleAccessViaConfigProperty() throws Exception {
    conf = new Configuration();
    conf.set(Constants.PATH_STYLE_ACCESS, Boolean.toString(true));
    assertTrue(conf.getBoolean(Constants.PATH_STYLE_ACCESS, false));
    try {
        fs = S3ATestUtils.createTestFileSystem(conf);
        assertNotNull(fs);
        AmazonS3 s3 = fs.getAmazonS3Client();
        assertNotNull(s3);
        S3ClientOptions clientOptions = getField(s3, S3ClientOptions.class, "clientOptions");
        assertTrue("Expected to find path style access to be switched on!", clientOptions.isPathStyleAccess());
        byte[] file = ContractTestUtils.toAsciiByteArray("test file");
        ContractTestUtils.writeAndRead(fs, new Path("/path/style/access/testFile"), file, file.length, (int) conf.getLongBytes(Constants.FS_S3A_BLOCK_SIZE, file.length), false, true);
    } catch (final AWSS3IOException e) {
        LOG.error("Caught exception: ", e);
        // Catch/pass standard path style access behaviour when live bucket
        // isn't in the same region as the s3 client default. See
        // http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html
        assertEquals(e.getStatusCode(), HttpStatus.SC_MOVED_PERMANENTLY);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) AmazonS3(com.amazonaws.services.s3.AmazonS3) Configuration(org.apache.hadoop.conf.Configuration) ClientConfiguration(com.amazonaws.ClientConfiguration) S3ClientOptions(com.amazonaws.services.s3.S3ClientOptions) Test(org.junit.Test)
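
What path-style access actually changes is the shape of the request URL: the bucket moves from the host name into the path. A small illustration using getUrl, which only composes the URL and does not hit the network; this is a hedged sketch, not part of the Hadoop test, and the bucket and key names are placeholders.

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;

public class PathStyleUrlSketch {
    public static void main(String[] args) {
        // Path-style:            https://<endpoint>/<bucket>/<key>
        // Virtual-hosted style:  https://<bucket>.<endpoint>/<key>
        AmazonS3 s3 = AmazonS3ClientBuilder.standard()
                .withRegion("eu-west-1")
                .withPathStyleAccessEnabled(true)
                .build();
        System.out.println(s3.getUrl("example-bucket", "example-key"));
    }
}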

Example 30 with Region

use of com.amazonaws.services.s3.model.Region in project nifi by apache.

the class AbstractS3IT method oneTimeSetup.

@BeforeClass
public static void oneTimeSetup() {
    // Creates a client and bucket for this test
    final FileInputStream fis;
    try {
        fis = new FileInputStream(CREDENTIALS_FILE);
    } catch (FileNotFoundException e1) {
        fail("Could not open credentials file " + CREDENTIALS_FILE + ": " + e1.getLocalizedMessage());
        return;
    }
    try {
        final PropertiesCredentials credentials = new PropertiesCredentials(fis);
        client = new AmazonS3Client(credentials);
        if (client.doesBucketExist(BUCKET_NAME)) {
            fail("Bucket " + BUCKET_NAME + " exists. Choose a different bucket name to continue test");
        }
        // See https://github.com/boto/boto3/issues/125
        CreateBucketRequest request = REGION.contains("east")
                ? new CreateBucketRequest(BUCKET_NAME)
                : new CreateBucketRequest(BUCKET_NAME, REGION);
        client.createBucket(request);
    } catch (final AmazonS3Exception e) {
        fail("Can't create the key " + BUCKET_NAME + ": " + e.getLocalizedMessage());
    } catch (final IOException e) {
        fail("Caught IOException preparing tests: " + e.getLocalizedMessage());
    } finally {
        FileUtils.closeQuietly(fis);
    }
    if (!client.doesBucketExist(BUCKET_NAME)) {
        fail("Setup incomplete, tests will fail");
    }
}
Also used : AmazonS3Client(com.amazonaws.services.s3.AmazonS3Client) CreateBucketRequest(com.amazonaws.services.s3.model.CreateBucketRequest) FileNotFoundException(java.io.FileNotFoundException) PropertiesCredentials(com.amazonaws.auth.PropertiesCredentials) AmazonS3Exception(com.amazonaws.services.s3.model.AmazonS3Exception) IOException(java.io.IOException) FileInputStream(java.io.FileInputStream) BeforeClass(org.junit.BeforeClass)
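
The REGION.contains("east") branch works around the fact that the US Standard region (us-east-1) does not accept an explicit location constraint when creating a bucket, which is what the linked boto3 issue describes. A sketch of the same decision written against an explicit region string; newCreateBucketRequest is an illustrative name, not NiFi code.

import com.amazonaws.services.s3.model.CreateBucketRequest;

public class RegionAwareCreateBucketSketch {

    // us-east-1 buckets are created without a LocationConstraint;
    // every other region needs the constraint spelled out.
    static CreateBucketRequest newCreateBucketRequest(String bucketName, String region) {
        if (region == null || region.equals("us-east-1")) {
            return new CreateBucketRequest(bucketName);
        }
        return new CreateBucketRequest(bucketName, region);
    }
}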

Aggregations

AmazonS3 (com.amazonaws.services.s3.AmazonS3): 18
AmazonS3Client (com.amazonaws.services.s3.AmazonS3Client): 17
IOException (java.io.IOException): 12
AmazonServiceException (com.amazonaws.AmazonServiceException): 11
AmazonS3Exception (com.amazonaws.services.s3.model.AmazonS3Exception): 11
Test (org.junit.Test): 10
AmazonClientException (com.amazonaws.AmazonClientException): 9
BasicAWSCredentials (com.amazonaws.auth.BasicAWSCredentials): 9
Regions (com.amazonaws.regions.Regions): 9
HashMap (java.util.HashMap): 9
Date (java.util.Date): 8
Map (java.util.Map): 8
ClientConfiguration (com.amazonaws.ClientConfiguration): 7
AmazonS3ClientBuilder (com.amazonaws.services.s3.AmazonS3ClientBuilder): 7
S3Object (com.amazonaws.services.s3.model.S3Object): 7
AWSKMS (com.amazonaws.services.kms.AWSKMS): 6
TransferManager (com.amazonaws.services.s3.transfer.TransferManager): 6
ByteArrayInputStream (java.io.ByteArrayInputStream): 6
FileNotFoundException (java.io.FileNotFoundException): 6
InputStream (java.io.InputStream): 6