Search in sources:

Example 1 with Owner

use of com.amazonaws.services.s3.model.Owner in project elasticsearch by elastic.

From the class S3BlobStoreTests, method testInitCannedACL:

/**
 * Verifies {@code S3BlobStore.initCannedACL}: null/empty input falls back to
 * {@code Private}, every supported ACL string round-trips to the matching enum
 * constant, and every AWS-defined canned ACL is accepted.
 */
public void testInitCannedACL() throws IOException {
    // Null or empty ACL strings must default to the most restrictive policy.
    assertThat(S3BlobStore.initCannedACL(null), equalTo(CannedAccessControlList.Private));
    assertThat(S3BlobStore.initCannedACL(""), equalTo(CannedAccessControlList.Private));
    // Each known ACL string should resolve to an enum whose toString matches it.
    final String[] knownAcls = {
        "private", "public-read", "public-read-write", "authenticated-read",
        "log-delivery-write", "bucket-owner-read", "bucket-owner-full-control"
    };
    for (String expected : knownAcls) {
        assertThat(S3BlobStore.initCannedACL(expected).toString(), equalTo(expected));
    }
    // Every canned ACL the AWS SDK defines must also be accepted verbatim.
    for (CannedAccessControlList candidate : CannedAccessControlList.values()) {
        assertThat(S3BlobStore.initCannedACL(candidate.toString()), equalTo(candidate));
    }
}
Also used : CannedAccessControlList(com.amazonaws.services.s3.model.CannedAccessControlList)

Example 2 with Owner

use of com.amazonaws.services.s3.model.Owner in project alluxio by Alluxio.

From the class S3AUnderFileSystem, method createInstance:

/**
   * Constructs a new instance of {@link S3AUnderFileSystem}.
   *
   * <p>Propagates Alluxio-configured AWS credentials into system properties, builds a
   * {@link ClientConfiguration} (timeouts, protocol, proxy, connection pool), wires up a
   * {@link TransferManager} for multipart transfers, and optionally resolves the bucket
   * owner/mode when ACL inheritance is enabled.
   *
   * @param uri the {@link AlluxioURI} for this UFS
   * @return the created {@link S3AUnderFileSystem} instance
   */
public static S3AUnderFileSystem createInstance(AlluxioURI uri) {
    String bucketName = uri.getHost();
    // Set the aws credential system properties based on Alluxio properties, if they are set
    if (Configuration.containsKey(PropertyKey.S3A_ACCESS_KEY)) {
        System.setProperty(SDKGlobalConfiguration.ACCESS_KEY_SYSTEM_PROPERTY, Configuration.get(PropertyKey.S3A_ACCESS_KEY));
    }
    if (Configuration.containsKey(PropertyKey.S3A_SECRET_KEY)) {
        System.setProperty(SDKGlobalConfiguration.SECRET_KEY_SYSTEM_PROPERTY, Configuration.get(PropertyKey.S3A_SECRET_KEY));
    }
    // Checks, in order, env variables, system properties, profile file, and instance profile
    AWSCredentialsProvider credentials = new AWSCredentialsProviderChain(new DefaultAWSCredentialsProviderChain());
    // Set the client configuration based on Alluxio configuration values
    ClientConfiguration clientConf = new ClientConfiguration();
    // Socket timeout
    clientConf.setSocketTimeout(Configuration.getInt(PropertyKey.UNDERFS_S3A_SOCKET_TIMEOUT_MS));
    // HTTP protocol
    if (Configuration.getBoolean(PropertyKey.UNDERFS_S3A_SECURE_HTTP_ENABLED)) {
        clientConf.setProtocol(Protocol.HTTPS);
    } else {
        clientConf.setProtocol(Protocol.HTTP);
    }
    // Proxy host
    if (Configuration.containsKey(PropertyKey.UNDERFS_S3_PROXY_HOST)) {
        clientConf.setProxyHost(Configuration.get(PropertyKey.UNDERFS_S3_PROXY_HOST));
    }
    // Proxy port
    if (Configuration.containsKey(PropertyKey.UNDERFS_S3_PROXY_PORT)) {
        clientConf.setProxyPort(Configuration.getInt(PropertyKey.UNDERFS_S3_PROXY_PORT));
    }
    int numAdminThreads = Configuration.getInt(PropertyKey.UNDERFS_S3_ADMIN_THREADS_MAX);
    int numTransferThreads = Configuration.getInt(PropertyKey.UNDERFS_S3_UPLOAD_THREADS_MAX);
    int numThreads = Configuration.getInt(PropertyKey.UNDERFS_S3_THREADS_MAX);
    if (numThreads < numAdminThreads + numTransferThreads) {
        // Bug fix: the placeholders were previously logged without arguments, printing
        // literal "{}" instead of the configured values.
        LOG.warn("Configured s3 max threads: {} is less than # admin threads: {} plus transfer " + "threads {}. Using admin threads + transfer threads as max threads instead.", numThreads, numAdminThreads, numTransferThreads);
        numThreads = numAdminThreads + numTransferThreads;
    }
    clientConf.setMaxConnections(numThreads);
    // Set client request timeout for all requests since multipart copy is used, and copy parts can
    // only be set with the client configuration.
    clientConf.setRequestTimeout(Configuration.getInt(PropertyKey.UNDERFS_S3A_REQUEST_TIMEOUT));
    AmazonS3Client amazonS3Client = new AmazonS3Client(credentials, clientConf);
    // Set a custom endpoint.
    if (Configuration.containsKey(PropertyKey.UNDERFS_S3_ENDPOINT)) {
        amazonS3Client.setEndpoint(Configuration.get(PropertyKey.UNDERFS_S3_ENDPOINT));
    }
    // Disable DNS style buckets, this enables path style requests.
    if (Configuration.getBoolean(PropertyKey.UNDERFS_S3_DISABLE_DNS_BUCKETS)) {
        S3ClientOptions clientOptions = S3ClientOptions.builder().setPathStyleAccess(true).build();
        amazonS3Client.setS3ClientOptions(clientOptions);
    }
    ExecutorService service = ExecutorServiceFactories.fixedThreadPoolExecutorServiceFactory("alluxio-s3-transfer-manager-worker", numTransferThreads).create();
    TransferManager transferManager = new TransferManager(amazonS3Client, service);
    TransferManagerConfiguration transferConf = new TransferManagerConfiguration();
    transferConf.setMultipartCopyThreshold(MULTIPART_COPY_THRESHOLD);
    transferManager.setConfiguration(transferConf);
    // Default to readable and writable by the user.
    // Bug fix: was `(short) 700` (decimal); POSIX mode rwx------ is octal 0700.
    short bucketMode = (short) 0700;
    // There is no known account owner by default.
    String accountOwner = "";
    // if ACL enabled inherit bucket acl for all the objects.
    if (Configuration.getBoolean(PropertyKey.UNDERFS_S3A_INHERIT_ACL)) {
        String accountOwnerId = amazonS3Client.getS3AccountOwner().getId();
        // Gets the owner from user-defined static mapping from S3 canonical user
        // id to Alluxio user name.
        String owner = CommonUtils.getValueFromStaticMapping(Configuration.get(PropertyKey.UNDERFS_S3_OWNER_ID_TO_USERNAME_MAPPING), accountOwnerId);
        // If there is no user-defined mapping, use the display name.
        if (owner == null) {
            owner = amazonS3Client.getS3AccountOwner().getDisplayName();
        }
        accountOwner = owner == null ? accountOwnerId : owner;
        AccessControlList acl = amazonS3Client.getBucketAcl(bucketName);
        bucketMode = S3AUtils.translateBucketAcl(acl, accountOwnerId);
    }
    return new S3AUnderFileSystem(uri, amazonS3Client, bucketName, bucketMode, accountOwner, transferManager);
}
Also used : DefaultAWSCredentialsProviderChain(com.amazonaws.auth.DefaultAWSCredentialsProviderChain) AccessControlList(com.amazonaws.services.s3.model.AccessControlList) TransferManager(com.amazonaws.services.s3.transfer.TransferManager) AWSCredentialsProviderChain(com.amazonaws.auth.AWSCredentialsProviderChain) DefaultAWSCredentialsProviderChain(com.amazonaws.auth.DefaultAWSCredentialsProviderChain) AmazonS3Client(com.amazonaws.services.s3.AmazonS3Client) TransferManagerConfiguration(com.amazonaws.services.s3.transfer.TransferManagerConfiguration) S3ClientOptions(com.amazonaws.services.s3.S3ClientOptions) ExecutorService(java.util.concurrent.ExecutorService) AWSCredentialsProvider(com.amazonaws.auth.AWSCredentialsProvider) ClientConfiguration(com.amazonaws.ClientConfiguration)

Example 3 with Owner

use of com.amazonaws.services.s3.model.Owner in project camel by apache.

From the class AmazonS3ClientMock, method listBuckets:

@Override
public List<Bucket> listBuckets() throws AmazonClientException, AmazonServiceException {
    // Mock implementation: always report exactly one bucket named "camel-bucket",
    // owned by "Camel" and created at call time.
    Bucket camelBucket = new Bucket("camel-bucket");
    camelBucket.setOwner(new Owner("Camel", "camel"));
    camelBucket.setCreationDate(new Date());
    ArrayList<Bucket> buckets = new ArrayList<Bucket>();
    buckets.add(camelBucket);
    return buckets;
}
Also used : Owner(com.amazonaws.services.s3.model.Owner) Bucket(com.amazonaws.services.s3.model.Bucket) CopyOnWriteArrayList(java.util.concurrent.CopyOnWriteArrayList) ArrayList(java.util.ArrayList) Date(java.util.Date)

Example 4 with Owner

use of com.amazonaws.services.s3.model.Owner in project druid by druid-io.

From the class S3DataSegmentPusherTest, method testPushInternal:

/**
 * Shared driver for segment-push tests: pushes a tiny on-disk segment through
 * {@code S3DataSegmentPusher} against a mocked S3 client and verifies the
 * resulting load spec.
 *
 * @param useUniquePath whether the pusher should generate a unique storage path
 * @param matcher regex the pushed segment's S3 key must fully match
 */
private void testPushInternal(boolean useUniquePath, String matcher) throws Exception {
    // Strict mock: the expectations below must be satisfied in this exact order.
    ServerSideEncryptingAmazonS3 s3Client = EasyMock.createStrictMock(ServerSideEncryptingAmazonS3.class);
    final AccessControlList acl = new AccessControlList();
    acl.setOwner(new Owner("ownerId", "owner"));
    acl.grantAllPermissions(new Grant(new CanonicalGrantee(acl.getOwner().getId()), Permission.FullControl));
    // The pusher first reads the bucket ACL, then uploads exactly one object.
    EasyMock.expect(s3Client.getBucketAcl(EasyMock.eq("bucket"))).andReturn(acl).once();
    EasyMock.expect(s3Client.putObject(EasyMock.anyObject())).andReturn(new PutObjectResult()).once();
    EasyMock.replay(s3Client);
    S3DataSegmentPusherConfig config = new S3DataSegmentPusherConfig();
    config.setBucket("bucket");
    config.setBaseKey("key");
    S3DataSegmentPusher pusher = new S3DataSegmentPusher(s3Client, config);
    // Create a mock segment on disk
    File tmp = tempFolder.newFile("version.bin");
    // 4-byte big-endian 1: interpreted by the pusher as binary version 1.
    final byte[] data = new byte[] { 0x0, 0x0, 0x0, 0x1 };
    Files.write(data, tmp);
    final long size = data.length;
    DataSegment segmentToPush = new DataSegment("foo", Intervals.of("2015/2016"), "0", new HashMap<>(), new ArrayList<>(), new ArrayList<>(), NoneShardSpec.instance(), 0, size);
    DataSegment segment = pusher.push(tempFolder.getRoot(), segmentToPush, useUniquePath);
    // The returned segment must carry the original size, the parsed binary
    // version, and an s3_zip load spec pointing at the configured bucket/key.
    Assert.assertEquals(segmentToPush.getSize(), segment.getSize());
    Assert.assertEquals(1, (int) segment.getBinaryVersion());
    Assert.assertEquals("bucket", segment.getLoadSpec().get("bucket"));
    Assert.assertTrue(segment.getLoadSpec().get("key").toString(), Pattern.compile(matcher).matcher(segment.getLoadSpec().get("key").toString()).matches());
    Assert.assertEquals("s3_zip", segment.getLoadSpec().get("type"));
    // Fails if either mocked S3 call was not made exactly once, in order.
    EasyMock.verify(s3Client);
}
Also used : AccessControlList(com.amazonaws.services.s3.model.AccessControlList) Grant(com.amazonaws.services.s3.model.Grant) Owner(com.amazonaws.services.s3.model.Owner) PutObjectResult(com.amazonaws.services.s3.model.PutObjectResult) DataSegment(org.apache.druid.timeline.DataSegment) CanonicalGrantee(com.amazonaws.services.s3.model.CanonicalGrantee) File(java.io.File)

Example 5 with Owner

use of com.amazonaws.services.s3.model.Owner in project druid by druid-io.

From the class S3Utils, method uploadFileIfPossible:

/**
 * Uploads a file to S3. Unless ACLs are disabled, the object is first granted
 * full control to the bucket owner before the upload is issued.
 *
 * @param service    S3 client
 * @param disableAcl true if ACL shouldn't be set for the file
 * @param bucket     the destination bucket
 * @param key        The key under which to store the new object.
 * @param file       The path of the file to upload to Amazon S3.
 */
static void uploadFileIfPossible(ServerSideEncryptingAmazonS3 service, boolean disableAcl, String bucket, String key, File file) {
    PutObjectRequest request = new PutObjectRequest(bucket, key, file);
    if (!disableAcl) {
        // Give the bucket owner full control so cross-account pushes stay readable.
        request.setAccessControlList(S3Utils.grantFullControlToBucketOwner(service, bucket));
    }
    log.info("Pushing [%s] to bucket[%s] and key[%s].", file, bucket, key);
    service.putObject(request);
}
Also used : PutObjectRequest(com.amazonaws.services.s3.model.PutObjectRequest)

Aggregations

AccessControlList (com.amazonaws.services.s3.model.AccessControlList)10 Owner (com.amazonaws.services.s3.model.Owner)10 CanonicalGrantee (com.amazonaws.services.s3.model.CanonicalGrantee)4 Test (org.junit.Test)4 AmazonServiceException (com.amazonaws.AmazonServiceException)3 AmazonS3 (com.amazonaws.services.s3.AmazonS3)3 CannedAccessControlList (com.amazonaws.services.s3.model.CannedAccessControlList)3 HashMap (java.util.HashMap)3 AlluxioURI (alluxio.AlluxioURI)2 ConfigurationRule (alluxio.ConfigurationRule)2 PropertyKey (alluxio.conf.PropertyKey)2 ObjectUnderFileSystem (alluxio.underfs.ObjectUnderFileSystem)2 UnderFileSystemConfiguration (alluxio.underfs.UnderFileSystemConfiguration)2 AmazonClientException (com.amazonaws.AmazonClientException)2 SdkClientException (com.amazonaws.SdkClientException)2 Regions (com.amazonaws.regions.Regions)2 Bucket (com.amazonaws.services.s3.model.Bucket)2 EmailAddressGrantee (com.amazonaws.services.s3.model.EmailAddressGrantee)2 Grant (com.amazonaws.services.s3.model.Grant)2 Grantee (com.amazonaws.services.s3.model.Grantee)2