use of com.amazonaws.services.s3.model.AccessControlList in project alluxio by Alluxio.
the class S3AUnderFileSystem method createInstance.
/**
 * Constructs a new instance of {@link S3AUnderFileSystem}.
 *
 * @param uri the {@link AlluxioURI} for this UFS
 * @return the created {@link S3AUnderFileSystem} instance
 */
public static S3AUnderFileSystem createInstance(AlluxioURI uri) {
  String bucketName = uri.getHost();
  // Set the aws credential system properties based on Alluxio properties, if they are set
  if (Configuration.containsKey(PropertyKey.S3A_ACCESS_KEY)) {
    System.setProperty(SDKGlobalConfiguration.ACCESS_KEY_SYSTEM_PROPERTY,
        Configuration.get(PropertyKey.S3A_ACCESS_KEY));
  }
  if (Configuration.containsKey(PropertyKey.S3A_SECRET_KEY)) {
    System.setProperty(SDKGlobalConfiguration.SECRET_KEY_SYSTEM_PROPERTY,
        Configuration.get(PropertyKey.S3A_SECRET_KEY));
  }
  // Checks, in order, env variables, system properties, profile file, and instance profile
  AWSCredentialsProvider credentials =
      new AWSCredentialsProviderChain(new DefaultAWSCredentialsProviderChain());
  // Set the client configuration based on Alluxio configuration values
  ClientConfiguration clientConf = new ClientConfiguration();
  // Socket timeout
  clientConf.setSocketTimeout(Configuration.getInt(PropertyKey.UNDERFS_S3A_SOCKET_TIMEOUT_MS));
  // HTTP protocol
  if (Configuration.getBoolean(PropertyKey.UNDERFS_S3A_SECURE_HTTP_ENABLED)) {
    clientConf.setProtocol(Protocol.HTTPS);
  } else {
    clientConf.setProtocol(Protocol.HTTP);
  }
  // Proxy host
  if (Configuration.containsKey(PropertyKey.UNDERFS_S3_PROXY_HOST)) {
    clientConf.setProxyHost(Configuration.get(PropertyKey.UNDERFS_S3_PROXY_HOST));
  }
  // Proxy port
  if (Configuration.containsKey(PropertyKey.UNDERFS_S3_PROXY_PORT)) {
    clientConf.setProxyPort(Configuration.getInt(PropertyKey.UNDERFS_S3_PROXY_PORT));
  }
  int numAdminThreads = Configuration.getInt(PropertyKey.UNDERFS_S3_ADMIN_THREADS_MAX);
  int numTransferThreads = Configuration.getInt(PropertyKey.UNDERFS_S3_UPLOAD_THREADS_MAX);
  int numThreads = Configuration.getInt(PropertyKey.UNDERFS_S3_THREADS_MAX);
  if (numThreads < numAdminThreads + numTransferThreads) {
LOG.warn("Configured s3 max threads: {} is less than # admin threads: {} plus transfer " + "threads {}. Using admin threads + transfer threads as max threads instead.");
    numThreads = numAdminThreads + numTransferThreads;
  }
  clientConf.setMaxConnections(numThreads);
  // Set client request timeout for all requests since multipart copy is used, and copy parts can
  // only be set with the client configuration.
  clientConf.setRequestTimeout(Configuration.getInt(PropertyKey.UNDERFS_S3A_REQUEST_TIMEOUT));
  AmazonS3Client amazonS3Client = new AmazonS3Client(credentials, clientConf);
  // Set a custom endpoint.
  if (Configuration.containsKey(PropertyKey.UNDERFS_S3_ENDPOINT)) {
    amazonS3Client.setEndpoint(Configuration.get(PropertyKey.UNDERFS_S3_ENDPOINT));
  }
  // Disable DNS style buckets, this enables path style requests.
  if (Configuration.getBoolean(PropertyKey.UNDERFS_S3_DISABLE_DNS_BUCKETS)) {
    S3ClientOptions clientOptions = S3ClientOptions.builder().setPathStyleAccess(true).build();
    amazonS3Client.setS3ClientOptions(clientOptions);
  }
  ExecutorService service = ExecutorServiceFactories.fixedThreadPoolExecutorServiceFactory(
      "alluxio-s3-transfer-manager-worker", numTransferThreads).create();
  TransferManager transferManager = new TransferManager(amazonS3Client, service);
  TransferManagerConfiguration transferConf = new TransferManagerConfiguration();
  transferConf.setMultipartCopyThreshold(MULTIPART_COPY_THRESHOLD);
  transferManager.setConfiguration(transferConf);
  // Default to readable, writable, and executable by the user only (octal 0700).
  short bucketMode = (short) 0700;
  // There is no known account owner by default.
  String accountOwner = "";
  // If ACL inheritance is enabled, inherit the bucket ACL for all the objects.
  if (Configuration.getBoolean(PropertyKey.UNDERFS_S3A_INHERIT_ACL)) {
    String accountOwnerId = amazonS3Client.getS3AccountOwner().getId();
    // Get the owner from the user-defined static mapping from S3 canonical user id
    // to Alluxio user name.
    String owner = CommonUtils.getValueFromStaticMapping(
        Configuration.get(PropertyKey.UNDERFS_S3_OWNER_ID_TO_USERNAME_MAPPING), accountOwnerId);
    // If there is no user-defined mapping, use the display name.
    if (owner == null) {
      owner = amazonS3Client.getS3AccountOwner().getDisplayName();
    }
    accountOwner = owner == null ? accountOwnerId : owner;
    AccessControlList acl = amazonS3Client.getBucketAcl(bucketName);
    bucketMode = S3AUtils.translateBucketAcl(acl, accountOwnerId);
  }
  return new S3AUnderFileSystem(uri, amazonS3Client, bucketName, bucketMode, accountOwner,
      transferManager);
}
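The interesting step above is turning the bucket's ACL into a POSIX-style mode short. Below is a minimal sketch of the kind of mapping S3AUtils.translateBucketAcl performs; the AclToModeSketch class name and the exact grant-to-bit mapping are illustrative assumptions, not the actual Alluxio implementation.
import com.amazonaws.services.s3.model.AccessControlList;
import com.amazonaws.services.s3.model.Grant;
import com.amazonaws.services.s3.model.Permission;

public final class AclToModeSketch {
  // Hypothetical helper: derive an owner-only mode from the grants that apply
  // to the bucket owner's canonical id. Illustrative, not Alluxio's code.
  public static short toOwnerMode(AccessControlList acl, String ownerId) {
    short mode = 0;
    for (Grant grant : acl.getGrantsAsList()) {
      if (!ownerId.equals(grant.getGrantee().getIdentifier())) {
        continue; // ignore grants that apply to other users or groups
      }
      Permission permission = grant.getPermission();
      if (permission == Permission.FullControl) {
        mode |= 0700; // owner read/write/execute
      } else if (permission == Permission.Read) {
        mode |= 0500; // read, plus traversal
      } else if (permission == Permission.Write) {
        mode |= 0200;
      }
    }
    return mode;
  }
}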
use of com.amazonaws.services.s3.model.AccessControlList in project camel by apache.
the class S3Producer method processSingleOp.
public void processSingleOp(final Exchange exchange) throws Exception {
    ObjectMetadata objectMetadata = determineMetadata(exchange);
    File filePayload = null;
    InputStream is = null;
    Object obj = exchange.getIn().getMandatoryBody();
    PutObjectRequest putObjectRequest = null;
    // Need to check if the message body is WrappedFile
    if (obj instanceof WrappedFile) {
        obj = ((WrappedFile<?>) obj).getFile();
    }
    if (obj instanceof File) {
        filePayload = (File) obj;
        is = new FileInputStream(filePayload);
    } else {
        is = exchange.getIn().getMandatoryBody(InputStream.class);
    }
    putObjectRequest = new PutObjectRequest(getConfiguration().getBucketName(),
            determineKey(exchange), is, objectMetadata);
    String storageClass = determineStorageClass(exchange);
    if (storageClass != null) {
        putObjectRequest.setStorageClass(storageClass);
    }
    String cannedAcl = exchange.getIn().getHeader(S3Constants.CANNED_ACL, String.class);
    if (cannedAcl != null) {
        CannedAccessControlList objectAcl = CannedAccessControlList.valueOf(cannedAcl);
        putObjectRequest.setCannedAcl(objectAcl);
    }
    AccessControlList acl = exchange.getIn().getHeader(S3Constants.ACL, AccessControlList.class);
    if (acl != null) {
        // Note: if a canned ACL and an ACL are both specified, the last one set wins;
        // refer to PutObjectRequest#setAccessControlList for more details.
        putObjectRequest.setAccessControlList(acl);
    }
    LOG.trace("Put object [{}] from exchange [{}]...", putObjectRequest, exchange);
    PutObjectResult putObjectResult = getEndpoint().getS3Client().putObject(putObjectRequest);
    LOG.trace("Received result [{}]", putObjectResult);
    Message message = getMessageForResponse(exchange);
    message.setHeader(S3Constants.E_TAG, putObjectResult.getETag());
    if (putObjectResult.getVersionId() != null) {
        message.setHeader(S3Constants.VERSION_ID, putObjectResult.getVersionId());
    }
    // Close the streams unconditionally; only delete the file payload when configured to.
    IOHelper.close(putObjectRequest.getInputStream());
    IOHelper.close(is);
    if (getConfiguration().isDeleteAfterWrite() && filePayload != null) {
        FileUtil.deleteFile(filePayload);
    }
}
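For context, a route that exercises this producer only needs to set the relevant headers before sending to the aws-s3 endpoint. The sketch below is a hedged example: the endpoint URI options, the #amazonS3Client registry reference, and the key value are assumptions, not part of the Camel source above.
import org.apache.camel.builder.RouteBuilder;
import org.apache.camel.component.aws.s3.S3Constants;

public class S3UploadRoute extends RouteBuilder {
    @Override
    public void configure() {
        // Hypothetical route: bucket name, key, and client bean are examples.
        from("direct:upload")
            .setHeader(S3Constants.KEY, constant("reports/report.txt"))
            .setHeader(S3Constants.CANNED_ACL, constant("PublicRead"))
            .to("aws-s3://my-bucket?amazonS3Client=#amazonS3Client");
    }
}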
use of com.amazonaws.services.s3.model.AccessControlList in project hippo by NHS-digital-website.
the class S3ConnectorImpl method unpublishResource.
public boolean unpublishResource(String objectPath) {
    AccessControlList acl = s3.getObjectAcl(bucketName, objectPath);
    acl.revokeAllPermissions(GroupGrantee.AllUsers);
    s3.setObjectAcl(bucketName, objectPath, acl);
    return true;
}
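unpublishResource makes an object private by revoking every grant held by the AllUsers group. A hedged sketch of the complementary operation, assuming the same s3 client and bucketName fields, would grant that group read access; the publishResource name is our assumption, not necessarily the hippo method.
// Hypothetical counterpart: make the object world-readable again by granting
// the AllUsers group read permission. Assumes the same s3 and bucketName fields.
public boolean publishResource(String objectPath) {
    AccessControlList acl = s3.getObjectAcl(bucketName, objectPath);
    acl.grantPermission(GroupGrantee.AllUsers, Permission.Read);
    s3.setObjectAcl(bucketName, objectPath, acl);
    return true;
}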
use of com.amazonaws.services.s3.model.AccessControlList in project druid by druid-io.
the class S3DataSegmentPusherTest method testPushInternal.
private void testPushInternal(boolean useUniquePath, String matcher) throws Exception {
  ServerSideEncryptingAmazonS3 s3Client =
      EasyMock.createStrictMock(ServerSideEncryptingAmazonS3.class);
  final AccessControlList acl = new AccessControlList();
  acl.setOwner(new Owner("ownerId", "owner"));
  acl.grantAllPermissions(
      new Grant(new CanonicalGrantee(acl.getOwner().getId()), Permission.FullControl));
  EasyMock.expect(s3Client.getBucketAcl(EasyMock.eq("bucket"))).andReturn(acl).once();
  EasyMock.expect(s3Client.putObject(EasyMock.anyObject())).andReturn(new PutObjectResult()).once();
  EasyMock.replay(s3Client);
  S3DataSegmentPusherConfig config = new S3DataSegmentPusherConfig();
  config.setBucket("bucket");
  config.setBaseKey("key");
  S3DataSegmentPusher pusher = new S3DataSegmentPusher(s3Client, config);
  // Create a mock segment on disk
  File tmp = tempFolder.newFile("version.bin");
  final byte[] data = new byte[]{0x0, 0x0, 0x0, 0x1};
  Files.write(data, tmp);
  final long size = data.length;
  DataSegment segmentToPush = new DataSegment("foo", Intervals.of("2015/2016"), "0",
      new HashMap<>(), new ArrayList<>(), new ArrayList<>(), NoneShardSpec.instance(), 0, size);
  DataSegment segment = pusher.push(tempFolder.getRoot(), segmentToPush, useUniquePath);
  Assert.assertEquals(segmentToPush.getSize(), segment.getSize());
  Assert.assertEquals(1, (int) segment.getBinaryVersion());
  Assert.assertEquals("bucket", segment.getLoadSpec().get("bucket"));
  Assert.assertTrue(segment.getLoadSpec().get("key").toString(),
      Pattern.compile(matcher).matcher(segment.getLoadSpec().get("key").toString()).matches());
  Assert.assertEquals("s3_zip", segment.getLoadSpec().get("type"));
  EasyMock.verify(s3Client);
}
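The strict mock pins down the pusher's expected S3 interaction: read the bucket ACL once, then put the segment object once. That suggests the pusher copies the bucket owner's full-control grant onto each uploaded object; the helper below sketches that idea, with the fullControlForBucketOwner name and exact grant logic as assumptions rather than Druid's actual implementation.
// Hypothetical helper mirroring what the test's expectations suggest: build an
// object ACL granting the bucket owner full control over the uploaded segment.
static AccessControlList fullControlForBucketOwner(ServerSideEncryptingAmazonS3 s3Client, String bucket) {
  AccessControlList bucketAcl = s3Client.getBucketAcl(bucket);
  AccessControlList objectAcl = new AccessControlList();
  objectAcl.setOwner(bucketAcl.getOwner());
  objectAcl.grantPermission(new CanonicalGrantee(bucketAcl.getOwner().getId()), Permission.FullControl);
  return objectAcl;
}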
use of com.amazonaws.services.s3.model.AccessControlList in project aws-doc-sdk-examples by awsdocs.
the class ModifyACLExistingObject method main.
public static void main(String[] args) throws IOException {
    Regions clientRegion = Regions.DEFAULT_REGION;
    String bucketName = "*** Bucket name ***";
    String keyName = "*** Key name ***";
    String emailGrantee = "*** user@example.com ***";
    try {
        AmazonS3 s3Client = AmazonS3ClientBuilder.standard()
                .withCredentials(new ProfileCredentialsProvider())
                .withRegion(clientRegion)
                .build();
        // Get the existing object ACL that we want to modify.
        AccessControlList acl = s3Client.getObjectAcl(bucketName, keyName);
        // Clear the existing list of grants.
        acl.getGrantsAsList().clear();
        // Grant a sample set of permissions, using the existing ACL owner for Full Control permissions.
        acl.grantPermission(new CanonicalGrantee(acl.getOwner().getId()), Permission.FullControl);
        acl.grantPermission(new EmailAddressGrantee(emailGrantee), Permission.WriteAcp);
        // Save the modified ACL back to the object.
        s3Client.setObjectAcl(bucketName, keyName, acl);
    } catch (AmazonServiceException e) {
        // The call was transmitted successfully, but Amazon S3 couldn't process
        // it, so it returned an error response.
        e.printStackTrace();
    } catch (SdkClientException e) {
        // Amazon S3 couldn't be contacted for a response, or the client
        // couldn't parse the response from Amazon S3.
        e.printStackTrace();
    }
}
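After setObjectAcl returns, it can be useful to read the ACL back and list the resulting grants. The helper below is a minimal follow-up sketch reusing the same client, bucket, and key; the AclDump class and dumpAcl method names are ours, not part of the AWS example.
import java.util.List;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.AccessControlList;
import com.amazonaws.services.s3.model.Grant;

public class AclDump {
    // Print each grantee/permission pair on the object so the change can be verified.
    static void dumpAcl(AmazonS3 s3Client, String bucketName, String keyName) {
        AccessControlList updated = s3Client.getObjectAcl(bucketName, keyName);
        List<Grant> grants = updated.getGrantsAsList();
        for (Grant grant : grants) {
            System.out.printf("%s: %s%n", grant.getGrantee().getIdentifier(), grant.getPermission());
        }
    }
}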