Use of org.apache.iceberg.aws.s3.S3FileIO in the Apache Iceberg project:
the DynamoDbCatalog class, method initializeFileIO.
/**
 * Resolves the {@link FileIO} for this catalog from the given configuration.
 *
 * <p>If the catalog properties name an explicit implementation via
 * {@link CatalogProperties#FILE_IO_IMPL}, that class is loaded dynamically;
 * otherwise an {@link S3FileIO} initialized from the same properties is used.
 *
 * @param properties catalog configuration properties
 * @return an initialized FileIO instance
 */
private FileIO initializeFileIO(Map<String, String> properties) {
  String configuredImpl = properties.get(CatalogProperties.FILE_IO_IMPL);
  if (configuredImpl != null) {
    // Honor the explicit override; loadFileIO handles construction and initialization.
    return CatalogUtil.loadFileIO(configuredImpl, properties, hadoopConf);
  }
  // No override configured: default to S3-backed storage.
  FileIO defaultIo = new S3FileIO();
  defaultIo.initialize(properties);
  return defaultIo;
}
Use of org.apache.iceberg.aws.s3.S3FileIO in the Apache Iceberg project:
the TestAssumeRoleAwsClientFactory class, method testAssumeRoleS3FileIO.
@Test
public void testAssumeRoleS3FileIO() throws Exception {
  String bucketArn = "arn:aws:s3:::" + AwsIntegTestUtil.testBucketName();
  // Restrict the assumed role: ListBucket only under the "allowed/" prefix,
  // GetObject only for objects beneath "allowed/".
  String policyDocument = "{"
      + "\"Version\":\"2012-10-17\","
      + "\"Statement\":[{"
      + "\"Sid\":\"policy1\","
      + "\"Effect\":\"Allow\","
      + "\"Action\":\"s3:ListBucket\","
      + "\"Resource\":[\"" + bucketArn + "\"],"
      + "\"Condition\":{\"StringLike\":{\"s3:prefix\":[\"allowed/*\"]}}} ,{"
      + "\"Sid\":\"policy2\","
      + "\"Effect\":\"Allow\","
      + "\"Action\":\"s3:GetObject\","
      + "\"Resource\":[\"" + bucketArn + "/allowed/*\"]}]}";
  iam.putRolePolicy(
      PutRolePolicyRequest.builder()
          .roleName(roleName)
          .policyName(policyName)
          .policyDocument(policyDocument)
          .build());
  waitForIamConsistency();

  S3FileIO fileIO = new S3FileIO();
  fileIO.initialize(assumeRoleProperties);

  // Outside the allowed prefix, access must be rejected with HTTP 403.
  InputFile deniedFile =
      fileIO.newInputFile("s3://" + AwsIntegTestUtil.testBucketName() + "/denied/file");
  try {
    deniedFile.exists();
    Assert.fail("Access to s3 should be denied");
  } catch (S3Exception e) {
    Assert.assertEquals("Should see 403 error code", 403, e.statusCode());
  }

  // Inside the allowed prefix, the check succeeds (the object simply does not exist).
  InputFile allowedFile =
      fileIO.newInputFile("s3://" + AwsIntegTestUtil.testBucketName() + "/allowed/file");
  Assert.assertFalse("should be able to access file", allowedFile.exists());
}
Use of org.apache.iceberg.aws.s3.S3FileIO in the Apache Iceberg project:
the GlueTestBase class, method beforeClass.
@BeforeClass
public static void beforeClass() {
  // Both catalogs share the same warehouse path and the same S3-backed FileIO.
  String testBucketPath = "s3://" + testBucketName + "/" + testPathPrefix;
  S3FileIO fileIO = new S3FileIO(clientFactory::s3);

  glueCatalog = new GlueCatalog();
  glueCatalog.initialize(
      catalogName,
      testBucketPath,
      new AwsProperties(),
      glue,
      LockManagers.defaultLockManager(),
      fileIO);

  // Second catalog instance is configured to skip Glue table-version archiving.
  AwsProperties skipArchiveProperties = new AwsProperties();
  skipArchiveProperties.setGlueCatalogSkipArchive(true);
  glueCatalogWithSkip = new GlueCatalog();
  glueCatalogWithSkip.initialize(
      catalogName,
      testBucketPath,
      skipArchiveProperties,
      glue,
      LockManagers.defaultLockManager(),
      fileIO);
}
Use of org.apache.iceberg.aws.s3.S3FileIO in the Apache Iceberg project:
the TestGlueCatalogLock class, method beforeClass.
@BeforeClass
public static void beforeClass() {
  GlueTestBase.beforeClass();
  String warehousePath = "s3://" + testBucketName + "/" + testPathPrefix;
  lockTableName = getRandomName();
  dynamo = clientFactory.dynamo();
  S3FileIO fileIO = new S3FileIO(clientFactory::s3);

  // This catalog instance locks commits through DynamoDB instead of the default lock manager.
  glueCatalog = new GlueCatalog();
  AwsProperties awsProperties = new AwsProperties();
  glueCatalog.initialize(
      catalogName,
      warehousePath,
      awsProperties,
      glue,
      new DynamoDbLockManager(dynamo, lockTableName),
      fileIO);
}
Use of org.apache.iceberg.aws.s3.S3FileIO in the Apache Iceberg project:
the GlueCatalog class, method initializeFileIO.
/**
 * Builds the {@link FileIO} used by this catalog.
 *
 * <p>When the properties specify {@link CatalogProperties#FILE_IO_IMPL}, the named
 * implementation is loaded reflectively; otherwise an {@link S3FileIO} is created
 * and initialized with the supplied properties.
 *
 * @param properties catalog configuration properties
 * @return an initialized FileIO instance
 */
private FileIO initializeFileIO(Map<String, String> properties) {
  String implClassName = properties.get(CatalogProperties.FILE_IO_IMPL);
  if (implClassName != null) {
    // An explicit implementation was requested; delegate loading to CatalogUtil.
    return CatalogUtil.loadFileIO(implClassName, properties, hadoopConf);
  }
  // Fall back to the S3 default when nothing is configured.
  FileIO s3Io = new S3FileIO();
  s3Io.initialize(properties);
  return s3Io;
}
Aggregations