Use of org.apache.hadoop.fs.s3a.S3ClientFactory in project kylo by Teradata.
The example below is the listFiles method of the S3FileSystemProvider class.
@Nonnull
@Override
public List<DataSetFile> listFiles(@Nonnull final Path path, @Nonnull final Configuration conf) {
    // Determine the credentials
    final AmazonS3 s3;
    final URI uri = path.toUri();
    if ("s3".equalsIgnoreCase(uri.getScheme()) || "s3bfs".equalsIgnoreCase(uri.getScheme()) || "s3n".equalsIgnoreCase(uri.getScheme())) {
        s3 = createS3Client(uri, conf);
    } else if ("s3a".equalsIgnoreCase(uri.getScheme())) {
        final Class<? extends S3ClientFactory> s3ClientFactoryClass = conf.getClass(Constants.S3_CLIENT_FACTORY_IMPL, Constants.DEFAULT_S3_CLIENT_FACTORY_IMPL, S3ClientFactory.class);
        try {
            s3 = ReflectionUtils.newInstance(s3ClientFactoryClass, conf).createS3Client(uri);
        } catch (final IOException e) {
            throw new IllegalArgumentException("Unable to create S3 client: " + e, e);
        }
    } else {
        log.debug("Scheme {} not supported for S3 path: {}", uri.getScheme(), path);
        throw new CatalogException("catalog.fs.s3.invalidScheme", uri.getScheme());
    }

    // Fetch the list of buckets
    try {
        return s3.listBuckets().stream().map(bucket -> {
            final DataSetFile file = new DataSetFile();
            file.setName(bucket.getName());
            file.setDirectory(true);
            file.setModificationTime(bucket.getCreationDate().getTime());
            file.setPath(uri.getScheme() + "://" + bucket.getName() + "/");
            return file;
        }).collect(Collectors.toList());
    } finally {
        s3.shutdown();
    }
}
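
For context, here is a minimal sketch of how listFiles might be invoked. The no-argument S3FileSystemProvider constructor and the credential values are assumptions for illustration; the fs.s3a.* keys come from the same org.apache.hadoop.fs.s3a.Constants class referenced in the snippet.

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.s3a.Constants;

public class ListS3BucketsExample {
    public static void main(final String[] args) {
        // Standard Hadoop S3A credential keys (fs.s3a.access.key / fs.s3a.secret.key)
        final Configuration conf = new Configuration();
        conf.set(Constants.ACCESS_KEY, "AKIA...");
        conf.set(Constants.SECRET_KEY, "...");

        // Hypothetical: assumes S3FileSystemProvider can be constructed directly
        final S3FileSystemProvider provider = new S3FileSystemProvider();

        // The "s3a" scheme selects the S3ClientFactory branch; listFiles only
        // inspects the scheme before listing buckets, so the bucket name is arbitrary
        final List<DataSetFile> buckets = provider.listFiles(new Path("s3a://any-bucket/"), conf);
        buckets.forEach(file -> System.out.println(file.getName() + " " + file.getPath()));
    }
}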
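
The s3a branch delegates client construction to a pluggable factory resolved from Constants.S3_CLIENT_FACTORY_IMPL (fs.s3a.s3.client.factory.impl). Below is a minimal sketch of a custom factory, assuming the Hadoop 2.x S3ClientFactory interface with the single-argument createS3Client(URI) signature used above; the class name and the region key are hypothetical.

import java.io.IOException;
import java.net.URI;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.s3a.S3ClientFactory;

// Hypothetical factory that pins the client to a configured region.
// Extending Configured lets ReflectionUtils.newInstance(clazz, conf) inject
// the Configuration, as done in the listFiles method above.
public class RegionPinnedS3ClientFactory extends Configured implements S3ClientFactory {

    @Override
    public AmazonS3 createS3Client(final URI name) throws IOException {
        // "fs.s3a.example.region" is a made-up key for this sketch
        final String region = getConf().get("fs.s3a.example.region", "us-east-1");
        return AmazonS3ClientBuilder.standard().withRegion(region).build();
    }
}

Such a factory would be selected with conf.setClass(Constants.S3_CLIENT_FACTORY_IMPL, RegionPinnedS3ClientFactory.class, S3ClientFactory.class) before calling listFiles.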