Use of org.apache.flink.fs.s3.common.FlinkS3FileSystem in the Apache Flink project.
From the class PrestoS3RecoverableWriterTest, method requestingRecoverableWriterShouldThroughException.
// ----------------------- Tests -----------------------

// NOTE(review): the method name has a typo — "Through" should be "Throw".
// Kept as-is so the external identifier is unchanged; rename in a dedicated commit.
@Test(expected = UnsupportedOperationException.class)
public void requestingRecoverableWriterShouldThroughException() throws Exception {
    // Resolve the Flink S3 file system for the configured test bucket, then
    // request a recoverable writer. The Presto-backed implementation does not
    // support recoverable writers, so this must throw UnsupportedOperationException
    // (asserted via the @Test "expected" attribute).
    final URI bucketUri = URI.create(S3TestCredentials.getTestBucketUri());
    final FlinkS3FileSystem s3FileSystem = (FlinkS3FileSystem) FileSystem.get(bucketUri);
    s3FileSystem.createRecoverableWriter();
}
Use of org.apache.flink.fs.s3.common.FlinkS3FileSystem in the Apache Flink project.
From the class AbstractS3FileSystemFactory, method create.
@Override
public FileSystem create(URI fsUri) throws IOException {
// Fall back to an empty configuration if the factory was never configured;
// all option lookups below will then return their declared defaults.
Configuration flinkConfig = this.flinkConfig;
if (flinkConfig == null) {
LOG.warn("Creating S3 FileSystem without configuring the factory. All behavior will be default.");
flinkConfig = new Configuration();
}
LOG.debug("Creating S3 file system backed by {}", name);
LOG.debug("Loading Hadoop configuration for {}", name);
try {
// create the Hadoop FileSystem
// Load/merge the Hadoop-side configuration, create the concrete Hadoop
// file system (subclass-provided), and initialize it against the URI.
org.apache.hadoop.conf.Configuration hadoopConfig = hadoopConfigLoader.getOrLoadHadoopConfig();
org.apache.hadoop.fs.FileSystem fs = createHadoopFileSystem();
fs.initialize(getInitURI(fsUri, hadoopConfig), hadoopConfig);
// load the entropy injection settings
// Entropy injection is enabled only if a key is configured. A configured
// key must not contain characters matched by INVALID_ENTROPY_KEY_CHARS,
// and the entropy length must be strictly positive.
String entropyInjectionKey = flinkConfig.getString(ENTROPY_INJECT_KEY_OPTION);
int numEntropyChars = -1;
if (entropyInjectionKey != null) {
if (entropyInjectionKey.matches(INVALID_ENTROPY_KEY_CHARS)) {
throw new IllegalConfigurationException("Invalid character in value for " + ENTROPY_INJECT_KEY_OPTION.key() + " : " + entropyInjectionKey);
}
numEntropyChars = flinkConfig.getInteger(ENTROPY_INJECT_LENGTH_OPTION);
if (numEntropyChars <= 0) {
throw new IllegalConfigurationException(ENTROPY_INJECT_LENGTH_OPTION.key() + " must configure a value > 0");
}
}
// Use the first configured local temp directory for staging uploads.
final String[] localTmpDirectories = ConfigurationUtils.parseTempDirectories(flinkConfig);
Preconditions.checkArgument(localTmpDirectories.length > 0);
final String localTmpDirectory = localTmpDirectories[0];
// Multipart-upload tuning: minimum part size and maximum concurrent uploads.
final long s3minPartSize = flinkConfig.getLong(PART_UPLOAD_MIN_SIZE);
final int maxConcurrentUploads = flinkConfig.getInteger(MAX_CONCURRENT_UPLOADS);
// May be null depending on the subclass — presumably disables the
// recoverable-writer path in that case; confirm against FlinkS3FileSystem.
final S3AccessHelper s3AccessHelper = getS3AccessHelper(fs);
return new FlinkS3FileSystem(fs, localTmpDirectory, entropyInjectionKey, numEntropyChars, s3AccessHelper, s3minPartSize, maxConcurrentUploads);
} catch (IOException e) {
// Propagate IOExceptions untouched; wrap everything else so the method's
// declared contract (throws IOException) holds while preserving the cause.
throw e;
} catch (Exception e) {
throw new IOException(e.getMessage(), e);
}
}
Use of org.apache.flink.fs.s3.common.FlinkS3FileSystem in the Apache Flink project.
From the class PrestoS3FileSystemTest, method validateBasicCredentials.
// ------------------------------------------------------------------------
//  utilities
// ------------------------------------------------------------------------

// Checks that the given file system is the Flink S3 wrapper around a Presto
// S3 file system whose credentials provider carries static (basic) credentials.
private static void validateBasicCredentials(FileSystem fs) throws Exception {
    assertTrue(fs instanceof FlinkS3FileSystem);

    final org.apache.hadoop.fs.FileSystem wrapped = ((FlinkS3FileSystem) fs).getHadoopFileSystem();
    assertTrue(wrapped instanceof PrestoS3FileSystem);

    // try-with-resources so the Presto file system is closed after inspection
    try (PrestoS3FileSystem prestoFileSystem = (PrestoS3FileSystem) wrapped) {
        final AWSCredentialsProvider credentialsProvider = getAwsCredentialsProvider(prestoFileSystem);
        assertTrue(credentialsProvider instanceof AWSStaticCredentialsProvider);
    }
}
Aggregations