Use of com.upplication.s3fs.S3FileSystem in project geowave by locationtech.
From the class SparkIngestDriver, the method runOperation:
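runOperation loads the GeoWave configuration properties, resolves the ingest input path on either S3 or HDFS, walks the input tree to collect files, sizes the Spark session from the file count, and then processes each partition of file URIs.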
public boolean runOperation(
    final File configFile,
    final LocalInputCommandLineOptions localInput,
    final String inputStoreName,
    final String indexList,
    final VisibilityOptions ingestOptions,
    final SparkCommandLineOptions sparkOptions,
    final String basePath,
    final Console console) throws IOException {
  final Properties configProperties = ConfigOptions.loadProperties(configFile);
  JavaSparkContext jsc = null;
  SparkSession session = null;
  int numExecutors;
  int numCores;
  int numPartitions;
  Path inputPath;
  String s3EndpointUrl = null;
  final boolean isS3 = basePath.startsWith("s3://");
  final boolean isHDFS = !isS3 && (basePath.startsWith("hdfs://") || basePath.startsWith("file:/"));
  if (isS3) {
    // Input path is S3: mount it through the s3fs NIO provider.
    s3EndpointUrl = ConfigAWSCommand.getS3Url(configProperties);
    inputPath = URLIngestUtils.setupS3FileSystem(basePath, s3EndpointUrl);
  } else if (isHDFS) {
    // Input path is HDFS (file:/ URIs are also routed through the HDFS client).
    final String hdfsFSUrl = ConfigHDFSCommand.getHdfsUrl(configProperties);
    inputPath = setUpHDFSFilesystem(basePath, hdfsFSUrl, basePath.startsWith("file:/"));
  } else {
    LOGGER.warn("Spark ingest supports only S3 or HDFS as the input location");
    return false;
  }
  if ((inputPath == null) || (!Files.exists(inputPath))) {
    LOGGER.error("Error accessing input path " + basePath);
    return false;
  }
  // Collect every file under the input path.
  final List<Path> inputFileList = new ArrayList<>();
  Files.walkFileTree(inputPath, new SimpleFileVisitor<Path>() {
    @Override
    public FileVisitResult visitFile(final Path file, final BasicFileAttributes attrs)
        throws IOException {
      inputFileList.add(file);
      return FileVisitResult.CONTINUE;
    }
  });
  final int numInputFiles = inputFileList.size();
  // Size the cluster from the file count: ~8 files per executor,
  // 4 cores per executor by default.
  if (sparkOptions.getNumExecutors() < 1) {
    numExecutors = (int) Math.ceil((double) numInputFiles / 8);
  } else {
    numExecutors = sparkOptions.getNumExecutors();
  }
  if (sparkOptions.getNumCores() < 1) {
    numCores = 4;
  } else {
    numCores = sparkOptions.getNumCores();
  }
  // Oversubscribe partitions 2x relative to the total core count.
  numPartitions = numExecutors * numCores * 2;
  if (session == null) {
    String jar = "";
    try {
      jar = SparkIngestDriver.class.getProtectionDomain().getCodeSource().getLocation().toURI().getPath();
    } catch (final URISyntaxException e) {
      LOGGER.error("Unable to set jar location in spark configuration", e);
    }
    session = SparkSession.builder()
        .appName(sparkOptions.getAppName())
        .master(sparkOptions.getMaster())
        .config("spark.driver.host", sparkOptions.getHost())
        .config("spark.jars", jar)
        .config("spark.executor.instances", Integer.toString(numExecutors))
        .config("spark.executor.cores", Integer.toString(numCores))
        .getOrCreate();
    jsc = JavaSparkContext.fromSparkContext(session.sparkContext());
  }
  final JavaRDD<URI> fileRDD =
      jsc.parallelize(Lists.transform(inputFileList, path -> path.toUri()), numPartitions);
  if (isS3) {
    final String s3FinalEndpointUrl = s3EndpointUrl;
    fileRDD.foreachPartition(uri -> {
      // Re-initialize the S3 filesystem on each executor; the endpoint prefix is
      // stripped from each URI to recover the bucket-relative path.
      final S3FileSystem fs = initializeS3FS(s3FinalEndpointUrl);
      final List<URI> inputFiles = new ArrayList<>();
      while (uri.hasNext()) {
        final Path inputFile =
            fs.getPath(uri.next().toString().replaceFirst(s3FinalEndpointUrl, ""));
        inputFiles.add(inputFile.toUri());
      }
      processInput(
          configFile,
          localInput,
          inputStoreName,
          indexList,
          ingestOptions,
          configProperties,
          inputFiles.iterator(),
          console);
    });
  } else if (isHDFS) {
    try {
      setHdfsURLStreamHandlerFactory();
    } catch (NoSuchFieldException | SecurityException | IllegalArgumentException | IllegalAccessException e) {
      LOGGER.error("Unable to set HDFS URL stream handler factory", e);
    }
    fileRDD.foreachPartition(uri -> {
      processInput(
          configFile,
          localInput,
          inputStoreName,
          indexList,
          ingestOptions,
          configProperties,
          uri,
          new JCommander().getConsole());
    });
  }
  close(session);
  return true;
}
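The sizing logic above targets roughly eight input files per executor, defaults to four cores per executor, and requests twice as many partitions as total cores. A minimal standalone sketch of that arithmetic (sizeCluster and its int[] return shape are illustrative, not a GeoWave API):

// Hypothetical helper restating the sizing heuristic from runOperation above.
static int[] sizeCluster(final int numInputFiles, final int requestedExecutors, final int requestedCores) {
  // No executor count requested: aim for ~8 input files per executor.
  final int executors = (requestedExecutors < 1) ? (int) Math.ceil(numInputFiles / 8.0) : requestedExecutors;
  // No core count requested: default to 4 cores per executor.
  final int cores = (requestedCores < 1) ? 4 : requestedCores;
  // 2x oversubscription relative to total cores helps even out skewed partitions.
  final int partitions = executors * cores * 2;
  return new int[] {executors, cores, partitions};
}

For example, 100 input files with no explicit sizing yields 13 executors, 4 cores each, and 104 partitions.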
Use of com.upplication.s3fs.S3FileSystem in project geowave by locationtech.
From the class DefaultGeoWaveAWSCredentialsProviderTest, the method testAnonymousAccess:
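The test starts a local S3Mock server, points the s3fs client at it instead of AWS, seeds one object, and asserts that listing the bucket through the S3 NIO filesystem sees exactly that object, with no real credentials involved.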
@Test
public void testAnonymousAccess()
    throws NoSuchFieldException, SecurityException, IllegalArgumentException,
    IllegalAccessException, URISyntaxException, IOException {
  // Back the mock S3 endpoint with a real temp directory; File.createTempFile
  // creates a plain file, so delete it before turning the path into a directory.
  final File temp = File.createTempFile("temp", Long.toString(System.nanoTime()));
  temp.delete();
  temp.mkdirs();
  final S3Mock mockS3 =
      new S3Mock.Builder().withPort(8001).withFileBackend(temp.getAbsolutePath()).build();
  mockS3.start();
  URLIngestUtils.setURLStreamHandlerFactory(URLTYPE.S3);
  final SparkIngestDriver sparkDriver = new SparkIngestDriver();
  final S3FileSystem s3 = sparkDriver.initializeS3FS("s3://s3.amazonaws.com");
  // Point the client at the local mock rather than AWS, then seed one object.
  s3.getClient().setEndpoint("http://127.0.0.1:8001");
  s3.getClient().createBucket("testbucket");
  s3.getClient().putObject("testbucket", "test", "content");
  try (Stream<Path> s =
      Files.list(URLIngestUtils.setupS3FileSystem("s3://testbucket/", "s3://s3.amazonaws.com"))) {
    Assert.assertEquals(1, s.count());
  }
  mockS3.shutdown();
}
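Overriding the client endpoint to http://127.0.0.1:8001 is what makes the anonymous path testable: S3Mock accepts unsigned requests, so the credentials provider under test never needs real AWS keys.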