Use of org.apache.hadoop.fs.FileSystem in project hadoop by apache.
The class TestFileSystemCaching, method testCloseAllForUGI.
@Test
public void testCloseAllForUGI() throws Exception {
  final Configuration conf = new Configuration();
  conf.set("fs.cachedfile.impl", FileSystem.getFileSystemClass("file", null).getName());
  UserGroupInformation ugiA = UserGroupInformation.createRemoteUser("foo");
  FileSystem fsA = ugiA.doAs(new PrivilegedExceptionAction<FileSystem>() {
    @Override
    public FileSystem run() throws Exception {
      return FileSystem.get(new URI("cachedfile://a"), conf);
    }
  });
  // Now we should get the cached filesystem
  FileSystem fsA1 = ugiA.doAs(new PrivilegedExceptionAction<FileSystem>() {
    @Override
    public FileSystem run() throws Exception {
      return FileSystem.get(new URI("cachedfile://a"), conf);
    }
  });
  assertSame(fsA, fsA1);
  FileSystem.closeAllForUGI(ugiA);
  // Now we should get a different (newly created) filesystem
  fsA1 = ugiA.doAs(new PrivilegedExceptionAction<FileSystem>() {
    @Override
    public FileSystem run() throws Exception {
      return FileSystem.get(new URI("cachedfile://a"), conf);
    }
  });
  assertNotSame(fsA, fsA1);
}
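The caching behavior exercised above can also be bypassed deliberately. A minimal sketch, assuming only the standard org.apache.hadoop.fs.FileSystem API (the cache key combines scheme, authority, and UGI, which is why closeAllForUGI evicts per-user entries):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

// Two ways to obtain an uncached FileSystem instance, for illustration.
public class UncachedFsExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Option 1: FileSystem.newInstance() always skips the cache.
    FileSystem fresh = FileSystem.newInstance(new URI("file:///"), conf);
    // Option 2: disable caching for one scheme via fs.<scheme>.impl.disable.cache,
    // after which every get() for that scheme creates a new instance.
    conf.setBoolean("fs.file.impl.disable.cache", true);
    FileSystem uncached = FileSystem.get(new URI("file:///"), conf);
    fresh.close();
    uncached.close();
  }
}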
Use of org.apache.hadoop.fs.FileSystem in project hadoop by apache.
The class TestFileSystemCaching, method testDeleteOnExitRemoved.
@Test
public void testDeleteOnExitRemoved() throws IOException {
  FileSystem mockFs = mock(FileSystem.class);
  FileSystem fs = new FilterFileSystem(mockFs);
  Path path = new Path("/a");
  // don't delete on close if path existed, but was later removed
  when(mockFs.getFileStatus(eq(path))).thenReturn(new FileStatus());
  assertTrue(fs.deleteOnExit(path));
  verify(mockFs).getFileStatus(eq(path));
  reset(mockFs);
  fs.close();
  verify(mockFs).getFileStatus(eq(path));
  verify(mockFs, never()).delete(any(Path.class), anyBoolean());
}
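The contract this test verifies is that a path registered with deleteOnExit() is deleted in close() only if it still exists at that point. A minimal sketch against a real local filesystem (the scratch path is invented for illustration):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class DeleteOnExitExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // newInstance() so close() below does not close the shared cached instance.
    FileSystem fs = FileSystem.newInstance(new URI("file:///"), conf);
    Path tmp = new Path("/tmp/scratch-file");  // hypothetical scratch path
    fs.create(tmp).close();                    // create an empty file
    fs.deleteOnExit(tmp);                      // schedule deletion on close()
    fs.delete(tmp, false);                     // removed early, out of band
    fs.close();                                // finds nothing left to delete
  }
}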
Use of org.apache.hadoop.fs.FileSystem in project hadoop by apache.
The class TestFileSystemCaching, method testDefaultFsUris.
@Test
public void testDefaultFsUris() throws Exception {
  final Configuration conf = new Configuration();
  conf.set("fs.defaultfs.impl", DefaultFs.class.getName());
  final URI defaultUri = URI.create("defaultfs://host");
  FileSystem.setDefaultUri(conf, defaultUri);
  FileSystem fs = null;
  // sanity check default fs
  final FileSystem defaultFs = FileSystem.get(conf);
  assertEquals(defaultUri, defaultFs.getUri());
  // has scheme, no auth
  fs = FileSystem.get(URI.create("defaultfs:/"), conf);
  assertSame(defaultFs, fs);
  fs = FileSystem.get(URI.create("defaultfs:///"), conf);
  assertSame(defaultFs, fs);
  // has scheme, same auth
  fs = FileSystem.get(URI.create("defaultfs://host"), conf);
  assertSame(defaultFs, fs);
  // has scheme, different auth
  fs = FileSystem.get(URI.create("defaultfs://host2"), conf);
  assertNotSame(defaultFs, fs);
  // no scheme, no auth
  fs = FileSystem.get(URI.create("/"), conf);
  assertSame(defaultFs, fs);
  // no scheme, same auth
  try {
    fs = FileSystem.get(URI.create("//host"), conf);
    fail("got fs with auth but no scheme");
  } catch (UnsupportedFileSystemException e) {
    // expected
  }
  // no scheme, different auth
  try {
    fs = FileSystem.get(URI.create("//host2"), conf);
    fail("got fs with auth but no scheme");
  } catch (UnsupportedFileSystemException e) {
    // expected
  }
}
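In other words, a URI missing its scheme or authority is completed from fs.defaultFS before the cache lookup, while an authority without a scheme is rejected outright. A minimal sketch using the built-in local filesystem, assuming only standard FileSystem calls:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class DefaultFsExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem.setDefaultUri(conf, URI.create("file:///"));
    FileSystem byScheme = FileSystem.get(URI.create("file:///"), conf);
    FileSystem byDefault = FileSystem.get(URI.create("/"), conf);  // no scheme
    // Both resolve to the same cache key, hence the same instance.
    System.out.println(byScheme == byDefault);  // true
  }
}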
Use of org.apache.hadoop.fs.FileSystem in project flink by apache.
The class HDFSCopyFromLocal, method copyFromLocal.
public static void copyFromLocal(final File localPath, final URI remotePath) throws Exception {
  // Do it in another thread because HDFS can deadlock if it is interrupted while copying
  String threadName = "HDFS Copy from " + localPath + " to " + remotePath;
  final Tuple1<Exception> asyncException = Tuple1.of(null);
  Thread copyThread = new Thread(threadName) {
    @Override
    public void run() {
      try {
        Configuration hadoopConf = HadoopFileSystem.getHadoopConfiguration();
        FileSystem fs = FileSystem.get(remotePath, hadoopConf);
        fs.copyFromLocalFile(new Path(localPath.getAbsolutePath()), new Path(remotePath));
      } catch (Exception e) {
        asyncException.f0 = e;
      }
    }
  };
  copyThread.setDaemon(true);
  copyThread.start();
  copyThread.join();
  if (asyncException.f0 != null) {
    throw asyncException.f0;
  }
}
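A hypothetical caller (both paths are invented for illustration), showing why the extra thread matters: an interrupt on the calling thread only interrupts join(), never the HDFS client performing the copy:

import java.io.File;
import java.net.URI;

public class CopyExample {
  public static void main(String[] args) throws Exception {
    // Assumes the HDFSCopyFromLocal class shown above is on the classpath.
    File localState = new File("/tmp/checkpoint-data");                  // hypothetical
    URI remote = new URI("hdfs://namenode:8020/backups/checkpoint-data"); // hypothetical
    // If this thread is interrupted, join() throws InterruptedException and
    // the daemon copy thread is abandoned, so the HDFS client is never
    // interrupted mid-copy.
    HDFSCopyFromLocal.copyFromLocal(localState, remote);
  }
}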
Use of org.apache.hadoop.fs.FileSystem in project hadoop by apache.
The class GenericOptionsParser, method processGeneralOptions.
/**
 * Modify configuration according to user-specified generic options.
 *
 * @param line User-specified generic options
 */
private void processGeneralOptions(CommandLine line) throws IOException {
  if (line.hasOption("fs")) {
    FileSystem.setDefaultUri(conf, line.getOptionValue("fs"));
  }
  if (line.hasOption("jt")) {
    String optionValue = line.getOptionValue("jt");
    if (optionValue.equalsIgnoreCase("local")) {
      conf.set("mapreduce.framework.name", optionValue);
    }
    conf.set("yarn.resourcemanager.address", optionValue, "from -jt command line option");
  }
  if (line.hasOption("conf")) {
    String[] values = line.getOptionValues("conf");
    for (String value : values) {
      conf.addResource(new Path(value));
    }
  }
  if (line.hasOption('D')) {
    String[] property = line.getOptionValues('D');
    for (String prop : property) {
      String[] keyval = prop.split("=", 2);
      if (keyval.length == 2) {
        conf.set(keyval[0], keyval[1], "from command line");
      }
    }
  }
  if (line.hasOption("libjars")) {
    // for libjars, we allow expansion of wildcards
    conf.set("tmpjars", validateFiles(line.getOptionValue("libjars"), true), "from -libjars command line option");
    // setting libjars in client classpath
    URL[] libjars = getLibJars(conf);
    if (libjars != null && libjars.length > 0) {
      conf.setClassLoader(new URLClassLoader(libjars, conf.getClassLoader()));
      Thread.currentThread().setContextClassLoader(new URLClassLoader(libjars, Thread.currentThread().getContextClassLoader()));
    }
  }
  if (line.hasOption("files")) {
    conf.set("tmpfiles", validateFiles(line.getOptionValue("files")), "from -files command line option");
  }
  if (line.hasOption("archives")) {
    conf.set("tmparchives", validateFiles(line.getOptionValue("archives")), "from -archives command line option");
  }
  conf.setBoolean("mapreduce.client.genericoptionsparser.used", true);
  // tokensFile
  if (line.hasOption("tokenCacheFile")) {
    String fileName = line.getOptionValue("tokenCacheFile");
    // check if the local file exists
    FileSystem localFs = FileSystem.getLocal(conf);
    Path p = localFs.makeQualified(new Path(fileName));
    localFs.getFileStatus(p);
    if (LOG.isDebugEnabled()) {
      LOG.debug("setting conf tokensFile: " + fileName);
    }
    UserGroupInformation.getCurrentUser().addCredentials(Credentials.readTokenStorageFile(p, conf));
    conf.set("mapreduce.job.credentials.binary", p.toString(), "from -tokenCacheFile command line option");
  }
}
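GenericOptionsParser is usually invoked indirectly through ToolRunner rather than called directly. A minimal sketch of a driver that picks up the generic options handled above (the class name MyTool is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

// ToolRunner feeds the command line through GenericOptionsParser, so -fs,
// -jt, -conf, -D, -libjars, -files, -archives and -tokenCacheFile are
// consumed before run() sees the remaining arguments.
public class MyTool extends Configured implements Tool {
  @Override
  public int run(String[] args) throws Exception {
    // Generic options have already been applied to getConf() at this point.
    System.out.println("remaining args: " + args.length);
    return 0;
  }

  public static void main(String[] args) throws Exception {
    System.exit(ToolRunner.run(new Configuration(), new MyTool(), args));
  }
}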