Search in sources:

Example 36 with FileSystem

Use of org.apache.hadoop.fs.FileSystem in the Apache Hadoop project.

The class TestFileSystemCaching, method testCloseAllForUGI.

@Test
public void testCloseAllForUGI() throws Exception {
    final Configuration conf = new Configuration();
    conf.set("fs.cachedfile.impl", FileSystem.getFileSystemClass("file", null).getName());
    UserGroupInformation ugiA = UserGroupInformation.createRemoteUser("foo");
    // Single reusable action: look up the filesystem for cachedfile://a
    // while running as user "foo". The action is stateless, so the same
    // instance can be passed to doAs() multiple times.
    PrivilegedExceptionAction<FileSystem> lookup = () -> FileSystem.get(new URI("cachedfile://a"), conf);
    FileSystem fsA = ugiA.doAs(lookup);
    // A second lookup by the same UGI must be served from the cache.
    FileSystem fsA1 = ugiA.doAs(lookup);
    assertSame(fsA, fsA1);
    // Evicting every cached filesystem for this UGI ...
    FileSystem.closeAllForUGI(ugiA);
    // ... forces the next lookup to create a brand-new instance.
    fsA1 = ugiA.doAs(lookup);
    assertNotSame(fsA, fsA1);
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) FileSystem(org.apache.hadoop.fs.FileSystem) PrivilegedExceptionAction(java.security.PrivilegedExceptionAction) URI(java.net.URI) URISyntaxException(java.net.URISyntaxException) IOException(java.io.IOException) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation) Test(org.junit.Test)

Example 37 with FileSystem

Use of org.apache.hadoop.fs.FileSystem in the Apache Hadoop project.

The class TestFileSystemCaching, method testDeleteOnExitRemoved.

@Test
public void testDeleteOnExitRemoved() throws IOException {
    FileSystem backing = mock(FileSystem.class);
    FileSystem wrapper = new FilterFileSystem(backing);
    Path target = new Path("/a");
    // Registration succeeds because the path exists at deleteOnExit() time.
    when(backing.getFileStatus(eq(target))).thenReturn(new FileStatus());
    assertTrue(wrapper.deleteOnExit(target));
    verify(backing).getFileStatus(eq(target));
    // Drop the stubbing: from here on the path appears to have been removed.
    reset(backing);
    wrapper.close();
    // close() re-checks existence but must not delete a path that vanished.
    verify(backing).getFileStatus(eq(target));
    verify(backing, never()).delete(any(Path.class), anyBoolean());
}
Also used : FileSystem(org.apache.hadoop.fs.FileSystem) Test(org.junit.Test)

Example 38 with FileSystem

Use of org.apache.hadoop.fs.FileSystem in the Apache Hadoop project.

The class TestFileSystemCaching, method testDefaultFsUris.

@Test
public void testDefaultFsUris() throws Exception {
    final Configuration conf = new Configuration();
    conf.set("fs.defaultfs.impl", DefaultFs.class.getName());
    final URI defaultUri = URI.create("defaultfs://host");
    FileSystem.setDefaultUri(conf, defaultUri);
    // Sanity check: resolving with no URI at all yields the default fs.
    final FileSystem defaultFs = FileSystem.get(conf);
    assertEquals(defaultUri, defaultFs.getUri());
    // Default scheme with an empty or matching authority resolves to the
    // same cached default filesystem instance.
    FileSystem resolved = FileSystem.get(URI.create("defaultfs:/"), conf);
    assertSame(defaultFs, resolved);
    resolved = FileSystem.get(URI.create("defaultfs:///"), conf);
    assertSame(defaultFs, resolved);
    resolved = FileSystem.get(URI.create("defaultfs://host"), conf);
    assertSame(defaultFs, resolved);
    // Same scheme but a different authority is a distinct filesystem.
    resolved = FileSystem.get(URI.create("defaultfs://host2"), conf);
    assertNotSame(defaultFs, resolved);
    // No scheme and no authority falls back to the default fs.
    resolved = FileSystem.get(URI.create("/"), conf);
    assertSame(defaultFs, resolved);
    // An authority without a scheme must be rejected, whether or not the
    // authority matches the default URI's.
    for (String authorityOnly : new String[] { "//host", "//host2" }) {
        try {
            resolved = FileSystem.get(URI.create(authorityOnly), conf);
            fail("got fs with auth but no scheme");
        } catch (UnsupportedFileSystemException expected) {
            // expected: a scheme cannot be inferred from an authority alone
        }
    }
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) FileSystem(org.apache.hadoop.fs.FileSystem) URI(java.net.URI) Test(org.junit.Test)

Example 39 with FileSystem

Use of org.apache.hadoop.fs.FileSystem in the Apache Flink project.

The class HDFSCopyFromLocal, method copyFromLocal.

public static void copyFromLocal(final File localPath, final URI remotePath) throws Exception {
    // HDFS client code can deadlock if the thread doing the copy is
    // interrupted, so the copy runs on a dedicated daemon thread that the
    // caller joins on instead of copying directly.
    final Tuple1<Exception> failure = Tuple1.of(null);
    Runnable copyTask = () -> {
        try {
            Configuration hadoopConf = HadoopFileSystem.getHadoopConfiguration();
            FileSystem fs = FileSystem.get(remotePath, hadoopConf);
            fs.copyFromLocalFile(new Path(localPath.getAbsolutePath()), new Path(remotePath));
        } catch (Exception e) {
            // Capture the failure so it can be rethrown on the caller's thread.
            failure.f0 = e;
        }
    };
    Thread copier = new Thread(copyTask, "HDFS Copy from " + localPath + " to " + remotePath);
    copier.setDaemon(true);
    copier.start();
    copier.join();
    // Surface any exception recorded by the copy thread to the caller.
    if (failure.f0 != null) {
        throw failure.f0;
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) FileSystem(org.apache.hadoop.fs.FileSystem) HadoopFileSystem(org.apache.flink.runtime.fs.hdfs.HadoopFileSystem)

Example 40 with FileSystem

Use of org.apache.hadoop.fs.FileSystem in the Apache Hadoop project.

The class GenericOptionsParser, method processGeneralOptions.

/**
 * Modifies the configuration according to the user-specified generic options
 * (-fs, -jt, -conf, -D, -libjars, -files, -archives, -tokenCacheFile).
 *
 * @param line parsed command line holding the user-specified generic options
 * @throws IOException if a file named by an option (e.g. -tokenCacheFile)
 *         cannot be validated or read
 */
private void processGeneralOptions(CommandLine line) throws IOException {
    // -fs: override the default filesystem URI.
    if (line.hasOption("fs")) {
        FileSystem.setDefaultUri(conf, line.getOptionValue("fs"));
    }
    // -jt: set the resource manager address; the special value "local"
    // additionally switches the MapReduce framework to local mode.
    if (line.hasOption("jt")) {
        String optionValue = line.getOptionValue("jt");
        if (optionValue.equalsIgnoreCase("local")) {
            conf.set("mapreduce.framework.name", optionValue);
        }
        conf.set("yarn.resourcemanager.address", optionValue, "from -jt command line option");
    }
    // -conf: add each named file as an extra configuration resource.
    if (line.hasOption("conf")) {
        String[] values = line.getOptionValues("conf");
        for (String value : values) {
            conf.addResource(new Path(value));
        }
    }
    // -D key=value: set individual configuration properties. Arguments
    // without an '=' are silently skipped (keyval.length != 2).
    if (line.hasOption('D')) {
        String[] property = line.getOptionValues('D');
        for (String prop : property) {
            String[] keyval = prop.split("=", 2);
            if (keyval.length == 2) {
                conf.set(keyval[0], keyval[1], "from command line");
            }
        }
    }
    // -libjars: record the jar list and also prepend the jars to both the
    // Configuration's classloader and the thread context classloader so
    // client-side code can load classes from them immediately.
    if (line.hasOption("libjars")) {
        // for libjars, we allow expansion of wildcards
        conf.set("tmpjars", validateFiles(line.getOptionValue("libjars"), true), "from -libjars command line option");
        //setting libjars in client classpath
        URL[] libjars = getLibJars(conf);
        if (libjars != null && libjars.length > 0) {
            conf.setClassLoader(new URLClassLoader(libjars, conf.getClassLoader()));
            Thread.currentThread().setContextClassLoader(new URLClassLoader(libjars, Thread.currentThread().getContextClassLoader()));
        }
    }
    // -files / -archives: record validated file lists for job submission.
    if (line.hasOption("files")) {
        conf.set("tmpfiles", validateFiles(line.getOptionValue("files")), "from -files command line option");
    }
    if (line.hasOption("archives")) {
        conf.set("tmparchives", validateFiles(line.getOptionValue("archives")), "from -archives command line option");
    }
    // Mark that generic options were parsed, regardless of which were given.
    conf.setBoolean("mapreduce.client.genericoptionsparser.used", true);
    // tokensFile
    if (line.hasOption("tokenCacheFile")) {
        String fileName = line.getOptionValue("tokenCacheFile");
        // check if the local file exists
        FileSystem localFs = FileSystem.getLocal(conf);
        Path p = localFs.makeQualified(new Path(fileName));
        // Fails fast here if the token cache file is missing
        // (getFileStatus raises for a nonexistent path).
        localFs.getFileStatus(p);
        if (LOG.isDebugEnabled()) {
            LOG.debug("setting conf tokensFile: " + fileName);
        }
        // Load the stored delegation tokens into the current user's
        // credentials and record the file for the job.
        UserGroupInformation.getCurrentUser().addCredentials(Credentials.readTokenStorageFile(p, conf));
        conf.set("mapreduce.job.credentials.binary", p.toString(), "from -tokenCacheFile command line option");
    }
}
Also used : Path(org.apache.hadoop.fs.Path) URLClassLoader(java.net.URLClassLoader) FileSystem(org.apache.hadoop.fs.FileSystem) URL(java.net.URL)

Aggregations

FileSystem (org.apache.hadoop.fs.FileSystem)2611 Path (org.apache.hadoop.fs.Path)2199 Test (org.junit.Test)1034 Configuration (org.apache.hadoop.conf.Configuration)890 IOException (java.io.IOException)757 FileStatus (org.apache.hadoop.fs.FileStatus)419 FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream)264 MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster)227 ArrayList (java.util.ArrayList)208 File (java.io.File)181 DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem)165 JobConf (org.apache.hadoop.mapred.JobConf)163 FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream)151 HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration)145 URI (java.net.URI)135 SequenceFile (org.apache.hadoop.io.SequenceFile)118 Text (org.apache.hadoop.io.Text)112 FileNotFoundException (java.io.FileNotFoundException)102 FsPermission (org.apache.hadoop.fs.permission.FsPermission)94 Job (org.apache.hadoop.mapreduce.Job)81