Search in sources:

Example 66 with FileSystem

Use of org.apache.hadoop.fs.FileSystem in the Apache Hadoop project.

The class TestHttpFSServer, method testGlobFilter.

/**
 * Verifies that LISTSTATUS honors the {@code filter} glob parameter:
 * lists {@code /tmp} through the HttpFS REST endpoint with
 * {@code filter=f*} and expects an HTTP 200 response.
 */
@Test
@TestDir
@TestJetty
@TestHdfs
public void testGlobFilter() throws Exception {
    // Start an HttpFS server without security and seed HDFS with one
    // file whose name matches the "f*" glob used below.
    createHttpFSServer(false);
    FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
    fs.mkdirs(new Path("/tmp"));
    fs.create(new Path("/tmp/foo.txt")).close();
    String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
    URL url = new URL(TestJettyHelper.getJettyURL(), MessageFormat.format("/webhdfs/v1/tmp?user.name={0}&op=liststatus&filter=f*", user));
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    // JUnit convention: expected value first, actual value second
    // (the original call had them swapped, which garbles failure messages).
    Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
    // try-with-resources guarantees the stream is closed even if
    // readLine() throws; the original leaked the reader on failure.
    try (BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream()))) {
        reader.readLine();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) HttpURLConnection(java.net.HttpURLConnection) InputStreamReader(java.io.InputStreamReader) FileSystem(org.apache.hadoop.fs.FileSystem) BufferedReader(java.io.BufferedReader) URL(java.net.URL) AuthenticatedURL(org.apache.hadoop.security.authentication.client.AuthenticatedURL) TestJetty(org.apache.hadoop.test.TestJetty) TestHdfs(org.apache.hadoop.test.TestHdfs) TestDir(org.apache.hadoop.test.TestDir) Test(org.junit.Test)

Example 67 with FileSystem

use of org.apache.hadoop.fs.FileSystem in project hadoop by apache.

The class TestHttpFSServer, method testXAttrs.

/**
 * Exercises the XAttr REST operations (GETXATTRS, SETXATTR, REMOVEXATTR)
 * end to end: sets two extended attributes on a file, then removes them
 * one at a time, checking the reported attribute map after every step.
 */
@Test
@TestDir
@TestJetty
@TestHdfs
public void testXAttrs() throws Exception {
    final String firstName = "user.a1";
    final byte[] firstValue = { 0x31, 0x32, 0x33 };
    final String secondName = "user.a2";
    final byte[] secondValue = { 0x41, 0x42, 0x43 };
    final String dir = "/xattrTest";
    final String path = dir + "/file";
    createHttpFSServer(false);
    FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
    fs.mkdirs(new Path(dir));
    createWithHttp(path, null);
    // A freshly created file carries no extended attributes.
    Map<String, byte[]> xAttrs = getXAttrs(getStatus(path, "GETXATTRS"));
    Assert.assertEquals(0, xAttrs.size());
    // Attach both attributes and verify they round-trip intact.
    putCmd(path, "SETXATTR", setXAttrParam(firstName, firstValue));
    putCmd(path, "SETXATTR", setXAttrParam(secondName, secondValue));
    xAttrs = getXAttrs(getStatus(path, "GETXATTRS"));
    Assert.assertEquals(2, xAttrs.size());
    Assert.assertArrayEquals(firstValue, xAttrs.get(firstName));
    Assert.assertArrayEquals(secondValue, xAttrs.get(secondName));
    // Drop the first attribute; only the second should remain.
    putCmd(path, "REMOVEXATTR", "xattr.name=" + firstName);
    xAttrs = getXAttrs(getStatus(path, "GETXATTRS"));
    Assert.assertEquals(1, xAttrs.size());
    Assert.assertArrayEquals(secondValue, xAttrs.get(secondName));
    // Drop the second attribute; the map should be empty again.
    putCmd(path, "REMOVEXATTR", "xattr.name=" + secondName);
    xAttrs = getXAttrs(getStatus(path, "GETXATTRS"));
    Assert.assertEquals(0, xAttrs.size());
}
Also used : Path(org.apache.hadoop.fs.Path) FileSystem(org.apache.hadoop.fs.FileSystem) TestJetty(org.apache.hadoop.test.TestJetty) TestHdfs(org.apache.hadoop.test.TestHdfs) TestDir(org.apache.hadoop.test.TestDir) Test(org.junit.Test)

Example 68 with FileSystem

use of org.apache.hadoop.fs.FileSystem in project hadoop by apache.

The class TestHttpFSServerNoACLs, method testWithNoAcls.

/**
 * With ACL support disabled on the NameNode, verifies that:
 * <ol>
 *   <li>GETFILESTATUS and LISTSTATUS still succeed</li>
 *   <li>GETACLSTATUS is rejected with an exception</li>
 *   <li>every ACL-mutating PUT (SET, MODIFY, REMOVE, ...) is rejected</li>
 * </ol>
 *
 * @throws Exception
 */
@Test
@TestDir
@TestJetty
public void testWithNoAcls() throws Exception {
    final String userFooEntry = "user:foo:rw-";
    final String userFooRemoval = "user:foo:";
    final String userBarEntry = "user:bar:r--";
    final String groupEntry = "group::r--";
    final String setSpec = "aclspec=user::rwx," + userFooEntry + "," + groupEntry + ",other::---";
    final String modifySpec = "aclspec=" + userBarEntry;
    final String removeSpec = "aclspec=" + userFooRemoval;
    final String defaultEntry = "default:user:glarch:r-x";
    final String defaultSpec = "aclspec=" + defaultEntry;
    final String dir = "/noACLs";
    final String path = dir + "/foo";
    startMiniDFS();
    createHttpFSServer();
    FileSystem fs = FileSystem.get(nnConf);
    fs.mkdirs(new Path(dir));
    // Seed the test file with a single byte of content.
    OutputStream out = fs.create(new Path(path));
    out.write(1);
    out.close();
    // Plain status operations succeed; the ACL status query must not.
    getStatus(path, "GETFILESTATUS", true);
    getStatus(dir, "LISTSTATUS", true);
    getStatus(path, "GETACLSTATUS", false);
    // Every ACL-modifying PUT must fail while ACLs are disabled.
    putCmd(path, "SETACL", setSpec, false);
    putCmd(path, "MODIFYACLENTRIES", modifySpec, false);
    putCmd(path, "REMOVEACLENTRIES", removeSpec, false);
    putCmd(path, "REMOVEACL", null, false);
    putCmd(dir, "SETACL", defaultSpec, false);
    putCmd(dir, "REMOVEDEFAULTACL", null, false);
    miniDfs.shutdown();
}
Also used : Path(org.apache.hadoop.fs.Path) FileSystem(org.apache.hadoop.fs.FileSystem) OutputStream(java.io.OutputStream) FileOutputStream(java.io.FileOutputStream) TestJetty(org.apache.hadoop.test.TestJetty) TestDir(org.apache.hadoop.test.TestDir) Test(org.junit.Test)

Example 69 with FileSystem

use of org.apache.hadoop.fs.FileSystem in project hadoop by apache.

The class TestHttpFSWithKerberos, method testDelegationTokenWithFS.

/**
 * Obtains a delegation token through one {@code FileSystem} instance of
 * the given class and then authenticates a second instance with that
 * token, verifying it can list the root directory.
 *
 * @param fileSystemClass the WebHDFS-compatible FileSystem implementation
 *                        to exercise
 * @throws Exception on any test failure
 */
@SuppressWarnings("deprecation")
private void testDelegationTokenWithFS(Class fileSystemClass) throws Exception {
    createHttpFSServer();
    Configuration conf = new Configuration();
    conf.set("fs.webhdfs.impl", fileSystemClass.getName());
    // Disable the FS cache so each FileSystem.get() yields a fresh instance.
    conf.set("fs.hdfs.impl.disable.cache", "true");
    URI uri = new URI("webhdfs://" + TestJettyHelper.getJettyURL().toURI().getAuthority());
    Token<?>[] tokens;
    // try-with-resources closes the FileSystem even when a call throws;
    // the original leaked the instance on any failure before close().
    try (FileSystem fs = FileSystem.get(uri, conf)) {
        tokens = fs.addDelegationTokens("foo", null);
    }
    Assert.assertEquals(1, tokens.length);
    // Second instance: authenticate with the token obtained above.
    try (FileSystem fs = FileSystem.get(uri, conf)) {
        ((DelegationTokenRenewer.Renewable) fs).setDelegationToken(tokens[0]);
        fs.listStatus(new Path("/"));
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) FileSystem(org.apache.hadoop.fs.FileSystem) WebHdfsFileSystem(org.apache.hadoop.hdfs.web.WebHdfsFileSystem) HttpFSFileSystem(org.apache.hadoop.fs.http.client.HttpFSFileSystem) Token(org.apache.hadoop.security.token.Token) URI(java.net.URI)

Example 70 with FileSystem

use of org.apache.hadoop.fs.FileSystem in project hadoop by apache.

The class TestFileSystemAccessService, method fileSystemCache.

/**
 * Verifies the FileSystemAccess cache lifecycle: a released FileSystem
 * stays usable while cached, repeated createFileSystem calls return the
 * same cached instance, and the instance is closed by the purger once its
 * lease count drops to zero and the purge timeout elapses.
 */
@Test
@TestDir
@TestHdfs
public void fileSystemCache() throws Exception {
    String dir = TestDirHelper.getTestDir().getAbsolutePath();
    String services = StringUtils.join(",", Arrays.asList(InstrumentationService.class.getName(), SchedulerService.class.getName(), FileSystemAccessService.class.getName()));
    Configuration hadoopConf = new Configuration(false);
    hadoopConf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, TestHdfsHelper.getHdfsConf().get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY));
    createHadoopConf(hadoopConf);
    Configuration conf = new Configuration(false);
    conf.set("server.services", services);
    // Purge the cache aggressively (1s frequency/timeout) so the 4s
    // sleeps below are long enough to observe eviction.
    conf.set("server.hadoop.filesystem.cache.purge.frequency", "1");
    conf.set("server.hadoop.filesystem.cache.purge.timeout", "1");
    Server server = new Server("server", dir, dir, dir, dir, conf);
    try {
        server.init();
        FileSystemAccess hadoop = server.get(FileSystemAccess.class);
        FileSystem fs1 = hadoop.createFileSystem("u", hadoop.getFileSystemConfiguration());
        Assert.assertNotNull(fs1);
        fs1.mkdirs(new Path("/tmp/foo1"));
        hadoop.releaseFileSystem(fs1);
        // Still usable after release because the cache keeps it alive.
        fs1.mkdirs(new Path("/tmp/foo2"));
        FileSystem fs2 = hadoop.createFileSystem("u", hadoop.getFileSystemConfiguration());
        // The cache must hand back the exact same instance, so assert
        // identity (assertSame) rather than mere equality.
        Assert.assertSame(fs1, fs2);
        Thread.sleep(4 * 1000);
        // Still around: the lease count is 1 (fs2 is checked out).
        fs1.mkdirs(new Path("/tmp/foo2"));
        Thread.sleep(4 * 1000);
        // Still around: the lease count is 1 (fs2 is checked out).
        fs2.mkdirs(new Path("/tmp/foo"));
        hadoop.releaseFileSystem(fs2);
        Thread.sleep(4 * 1000);
        // Lease count is now 0; after the purge interval the cached
        // FileSystem must have been closed, so using it should fail.
        try {
            fs2.mkdirs(new Path("/tmp/foo"));
            Assert.fail("Expected IOException: purged FileSystem should be closed");
        } catch (IOException expected) {
            // Expected: the purged FileSystem has been closed.
        } catch (Exception ex) {
            Assert.fail("Expected IOException but got " + ex.getClass().getName());
        }
    } finally {
        server.destroy();
    }
}
Also used : FileSystemAccess(org.apache.hadoop.lib.service.FileSystemAccess) Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) Server(org.apache.hadoop.lib.server.Server) FileSystem(org.apache.hadoop.fs.FileSystem) IOException(java.io.IOException) TestException(org.apache.hadoop.test.TestException) IOException(java.io.IOException) FileSystemAccessException(org.apache.hadoop.lib.service.FileSystemAccessException) ServiceException(org.apache.hadoop.lib.server.ServiceException) TestHdfs(org.apache.hadoop.test.TestHdfs) TestDir(org.apache.hadoop.test.TestDir) Test(org.junit.Test)

Aggregations

FileSystem (org.apache.hadoop.fs.FileSystem)2611 Path (org.apache.hadoop.fs.Path)2199 Test (org.junit.Test)1034 Configuration (org.apache.hadoop.conf.Configuration)890 IOException (java.io.IOException)757 FileStatus (org.apache.hadoop.fs.FileStatus)419 FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream)264 MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster)227 ArrayList (java.util.ArrayList)208 File (java.io.File)181 DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem)165 JobConf (org.apache.hadoop.mapred.JobConf)163 FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream)151 HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration)145 URI (java.net.URI)135 SequenceFile (org.apache.hadoop.io.SequenceFile)118 Text (org.apache.hadoop.io.Text)112 FileNotFoundException (java.io.FileNotFoundException)102 FsPermission (org.apache.hadoop.fs.permission.FsPermission)94 Job (org.apache.hadoop.mapreduce.Job)81