
Example 16 with DistributedFileSystem

Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.

From the class TestOfflineImageViewerForAcl, method createOriginalFSImage.

/**
   * Create a populated namespace for later testing. Save its contents to a
   * data structure and store its fsimage location.
   * We only want to generate the fsimage file once and use it for
   * multiple tests.
   */
@BeforeClass
public static void createOriginalFSImage() throws IOException {
    MiniDFSCluster cluster = null;
    try {
        Configuration conf = new Configuration();
        conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
        cluster = new MiniDFSCluster.Builder(conf).build();
        cluster.waitActive();
        DistributedFileSystem hdfs = cluster.getFileSystem();
        // Create a reasonable namespace with ACLs
        Path dir = new Path("/dirWithNoAcl");
        hdfs.mkdirs(dir);
        writtenAcls.put(dir.toString(), hdfs.getAclStatus(dir));
        dir = new Path("/dirWithDefaultAcl");
        hdfs.mkdirs(dir);
        hdfs.setAcl(dir, Lists.newArrayList(
                aclEntry(DEFAULT, USER, ALL),
                aclEntry(DEFAULT, USER, "foo", ALL),
                aclEntry(DEFAULT, GROUP, READ_EXECUTE),
                aclEntry(DEFAULT, OTHER, NONE)));
        writtenAcls.put(dir.toString(), hdfs.getAclStatus(dir));
        Path file = new Path("/noAcl");
        FSDataOutputStream o = hdfs.create(file);
        o.write(23);
        o.close();
        writtenAcls.put(file.toString(), hdfs.getAclStatus(file));
        file = new Path("/withAcl");
        o = hdfs.create(file);
        o.write(23);
        o.close();
        hdfs.setAcl(file, Lists.newArrayList(
                aclEntry(ACCESS, USER, READ_WRITE),
                aclEntry(ACCESS, USER, "foo", READ),
                aclEntry(ACCESS, GROUP, READ),
                aclEntry(ACCESS, OTHER, NONE)));
        writtenAcls.put(file.toString(), hdfs.getAclStatus(file));
        file = new Path("/withSeveralAcls");
        o = hdfs.create(file);
        o.write(23);
        o.close();
        hdfs.setAcl(file, Lists.newArrayList(
                aclEntry(ACCESS, USER, READ_WRITE),
                aclEntry(ACCESS, USER, "foo", READ_WRITE),
                aclEntry(ACCESS, USER, "bar", READ),
                aclEntry(ACCESS, GROUP, READ),
                aclEntry(ACCESS, GROUP, "group", READ),
                aclEntry(ACCESS, OTHER, NONE)));
        writtenAcls.put(file.toString(), hdfs.getAclStatus(file));
        // Write results to the fsimage file
        hdfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER, false);
        hdfs.saveNamespace();
        // Determine the location of the fsimage file
        originalFsimage = FSImageTestUtil.findLatestImageFile(FSImageTestUtil
                .getFSImage(cluster.getNameNode()).getStorage().getStorageDir(0));
        if (originalFsimage == null) {
            throw new RuntimeException("Didn't generate or can't find fsimage");
        }
        LOG.debug("original FS image file is " + originalFsimage);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), BeforeClass (org.junit.BeforeClass)
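
The aclEntry(...) calls above come in through static imports (the AclEntryScope, AclEntryType, and FsAction constants plus a small helper in the Hadoop test tree). A minimal sketch of what such a helper does; the helper method itself is reconstructed here, while the AclEntry.Builder calls are the stock public API:

import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;

// Unnamed entry, e.g. aclEntry(DEFAULT, GROUP, READ_EXECUTE).
public static AclEntry aclEntry(AclEntryScope scope, AclEntryType type, FsAction perm) {
    return new AclEntry.Builder().setScope(scope).setType(type).setPermission(perm).build();
}

// Named entry for a specific user or group, e.g. aclEntry(ACCESS, USER, "foo", READ).
public static AclEntry aclEntry(AclEntryScope scope, AclEntryType type, String name, FsAction perm) {
    return new AclEntry.Builder().setScope(scope).setType(type).setName(name)
            .setPermission(perm).build();
}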

Example 17 with DistributedFileSystem

Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.

From the class TestWebHDFS, method testStoragePolicy.

@Test
public void testStoragePolicy() throws Exception {
    MiniDFSCluster cluster = null;
    final Configuration conf = WebHdfsTestUtil.createConf();
    final Path path = new Path("/file");
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
        final DistributedFileSystem dfs = cluster.getFileSystem();
        final WebHdfsFileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME);
        // test getAllStoragePolicies
        // use the typed toArray overload; a bare toArray() returns Object[],
        // and the raw cast to BlockStoragePolicy[] can fail at runtime
        BlockStoragePolicy[] dfsPolicies = dfs.getAllStoragePolicies()
                .toArray(new BlockStoragePolicy[0]);
        BlockStoragePolicy[] webHdfsPolicies = webHdfs.getAllStoragePolicies()
                .toArray(new BlockStoragePolicy[0]);
        Assert.assertTrue(Arrays.equals(dfsPolicies, webHdfsPolicies));
        // test get/set/unset policies
        DFSTestUtil.createFile(dfs, path, 0, (short) 1, 0L);
        // get defaultPolicy
        BlockStoragePolicySpi defaultdfsPolicy = dfs.getStoragePolicy(path);
        // set policy through webhdfs
        webHdfs.setStoragePolicy(path, HdfsConstants.COLD_STORAGE_POLICY_NAME);
        // get policy from dfs
        BlockStoragePolicySpi dfsPolicy = dfs.getStoragePolicy(path);
        // get policy from webhdfs
        BlockStoragePolicySpi webHdfsPolicy = webHdfs.getStoragePolicy(path);
        Assert.assertEquals(HdfsConstants.COLD_STORAGE_POLICY_NAME, webHdfsPolicy.getName());
        Assert.assertEquals(webHdfsPolicy, dfsPolicy);
        // unset policy
        webHdfs.unsetStoragePolicy(path);
        Assert.assertEquals(defaultdfsPolicy, webHdfs.getStoragePolicy(path));
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), BlockStoragePolicy (org.apache.hadoop.hdfs.protocol.BlockStoragePolicy), BlockStoragePolicySpi (org.apache.hadoop.fs.BlockStoragePolicySpi), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), Test (org.junit.Test), HttpServerFunctionalTest (org.apache.hadoop.http.HttpServerFunctionalTest)
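
Under the hood, WebHdfsFileSystem maps these calls onto the WebHDFS REST operations SETSTORAGEPOLICY, GETSTORAGEPOLICY, and UNSETSTORAGEPOLICY. A minimal sketch of the raw HTTP equivalent, assuming a hypothetical NameNode HTTP address of localhost:9870 and the /file path from the test:

import java.net.HttpURLConnection;
import java.net.URL;

public class StoragePolicyViaRest {
    // Hypothetical NameNode HTTP address; substitute your cluster's value.
    private static final String NN = "http://localhost:9870";

    public static void main(String[] args) throws Exception {
        // SETSTORAGEPOLICY is an HTTP PUT with the policy name as a query parameter.
        HttpURLConnection put = (HttpURLConnection) new URL(NN
                + "/webhdfs/v1/file?op=SETSTORAGEPOLICY&storagepolicy=COLD").openConnection();
        put.setRequestMethod("PUT");
        System.out.println("SETSTORAGEPOLICY: " + put.getResponseCode());

        // GETSTORAGEPOLICY is an HTTP GET; the body is a JSON BlockStoragePolicy object.
        HttpURLConnection get = (HttpURLConnection) new URL(NN
                + "/webhdfs/v1/file?op=GETSTORAGEPOLICY").openConnection();
        System.out.println("GETSTORAGEPOLICY: " + get.getResponseCode());

        // UNSETSTORAGEPOLICY is an HTTP POST with no body.
        HttpURLConnection post = (HttpURLConnection) new URL(NN
                + "/webhdfs/v1/file?op=UNSETSTORAGEPOLICY").openConnection();
        post.setRequestMethod("POST");
        System.out.println("UNSETSTORAGEPOLICY: " + post.getResponseCode());
    }
}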

Example 18 with DistributedFileSystem

Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.

From the class TestWebHDFS, method testWebHdfsRenameSnapshot.

/**
   * Test snapshot rename through WebHdfs
   */
@Test
public void testWebHdfsRenameSnapshot() throws Exception {
    MiniDFSCluster cluster = null;
    final Configuration conf = WebHdfsTestUtil.createConf();
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
        cluster.waitActive();
        final DistributedFileSystem dfs = cluster.getFileSystem();
        final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME);
        final Path foo = new Path("/foo");
        dfs.mkdirs(foo);
        dfs.allowSnapshot(foo);
        webHdfs.createSnapshot(foo, "s1");
        final Path s1path = SnapshotTestHelper.getSnapshotRoot(foo, "s1");
        Assert.assertTrue(webHdfs.exists(s1path));
        // rename s1 to s2 with oldsnapshotName as null
        try {
            webHdfs.renameSnapshot(foo, null, "s2");
            fail("Expected IllegalArgumentException");
        } catch (RemoteException e) {
            Assert.assertEquals("Required param oldsnapshotname for " + "op: RENAMESNAPSHOT is null or empty", e.getLocalizedMessage());
        }
        // rename s1 to s2
        webHdfs.renameSnapshot(foo, "s1", "s2");
        assertFalse(webHdfs.exists(s1path));
        final Path s2path = SnapshotTestHelper.getSnapshotRoot(foo, "s2");
        Assert.assertTrue(webHdfs.exists(s2path));
        webHdfs.deleteSnapshot(foo, "s2");
        assertFalse(webHdfs.exists(s2path));
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), FileSystem (org.apache.hadoop.fs.FileSystem), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), RemoteException (org.apache.hadoop.ipc.RemoteException), Test (org.junit.Test), HttpServerFunctionalTest (org.apache.hadoop.http.HttpServerFunctionalTest)
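
For comparison, the same snapshot lifecycle can be driven directly through DistributedFileSystem instead of WebHDFS; a minimal sketch, assuming a dfs handle from a running MiniDFSCluster as in the test above:

public static void snapshotLifecycle(DistributedFileSystem dfs) throws IOException {
    Path dir = new Path("/foo");
    dfs.mkdirs(dir);
    // Admin step: a directory must be marked snapshottable before snapshots are taken.
    dfs.allowSnapshot(dir);
    // createSnapshot returns the snapshot root, e.g. /foo/.snapshot/s1.
    Path s1 = dfs.createSnapshot(dir, "s1");
    System.out.println("created " + s1);
    dfs.renameSnapshot(dir, "s1", "s2");
    dfs.deleteSnapshot(dir, "s2");
    dfs.disallowSnapshot(dir);
}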

Example 19 with DistributedFileSystem

Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.

From the class TestOfflineImageViewerForContentSummary, method createOriginalFSImage.

/**
   * Create a populated namespace for later testing. Save its contents to a
   * data structure and store its fsimage location. We only want to generate
   * the fsimage file once and use it for multiple tests.
   */
@BeforeClass
public static void createOriginalFSImage() throws IOException {
    MiniDFSCluster cluster = null;
    Configuration conf = new Configuration();
    try {
        cluster = new MiniDFSCluster.Builder(conf).build();
        cluster.waitActive();
        DistributedFileSystem hdfs = cluster.getFileSystem();
        Path parentDir = new Path("/parentDir");
        Path childDir1 = new Path(parentDir, "childDir1");
        Path childDir2 = new Path(parentDir, "childDir2");
        Path dirForLinks = new Path("/dirForLinks");
        hdfs.mkdirs(parentDir);
        hdfs.mkdirs(childDir1);
        hdfs.mkdirs(childDir2);
        hdfs.mkdirs(dirForLinks);
        hdfs.setQuota(parentDir, 10, 1024 * 1024 * 1024);
        Path file1OnParentDir = new Path(parentDir, "file1");
        try (FSDataOutputStream o = hdfs.create(file1OnParentDir)) {
            o.write("123".getBytes());
        }
        try (FSDataOutputStream o = hdfs.create(new Path(parentDir, "file2"))) {
            o.write("1234".getBytes());
        }
        try (FSDataOutputStream o = hdfs.create(new Path(childDir1, "file3"))) {
            o.write("123".getBytes());
        }
        try (FSDataOutputStream o = hdfs.create(new Path(parentDir, "file4"))) {
            o.write("123".getBytes());
        }
        Path link1 = new Path("/link1");
        Path link2 = new Path("/dirForLinks/linkfordir1");
        hdfs.createSymlink(new Path("/parentDir/file4"), link1, true);
        summaryFromDFS = hdfs.getContentSummary(parentDir);
        emptyDirSummaryFromDFS = hdfs.getContentSummary(childDir2);
        fileSummaryFromDFS = hdfs.getContentSummary(file1OnParentDir);
        symLinkSummaryFromDFS = hdfs.getContentSummary(link1);
        hdfs.createSymlink(childDir1, link2, true);
        symLinkSummaryForDirContainsFromDFS = hdfs.getContentSummary(new Path("/dirForLinks"));
        // Write results to the fsimage file
        hdfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER, false);
        hdfs.saveNamespace();
        // Determine the location of the fsimage file
        originalFsimage = FSImageTestUtil.findLatestImageFile(FSImageTestUtil
                .getFSImage(cluster.getNameNode()).getStorage().getStorageDir(0));
        if (originalFsimage == null) {
            throw new RuntimeException("Didn't generate or can't find fsimage");
        }
        LOG.debug("original FS image file is " + originalFsimage);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), BeforeClass (org.junit.BeforeClass)
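
The ContentSummary objects captured above (org.apache.hadoop.fs.ContentSummary) carry the counters that the offline image viewer output is later compared against; a minimal sketch of reading them back with the stock accessors:

public static void printSummary(ContentSummary cs) {
    System.out.println("length (bytes): " + cs.getLength());
    System.out.println("files:          " + cs.getFileCount());
    System.out.println("directories:    " + cs.getDirectoryCount());
    // Quotas are -1 when unset; /parentDir above has a name quota of 10
    // and a space quota of 1 GB from the setQuota call.
    System.out.println("name quota:     " + cs.getQuota());
    System.out.println("space quota:    " + cs.getSpaceQuota());
    System.out.println("space consumed: " + cs.getSpaceConsumed());
}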

Example 20 with DistributedFileSystem

Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.

From the class TestOfflineImageViewerForXAttr, method createOriginalFSImage.

/**
   * Create a populated namespace for later testing. Save its contents to a data
   * structure and store its fsimage location. We only want to generate the
   * fsimage file once and use it for multiple tests.
   */
@BeforeClass
public static void createOriginalFSImage() throws IOException {
    MiniDFSCluster cluster = null;
    Configuration conf = new Configuration();
    try {
        cluster = new MiniDFSCluster.Builder(conf).build();
        cluster.waitActive();
        DistributedFileSystem hdfs = cluster.getFileSystem();
        // Create a namespace with extended attributes (xattrs)
        Path dir = new Path("/dir1");
        hdfs.mkdirs(dir);
        hdfs.setXAttr(dir, "user.attr1", "value1".getBytes());
        hdfs.setXAttr(dir, "user.attr2", "value2".getBytes());
        // Write results to the fsimage file
        hdfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER, false);
        hdfs.saveNamespace();
        List<XAttr> attributes = new ArrayList<XAttr>();
        attributes.add(XAttrHelper.buildXAttr("user.attr1", "value1".getBytes()));
        attr1JSon = JsonUtil.toJsonString(attributes, null);
        attributes.add(XAttrHelper.buildXAttr("user.attr2", "value2".getBytes()));
        // Determine the location of the fsimage file
        originalFsimage = FSImageTestUtil.findLatestImageFile(FSImageTestUtil
                .getFSImage(cluster.getNameNode()).getStorage().getStorageDir(0));
        if (originalFsimage == null) {
            throw new RuntimeException("Didn't generate or can't find fsimage");
        }
        LOG.debug("original FS image file is " + originalFsimage);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), ArrayList (java.util.ArrayList), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), XAttr (org.apache.hadoop.fs.XAttr), BeforeClass (org.junit.BeforeClass)
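
A later test would read those attributes back through the stock FileSystem xattr accessors (getXAttr, getXAttrs, listXAttrs, removeXAttr); a minimal sketch, assuming the hdfs handle and /dir1 path from the setup above, plus java.util.List and java.util.Map imports:

public static void readXAttrsBack(DistributedFileSystem hdfs) throws IOException {
    Path dir = new Path("/dir1");
    byte[] one = hdfs.getXAttr(dir, "user.attr1");  // "value1".getBytes()
    Map<String, byte[]> all = hdfs.getXAttrs(dir);  // both attributes
    List<String> names = hdfs.listXAttrs(dir);      // expect user.attr1 and user.attr2
    System.out.println(names + " (" + all.size() + " attrs), attr1=" + new String(one));
    hdfs.removeXAttr(dir, "user.attr2");
}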

Aggregations

DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 252 usages
Test (org.junit.Test): 175 usages
Path (org.apache.hadoop.fs.Path): 169 usages
Configuration (org.apache.hadoop.conf.Configuration): 126 usages
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 126 usages
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 86 usages
IOException (java.io.IOException): 63 usages
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 36 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 31 usages
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 31 usages
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 26 usages
URI (java.net.URI): 24 usages
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 22 usages
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 19 usages
AccessControlException (org.apache.hadoop.security.AccessControlException): 19 usages
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 18 usages
Matchers.anyString (org.mockito.Matchers.anyString): 18 usages
FileStatus (org.apache.hadoop.fs.FileStatus): 16 usages
ArrayList (java.util.ArrayList): 14 usages
CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo): 14 usages