Example 36 with MiniDFSCluster

Use of org.apache.hadoop.hdfs.MiniDFSCluster in the Apache Hadoop project.

From class TestWebHDFS, method testRaceWhileNNStartup.

/**
   * Make sure a RetriableException is thrown when rpcServer is null in
   * NamenodeWebHdfsMethods.
   */
@Test
public void testRaceWhileNNStartup() throws Exception {
    MiniDFSCluster cluster = null;
    final Configuration conf = WebHdfsTestUtil.createConf();
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
        cluster.waitActive();
        final NameNode namenode = cluster.getNameNode();
        final NamenodeProtocols rpcServer = namenode.getRpcServer();
        Whitebox.setInternalState(namenode, "rpcServer", null);
        final Path foo = new Path("/foo");
        final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME);
        try {
            webHdfs.mkdirs(foo);
            fail("Expected RetriableException");
        } catch (RetriableException e) {
            GenericTestUtils.assertExceptionContains("Namenode is in startup mode", e);
        }
        Whitebox.setInternalState(namenode, "rpcServer", rpcServer);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), NameNode (org.apache.hadoop.hdfs.server.namenode.NameNode), NamenodeProtocols (org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), FileSystem (org.apache.hadoop.fs.FileSystem), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), RetriableException (org.apache.hadoop.ipc.RetriableException), Test (org.junit.Test), HttpServerFunctionalTest (org.apache.hadoop.http.HttpServerFunctionalTest)
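
A side note on the lifecycle boilerplate: every example here wraps the cluster in try/finally. On Hadoop releases where MiniDFSCluster implements AutoCloseable (2.8 and later), try-with-resources can replace the finally block; a minimal sketch under that assumption:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniClusterLifecycle {
    public static void main(String[] args) throws Exception {
        final Configuration conf = new Configuration();
        // close() delegates to shutdown(), so no finally block is needed
        // (assumes a Hadoop version where MiniDFSCluster is AutoCloseable).
        try (MiniDFSCluster cluster =
                new MiniDFSCluster.Builder(conf).numDataNodes(0).build()) {
            cluster.waitActive();
            // ... exercise the NameNode here ...
        }
    }
}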

Example 37 with MiniDFSCluster

Use of org.apache.hadoop.hdfs.MiniDFSCluster in the Apache Hadoop project.

From class TestWebHDFS, method testStoragePolicy.

@Test
public void testStoragePolicy() throws Exception {
    MiniDFSCluster cluster = null;
    final Configuration conf = WebHdfsTestUtil.createConf();
    final Path path = new Path("/file");
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
        final DistributedFileSystem dfs = cluster.getFileSystem();
        final WebHdfsFileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME);
        // test getAllStoragePolicies
        // toArray(T[]) avoids the unchecked cast from Object[] that a
        // plain toArray() would need, which only works by accident when
        // the backing collection happens to return a typed array
        BlockStoragePolicy[] dfsPolicies =
                dfs.getAllStoragePolicies().toArray(new BlockStoragePolicy[0]);
        BlockStoragePolicy[] webHdfsPolicies =
                webHdfs.getAllStoragePolicies().toArray(new BlockStoragePolicy[0]);
        Assert.assertArrayEquals(dfsPolicies, webHdfsPolicies);
        // test get/set/unset policies
        DFSTestUtil.createFile(dfs, path, 0, (short) 1, 0L);
        // get defaultPolicy
        BlockStoragePolicySpi defaultdfsPolicy = dfs.getStoragePolicy(path);
        // set policy through webhdfs
        webHdfs.setStoragePolicy(path, HdfsConstants.COLD_STORAGE_POLICY_NAME);
        // get policy from dfs
        BlockStoragePolicySpi dfsPolicy = dfs.getStoragePolicy(path);
        // get policy from webhdfs
        BlockStoragePolicySpi webHdfsPolicy = webHdfs.getStoragePolicy(path);
        Assert.assertEquals(HdfsConstants.COLD_STORAGE_POLICY_NAME, webHdfsPolicy.getName());
        Assert.assertEquals(webHdfsPolicy, dfsPolicy);
        // unset policy
        webHdfs.unsetStoragePolicy(path);
        Assert.assertEquals(defaultdfsPolicy, webHdfs.getStoragePolicy(path));
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), BlockStoragePolicy (org.apache.hadoop.hdfs.protocol.BlockStoragePolicy), BlockStoragePolicySpi (org.apache.hadoop.fs.BlockStoragePolicySpi), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), Test (org.junit.Test), HttpServerFunctionalTest (org.apache.hadoop.http.HttpServerFunctionalTest)
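
Under the hood, webHdfs.setStoragePolicy(path, "COLD") maps to a single WebHDFS REST call. A hedged sketch of the equivalent raw request, assuming a NameNode HTTP address of localhost:9870 (the Hadoop 3 default; Hadoop 2 uses 50070) and the documented SETSTORAGEPOLICY op:

import java.net.HttpURLConnection;
import java.net.URL;

public class SetPolicyRaw {
    public static void main(String[] args) throws Exception {
        // Hypothetical raw equivalent of webHdfs.setStoragePolicy(path, "COLD");
        // host, port, and path here are assumptions for illustration.
        URL url = new URL("http://localhost:9870/webhdfs/v1/file"
                + "?op=SETSTORAGEPOLICY&storagepolicy=COLD");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("PUT");
        System.out.println(conn.getResponseCode()); // expect 200 on success
        conn.disconnect();
    }
}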

Example 38 with MiniDFSCluster

Use of org.apache.hadoop.hdfs.MiniDFSCluster in the Apache Hadoop project.

From class TestWebHDFS, method testWebHdfsCreateNonRecursive.

@Test
public void testWebHdfsCreateNonRecursive() throws IOException, URISyntaxException {
    MiniDFSCluster cluster = null;
    final Configuration conf = WebHdfsTestUtil.createConf();
    WebHdfsFileSystem webHdfs = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).build();
        cluster.waitActive();
        webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME);
        TestFileCreation.testFileCreationNonRecursive(webHdfs);
    } finally {
        if (webHdfs != null) {
            webHdfs.close();
        }
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), Test (org.junit.Test), HttpServerFunctionalTest (org.apache.hadoop.http.HttpServerFunctionalTest)
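
The delegated TestFileCreation.testFileCreationNonRecursive helper exercises FileSystem#createNonRecursive, which, unlike create(), must fail rather than create missing parent directories. A small hypothetical illustration of that contract (the method and path names below are ours, not the helper's):

import java.io.FileNotFoundException;
import java.util.EnumSet;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class NonRecursiveContract {
    // Assumes `fs` points at a running cluster (e.g. the webHdfs instance above).
    static void expectNonRecursiveFailure(FileSystem fs) throws Exception {
        final Path file = new Path("/no/such/parent/file");
        try {
            fs.createNonRecursive(file, FsPermission.getFileDefault(),
                    EnumSet.of(CreateFlag.CREATE), 4096, (short) 1,
                    fs.getDefaultBlockSize(file), null).close();
            throw new AssertionError("Expected FileNotFoundException");
        } catch (FileNotFoundException expected) {
            // the parent is absent, so the non-recursive create must fail
        }
    }
}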

Example 39 with MiniDFSCluster

Use of org.apache.hadoop.hdfs.MiniDFSCluster in the Apache Hadoop project.

From class TestWebHDFS, method testDTInInsecureCluster.

@Test
public void testDTInInsecureCluster() throws Exception {
    MiniDFSCluster cluster = null;
    final Configuration conf = WebHdfsTestUtil.createConf();
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
        final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME);
        webHdfs.getDelegationToken(null);
        fail("No exception is thrown.");
    } catch (AccessControlException ace) {
        Assert.assertTrue(ace.getMessage().startsWith(WebHdfsFileSystem.CANT_FALLBACK_TO_INSECURE_MSG));
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), FileSystem (org.apache.hadoop.fs.FileSystem), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), AccessControlException (org.apache.hadoop.security.AccessControlException), Test (org.junit.Test), HttpServerFunctionalTest (org.apache.hadoop.http.HttpServerFunctionalTest)
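
For contrast, when fallback to simple auth is explicitly allowed, an insecure cluster is expected to hand back a null token instead of throwing. A hedged sketch of that variant, reusing the conf and helpers from the test above (the fallback key is ipc.client.fallback-to-simple-auth-allowed):

// Hypothetical variant of the test body above; assumes the same imports
// plus org.apache.hadoop.fs.CommonConfigurationKeys.
conf.setBoolean(
        CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY,
        true);
final FileSystem fallbackFs =
        WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME);
Assert.assertNull(fallbackFs.getDelegationToken(null));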

Example 40 with MiniDFSCluster

Use of org.apache.hadoop.hdfs.MiniDFSCluster in the Apache Hadoop project.

From class TestWebHDFS, method testWebHdfsRenameSnapshot.

/**
   * Test snapshot rename through WebHdfs
   */
@Test
public void testWebHdfsRenameSnapshot() throws Exception {
    MiniDFSCluster cluster = null;
    final Configuration conf = WebHdfsTestUtil.createConf();
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
        cluster.waitActive();
        final DistributedFileSystem dfs = cluster.getFileSystem();
        final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME);
        final Path foo = new Path("/foo");
        dfs.mkdirs(foo);
        dfs.allowSnapshot(foo);
        webHdfs.createSnapshot(foo, "s1");
        final Path s1path = SnapshotTestHelper.getSnapshotRoot(foo, "s1");
        Assert.assertTrue(webHdfs.exists(s1path));
        // rename s1 to s2 with oldsnapshotName as null
        try {
            webHdfs.renameSnapshot(foo, null, "s2");
            fail("Expected IllegalArgumentException");
        } catch (RemoteException e) {
            Assert.assertEquals("Required param oldsnapshotname for " + "op: RENAMESNAPSHOT is null or empty", e.getLocalizedMessage());
        }
        // rename s1 to s2
        webHdfs.renameSnapshot(foo, "s1", "s2");
        assertFalse(webHdfs.exists(s1path));
        final Path s2path = SnapshotTestHelper.getSnapshotRoot(foo, "s2");
        Assert.assertTrue(webHdfs.exists(s2path));
        webHdfs.deleteSnapshot(foo, "s2");
        assertFalse(webHdfs.exists(s2path));
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), FileSystem (org.apache.hadoop.fs.FileSystem), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), RemoteException (org.apache.hadoop.ipc.RemoteException), Test (org.junit.Test), HttpServerFunctionalTest (org.apache.hadoop.http.HttpServerFunctionalTest)
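
Worth noting alongside the rename flow: createSnapshot also returns the snapshot root, which HDFS places under <dir>/.snapshot/<name>, the same value SnapshotTestHelper.getSnapshotRoot builds. A small hypothetical check of that layout, reusing webHdfs and foo from the test above:

// Hypothetical addition to the test body above.
final Path snapshotRoot = webHdfs.createSnapshot(foo, "s1");
// Expected layout: /foo/.snapshot/s1
Assert.assertTrue(webHdfs.exists(snapshotRoot));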

Aggregations

MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 507
Test (org.junit.Test): 429
Configuration (org.apache.hadoop.conf.Configuration): 403
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 312
Path (org.apache.hadoop.fs.Path): 290
FileSystem (org.apache.hadoop.fs.FileSystem): 211
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 183
IOException (java.io.IOException): 107
File (java.io.File): 83
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 64
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 53
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 35
RandomAccessFile (java.io.RandomAccessFile): 33
MetricsRecordBuilder (org.apache.hadoop.metrics2.MetricsRecordBuilder): 33
URI (java.net.URI): 31
ArrayList (java.util.ArrayList): 29
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 28
FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem): 26
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 25
HttpServerFunctionalTest (org.apache.hadoop.http.HttpServerFunctionalTest): 24