
Example 6 with RemoteException

use of org.apache.hadoop.ipc.RemoteException in project hadoop by apache.

the class TestSafeMode method testSafeModeWhenZeroBlockLocations.

@Test
public void testSafeModeWhenZeroBlockLocations() throws IOException {
    try {
        Path file1 = new Path("/tmp/testManualSafeMode/file1");
        Path file2 = new Path("/tmp/testManualSafeMode/file2");
        System.out.println("Created file1 and file2.");
        // create two files with one block each.
        DFSTestUtil.createFile(fs, file1, 1000, (short) 1, 0);
        DFSTestUtil.createFile(fs, file2, 2000, (short) 1, 0);
        checkGetBlockLocationsWorks(fs, file1);
        NameNode namenode = cluster.getNameNode();
        // manually set safemode.
        dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
        assertTrue("should still be in SafeMode", namenode.isInSafeMode());
        // getBlockLocations should still work since block locations exist
        checkGetBlockLocationsWorks(fs, file1);
        dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
        assertFalse("should not be in SafeMode", namenode.isInSafeMode());
        // Second part of the test: no block locations are available
        cluster.shutdownDataNodes();
        cluster.shutdownNameNode(0);
        // now bring up just the NameNode.
        cluster.restartNameNode();
        cluster.waitActive();
        System.out.println("Restarted cluster with just the NameNode");
        namenode = cluster.getNameNode();
        assertTrue("No datanode is started. Should be in SafeMode", namenode.isInSafeMode());
        FileStatus stat = fs.getFileStatus(file1);
        try {
            fs.getFileBlockLocations(stat, 0, 1000);
            assertTrue("Should have got safemode exception", false);
        } catch (SafeModeException e) {
        // as expected 
        } catch (RemoteException re) {
            if (!re.getClassName().equals(SafeModeException.class.getName()))
                fail("Should have got safemode exception");
        }
        dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
        assertFalse("Should not be in safemode", namenode.isInSafeMode());
        checkGetBlockLocationsWorks(fs, file1);
    } finally {
        if (fs != null)
            fs.close();
        if (cluster != null)
            cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) NameNode(org.apache.hadoop.hdfs.server.namenode.NameNode) FileStatus(org.apache.hadoop.fs.FileStatus) SafeModeException(org.apache.hadoop.hdfs.server.namenode.SafeModeException) RemoteException(org.apache.hadoop.ipc.RemoteException) Test(org.junit.Test)
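
The pattern above, catching either a local SafeModeException or a RemoteException and comparing getClassName() against the expected server-side class, recurs in several HDFS tests. Below is a minimal sketch of that check factored into a helper; the class and method names are illustrative, not part of the Hadoop test code.

import java.io.IOException;
import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
import org.apache.hadoop.ipc.RemoteException;

final class SafeModeAsserts {
    // Sketch only: passes if the thrown exception is a SafeModeException, either
    // directly or wrapped in a RemoteException from the NameNode RPC layer.
    static void assertIsSafeModeException(IOException thrown) {
        if (thrown instanceof SafeModeException) {
            return;
        }
        if (thrown instanceof RemoteException) {
            // getClassName() reports the class of the exception raised on the server.
            String remoteClass = ((RemoteException) thrown).getClassName();
            if (SafeModeException.class.getName().equals(remoteClass)) {
                return;
            }
        }
        throw new AssertionError("Expected SafeModeException but got: " + thrown, thrown);
    }
}

A caller would still need the preceding fail-style assertion to cover the case where no exception is thrown at all.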

Example 7 with RemoteException

use of org.apache.hadoop.ipc.RemoteException in project hadoop by apache.

the class TestRequestHedgingProxyProvider method testHedgingWhenFileNotFoundException.

@Test
public void testHedgingWhenFileNotFoundException() throws Exception {
    NamenodeProtocols active = Mockito.mock(NamenodeProtocols.class);
    Mockito.when(active.getBlockLocations(Matchers.anyString(), Matchers.anyLong(), Matchers.anyLong())).thenThrow(new RemoteException("java.io.FileNotFoundException", "File does not exist!"));
    NamenodeProtocols standby = Mockito.mock(NamenodeProtocols.class);
    Mockito.when(standby.getBlockLocations(Matchers.anyString(), Matchers.anyLong(), Matchers.anyLong())).thenThrow(new RemoteException("org.apache.hadoop.ipc.StandbyException", "Standby NameNode"));
    RequestHedgingProxyProvider<NamenodeProtocols> provider = new RequestHedgingProxyProvider<>(conf, nnUri, NamenodeProtocols.class, createFactory(active, standby));
    try {
        provider.getProxy().proxy.getBlockLocations("/tmp/test.file", 0L, 20L);
        Assert.fail("Should fail since the active namenode throws" + " FileNotFoundException!");
    } catch (MultiException me) {
        for (Exception ex : me.getExceptions().values()) {
            Exception rEx = ((RemoteException) ex).unwrapRemoteException();
            if (rEx instanceof StandbyException) {
                continue;
            }
            Assert.assertTrue(rEx instanceof FileNotFoundException);
        }
    }
    Mockito.verify(active).getBlockLocations(Matchers.anyString(), Matchers.anyLong(), Matchers.anyLong());
    Mockito.verify(standby).getBlockLocations(Matchers.anyString(), Matchers.anyLong(), Matchers.anyLong());
}
Also used : NamenodeProtocols(org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols) StandbyException(org.apache.hadoop.ipc.StandbyException) FileNotFoundException(java.io.FileNotFoundException) RemoteException(org.apache.hadoop.ipc.RemoteException) MultiException(org.apache.hadoop.io.retry.MultiException) URISyntaxException(java.net.URISyntaxException) ConnectException(java.net.ConnectException) IOException(java.io.IOException) EOFException(java.io.EOFException) Test(org.junit.Test)
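
Besides the no-argument unwrapRemoteException() used above, RemoteException has a typed overload, unwrapRemoteException(Class...), which reconstructs the wrapped exception only when its class matches one of the supplied lookup types and otherwise returns the RemoteException itself. Here is a hedged sketch of the same check written with that overload; the class and method names are illustrative.

import java.io.FileNotFoundException;
import java.io.IOException;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.StandbyException;

final class HedgingAsserts {
    // Sketch only: classify one exception collected by the hedging proxy.
    static void checkHedgedFailure(RemoteException re) {
        IOException unwrapped = re.unwrapRemoteException(
                StandbyException.class, FileNotFoundException.class);
        if (unwrapped instanceof StandbyException) {
            return; // expected from the standby NameNode
        }
        if (!(unwrapped instanceof FileNotFoundException)) {
            // No lookup type matched, so unwrapRemoteException returned 're' itself.
            throw new AssertionError("Unexpected remote exception: " + re.getClassName());
        }
    }
}

Listing the expected types up front makes the intent of the assertion explicit instead of relying on instanceof checks against whatever happened to be unwrapped.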

Example 8 with RemoteException

use of org.apache.hadoop.ipc.RemoteException in project hadoop by apache.

the class TestWebHDFS method testWebHdfsRenameSnapshot.

/**
   * Test snapshot rename through WebHdfs
   */
@Test
public void testWebHdfsRenameSnapshot() throws Exception {
    MiniDFSCluster cluster = null;
    final Configuration conf = WebHdfsTestUtil.createConf();
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
        cluster.waitActive();
        final DistributedFileSystem dfs = cluster.getFileSystem();
        final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME);
        final Path foo = new Path("/foo");
        dfs.mkdirs(foo);
        dfs.allowSnapshot(foo);
        webHdfs.createSnapshot(foo, "s1");
        final Path s1path = SnapshotTestHelper.getSnapshotRoot(foo, "s1");
        Assert.assertTrue(webHdfs.exists(s1path));
        // rename s1 to s2 with oldsnapshotName as null
        try {
            webHdfs.renameSnapshot(foo, null, "s2");
            fail("Expected IllegalArgumentException");
        } catch (RemoteException e) {
            Assert.assertEquals("Required param oldsnapshotname for " + "op: RENAMESNAPSHOT is null or empty", e.getLocalizedMessage());
        }
        // rename s1 to s2
        webHdfs.renameSnapshot(foo, "s1", "s2");
        assertFalse(webHdfs.exists(s1path));
        final Path s2path = SnapshotTestHelper.getSnapshotRoot(foo, "s2");
        Assert.assertTrue(webHdfs.exists(s2path));
        webHdfs.deleteSnapshot(foo, "s2");
        assertFalse(webHdfs.exists(s2path));
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) FileSystem(org.apache.hadoop.fs.FileSystem) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) RemoteException(org.apache.hadoop.ipc.RemoteException) Test(org.junit.Test) HttpServerFunctionalTest(org.apache.hadoop.http.HttpServerFunctionalTest)
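
WebHDFS negative tests like the rename-with-null-name case above tend to repeat the same try/fail/catch scaffolding around a single call. The following is a small sketch of a generic helper for that pattern, assuming nothing beyond JUnit and java.util.concurrent.Callable; the class and method names are illustrative.

import java.util.concurrent.Callable;
import org.apache.hadoop.ipc.RemoteException;
import org.junit.Assert;

final class RemoteExceptionExpectations {
    // Sketch only: run the call and assert it fails with a RemoteException
    // carrying exactly the expected message.
    static void expectRemoteMessage(String expectedMessage, Callable<?> call)
            throws Exception {
        try {
            call.call();
            Assert.fail("Expected RemoteException: " + expectedMessage);
        } catch (RemoteException e) {
            Assert.assertEquals(expectedMessage, e.getLocalizedMessage());
        }
    }
}

With such a helper, the null-oldsnapshotname check could be written as expectRemoteMessage("Required param oldsnapshotname for op: RENAMESNAPSHOT is null or empty", () -> webHdfs.renameSnapshot(foo, null, "s2")).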

Example 9 with RemoteException

use of org.apache.hadoop.ipc.RemoteException in project hadoop by apache.

the class TestFileStatus method testGetFileInfo.

/** Test calling getFileInfo directly on the client */
@Test
public void testGetFileInfo() throws IOException {
    // Check that / exists
    Path path = new Path("/");
    assertTrue("/ should be a directory", fs.getFileStatus(path).isDirectory());
    // Make sure getFileInfo returns null for files which do not exist
    HdfsFileStatus fileInfo = dfsClient.getFileInfo("/noSuchFile");
    assertEquals("Non-existant file should result in null", null, fileInfo);
    Path path1 = new Path("/name1");
    Path path2 = new Path("/name1/name2");
    assertTrue(fs.mkdirs(path1));
    FSDataOutputStream out = fs.create(path2, false);
    out.close();
    fileInfo = dfsClient.getFileInfo(path1.toString());
    assertEquals(1, fileInfo.getChildrenNum());
    fileInfo = dfsClient.getFileInfo(path2.toString());
    assertEquals(0, fileInfo.getChildrenNum());
    // Test getFileInfo throws the right exception given a non-absolute path.
    try {
        dfsClient.getFileInfo("non-absolute");
        fail("getFileInfo for a non-absolute path did not throw IOException");
    } catch (RemoteException re) {
        assertTrue("Wrong exception for invalid file name: " + re, re.toString().contains("Absolute path required"));
    }
}
Also used : Path(org.apache.hadoop.fs.Path) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) RemoteException(org.apache.hadoop.ipc.RemoteException) Test(org.junit.Test)
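
The substring check on re.toString() can also be expressed with Hadoop's own test helper, GenericTestUtils.assertExceptionContains, assuming the hadoop-common test artifact is on the test classpath. A minimal sketch; the wrapper class and method names are illustrative.

import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.test.GenericTestUtils;

final class PathAsserts {
    // Sketch only: fails with a descriptive message unless the exception text
    // contains the expected fragment.
    static void assertAbsolutePathRequired(RemoteException re) {
        GenericTestUtils.assertExceptionContains("Absolute path required", re);
    }
}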

Example 10 with RemoteException

use of org.apache.hadoop.ipc.RemoteException in project hadoop by apache.

the class TestDiskBalancerCommand method testSubmitPlanInNonRegularStatus.

/**
   * Tests that submitting and executing a plan is rejected when the Datanode
   * is in a status other than REGULAR.
   */
@Test(timeout = 60000)
public void testSubmitPlanInNonRegularStatus() throws Exception {
    final int numDatanodes = 1;
    MiniDFSCluster miniCluster = null;
    final Configuration hdfsConf = new HdfsConfiguration();
    try {
        /* new cluster with imbalanced capacity */
        miniCluster = DiskBalancerTestUtil.newImbalancedCluster(hdfsConf, numDatanodes, CAPACITIES, DEFAULT_BLOCK_SIZE, FILE_LEN, StartupOption.ROLLBACK);
        /* get full path of plan */
        final String planFileFullName = runAndVerifyPlan(miniCluster, hdfsConf);
        try {
            /* run execute command */
            final String cmdLine = String.format("hdfs diskbalancer -%s %s", EXECUTE, planFileFullName);
            runCommand(cmdLine, hdfsConf, miniCluster);
        } catch (RemoteException e) {
            assertThat(e.getClassName(), containsString("DiskBalancerException"));
            assertThat(e.toString(), is(allOf(containsString("Datanode is in special state"), containsString("Disk balancing not permitted."))));
        }
    } finally {
        if (miniCluster != null) {
            miniCluster.shutdown();
        }
    }
}
Also used : MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) CoreMatchers.containsString(org.hamcrest.CoreMatchers.containsString) RemoteException(org.apache.hadoop.ipc.RemoteException) Test(org.junit.Test)

Aggregations

RemoteException (org.apache.hadoop.ipc.RemoteException): 99
IOException (java.io.IOException): 53
Test (org.junit.Test): 39
Path (org.apache.hadoop.fs.Path): 36
Configuration (org.apache.hadoop.conf.Configuration): 20
FileNotFoundException (java.io.FileNotFoundException): 19
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 13
FileSystem (org.apache.hadoop.fs.FileSystem): 12
InterruptedIOException (java.io.InterruptedIOException): 10
AccessControlException (org.apache.hadoop.security.AccessControlException): 10
ServerName (org.apache.hadoop.hbase.ServerName): 9
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 8
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 8
FileAlreadyExistsException (org.apache.hadoop.fs.FileAlreadyExistsException): 7
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 7
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 7
EOFException (java.io.EOFException): 6
ArrayList (java.util.ArrayList): 6
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 6
HBaseIOException (org.apache.hadoop.hbase.HBaseIOException): 6