
Example 91 with RemoteException

Use of org.apache.hadoop.ipc.RemoteException in the Apache Hadoop project.

From the class TestRequestHedgingProxyProvider, method testHedgingWhenConnectException.

@Test
public void testHedgingWhenConnectException() throws Exception {
    NamenodeProtocols active = Mockito.mock(NamenodeProtocols.class);
    Mockito.when(active.getStats()).thenThrow(new ConnectException());
    NamenodeProtocols standby = Mockito.mock(NamenodeProtocols.class);
    Mockito.when(standby.getStats()).thenThrow(new RemoteException("org.apache.hadoop.ipc.StandbyException", "Standby NameNode"));
    RequestHedgingProxyProvider<NamenodeProtocols> provider = new RequestHedgingProxyProvider<>(conf, nnUri, NamenodeProtocols.class, createFactory(active, standby));
    try {
        provider.getProxy().proxy.getStats();
        Assert.fail("Should fail since the active namenode throws" + " ConnectException!");
    } catch (MultiException me) {
        for (Exception ex : me.getExceptions().values()) {
            if (ex instanceof RemoteException) {
                Exception rEx = ((RemoteException) ex).unwrapRemoteException();
                Assert.assertTrue("Unexpected RemoteException: " + rEx.getMessage(), rEx instanceof StandbyException);
            } else {
                Assert.assertTrue(ex instanceof ConnectException);
            }
        }
    }
    Mockito.verify(active).getStats();
    Mockito.verify(standby).getStats();
}
Also used: NamenodeProtocols (org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols), StandbyException (org.apache.hadoop.ipc.StandbyException), RemoteException (org.apache.hadoop.ipc.RemoteException), MultiException (org.apache.hadoop.io.retry.MultiException), URISyntaxException (java.net.URISyntaxException), ConnectException (java.net.ConnectException), IOException (java.io.IOException), EOFException (java.io.EOFException), FileNotFoundException (java.io.FileNotFoundException), Test (org.junit.Test)
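
The mocked standby above wraps a StandbyException inside a RemoteException by class name, and the catch block recovers it with unwrapRemoteException(). A minimal standalone sketch of that round trip (not part of the Hadoop test; the class name below is illustrative):

import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.StandbyException;

public class UnwrapDemo {
    public static void main(String[] args) {
        // Build a RemoteException the way the mocked standby NameNode does.
        RemoteException re = new RemoteException(
                "org.apache.hadoop.ipc.StandbyException", "Standby NameNode");
        // With no arguments, unwrapRemoteException() re-instantiates the
        // server-side exception class named inside the RemoteException.
        Exception unwrapped = re.unwrapRemoteException();
        System.out.println(unwrapped instanceof StandbyException);  // expected: true
    }
}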

Example 92 with RemoteException

Use of org.apache.hadoop.ipc.RemoteException in the Apache Hadoop project.

From the class TestSnapshot, method testCreateSnapshotWithIllegalName.

/**
   * Test creating a snapshot with an illegal name.
   */
@Test
public void testCreateSnapshotWithIllegalName() throws Exception {
    final Path dir = new Path("/dir");
    hdfs.mkdirs(dir);
    final String name1 = HdfsConstants.DOT_SNAPSHOT_DIR;
    try {
        hdfs.createSnapshot(dir, name1);
        fail("Exception expected when an illegal name is given");
    } catch (RemoteException e) {
        String errorMsg = "Invalid path name Invalid snapshot name: " + name1;
        GenericTestUtils.assertExceptionContains(errorMsg, e);
    }
    final String[] badNames = new String[] { "foo" + Path.SEPARATOR, Path.SEPARATOR + "foo", Path.SEPARATOR, "foo" + Path.SEPARATOR + "bar" };
    for (String badName : badNames) {
        try {
            hdfs.createSnapshot(dir, badName);
            fail("Exception expected when an illegal name is given");
        } catch (RemoteException e) {
            String errorMsg = "Invalid path name Invalid snapshot name: " + badName;
            GenericTestUtils.assertExceptionContains(errorMsg, e);
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), RemoteException (org.apache.hadoop.ipc.RemoteException), Test (org.junit.Test)
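
The test above matches on the error message text. As a sketch only (the helper below is hypothetical, not part of the test), the same information can be read off the RemoteException directly, since it records the server-side exception class and message:

import org.apache.hadoop.ipc.RemoteException;

public class RemoteExceptionInfo {
    // Hypothetical helper: describe the server-side exception carried by a RemoteException.
    static String describe(RemoteException e) {
        // getClassName() is the fully qualified name of the exception thrown on
        // the NameNode; getMessage() is its message text.
        return e.getClassName() + ": " + e.getMessage();
    }
}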

Example 93 with RemoteException

Use of org.apache.hadoop.ipc.RemoteException in the Apache Hadoop project.

From the class TestWebHDFS, method testWebHdfsDeleteSnapshot.

/**
   * Test snapshot deletion through WebHdfs
   */
@Test
public void testWebHdfsDeleteSnapshot() throws Exception {
    MiniDFSCluster cluster = null;
    final Configuration conf = WebHdfsTestUtil.createConf();
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
        cluster.waitActive();
        final DistributedFileSystem dfs = cluster.getFileSystem();
        final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME);
        final Path foo = new Path("/foo");
        dfs.mkdirs(foo);
        dfs.allowSnapshot(foo);
        webHdfs.createSnapshot(foo, "s1");
        final Path spath = webHdfs.createSnapshot(foo, null);
        Assert.assertTrue(webHdfs.exists(spath));
        final Path s1path = SnapshotTestHelper.getSnapshotRoot(foo, "s1");
        Assert.assertTrue(webHdfs.exists(s1path));
        // delete operation with the snapshot name as null
        try {
            webHdfs.deleteSnapshot(foo, null);
            fail("Expected IllegalArgumentException");
        } catch (RemoteException e) {
            Assert.assertEquals("Required param snapshotname for " + "op: DELETESNAPSHOT is null or empty", e.getLocalizedMessage());
        }
        // delete the two snapshots
        webHdfs.deleteSnapshot(foo, "s1");
        assertFalse(webHdfs.exists(s1path));
        webHdfs.deleteSnapshot(foo, spath.getName());
        assertFalse(webHdfs.exists(spath));
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), FileSystem (org.apache.hadoop.fs.FileSystem), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), RemoteException (org.apache.hadoop.ipc.RemoteException), Test (org.junit.Test), HttpServerFunctionalTest (org.apache.hadoop.http.HttpServerFunctionalTest)
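
For reference, the snapshot root that the test checks with exists() lives under the reserved ".snapshot" directory of the snapshottable path, e.g. /foo/.snapshot/s1 for snapshot "s1" of /foo. A small illustrative sketch (not part of the test) building that path:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;

public class SnapshotPathDemo {
    public static void main(String[] args) {
        Path foo = new Path("/foo");
        // DOT_SNAPSHOT_DIR is ".snapshot"; snapshots appear as its children.
        Path s1 = new Path(foo, HdfsConstants.DOT_SNAPSHOT_DIR + Path.SEPARATOR + "s1");
        System.out.println(s1);  // expected: /foo/.snapshot/s1
    }
}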

Example 94 with RemoteException

Use of org.apache.hadoop.ipc.RemoteException in the Apache Hadoop project.

From the class TestWebHDFSForHA, method testClientFailoverWhenStandbyNNHasStaleCredentials.

@Test
public void testClientFailoverWhenStandbyNNHasStaleCredentials() throws IOException {
    Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
    MiniDFSCluster cluster = null;
    WebHdfsFileSystem fs = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo).numDataNodes(0).build();
        HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);
        cluster.waitActive();
        fs = (WebHdfsFileSystem) FileSystem.get(WEBHDFS_URI, conf);
        cluster.transitionToActive(0);
        Token<?> token = fs.getDelegationToken(null);
        final DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
        identifier.readFields(new DataInputStream(new ByteArrayInputStream(token.getIdentifier())));
        cluster.transitionToStandby(0);
        cluster.transitionToActive(1);
        final DelegationTokenSecretManager secretManager = NameNodeAdapter.getDtSecretManager(cluster.getNamesystem(0));
        ExceptionHandler eh = new ExceptionHandler();
        eh.initResponse(mock(HttpServletResponse.class));
        Response resp = null;
        try {
            secretManager.retrievePassword(identifier);
        } catch (IOException e) {
            // Mimic the UserProvider class logic (server side) by throwing
            // SecurityException here
            Assert.assertTrue(e instanceof SecretManager.InvalidToken);
            resp = eh.toResponse(new SecurityException(e));
        }
        // The Response (resp) below is what the server will send to client
        //
        // BEFORE HDFS-6475 fix, the resp.entity is
        //     {"RemoteException":{"exception":"SecurityException",
        //      "javaClassName":"java.lang.SecurityException",
        //      "message":"Failed to obtain user group information:
        //      org.apache.hadoop.security.token.SecretManager$InvalidToken:
        //        StandbyException"}}
        // AFTER the fix, the resp.entity is
        //     {"RemoteException":{"exception":"StandbyException",
        //      "javaClassName":"org.apache.hadoop.ipc.StandbyException",
        //      "message":"Operation category READ is not supported in
        //       state standby"}}
        //
        // Mimic the client side logic by parsing the response from server
        //
        Map<?, ?> m = (Map<?, ?>) JSON.parse(resp.getEntity().toString());
        RemoteException re = JsonUtilClient.toRemoteException(m);
        Exception unwrapped = re.unwrapRemoteException(StandbyException.class);
        Assert.assertTrue(unwrapped instanceof StandbyException);
    } finally {
        IOUtils.cleanup(null, fs);
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), DelegationTokenIdentifier (org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier), HttpServletResponse (javax.servlet.http.HttpServletResponse), IOException (java.io.IOException), DataInputStream (java.io.DataInputStream), FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream), StandbyException (org.apache.hadoop.ipc.StandbyException), RemoteException (org.apache.hadoop.ipc.RemoteException), ExceptionHandler (org.apache.hadoop.hdfs.web.resources.ExceptionHandler), Response (javax.ws.rs.core.Response), DelegationTokenSecretManager (org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager), ByteArrayInputStream (java.io.ByteArrayInputStream), HashMap (java.util.HashMap), Map (java.util.Map), Test (org.junit.Test)
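
The comment block in the test quotes the JSON body WebHDFS returns after the HDFS-6475 fix. Below is a minimal standalone sketch of the client-side parse step; Jackson is used for parsing instead of the JSON helper in the test, and the RemoteException is rebuilt by hand from the two quoted fields, so treat it as an assumption-laden illustration rather than the actual WebHDFS client code path:

import java.util.Map;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.StandbyException;

public class WebHdfsErrorParseDemo {
    public static void main(String[] args) throws Exception {
        // Response body quoted in the test's "AFTER the fix" comment.
        String body = "{\"RemoteException\":{\"exception\":\"StandbyException\","
                + "\"javaClassName\":\"org.apache.hadoop.ipc.StandbyException\","
                + "\"message\":\"Operation category READ is not supported in state standby\"}}";
        Map<?, ?> outer = new ObjectMapper().readValue(body, Map.class);
        Map<?, ?> inner = (Map<?, ?>) outer.get("RemoteException");
        RemoteException re = new RemoteException(
                (String) inner.get("javaClassName"), (String) inner.get("message"));
        // The original server-side exception type can now be recovered on the client.
        System.out.println(re.unwrapRemoteException(StandbyException.class)
                instanceof StandbyException);  // expected: true
    }
}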

Example 95 with RemoteException

Use of org.apache.hadoop.ipc.RemoteException in the Apache Hadoop project.

From the class RMAdminCLI, method run.

@Override
public int run(String[] args) throws Exception {
    YarnConfiguration yarnConf = getConf() == null ? new YarnConfiguration() : new YarnConfiguration(getConf());
    boolean isHAEnabled = yarnConf.getBoolean(YarnConfiguration.RM_HA_ENABLED, YarnConfiguration.DEFAULT_RM_HA_ENABLED);
    if (args.length < 1) {
        printUsage("", isHAEnabled);
        return -1;
    }
    int exitCode = -1;
    int i = 0;
    String cmd = args[i++];
    exitCode = 0;
    if ("-help".equals(cmd)) {
        if (i < args.length) {
            printUsage(args[i], isHAEnabled);
        } else {
            printHelp("", isHAEnabled);
        }
        return exitCode;
    }
    if (USAGE.containsKey(cmd)) {
        if (isHAEnabled) {
            return super.run(args);
        }
        System.out.println("Cannot run " + cmd + " when ResourceManager HA is not enabled");
        return -1;
    }
    // Verify that these commands are given no extra command line parameters.
    if ("-refreshAdminAcls".equals(cmd) || "-refreshQueues".equals(cmd) || "-refreshNodesResources".equals(cmd) || "-refreshServiceAcl".equals(cmd) || "-refreshUserToGroupsMappings".equals(cmd) || "-refreshSuperUserGroupsConfiguration".equals(cmd)) {
        if (args.length != 1) {
            printUsage(cmd, isHAEnabled);
            return exitCode;
        }
    }
    try {
        if ("-refreshQueues".equals(cmd)) {
            exitCode = refreshQueues();
        } else if ("-refreshNodes".equals(cmd)) {
            exitCode = handleRefreshNodes(args, cmd, isHAEnabled);
        } else if ("-refreshNodesResources".equals(cmd)) {
            exitCode = refreshNodesResources();
        } else if ("-refreshUserToGroupsMappings".equals(cmd)) {
            exitCode = refreshUserToGroupsMappings();
        } else if ("-refreshSuperUserGroupsConfiguration".equals(cmd)) {
            exitCode = refreshSuperUserGroupsConfiguration();
        } else if ("-refreshAdminAcls".equals(cmd)) {
            exitCode = refreshAdminAcls();
        } else if ("-refreshServiceAcl".equals(cmd)) {
            exitCode = refreshServiceAcls();
        } else if ("-refreshClusterMaxPriority".equals(cmd)) {
            exitCode = refreshClusterMaxPriority();
        } else if ("-getGroups".equals(cmd)) {
            String[] usernames = Arrays.copyOfRange(args, i, args.length);
            exitCode = getGroups(usernames);
        } else if ("-updateNodeResource".equals(cmd)) {
            exitCode = handleUpdateNodeResource(args, cmd, isHAEnabled);
        } else if ("-addToClusterNodeLabels".equals(cmd)) {
            exitCode = handleAddToClusterNodeLabels(args, cmd, isHAEnabled);
        } else if ("-removeFromClusterNodeLabels".equals(cmd)) {
            exitCode = handleRemoveFromClusterNodeLabels(args, cmd, isHAEnabled);
        } else if ("-replaceLabelsOnNode".equals(cmd)) {
            exitCode = handleReplaceLabelsOnNodes(args, cmd, isHAEnabled);
        } else {
            exitCode = -1;
            System.err.println(cmd.substring(1) + ": Unknown command");
            printUsage("", isHAEnabled);
        }
    } catch (IllegalArgumentException arge) {
        exitCode = -1;
        System.err.println(cmd.substring(1) + ": " + arge.getLocalizedMessage());
        printUsage(cmd, isHAEnabled);
    } catch (RemoteException e) {
        //
        // This is an error returned by the Hadoop server. Print
        // out the first line of the error message, ignore the stack trace.
        exitCode = -1;
        try {
            String[] content;
            content = e.getLocalizedMessage().split("\n");
            System.err.println(cmd.substring(1) + ": " + content[0]);
        } catch (Exception ex) {
            System.err.println(cmd.substring(1) + ": " + ex.getLocalizedMessage());
        }
    } catch (Exception e) {
        exitCode = -1;
        System.err.println(cmd.substring(1) + ": " + e.getLocalizedMessage());
    }
    if (null != localNodeLabelsManager) {
        localNodeLabelsManager.stop();
    }
    return exitCode;
}
Also used: YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration), RemoteException (org.apache.hadoop.ipc.RemoteException), ParseException (org.apache.commons.cli.ParseException), YarnException (org.apache.hadoop.yarn.exceptions.YarnException), IOException (java.io.IOException), YarnRuntimeException (org.apache.hadoop.yarn.exceptions.YarnRuntimeException), MissingArgumentException (org.apache.commons.cli.MissingArgumentException)
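
The RemoteException handler above keeps only the first line of the server's message, since the remainder is typically the remote stack trace. A minimal sketch of that pattern as a reusable helper (hypothetical, not part of RMAdminCLI):

import org.apache.hadoop.ipc.RemoteException;

public class RemoteErrorFormat {
    // Return only the first line of the server-side error message; fall back to
    // the wrapped exception's class name if there is no message at all.
    static String firstLine(RemoteException e) {
        String msg = e.getLocalizedMessage();
        return msg == null ? e.getClassName() : msg.split("\n")[0];
    }
}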

Aggregations

RemoteException (org.apache.hadoop.ipc.RemoteException): 99
IOException (java.io.IOException): 53
Test (org.junit.Test): 39
Path (org.apache.hadoop.fs.Path): 36
Configuration (org.apache.hadoop.conf.Configuration): 20
FileNotFoundException (java.io.FileNotFoundException): 19
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 13
FileSystem (org.apache.hadoop.fs.FileSystem): 12
InterruptedIOException (java.io.InterruptedIOException): 10
AccessControlException (org.apache.hadoop.security.AccessControlException): 10
ServerName (org.apache.hadoop.hbase.ServerName): 9
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 8
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 8
FileAlreadyExistsException (org.apache.hadoop.fs.FileAlreadyExistsException): 7
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 7
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 7
EOFException (java.io.EOFException): 6
ArrayList (java.util.ArrayList): 6
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 6
HBaseIOException (org.apache.hadoop.hbase.HBaseIOException): 6