Use of org.apache.hadoop.ipc.RemoteException in project hadoop by apache.
From the class TestRequestHedgingProxyProvider, method testHedgingWhenConnectException:
@Test
public void testHedgingWhenConnectException() throws Exception {
  NamenodeProtocols active = Mockito.mock(NamenodeProtocols.class);
  Mockito.when(active.getStats()).thenThrow(new ConnectException());
  NamenodeProtocols standby = Mockito.mock(NamenodeProtocols.class);
  Mockito.when(standby.getStats()).thenThrow(
      new RemoteException("org.apache.hadoop.ipc.StandbyException", "Standby NameNode"));
  RequestHedgingProxyProvider<NamenodeProtocols> provider =
      new RequestHedgingProxyProvider<>(conf, nnUri, NamenodeProtocols.class,
          createFactory(active, standby));
  try {
    provider.getProxy().proxy.getStats();
    Assert.fail("Should fail since the active namenode throws"
        + " ConnectException!");
  } catch (MultiException me) {
    for (Exception ex : me.getExceptions().values()) {
      if (ex instanceof RemoteException) {
        Exception rEx = ((RemoteException) ex).unwrapRemoteException();
        Assert.assertTrue("Unexpected RemoteException: " + rEx.getMessage(),
            rEx instanceof StandbyException);
      } else {
        Assert.assertTrue(ex instanceof ConnectException);
      }
    }
  }
  Mockito.verify(active).getStats();
  Mockito.verify(standby).getStats();
}
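The key point in this test is that a RemoteException carries the server-side exception's class name as a string, and the no-argument unwrapRemoteException() call re-instantiates that type on the client; that is why the RemoteException built from "org.apache.hadoop.ipc.StandbyException" unwraps to a StandbyException. A minimal, self-contained sketch of that round trip (independent of the mocks above) is shown below.

import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.StandbyException;

public class RemoteExceptionUnwrapSketch {
  public static void main(String[] args) {
    // Build a RemoteException the way a server error is represented on the
    // wire: the wrapped exception's class name plus the original message.
    RemoteException re =
        new RemoteException(StandbyException.class.getName(), "Standby NameNode");
    // unwrapRemoteException() reflectively instantiates the named class;
    // for an IOException subclass such as StandbyException this yields the
    // original exception type, otherwise the RemoteException itself is returned.
    Exception unwrapped = re.unwrapRemoteException();
    System.out.println(unwrapped.getClass().getName());
    // Expected: org.apache.hadoop.ipc.StandbyException
  }
}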
Use of org.apache.hadoop.ipc.RemoteException in project hadoop by apache.
From the class TestSnapshot, method testCreateSnapshotWithIllegalName:
/**
 * Test creating a snapshot with illegal name
 */
@Test
public void testCreateSnapshotWithIllegalName() throws Exception {
  final Path dir = new Path("/dir");
  hdfs.mkdirs(dir);
  final String name1 = HdfsConstants.DOT_SNAPSHOT_DIR;
  try {
    hdfs.createSnapshot(dir, name1);
    fail("Exception expected when an illegal name is given");
  } catch (RemoteException e) {
    String errorMsg = "Invalid path name Invalid snapshot name: " + name1;
    GenericTestUtils.assertExceptionContains(errorMsg, e);
  }
  final String[] badNames = new String[] { "foo" + Path.SEPARATOR,
      Path.SEPARATOR + "foo", Path.SEPARATOR, "foo" + Path.SEPARATOR + "bar" };
  for (String badName : badNames) {
    try {
      hdfs.createSnapshot(dir, badName);
      fail("Exception expected when an illegal name is given");
    } catch (RemoteException e) {
      String errorMsg = "Invalid path name Invalid snapshot name: " + badName;
      GenericTestUtils.assertExceptionContains(errorMsg, e);
    }
  }
}
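The assertion helper used in both catch blocks, GenericTestUtils.assertExceptionContains, simply verifies that the expected fragment appears in the exception's text. A hedged, hypothetical stand-in (not the Hadoop implementation, whose matching details may differ) illustrates the pattern:

import org.junit.Assert;

public final class ExceptionContainsSketch {
  // Assert that the expected fragment occurs in the exception's string form.
  static void assertContains(String expected, Throwable t) {
    String text = String.valueOf(t); // class name plus message
    Assert.assertTrue("Expected to find '" + expected + "' in: " + text,
        text.contains(expected));
  }
}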
Use of org.apache.hadoop.ipc.RemoteException in project hadoop by apache.
From the class TestWebHDFS, method testWebHdfsDeleteSnapshot:
/**
 * Test snapshot deletion through WebHdfs
 */
@Test
public void testWebHdfsDeleteSnapshot() throws Exception {
  MiniDFSCluster cluster = null;
  final Configuration conf = WebHdfsTestUtil.createConf();
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
        WebHdfsConstants.WEBHDFS_SCHEME);
    final Path foo = new Path("/foo");
    dfs.mkdirs(foo);
    dfs.allowSnapshot(foo);
    webHdfs.createSnapshot(foo, "s1");
    final Path spath = webHdfs.createSnapshot(foo, null);
    Assert.assertTrue(webHdfs.exists(spath));
    final Path s1path = SnapshotTestHelper.getSnapshotRoot(foo, "s1");
    Assert.assertTrue(webHdfs.exists(s1path));
    // delete operation with the snapshot name passed as null
    try {
      webHdfs.deleteSnapshot(foo, null);
      fail("Expected IllegalArgumentException");
    } catch (RemoteException e) {
      Assert.assertEquals("Required param snapshotname for "
          + "op: DELETESNAPSHOT is null or empty", e.getLocalizedMessage());
    }
    // delete the two snapshots
    webHdfs.deleteSnapshot(foo, "s1");
    assertFalse(webHdfs.exists(s1path));
    webHdfs.deleteSnapshot(foo, spath.getName());
    assertFalse(webHdfs.exists(spath));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
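SnapshotTestHelper.getSnapshotRoot(foo, "s1") in the test resolves to the standard HDFS snapshot layout, where a snapshot named s1 of /foo is exposed under /foo/.snapshot/s1. A small sketch (assuming that standard layout) shows how the same path can be built directly:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;

public class SnapshotPathSketch {
  public static void main(String[] args) {
    Path foo = new Path("/foo");
    // DOT_SNAPSHOT_DIR is ".snapshot", so the root of snapshot "s1" is
    // /foo/.snapshot/s1 -- the path the test checks with webHdfs.exists().
    Path s1Root = new Path(new Path(foo, HdfsConstants.DOT_SNAPSHOT_DIR), "s1");
    System.out.println(s1Root); // /foo/.snapshot/s1
  }
}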
Use of org.apache.hadoop.ipc.RemoteException in project hadoop by apache.
From the class TestWebHDFSForHA, method testClientFailoverWhenStandbyNNHasStaleCredentials:
@Test
public void testClientFailoverWhenStandbyNNHasStaleCredentials()
    throws IOException {
  Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
  conf.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
  MiniDFSCluster cluster = null;
  WebHdfsFileSystem fs = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
        .numDataNodes(0).build();
    HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);
    cluster.waitActive();
    fs = (WebHdfsFileSystem) FileSystem.get(WEBHDFS_URI, conf);
    cluster.transitionToActive(0);
    Token<?> token = fs.getDelegationToken(null);
    final DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
    identifier.readFields(new DataInputStream(
        new ByteArrayInputStream(token.getIdentifier())));
    cluster.transitionToStandby(0);
    cluster.transitionToActive(1);
    final DelegationTokenSecretManager secretManager =
        NameNodeAdapter.getDtSecretManager(cluster.getNamesystem(0));
    ExceptionHandler eh = new ExceptionHandler();
    eh.initResponse(mock(HttpServletResponse.class));
    Response resp = null;
    try {
      secretManager.retrievePassword(identifier);
    } catch (IOException e) {
      // Mimic the UserProvider class logic (server side) by throwing
      // SecurityException here
      Assert.assertTrue(e instanceof SecretManager.InvalidToken);
      resp = eh.toResponse(new SecurityException(e));
    }
    // The Response (resp) below is what the server will send to client
    //
    // BEFORE HDFS-6475 fix, the resp.entity is
    //   {"RemoteException":{"exception":"SecurityException",
    //    "javaClassName":"java.lang.SecurityException",
    //    "message":"Failed to obtain user group information:
    //    org.apache.hadoop.security.token.SecretManager$InvalidToken:
    //    StandbyException"}}
    // AFTER the fix, the resp.entity is
    //   {"RemoteException":{"exception":"StandbyException",
    //    "javaClassName":"org.apache.hadoop.ipc.StandbyException",
    //    "message":"Operation category READ is not supported in
    //    state standby"}}
    //
    // Mimic the client side logic by parsing the response from server
    //
    Map<?, ?> m = (Map<?, ?>) JSON.parse(resp.getEntity().toString());
    RemoteException re = JsonUtilClient.toRemoteException(m);
    Exception unwrapped = re.unwrapRemoteException(StandbyException.class);
    Assert.assertTrue(unwrapped instanceof StandbyException);
  } finally {
    IOUtils.cleanup(null, fs);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
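The last few lines mimic the client: the JSON error body described in the comments ({"RemoteException": {"exception": ..., "javaClassName": ..., "message": ...}}) is parsed back into a RemoteException and then unwrapped. A hedged sketch of that mapping, using a hypothetical helper rather than Hadoop's JsonUtilClient, is:

import java.util.Map;
import org.apache.hadoop.ipc.RemoteException;

public final class RemoteExceptionJsonSketch {
  // Hypothetical helper: rebuild a RemoteException from the nested
  // "RemoteException" object in a WebHDFS error response.
  static RemoteException fromJsonMap(Map<?, ?> json) {
    Map<?, ?> inner = (Map<?, ?>) json.get("RemoteException");
    String className = (String) inner.get("javaClassName");
    String message = (String) inner.get("message");
    return new RemoteException(className, message);
  }
}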
Use of org.apache.hadoop.ipc.RemoteException in project hadoop by apache.
From the class RMAdminCLI, method run:
@Override
public int run(String[] args) throws Exception {
  YarnConfiguration yarnConf = getConf() == null
      ? new YarnConfiguration() : new YarnConfiguration(getConf());
  boolean isHAEnabled = yarnConf.getBoolean(YarnConfiguration.RM_HA_ENABLED,
      YarnConfiguration.DEFAULT_RM_HA_ENABLED);
  if (args.length < 1) {
    printUsage("", isHAEnabled);
    return -1;
  }
  int exitCode = -1;
  int i = 0;
  String cmd = args[i++];
  exitCode = 0;
  if ("-help".equals(cmd)) {
    if (i < args.length) {
      printUsage(args[i], isHAEnabled);
    } else {
      printHelp("", isHAEnabled);
    }
    return exitCode;
  }
  if (USAGE.containsKey(cmd)) {
    if (isHAEnabled) {
      return super.run(args);
    }
    System.out.println("Cannot run " + cmd
        + " when ResourceManager HA is not enabled");
    return -1;
  }
  //
  if ("-refreshAdminAcls".equals(cmd) || "-refreshQueues".equals(cmd)
      || "-refreshNodesResources".equals(cmd)
      || "-refreshServiceAcl".equals(cmd)
      || "-refreshUserToGroupsMappings".equals(cmd)
      || "-refreshSuperUserGroupsConfiguration".equals(cmd)) {
    if (args.length != 1) {
      printUsage(cmd, isHAEnabled);
      return exitCode;
    }
  }
  try {
    if ("-refreshQueues".equals(cmd)) {
      exitCode = refreshQueues();
    } else if ("-refreshNodes".equals(cmd)) {
      exitCode = handleRefreshNodes(args, cmd, isHAEnabled);
    } else if ("-refreshNodesResources".equals(cmd)) {
      exitCode = refreshNodesResources();
    } else if ("-refreshUserToGroupsMappings".equals(cmd)) {
      exitCode = refreshUserToGroupsMappings();
    } else if ("-refreshSuperUserGroupsConfiguration".equals(cmd)) {
      exitCode = refreshSuperUserGroupsConfiguration();
    } else if ("-refreshAdminAcls".equals(cmd)) {
      exitCode = refreshAdminAcls();
    } else if ("-refreshServiceAcl".equals(cmd)) {
      exitCode = refreshServiceAcls();
    } else if ("-refreshClusterMaxPriority".equals(cmd)) {
      exitCode = refreshClusterMaxPriority();
    } else if ("-getGroups".equals(cmd)) {
      String[] usernames = Arrays.copyOfRange(args, i, args.length);
      exitCode = getGroups(usernames);
    } else if ("-updateNodeResource".equals(cmd)) {
      exitCode = handleUpdateNodeResource(args, cmd, isHAEnabled);
    } else if ("-addToClusterNodeLabels".equals(cmd)) {
      exitCode = handleAddToClusterNodeLabels(args, cmd, isHAEnabled);
    } else if ("-removeFromClusterNodeLabels".equals(cmd)) {
      exitCode = handleRemoveFromClusterNodeLabels(args, cmd, isHAEnabled);
    } else if ("-replaceLabelsOnNode".equals(cmd)) {
      exitCode = handleReplaceLabelsOnNodes(args, cmd, isHAEnabled);
    } else {
      exitCode = -1;
      System.err.println(cmd.substring(1) + ": Unknown command");
      printUsage("", isHAEnabled);
    }
  } catch (IllegalArgumentException arge) {
    exitCode = -1;
    System.err.println(cmd.substring(1) + ": " + arge.getLocalizedMessage());
    printUsage(cmd, isHAEnabled);
  } catch (RemoteException e) {
    //
    // This is an error returned by the hadoop server. Print
    // out the first line of the error message, ignore the stack trace.
    exitCode = -1;
    try {
      String[] content;
      content = e.getLocalizedMessage().split("\n");
      System.err.println(cmd.substring(1) + ": " + content[0]);
    } catch (Exception ex) {
      System.err.println(cmd.substring(1) + ": " + ex.getLocalizedMessage());
    }
  } catch (Exception e) {
    exitCode = -1;
    System.err.println(cmd.substring(1) + ": " + e.getLocalizedMessage());
  }
  if (null != localNodeLabelsManager) {
    localNodeLabelsManager.stop();
  }
  return exitCode;
}
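The RemoteException branch above reports server-side errors compactly: only the first line of the message is printed and the stack trace is dropped. A minimal, hedged sketch of that reporting pattern (the helper name is illustrative, not part of RMAdminCLI) is:

import org.apache.hadoop.ipc.RemoteException;

public final class RemoteErrorPrintSketch {
  // Print "<command>: <first line of the server message>", mirroring the
  // catch (RemoteException e) block in RMAdminCLI.run above.
  static void printFirstLine(String command, RemoteException e) {
    String msg = e.getLocalizedMessage();
    String firstLine = (msg == null) ? e.toString() : msg.split("\n")[0];
    System.err.println(command + ": " + firstLine);
  }
}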