Use of org.apache.hadoop.ipc.RemoteException in project hadoop by apache.
The class TestSafeMode, method testSafeModeWhenZeroBlockLocations.
@Test
public void testSafeModeWhenZeroBlockLocations() throws IOException {
  try {
    Path file1 = new Path("/tmp/testManualSafeMode/file1");
    Path file2 = new Path("/tmp/testManualSafeMode/file2");
    // create two files with one block each.
    DFSTestUtil.createFile(fs, file1, 1000, (short) 1, 0);
    DFSTestUtil.createFile(fs, file2, 2000, (short) 1, 0);
    System.out.println("Created file1 and file2.");
    checkGetBlockLocationsWorks(fs, file1);
    NameNode namenode = cluster.getNameNode();
    // manually enter safe mode.
    dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    assertTrue("should still be in SafeMode", namenode.isInSafeMode());
    // getBlockLocations should still work since the block locations exist.
    checkGetBlockLocationsWorks(fs, file1);
    dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    assertFalse("should not be in SafeMode", namenode.isInSafeMode());
    // Second part of the test: no block locations are available.
    cluster.shutdownDataNodes();
    cluster.shutdownNameNode(0);
    // now bring up just the NameNode.
    cluster.restartNameNode();
    cluster.waitActive();
    System.out.println("Restarted cluster with just the NameNode");
    namenode = cluster.getNameNode();
    assertTrue("No datanode is started. Should be in SafeMode",
        namenode.isInSafeMode());
    FileStatus stat = fs.getFileStatus(file1);
    try {
      fs.getFileBlockLocations(stat, 0, 1000);
      fail("Should have got safemode exception");
    } catch (SafeModeException e) {
      // as expected
    } catch (RemoteException re) {
      if (!re.getClassName().equals(SafeModeException.class.getName())) {
        fail("Should have got safemode exception");
      }
    }
    dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    assertFalse("Should not be in safemode", namenode.isInSafeMode());
    checkGetBlockLocationsWorks(fs, file1);
  } finally {
    if (fs != null) {
      fs.close();
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
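Where there is no dedicated client-side exception type to catch, the second catch block above falls back to comparing the class name carried inside the RemoteException. Below is a minimal, self-contained sketch of that pattern; the RemoteException is constructed by hand purely for illustration, not produced by a real NameNode.

import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
import org.apache.hadoop.ipc.RemoteException;

public class SafeModeRemoteExceptionSketch {
  public static void main(String[] args) {
    // Hand-built RemoteException shaped like the one the RPC layer surfaces
    // when the NameNode rejects a call while in safe mode.
    RemoteException re = new RemoteException(
        SafeModeException.class.getName(), "Name node is in safe mode.");

    // The test above treats the failure as expected only if the wrapped
    // server-side class name matches SafeModeException.
    if (re.getClassName().equals(SafeModeException.class.getName())) {
      System.out.println("Wrapped server-side exception is a SafeModeException");
    }
  }
}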
Use of org.apache.hadoop.ipc.RemoteException in project hadoop by apache.
The class TestRequestHedgingProxyProvider, method testHedgingWhenFileNotFoundException.
@Test
public void testHedgingWhenFileNotFoundException() throws Exception {
  NamenodeProtocols active = Mockito.mock(NamenodeProtocols.class);
  Mockito.when(active.getBlockLocations(Matchers.anyString(),
      Matchers.anyLong(), Matchers.anyLong()))
      .thenThrow(new RemoteException("java.io.FileNotFoundException",
          "File does not exist!"));
  NamenodeProtocols standby = Mockito.mock(NamenodeProtocols.class);
  Mockito.when(standby.getBlockLocations(Matchers.anyString(),
      Matchers.anyLong(), Matchers.anyLong()))
      .thenThrow(new RemoteException("org.apache.hadoop.ipc.StandbyException",
          "Standby NameNode"));
  RequestHedgingProxyProvider<NamenodeProtocols> provider =
      new RequestHedgingProxyProvider<>(conf, nnUri, NamenodeProtocols.class,
          createFactory(active, standby));
  try {
    provider.getProxy().proxy.getBlockLocations("/tmp/test.file", 0L, 20L);
    Assert.fail("Should fail since the active namenode throws FileNotFoundException!");
  } catch (MultiException me) {
    for (Exception ex : me.getExceptions().values()) {
      Exception rEx = ((RemoteException) ex).unwrapRemoteException();
      if (rEx instanceof StandbyException) {
        continue;
      }
      Assert.assertTrue(rEx instanceof FileNotFoundException);
    }
  }
  Mockito.verify(active).getBlockLocations(Matchers.anyString(),
      Matchers.anyLong(), Matchers.anyLong());
  Mockito.verify(standby).getBlockLocations(Matchers.anyString(),
      Matchers.anyLong(), Matchers.anyLong());
}
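The unwrapRemoteException() call in the catch block above rebuilds the original exception from the class name and message that travelled over RPC. Below is a minimal sketch of that behavior, independent of the proxy-provider setup; the class name and message are copied from the mocked active namenode above, and the sketch uses the lookup-type variant of the method.

import java.io.FileNotFoundException;
import java.io.IOException;
import org.apache.hadoop.ipc.RemoteException;

public class UnwrapRemoteExceptionSketch {
  public static void main(String[] args) {
    // Same shape as the exception thrown by the mocked active namenode above.
    RemoteException re = new RemoteException(
        "java.io.FileNotFoundException", "File does not exist!");

    // With lookup types, unwrapRemoteException re-instantiates the wrapped
    // exception when its class is among the given types; otherwise the
    // RemoteException itself comes back.
    IOException unwrapped = re.unwrapRemoteException(FileNotFoundException.class);
    System.out.println(unwrapped instanceof FileNotFoundException);  // true
    System.out.println(unwrapped.getMessage());
  }
}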
Use of org.apache.hadoop.ipc.RemoteException in project hadoop by apache.
The class TestWebHDFS, method testWebHdfsRenameSnapshot.
/**
 * Test snapshot rename through WebHdfs.
 */
@Test
public void testWebHdfsRenameSnapshot() throws Exception {
  MiniDFSCluster cluster = null;
  final Configuration conf = WebHdfsTestUtil.createConf();
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
        WebHdfsConstants.WEBHDFS_SCHEME);
    final Path foo = new Path("/foo");
    dfs.mkdirs(foo);
    dfs.allowSnapshot(foo);
    webHdfs.createSnapshot(foo, "s1");
    final Path s1path = SnapshotTestHelper.getSnapshotRoot(foo, "s1");
    Assert.assertTrue(webHdfs.exists(s1path));
    // rename s1 to s2 with oldsnapshotName as null
    try {
      webHdfs.renameSnapshot(foo, null, "s2");
      fail("Expected IllegalArgumentException");
    } catch (RemoteException e) {
      Assert.assertEquals("Required param oldsnapshotname for "
          + "op: RENAMESNAPSHOT is null or empty", e.getLocalizedMessage());
    }
    // rename s1 to s2
    webHdfs.renameSnapshot(foo, "s1", "s2");
    assertFalse(webHdfs.exists(s1path));
    final Path s2path = SnapshotTestHelper.getSnapshotRoot(foo, "s2");
    Assert.assertTrue(webHdfs.exists(s2path));
    webHdfs.deleteSnapshot(foo, "s2");
    assertFalse(webHdfs.exists(s2path));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Use of org.apache.hadoop.ipc.RemoteException in project hadoop by apache.
The class TestFileStatus, method testGetFileInfo.
/** Test calling getFileInfo directly on the client. */
@Test
public void testGetFileInfo() throws IOException {
  // Check that / exists.
  Path path = new Path("/");
  assertTrue("/ should be a directory",
      fs.getFileStatus(path).isDirectory());
  // Make sure getFileInfo returns null for files which do not exist.
  HdfsFileStatus fileInfo = dfsClient.getFileInfo("/noSuchFile");
  assertEquals("Non-existent file should result in null", null, fileInfo);
  Path path1 = new Path("/name1");
  Path path2 = new Path("/name1/name2");
  assertTrue(fs.mkdirs(path1));
  FSDataOutputStream out = fs.create(path2, false);
  out.close();
  fileInfo = dfsClient.getFileInfo(path1.toString());
  assertEquals(1, fileInfo.getChildrenNum());
  fileInfo = dfsClient.getFileInfo(path2.toString());
  assertEquals(0, fileInfo.getChildrenNum());
  // Test that getFileInfo throws the right exception given a non-absolute path.
  try {
    dfsClient.getFileInfo("non-absolute");
    fail("getFileInfo for a non-absolute path did not throw IOException");
  } catch (RemoteException re) {
    assertTrue("Wrong exception for invalid file name: " + re,
        re.toString().contains("Absolute path required"));
  }
}
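The substring assertion above works because RemoteException's string form includes the message reported by the NameNode. Below is a small illustrative sketch; the wrapped class name used here is only a hypothetical stand-in, since the assertion in the test does not depend on which server-side exception carries the message.

import org.apache.hadoop.ipc.RemoteException;

public class RemoteExceptionMessageSketch {
  public static void main(String[] args) {
    // Hand-built stand-in for the error returned when a relative path is passed
    // to getFileInfo; the class name is hypothetical, only the message matters.
    RemoteException re = new RemoteException(
        "java.io.IOException", "Absolute path required");

    // toString() carries the server-side message, which is what the
    // contains("Absolute path required") assertion relies on.
    System.out.println(re.toString().contains("Absolute path required"));  // true
  }
}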
Use of org.apache.hadoop.ipc.RemoteException in project hadoop by apache.
The class TestDiskBalancerCommand, method testSubmitPlanInNonRegularStatus.
/**
 * Tests that submitting and executing a plan is rejected when the Datanode
 * is in a status other than REGULAR.
 */
@Test(timeout = 60000)
public void testSubmitPlanInNonRegularStatus() throws Exception {
  final int numDatanodes = 1;
  MiniDFSCluster miniCluster = null;
  final Configuration hdfsConf = new HdfsConfiguration();
  try {
    /* new cluster with imbalanced capacity */
    miniCluster = DiskBalancerTestUtil.newImbalancedCluster(hdfsConf,
        numDatanodes, CAPACITIES, DEFAULT_BLOCK_SIZE, FILE_LEN,
        StartupOption.ROLLBACK);
    /* get the full path of the plan */
    final String planFileFullName = runAndVerifyPlan(miniCluster, hdfsConf);
    try {
      /* run the execute command */
      final String cmdLine = String.format("hdfs diskbalancer -%s %s",
          EXECUTE, planFileFullName);
      runCommand(cmdLine, hdfsConf, miniCluster);
    } catch (RemoteException e) {
      assertThat(e.getClassName(), containsString("DiskBalancerException"));
      assertThat(e.toString(), is(allOf(
          containsString("Datanode is in special state"),
          containsString("Disk balancing not permitted."))));
    }
  } finally {
    if (miniCluster != null) {
      miniCluster.shutdown();
    }
  }
}