Use of org.apache.hadoop.util.ExitUtil.ExitException in project hadoop by apache.
From the class TestFailureOfSharedDir, method testFailureOfSharedDir:
/**
 * Test that marking the shared edits dir as being "required" causes the NN to
 * fail if that dir can't be accessed.
 */
@Test
public void testFailureOfSharedDir() throws Exception {
  Configuration conf = new Configuration();
  conf.setLong(DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY, 2000);
  // The shared edits dir will automatically be marked required.
  MiniDFSCluster cluster = null;
  File sharedEditsDir = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf)
        .nnTopology(MiniDFSNNTopology.simpleHATopology())
        .numDataNodes(0)
        .checkExitOnShutdown(false)
        .build();
    cluster.waitActive();
    cluster.transitionToActive(0);
    FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
    assertTrue(fs.mkdirs(new Path("/test1")));
    // Blow away the shared edits dir.
    URI sharedEditsUri = cluster.getSharedEditsDir(0, 1);
    sharedEditsDir = new File(sharedEditsUri);
    assertEquals(0, FileUtil.chmod(sharedEditsDir.getAbsolutePath(), "-w", true));
    Thread.sleep(conf.getLong(DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY,
        DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_DEFAULT) * 2);
    NameNode nn1 = cluster.getNameNode(1);
    assertTrue(nn1.isStandbyState());
    assertFalse("StandBy NameNode should not go to SafeMode on resource unavailability",
        nn1.isInSafeMode());
    NameNode nn0 = cluster.getNameNode(0);
    try {
      // Make sure that subsequent operations on the NN fail.
      nn0.getRpcServer().rollEditLog();
      fail("Succeeded in rolling edit log despite shared dir being deleted");
    } catch (ExitException ee) {
      GenericTestUtils.assertExceptionContains(
          "finalize log segment 1, 3 failed for required journal", ee);
    }
    // Check that none of the edits dirs rolled, since the shared edits
    // dir didn't roll. Regression test for HDFS-2874.
    for (URI editsUri : cluster.getNameEditsDirs(0)) {
      if (editsUri.equals(sharedEditsUri)) {
        continue;
      }
      File editsDir = new File(editsUri.getPath());
      File curDir = new File(editsDir, "current");
      GenericTestUtils.assertGlobEquals(curDir, "edits_.*",
          NNStorage.getInProgressEditsFileName(1));
    }
  } finally {
    if (sharedEditsDir != null) {
      // Restore write permission so test cleanup can delete the dir.
      FileUtil.chmod(sharedEditsDir.getAbsolutePath(), "+w", true);
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
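All of the snippets on this page rely on the same mechanism: once real JVM exits are disabled, ExitUtil.terminate() throws an ExitUtil.ExitException carrying the intended exit status instead of calling System.exit(), so a test can catch and inspect it. (checkExitOnShutdown(false) additionally keeps MiniDFSCluster shutdown from failing the test after an exit was attempted.) A minimal standalone sketch of the pattern, with a class name and message of our own, not from the Hadoop sources:

import org.apache.hadoop.util.ExitUtil;
import org.apache.hadoop.util.ExitUtil.ExitException;

public class ExitUtilSketch {
  public static void main(String[] args) {
    ExitUtil.disableSystemExit(); // terminate() now throws instead of exiting
    try {
      ExitUtil.terminate(1, "simulated fatal condition");
    } catch (ExitException ee) {
      // The would-be exit code travels on the exception.
      System.out.println("caught exit, status=" + ee.status);
    } finally {
      ExitUtil.resetFirstExitException(); // clear state for subsequent tests
    }
  }
}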
Use of org.apache.hadoop.util.ExitUtil.ExitException in project hadoop by apache.
From the class TestNativeLibraryChecker, method expectOutput:
private void expectOutput(String[] args) {
  ExitUtil.disableSystemExit();
  // Redirect stdout so the checker's report can be inspected afterwards.
  ByteArrayOutputStream outContent = new ByteArrayOutputStream();
  PrintStream originalPs = System.out;
  System.setOut(new PrintStream(outContent));
  try {
    NativeLibraryChecker.main(args);
  } catch (ExitException e) {
    // main() attempted to exit; with exits disabled this surfaces here.
    ExitUtil.resetFirstExitException();
  } finally {
    if (Shell.WINDOWS) {
      assertTrue(outContent.toString().contains("winutils: true"));
    }
    if (NativeCodeLoader.isNativeCodeLoaded()) {
      assertTrue(outContent.toString().contains("hadoop: true"));
    }
    System.setOut(originalPs);
  }
}
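If a test needs the exit status rather than just the captured output, the same pattern extends naturally. A hedged variant (the helper name is ours, not Hadoop's):

private void expectExitWithStatus(String[] args, int expectedStatus) {
  ExitUtil.disableSystemExit();
  try {
    NativeLibraryChecker.main(args);
    fail("expected NativeLibraryChecker to call System.exit()");
  } catch (ExitException ee) {
    // ExitException exposes the would-be exit code as a public field.
    assertEquals(expectedStatus, ee.status);
  } finally {
    ExitUtil.resetFirstExitException();
  }
}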
Use of org.apache.hadoop.util.ExitUtil.ExitException in project hadoop by apache.
From the class TestClusterId, method testFormatWithoutForceEnterYes:
/**
 * Test namenode format with the -format option when a non-empty name
 * directory exists. Enter Y when prompted and the format should succeed.
 *
 * @throws IOException
 * @throws InterruptedException
 */
@Test
public void testFormatWithoutForceEnterYes() throws IOException,
    InterruptedException {
  // We check for a non-empty dir, so create a child path.
  File data = new File(hdfsDir, "file");
  if (!data.mkdirs()) {
    fail("Failed to create dir " + data.getPath());
  }
  // Capture the input stream and feed "Y" to the confirmation prompt.
  InputStream origIn = System.in;
  ByteArrayInputStream bins = new ByteArrayInputStream("Y\n".getBytes());
  System.setIn(bins);
  String[] argv = { "-format" };
  try {
    NameNode.createNameNode(argv, config);
    fail("createNameNode() did not call System.exit()");
  } catch (ExitException e) {
    assertEquals("Format should have succeeded", 0, e.status);
  }
  System.setIn(origIn);
  String cid = getClusterId(config);
  assertTrue("Didn't get new ClusterId", cid != null && !cid.equals(""));
}
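The complementary case, declining the prompt, follows the same shape. A condensed sketch, modeled on (but not copied from) Hadoop's testFormatWithoutForceEnterNo:

// Feed "N" to the confirmation prompt; the format should be aborted
// and createNameNode() should attempt a non-zero exit.
System.setIn(new ByteArrayInputStream("N\n".getBytes()));
try {
  NameNode.createNameNode(new String[] { "-format" }, config);
  fail("createNameNode() did not call System.exit()");
} catch (ExitException e) {
  assertEquals("Format should have been aborted", 1, e.status);
}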
Use of org.apache.hadoop.util.ExitUtil.ExitException in project hadoop by apache.
From the class TestStateTransitionFailure, method testFailureToTransitionCausesShutdown:
/**
 * Ensure that a failure to fully transition to the active state causes a
 * shutdown of the NameNode.
 */
@Test
public void testFailureToTransitionCausesShutdown() throws IOException {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    // Set an illegal value for the trash emptier interval. This will cause
    // the NN to fail to transition to the active state.
    conf.setLong(CommonConfigurationKeys.FS_TRASH_INTERVAL_KEY, -1);
    cluster = new MiniDFSCluster.Builder(conf)
        .nnTopology(MiniDFSNNTopology.simpleHATopology())
        .numDataNodes(0)
        .checkExitOnShutdown(false)
        .build();
    cluster.waitActive();
    try {
      cluster.transitionToActive(0);
      fail("Transitioned to active but should not have been able to.");
    } catch (ExitException ee) {
      assertExceptionContains("Cannot start trash emptier with negative interval", ee);
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
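For context, the guard this test trips runs during the NameNode's active-services startup. Paraphrased, not the verbatim Hadoop source:

// A negative trash interval is rejected outright, which aborts the
// transition to active and (via ExitUtil) shuts the NameNode down.
long trashInterval = conf.getLong(FS_TRASH_INTERVAL_KEY, FS_TRASH_INTERVAL_DEFAULT);
if (trashInterval < 0) {
  throw new IOException("Cannot start trash emptier with negative interval."
      + " Set " + FS_TRASH_INTERVAL_KEY + " to a positive value.");
}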
Use of org.apache.hadoop.util.ExitUtil.ExitException in project hadoop by apache.
From the class TestCheckpoint, method testTooManyEditReplayFailures:
/*
 * Simulate 2NN exit due to too many merge failures.
 */
@Test(timeout = 30000)
public void testTooManyEditReplayFailures() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_MAX_RETRIES_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 1);
  FSDataOutputStream fos = null;
  SecondaryNameNode secondary = null;
  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(numDatanodes)
        .checkExitOnShutdown(false)
        .build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    fos = fs.create(new Path("tmpfile0"));
    fos.write(new byte[] { 0, 1, 2, 3 });
    // Cause the merge to fail in the next checkpoint.
    Mockito.doThrow(new IOException("Injecting failure during merge"))
        .when(faultInjector).duringMerge();
    secondary = startSecondaryNameNode(conf);
    secondary.doWork();
    // Fail if we get here.
    fail("2NN did not exit.");
  } catch (ExitException ee) {
    // Expected: the 2NN gives up after exhausting its retries.
    ExitUtil.resetFirstExitException();
    assertEquals("Max retries", 1, secondary.getMergeErrorCount() - 1);
  } finally {
    if (fs != null) {
      fs.close();
    }
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
    Mockito.reset(faultInjector);
  }
}
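The faultInjector field is not shown in this snippet; elsewhere in TestCheckpoint it is a Mockito mock installed as the checkpoint fault-injection hook. Roughly, as a sketch (the exact injector wiring is an assumption on our part):

// In the test class's @Before setup (sketch; assumes the injector
// exposes a settable test hook):
faultInjector = Mockito.mock(CheckpointFaultInjector.class);
CheckpointFaultInjector.instance = faultInjector;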