Search in sources :

Example 6 with ExitException

use of org.apache.hadoop.util.ExitUtil.ExitException in project hadoop by apache.

The class TestClusterId, method testFormatWithNonInteractive.

/**
 * Verifies that {@code -format -nonInteractive} refuses to format when the
 * name directory is already non-empty: the attempt must abort with exit
 * code 1 and must leave no VERSION file behind.
 *
 * @throws IOException
 */
@Test
public void testFormatWithNonInteractive() throws IOException {
    // The non-interactive abort only triggers for a non-empty directory,
    // so seed the name dir with a child entry first.
    File child = new File(hdfsDir, "file");
    if (!child.mkdirs()) {
        fail("Failed to create dir " + child.getPath());
    }
    String[] args = { "-format", "-nonInteractive" };
    try {
        NameNode.createNameNode(args, config);
        fail("createNameNode() did not call System.exit()");
    } catch (ExitException e) {
        // Under test, ExitUtil surfaces System.exit() as ExitException,
        // carrying the would-be exit status.
        assertEquals("Format should have been aborted with exit code 1", 1, e.status);
    }
    // A successful format would have written current/VERSION; assert it is absent.
    File versionFile = new File(hdfsDir, "current/VERSION");
    assertFalse("Check version should not exist", versionFile.exists());
}
Also used : File(java.io.File) ExitException(org.apache.hadoop.util.ExitUtil.ExitException) Test(org.junit.Test)

Example 7 with ExitException

use of org.apache.hadoop.util.ExitUtil.ExitException in project hadoop by apache.

The class TestCheckpoint, method testTooManyEditReplayFailures.

/*
 * Simulate 2NN (SecondaryNameNode) exit due to too many merge failures:
 * inject a failure into every merge, allow a single retry, and verify the
 * 2NN terminates via System.exit() (surfaced as ExitException under test).
 */
@Test(timeout = 30000)
public void testTooManyEditReplayFailures() throws IOException {
    Configuration conf = new HdfsConfiguration();
    // Allow only one retry and checkpoint every second so the 2NN gives up quickly.
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_MAX_RETRIES_KEY, 1);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 1);
    FSDataOutputStream fos = null;
    SecondaryNameNode secondary = null;
    MiniDFSCluster cluster = null;
    FileSystem fs = null;
    try {
        // checkExitOnShutdown(false): the injected exit must not fail
        // the MiniDFSCluster's own teardown.
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).checkExitOnShutdown(false).build();
        cluster.waitActive();
        fs = cluster.getFileSystem();
        // Write a little data so the next checkpoint has edits to merge.
        // NOTE(review): fos is never closed; presumably fs.close() in the
        // finally block covers it — verify.
        fos = fs.create(new Path("tmpfile0"));
        fos.write(new byte[] { 0, 1, 2, 3 });
        // Cause merge to fail in next checkpoint.
        Mockito.doThrow(new IOException("Injecting failure during merge")).when(faultInjector).duringMerge();
        secondary = startSecondaryNameNode(conf);
        secondary.doWork();
        // doWork() should have terminated the 2NN; reaching here is a failure.
        fail("2NN did not exit.");
    } catch (ExitException ee) {
        // Expected path: clear the recorded exit so later tests see a clean slate.
        ExitUtil.resetFirstExitException();
        // The "- 1" suggests getMergeErrorCount() counts the initial attempt
        // plus retries — TODO confirm against SecondaryNameNode.
        assertEquals("Max retries", 1, secondary.getMergeErrorCount() - 1);
    } finally {
        if (fs != null) {
            fs.close();
        }
        cleanup(secondary);
        secondary = null;
        cleanup(cluster);
        cluster = null;
        // Undo the injected merge failure for subsequent tests.
        Mockito.reset(faultInjector);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) FileSystem(org.apache.hadoop.fs.FileSystem) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) MetricsRecordBuilder(org.apache.hadoop.metrics2.MetricsRecordBuilder) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) IOException(java.io.IOException) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) ExitException(org.apache.hadoop.util.ExitUtil.ExitException) Test(org.junit.Test)

Aggregations

ExitException (org.apache.hadoop.util.ExitUtil.ExitException)7 Test (org.junit.Test)6 File (java.io.File)4 Configuration (org.apache.hadoop.conf.Configuration)3 MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster)3 ByteArrayInputStream (java.io.ByteArrayInputStream)2 InputStream (java.io.InputStream)2 FileSystem (org.apache.hadoop.fs.FileSystem)2 Path (org.apache.hadoop.fs.Path)2 ByteArrayOutputStream (java.io.ByteArrayOutputStream)1 IOException (java.io.IOException)1 PrintStream (java.io.PrintStream)1 URI (java.net.URI)1 FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream)1 DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem)1 HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration)1 NameNode (org.apache.hadoop.hdfs.server.namenode.NameNode)1 MetricsRecordBuilder (org.apache.hadoop.metrics2.MetricsRecordBuilder)1