
Example 1 with ExitException

Use of org.apache.hadoop.util.ExitUtil.ExitException in the Apache Hadoop project.

The class TestFailureOfSharedDir, method testFailureOfSharedDir:

/**
   * Test that marking the shared edits dir as being "required" causes the NN to
   * fail if that dir can't be accessed.
   */
@Test
public void testFailureOfSharedDir() throws Exception {
    Configuration conf = new Configuration();
    conf.setLong(DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY, 2000);
    // The shared edits dir will automatically be marked required.
    MiniDFSCluster cluster = null;
    File sharedEditsDir = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0).checkExitOnShutdown(false).build();
        cluster.waitActive();
        cluster.transitionToActive(0);
        FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
        assertTrue(fs.mkdirs(new Path("/test1")));
        // Blow away the shared edits dir.
        URI sharedEditsUri = cluster.getSharedEditsDir(0, 1);
        sharedEditsDir = new File(sharedEditsUri);
        assertEquals(0, FileUtil.chmod(sharedEditsDir.getAbsolutePath(), "-w", true));
        Thread.sleep(conf.getLong(DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY, DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_DEFAULT) * 2);
        NameNode nn1 = cluster.getNameNode(1);
        assertTrue(nn1.isStandbyState());
        assertFalse("StandBy NameNode should not go to SafeMode on resource unavailability", nn1.isInSafeMode());
        NameNode nn0 = cluster.getNameNode(0);
        try {
            // Make sure that subsequent operations on the NN fail.
            nn0.getRpcServer().rollEditLog();
            fail("Succeeded in rolling edit log despite shared dir being deleted");
        } catch (ExitException ee) {
            GenericTestUtils.assertExceptionContains("finalize log segment 1, 3 failed for required journal", ee);
        }
        // Check that none of the edits dirs rolled, since the shared edits
        // dir didn't roll. Regression test for HDFS-2874.
        for (URI editsUri : cluster.getNameEditsDirs(0)) {
            if (editsUri.equals(sharedEditsUri)) {
                continue;
            }
            File editsDir = new File(editsUri.getPath());
            File curDir = new File(editsDir, "current");
            GenericTestUtils.assertGlobEquals(curDir, "edits_.*", NNStorage.getInProgressEditsFileName(1));
        }
    } finally {
        if (sharedEditsDir != null) {
            // Restore write permissions, otherwise test cleanup will fail.
            FileUtil.chmod(sharedEditsDir.getAbsolutePath(), "+w", true);
        }
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), NameNode (org.apache.hadoop.hdfs.server.namenode.NameNode), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), FileSystem (org.apache.hadoop.fs.FileSystem), File (java.io.File), URI (java.net.URI), ExitException (org.apache.hadoop.util.ExitUtil.ExitException), Test (org.junit.Test)
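
A note on the mechanism shared by every example on this page: once ExitUtil.disableSystemExit() is in effect (the MiniDFSCluster harness disables real exits the same way during initialization, and Example 2 below calls it directly), ExitUtil.terminate(...) throws an ExitException carrying the would-be exit code instead of killing the JVM. A minimal, self-contained sketch of that pattern, independent of any test here (the message string is illustrative):

import org.apache.hadoop.util.ExitUtil;
import org.apache.hadoop.util.ExitUtil.ExitException;

public class ExitExceptionSketch {
    public static void main(String[] args) {
        // Turn subsequent ExitUtil.terminate() calls into throws instead of System.exit().
        ExitUtil.disableSystemExit();
        try {
            ExitUtil.terminate(1, "simulated fatal condition");
        } catch (ExitException ee) {
            // The would-be process exit code is preserved on the exception.
            System.out.println("intercepted exit with status " + ee.status);
        } finally {
            // Clear the recorded first exit so later tests start from a clean slate.
            ExitUtil.resetFirstExitException();
        }
    }
}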

Example 2 with ExitException

Use of org.apache.hadoop.util.ExitUtil.ExitException in the Apache Hadoop project.

The class TestNativeLibraryChecker, method expectOutput:

private void expectOutput(String[] args) {
    ExitUtil.disableSystemExit();
    ByteArrayOutputStream outContent = new ByteArrayOutputStream();
    PrintStream originalPs = System.out;
    System.setOut(new PrintStream(outContent));
    try {
        NativeLibraryChecker.main(args);
    } catch (ExitException e) {
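        // Expected: NativeLibraryChecker exits via ExitUtil; clear the recorded exit for later tests.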
        ExitUtil.resetFirstExitException();
    } finally {
        if (Shell.WINDOWS) {
            assertTrue(outContent.toString().contains("winutils: true"));
        }
        if (NativeCodeLoader.isNativeCodeLoaded()) {
            assertTrue(outContent.toString().contains("hadoop:  true"));
        }
        System.setOut(originalPs);
    }
}
Also used: PrintStream (java.io.PrintStream), ByteArrayOutputStream (java.io.ByteArrayOutputStream), ExitException (org.apache.hadoop.util.ExitUtil.ExitException)
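
For context, a hypothetical caller of this helper; the "-a" flag, assumed here to ask NativeLibraryChecker to report on every native library it knows about, is an assumption about the checker's CLI rather than something shown on this page:

@Test
public void testNativeLibraryChecker() {
    // "-a" is assumed to mean "check all native libraries".
    expectOutput(new String[] { "-a" });
}

One wrinkle in expectOutput itself: the assertions run inside the finally block before System.out is restored, so a failing assertion would leave stdout redirected for subsequent tests; restoring the stream first (or in a nested finally) would be safer.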

Example 3 with ExitException

Use of org.apache.hadoop.util.ExitUtil.ExitException in the Apache Hadoop project.

The class TestClusterId, method testFormatWithoutForceEnterYes:

/**
   * Test namenode format with the -format option when a non-empty name
   * directory exists. The test answers "Y" at the prompt, so the format
   * should succeed.
   *
   * @throws IOException
   * @throws InterruptedException
   */
@Test
public void testFormatWithoutForceEnterYes() throws IOException, InterruptedException {
    // We check for a non-empty dir, so create a child path.
    File data = new File(hdfsDir, "file");
    if (!data.mkdirs()) {
        fail("Failed to create dir " + data.getPath());
    }
    // Swap in a scripted "Y" for stdin, keeping the original to restore later.
    InputStream origIn = System.in;
    ByteArrayInputStream bins = new ByteArrayInputStream("Y\n".getBytes());
    System.setIn(bins);
    String[] argv = { "-format" };
    try {
        NameNode.createNameNode(argv, config);
        fail("createNameNode() did not call System.exit()");
    } catch (ExitException e) {
        assertEquals("Format should have succeeded", 0, e.status);
    }
    System.setIn(origIn);
    String cid = getClusterId(config);
    assertTrue("Didn't get new ClusterId", (cid != null && !cid.equals("")));
}
Also used: ByteArrayInputStream (java.io.ByteArrayInputStream), InputStream (java.io.InputStream), File (java.io.File), ExitException (org.apache.hadoop.util.ExitUtil.ExitException), Test (org.junit.Test)
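
The stdin-scripting trick in this test generalizes to any code that prompts on the console. A hypothetical helper, not part of Hadoop, that restores System.in in a finally block (the test above skips the restore if an unexpected exception escapes its try/catch):

import java.io.ByteArrayInputStream;
import java.io.InputStream;

public final class StdinScript {
    /** Run an action while System.in yields the given scripted text. */
    public static void withStdin(String input, Runnable action) {
        InputStream origIn = System.in;
        System.setIn(new ByteArrayInputStream(input.getBytes()));
        try {
            action.run();
        } finally {
            // Restore the real stdin even if the action throws.
            System.setIn(origIn);
        }
    }
}

With such a helper, the test body would wrap the createNameNode call in withStdin("Y\n", ...); since Runnable cannot throw the checked IOException, that call would need wrapping or a throwing functional interface.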

Example 4 with ExitException

Use of org.apache.hadoop.util.ExitUtil.ExitException in the Apache Hadoop project.

The class TestStateTransitionFailure, method testFailureToTransitionCausesShutdown:

/**
   * Ensure that a failure to fully transition to the active state causes a
   * shutdown of the NameNode.
   */
@Test
public void testFailureToTransitionCausesShutdown() throws IOException {
    MiniDFSCluster cluster = null;
    try {
        Configuration conf = new Configuration();
        // Set an illegal value for the trash emptier interval. This will cause
        // the NN to fail to transition to the active state.
        conf.setLong(CommonConfigurationKeys.FS_TRASH_INTERVAL_KEY, -1);
        cluster = new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0).checkExitOnShutdown(false).build();
        cluster.waitActive();
        try {
            cluster.transitionToActive(0);
            fail("Transitioned to active but should not have been able to.");
        } catch (ExitException ee) {
            assertExceptionContains("Cannot start trash emptier with negative interval", ee);
        }
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), ExitException (org.apache.hadoop.util.ExitUtil.ExitException), Test (org.junit.Test)

Example 5 with ExitException

Use of org.apache.hadoop.util.ExitUtil.ExitException in the Apache Hadoop project.

The class TestCheckpoint, method testTooManyEditReplayFailures:

/*
   * Simulate 2NN exit due to too many merge failures.
   */
@Test(timeout = 30000)
public void testTooManyEditReplayFailures() throws IOException {
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_MAX_RETRIES_KEY, 1);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 1);
    FSDataOutputStream fos = null;
    SecondaryNameNode secondary = null;
    MiniDFSCluster cluster = null;
    FileSystem fs = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).checkExitOnShutdown(false).build();
        cluster.waitActive();
        fs = cluster.getFileSystem();
        fos = fs.create(new Path("tmpfile0"));
        fos.write(new byte[] { 0, 1, 2, 3 });
        // Cause merge to fail in next checkpoint.
        Mockito.doThrow(new IOException("Injecting failure during merge")).when(faultInjector).duringMerge();
        secondary = startSecondaryNameNode(conf);
        secondary.doWork();
        // Fail if we get here.
        fail("2NN did not exit.");
    } catch (ExitException ee) {
        // Expected: the 2NN terminated after exceeding the merge retry limit.
        ExitUtil.resetFirstExitException();
        assertEquals("Max retries", 1, secondary.getMergeErrorCount() - 1);
    } finally {
        if (fs != null) {
            fs.close();
        }
        cleanup(secondary);
        secondary = null;
        cleanup(cluster);
        cluster = null;
        Mockito.reset(faultInjector);
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), FileSystem (org.apache.hadoop.fs.FileSystem), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), MetricsRecordBuilder (org.apache.hadoop.metrics2.MetricsRecordBuilder), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), IOException (java.io.IOException), ExitException (org.apache.hadoop.util.ExitUtil.ExitException), Test (org.junit.Test)
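
The distinctive move in this test is the Mockito-armed fault injector: a hook object the checkpointing code calls at a known point, which the test stubs to throw. Reduced to its essentials, with illustrative (non-Hadoop) names:

import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;

import java.io.IOException;
import org.junit.Test;

public class FaultInjectionSketch {

    /** Hypothetical hook interface; production code would install a no-op implementation. */
    interface FaultInjector {
        void duringMerge() throws IOException;
    }

    @Test(expected = IOException.class)
    public void hookThrowsWhenArmed() throws IOException {
        FaultInjector injector = mock(FaultInjector.class);
        // Arm the hook: the next duringMerge() call fails.
        doThrow(new IOException("Injecting failure during merge"))
            .when(injector).duringMerge();
        // Stands in for the code under test reaching the hook point.
        injector.duringMerge();
    }
}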

Aggregations

ExitException (org.apache.hadoop.util.ExitUtil.ExitException): 7
Test (org.junit.Test): 6
File (java.io.File): 4
Configuration (org.apache.hadoop.conf.Configuration): 3
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 3
ByteArrayInputStream (java.io.ByteArrayInputStream): 2
InputStream (java.io.InputStream): 2
FileSystem (org.apache.hadoop.fs.FileSystem): 2
Path (org.apache.hadoop.fs.Path): 2
ByteArrayOutputStream (java.io.ByteArrayOutputStream): 1
IOException (java.io.IOException): 1
PrintStream (java.io.PrintStream): 1
URI (java.net.URI): 1
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 1
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 1
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 1
NameNode (org.apache.hadoop.hdfs.server.namenode.NameNode): 1
MetricsRecordBuilder (org.apache.hadoop.metrics2.MetricsRecordBuilder): 1