Example 11 with DFSAdmin

Use of org.apache.hadoop.hdfs.tools.DFSAdmin in project hadoop by apache.

The class TestCheckpoint, method testSaveNamespace.

/**
   * Tests save namespace.
   */
@Test
public void testSaveNamespace() throws IOException {
    MiniDFSCluster cluster = null;
    DistributedFileSystem fs = null;
    FileContext fc;
    try {
        Configuration conf = new HdfsConfiguration();
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(true).build();
        cluster.waitActive();
        fs = (cluster.getFileSystem());
        fc = FileContext.getFileContext(cluster.getURI(0));
        // Saving image without safe mode should fail
        DFSAdmin admin = new DFSAdmin(conf);
        String[] args = new String[] { "-saveNamespace" };
        try {
            admin.run(args);
        } catch (IOException eIO) {
            assertTrue(eIO.getLocalizedMessage().contains("Safe mode should be turned ON"));
        } catch (Exception e) {
            throw new IOException(e);
        }
        // create new file
        Path file = new Path("namespace.dat");
        DFSTestUtil.createFile(fs, file, fileSize, fileSize, blockSize, replication, seed);
        checkFile(fs, file, replication);
        // create new link
        Path symlink = new Path("file.link");
        fc.createSymlink(file, symlink, false);
        assertTrue(fc.getFileLinkStatus(symlink).isSymlink());
        // verify that the edits file is NOT empty
        Collection<URI> editsDirs = cluster.getNameEditsDirs(0);
        for (URI uri : editsDirs) {
            File ed = new File(uri.getPath());
            assertTrue(new File(ed, "current/" + NNStorage.getInProgressEditsFileName(1)).length() > Integer.SIZE / Byte.SIZE);
        }
        // Saving image in safe mode should succeed
        fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
        try {
            admin.run(args);
        } catch (Exception e) {
            throw new IOException(e);
        }
        // TODO: Fix the test to not require a hard-coded transaction count.
        final int EXPECTED_TXNS_FIRST_SEG = 13;
        for (URI uri : editsDirs) {
            File ed = new File(uri.getPath());
            File curDir = new File(ed, "current");
            LOG.info("Files in " + curDir + ":\n  " + Joiner.on("\n  ").join(curDir.list()));
            // Verify that the first edits file got finalized
            File originalEdits = new File(curDir, NNStorage.getInProgressEditsFileName(1));
            assertFalse(originalEdits.exists());
            File finalizedEdits = new File(curDir, NNStorage.getFinalizedEditsFileName(1, EXPECTED_TXNS_FIRST_SEG));
            GenericTestUtils.assertExists(finalizedEdits);
            assertTrue(finalizedEdits.length() > Integer.SIZE / Byte.SIZE);
            GenericTestUtils.assertExists(new File(ed, "current/" + NNStorage.getInProgressEditsFileName(EXPECTED_TXNS_FIRST_SEG + 1)));
        }
        Collection<URI> imageDirs = cluster.getNameDirs(0);
        for (URI uri : imageDirs) {
            File imageDir = new File(uri.getPath());
            File savedImage = new File(imageDir, "current/" + NNStorage.getImageFileName(EXPECTED_TXNS_FIRST_SEG));
            assertTrue("Should have saved image at " + savedImage, savedImage.exists());
        }
        // restart cluster and verify file exists
        cluster.shutdown();
        cluster = null;
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
        cluster.waitActive();
        fs = (cluster.getFileSystem());
        checkFile(fs, file, replication);
        fc = FileContext.getFileContext(cluster.getURI(0));
        assertTrue(fc.getFileLinkStatus(symlink).isSymlink());
    } finally {
        if (fs != null)
            fs.close();
        cleanup(cluster);
        cluster = null;
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), MetricsRecordBuilder (org.apache.hadoop.metrics2.MetricsRecordBuilder), IOException (java.io.IOException), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), URI (java.net.URI), Util.fileAsURI (org.apache.hadoop.hdfs.server.common.Util.fileAsURI), ExitException (org.apache.hadoop.util.ExitUtil.ExitException), ParseException (org.apache.commons.cli.ParseException), DFSAdmin (org.apache.hadoop.hdfs.tools.DFSAdmin), RandomAccessFile (java.io.RandomAccessFile), EditLogFile (org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile), NameNodeFile (org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile), File (java.io.File), FileContext (org.apache.hadoop.fs.FileContext), Test (org.junit.Test)
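
The ordering this test exercises is the important part: -saveNamespace is refused while the NameNode is outside safe mode and succeeds once safe mode is entered, after which the in-progress edit segment is finalized and a new one begins. A minimal standalone sketch of the same sequence, assuming a running cluster reachable through fs.defaultFS (the class name is mine, not part of the test):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.tools.DFSAdmin;

public class SaveNamespaceSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        // Assumes fs.defaultFS points at a running HDFS NameNode.
        DistributedFileSystem fs = (DistributedFileSystem) FileSystem.get(conf);
        DFSAdmin admin = new DFSAdmin(conf);
        // Without safe mode, -saveNamespace fails ("Safe mode should be turned ON").
        fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
        try {
            int rc = admin.run(new String[] { "-saveNamespace" });
            System.out.println("saveNamespace exit code: " + rc);
        } finally {
            // Leave safe mode so the cluster resumes normal operation.
            fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
        }
    }
}

Saving the namespace also rolls the edit log, which is what the finalizedEdits assertions in the test verify: the first segment is finalized at the expected transaction count and a fresh in-progress segment appears.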

Example 12 with DFSAdmin

Use of org.apache.hadoop.hdfs.tools.DFSAdmin in project hadoop by apache.

The class TestTools, method testDFSAdminInvalidUsageHelp.

@Test
public void testDFSAdminInvalidUsageHelp() {
    ImmutableSet<String> args = ImmutableSet.of("-report", "-saveNamespace", "-rollEdits", "-restoreFailedStorage", "-refreshNodes", "-finalizeUpgrade", "-metasave", "-refreshUserToGroupsMappings", "-printTopology", "-refreshNamenodes", "-deleteBlockPool", "-setBalancerBandwidth", "-fetchImage");
    try {
        for (String arg : args) assertTrue(ToolRunner.run(new DFSAdmin(), fillArgs(arg)) == -1);
        assertTrue(ToolRunner.run(new DFSAdmin(), new String[] { "-help", "-some" }) == 0);
    } catch (Exception e) {
        fail("testDFSAdminHelp error" + e);
    }
    String pattern = "Usage: hdfs dfsadmin";
    checkOutput(new String[] { "-cancel", "-renew" }, pattern, System.err, DFSAdmin.class);
}
Also used: DFSAdmin (org.apache.hadoop.hdfs.tools.DFSAdmin), ExitException (org.apache.hadoop.util.ExitUtil.ExitException), Test (org.junit.Test)
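
Each listed subcommand is run through ToolRunner with deliberately malformed arguments (fillArgs is a private helper of TestTools, not shown here) and is expected to fail with exit code -1, while -help returns 0 even for an unknown topic. A sketch of driving DFSAdmin the same way from your own code (the extra argument is an assumption, chosen only to make the command invalid):

import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.util.ToolRunner;

public class DfsAdminExitCodes {
    public static void main(String[] args) throws Exception {
        // A malformed command prints the usage text on stderr and returns -1.
        int bad = ToolRunner.run(new DFSAdmin(), new String[] { "-printTopology", "unexpectedArg" });
        // -help returns 0 even when the help topic is unrecognized.
        int help = ToolRunner.run(new DFSAdmin(), new String[] { "-help", "-some" });
        System.out.println("bad args: " + bad + ", help: " + help);
    }
}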

Example 13 with DFSAdmin

Use of org.apache.hadoop.hdfs.tools.DFSAdmin in project hadoop by apache.

The class TestGenericRefresh, method testMultipleRegistration.

@Test
public void testMultipleRegistration() throws Exception {
    RefreshRegistry.defaultRegistry().register("sharedId", firstHandler);
    RefreshRegistry.defaultRegistry().register("sharedId", secondHandler);
    // this should trigger both
    DFSAdmin admin = new DFSAdmin(config);
    String[] args = new String[] { "-refresh", "localhost:" + cluster.getNameNodePort(), "sharedId", "one" };
    int exitCode = admin.run(args);
    // -1 because one of the responses is unregistered
    assertEquals(-1, exitCode);
    // verify we called both
    Mockito.verify(firstHandler).handleRefresh("sharedId", new String[] { "one" });
    Mockito.verify(secondHandler).handleRefresh("sharedId", new String[] { "one" });
    RefreshRegistry.defaultRegistry().unregisterAll("sharedId");
}
Also used: DFSAdmin (org.apache.hadoop.hdfs.tools.DFSAdmin), Test (org.junit.Test)
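
The handlers in these tests are Mockito mocks, but a real handler is just an implementation of org.apache.hadoop.ipc.RefreshHandler registered under an identifier. A sketch of what a concrete handler might look like (the identifier exampleId and the class name are mine):

import org.apache.hadoop.ipc.RefreshHandler;
import org.apache.hadoop.ipc.RefreshRegistry;
import org.apache.hadoop.ipc.RefreshResponse;

public class ExampleRefreshHandler implements RefreshHandler {
    @Override
    public RefreshResponse handleRefresh(String identifier, String[] args) {
        // Invoked by: hdfs dfsadmin -refresh <host:ipc_port> exampleId [args...]
        System.out.println("refresh " + identifier + " with " + args.length + " arg(s)");
        return RefreshResponse.successResponse();
    }

    public static void register() {
        // Several handlers may share one identifier; dfsadmin invokes them all,
        // and any failing response makes the overall exit code non-zero,
        // which is why the test above sees -1.
        RefreshRegistry.defaultRegistry().register("exampleId", new ExampleRefreshHandler());
    }
}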

Example 14 with DFSAdmin

Use of org.apache.hadoop.hdfs.tools.DFSAdmin in project hadoop by apache.

The class TestGenericRefresh, method testInvalidCommand.

@Test
public void testInvalidCommand() throws Exception {
    DFSAdmin admin = new DFSAdmin(config);
    String[] args = new String[] { "-refresh", "nn" };
    int exitCode = admin.run(args);
    assertEquals("DFSAdmin should fail due to bad args", -1, exitCode);
}
Also used: DFSAdmin (org.apache.hadoop.hdfs.tools.DFSAdmin), Test (org.junit.Test)
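
For reference, the complete form of the command is -refresh <host:ipc_port> <resource_identifier> [arg1..argn]. Here only "nn" is supplied, with no port and no identifier, so DFSAdmin rejects the arguments up front and returns -1.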

Example 15 with DFSAdmin

Use of org.apache.hadoop.hdfs.tools.DFSAdmin in project hadoop by apache.

The class TestGenericRefresh, method testVariableArgs.

@Test
public void testVariableArgs() throws Exception {
    DFSAdmin admin = new DFSAdmin(config);
    String[] args = new String[] { "-refresh", "localhost:" + cluster.getNameNodePort(), "secondHandler", "one" };
    int exitCode = admin.run(args);
    assertEquals("DFSAdmin should return 2", 2, exitCode);
    exitCode = admin.run(new String[] { "-refresh", "localhost:" + cluster.getNameNodePort(), "secondHandler", "one", "two" });
    assertEquals("DFSAdmin should now return 3", 3, exitCode);
    Mockito.verify(secondHandler).handleRefresh("secondHandler", new String[] { "one" });
    Mockito.verify(secondHandler).handleRefresh("secondHandler", new String[] { "one", "two" });
}
Also used: DFSAdmin (org.apache.hadoop.hdfs.tools.DFSAdmin), Test (org.junit.Test)
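
The exit codes 2 and 3 come from the test's setUp method (not shown), which, judging from the assertions, stubs secondHandler to return RefreshResponse objects carrying those return codes. A sketch of such stubbing, assuming Mockito (the response messages are placeholders):

import org.apache.hadoop.ipc.RefreshHandler;
import org.apache.hadoop.ipc.RefreshRegistry;
import org.apache.hadoop.ipc.RefreshResponse;
import org.mockito.Mockito;

public class StubbedHandlerSetup {
    static RefreshHandler stubSecondHandler() {
        RefreshHandler handler = Mockito.mock(RefreshHandler.class);
        // The return code of the RefreshResponse becomes dfsadmin's exit code:
        // the first call yields 2, the second 3, matching the asserts above.
        Mockito.when(handler.handleRefresh(Mockito.eq("secondHandler"), Mockito.any(String[].class)))
               .thenReturn(new RefreshResponse(2, "ok"))
               .thenReturn(new RefreshResponse(3, "ok"));
        RefreshRegistry.defaultRegistry().register("secondHandler", handler);
        return handler;
    }
}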

Aggregations

DFSAdmin (org.apache.hadoop.hdfs.tools.DFSAdmin): 41
Test (org.junit.Test): 31
Configuration (org.apache.hadoop.conf.Configuration): 15
Path (org.apache.hadoop.fs.Path): 14
CoreMatchers.containsString (org.hamcrest.CoreMatchers.containsString): 9
IOException (java.io.IOException): 6
FileSystem (org.apache.hadoop.fs.FileSystem): 6
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 5
DSQuotaExceededException (org.apache.hadoop.hdfs.protocol.DSQuotaExceededException): 4
NSQuotaExceededException (org.apache.hadoop.hdfs.protocol.NSQuotaExceededException): 4
QuotaExceededException (org.apache.hadoop.hdfs.protocol.QuotaExceededException): 4
ContentSummary (org.apache.hadoop.fs.ContentSummary): 3
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 3
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 3
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 3
ByteArrayOutputStream (java.io.ByteArrayOutputStream): 2
File (java.io.File): 2
QuotaUsage (org.apache.hadoop.fs.QuotaUsage): 2
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 2
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 2