
Example 1 with DFSAdmin

Use of org.apache.hadoop.hdfs.tools.DFSAdmin in project hadoop by apache.

From the class TestClientProtocolForPipelineRecovery, method testEvictWriter.

/**
   * Test that a writer is evicted from a datanode in its pipeline
   * via the dfsadmin -evictWriters command.
   */
@Test
public void testEvictWriter() throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
        cluster.waitActive();
        FileSystem fs = cluster.getFileSystem();
        Path file = new Path("testEvictWriter.dat");
        FSDataOutputStream out = fs.create(file, (short) 2);
        out.write(0x31);
        out.hflush();
        // get nodes in the pipeline
        DFSOutputStream dfsOut = (DFSOutputStream) out.getWrappedStream();
        DatanodeInfo[] nodes = dfsOut.getPipeline();
        Assert.assertEquals(2, nodes.length);
        String dnAddr = nodes[1].getIpcAddr(false);
        // evict the writer from the second datanode and wait until
        // the pipeline is rebuilt.
        DFSAdmin dfsadmin = new DFSAdmin(conf);
        final String[] args1 = { "-evictWriters", dnAddr };
        Assert.assertEquals(0, dfsadmin.run(args1));
        out.write(0x31);
        out.hflush();
        // get the new pipeline and verify the evicted node is no longer in it.
        nodes = dfsOut.getPipeline();
        try {
            Assert.assertTrue(nodes.length > 0);
            for (int i = 0; i < nodes.length; i++) {
                Assert.assertFalse(dnAddr.equals(nodes[i].getIpcAddr(false)));
            }
        } finally {
            out.close();
        }
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) Configuration(org.apache.hadoop.conf.Configuration) FileSystem(org.apache.hadoop.fs.FileSystem) DFSAdmin(org.apache.hadoop.hdfs.tools.DFSAdmin) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Test(org.junit.Test)
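
Since DFSAdmin extends FsShell and therefore implements Hadoop's Tool interface, the same eviction can be driven from standalone code through ToolRunner, which parses generic options before delegating to DFSAdmin.run(). A minimal sketch, assuming a reachable cluster and a placeholder datanode IPC address:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.util.ToolRunner;

public class EvictWritersExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        // "dn-host:9867" is a placeholder; substitute the target datanode's
        // actual IPC address (host:port), e.g. from DatanodeInfo.getIpcAddr(false).
        int ret = ToolRunner.run(conf, new DFSAdmin(),
                new String[] { "-evictWriters", "dn-host:9867" });
        System.exit(ret);
    }
}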

Example 2 with DFSAdmin

Use of org.apache.hadoop.hdfs.tools.DFSAdmin in project hadoop by apache.

From the class TestRollingUpgrade, method testDFSAdminRollingUpgradeCommands.

/**
   * Test the DFSAdmin rolling upgrade commands.
   */
@Test
public void testDFSAdminRollingUpgradeCommands() throws Exception {
    // start a cluster
    final Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
        cluster.waitActive();
        final Path foo = new Path("/foo");
        final Path bar = new Path("/bar");
        final Path baz = new Path("/baz");
        {
            final DistributedFileSystem dfs = cluster.getFileSystem();
            final DFSAdmin dfsadmin = new DFSAdmin(conf);
            dfs.mkdirs(foo);
            // "abc" is an illegal argument to the -rollingUpgrade option
            runCmd(dfsadmin, false, "-rollingUpgrade", "abc");
            checkMxBeanIsNull();
            // query rolling upgrade
            runCmd(dfsadmin, true, "-rollingUpgrade");
            // start rolling upgrade
            dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
            runCmd(dfsadmin, true, "-rollingUpgrade", "prepare");
            dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
            // query rolling upgrade
            runCmd(dfsadmin, true, "-rollingUpgrade", "query");
            checkMxBean();
            dfs.mkdirs(bar);
            // finalize rolling upgrade
            runCmd(dfsadmin, true, "-rollingUpgrade", "finalize");
            // RollingUpgradeInfo should be null after finalization, both via
            // Java API and in JMX
            assertNull(dfs.rollingUpgrade(RollingUpgradeAction.QUERY));
            checkMxBeanIsNull();
            dfs.mkdirs(baz);
            runCmd(dfsadmin, true, "-rollingUpgrade");
            // Directories created before the upgrade, while it was in progress,
            // and after it was finalized should all still exist
            Assert.assertTrue(dfs.exists(foo));
            Assert.assertTrue(dfs.exists(bar));
            Assert.assertTrue(dfs.exists(baz));
            dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
            dfs.saveNamespace();
            dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
        }
        // Ensure directories exist after restart
        cluster.restartNameNode();
        {
            final DistributedFileSystem dfs = cluster.getFileSystem();
            Assert.assertTrue(dfs.exists(foo));
            Assert.assertTrue(dfs.exists(bar));
            Assert.assertTrue(dfs.exists(baz));
        }
    } finally {
        if (cluster != null)
            cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) DFSAdmin(org.apache.hadoop.hdfs.tools.DFSAdmin) Test(org.junit.Test)
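
The runCmd helper used above is defined elsewhere in TestRollingUpgrade and is not shown on this page. A minimal sketch of the semantics its call sites imply, assuming it only checks the command's exit code (a hypothetical reconstruction, not the actual Hadoop source):

import java.util.Arrays;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.junit.Assert;

// Run the dfsadmin command and assert that the exit code matches the
// expected outcome (0 on success, non-zero on failure).
private static void runCmd(DFSAdmin dfsadmin, boolean success, String... args)
        throws Exception {
    int ret = dfsadmin.run(args);
    if (success) {
        Assert.assertEquals("command should succeed: " + Arrays.toString(args),
                0, ret);
    } else {
        Assert.assertNotEquals("command should fail: " + Arrays.toString(args),
                0, ret);
    }
}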

Example 3 with DFSAdmin

Use of org.apache.hadoop.hdfs.tools.DFSAdmin in project hadoop by apache.

From the class TestRollingUpgrade, method testDFSAdminDatanodeUpgradeControlCommands.

@Test
public void testDFSAdminDatanodeUpgradeControlCommands() throws Exception {
    // start a cluster
    final Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        cluster.waitActive();
        final DFSAdmin dfsadmin = new DFSAdmin(conf);
        DataNode dn = cluster.getDataNodes().get(0);
        // check the datanode
        final String dnAddr = dn.getDatanodeId().getIpcAddr(false);
        final String[] args1 = { "-getDatanodeInfo", dnAddr };
        runCmd(dfsadmin, true, args1);
        // issue shutdown to the datanode.
        final String[] args2 = { "-shutdownDatanode", dnAddr, "upgrade" };
        runCmd(dfsadmin, true, args2);
        // the datanode should be down.
        GenericTestUtils.waitForThreadTermination("Async datanode shutdown thread", 100, 10000);
        Assert.assertFalse("DataNode should exit", dn.isDatanodeUp());
        // ping should fail.
        assertEquals(-1, dfsadmin.run(args1));
    } finally {
        if (cluster != null)
            cluster.shutdown();
    }
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) DFSAdmin(org.apache.hadoop.hdfs.tools.DFSAdmin) Test(org.junit.Test)
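
GenericTestUtils.waitForThreadTermination above blocks until the named async shutdown thread exits. Without that utility, the same condition could be polled directly against the DataNode handle; a minimal sketch, assuming only the isDatanodeUp() accessor already used in the test (a hypothetical helper, not part of Hadoop):

import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.junit.Assert;

// Poll until the datanode reports itself down, failing the test if the
// deadline passes first.
private static void waitForDatanodeDown(DataNode dn, long timeoutMs)
        throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (dn.isDatanodeUp()) {
        if (System.currentTimeMillis() > deadline) {
            Assert.fail("DataNode did not shut down within " + timeoutMs + " ms");
        }
        Thread.sleep(100);
    }
}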

Example 4 with DFSAdmin

Use of org.apache.hadoop.hdfs.tools.DFSAdmin in project hadoop by apache.

From the class DFSTestUtil, method DFSAdminRun.

/**
 * Run the given dfsadmin command and assert the expected return code and
 * that the command output contains the given string.
 */
public static void DFSAdminRun(String cmd, int retcode, String contain, Configuration conf) throws Exception {
    DFSAdmin admin = new DFSAdmin(new Configuration(conf));
    toolRun(admin, cmd, retcode, contain);
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) DFSAdmin(org.apache.hadoop.hdfs.tools.DFSAdmin)
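
The toolRun helper lives alongside DFSAdminRun in DFSTestUtil and is not shown here. A sketch of what such a helper plausibly does, assuming it splits the command string, captures the tool's output, and asserts on both the return code and the output (a hypothetical reconstruction):

import java.io.ByteArrayOutputStream;
import java.io.PrintStream;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.junit.Assert;

public static void toolRun(Tool tool, String cmd, int retcode, String contain)
        throws Exception {
    String[] args = cmd.split("\\s+");
    ByteArrayOutputStream captured = new ByteArrayOutputStream();
    PrintStream capturedStream = new PrintStream(captured);
    PrintStream oldOut = System.out;
    PrintStream oldErr = System.err;
    // Redirect stdout/stderr so the command's output can be inspected.
    System.setOut(capturedStream);
    System.setErr(capturedStream);
    int ret;
    try {
        ret = ToolRunner.run(tool, args);
    } finally {
        System.setOut(oldOut);
        System.setErr(oldErr);
        capturedStream.flush();
    }
    Assert.assertEquals("return code of '" + cmd + "'", retcode, ret);
    if (contain != null) {
        Assert.assertTrue("output should contain '" + contain + "'",
                captured.toString().contains(contain));
    }
}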

Example 5 with DFSAdmin

Use of org.apache.hadoop.hdfs.tools.DFSAdmin in project hadoop by apache.

From the class TestQuota, method testQuotaCommandsWithURI.

/**
   * Test all the quota commands by passing fully qualified paths.
   */
@Test(timeout = 30000)
public void testQuotaCommandsWithURI() throws Exception {
    DFSAdmin dfsAdmin = new DFSAdmin(conf);
    final Path dir = new Path("/" + this.getClass().getSimpleName(), GenericTestUtils.getMethodName());
    assertTrue(dfs.mkdirs(dir));
    /* set space quota */
    testSetAndClearSpaceQuotaRegularInternal(new String[] { "-setSpaceQuota", "1024", dfs.getUri() + "/" + dir.toString() }, dir, 0, 1024);
    /* clear space quota */
    testSetAndClearSpaceQuotaRegularInternal(new String[] { "-clrSpaceQuota", dfs.getUri() + "/" + dir.toString() }, dir, 0, -1);
    runCommand(dfsAdmin, false, "-setQuota", "1000", dfs.getUri() + "/" + dir.toString());
    runCommand(dfsAdmin, false, "-clrQuota", dfs.getUri() + "/" + dir.toString());
}
Also used : Path(org.apache.hadoop.fs.Path) DFSAdmin(org.apache.hadoop.hdfs.tools.DFSAdmin) Test(org.junit.Test)
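
The quota commands exercised above map onto DistributedFileSystem.setQuota(Path, long, long). A minimal sketch of the programmatic equivalents, assuming an initialized DistributedFileSystem dfs and an existing directory, as in the test:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;

void quotaEquivalents(DistributedFileSystem dfs, Path dir) throws Exception {
    // -setSpaceQuota 1024 <dir>: cap storage space at 1024 bytes,
    // leaving the namespace quota untouched.
    dfs.setQuota(dir, HdfsConstants.QUOTA_DONT_SET, 1024);
    // -clrSpaceQuota <dir>: reset the space quota.
    dfs.setQuota(dir, HdfsConstants.QUOTA_DONT_SET, HdfsConstants.QUOTA_RESET);
    // -setQuota 1000 <dir>: cap the number of names under dir at 1000.
    dfs.setQuota(dir, 1000, HdfsConstants.QUOTA_DONT_SET);
    // -clrQuota <dir>: reset the namespace quota.
    dfs.setQuota(dir, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_DONT_SET);
}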

Aggregations

DFSAdmin (org.apache.hadoop.hdfs.tools.DFSAdmin): 41 usages
Test (org.junit.Test): 31 usages
Configuration (org.apache.hadoop.conf.Configuration): 15 usages
Path (org.apache.hadoop.fs.Path): 14 usages
CoreMatchers.containsString (org.hamcrest.CoreMatchers.containsString): 9 usages
IOException (java.io.IOException): 6 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 6 usages
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 5 usages
DSQuotaExceededException (org.apache.hadoop.hdfs.protocol.DSQuotaExceededException): 4 usages
NSQuotaExceededException (org.apache.hadoop.hdfs.protocol.NSQuotaExceededException): 4 usages
QuotaExceededException (org.apache.hadoop.hdfs.protocol.QuotaExceededException): 4 usages
ContentSummary (org.apache.hadoop.fs.ContentSummary): 3 usages
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 3 usages
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 3 usages
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 3 usages
ByteArrayOutputStream (java.io.ByteArrayOutputStream): 2 usages
File (java.io.File): 2 usages
QuotaUsage (org.apache.hadoop.fs.QuotaUsage): 2 usages
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 2 usages
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 2 usages