
Example 21 with DFSAdmin

use of org.apache.hadoop.hdfs.tools.DFSAdmin in project hadoop by apache.

The class TestDataNodeRollingUpgrade, method startRollingUpgrade:

private void startRollingUpgrade() throws Exception {
    LOG.info("Starting rolling upgrade");
    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    final DFSAdmin dfsadmin = new DFSAdmin(conf);
    TestRollingUpgrade.runCmd(dfsadmin, true, "-rollingUpgrade", "prepare");
    triggerHeartBeats();
    // Ensure datanode rolling upgrade is started
    assertTrue(dn0.getFSDataset().trashEnabled(blockPoolId));
}
Also used: DFSAdmin (org.apache.hadoop.hdfs.tools.DFSAdmin)
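
The prepare step above has counterparts for querying and finalizing the upgrade. Below is a minimal sketch of driving all three subcommands through DFSAdmin.run() directly; the method name, the passed-in conf, and the placement of the actual upgrade work are illustrative assumptions, not part of the test above.

// Hypothetical sketch: issuing the rolling-upgrade subcommands via DFSAdmin.run().
// "conf" is assumed to point at the target cluster; run() returns 0 on success.
static void runRollingUpgrade(Configuration conf) throws Exception {
    final DFSAdmin dfsadmin = new DFSAdmin(conf);
    // Prepare creates the rollback image on the namenode.
    assertEquals(0, dfsadmin.run(new String[] { "-rollingUpgrade", "prepare" }));
    // ... upgrade and restart datanodes here ...
    // Query reports whether the prepared rollback image is ready.
    assertEquals(0, dfsadmin.run(new String[] { "-rollingUpgrade", "query" }));
    // Finalize ends the rolling upgrade once all nodes run the new version.
    assertEquals(0, dfsadmin.run(new String[] { "-rollingUpgrade", "finalize" }));
}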

Example 22 with DFSAdmin

use of org.apache.hadoop.hdfs.tools.DFSAdmin in project hadoop by apache.

The class TestGenericRefresh, method testExceptionResultsInNormalError:

@Test
public void testExceptionResultsInNormalError() throws Exception {
    // In this test, we ensure that all handlers are called even if we throw an exception in one
    RefreshHandler exceptionalHandler = Mockito.mock(RefreshHandler.class);
    Mockito.stub(exceptionalHandler.handleRefresh(Mockito.anyString(), Mockito.any(String[].class))).toThrow(new RuntimeException("Exceptional Handler Throws Exception"));
    RefreshHandler otherExceptionalHandler = Mockito.mock(RefreshHandler.class);
    Mockito.stub(otherExceptionalHandler.handleRefresh(Mockito.anyString(), Mockito.any(String[].class))).toThrow(new RuntimeException("More Exceptions"));
    RefreshRegistry.defaultRegistry().register("exceptional", exceptionalHandler);
    RefreshRegistry.defaultRegistry().register("exceptional", otherExceptionalHandler);
    DFSAdmin admin = new DFSAdmin(config);
    String[] args = new String[] { "-refresh", "localhost:" + cluster.getNameNodePort(), "exceptional" };
    int exitCode = admin.run(args);
    // Exceptions result in a -1
    assertEquals(-1, exitCode);
    Mockito.verify(exceptionalHandler).handleRefresh("exceptional", new String[] {});
    Mockito.verify(otherExceptionalHandler).handleRefresh("exceptional", new String[] {});
    RefreshRegistry.defaultRegistry().unregisterAll("exceptional");
}
Also used: DFSAdmin (org.apache.hadoop.hdfs.tools.DFSAdmin), RefreshHandler (org.apache.hadoop.ipc.RefreshHandler), Test (org.junit.Test)
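
For contrast with the exception-throwing mocks, a handler that succeeds simply returns a RefreshResponse carrying a zero return code. A minimal sketch; the identifier "example" and the message text are illustrative, not part of the test above.

// Sketch of a non-throwing handler. With a zero return code in the response,
// a matching "dfsadmin -refresh <host:port> example" call is expected to exit 0.
RefreshHandler okHandler = new RefreshHandler() {
    @Override
    public RefreshResponse handleRefresh(String identifier, String[] args) {
        return new RefreshResponse(0, "refreshed " + identifier);
    }
};
RefreshRegistry.defaultRegistry().register("example", okHandler);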

Example 23 with DFSAdmin

use of org.apache.hadoop.hdfs.tools.DFSAdmin in project hadoop by apache.

The class TestGenericRefresh, method testMultipleReturnCodeMerging:

@Test
public void testMultipleReturnCodeMerging() throws Exception {
    // Two handlers which return two non-zero values
    RefreshHandler handlerOne = Mockito.mock(RefreshHandler.class);
    Mockito.stub(handlerOne.handleRefresh(Mockito.anyString(), Mockito.any(String[].class))).toReturn(new RefreshResponse(23, "Twenty Three"));
    RefreshHandler handlerTwo = Mockito.mock(RefreshHandler.class);
    Mockito.stub(handlerTwo.handleRefresh(Mockito.anyString(), Mockito.any(String[].class))).toReturn(new RefreshResponse(10, "Ten"));
    // Then registered to the same ID
    RefreshRegistry.defaultRegistry().register("shared", handlerOne);
    RefreshRegistry.defaultRegistry().register("shared", handlerTwo);
    // We refresh both
    DFSAdmin admin = new DFSAdmin(config);
    String[] args = new String[] { "-refresh", "localhost:" + cluster.getNameNodePort(), "shared" };
    int exitCode = admin.run(args);
    // We get -1 because of our logic for melding non-zero return codes
    assertEquals(-1, exitCode);
    // Verify we called both
    Mockito.verify(handlerOne).handleRefresh("shared", new String[] {});
    Mockito.verify(handlerTwo).handleRefresh("shared", new String[] {});
    RefreshRegistry.defaultRegistry().unregisterAll("shared");
}
Also used: RefreshResponse (org.apache.hadoop.ipc.RefreshResponse), DFSAdmin (org.apache.hadoop.hdfs.tools.DFSAdmin), RefreshHandler (org.apache.hadoop.ipc.RefreshHandler), Test (org.junit.Test)
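
The merge to -1 above only applies because both return codes are non-zero. The sketch below shows the assumed success path, where every handler registered under the shared identifier returns zero; it reuses the config and cluster fixtures of the test above, and the final zero exit code is an assumption inferred from this test, not something the test itself asserts.

// Hypothetical success path: both handlers return 0 for the shared identifier.
RefreshHandler okOne = Mockito.mock(RefreshHandler.class);
Mockito.stub(okOne.handleRefresh(Mockito.anyString(), Mockito.any(String[].class))).toReturn(new RefreshResponse(0, "ok"));
RefreshHandler okTwo = Mockito.mock(RefreshHandler.class);
Mockito.stub(okTwo.handleRefresh(Mockito.anyString(), Mockito.any(String[].class))).toReturn(new RefreshResponse(0, "also ok"));
RefreshRegistry.defaultRegistry().register("shared", okOne);
RefreshRegistry.defaultRegistry().register("shared", okTwo);
int exitCode = new DFSAdmin(config).run(new String[] { "-refresh", "localhost:" + cluster.getNameNodePort(), "shared" });
// Assumption: with no non-zero codes to merge, the command exits with 0.
assertEquals(0, exitCode);
RefreshRegistry.defaultRegistry().unregisterAll("shared");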

Example 24 with DFSAdmin

use of org.apache.hadoop.hdfs.tools.DFSAdmin in project hadoop by apache.

The class TestClientProtocolForPipelineRecovery, method testPipelineRecoveryOnOOB:

/**
   * Test recovery on restart OOB message. It also tests the delivery of 
   * OOB ack originating from the primary datanode. Since there is only
   * one node in the cluster, failure of restart-recovery will fail the
   * test.
   */
@Test
public void testPipelineRecoveryOnOOB() throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.set(HdfsClientConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY, "15");
    MiniDFSCluster cluster = null;
    try {
        int numDataNodes = 1;
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
        cluster.waitActive();
        FileSystem fileSys = cluster.getFileSystem();
        Path file = new Path("dataprotocol2.dat");
        DFSTestUtil.createFile(fileSys, file, 10240L, (short) 1, 0L);
        DFSOutputStream out = (DFSOutputStream) (fileSys.append(file).getWrappedStream());
        out.write(1);
        out.hflush();
        DFSAdmin dfsadmin = new DFSAdmin(conf);
        DataNode dn = cluster.getDataNodes().get(0);
        final String dnAddr = dn.getDatanodeId().getIpcAddr(false);
        // issue shutdown to the datanode.
        final String[] args1 = { "-shutdownDatanode", dnAddr, "upgrade" };
        Assert.assertEquals(0, dfsadmin.run(args1));
        // Wait long enough to receive an OOB ack before closing the file.
        GenericTestUtils.waitForThreadTermination("Async datanode shutdown thread", 100, 10000);
        // Restart the datanode
        cluster.restartDataNode(0, true);
        // The following forces a data packet and end of block packets to be sent. 
        out.close();
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), Configuration (org.apache.hadoop.conf.Configuration), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), FileSystem (org.apache.hadoop.fs.FileSystem), DFSAdmin (org.apache.hadoop.hdfs.tools.DFSAdmin), Test (org.junit.Test)
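
After issuing "-shutdownDatanode ... upgrade", one way to confirm the datanode has actually gone down is to poll it with the "-getDatanodeInfo" subcommand, which fails once the datanode's IPC server stops answering. A minimal sketch, reusing dfsadmin and dnAddr from the test above; the polling interval is an arbitrary choice.

// Poll the datanode's IPC address until it stops responding.
final String[] probe = { "-getDatanodeInfo", dnAddr };
while (dfsadmin.run(probe) == 0) {
    // Datanode still up; wait briefly and probe again.
    Thread.sleep(2000);
}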

Example 25 with DFSAdmin

use of org.apache.hadoop.hdfs.tools.DFSAdmin in project hadoop by apache.

The class TestClientProtocolForPipelineRecovery, method testPipelineRecoveryOnRestartFailure:

/** Test restart timeout */
@Test
public void testPipelineRecoveryOnRestartFailure() throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.set(HdfsClientConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY, "5");
    MiniDFSCluster cluster = null;
    try {
        int numDataNodes = 2;
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
        cluster.waitActive();
        FileSystem fileSys = cluster.getFileSystem();
        Path file = new Path("dataprotocol3.dat");
        DFSTestUtil.createFile(fileSys, file, 10240L, (short) 2, 0L);
        DFSOutputStream out = (DFSOutputStream) (fileSys.append(file).getWrappedStream());
        out.write(1);
        out.hflush();
        DFSAdmin dfsadmin = new DFSAdmin(conf);
        DataNode dn = cluster.getDataNodes().get(0);
        final String dnAddr1 = dn.getDatanodeId().getIpcAddr(false);
        // issue shutdown to the datanode.
        final String[] args1 = { "-shutdownDatanode", dnAddr1, "upgrade" };
        Assert.assertEquals(0, dfsadmin.run(args1));
        GenericTestUtils.waitForThreadTermination("Async datanode shutdown thread", 100, 10000);
        // This should succeed without restarting the node. The restart will
        // expire and regular pipeline recovery will kick in. 
        out.close();
        // At this point there is only one node in the cluster. 
        out = (DFSOutputStream) (fileSys.append(file).getWrappedStream());
        out.write(1);
        out.hflush();
        dn = cluster.getDataNodes().get(1);
        final String dnAddr2 = dn.getDatanodeId().getIpcAddr(false);
        // issue shutdown to the datanode.
        final String[] args2 = { "-shutdownDatanode", dnAddr2, "upgrade" };
        Assert.assertEquals(0, dfsadmin.run(args2));
        GenericTestUtils.waitForThreadTermination("Async datanode shutdown thread", 100, 10000);
        try {
            // close should fail: the restarting datanode does not come back
            // within the timeout and no other datanode is left for recovery
            out.close();
            assert false;
        } catch (IOException ioe) {
            // expected
        }
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), Configuration (org.apache.hadoop.conf.Configuration), IOException (java.io.IOException), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), FileSystem (org.apache.hadoop.fs.FileSystem), DFSAdmin (org.apache.hadoop.hdfs.tools.DFSAdmin), Test (org.junit.Test)
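
For comparison, "-shutdownDatanode" also accepts the address without the optional "upgrade" argument. The sketch below assumes, based on the OOB-restart behaviour described in Example 24, that omitting the flag skips the restart OOB ack, so writers fall back to ordinary pipeline recovery instead of waiting for the node to return; dnAddr2 and dfsadmin are the same as in the test above.

// Hypothetical variant: shut the datanode down without the "upgrade" flag.
// Assumption: no restart OOB ack is sent, so clients do not wait for a restart.
final String[] plainShutdown = { "-shutdownDatanode", dnAddr2 };
Assert.assertEquals(0, dfsadmin.run(plainShutdown));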

Aggregations

DFSAdmin (org.apache.hadoop.hdfs.tools.DFSAdmin): 41 usages
Test (org.junit.Test): 31 usages
Configuration (org.apache.hadoop.conf.Configuration): 15 usages
Path (org.apache.hadoop.fs.Path): 14 usages
CoreMatchers.containsString (org.hamcrest.CoreMatchers.containsString): 9 usages
IOException (java.io.IOException): 6 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 6 usages
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 5 usages
DSQuotaExceededException (org.apache.hadoop.hdfs.protocol.DSQuotaExceededException): 4 usages
NSQuotaExceededException (org.apache.hadoop.hdfs.protocol.NSQuotaExceededException): 4 usages
QuotaExceededException (org.apache.hadoop.hdfs.protocol.QuotaExceededException): 4 usages
ContentSummary (org.apache.hadoop.fs.ContentSummary): 3 usages
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 3 usages
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 3 usages
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 3 usages
ByteArrayOutputStream (java.io.ByteArrayOutputStream): 2 usages
File (java.io.File): 2 usages
QuotaUsage (org.apache.hadoop.fs.QuotaUsage): 2 usages
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 2 usages
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 2 usages