Example 36 with DFSAdmin

Use of org.apache.hadoop.hdfs.tools.DFSAdmin in project hadoop by apache: class TestGenericRefresh, method testMultipleReturnCodeMerging.

@Test
public void testMultipleReturnCodeMerging() throws Exception {
    // Two handlers which return two non-zero values
    RefreshHandler handlerOne = Mockito.mock(RefreshHandler.class);
    Mockito.stub(handlerOne.handleRefresh(Mockito.anyString(), Mockito.any(String[].class))).toReturn(new RefreshResponse(23, "Twenty Three"));
    RefreshHandler handlerTwo = Mockito.mock(RefreshHandler.class);
    Mockito.stub(handlerTwo.handleRefresh(Mockito.anyString(), Mockito.any(String[].class))).toReturn(new RefreshResponse(10, "Ten"));
    // Then registered to the same ID
    RefreshRegistry.defaultRegistry().register("shared", handlerOne);
    RefreshRegistry.defaultRegistry().register("shared", handlerTwo);
    // We refresh both
    DFSAdmin admin = new DFSAdmin(config);
    String[] args = new String[] { "-refresh", "localhost:" + cluster.getNameNodePort(), "shared" };
    int exitCode = admin.run(args);
    // We get -1 because of our logic for melding non-zero return codes
    assertEquals(-1, exitCode);
    // Verify we called both
    Mockito.verify(handlerOne).handleRefresh("shared", new String[] {});
    Mockito.verify(handlerTwo).handleRefresh("shared", new String[] {});
    RefreshRegistry.defaultRegistry().unregisterAll("shared");
}
Also used : RefreshResponse(org.apache.hadoop.ipc.RefreshResponse) DFSAdmin(org.apache.hadoop.hdfs.tools.DFSAdmin) RefreshHandler(org.apache.hadoop.ipc.RefreshHandler) Test(org.junit.Test)
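
For context, a concrete (non-mocked) handler could look like the sketch below. It is not taken from the Hadoop sources: the class name, log message, and success code are illustrative, while the RefreshHandler, RefreshResponse, and RefreshRegistry calls are the same ones the test exercises.

import org.apache.hadoop.ipc.RefreshHandler;
import org.apache.hadoop.ipc.RefreshRegistry;
import org.apache.hadoop.ipc.RefreshResponse;

// Minimal sketch of a handler reachable via "dfsadmin -refresh <host:port> shared".
public class LoggingRefreshHandler implements RefreshHandler {
    @Override
    public RefreshResponse handleRefresh(String identifier, String[] args) {
        System.out.println("Refreshing " + identifier + " with " + args.length + " extra args");
        // A zero return code signals success. When several handlers share one
        // identifier and any of them returns non-zero, DFSAdmin melds the codes
        // into -1, which is what the test above asserts.
        return new RefreshResponse(0, "refreshed");
    }

    public static void main(String[] args) {
        // Handlers are looked up by identifier when the admin command arrives.
        RefreshRegistry.defaultRegistry().register("shared", new LoggingRefreshHandler());
    }
}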

Example 37 with DFSAdmin

Use of org.apache.hadoop.hdfs.tools.DFSAdmin in project hadoop by apache: class TestClientProtocolForPipelineRecovery, method testPipelineRecoveryOnOOB.

/**
   * Test recovery on restart OOB message. It also tests the delivery of 
   * OOB ack originating from the primary datanode. Since there is only
   * one node in the cluster, failure of restart-recovery will fail the
   * test.
   */
@Test
public void testPipelineRecoveryOnOOB() throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.set(HdfsClientConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY, "15");
    MiniDFSCluster cluster = null;
    try {
        int numDataNodes = 1;
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
        cluster.waitActive();
        FileSystem fileSys = cluster.getFileSystem();
        Path file = new Path("dataprotocol2.dat");
        DFSTestUtil.createFile(fileSys, file, 10240L, (short) 1, 0L);
        DFSOutputStream out = (DFSOutputStream) (fileSys.append(file).getWrappedStream());
        out.write(1);
        out.hflush();
        DFSAdmin dfsadmin = new DFSAdmin(conf);
        DataNode dn = cluster.getDataNodes().get(0);
        final String dnAddr = dn.getDatanodeId().getIpcAddr(false);
        // issue shutdown to the datanode.
        final String[] args1 = { "-shutdownDatanode", dnAddr, "upgrade" };
        Assert.assertEquals(0, dfsadmin.run(args1));
        // Wait long enough to receive an OOB ack before closing the file.
        GenericTestUtils.waitForThreadTermination("Async datanode shutdown thread", 100, 10000);
        // Restart the datanode
        cluster.restartDataNode(0, true);
        // The following forces a data packet and end of block packets to be sent. 
        out.close();
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) FileSystem(org.apache.hadoop.fs.FileSystem) DFSAdmin(org.apache.hadoop.hdfs.tools.DFSAdmin) Test(org.junit.Test)
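
Outside a MiniDFSCluster, the same OOB-based shutdown can be issued with the stand-alone tool. The sketch below is illustrative, not part of the test: it assumes the cluster configuration is on the classpath, and the datanode IPC address is a placeholder.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.util.ToolRunner;

public class ShutdownDatanodeForUpgrade {
    public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        // The "upgrade" argument asks the datanode to send an OOB ack on open
        // pipelines before stopping, so writers wait for it to come back
        // instead of triggering pipeline recovery immediately.
        int rc = ToolRunner.run(conf, new DFSAdmin(conf),
                new String[] { "-shutdownDatanode", "dn-host:9867", "upgrade" });
        System.exit(rc);
    }
}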

Example 38 with DFSAdmin

Use of org.apache.hadoop.hdfs.tools.DFSAdmin in project hadoop by apache: class TestClientProtocolForPipelineRecovery, method testPipelineRecoveryOnRestartFailure.

/** Test restart timeout */
@Test
public void testPipelineRecoveryOnRestartFailure() throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.set(HdfsClientConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY, "5");
    MiniDFSCluster cluster = null;
    try {
        int numDataNodes = 2;
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
        cluster.waitActive();
        FileSystem fileSys = cluster.getFileSystem();
        Path file = new Path("dataprotocol3.dat");
        DFSTestUtil.createFile(fileSys, file, 10240L, (short) 2, 0L);
        DFSOutputStream out = (DFSOutputStream) (fileSys.append(file).getWrappedStream());
        out.write(1);
        out.hflush();
        DFSAdmin dfsadmin = new DFSAdmin(conf);
        DataNode dn = cluster.getDataNodes().get(0);
        final String dnAddr1 = dn.getDatanodeId().getIpcAddr(false);
        // issue shutdown to the datanode.
        final String[] args1 = { "-shutdownDatanode", dnAddr1, "upgrade" };
        Assert.assertEquals(0, dfsadmin.run(args1));
        GenericTestUtils.waitForThreadTermination("Async datanode shutdown thread", 100, 10000);
        // This should succeed without restarting the node. The restart will
        // expire and regular pipeline recovery will kick in. 
        out.close();
        // At this point there is only one node in the cluster. 
        out = (DFSOutputStream) (fileSys.append(file).getWrappedStream());
        out.write(1);
        out.hflush();
        dn = cluster.getDataNodes().get(1);
        final String dnAddr2 = dn.getDatanodeId().getIpcAddr(false);
        // issue shutdown to the datanode.
        final String[] args2 = { "-shutdownDatanode", dnAddr2, "upgrade" };
        Assert.assertEquals(0, dfsadmin.run(args2));
        GenericTestUtils.waitForThreadTermination("Async datanode shutdown thread", 100, 10000);
        try {
            // close should fail
            out.close();
            assert false;
        } catch (IOException ioe) {
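            // expected: with the last datanode gone, pipeline recovery cannot complete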
        }
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) IOException(java.io.IOException) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) FileSystem(org.apache.hadoop.fs.FileSystem) DFSAdmin(org.apache.hadoop.hdfs.tools.DFSAdmin) Test(org.junit.Test)
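
The restart window the two tests above rely on is the client-side setting HdfsClientConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY. A minimal writer-side sketch follows; the file path and the 60-second value are illustrative, not taken from the test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

public class AppendWithRestartTolerance {
    public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        // How long a writer waits for a datanode that announced a restart
        // before giving up and running ordinary pipeline recovery.
        conf.set(HdfsClientConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY, "60");
        try (FileSystem fs = FileSystem.get(conf);
             FSDataOutputStream out = fs.append(new Path("/logs/app.log"))) {
            out.write(1);
            out.hflush();
        }
    }
}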

Example 39 with DFSAdmin

Use of org.apache.hadoop.hdfs.tools.DFSAdmin in project hadoop by apache: class TestBalancerBandwidth, method testBalancerBandwidth.

@Test
public void testBalancerBandwidth() throws Exception {
    /* Set bandwidthPerSec to a low value of 1M bps. */
    conf.setLong(DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY, DEFAULT_BANDWIDTH);
    /* Create and start cluster */
    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_OF_DATANODES).build()) {
        cluster.waitActive();
        DistributedFileSystem fs = cluster.getFileSystem();
        ArrayList<DataNode> datanodes = cluster.getDataNodes();
        // Ensure value from the configuration is reflected in the datanodes.
        assertEquals(DEFAULT_BANDWIDTH, (long) datanodes.get(0).getBalancerBandwidth());
        assertEquals(DEFAULT_BANDWIDTH, (long) datanodes.get(1).getBalancerBandwidth());
        DFSAdmin admin = new DFSAdmin(conf);
        String dn1Address = datanodes.get(0).ipcServer.getListenerAddress().getHostName() + ":" + datanodes.get(0).getIpcPort();
        String dn2Address = datanodes.get(1).ipcServer.getListenerAddress().getHostName() + ":" + datanodes.get(1).getIpcPort();
        // verifies the dfsadmin command execution
        String[] args = new String[] { "-getBalancerBandwidth", dn1Address };
        runGetBalancerBandwidthCmd(admin, args, DEFAULT_BANDWIDTH);
        args = new String[] { "-getBalancerBandwidth", dn2Address };
        runGetBalancerBandwidthCmd(admin, args, DEFAULT_BANDWIDTH);
        // Dynamically change balancer bandwidth and ensure the updated value
        // is reflected on the datanodes.
        // 12M bps
        long newBandwidth = 12 * DEFAULT_BANDWIDTH;
        fs.setBalancerBandwidth(newBandwidth);
        verifyBalancerBandwidth(datanodes, newBandwidth);
        // verifies the dfsadmin command execution
        args = new String[] { "-getBalancerBandwidth", dn1Address };
        runGetBalancerBandwidthCmd(admin, args, newBandwidth);
        args = new String[] { "-getBalancerBandwidth", dn2Address };
        runGetBalancerBandwidthCmd(admin, args, newBandwidth);
        // Dynamically change balancer bandwidth to 0. Balancer bandwidth on the
        // datanodes should remain as it was.
        fs.setBalancerBandwidth(0);
        verifyBalancerBandwidth(datanodes, newBandwidth);
        // verifies the dfsadmin command execution
        args = new String[] { "-getBalancerBandwidth", dn1Address };
        runGetBalancerBandwidthCmd(admin, args, newBandwidth);
        args = new String[] { "-getBalancerBandwidth", dn2Address };
        runGetBalancerBandwidthCmd(admin, args, newBandwidth);
    }
}
Also used : DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) DFSAdmin(org.apache.hadoop.hdfs.tools.DFSAdmin) Test(org.junit.Test)
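
A non-test sketch of the same two operations is below. The 50 MB/s figure and the datanode IPC address are placeholders, and the process is assumed to have the cluster's configuration on its classpath.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.tools.DFSAdmin;

public class RaiseBalancerBandwidth {
    public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
        // Push a new per-datanode balancer limit to all live datanodes.
        dfs.setBalancerBandwidth(50L * 1024 * 1024);
        // Read the effective value back from a single datanode's IPC endpoint.
        int rc = new DFSAdmin(conf).run(
                new String[] { "-getBalancerBandwidth", "dn-host:9867" });
        System.exit(rc);
    }
}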

Example 40 with DFSAdmin

Use of org.apache.hadoop.hdfs.tools.DFSAdmin in project hadoop by apache: class TestDFSShell, method testInvalidShell.

/**
   * The default filesystem is file://, which is not a DFS, so DFSAdmin
   * should throw and catch an IllegalArgumentException and return a -1
   * exit code.
   * @throws Exception
   */
@Test(timeout = 30000)
public void testInvalidShell() throws Exception {
    // default FS (non-DFS)
    Configuration conf = new Configuration();
    DFSAdmin admin = new DFSAdmin();
    admin.setConf(conf);
    int res = admin.run(new String[] { "-refreshNodes" });
    assertEquals("expected to fail with -1", -1, res);
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) DFSAdmin(org.apache.hadoop.hdfs.tools.DFSAdmin) Test(org.junit.Test)
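
For contrast, the working counterpart only needs the default filesystem to point at HDFS. The namenode URI below is a placeholder.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.hdfs.tools.DFSAdmin;

public class RefreshNodesAgainstHdfs {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, "hdfs://namenode-host:8020");
        DFSAdmin admin = new DFSAdmin();
        admin.setConf(conf);
        // 0 on success; -1 when the configured filesystem is not HDFS.
        System.exit(admin.run(new String[] { "-refreshNodes" }));
    }
}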

Aggregations

DFSAdmin (org.apache.hadoop.hdfs.tools.DFSAdmin) 41
Test (org.junit.Test) 31
Configuration (org.apache.hadoop.conf.Configuration) 15
Path (org.apache.hadoop.fs.Path) 14
CoreMatchers.containsString (org.hamcrest.CoreMatchers.containsString) 9
IOException (java.io.IOException) 6
FileSystem (org.apache.hadoop.fs.FileSystem) 6
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode) 5
DSQuotaExceededException (org.apache.hadoop.hdfs.protocol.DSQuotaExceededException) 4
NSQuotaExceededException (org.apache.hadoop.hdfs.protocol.NSQuotaExceededException) 4
QuotaExceededException (org.apache.hadoop.hdfs.protocol.QuotaExceededException) 4
ContentSummary (org.apache.hadoop.fs.ContentSummary) 3
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream) 3
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem) 3
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster) 3
ByteArrayOutputStream (java.io.ByteArrayOutputStream) 2
File (java.io.File) 2
QuotaUsage (org.apache.hadoop.fs.QuotaUsage) 2
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration) 2
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo) 2