Search in sources:

Example 16 with DFSAdmin

Use of org.apache.hadoop.hdfs.tools.DFSAdmin in the Apache Hadoop project.

From the class TestGenericRefresh, method testUnregistration.

@Test
public void testUnregistration() throws Exception {
    // Drop every handler registered under "firstHandler"; a subsequent
    // refresh naming that identifier must then be rejected.
    RefreshRegistry.defaultRegistry().unregisterAll("firstHandler");
    final DFSAdmin dfsAdmin = new DFSAdmin(config);
    final String hostPort = "localhost:" + cluster.getNameNodePort();
    final int exitCode = dfsAdmin.run(new String[] { "-refresh", hostPort, "firstHandler" });
    assertEquals("DFSAdmin should return -1", -1, exitCode);
}
Also used : DFSAdmin(org.apache.hadoop.hdfs.tools.DFSAdmin) Test(org.junit.Test)

Example 17 with DFSAdmin

Use of org.apache.hadoop.hdfs.tools.DFSAdmin in the Apache Hadoop project.

From the class TestRefreshCallQueue, method testRefresh.

@Test
public void testRefresh() throws Exception {
    // Precondition: the MockQueue installed by the test setup is live
    // and currently receiving puts.
    assertTrue("Mock queue should have been constructed", mockQueueConstructions > 0);
    assertTrue("Puts are routed through MockQueue", canPutInMockQueue());
    final int constructionsBeforeRefresh = mockQueueConstructions;
    // -refreshCallQueue swaps in the queue configured in core-site.xml
    // (the LinkedBlockingQueue), so no new MockQueue instances should appear.
    final DFSAdmin dfsAdmin = new DFSAdmin(config);
    final int exitCode = dfsAdmin.run(new String[] { "-refreshCallQueue" });
    assertEquals("DFSAdmin should return 0", 0, exitCode);
    assertEquals("Mock queue should have no additional constructions", constructionsBeforeRefresh, mockQueueConstructions);
    // After the refresh, puts must no longer reach the MockQueue; an
    // IOException here would mean puts are failing entirely.
    try {
        assertFalse("Puts are routed through LBQ instead of MockQueue", canPutInMockQueue());
    } catch (IOException ioe) {
        fail("Could not put into queue at all");
    }
}
Also used : DFSAdmin(org.apache.hadoop.hdfs.tools.DFSAdmin) IOException(java.io.IOException) Test(org.junit.Test)

Example 18 with DFSAdmin

Use of org.apache.hadoop.hdfs.tools.DFSAdmin in the Apache Hadoop project.

From the class TestGenericRefresh, method testInvalidIdentifier.

@Test
public void testInvalidIdentifier() throws Exception {
    // Refreshing an identifier that no handler ever registered must fail.
    final DFSAdmin dfsAdmin = new DFSAdmin(config);
    final String hostPort = "localhost:" + cluster.getNameNodePort();
    final int exitCode = dfsAdmin.run(new String[] { "-refresh", hostPort, "unregisteredIdentity" });
    assertEquals("DFSAdmin should fail due to no handler registered", -1, exitCode);
}
Also used : DFSAdmin(org.apache.hadoop.hdfs.tools.DFSAdmin) Test(org.junit.Test)

Example 19 with DFSAdmin

Use of org.apache.hadoop.hdfs.tools.DFSAdmin in the Apache Hadoop project.

From the class TestGenericRefresh, method testValidIdentifier.

@Test
public void testValidIdentifier() throws Exception {
    final DFSAdmin dfsAdmin = new DFSAdmin(config);
    final String hostPort = "localhost:" + cluster.getNameNodePort();
    // Refreshing a registered identifier succeeds and must reach only
    // the handler registered for it.
    final int exitCode = dfsAdmin.run(new String[] { "-refresh", hostPort, "firstHandler" });
    assertEquals("DFSAdmin should succeed", 0, exitCode);
    Mockito.verify(firstHandler).handleRefresh("firstHandler", new String[] {});
    // Second handler was never called
    Mockito.verify(secondHandler, Mockito.never()).handleRefresh(Mockito.anyString(), Mockito.any(String[].class));
}
Also used : DFSAdmin(org.apache.hadoop.hdfs.tools.DFSAdmin) Test(org.junit.Test)

Example 20 with DFSAdmin

Use of org.apache.hadoop.hdfs.tools.DFSAdmin in the Apache Hadoop project.

From the class TestDeleteBlockPool, method testDfsAdminDeleteBlockPool.

@Test
public void testDfsAdminDeleteBlockPool() throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = null;
    try {
        // Federated mini-cluster: two nameservices sharing one datanode,
        // so the datanode initially serves two block pools.
        conf.set(DFSConfigKeys.DFS_NAMESERVICES, "namesServerId1,namesServerId2");
        cluster = new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleFederatedTopology(conf.get(DFSConfigKeys.DFS_NAMESERVICES))).numDataNodes(1).build();
        cluster.waitActive();
        FileSystem fs1 = cluster.getFileSystem(0);
        FileSystem fs2 = cluster.getFileSystem(1);
        // Write one file per namespace so both block pools hold data.
        DFSTestUtil.createFile(fs1, new Path("/alpha"), 1024, (short) 1, 54);
        DFSTestUtil.createFile(fs2, new Path("/beta"), 1024, (short) 1, 54);
        DataNode dn1 = cluster.getDataNodes().get(0);
        String bpid1 = cluster.getNamesystem(0).getBlockPoolId();
        String bpid2 = cluster.getNamesystem(1).getBlockPoolId();
        Configuration nn1Conf = cluster.getConfiguration(0);
        // Narrow the datanode to the first nameservice only; bpid2 is no
        // longer served by dn1 after the refresh (getAllBpOs drops to 1).
        nn1Conf.set(DFSConfigKeys.DFS_NAMESERVICES, "namesServerId1");
        dn1.refreshNamenodes(nn1Conf);
        assertEquals(1, dn1.getAllBpOs().size());
        DFSAdmin admin = new DFSAdmin(nn1Conf);
        String dn1Address = dn1.getDatanodeId().getIpAddr() + ":" + dn1.getIpcPort();
        // Without the "force" argument the delete is refused (non-zero
        // exit) and the block pool remains on disk.
        String[] args = { "-deleteBlockPool", dn1Address, bpid2 };
        int ret = admin.run(args);
        assertFalse(0 == ret);
        cluster.getFsDatasetTestUtils(0).verifyBlockPoolExists(bpid2);
        // With "force" the delete succeeds and bpid2's storage is removed.
        String[] forceArgs = { "-deleteBlockPool", dn1Address, bpid2, "force" };
        ret = admin.run(forceArgs);
        assertEquals(0, ret);
        cluster.getFsDatasetTestUtils(0).verifyBlockPoolMissing(bpid2);
        //bpid1 remains good
        cluster.getFsDatasetTestUtils(0).verifyBlockPoolExists(bpid1);
    } finally {
        // Always tear the mini-cluster down, even on assertion failure.
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) FileSystem(org.apache.hadoop.fs.FileSystem) DFSAdmin(org.apache.hadoop.hdfs.tools.DFSAdmin) Test(org.junit.Test)

Aggregations

DFSAdmin (org.apache.hadoop.hdfs.tools.DFSAdmin)41 Test (org.junit.Test)31 Configuration (org.apache.hadoop.conf.Configuration)15 Path (org.apache.hadoop.fs.Path)14 CoreMatchers.containsString (org.hamcrest.CoreMatchers.containsString)9 IOException (java.io.IOException)6 FileSystem (org.apache.hadoop.fs.FileSystem)6 DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode)5 DSQuotaExceededException (org.apache.hadoop.hdfs.protocol.DSQuotaExceededException)4 NSQuotaExceededException (org.apache.hadoop.hdfs.protocol.NSQuotaExceededException)4 QuotaExceededException (org.apache.hadoop.hdfs.protocol.QuotaExceededException)4 ContentSummary (org.apache.hadoop.fs.ContentSummary)3 FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream)3 DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem)3 MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster)3 ByteArrayOutputStream (java.io.ByteArrayOutputStream)2 File (java.io.File)2 QuotaUsage (org.apache.hadoop.fs.QuotaUsage)2 HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration)2 DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo)2