
Example 26 with DFSAdmin

use of org.apache.hadoop.hdfs.tools.DFSAdmin in project hadoop by apache.

the class TestQuota method testSetAndClearSpaceQuotaRegularInternal.

private void testSetAndClearSpaceQuotaRegularInternal(final String[] args, final Path dir, final int cmdRet, final int spaceQuota) throws Exception {
    resetStream();
    final DFSAdmin dfsAdmin = new DFSAdmin(conf);
    final List<String> outs = Lists.newArrayList();
    final int ret = ToolRunner.run(dfsAdmin, args);
    assertEquals(cmdRet, ret);
    final QuotaUsage quotaUsage = dfs.getQuotaUsage(dir);
    assertEquals(spaceQuota, quotaUsage.getSpaceQuota());
    scanIntoList(OUT_STREAM, outs);
    assertTrue("There should be no output if it runs successfully.", outs.isEmpty());
}
Also used : DFSAdmin(org.apache.hadoop.hdfs.tools.DFSAdmin) CoreMatchers.containsString(org.hamcrest.CoreMatchers.containsString) QuotaUsage(org.apache.hadoop.fs.QuotaUsage)
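For reference, callers of this helper pass ordinary DFSAdmin quota arguments. A minimal sketch of plausible invocations (the argument arrays and the /quota-dir path below are illustrative assumptions, not taken from the test's actual callers):

// Hypothetical invocations of the helper above; values are assumptions.
// Set a 1 MB space quota and expect a zero exit code and that quota value:
testSetAndClearSpaceQuotaRegularInternal(
    new String[] { "-setSpaceQuota", "1m", "/quota-dir" },
    new Path("/quota-dir"), 0, 1024 * 1024);
// Clear it again and expect the quota to read back as unset (-1):
testSetAndClearSpaceQuotaRegularInternal(
    new String[] { "-clrSpaceQuota", "/quota-dir" },
    new Path("/quota-dir"), 0, -1);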

Example 27 with DFSAdmin

use of org.apache.hadoop.hdfs.tools.DFSAdmin in project hadoop by apache.

the class TestQuota method testSetAndClearSpaceQuotaByStorageTypeInternal.

private void testSetAndClearSpaceQuotaByStorageTypeInternal(final String[] args, final Path dir, final int cmdRet, final int spaceQuota, final int spaceQuotaByStorageType) throws Exception {
    resetStream();
    final DFSAdmin dfsAdmin = new DFSAdmin(conf);
    final List<String> outs = Lists.newArrayList();
    final int ret = ToolRunner.run(dfsAdmin, args);
    assertEquals(cmdRet, ret);
    final QuotaUsage quotaUsage = dfs.getQuotaUsage(dir);
    assertEquals(spaceQuota, quotaUsage.getSpaceQuota());
    assertEquals(spaceQuotaByStorageType, quotaUsage.getTypeQuota(StorageType.DISK));
    scanIntoList(OUT_STREAM, outs);
    assertTrue("There should be no output if it runs successfully.", outs.isEmpty());
}
Also used : DFSAdmin(org.apache.hadoop.hdfs.tools.DFSAdmin) CoreMatchers.containsString(org.hamcrest.CoreMatchers.containsString) QuotaUsage(org.apache.hadoop.fs.QuotaUsage)
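The storage-type variant exercises DFSAdmin's -storageType flag. A plausible, purely illustrative invocation (assuming no overall space quota has been set on the directory, so only the DISK type quota changes):

// Hypothetical invocation; expects the overall space quota to stay unset (-1)
// while the DISK type quota becomes 2 MB.
testSetAndClearSpaceQuotaByStorageTypeInternal(
    new String[] { "-setSpaceQuota", "2m", "-storageType", "DISK", "/quota-dir" },
    new Path("/quota-dir"), 0, -1, 2 * 1024 * 1024);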

Example 28 with DFSAdmin

use of org.apache.hadoop.hdfs.tools.DFSAdmin in project hadoop by apache.

the class TestRollingUpgradeRollback method testRollbackWithQJM.

@Test
public void testRollbackWithQJM() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    MiniJournalCluster mjc = null;
    MiniDFSCluster cluster = null;
    final Path foo = new Path("/foo");
    final Path bar = new Path("/bar");
    try {
        mjc = new MiniJournalCluster.Builder(conf).numJournalNodes(NUM_JOURNAL_NODES).build();
        mjc.waitActive();
        conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, mjc.getQuorumJournalURI(JOURNAL_ID).toString());
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
        cluster.waitActive();
        DistributedFileSystem dfs = cluster.getFileSystem();
        final DFSAdmin dfsadmin = new DFSAdmin(conf);
        dfs.mkdirs(foo);
        // start rolling upgrade
        dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
        Assert.assertEquals(0, dfsadmin.run(new String[] { "-rollingUpgrade", "prepare" }));
        dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
        // create new directory
        dfs.mkdirs(bar);
        dfs.close();
        // rollback
        cluster.restartNameNode("-rollingUpgrade", "rollback");
        // make sure /foo is still there, but /bar is not
        dfs = cluster.getFileSystem();
        Assert.assertTrue(dfs.exists(foo));
        Assert.assertFalse(dfs.exists(bar));
        // check storage in JNs
        for (int i = 0; i < NUM_JOURNAL_NODES; i++) {
            File dir = mjc.getCurrentDir(i, JOURNAL_ID);
            // segments:(startSegment, mkdir, endSegment), (startSegment, upgrade
            // marker, mkdir, endSegment)
            checkJNStorage(dir, 4, 7);
        }
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
        if (mjc != null) {
            mjc.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) MiniJournalCluster(org.apache.hadoop.hdfs.qjournal.MiniJournalCluster) DFSAdmin(org.apache.hadoop.hdfs.tools.DFSAdmin) File(java.io.File) Test(org.junit.Test)
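The same subcommand family covers the full rolling-upgrade lifecycle from the client side. A minimal sketch (assuming a running cluster and the same conf as in the test above; exit code 0 means the NameNode accepted the request):

final DFSAdmin dfsadmin = new DFSAdmin(conf);
// Ask the NameNode to prepare a rollback image.
assertEquals(0, dfsadmin.run(new String[] { "-rollingUpgrade", "prepare" }));
// Report the current rolling-upgrade status (e.g. whether the rollback image is ready).
assertEquals(0, dfsadmin.run(new String[] { "-rollingUpgrade", "query" }));
// Once every node has been upgraded, make the upgrade permanent.
assertEquals(0, dfsadmin.run(new String[] { "-rollingUpgrade", "finalize" }));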

Example 29 with DFSAdmin

use of org.apache.hadoop.hdfs.tools.DFSAdmin in project hadoop by apache.

the class TestDataNodeRollingUpgrade method finalizeRollingUpgrade.

private void finalizeRollingUpgrade() throws Exception {
    LOG.info("Finalizing rolling upgrade");
    final DFSAdmin dfsadmin = new DFSAdmin(conf);
    TestRollingUpgrade.runCmd(dfsadmin, true, "-rollingUpgrade", "finalize");
    triggerHeartBeats();
    // Ensure datanode rolling upgrade is started
    assertFalse(dn0.getFSDataset().trashEnabled(blockPoolId));
    BlockPoolSliceStorage bps = dn0.getStorage().getBPStorage(blockPoolId);
    assertFalse(bps.trashEnabled());
}
Also used : DFSAdmin(org.apache.hadoop.hdfs.tools.DFSAdmin)
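TestRollingUpgrade.runCmd is not shown in this excerpt; a hypothetical helper of that shape (name, signature, and behavior here are assumptions, not the project's actual code) could simply run the command and assert on its exit code:

// Hypothetical sketch of a runCmd-style helper (not the Hadoop implementation).
private static void runCmd(DFSAdmin dfsadmin, boolean success, String... args) throws Exception {
    final int ret = dfsadmin.run(args);
    if (success) {
        assertEquals("command should succeed: " + Arrays.toString(args), 0, ret);
    } else {
        assertNotEquals("command should fail: " + Arrays.toString(args), 0, ret);
    }
}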

Example 30 with DFSAdmin

use of org.apache.hadoop.hdfs.tools.DFSAdmin in project hadoop by apache.

the class TestDecommissioningStatus method testDecommissionStatus.

/**
   * Tests Decommissioning Status in DFS.
   */
@Test
public void testDecommissionStatus() throws Exception {
    InetSocketAddress addr = new InetSocketAddress("localhost", cluster.getNameNodePort());
    DFSClient client = new DFSClient(addr, conf);
    DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
    assertEquals("Number of Datanodes ", 2, info.length);
    DistributedFileSystem fileSys = cluster.getFileSystem();
    DFSAdmin admin = new DFSAdmin(cluster.getConfiguration(0));
    short replicas = numDatanodes;
    //
    // Decommission one node. Verify the decommission status
    //
    Path file1 = new Path("decommission.dat");
    DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize, replicas, seed);
    Path file2 = new Path("decommission1.dat");
    FSDataOutputStream st1 = AdminStatesBaseTest.writeIncompleteFile(fileSys, file2, replicas, (short) (fileSize / blockSize));
    for (DataNode d : cluster.getDataNodes()) {
        DataNodeTestUtils.triggerBlockReport(d);
    }
    FSNamesystem fsn = cluster.getNamesystem();
    final DatanodeManager dm = fsn.getBlockManager().getDatanodeManager();
    for (int iteration = 0; iteration < numDatanodes; iteration++) {
        String downnode = decommissionNode(client, iteration);
        dm.refreshNodes(conf);
        decommissionedNodes.add(downnode);
        BlockManagerTestUtil.recheckDecommissionState(dm);
        final List<DatanodeDescriptor> decommissioningNodes = dm.getDecommissioningNodes();
        if (iteration == 0) {
            assertEquals(1, decommissioningNodes.size());
            DatanodeDescriptor decommNode = decommissioningNodes.get(0);
            checkDecommissionStatus(decommNode, 3, 0, 1);
            checkDFSAdminDecommissionStatus(decommissioningNodes.subList(0, 1), fileSys, admin);
        } else {
            assertEquals(2, decommissioningNodes.size());
            DatanodeDescriptor decommNode1 = decommissioningNodes.get(0);
            DatanodeDescriptor decommNode2 = decommissioningNodes.get(1);
            // This one is still 3,3,1 since it passed over the UC block 
            // earlier, before node 2 was decommed
            checkDecommissionStatus(decommNode1, 3, 3, 1);
            // This one is 4,4,2 since it has the full state
            checkDecommissionStatus(decommNode2, 4, 4, 2);
            checkDFSAdminDecommissionStatus(decommissioningNodes.subList(0, 2), fileSys, admin);
        }
    }
    // Call refreshNodes on FSNamesystem with empty exclude file.
    // This will remove the datanodes from decommissioning list and
    // make them available again.
    hostsFileWriter.initExcludeHost("");
    dm.refreshNodes(conf);
    st1.close();
    AdminStatesBaseTest.cleanupFile(fileSys, file1);
    AdminStatesBaseTest.cleanupFile(fileSys, file2);
}
Also used : DFSClient(org.apache.hadoop.hdfs.DFSClient) Path(org.apache.hadoop.fs.Path) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) InetSocketAddress(java.net.InetSocketAddress) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor) DatanodeManager(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) DFSAdmin(org.apache.hadoop.hdfs.tools.DFSAdmin) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Test(org.junit.Test) AdminStatesBaseTest(org.apache.hadoop.hdfs.AdminStatesBaseTest)
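checkDFSAdminDecommissionStatus (not shown in this excerpt) inspects DFSAdmin report output; the same view is available directly through the -report subcommand. A minimal sketch (assuming the running MiniDFSCluster from this test):

final DFSAdmin reportAdmin = new DFSAdmin(cluster.getConfiguration(0));
// Limit the datanode report to nodes currently being decommissioned;
// exit code 0 means the report was produced successfully.
assertEquals(0, reportAdmin.run(new String[] { "-report", "-decommissioning" }));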

Aggregations

DFSAdmin (org.apache.hadoop.hdfs.tools.DFSAdmin): 41 usages
Test (org.junit.Test): 31 usages
Configuration (org.apache.hadoop.conf.Configuration): 15 usages
Path (org.apache.hadoop.fs.Path): 14 usages
CoreMatchers.containsString (org.hamcrest.CoreMatchers.containsString): 9 usages
IOException (java.io.IOException): 6 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 6 usages
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 5 usages
DSQuotaExceededException (org.apache.hadoop.hdfs.protocol.DSQuotaExceededException): 4 usages
NSQuotaExceededException (org.apache.hadoop.hdfs.protocol.NSQuotaExceededException): 4 usages
QuotaExceededException (org.apache.hadoop.hdfs.protocol.QuotaExceededException): 4 usages
ContentSummary (org.apache.hadoop.fs.ContentSummary): 3 usages
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 3 usages
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 3 usages
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 3 usages
ByteArrayOutputStream (java.io.ByteArrayOutputStream): 2 usages
File (java.io.File): 2 usages
QuotaUsage (org.apache.hadoop.fs.QuotaUsage): 2 usages
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 2 usages
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 2 usages