Use of org.apache.hadoop.hdfs.tools.DFSAdmin in project hadoop by apache.
From the class TestQuota, the method testSetAndClearSpaceQuotaRegularInternal.
private void testSetAndClearSpaceQuotaRegularInternal(
    final String[] args, final Path dir, final int cmdRet,
    final int spaceQuota) throws Exception {
  resetStream();
  final DFSAdmin dfsAdmin = new DFSAdmin(conf);
  final List<String> outs = Lists.newArrayList();
  final int ret = ToolRunner.run(dfsAdmin, args);
  assertEquals(cmdRet, ret);
  final QuotaUsage quotaUsage = dfs.getQuotaUsage(dir);
  assertEquals(spaceQuota, quotaUsage.getSpaceQuota());
  scanIntoList(OUT_STREAM, outs);
  assertTrue("There should be no output if it runs successfully.", outs.isEmpty());
}
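A call site for this helper is not shown on this page; a minimal sketch of one, assuming the same DistributedFileSystem field dfs as above (the quota value, directory name, and expected exit code are illustrative only):

// Hypothetical invocation: set a 6 MB space quota on /quota-dir and expect dfsadmin to exit with 0.
final Path dir = new Path("/quota-dir");
dfs.mkdirs(dir);
testSetAndClearSpaceQuotaRegularInternal(
    new String[] { "-setSpaceQuota", "6m", dir.toString() },  // dfsadmin -setSpaceQuota <quota> <dir>
    dir,
    0,                   // expected ToolRunner exit code
    6 * 1024 * 1024);    // expected QuotaUsage#getSpaceQuota() value in bytes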
Use of org.apache.hadoop.hdfs.tools.DFSAdmin in project hadoop by apache.
From the class TestQuota, the method testSetAndClearSpaceQuotaByStorageTypeInternal.
private void testSetAndClearSpaceQuotaByStorageTypeInternal(
    final String[] args, final Path dir, final int cmdRet,
    final int spaceQuota, final int spaceQuotaByStorageType) throws Exception {
  resetStream();
  final DFSAdmin dfsAdmin = new DFSAdmin(conf);
  final List<String> outs = Lists.newArrayList();
  final int ret = ToolRunner.run(dfsAdmin, args);
  assertEquals(cmdRet, ret);
  final QuotaUsage quotaUsage = dfs.getQuotaUsage(dir);
  assertEquals(spaceQuota, quotaUsage.getSpaceQuota());
  assertEquals(spaceQuotaByStorageType, quotaUsage.getTypeQuota(StorageType.DISK));
  scanIntoList(OUT_STREAM, outs);
  assertTrue("There should be no output if it runs successfully.", outs.isEmpty());
}
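As above, a call site is not included here; a minimal sketch, assuming only a DISK storage-type quota is set so the overall space quota stays at its unset value of -1 (all concrete values are illustrative):

// Hypothetical invocation: set a 2048-byte quota for the DISK storage type only.
final Path dir = new Path("/quota-dir-disk");
dfs.mkdirs(dir);
testSetAndClearSpaceQuotaByStorageTypeInternal(
    new String[] { "-setSpaceQuota", "2048", "-storageType", "DISK", dir.toString() },
    dir,
    0,      // expected dfsadmin exit code
    -1,     // overall space quota assumed to remain unset
    2048);  // expected quota reported for StorageType.DISK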
Use of org.apache.hadoop.hdfs.tools.DFSAdmin in project hadoop by apache.
From the class TestRollingUpgradeRollback, the method testRollbackWithQJM.
@Test
public void testRollbackWithQJM() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  MiniJournalCluster mjc = null;
  MiniDFSCluster cluster = null;
  final Path foo = new Path("/foo");
  final Path bar = new Path("/bar");
  try {
    mjc = new MiniJournalCluster.Builder(conf).numJournalNodes(NUM_JOURNAL_NODES).build();
    mjc.waitActive();
    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
        mjc.getQuorumJournalURI(JOURNAL_ID).toString());
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    DistributedFileSystem dfs = cluster.getFileSystem();
    final DFSAdmin dfsadmin = new DFSAdmin(conf);
    dfs.mkdirs(foo);
    // start rolling upgrade
    dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    Assert.assertEquals(0, dfsadmin.run(new String[] { "-rollingUpgrade", "prepare" }));
    dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    // create new directory
    dfs.mkdirs(bar);
    dfs.close();
    // rollback
    cluster.restartNameNode("-rollingUpgrade", "rollback");
    // make sure /foo is still there, but /bar is not
    dfs = cluster.getFileSystem();
    Assert.assertTrue(dfs.exists(foo));
    Assert.assertFalse(dfs.exists(bar));
    // check storage in each JN (use the loop index so every journal node is verified)
    for (int i = 0; i < NUM_JOURNAL_NODES; i++) {
      File dir = mjc.getCurrentDir(i, JOURNAL_ID);
      // segments: (startSegment, mkdir, endSegment),
      // (startSegment, upgrade marker, mkdir, endSegment)
      checkJNStorage(dir, 4, 7);
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
    if (mjc != null) {
      mjc.shutdown();
    }
  }
}
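The test drives the rollback itself through a NameNode restart option, but the same DFSAdmin tool can also report on an upgrade that has been prepared. A minimal sketch (the assertion on the exit code is an assumption, not taken from the test above):

// Sketch: after "-rollingUpgrade prepare", query the rolling upgrade status via DFSAdmin.
final DFSAdmin dfsadmin = new DFSAdmin(conf);
Assert.assertEquals(0, dfsadmin.run(new String[] { "-rollingUpgrade", "query" }));  // 0 assumed to mean success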
Use of org.apache.hadoop.hdfs.tools.DFSAdmin in project hadoop by apache.
From the class TestDataNodeRollingUpgrade, the method finalizeRollingUpgrade.
private void finalizeRollingUpgrade() throws Exception {
  LOG.info("Finalizing rolling upgrade");
  final DFSAdmin dfsadmin = new DFSAdmin(conf);
  TestRollingUpgrade.runCmd(dfsadmin, true, "-rollingUpgrade", "finalize");
  triggerHeartBeats();
  // Ensure the rolling upgrade has been finalized on the datanode: block trash must be disabled again
  assertFalse(dn0.getFSDataset().trashEnabled(blockPoolId));
  BlockPoolSliceStorage bps = dn0.getStorage().getBPStorage(blockPoolId);
  assertFalse(bps.trashEnabled());
}
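TestRollingUpgrade.runCmd is not shown on this page; a plausible sketch of such a wrapper, under the assumption that it simply runs the command and asserts on the exit code (the real helper in the Hadoop source may differ):

// Sketch of a runCmd-style helper: run a DFSAdmin command and assert whether it succeeded.
private static void runCmd(DFSAdmin dfsadmin, boolean success, String... args) throws Exception {
  int exitCode = dfsadmin.run(args);
  if (success) {
    Assert.assertEquals(0, exitCode);   // command expected to succeed
  } else {
    Assert.assertTrue(exitCode != 0);   // command expected to fail
  }
}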
Use of org.apache.hadoop.hdfs.tools.DFSAdmin in project hadoop by apache.
From the class TestDecommissioningStatus, the method testDecommissionStatus.
/**
 * Tests Decommissioning Status in DFS.
 */
@Test
public void testDecommissionStatus() throws Exception {
  InetSocketAddress addr = new InetSocketAddress("localhost", cluster.getNameNodePort());
  DFSClient client = new DFSClient(addr, conf);
  DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
  assertEquals("Number of Datanodes ", 2, info.length);
  DistributedFileSystem fileSys = cluster.getFileSystem();
  DFSAdmin admin = new DFSAdmin(cluster.getConfiguration(0));
  short replicas = numDatanodes;
  // Decommission one node. Verify the decommission status.
  Path file1 = new Path("decommission.dat");
  DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize, replicas, seed);
  Path file2 = new Path("decommission1.dat");
  FSDataOutputStream st1 = AdminStatesBaseTest.writeIncompleteFile(fileSys, file2, replicas,
      (short) (fileSize / blockSize));
  for (DataNode d : cluster.getDataNodes()) {
    DataNodeTestUtils.triggerBlockReport(d);
  }
  FSNamesystem fsn = cluster.getNamesystem();
  final DatanodeManager dm = fsn.getBlockManager().getDatanodeManager();
  for (int iteration = 0; iteration < numDatanodes; iteration++) {
    String downnode = decommissionNode(client, iteration);
    dm.refreshNodes(conf);
    decommissionedNodes.add(downnode);
    BlockManagerTestUtil.recheckDecommissionState(dm);
    final List<DatanodeDescriptor> decommissioningNodes = dm.getDecommissioningNodes();
    if (iteration == 0) {
      assertEquals(decommissioningNodes.size(), 1);
      DatanodeDescriptor decommNode = decommissioningNodes.get(0);
      checkDecommissionStatus(decommNode, 3, 0, 1);
      checkDFSAdminDecommissionStatus(decommissioningNodes.subList(0, 1), fileSys, admin);
    } else {
      assertEquals(decommissioningNodes.size(), 2);
      DatanodeDescriptor decommNode1 = decommissioningNodes.get(0);
      DatanodeDescriptor decommNode2 = decommissioningNodes.get(1);
      // This one is still 3,3,1 since it passed over the UC block
      // earlier, before node 2 was decommed
      checkDecommissionStatus(decommNode1, 3, 3, 1);
      // This one is 4,4,2 since it has the full state
      checkDecommissionStatus(decommNode2, 4, 4, 2);
      checkDFSAdminDecommissionStatus(decommissioningNodes.subList(0, 2), fileSys, admin);
    }
  }
  // Call refreshNodes on FSNamesystem with an empty exclude file. This removes the
  // datanodes from the decommissioning list and makes them available again.
  hostsFileWriter.initExcludeHost("");
  dm.refreshNodes(conf);
  st1.close();
  AdminStatesBaseTest.cleanupFile(fileSys, file1);
  AdminStatesBaseTest.cleanupFile(fileSys, file2);
}
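The decommissionNode helper used above is not part of this excerpt; a plausible sketch, assuming it picks the datanode at the given index and writes it into the exclude file consumed by the subsequent dm.refreshNodes(conf) call (the real helper in TestDecommissioningStatus may differ):

// Sketch of a decommissionNode-style helper: exclude one live datanode so decommissioning starts.
private String decommissionNode(DFSClient client, int index) throws IOException {
  DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
  String nodename = info[index].getXferAddr();
  // Write the node into the exclude file; the caller then triggers dm.refreshNodes(conf).
  hostsFileWriter.initExcludeHost(nodename);
  return nodename;
}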