Use of org.apache.hadoop.hdfs.tools.DFSAdmin in project hadoop by apache.
The class TestClientProtocolForPipelineRecovery, method testEvictWriter.
/**
 * Test that the writer is kicked out of a node.
 */
@Test
public void testEvictWriter() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    Path file = new Path("testEvictWriter.dat");
    FSDataOutputStream out = fs.create(file, (short) 2);
    out.write(0x31);
    out.hflush();

    // Get the nodes in the pipeline.
    DFSOutputStream dfsOut = (DFSOutputStream) out.getWrappedStream();
    DatanodeInfo[] nodes = dfsOut.getPipeline();
    Assert.assertEquals(2, nodes.length);
    String dnAddr = nodes[1].getIpcAddr(false);

    // Evict the writer from the second datanode and wait until
    // the pipeline is rebuilt.
    DFSAdmin dfsadmin = new DFSAdmin(conf);
    final String[] args1 = { "-evictWriters", dnAddr };
    Assert.assertEquals(0, dfsadmin.run(args1));
    out.write(0x31);
    out.hflush();

    // Get the new pipeline and check that the evicted node is no longer in it.
    nodes = dfsOut.getPipeline();
    try {
      Assert.assertTrue(nodes.length > 0);
      for (int i = 0; i < nodes.length; i++) {
        Assert.assertFalse(dnAddr.equals(nodes[i].getIpcAddr(false)));
      }
    } finally {
      out.close();
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
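The -evictWriters subcommand can also be invoked outside of a test harness, either from the shell (hdfs dfsadmin -evictWriters <datanode-host:ipc-port>) or programmatically. A minimal standalone sketch follows; the namenode and datanode addresses are placeholders, not values from the example above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.tools.DFSAdmin;

// Standalone sketch (not from the test above): evict writers from one
// datanode of a running cluster. Both addresses are placeholders.
public class EvictWritersExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://namenode.example.com:8020"); // placeholder
    DFSAdmin dfsadmin = new DFSAdmin(conf);
    // Equivalent to: hdfs dfsadmin -evictWriters datanode.example.com:9867
    int rc = dfsadmin.run(new String[] { "-evictWriters", "datanode.example.com:9867" });
    System.exit(rc);
  }
}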
Use of org.apache.hadoop.hdfs.tools.DFSAdmin in project hadoop by apache.
The class TestRollingUpgrade, method testDFSAdminRollingUpgradeCommands.
/**
 * Test DFSAdmin rolling upgrade commands.
 */
@Test
public void testDFSAdminRollingUpgradeCommands() throws Exception {
  // Start a cluster.
  final Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();

    final Path foo = new Path("/foo");
    final Path bar = new Path("/bar");
    final Path baz = new Path("/baz");
    {
      final DistributedFileSystem dfs = cluster.getFileSystem();
      final DFSAdmin dfsadmin = new DFSAdmin(conf);
      dfs.mkdirs(foo);

      // Illegal argument "abc" to the rollingUpgrade option.
      runCmd(dfsadmin, false, "-rollingUpgrade", "abc");
      checkMxBeanIsNull();

      // Query rolling upgrade.
      runCmd(dfsadmin, true, "-rollingUpgrade");

      // Start rolling upgrade.
      dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
      runCmd(dfsadmin, true, "-rollingUpgrade", "prepare");
      dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);

      // Query rolling upgrade.
      runCmd(dfsadmin, true, "-rollingUpgrade", "query");
      checkMxBean();
      dfs.mkdirs(bar);

      // Finalize rolling upgrade.
      runCmd(dfsadmin, true, "-rollingUpgrade", "finalize");
      // RollingUpgradeInfo should be null after finalization, both via
      // the Java API and in JMX.
      assertNull(dfs.rollingUpgrade(RollingUpgradeAction.QUERY));
      checkMxBeanIsNull();
      dfs.mkdirs(baz);
      runCmd(dfsadmin, true, "-rollingUpgrade");

      // All directories created before the upgrade, while it was in
      // progress, and after it was finalized should still exist.
      Assert.assertTrue(dfs.exists(foo));
      Assert.assertTrue(dfs.exists(bar));
      Assert.assertTrue(dfs.exists(baz));

      dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
      dfs.saveNamespace();
      dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    }

    // Ensure the directories still exist after a namenode restart.
    cluster.restartNameNode();
    {
      final DistributedFileSystem dfs = cluster.getFileSystem();
      Assert.assertTrue(dfs.exists(foo));
      Assert.assertTrue(dfs.exists(bar));
      Assert.assertTrue(dfs.exists(baz));
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
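The runCmd helper used throughout these tests is not part of this listing. A plausible minimal version, assuming it does nothing more than assert on the tool's exit code, would be:

// Hypothetical sketch of the runCmd helper referenced above: run the
// DFSAdmin command and assert that it succeeds (exit code 0) or fails
// (non-zero exit code), as requested by the caller.
private static void runCmd(DFSAdmin dfsadmin, boolean success, String... args) throws Exception {
  if (success) {
    Assert.assertEquals(0, dfsadmin.run(args));
  } else {
    Assert.assertTrue(dfsadmin.run(args) != 0);
  }
}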
Use of org.apache.hadoop.hdfs.tools.DFSAdmin in project hadoop by apache.
The class TestRollingUpgrade, method testDFSAdminDatanodeUpgradeControlCommands.
@Test
public void testDFSAdminDatanodeUpgradeControlCommands() throws Exception {
  // Start a cluster.
  final Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    final DFSAdmin dfsadmin = new DFSAdmin(conf);
    DataNode dn = cluster.getDataNodes().get(0);

    // Check the datanode.
    final String dnAddr = dn.getDatanodeId().getIpcAddr(false);
    final String[] args1 = { "-getDatanodeInfo", dnAddr };
    runCmd(dfsadmin, true, args1);

    // Issue a shutdown to the datanode.
    final String[] args2 = { "-shutdownDatanode", dnAddr, "upgrade" };
    runCmd(dfsadmin, true, args2);

    // The datanode should go down.
    GenericTestUtils.waitForThreadTermination("Async datanode shutdown thread", 100, 10000);
    Assert.assertFalse("DataNode should exit", dn.isDatanodeUp());

    // Pinging the datanode should now fail.
    assertEquals(-1, dfsadmin.run(args1));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
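The -getDatanodeInfo call doubles as a liveness probe: it returns 0 while the datanode's IPC server answers and non-zero once it is down, which is exactly how the test verifies the shutdown. A minimal sketch of that ping pattern outside a test; the datanode address is a placeholder.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.tools.DFSAdmin;

// Sketch: use -getDatanodeInfo as a liveness check for a datanode.
public class DatanodePing {
  public static void main(String[] args) throws Exception {
    DFSAdmin dfsadmin = new DFSAdmin(new Configuration());
    String[] ping = { "-getDatanodeInfo", "datanode.example.com:9867" }; // placeholder
    boolean alive = dfsadmin.run(ping) == 0;
    System.out.println(alive ? "datanode is up" : "datanode is down");
  }
}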
Use of org.apache.hadoop.hdfs.tools.DFSAdmin in project hadoop by apache.
The class DFSTestUtil, method DFSAdminRun.
// Run a DFSAdmin command given as a single string, asserting on the
// expected return code and that the output contains the given substring.
public static void DFSAdminRun(String cmd, int retcode, String contain, Configuration conf) throws Exception {
  DFSAdmin admin = new DFSAdmin(new Configuration(conf));
  toolRun(admin, cmd, retcode, contain);
}
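toolRun is referenced here but not shown in this listing. A minimal sketch of such a helper, assuming it splits the command string, runs the tool through ToolRunner while capturing stdout, and asserts on both the exit code and (optionally) the output:

import java.io.ByteArrayOutputStream;
import java.io.PrintStream;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.junit.Assert;

// Hypothetical sketch of a toolRun-style helper: capture stdout while the
// tool runs, then assert on the exit code and, if requested, on the output.
public static void toolRun(Tool tool, String cmd, int retcode, String contain) throws Exception {
  String[] cmds = cmd.trim().split("\\s+");
  PrintStream origOut = System.out;
  ByteArrayOutputStream captured = new ByteArrayOutputStream();
  System.setOut(new PrintStream(captured, true));
  int ret;
  try {
    ret = ToolRunner.run(tool, cmds);
  } finally {
    System.setOut(origOut);
  }
  Assert.assertEquals("unexpected exit code", retcode, ret);
  if (contain != null) {
    Assert.assertTrue("output should contain \"" + contain + "\"",
        captured.toString().contains(contain));
  }
}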
Use of org.apache.hadoop.hdfs.tools.DFSAdmin in project hadoop by apache.
The class TestQuota, method testQuotaCommandsWithURI.
/**
 * Test all the quota commands by passing fully qualified paths.
 */
@Test(timeout = 30000)
public void testQuotaCommandsWithURI() throws Exception {
  DFSAdmin dfsAdmin = new DFSAdmin(conf);
  final Path dir = new Path("/" + this.getClass().getSimpleName(),
      GenericTestUtils.getMethodName());
  assertTrue(dfs.mkdirs(dir));

  /* Set the space quota. */
  testSetAndClearSpaceQuotaRegularInternal(
      new String[] { "-setSpaceQuota", "1024", dfs.getUri() + "/" + dir.toString() },
      dir, 0, 1024);
  /* Clear the space quota. */
  testSetAndClearSpaceQuotaRegularInternal(
      new String[] { "-clrSpaceQuota", dfs.getUri() + "/" + dir.toString() },
      dir, 0, -1);
  runCommand(dfsAdmin, false, "-setQuota", "1000", dfs.getUri() + "/" + dir.toString());
  runCommand(dfsAdmin, false, "-clrQuota", dfs.getUri() + "/" + dir.toString());
}
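The same quota operations map directly onto dfsadmin subcommands. A minimal standalone sketch; the namenode address and path are placeholders, not values from the test above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.tools.DFSAdmin;

// Sketch: set and then clear a space quota on a fully qualified path.
// Equivalent shell commands:
//   hdfs dfsadmin -setSpaceQuota 1024 hdfs://namenode.example.com:8020/data
//   hdfs dfsadmin -clrSpaceQuota hdfs://namenode.example.com:8020/data
public class QuotaExample {
  public static void main(String[] args) throws Exception {
    DFSAdmin dfsAdmin = new DFSAdmin(new Configuration());
    String path = "hdfs://namenode.example.com:8020/data"; // placeholder
    int rc = dfsAdmin.run(new String[] { "-setSpaceQuota", "1024", path });
    rc |= dfsAdmin.run(new String[] { "-clrSpaceQuota", path });
    System.exit(rc == 0 ? 0 : 1);
  }
}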