Example usage of org.apache.hadoop.hdfs.tools.DFSAdmin in the Apache Hadoop project: the testUnregistration method of class TestGenericRefresh.
@Test
public void testUnregistration() throws Exception {
  RefreshRegistry.defaultRegistry().unregisterAll("firstHandler");
  // The handler has been removed, so refreshing that identifier must now fail.
  DFSAdmin dfsAdmin = new DFSAdmin(config);
  String hostPort = "localhost:" + cluster.getNameNodePort();
  int returnCode = dfsAdmin.run(new String[] { "-refresh", hostPort, "firstHandler" });
  assertEquals("DFSAdmin should return -1", -1, returnCode);
}
Example usage of org.apache.hadoop.hdfs.tools.DFSAdmin in the Apache Hadoop project: the testRefresh method of class TestRefreshCallQueue.
@Test
public void testRefresh() throws Exception {
  // Sanity checks: the mock queue must be in place before the refresh.
  assertTrue("Mock queue should have been constructed", mockQueueConstructions > 0);
  assertTrue("Puts are routed through MockQueue", canPutInMockQueue());
  int constructionsBeforeRefresh = mockQueueConstructions;
  // Replace queue with the queue specified in core-site.xml, which would be the LinkedBlockingQueue
  DFSAdmin dfsAdmin = new DFSAdmin(config);
  int returnCode = dfsAdmin.run(new String[] { "-refreshCallQueue" });
  assertEquals("DFSAdmin should return 0", 0, returnCode);
  // The refresh must not have built another MockQueue instance.
  assertEquals("Mock queue should have no additional constructions", constructionsBeforeRefresh, mockQueueConstructions);
  try {
    assertFalse("Puts are routed through LBQ instead of MockQueue", canPutInMockQueue());
  } catch (IOException ioe) {
    // Probing the queue itself failed, which is a distinct failure mode.
    fail("Could not put into queue at all");
  }
}
Example usage of org.apache.hadoop.hdfs.tools.DFSAdmin in the Apache Hadoop project: the testInvalidIdentifier method of class TestGenericRefresh.
@Test
public void testInvalidIdentifier() throws Exception {
  // No handler was ever registered under this identifier, so the refresh must fail.
  DFSAdmin dfsAdmin = new DFSAdmin(config);
  String hostPort = "localhost:" + cluster.getNameNodePort();
  String[] refreshArgs = { "-refresh", hostPort, "unregisteredIdentity" };
  assertEquals("DFSAdmin should fail due to no handler registered", -1, dfsAdmin.run(refreshArgs));
}
Example usage of org.apache.hadoop.hdfs.tools.DFSAdmin in the Apache Hadoop project: the testValidIdentifier method of class TestGenericRefresh.
@Test
public void testValidIdentifier() throws Exception {
  DFSAdmin dfsAdmin = new DFSAdmin(config);
  String hostPort = "localhost:" + cluster.getNameNodePort();
  int returnCode = dfsAdmin.run(new String[] { "-refresh", hostPort, "firstHandler" });
  assertEquals("DFSAdmin should succeed", 0, returnCode);
  // Only the handler registered under "firstHandler" should have been invoked.
  Mockito.verify(firstHandler).handleRefresh("firstHandler", new String[] {});
  // Second handler was never called
  Mockito.verify(secondHandler, Mockito.never()).handleRefresh(Mockito.anyString(), Mockito.any(String[].class));
}
Example usage of org.apache.hadoop.hdfs.tools.DFSAdmin in the Apache Hadoop project: the testDfsAdminDeleteBlockPool method of class TestDeleteBlockPool.
@Test
public void testDfsAdminDeleteBlockPool() throws Exception {
  Configuration clusterConf = new Configuration();
  MiniDFSCluster miniCluster = null;
  try {
    // Two federated nameservices give the single datanode two distinct block pools.
    clusterConf.set(DFSConfigKeys.DFS_NAMESERVICES, "namesServerId1,namesServerId2");
    miniCluster = new MiniDFSCluster.Builder(clusterConf)
        .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(
            clusterConf.get(DFSConfigKeys.DFS_NAMESERVICES)))
        .numDataNodes(1)
        .build();
    miniCluster.waitActive();
    FileSystem fsForNn1 = miniCluster.getFileSystem(0);
    FileSystem fsForNn2 = miniCluster.getFileSystem(1);
    DFSTestUtil.createFile(fsForNn1, new Path("/alpha"), 1024, (short) 1, 54);
    DFSTestUtil.createFile(fsForNn2, new Path("/beta"), 1024, (short) 1, 54);
    DataNode dataNode = miniCluster.getDataNodes().get(0);
    String blockPoolId1 = miniCluster.getNamesystem(0).getBlockPoolId();
    String blockPoolId2 = miniCluster.getNamesystem(1).getBlockPoolId();
    // Drop the second nameservice from the datanode's view so its block pool becomes stale.
    Configuration nn1OnlyConf = miniCluster.getConfiguration(0);
    nn1OnlyConf.set(DFSConfigKeys.DFS_NAMESERVICES, "namesServerId1");
    dataNode.refreshNamenodes(nn1OnlyConf);
    assertEquals(1, dataNode.getAllBpOs().size());
    DFSAdmin dfsAdmin = new DFSAdmin(nn1OnlyConf);
    String dnAddress = dataNode.getDatanodeId().getIpAddr() + ":" + dataNode.getIpcPort();
    // Without "force" the delete must be refused and the pool must survive on disk.
    int returnCode = dfsAdmin.run(new String[] { "-deleteBlockPool", dnAddress, blockPoolId2 });
    assertFalse(0 == returnCode);
    miniCluster.getFsDatasetTestUtils(0).verifyBlockPoolExists(blockPoolId2);
    // With "force" the stale block pool is actually removed.
    returnCode = dfsAdmin.run(new String[] { "-deleteBlockPool", dnAddress, blockPoolId2, "force" });
    assertEquals(0, returnCode);
    miniCluster.getFsDatasetTestUtils(0).verifyBlockPoolMissing(blockPoolId2);
    //bpid1 remains good
    miniCluster.getFsDatasetTestUtils(0).verifyBlockPoolExists(blockPoolId1);
  } finally {
    if (miniCluster != null) {
      miniCluster.shutdown();
    }
  }
}
Aggregations