Use of org.apache.hadoop.hdfs.tools.DFSAdmin in project hadoop by apache.
From class TestCheckpoint, method testSaveNamespace.
/**
 * Tests saving the namespace via "dfsadmin -saveNamespace": the command
 * must fail outside safe mode and succeed inside it, finalizing the first
 * edits segment and writing a new fsimage.
 */
@Test
public void testSaveNamespace() throws IOException {
  MiniDFSCluster cluster = null;
  DistributedFileSystem fs = null;
  FileContext fc;
  try {
    Configuration conf = new HdfsConfiguration();
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(numDatanodes).format(true).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    fc = FileContext.getFileContext(cluster.getURI(0));

    // Saving the image without safe mode should fail.
    DFSAdmin admin = new DFSAdmin(conf);
    String[] args = new String[] { "-saveNamespace" };
    try {
      admin.run(args);
    } catch (IOException eIO) {
      assertTrue(eIO.getLocalizedMessage().contains(
          "Safe mode should be turned ON"));
    } catch (Exception e) {
      throw new IOException(e);
    }

    // Create a new file.
    Path file = new Path("namespace.dat");
    DFSTestUtil.createFile(fs, file, fileSize, fileSize, blockSize,
        replication, seed);
    checkFile(fs, file, replication);

    // Create a new symlink.
    Path symlink = new Path("file.link");
    fc.createSymlink(file, symlink, false);
    assertTrue(fc.getFileLinkStatus(symlink).isSymlink());

    // Verify that the edits file is NOT empty.
    Collection<URI> editsDirs = cluster.getNameEditsDirs(0);
    for (URI uri : editsDirs) {
      File ed = new File(uri.getPath());
      assertTrue(new File(ed, "current/"
          + NNStorage.getInProgressEditsFileName(1)).length()
          > Integer.SIZE / Byte.SIZE);
    }

    // Saving the image in safe mode should succeed.
    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    try {
      admin.run(args);
    } catch (Exception e) {
      throw new IOException(e);
    }

    // TODO: Fix the test to not require a hard-coded transaction count.
    final int EXPECTED_TXNS_FIRST_SEG = 13;

    for (URI uri : editsDirs) {
      File ed = new File(uri.getPath());
      File curDir = new File(ed, "current");
      LOG.info("Files in " + curDir + ":\n "
          + Joiner.on("\n ").join(curDir.list()));
      // Verify that the first edits file got finalized.
      File originalEdits = new File(curDir,
          NNStorage.getInProgressEditsFileName(1));
      assertFalse(originalEdits.exists());
      File finalizedEdits = new File(curDir,
          NNStorage.getFinalizedEditsFileName(1, EXPECTED_TXNS_FIRST_SEG));
      GenericTestUtils.assertExists(finalizedEdits);
      assertTrue(finalizedEdits.length() > Integer.SIZE / Byte.SIZE);
      GenericTestUtils.assertExists(new File(ed, "current/"
          + NNStorage.getInProgressEditsFileName(EXPECTED_TXNS_FIRST_SEG + 1)));
    }

    Collection<URI> imageDirs = cluster.getNameDirs(0);
    for (URI uri : imageDirs) {
      File imageDir = new File(uri.getPath());
      File savedImage = new File(imageDir, "current/"
          + NNStorage.getImageFileName(EXPECTED_TXNS_FIRST_SEG));
      assertTrue("Should have saved image at " + savedImage,
          savedImage.exists());
    }

    // Restart the cluster and verify that the file and symlink survived.
    cluster.shutdown();
    cluster = null;
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(numDatanodes).format(false).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    checkFile(fs, file, replication);
    fc = FileContext.getFileContext(cluster.getURI(0));
    assertTrue(fc.getFileLinkStatus(symlink).isSymlink());
  } finally {
    if (fs != null) {
      fs.close();
    }
    cleanup(cluster);
    cluster = null;
  }
}
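For reference, the same enter-safe-mode / save / leave sequence can be driven entirely through DFSAdmin arguments. The following standalone sketch is hypothetical (the class name and the assumption of a reachable NameNode are ours, not part of the test):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.util.ToolRunner;

// Hypothetical driver: enter safe mode, save the namespace, leave safe mode.
public class SaveNamespaceDriver {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    DFSAdmin admin = new DFSAdmin(conf);
    // -saveNamespace is rejected unless the NameNode is in safe mode.
    ToolRunner.run(admin, new String[] { "-safemode", "enter" });
    int rc = ToolRunner.run(admin, new String[] { "-saveNamespace" });
    ToolRunner.run(admin, new String[] { "-safemode", "leave" });
    System.exit(rc);
  }
}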
Use of org.apache.hadoop.hdfs.tools.DFSAdmin in project hadoop by apache.
From class TestTools, method testDFSAdminInvalidUsageHelp.
@Test
public void testDFSAdminInvalidUsageHelp() {
  ImmutableSet<String> args = ImmutableSet.of("-report", "-saveNamespace",
      "-rollEdits", "-restoreFailedStorage", "-refreshNodes",
      "-finalizeUpgrade", "-metasave", "-refreshUserToGroupsMappings",
      "-printTopology", "-refreshNamenodes", "-deleteBlockPool",
      "-setBalancerBandwidth", "-fetchImage");
  try {
    // Every command run with invalid arguments should exit with -1.
    for (String arg : args) {
      assertTrue(ToolRunner.run(new DFSAdmin(), fillArgs(arg)) == -1);
    }
    // "-help" tolerates an unknown trailing argument and exits with 0.
    assertTrue(ToolRunner.run(new DFSAdmin(),
        new String[] { "-help", "-some" }) == 0);
  } catch (Exception e) {
    fail("testDFSAdminHelp error " + e);
  }
  String pattern = "Usage: hdfs dfsadmin";
  checkOutput(new String[] { "-cancel", "-renew" }, pattern, System.err,
      DFSAdmin.class);
}
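fillArgs and checkOutput are helpers defined elsewhere in TestTools and are not shown in this excerpt. A minimal, self-contained sketch of the same idea follows, capturing System.err to assert the usage banner; the class name is illustrative, and we use "-cancel" with no further arguments since the test itself confirms that this prints the usage string:

import java.io.ByteArrayOutputStream;
import java.io.PrintStream;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.util.ToolRunner;

public class DfsAdminUsageCheck {
  public static void main(String[] args) throws Exception {
    // Capture stderr so the usage banner can be inspected afterwards.
    ByteArrayOutputStream err = new ByteArrayOutputStream();
    PrintStream oldErr = System.err;
    System.setErr(new PrintStream(err, true));
    int rc;
    try {
      // "-cancel" without its required argument is a usage error.
      rc = ToolRunner.run(new DFSAdmin(), new String[] { "-cancel" });
    } finally {
      System.setErr(oldErr);
    }
    System.out.println("exit=" + rc + ", usagePrinted="
        + err.toString().contains("Usage: hdfs dfsadmin"));
  }
}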
Use of org.apache.hadoop.hdfs.tools.DFSAdmin in project hadoop by apache.
From class TestGenericRefresh, method testMultipleRegistration.
@Test
public void testMultipleRegistration() throws Exception {
  RefreshRegistry.defaultRegistry().register("sharedId", firstHandler);
  RefreshRegistry.defaultRegistry().register("sharedId", secondHandler);

  // This single refresh should trigger both handlers.
  DFSAdmin admin = new DFSAdmin(config);
  String[] args = new String[] { "-refresh",
      "localhost:" + cluster.getNameNodePort(), "sharedId", "one" };
  int exitCode = admin.run(args);
  // -1 because one of the responses is unregistered
  assertEquals(-1, exitCode);

  // Verify that both handlers were called.
  Mockito.verify(firstHandler).handleRefresh("sharedId",
      new String[] { "one" });
  Mockito.verify(secondHandler).handleRefresh("sharedId",
      new String[] { "one" });
  RefreshRegistry.defaultRegistry().unregisterAll("sharedId");
}
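firstHandler and secondHandler are Mockito mocks created in the test's setup, which this excerpt omits. For orientation, here is a minimal sketch of registering a real handler, assuming the org.apache.hadoop.ipc refresh API; the identifier "myId" and the class name are our own illustration:

import org.apache.hadoop.ipc.RefreshHandler;
import org.apache.hadoop.ipc.RefreshRegistry;
import org.apache.hadoop.ipc.RefreshResponse;

public class MyRefreshable {
  // Registering under "myId" makes the handler reachable via
  // "hdfs dfsadmin -refresh <host:ipc_port> myId [args...]".
  public static void install() {
    RefreshRegistry.defaultRegistry().register("myId", new RefreshHandler() {
      @Override
      public RefreshResponse handleRefresh(String identifier, String[] args) {
        // Reload whatever state "myId" guards, then report success.
        return RefreshResponse.successResponse();
      }
    });
  }

  public static void uninstall() {
    RefreshRegistry.defaultRegistry().unregisterAll("myId");
  }
}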
Use of org.apache.hadoop.hdfs.tools.DFSAdmin in project hadoop by apache.
From class TestGenericRefresh, method testInvalidCommand.
@Test
public void testInvalidCommand() throws Exception {
  DFSAdmin admin = new DFSAdmin(config);
  String[] args = new String[] { "-refresh", "nn" };
  int exitCode = admin.run(args);
  assertEquals("DFSAdmin should fail due to bad args", -1, exitCode);
}
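A well-formed -refresh invocation needs at least a host:port and a registered identifier, so "-refresh nn" fails argument parsing before any RPC is made. A hedged sketch of the expected shape (the port and identifier are illustrative):

// Well-formed: host:ipc_port, the registered identifier, then handler args.
String[] good = new String[] { "-refresh", "localhost:8020", "someId", "arg1" };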
Use of org.apache.hadoop.hdfs.tools.DFSAdmin in project hadoop by apache.
From class TestGenericRefresh, method testVariableArgs.
@Test
public void testVariableArgs() throws Exception {
  DFSAdmin admin = new DFSAdmin(config);
  String[] args = new String[] { "-refresh",
      "localhost:" + cluster.getNameNodePort(), "secondHandler", "one" };
  int exitCode = admin.run(args);
  assertEquals("DFSAdmin should return 2", 2, exitCode);

  exitCode = admin.run(new String[] { "-refresh",
      "localhost:" + cluster.getNameNodePort(), "secondHandler", "one",
      "two" });
  assertEquals("DFSAdmin should now return 3", 3, exitCode);

  Mockito.verify(secondHandler).handleRefresh("secondHandler",
      new String[] { "one" });
  Mockito.verify(secondHandler).handleRefresh("secondHandler",
      new String[] { "one", "two" });
}
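The exit codes 2 and 3 come from the handler itself: DFSAdmin surfaces the RefreshResponse return code as its own exit code. The test's setup is not shown in this excerpt; a sketch of the kind of Mockito stubbing it presumably uses (the return codes match the assertions above, but the messages and method name are illustrative):

@Before
public void setUpSecondHandler() {
  secondHandler = Mockito.mock(RefreshHandler.class);
  Mockito.when(secondHandler.handleRefresh(Mockito.eq("secondHandler"),
          Mockito.any(String[].class)))
      .thenReturn(new RefreshResponse(2, "Stubbed response 2"))
      .thenReturn(new RefreshResponse(3, "Stubbed response 3"));
  RefreshRegistry.defaultRegistry().register("secondHandler", secondHandler);
}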