Use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in project hadoop by apache.
The class TestNetworkTopology, method testInvalidNetworkTopologiesNotCachedInHdfs.
@Test(timeout = 180000)
public void testInvalidNetworkTopologiesNotCachedInHdfs() throws Exception {
  // start a cluster
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  try {
    // bad rack topology
    String[] racks = { "/a/b", "/c" };
    String[] hosts = { "foo1.example.com", "foo2.example.com" };
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2)
        .racks(racks).hosts(hosts).build();
    cluster.waitActive();
    NamenodeProtocols nn = cluster.getNameNodeRpc();
    Assert.assertNotNull(nn);
    // Wait for one DataNode to register.
    // The other DataNode will not be able to register because of the
    // rack mismatch.
    DatanodeInfo[] info;
    while (true) {
      info = nn.getDatanodeReport(DatanodeReportType.LIVE);
      Assert.assertFalse(info.length == 2);
      if (info.length == 1) {
        break;
      }
      Thread.sleep(1000);
    }
    // Set the network topology of the other node to match the network
    // topology of the node that came up.
    int validIdx = info[0].getHostName().equals(hosts[0]) ? 0 : 1;
    int invalidIdx = validIdx == 1 ? 0 : 1;
    StaticMapping.addNodeToRack(hosts[invalidIdx], racks[validIdx]);
    LOG.info("datanode " + validIdx + " came up with network location "
        + info[0].getNetworkLocation());
    // Restart the DN with the invalid topology and wait for it to register.
    cluster.restartDataNode(invalidIdx);
    Thread.sleep(5000);
    while (true) {
      info = nn.getDatanodeReport(DatanodeReportType.LIVE);
      if (info.length == 2) {
        break;
      }
      if (info.length == 0) {
        LOG.info("got no valid DNs");
      } else if (info.length == 1) {
        LOG.info("got one valid DN: " + info[0].getHostName()
            + " (at " + info[0].getNetworkLocation() + ")");
      }
      Thread.sleep(1000);
    }
    Assert.assertEquals(info[0].getNetworkLocation(),
        info[1].getNetworkLocation());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
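The two polling loops above spin forever if the expected DataNode count is never reached, relying on the JUnit timeout to abort the test. A bounded helper is often preferable in new test code; here is a minimal sketch, assuming a hypothetical waitForLiveDatanodes name (only the getDatanodeReport call is taken from the example above, the rest is illustrative):

import java.util.concurrent.TimeoutException;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;

// Hypothetical helper: poll the NameNode until the expected number of
// DataNodes report LIVE, or fail once the deadline passes.
static DatanodeInfo[] waitForLiveDatanodes(NamenodeProtocols nn,
    int expected, long timeoutMs) throws Exception {
  long deadline = System.currentTimeMillis() + timeoutMs;
  while (true) {
    DatanodeInfo[] live = nn.getDatanodeReport(DatanodeReportType.LIVE);
    if (live.length == expected) {
      return live;
    }
    if (System.currentTimeMillis() > deadline) {
      throw new TimeoutException("only " + live.length + " of "
          + expected + " DataNodes are live");
    }
    Thread.sleep(1000);
  }
}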
Use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in project hadoop by apache.
The class TestBlockUnderConstruction, method testGetBlockLocations.
/**
* Test NameNode.getBlockLocations(..) on reading un-closed files.
*/
@Test
public void testGetBlockLocations() throws IOException {
  final NamenodeProtocols namenode = cluster.getNameNodeRpc();
  final BlockManager blockManager = cluster.getNamesystem().getBlockManager();
  final Path p = new Path(BASE_DIR, "file2.dat");
  final String src = p.toString();
  final FSDataOutputStream out = TestFileCreation.createFile(hdfs, p, 3);
  // write a half block
  int len = BLOCK_SIZE >>> 1;
  writeFile(p, out, len);
  for (int i = 1; i < NUM_BLOCKS; ) {
    // verify consistency
    final LocatedBlocks lb = namenode.getBlockLocations(src, 0, len);
    final List<LocatedBlock> blocks = lb.getLocatedBlocks();
    assertEquals(i, blocks.size());
    final Block b = blocks.get(blocks.size() - 1).getBlock().getLocalBlock();
    assertFalse(blockManager.getStoredBlock(b).isComplete());
    if (++i < NUM_BLOCKS) {
      // write one more block
      writeFile(p, out, BLOCK_SIZE);
      len += BLOCK_SIZE;
    }
  }
  // close file
  out.close();
}
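The writeFile helper is defined elsewhere in TestBlockUnderConstruction and is not shown by this snippet. For the assertions above to hold, it must push the requested number of bytes into the pipeline and flush them so the NameNode's view of the file is current; the body below is an assumption about that behavior, not the actual helper:

import java.io.IOException;
import java.util.Random;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;

// Hypothetical stand-in for the test's writeFile(..) helper: write `size`
// bytes, then hflush so readers (and getBlockLocations) see the new data.
private void writeFile(Path file, FSDataOutputStream stm, int size)
    throws IOException {
  byte[] buffer = new byte[size];
  new Random().nextBytes(buffer);
  stm.write(buffer, 0, size);
  stm.hflush();
}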
Use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in project hadoop by apache.
The class NNThroughputBenchmark, method run.
/**
* Main method of the benchmark.
* @param aArgs command line parameters
*/
// Tool
@Override
public int run(String[] aArgs) throws Exception {
  List<String> args = new ArrayList<String>(Arrays.asList(aArgs));
  if (args.size() < 2 || !args.get(0).startsWith("-op")) {
    printUsage();
  }
  String type = args.get(1);
  boolean runAll = OperationStatsBase.OP_ALL_NAME.equals(type);
  final URI nnUri = FileSystem.getDefaultUri(config);
  // Start the NameNode
  String[] argv = new String[] {};
  List<OperationStatsBase> ops = new ArrayList<OperationStatsBase>();
  OperationStatsBase opStat = null;
  try {
    if (runAll || CreateFileStats.OP_CREATE_NAME.equals(type)) {
      opStat = new CreateFileStats(args);
      ops.add(opStat);
    }
    if (runAll || MkdirsStats.OP_MKDIRS_NAME.equals(type)) {
      opStat = new MkdirsStats(args);
      ops.add(opStat);
    }
    if (runAll || OpenFileStats.OP_OPEN_NAME.equals(type)) {
      opStat = new OpenFileStats(args);
      ops.add(opStat);
    }
    if (runAll || DeleteFileStats.OP_DELETE_NAME.equals(type)) {
      opStat = new DeleteFileStats(args);
      ops.add(opStat);
    }
    if (runAll || FileStatusStats.OP_FILE_STATUS_NAME.equals(type)) {
      opStat = new FileStatusStats(args);
      ops.add(opStat);
    }
    if (runAll || RenameFileStats.OP_RENAME_NAME.equals(type)) {
      opStat = new RenameFileStats(args);
      ops.add(opStat);
    }
    if (runAll || BlockReportStats.OP_BLOCK_REPORT_NAME.equals(type)) {
      opStat = new BlockReportStats(args);
      ops.add(opStat);
    }
    if (runAll || ReplicationStats.OP_REPLICATION_NAME.equals(type)) {
      if (nnUri.getScheme() != null && nnUri.getScheme().equals("hdfs")) {
        LOG.warn("The replication test is ignored as it does not support "
            + "standalone namenode in another process or on another host.");
      } else {
        opStat = new ReplicationStats(args);
        ops.add(opStat);
      }
    }
    if (runAll || CleanAllStats.OP_CLEAN_NAME.equals(type)) {
      opStat = new CleanAllStats(args);
      ops.add(opStat);
    }
    if (ops.isEmpty()) {
      printUsage();
    }
    if (nnUri.getScheme() == null || nnUri.getScheme().equals("file")) {
      LOG.info("Remote NameNode is not specified. Creating one.");
      FileSystem.setDefaultUri(config, "hdfs://localhost:0");
      config.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
      nameNode = NameNode.createNameNode(argv, config);
      NamenodeProtocols nnProtos = nameNode.getRpcServer();
      nameNodeProto = nnProtos;
      clientProto = nnProtos;
      dataNodeProto = nnProtos;
      refreshUserMappingsProto = nnProtos;
      bpid = nameNode.getNamesystem().getBlockPoolId();
    } else {
      DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(getConf());
      nameNodeProto = DFSTestUtil.getNamenodeProtocolProxy(config, nnUri,
          UserGroupInformation.getCurrentUser());
      clientProto = dfs.getClient().getNamenode();
      dataNodeProto = new DatanodeProtocolClientSideTranslatorPB(
          DFSUtilClient.getNNAddress(nnUri), config);
      refreshUserMappingsProto =
          DFSTestUtil.getRefreshUserMappingsProtocolProxy(config, nnUri);
      getBlockPoolId(dfs);
    }
    // run each benchmark
    for (OperationStatsBase op : ops) {
      LOG.info("Starting benchmark: " + op.getOpName());
      op.benchmark();
      op.cleanUp();
    }
    // print statistics
    for (OperationStatsBase op : ops) {
      LOG.info("");
      op.printResults();
    }
  } catch (Exception e) {
    LOG.error(StringUtils.stringifyException(e));
    throw e;
  }
  return 0;
}
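Because run(String[]) is the Tool entry point, the benchmark is usually driven through the static runBenchmark helper rather than by calling run directly. A minimal sketch of such an invocation, assuming that helper's (Configuration, String[]) signature; the operation name and the -threads/-files values are illustrative choices, not requirements:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.namenode.NNThroughputBenchmark;

public class RunNNBenchmark {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    // No remote NameNode is configured here, so run(..) takes the
    // "file" scheme branch above and creates an in-process NameNode.
    NNThroughputBenchmark.runBenchmark(conf,
        new String[] { "-op", "create", "-threads", "4", "-files", "1000" });
  }
}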
Use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in project hadoop by apache.
The class TestCacheDirectives, method testWaitForCachedReplicas.
@Test(timeout = 120000)
public void testWaitForCachedReplicas() throws Exception {
  FileSystemTestHelper helper = new FileSystemTestHelper();
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      return ((namenode.getNamesystem().getCacheCapacity() ==
          (NUM_DATANODES * CACHE_CAPACITY)) &&
          (namenode.getNamesystem().getCacheUsed() == 0));
    }
  }, 500, 60000);
  // Send a cache report referring to a bogus block. It is important that
  // the NameNode be robust against this.
  NamenodeProtocols nnRpc = namenode.getRpcServer();
  DataNode dn0 = cluster.getDataNodes().get(0);
  String bpid = cluster.getNamesystem().getBlockPoolId();
  LinkedList<Long> bogusBlockIds = new LinkedList<Long>();
  bogusBlockIds.add(999999L);
  nnRpc.cacheReport(dn0.getDNRegistrationForBP(bpid), bpid, bogusBlockIds);
  Path rootDir = helper.getDefaultWorkingDirectory(dfs);
  // Create the pool
  final String pool = "friendlyPool";
  nnRpc.addCachePool(new CachePoolInfo(pool));
  // Create some test files
  final int numFiles = 2;
  final int numBlocksPerFile = 2;
  final List<String> paths = new ArrayList<String>(numFiles);
  for (int i = 0; i < numFiles; i++) {
    Path p = new Path(rootDir, "testCachePaths-" + i);
    FileSystemTestHelper.createFile(dfs, p, numBlocksPerFile, (int) BLOCK_SIZE);
    paths.add(p.toUri().getPath());
  }
  // Check the initial statistics at the namenode
  waitForCachedBlocks(namenode, 0, 0, "testWaitForCachedReplicas:0");
  // Cache and check each path in sequence
  int expected = 0;
  for (int i = 0; i < numFiles; i++) {
    CacheDirectiveInfo directive = new CacheDirectiveInfo.Builder()
        .setPath(new Path(paths.get(i))).setPool(pool).build();
    nnRpc.addCacheDirective(directive, EnumSet.noneOf(CacheFlag.class));
    expected += numBlocksPerFile;
    waitForCachedBlocks(namenode, expected, expected,
        "testWaitForCachedReplicas:1");
  }
  // Check that the datanodes have the right cache values
  DatanodeInfo[] live = dfs.getDataNodeStats(DatanodeReportType.LIVE);
  assertEquals("Unexpected number of live nodes", NUM_DATANODES, live.length);
  long totalUsed = 0;
  for (DatanodeInfo dn : live) {
    final long cacheCapacity = dn.getCacheCapacity();
    final long cacheUsed = dn.getCacheUsed();
    final long cacheRemaining = dn.getCacheRemaining();
    assertEquals("Unexpected cache capacity", CACHE_CAPACITY, cacheCapacity);
    assertEquals("Capacity not equal to used + remaining",
        cacheCapacity, cacheUsed + cacheRemaining);
    assertEquals("Remaining not equal to capacity - used",
        cacheCapacity - cacheUsed, cacheRemaining);
    totalUsed += cacheUsed;
  }
  assertEquals(expected * BLOCK_SIZE, totalUsed);
  // Uncache and check each path in sequence
  RemoteIterator<CacheDirectiveEntry> entries =
      new CacheDirectiveIterator(nnRpc, null, FsTracer.get(conf));
  for (int i = 0; i < numFiles; i++) {
    CacheDirectiveEntry entry = entries.next();
    nnRpc.removeCacheDirective(entry.getInfo().getId());
    expected -= numBlocksPerFile;
    waitForCachedBlocks(namenode, expected, expected,
        "testWaitForCachedReplicas:2");
  }
}
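The directives created above set only the two required fields, path and pool. CacheDirectiveInfo.Builder also accepts optional settings; a short sketch with illustrative values (the replication factor and one-hour expiration are arbitrary, and nnRpc is the same handle as in the test):

import java.util.EnumSet;
import org.apache.hadoop.fs.CacheFlag;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;

// Cache two replicas of each block under the path, expiring after an hour.
CacheDirectiveInfo directive = new CacheDirectiveInfo.Builder()
    .setPath(new Path("/testCachePaths-0"))
    .setPool("friendlyPool")
    .setReplication((short) 2)
    .setExpiration(CacheDirectiveInfo.Expiration.newRelative(60 * 60 * 1000L))
    .build();
long id = nnRpc.addCacheDirective(directive, EnumSet.noneOf(CacheFlag.class));
// The returned id can later be passed to nnRpc.removeCacheDirective(id).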
Use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in project hadoop by apache.
The class TestCheckpoint, method testSecondaryHasVeryOutOfDateImage.
/**
* Test case where the secondary does a checkpoint, then stops for a while.
* In the meantime, the NN saves its image several times, so that the
* logs that connect the 2NN's old checkpoint to the current txid
* get archived. Then, the 2NN tries to checkpoint again.
*/
@Test
public void testSecondaryHasVeryOutOfDateImage() throws IOException {
  MiniDFSCluster cluster = null;
  SecondaryNameNode secondary = null;
  Configuration conf = new HdfsConfiguration();
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
        .format(true).build();
    secondary = startSecondaryNameNode(conf);
    // Checkpoint once
    secondary.doCheckpoint();
    // Now primary NN saves namespace 3 times
    NamenodeProtocols nn = cluster.getNameNodeRpc();
    nn.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
    for (int i = 0; i < 3; i++) {
      nn.saveNamespace(0, 0);
    }
    nn.setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);
    // Now the secondary tries to checkpoint again with its
    // old image in memory.
    secondary.doCheckpoint();
  } finally {
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
}
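The same enter-safe-mode / saveNamespace / leave-safe-mode sequence is also available to clients through DistributedFileSystem, without talking to the RPC server directly. A minimal sketch against the same cluster, with error handling elided:

import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;

// saveNamespace is only permitted while the NameNode is in safe mode.
DistributedFileSystem dfs = cluster.getFileSystem();
dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
try {
  dfs.saveNamespace();
} finally {
  dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
}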