Use of org.apache.hadoop.net.NetworkTopology in project hadoop by apache.
From the class TestBlockStoragePolicy, method testChooseTargetWithTopology.
@Test
public void testChooseTargetWithTopology() throws Exception {
  BlockStoragePolicy policy1 = new BlockStoragePolicy((byte) 9, "TEST1",
      new StorageType[] { StorageType.SSD, StorageType.DISK, StorageType.ARCHIVE },
      new StorageType[] {}, new StorageType[] {});
  BlockStoragePolicy policy2 = new BlockStoragePolicy((byte) 11, "TEST2",
      new StorageType[] { StorageType.DISK, StorageType.SSD, StorageType.ARCHIVE },
      new StorageType[] {}, new StorageType[] {});
  // Three datanodes on two racks, each backed by a different storage type.
  final String[] racks = { "/d1/r1", "/d1/r2", "/d1/r2" };
  final String[] hosts = { "host1", "host2", "host3" };
  final StorageType[] types =
      { StorageType.DISK, StorageType.SSD, StorageType.ARCHIVE };
  final DatanodeStorageInfo[] storages =
      DFSTestUtil.createDatanodeStorageInfos(3, racks, hosts, types);
  final DatanodeDescriptor[] dataNodes =
      DFSTestUtil.toDatanodeDescriptor(storages);
  FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  File baseDir = PathUtils.getTestDir(TestReplicationPolicy.class);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      new File(baseDir, "name").getPath());
  DFSTestUtil.formatNameNode(conf);
  NameNode namenode = new NameNode(conf);
  final BlockManager bm = namenode.getNamesystem().getBlockManager();
  BlockPlacementPolicy replicator = bm.getBlockPlacementPolicy();
  NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
  for (DatanodeDescriptor datanode : dataNodes) {
    cluster.add(datanode);
  }
  // Either storage policy should still produce a full set of three targets.
  DatanodeStorageInfo[] targets = replicator.chooseTarget("/foo", 3,
      dataNodes[0], Collections.<DatanodeStorageInfo>emptyList(), false,
      new HashSet<Node>(), 0, policy1, null);
  System.out.println(Arrays.asList(targets));
  Assert.assertEquals(3, targets.length);
  targets = replicator.chooseTarget("/foo", 3, dataNodes[0],
      Collections.<DatanodeStorageInfo>emptyList(), false,
      new HashSet<Node>(), 0, policy2, null);
  System.out.println(Arrays.asList(targets));
  Assert.assertEquals(3, targets.length);
}
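The two policies differ only in the preference order of their storage types; the test verifies that either ordering still yields all three targets on this small topology. As background, the three arrays passed to the BlockStoragePolicy constructor are the preferred storage types per replica, the creation-time fallbacks, and the replication-time fallbacks (the latter two empty here). Below is a minimal sketch of what the preference order means; it is not part of the original test, and the expected lists assume the policy's documented behavior of repeating its last listed type for extra replicas:

// For policy1 (SSD, DISK, ARCHIVE) and replication 3, one replica is
// expected on each listed storage type, in order.
List<StorageType> chosen = policy1.chooseStorageTypes((short) 3);
System.out.println(chosen); // expected: [SSD, DISK, ARCHIVE]
// For a higher replication factor, the last type should repeat.
System.out.println(policy1.chooseStorageTypes((short) 5));
// expected: [SSD, DISK, ARCHIVE, ARCHIVE, ARCHIVE]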
Use of org.apache.hadoop.net.NetworkTopology in project hadoop by apache.
From the class TestFsck, method testFsckFileNotFound.
/** Test fsck with FileNotFound. */
@Test
public void testFsckFileNotFound() throws Exception {
  // Number of replicas to actually start
  final short numReplicas = 1;
  // Mock out everything NamenodeFsck touches; no real cluster is started.
  NameNode namenode = mock(NameNode.class);
  NetworkTopology nettop = mock(NetworkTopology.class);
  Map<String, String[]> pmap = new HashMap<>();
  Writer result = new StringWriter();
  PrintWriter out = new PrintWriter(result, true);
  InetAddress remoteAddress = InetAddress.getLocalHost();
  FSNamesystem fsName = mock(FSNamesystem.class);
  FSDirectory fsd = mock(FSDirectory.class);
  BlockManager blockManager = mock(BlockManager.class);
  DatanodeManager dnManager = mock(DatanodeManager.class);
  INodesInPath iip = mock(INodesInPath.class);
  when(namenode.getNamesystem()).thenReturn(fsName);
  when(fsName.getBlockManager()).thenReturn(blockManager);
  when(fsName.getFSDirectory()).thenReturn(fsd);
  when(fsd.getFSNamesystem()).thenReturn(fsName);
  when(fsd.resolvePath(anyObject(), anyString(), any(DirOp.class)))
      .thenReturn(iip);
  when(blockManager.getDatanodeManager()).thenReturn(dnManager);
  NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out,
      numReplicas, remoteAddress);
  // Build a file status for a path that does not exist in the namespace.
  String pathString = "/tmp/testFile";
  long length = 123L;
  boolean isDir = false;
  int blockReplication = 1;
  long blockSize = 128 * 1024L;
  long modTime = 123123123L;
  long accessTime = 123123120L;
  FsPermission perms = FsPermission.getDefault();
  String owner = "foo";
  String group = "bar";
  byte[] symlink = null;
  byte[] path = DFSUtil.string2Bytes(pathString);
  long fileId = 312321L;
  int numChildren = 1;
  byte storagePolicy = 0;
  HdfsFileStatus file = new HdfsFileStatus(length, isDir, blockReplication,
      blockSize, modTime, accessTime, perms, owner, group, symlink, path,
      fileId, numChildren, null, storagePolicy, null);
  Result replRes = new ReplicationResult(conf);
  Result ecRes = new ErasureCodingResult(conf);
  try {
    fsck.check(pathString, file, replRes, ecRes);
  } catch (Exception e) {
    fail("Unexpected exception " + e.getMessage());
  }
  assertTrue(replRes.isHealthy());
}
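Note that nothing in this test touches a real namespace: the mocks are wired only deeply enough for NamenodeFsck to construct and run, and fsd.resolvePath() hands back a bare mock INodesInPath. The assertions therefore establish only that check() tolerates a file it cannot resolve without throwing, and that the replication result stays healthy.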
Use of org.apache.hadoop.net.NetworkTopology in project hadoop by apache.
From the class TestFsck, method testFsckMissingReplicas.
/**
 * Tests that the number of missing block replicas and expected replicas is
 * correct.
 * @throws IOException
 */
@Test
public void testFsckMissingReplicas() throws IOException {
  // Desired replication factor; set higher than numReplicas so the file
  // is under-replicated.
  final short replFactor = 2;
  // Number of replicas to actually start
  final short numReplicas = 1;
  // Number of blocks to write
  final short numBlocks = 3;
  // Set a small-ish blocksize
  final long blockSize = 512;
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
  DistributedFileSystem dfs = null;
  // Start up a minicluster
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numReplicas).build();
  assertNotNull("Failed Cluster Creation", cluster);
  cluster.waitClusterUp();
  dfs = cluster.getFileSystem();
  assertNotNull("Failed to get FileSystem", dfs);
  // Create a file that will be intentionally under-replicated
  final String pathString = "/testfile";
  final Path path = new Path(pathString);
  long fileLen = blockSize * numBlocks;
  DFSTestUtil.createFile(dfs, path, fileLen, replFactor, 1);
  NameNode namenode = cluster.getNameNode();
  NetworkTopology nettop = cluster.getNamesystem().getBlockManager()
      .getDatanodeManager().getNetworkTopology();
  Map<String, String[]> pmap = new HashMap<String, String[]>();
  Writer result = new StringWriter();
  PrintWriter out = new PrintWriter(result, true);
  InetAddress remoteAddress = InetAddress.getLocalHost();
  NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out,
      numReplicas, remoteAddress);
  // Run the fsck and check the Result
  final HdfsFileStatus file = namenode.getRpcServer().getFileInfo(pathString);
  assertNotNull(file);
  Result replRes = new ReplicationResult(conf);
  Result ecRes = new ErasureCodingResult(conf);
  fsck.check(pathString, file, replRes, ecRes);
  // Also print the output from the fsck, for ex post facto sanity checks
  System.out.println(result.toString());
  assertEquals(replRes.missingReplicas,
      (numBlocks * replFactor) - (numBlocks * numReplicas));
  assertEquals(replRes.numExpectedReplicas, numBlocks * replFactor);
}
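With numBlocks = 3, replFactor = 2 and numReplicas = 1, the arithmetic in the final assertions works out to 3 * 2 - 3 * 1 = 3 missing replicas out of 3 * 2 = 6 expected replicas.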
Use of org.apache.hadoop.net.NetworkTopology in project hadoop by apache.
From the class TestBlockManager, method removeNode.
private void removeNode(DatanodeDescriptor deadNode) {
  // Drop the node from the topology, then clean up its block replicas.
  NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
  cluster.remove(deadNode);
  bm.removeBlocksAssociatedTo(deadNode);
}
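A hypothetical caller, not taken from the Hadoop source (dataNodes and bm stand in for the test class's fixtures), might use this helper as follows:

// Simulate a datanode death and confirm the topology lost one leaf.
DatanodeDescriptor dead = dataNodes[0];
removeNode(dead);
assertEquals(dataNodes.length - 1,
    bm.getDatanodeManager().getNetworkTopology().getNumOfLeaves());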
Use of org.apache.hadoop.net.NetworkTopology in project hadoop by apache.
From the class BlockManagerSafeMode, method leaveSafeMode.
/**
 * Leave startup safe mode.
 *
 * @param force - true to force exit
 * @return true if it leaves safe mode successfully, else false
 */
boolean leaveSafeMode(boolean force) {
  assert namesystem.hasWriteLock() : "Leaving safe mode needs write lock!";
  final long bytesInFuture = numberOfBytesInFutureBlocks.get();
  if (bytesInFuture > 0) {
    if (force) {
      LOG.warn("Leaving safe mode due to forceExit. This will cause a data "
          + "loss of {} byte(s).", bytesInFuture);
      numberOfBytesInFutureBlocks.set(0);
    } else {
      LOG.error("Refusing to leave safe mode without a force flag. "
          + "Exiting safe mode will cause a deletion of {} byte(s). Please "
          + "use -forceExit flag to exit safe mode forcefully if data loss is"
          + " acceptable.", bytesInFuture);
      return false;
    }
  } else if (force) {
    LOG.warn("forceExit used when normal exit would suffice. Treating "
        + "force exit as normal safe mode exit.");
  }
  // In the standby, do not populate repl queues
  if (!blockManager.isPopulatingReplQueues()
      && blockManager.shouldPopulateReplQueues()) {
    blockManager.initializeReplQueues();
  }
  if (status != BMSafeModeStatus.OFF) {
    NameNode.stateChangeLog.info("STATE* Safe mode is OFF");
  }
  status = BMSafeModeStatus.OFF;
  final long timeInSafemode = monotonicNow() - startTime;
  NameNode.stateChangeLog.info("STATE* Leaving safe mode after {} secs",
      timeInSafemode / 1000);
  NameNode.getNameNodeMetrics().setSafeModeTime(timeInSafemode);
  final NetworkTopology nt =
      blockManager.getDatanodeManager().getNetworkTopology();
  NameNode.stateChangeLog.info("STATE* Network topology has {} racks and {}"
      + " datanodes", nt.getNumOfRacks(), nt.getNumOfLeaves());
  NameNode.stateChangeLog.info("STATE* UnderReplicatedBlocks has {} blocks",
      blockManager.numOfUnderReplicatedBlocks());
  namesystem.startSecretManagerIfNecessary();
  // If startup has not yet completed, end the safemode phase.
  StartupProgress prog = NameNode.getStartupProgress();
  if (prog.getStatus(Phase.SAFEMODE) != Status.COMPLETE) {
    prog.endStep(Phase.SAFEMODE,
        BlockManagerSafeMode.STEP_AWAITING_REPORTED_BLOCKS);
    prog.endPhase(Phase.SAFEMODE);
  }
  return true;
}
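Operationally, the force path here is the one reached through hdfs dfsadmin -safemode forceExit, while a normal exit corresponds to hdfs dfsadmin -safemode leave. The "bytes in future blocks" guard exists because blocks with generation stamps ahead of the namenode's metadata typically indicate the namenode was started from stale metadata; a plain exit would then delete those blocks, which is the trade-off the log messages above spell out.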