Use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.
From the class TestDecommission, method checkFile:
/**
 * Verify that the number of replicas is as expected for each block in
 * the given file.
 * For blocks with a decommissioned node, verify that their replication
 * is 1 more than what is specified.
 * For blocks without decommissioned nodes, verify that their replication
 * equals what is specified.
 *
 * @param downnode - if null, there is no decommissioned node for this file.
 * @return - null if no failure is found, else an error message string.
 */
private static String checkFile(FileSystem fileSys, Path name, int repl,
    String downnode, int numDatanodes) throws IOException {
  boolean isNodeDown = (downnode != null);
  // need a raw stream
  assertTrue("Not HDFS:" + fileSys.getUri(),
      fileSys instanceof DistributedFileSystem);
  HdfsDataInputStream dis = (HdfsDataInputStream) fileSys.open(name);
  Collection<LocatedBlock> dinfo = dis.getAllBlocks();
  for (LocatedBlock blk : dinfo) { // for each block
    int hasdown = 0;
    DatanodeInfo[] nodes = blk.getLocations();
    for (int j = 0; j < nodes.length; j++) { // for each replica
      if (isNodeDown && nodes[j].getXferAddr().equals(downnode)) {
        hasdown++;
        // Downnode must actually be decommissioned
        if (!nodes[j].isDecommissioned()) {
          return "For block " + blk.getBlock() + " replica on " + nodes[j]
              + " is given as downnode, but is not decommissioned";
        }
        // Decommissioned node (if any) should only be last node in list.
        if (j != nodes.length - 1) {
          return "For block " + blk.getBlock() + " decommissioned node "
              + nodes[j] + " was not last node in list: "
              + (j + 1) + " of " + nodes.length;
        }
        LOG.info("Block " + blk.getBlock() + " replica on " + nodes[j]
            + " is decommissioned.");
      } else {
        // Non-downnodes must not be decommissioned
        if (nodes[j].isDecommissioned()) {
          return "For block " + blk.getBlock() + " replica on " + nodes[j]
              + " is unexpectedly decommissioned";
        }
      }
    }
    LOG.info("Block " + blk.getBlock() + " has " + hasdown
        + " decommissioned replica.");
    if (Math.min(numDatanodes, repl + hasdown) != nodes.length) {
      return "Wrong number of replicas for block " + blk.getBlock() + ": "
          + nodes.length + ", expected "
          + Math.min(numDatanodes, repl + hasdown);
    }
  }
  return null;
}
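For orientation, callers of checkFile typically poll it until the expected layout appears. The sketch below is not part of the Hadoop source; the helper name, the timeout, and the poll interval are assumptions, and it only reuses checkFile plus JUnit's assertNull.

// Hypothetical polling wrapper around checkFile(); names and timings are
// illustrative, not taken from the Hadoop tests.
private static void waitForExpectedReplicas(FileSystem fileSys, Path name,
    int repl, String downnode, int numDatanodes) throws Exception {
  final long deadline = System.currentTimeMillis() + 30000; // 30s budget
  String error = checkFile(fileSys, name, repl, downnode, numDatanodes);
  while (error != null && System.currentTimeMillis() < deadline) {
    Thread.sleep(500); // re-check every half second
    error = checkFile(fileSys, name, repl, downnode, numDatanodes);
  }
  // checkFile() returns null on success and an error description otherwise
  assertNull("Unexpected replica layout: " + error, error);
}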
Use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.
From the class TestDatanodeReport, method testDatanodeReportWithUpgradeDomain:
/**
 * This test verifies that the upgrade domain is set according to the JSON
 * hosts file.
 */
@Test
public void testDatanodeReportWithUpgradeDomain() throws Exception {
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
      500); // 0.5s
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
  conf.setClass(DFSConfigKeys.DFS_NAMENODE_HOSTS_PROVIDER_CLASSNAME_KEY,
      CombinedHostFileManager.class, HostConfigManager.class);
  HostsFileWriter hostsFileWriter = new HostsFileWriter();
  hostsFileWriter.initialize(conf, "temp/datanodeReport");
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  final DFSClient client = cluster.getFileSystem().dfs;
  final String ud1 = "ud1";
  final String ud2 = "ud2";
  try {
    // wait until the cluster is up
    cluster.waitActive();
    DatanodeAdminProperties datanode = new DatanodeAdminProperties();
    datanode.setHostName(
        cluster.getDataNodes().get(0).getDatanodeId().getHostName());
    datanode.setUpgradeDomain(ud1);
    hostsFileWriter.initIncludeHosts(
        new DatanodeAdminProperties[] { datanode });
    client.refreshNodes();
    DatanodeInfo[] all = client.datanodeReport(DatanodeReportType.ALL);
    assertEquals(all[0].getUpgradeDomain(), ud1);
    datanode.setUpgradeDomain(null);
    hostsFileWriter.initIncludeHosts(
        new DatanodeAdminProperties[] { datanode });
    client.refreshNodes();
    all = client.datanodeReport(DatanodeReportType.ALL);
    assertEquals(all[0].getUpgradeDomain(), null);
    datanode.setUpgradeDomain(ud2);
    hostsFileWriter.initIncludeHosts(
        new DatanodeAdminProperties[] { datanode });
    client.refreshNodes();
    all = client.datanodeReport(DatanodeReportType.ALL);
    assertEquals(all[0].getUpgradeDomain(), ud2);
  } finally {
    cluster.shutdown();
  }
}
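The test repeats the same write-refresh-verify sequence three times. A small helper along the lines of the sketch below could express that pattern once; this is an illustrative refactoring, not code from the Hadoop tree, and it only reuses the calls already shown above.

// Hypothetical helper: push a new upgrade domain through the JSON hosts file
// and assert that the datanode report reflects it. Illustrative only.
private static void setAndVerifyUpgradeDomain(HostsFileWriter writer,
    DFSClient client, DatanodeAdminProperties datanode, String upgradeDomain)
    throws Exception {
  datanode.setUpgradeDomain(upgradeDomain);
  writer.initIncludeHosts(new DatanodeAdminProperties[] { datanode });
  client.refreshNodes();
  DatanodeInfo[] all = client.datanodeReport(DatanodeReportType.ALL);
  assertEquals(upgradeDomain, all[0].getUpgradeDomain());
}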
Use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.
From the class TestBlockRecovery, method testRaceBetweenReplicaRecoveryAndFinalizeBlock:
/**
 * Test to verify the race between finalizeBlock and lease recovery.
 *
 * @throws Exception
 */
@Test(timeout = 20000)
public void testRaceBetweenReplicaRecoveryAndFinalizeBlock() throws Exception {
  // Stop the mocked DN started in startup()
  tearDown();
  Configuration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_KEY, "1000");
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  try {
    cluster.waitClusterUp();
    DistributedFileSystem fs = cluster.getFileSystem();
    Path path = new Path("/test");
    FSDataOutputStream out = fs.create(path);
    out.writeBytes("data");
    out.hsync();
    List<LocatedBlock> blocks = DFSTestUtil.getAllBlocks(fs.open(path));
    final LocatedBlock block = blocks.get(0);
    final DataNode dataNode = cluster.getDataNodes().get(0);
    final AtomicBoolean recoveryInitResult = new AtomicBoolean(true);
    Thread recoveryThread = new Thread() {
      @Override
      public void run() {
        try {
          DatanodeInfo[] locations = block.getLocations();
          final RecoveringBlock recoveringBlock = new RecoveringBlock(
              block.getBlock(), locations,
              block.getBlock().getGenerationStamp() + 1);
          try (AutoCloseableLock lock = dataNode.data.acquireDatasetLock()) {
            Thread.sleep(2000);
            dataNode.initReplicaRecovery(recoveringBlock);
          }
        } catch (Exception e) {
          recoveryInitResult.set(false);
        }
      }
    };
    recoveryThread.start();
    try {
      out.close();
    } catch (IOException e) {
      Assert.assertTrue("Writing should fail",
          e.getMessage().contains("are bad. Aborting..."));
    } finally {
      recoveryThread.join();
    }
    Assert.assertTrue("Recovery should be initiated successfully",
        recoveryInitResult.get());
    dataNode.updateReplicaUnderRecovery(block.getBlock(),
        block.getBlock().getGenerationStamp() + 1,
        block.getBlock().getBlockId(), block.getBlockSize());
  } finally {
    if (null != cluster) {
      cluster.shutdown();
      cluster = null;
    }
  }
}
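The recovery thread above reduces any failure to a boolean. If the original stack trace matters, one option is to capture the exception instead, roughly as sketched here; this is a hypothetical variant, not the upstream test, and it needs java.util.concurrent.atomic.AtomicReference.

// Hypothetical variant of the recovery thread: keep the first exception so a
// failed assertion can carry the real cause. Illustrative only.
final AtomicReference<Exception> recoveryFailure = new AtomicReference<>();
Thread recoveryThread = new Thread(() -> {
  try {
    DatanodeInfo[] locations = block.getLocations();
    RecoveringBlock recoveringBlock = new RecoveringBlock(block.getBlock(),
        locations, block.getBlock().getGenerationStamp() + 1);
    try (AutoCloseableLock lock = dataNode.data.acquireDatasetLock()) {
      Thread.sleep(2000);
      dataNode.initReplicaRecovery(recoveringBlock);
    }
  } catch (Exception e) {
    recoveryFailure.compareAndSet(null, e);
  }
});
// start the thread, attempt out.close() and join, exactly as in the test above
if (recoveryFailure.get() != null) {
  throw new AssertionError("Recovery initiation failed", recoveryFailure.get());
}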
Use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.
From the class TestBlockRecovery, method initRecoveringBlocks:
private Collection<RecoveringBlock> initRecoveringBlocks() throws IOException {
  Collection<RecoveringBlock> blocks = new ArrayList<RecoveringBlock>(1);
  DatanodeInfo mockOtherDN = DFSTestUtil.getLocalDatanodeInfo();
  DatanodeInfo[] locs = new DatanodeInfo[] {
      new DatanodeInfoBuilder()
          .setNodeID(dn.getDNRegistrationForBP(block.getBlockPoolId()))
          .build(),
      mockOtherDN };
  RecoveringBlock rBlock = new RecoveringBlock(block, locs, RECOVERY_ID);
  blocks.add(rBlock);
  return blocks;
}
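To make the shape of what this helper returns concrete, the short loop below just logs each recovering block. It is illustrative only: it assumes the class's LOG, the LocatedBlock accessors, and RecoveringBlock.getNewGenerationStamp(), which the recovery code uses elsewhere.

// Illustrative only: walk the collection built by initRecoveringBlocks().
for (RecoveringBlock rb : initRecoveringBlocks()) {
  LOG.info("Recovering " + rb.getBlock() + " to new generation stamp "
      + rb.getNewGenerationStamp());
  for (DatanodeInfo loc : rb.getLocations()) {
    LOG.info("  replica location: " + loc);
  }
}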
Use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.
From the class TestBlockReplacement, method testBlockMoveAcrossStorageInSameNode:
@Test
public void testBlockMoveAcrossStorageInSameNode() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  // create only one datanode in the cluster to verify movement within
  // the datanode
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(1)
      .storageTypes(new StorageType[] { StorageType.DISK, StorageType.ARCHIVE })
      .build();
  try {
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final Path file = new Path("/testBlockMoveAcrossStorageInSameNode/file");
    DFSTestUtil.createFile(dfs, file, 1024, (short) 1, 1024);
    LocatedBlocks locatedBlocks =
        dfs.getClient().getLocatedBlocks(file.toString(), 0);
    // get the current block, its locations and storage types
    LocatedBlock locatedBlock = locatedBlocks.get(0);
    ExtendedBlock block = locatedBlock.getBlock();
    DatanodeInfo[] locations = locatedBlock.getLocations();
    assertEquals(1, locations.length);
    StorageType[] storageTypes = locatedBlock.getStorageTypes();
    // current block should be written to DISK
    assertTrue(storageTypes[0] == StorageType.DISK);
    DatanodeInfo source = locations[0];
    // move block to ARCHIVE by using the same DatanodeInfo for source, proxy
    // and destination so that the movement happens within the datanode
    assertTrue(replaceBlock(block, source, source, source,
        StorageType.ARCHIVE, Status.SUCCESS));
    // wait till the namenode is notified
    Thread.sleep(3000);
    locatedBlocks = dfs.getClient().getLocatedBlocks(file.toString(), 0);
    // re-read the current block after the move
    locatedBlock = locatedBlocks.get(0);
    assertEquals("Storage should be only one",
        1, locatedBlock.getLocations().length);
    assertTrue("Block should be moved to ARCHIVE",
        locatedBlock.getStorageTypes()[0] == StorageType.ARCHIVE);
  } finally {
    cluster.shutdown();
  }
}
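The fixed Thread.sleep(3000) in the test above relies on the namenode having been notified within three seconds. One alternative is a polling wait, sketched below; it assumes the GenericTestUtils.waitFor(check, checkEveryMillis, waitForMillis) helper from org.apache.hadoop.test and is not part of the quoted test.

// Illustrative replacement for the fixed sleep: poll until the reported
// storage type for the block flips to ARCHIVE, or time out after 30 seconds.
GenericTestUtils.waitFor(() -> {
  try {
    LocatedBlock lb =
        dfs.getClient().getLocatedBlocks(file.toString(), 0).get(0);
    return lb.getStorageTypes()[0] == StorageType.ARCHIVE;
  } catch (IOException e) {
    return false;
  }
}, 500, 30000);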