Use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.
Class TestAddStripedBlocks, method testAllocateBlockId.
/**
 * Make sure the IDs of striped blocks do not conflict.
 */
@Test
public void testAllocateBlockId() throws Exception {
  Path testPath = new Path("/testfile");
  // creating a file allocates a new block
  DFSTestUtil.writeFile(dfs, testPath, "hello, world!");
  LocatedBlocks lb = dfs.getClient().getLocatedBlocks(testPath.toString(), 0);
  final long firstId = lb.get(0).getBlock().getBlockId();
  // delete the file
  dfs.delete(testPath, true);
  // allocate a new block, and make sure the new block's id does not conflict
  // with the previous one
  DFSTestUtil.writeFile(dfs, testPath, "hello again");
  lb = dfs.getClient().getLocatedBlocks(testPath.toString(), 0);
  final long secondId = lb.get(0).getBlock().getBlockId();
  Assert.assertEquals(firstId + HdfsServerConstants.MAX_BLOCKS_IN_GROUP, secondId);
}
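The lookup at the heart of this test is DFSClient#getLocatedBlocks, reached through DistributedFileSystem#getClient. A minimal sketch of the same lookup, assuming a running cluster and an already initialized DistributedFileSystem (the class name FirstBlockIdExample and the helper firstBlockId are hypothetical, not part of the test):

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

public class FirstBlockIdExample {

  // Ask the NameNode for the file's block list and return the ID of the first block.
  static long firstBlockId(DistributedFileSystem dfs, Path file) throws Exception {
    // Offset 0 returns the blocks covering the start of the file.
    LocatedBlocks lb = dfs.getClient().getLocatedBlocks(file.toString(), 0);
    return lb.get(0).getBlock().getBlockId();
  }
}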
Use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.
Class TestReconstructStripedBlocks, method testCountLiveReplicas.
/**
 * Make sure the NN can detect the scenario where there is a sufficient number
 * of internal blocks (>= 9 by default) but a data/parity block is still missing.
 */
@Test
public void testCountLiveReplicas() throws Exception {
  final HdfsConfiguration conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY, false);
  conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
      StripedFileTestUtil.getDefaultECPolicy().getName());
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(groupSize + 2).build();
  cluster.waitActive();
  DistributedFileSystem fs = cluster.getFileSystem();
  try {
    fs.mkdirs(dirPath);
    fs.setErasureCodingPolicy(dirPath, StripedFileTestUtil.getDefaultECPolicy().getName());
    DFSTestUtil.createFile(fs, filePath, cellSize * dataBlocks * 2, (short) 1, 0L);
    // stop a dn
    LocatedBlocks blks = fs.getClient().getLocatedBlocks(filePath.toString(), 0);
    LocatedStripedBlock block = (LocatedStripedBlock) blks.getLastLocatedBlock();
    DatanodeInfo dnToStop = block.getLocations()[0];
    MiniDFSCluster.DataNodeProperties dnProp = cluster.stopDataNode(dnToStop.getXferAddr());
    cluster.setDataNodeDead(dnToStop);
    // wait for reconstruction to happen
    DFSTestUtil.waitForReplication(fs, filePath, groupSize, 15 * 1000);
    // bring the dn back: 10 internal blocks now
    cluster.restartDataNode(dnProp);
    cluster.waitActive();
    // stop another dn: 9 internal blocks, but they only cover 8 real ones
    dnToStop = block.getLocations()[1];
    cluster.stopDataNode(dnToStop.getXferAddr());
    cluster.setDataNodeDead(dnToStop);
    // currently the namenode is able to track the missing block; restart the NN anyway
    cluster.restartNameNode(true);
    for (DataNode dn : cluster.getDataNodes()) {
      DataNodeTestUtils.triggerBlockReport(dn);
    }
    FSNamesystem fsn = cluster.getNamesystem();
    BlockManager bm = fsn.getBlockManager();
    // wait 3 running cycles of the redundancy monitor
    Thread.sleep(3000);
    for (DataNode dn : cluster.getDataNodes()) {
      DataNodeTestUtils.triggerHeartbeat(dn);
    }
    // check if the NN can detect the missing internal block and finish the
    // reconstruction
    StripedFileTestUtil.waitForReconstructionFinished(filePath, fs, groupSize);
    boolean reconstructed = false;
    for (int i = 0; i < 5; i++) {
      NumberReplicas num = null;
      fsn.readLock();
      try {
        BlockInfo blockInfo = cluster.getNamesystem().getFSDirectory()
            .getINode4Write(filePath.toString()).asFile().getLastBlock();
        num = bm.countNodes(blockInfo);
      } finally {
        fsn.readUnlock();
      }
      if (num.liveReplicas() >= groupSize) {
        reconstructed = true;
        break;
      } else {
        Thread.sleep(1000);
      }
    }
    Assert.assertTrue(reconstructed);
    blks = fs.getClient().getLocatedBlocks(filePath.toString(), 0);
    block = (LocatedStripedBlock) blks.getLastLocatedBlock();
    BitSet bitSet = new BitSet(groupSize);
    for (byte index : block.getBlockIndices()) {
      bitSet.set(index);
    }
    for (int i = 0; i < groupSize; i++) {
      Assert.assertTrue(bitSet.get(i));
    }
  } finally {
    cluster.shutdown();
  }
}
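The final loop of the test checks that every internal block index of the striped group is reported. The same check can be factored into a small helper; this is a hypothetical sketch (the method name allInternalBlocksPresent is not part of the test), relying only on LocatedStripedBlock#getBlockIndices:

import java.util.BitSet;
import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;

// Returns true only if every internal block index in [0, groupSize) is reported
// by the NameNode for this block group.
static boolean allInternalBlocksPresent(LocatedStripedBlock block, int groupSize) {
  BitSet seen = new BitSet(groupSize);
  for (byte index : block.getBlockIndices()) {
    seen.set(index);
  }
  // The first unset bit must lie at or beyond groupSize, i.e. no index is missing.
  return seen.nextClearBit(0) >= groupSize;
}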
Use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.
Class TestUpgradeDomainBlockPlacementPolicy, method testPlacementAfterDecommission.
@Test(timeout = 300000)
public void testPlacementAfterDecommission() throws Exception {
  final long fileSize = DEFAULT_BLOCK_SIZE * 5;
  final String testFile = new String("/testfile");
  final Path path = new Path(testFile);
  DFSTestUtil.createFile(cluster.getFileSystem(), path, fileSize, REPLICATION_FACTOR, 1000L);
  // Decommission some nodes and wait until decommissions have finished.
  refreshDatanodeAdminProperties2();
  GenericTestUtils.waitFor(new Supplier<Boolean>() {

    @Override
    public Boolean get() {
      boolean successful = true;
      LocatedBlocks locatedBlocks;
      try {
        locatedBlocks = cluster.getFileSystem().getClient()
            .getLocatedBlocks(path.toString(), 0, fileSize);
      } catch (IOException ioe) {
        return false;
      }
      for (LocatedBlock block : locatedBlocks.getLocatedBlocks()) {
        Set<DatanodeInfo> locs = new HashSet<>();
        for (DatanodeInfo datanodeInfo : block.getLocations()) {
          if (datanodeInfo.getAdminState() == DatanodeInfo.AdminStates.NORMAL) {
            locs.add(datanodeInfo);
          }
        }
        for (DatanodeID datanodeID : expectedDatanodeIDs) {
          successful = successful && locs.contains(datanodeID);
        }
      }
      return successful;
    }
  }, 1000, 60000);
  // Verify block placement policy of each block.
  LocatedBlocks locatedBlocks;
  locatedBlocks = cluster.getFileSystem().getClient()
      .getLocatedBlocks(path.toString(), 0, fileSize);
  for (LocatedBlock block : locatedBlocks.getLocatedBlocks()) {
    BlockPlacementStatus status = cluster.getNamesystem().getBlockManager()
        .getBlockPlacementPolicy().verifyBlockPlacement(block.getLocations(), REPLICATION_FACTOR);
    assertTrue(status.isPlacementPolicySatisfied());
  }
}
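The verification loop at the end delegates to the cluster's active BlockPlacementPolicy. A hedged sketch of that check as a standalone helper, assuming the same policy and block objects as in the test (the name isPlacementSatisfied is hypothetical):

import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementStatus;

// Ask the placement policy whether the block's current locations satisfy it,
// e.g. with respect to racks or upgrade domains.
static boolean isPlacementSatisfied(BlockPlacementPolicy policy, LocatedBlock block,
    int replication) {
  BlockPlacementStatus status = policy.verifyBlockPlacement(block.getLocations(), replication);
  return status.isPlacementPolicySatisfied();
}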
Use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.
Class TestUpgradeDomainBlockPlacementPolicy, method testPlacement.
@Test
public void testPlacement() throws Exception {
  final long fileSize = DEFAULT_BLOCK_SIZE * 5;
  final String testFile = new String("/testfile");
  final Path path = new Path(testFile);
  DFSTestUtil.createFile(cluster.getFileSystem(), path, fileSize, REPLICATION_FACTOR, 1000L);
  LocatedBlocks locatedBlocks = cluster.getFileSystem().getClient()
      .getLocatedBlocks(path.toString(), 0, fileSize);
  for (LocatedBlock block : locatedBlocks.getLocatedBlocks()) {
    Set<DatanodeInfo> locs = new HashSet<>();
    for (DatanodeInfo datanodeInfo : block.getLocations()) {
      if (datanodeInfo.getAdminState() == DatanodeInfo.AdminStates.NORMAL) {
        locs.add(datanodeInfo);
      }
    }
    for (DatanodeID datanodeID : expectedDatanodeIDs) {
      assertTrue(locs.contains(datanodeID));
    }
  }
}
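Both placement tests repeat the same filter: keep only the replicas whose datanode is still in the NORMAL admin state. As a hypothetical standalone helper (the name normalStateLocations is not in the tests):

import java.util.HashSet;
import java.util.Set;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;

// Collect the replica locations of a block whose datanode admin state is NORMAL
// (i.e. not being decommissioned or otherwise transitioning).
static Set<DatanodeInfo> normalStateLocations(LocatedBlock block) {
  Set<DatanodeInfo> locs = new HashSet<>();
  for (DatanodeInfo datanodeInfo : block.getLocations()) {
    if (datanodeInfo.getAdminState() == DatanodeInfo.AdminStates.NORMAL) {
      locs.add(datanodeInfo);
    }
  }
  return locs;
}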
Use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hbase by apache.
Class TestBlockReorder, method testFromDFS.
private void testFromDFS(DistributedFileSystem dfs, String src, int repCount, String localhost)
    throws Exception {
  // Multiple times as the order is random
  for (int i = 0; i < 10; i++) {
    LocatedBlocks l;
    // The NN gets the block list asynchronously, so we may need multiple tries to get the list
    final long max = System.currentTimeMillis() + 10000;
    boolean done;
    do {
      Assert.assertTrue("Can't get enough replicas.", System.currentTimeMillis() < max);
      l = getNamenode(dfs.getClient()).getBlockLocations(src, 0, 1);
      Assert.assertNotNull("Can't get block locations for " + src, l);
      Assert.assertNotNull(l.getLocatedBlocks());
      Assert.assertTrue(l.getLocatedBlocks().size() > 0);
      done = true;
      for (int y = 0; y < l.getLocatedBlocks().size() && done; y++) {
        done = (l.get(y).getLocations().length == repCount);
      }
    } while (!done);
    for (int y = 0; y < l.getLocatedBlocks().size() && done; y++) {
      Assert.assertEquals(localhost, l.get(y).getLocations()[repCount - 1].getHostName());
    }
  }
}
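The inner do/while loop above polls the NameNode until every block of the file reports the expected number of replicas. A hedged sketch of that condition as a single predicate, using ClientProtocol#getBlockLocations as the test does (the name allBlocksFullyReplicated is hypothetical):

import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

// True once every located block of the file reports exactly repCount replicas.
static boolean allBlocksFullyReplicated(ClientProtocol namenode, String src, int repCount)
    throws Exception {
  LocatedBlocks l = namenode.getBlockLocations(src, 0, 1);
  if (l == null || l.getLocatedBlocks().isEmpty()) {
    return false;
  }
  for (LocatedBlock b : l.getLocatedBlocks()) {
    if (b.getLocations().length != repCount) {
      return false;
    }
  }
  return true;
}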