use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.
the class TestDFSRename method testRenameWithOverwrite.
/**
 * Check that the blocks of the dst file are cleaned up after a rename with overwrite.
 * Restart the NN to verify that the rename succeeded.
 */
@Test(timeout = 120000)
public void testRenameWithOverwrite() throws Exception {
  final short replFactor = 2;
  final long blockSize = 512;
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(replFactor).build();
  DistributedFileSystem dfs = cluster.getFileSystem();
  try {
    long fileLen = blockSize * 3;
    String src = "/foo/src";
    String dst = "/foo/dst";
    Path srcPath = new Path(src);
    Path dstPath = new Path(dst);
    DFSTestUtil.createFile(dfs, srcPath, fileLen, replFactor, 1);
    DFSTestUtil.createFile(dfs, dstPath, fileLen, replFactor, 1);
    LocatedBlocks lbs = NameNodeAdapter.getBlockLocations(cluster.getNameNode(), dst, 0, fileLen);
    BlockManager bm = NameNodeAdapter.getNamesystem(cluster.getNameNode()).getBlockManager();
    assertTrue(bm.getStoredBlock(lbs.getLocatedBlocks().get(0).getBlock().getLocalBlock()) != null);
    dfs.rename(srcPath, dstPath, Rename.OVERWRITE);
    assertTrue(bm.getStoredBlock(lbs.getLocatedBlocks().get(0).getBlock().getLocalBlock()) == null);
    // Restart the NN and verify that the rename succeeded
    cluster.restartNameNodes();
    assertFalse(dfs.exists(srcPath));
    assertTrue(dfs.exists(dstPath));
  } finally {
    if (dfs != null) {
      dfs.close();
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
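For context, the same block metadata the test reads through NameNodeAdapter is also reachable from the client side. Below is a minimal, hedged sketch (not part of the Hadoop test) that walks a file's LocatedBlocks via DFSClient; the helper name dumpBlockLocations is hypothetical and used only for illustration.

import java.io.IOException;

import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

// Hypothetical helper: print every block of a file and the datanodes holding a replica.
static void dumpBlockLocations(DistributedFileSystem dfs, String path, long fileLen)
    throws IOException {
  LocatedBlocks lbs = dfs.getClient().getLocatedBlocks(path, 0, fileLen);
  for (LocatedBlock lb : lbs.getLocatedBlocks()) {
    // getLocalBlock() strips the block pool id, matching what the test above
    // passes to BlockManager#getStoredBlock.
    System.out.println("block " + lb.getBlock().getLocalBlock()
        + " size=" + lb.getBlockSize());
    for (DatanodeInfo dn : lb.getLocations()) {
      System.out.println("  replica on " + dn.getXferAddr());
    }
  }
}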
use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.
the class TestAddOverReplicatedStripedBlocks method testProcessOverReplicatedAndMissingStripedBlock.
// This test is going to be rewritten in HDFS-10854. Ignoring this test
// temporarily as it fails with the fix for HDFS-10301.
@Ignore
@Test
public void testProcessOverReplicatedAndMissingStripedBlock() throws Exception {
  long fileLen = cellSize * dataBlocks;
  DFSTestUtil.createStripedFile(cluster, filePath, null, 1, stripesPerBlock, false);
  LocatedBlocks lbs = cluster.getNameNodeRpc().getBlockLocations(filePath.toString(), 0, fileLen);
  LocatedStripedBlock bg = (LocatedStripedBlock) (lbs.get(0));
  long gs = bg.getBlock().getGenerationStamp();
  String bpid = bg.getBlock().getBlockPoolId();
  long groupId = bg.getBlock().getBlockId();
  Block blk = new Block(groupId, blockSize, gs);
  // only inject GROUP_SIZE - 1 blocks, so there is one block missing
  for (int i = 0; i < groupSize - 1; i++) {
    blk.setBlockId(groupId + i);
    cluster.injectBlocks(i, Arrays.asList(blk), bpid);
  }
  cluster.triggerBlockReports();
  // let an internal block be over-replicated with 2 redundant blocks.
  // Therefore the number of internal blocks exceeds GROUP_SIZE. (5 data blocks +
  // 3 parity blocks + 2 redundant blocks > GROUP_SIZE)
  blk.setBlockId(groupId + 2);
  cluster.injectBlocks(numDNs - 3, Arrays.asList(blk), bpid);
  cluster.injectBlocks(numDNs - 2, Arrays.asList(blk), bpid);
  // update blocksMap
  cluster.triggerBlockReports();
  Thread.sleep(2000);
  // add to invalidates
  cluster.triggerHeartbeats();
  // datanode deletes the block
  cluster.triggerHeartbeats();
  // update blocksMap
  cluster.triggerBlockReports();
  // Since one block is missing, the over-replicated blocks will not be
  // deleted until reconstruction happens
  lbs = cluster.getNameNodeRpc().getBlockLocations(filePath.toString(), 0, fileLen);
  bg = (LocatedStripedBlock) (lbs.get(0));
  assertEquals(groupSize + 1, bg.getBlockIndices().length);
  assertEquals(groupSize + 1, bg.getLocations().length);
  BitSet set = new BitSet(groupSize);
  for (byte index : bg.getBlockIndices()) {
    set.set(index);
  }
  Assert.assertFalse(set.get(groupSize - 1));
  for (int i = 0; i < groupSize - 1; i++) {
    assertTrue(set.get(i));
  }
}
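The BitSet bookkeeping at the end of this test generalizes to a small check of which internal (striped) block indices currently have a reported replica. A hedged sketch, assuming the first block group has already been fetched as a LocatedStripedBlock; the helper name missingInternalBlocks is hypothetical.

import java.util.BitSet;

import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;

// Hypothetical helper: return the internal-block indices of a block group
// for which no replica was reported.
static BitSet missingInternalBlocks(LocatedStripedBlock bg, int groupSize) {
  BitSet present = new BitSet(groupSize);
  for (byte index : bg.getBlockIndices()) {
    present.set(index);
  }
  BitSet missing = new BitSet(groupSize);
  missing.set(0, groupSize);   // start by assuming every index is missing ...
  missing.andNot(present);     // ... then clear the indices that were reported
  return missing;
}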
use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.
the class TestMover method createFileWithFavoredDatanodes.
private void createFileWithFavoredDatanodes(final Configuration conf, final MiniDFSCluster cluster, final DistributedFileSystem dfs) throws IOException {
  // Add two DISK-based data nodes to the cluster.
  // Also, ensure that blocks are pinned in these new data nodes.
  StorageType[][] newtypes = new StorageType[][] { { StorageType.DISK }, { StorageType.DISK } };
  startAdditionalDNs(conf, 2, newtypes, cluster);
  ArrayList<DataNode> dataNodes = cluster.getDataNodes();
  InetSocketAddress[] favoredNodes = new InetSocketAddress[2];
  int j = 0;
  for (int i = dataNodes.size() - 1; i >= 2; i--) {
    favoredNodes[j++] = dataNodes.get(i).getXferAddress();
  }
  final String file = "/parent/testMoverFailedRetryWithPinnedBlocks2";
  final FSDataOutputStream out = dfs.create(new Path(file), FsPermission.getDefault(), true, DEFAULT_BLOCK_SIZE, (short) 2, DEFAULT_BLOCK_SIZE, null, favoredNodes);
  byte[] fileData = StripedFileTestUtil.generateBytes(DEFAULT_BLOCK_SIZE * 2);
  out.write(fileData);
  out.close();
  // Mock FsDatasetSpi#getPinning to show that the block is pinned.
  LocatedBlocks locatedBlocks = dfs.getClient().getLocatedBlocks(file, 0);
  Assert.assertEquals("Wrong block count", 2, locatedBlocks.locatedBlockCount());
  LocatedBlock lb = locatedBlocks.get(0);
  DatanodeInfo datanodeInfo = lb.getLocations()[0];
  for (DataNode dn : cluster.getDataNodes()) {
    if (dn.getDatanodeId().getDatanodeUuid().equals(datanodeInfo.getDatanodeUuid())) {
      LOG.info("Simulate block pinning in datanode {}", datanodeInfo);
      DataNodeTestUtils.mockDatanodeBlkPinning(dn, true);
      break;
    }
  }
}
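Since the Mover is driven by storage types, a related hedged sketch of reading each replica's StorageType straight off a LocatedBlocks result may be useful; the helper name countReplicasOnDisk is hypothetical and not part of TestMover.

import java.io.IOException;

import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

// Hypothetical helper: count how many replicas of the first block sit on DISK storage.
static int countReplicasOnDisk(DistributedFileSystem dfs, String path) throws IOException {
  LocatedBlocks lbs = dfs.getClient().getLocatedBlocks(path, 0);
  LocatedBlock lb = lbs.get(0);
  int onDisk = 0;
  for (StorageType type : lb.getStorageTypes()) {
    if (type == StorageType.DISK) {
      onDisk++;
    }
  }
  return onDisk;
}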
use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.
the class TestStorageMover method waitForAllReplicas.
private void waitForAllReplicas(int expectedReplicaNum, Path file, DistributedFileSystem dfs, int retryCount) throws Exception {
  LOG.info("Waiting for replicas count " + expectedReplicaNum + ", file name: " + file);
  for (int i = 0; i < retryCount; i++) {
    LocatedBlocks lbs = dfs.getClient().getLocatedBlocks(file.toString(), 0, BLOCK_SIZE);
    LocatedBlock lb = lbs.get(0);
    if (lb.getLocations().length >= expectedReplicaNum) {
      return;
    } else {
      Thread.sleep(1000);
    }
  }
}
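The loop above returns silently once retryCount is exhausted, even if the expected replica count was never reached. A hedged alternative sketch using GenericTestUtils.waitFor, which throws a TimeoutException instead; BLOCK_SIZE is assumed to be the same constant the surrounding test class already defines.

import java.util.concurrent.TimeoutException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.test.GenericTestUtils;

// Sketch: wait until the first block of 'file' reports at least
// 'expectedReplicaNum' locations, failing loudly after 'timeoutMs'.
private void waitForAllReplicas(int expectedReplicaNum, Path file,
    DistributedFileSystem dfs, int timeoutMs)
    throws TimeoutException, InterruptedException {
  GenericTestUtils.waitFor(() -> {
    try {
      return dfs.getClient()
          .getLocatedBlocks(file.toString(), 0, BLOCK_SIZE)
          .get(0).getLocations().length >= expectedReplicaNum;
    } catch (Exception e) {
      return false;
    }
  }, 1000, timeoutMs);
}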
use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.
the class TestAddOverReplicatedStripedBlocks method testProcessOverReplicatedSBSmallerThanFullBlocks.
@Test
public void testProcessOverReplicatedSBSmallerThanFullBlocks() throws Exception {
  // Create an EC file that doesn't fill full internal blocks.
  int fileLen = cellSize * (dataBlocks - 1);
  byte[] content = new byte[fileLen];
  DFSTestUtil.writeFile(fs, filePath, new String(content));
  LocatedBlocks lbs = cluster.getNameNodeRpc().getBlockLocations(filePath.toString(), 0, fileLen);
  LocatedStripedBlock bg = (LocatedStripedBlock) (lbs.get(0));
  long gs = bg.getBlock().getGenerationStamp();
  String bpid = bg.getBlock().getBlockPoolId();
  long groupId = bg.getBlock().getBlockId();
  Block blk = new Block(groupId, blockSize, gs);
  cluster.triggerBlockReports();
  List<DatanodeInfo> infos = Arrays.asList(bg.getLocations());
  // let an internal block be over-replicated with (numDNs - GROUP_SIZE + 1)
  // redundant blocks. Therefore the number of internal blocks exceeds GROUP_SIZE.
  blk.setBlockId(groupId);
  List<DataNode> dataNodeList = cluster.getDataNodes();
  for (int i = 0; i < numDNs; i++) {
    if (!infos.contains(dataNodeList.get(i).getDatanodeId())) {
      cluster.injectBlocks(i, Arrays.asList(blk), bpid);
      System.out.println("XXX: inject block into datanode " + i);
    }
  }
  // update blocksMap
  cluster.triggerBlockReports();
  // add to invalidates
  cluster.triggerHeartbeats();
  // datanode deletes the block
  cluster.triggerHeartbeats();
  // update blocksMap
  cluster.triggerBlockReports();
  // verify that all internal blocks exist
  lbs = cluster.getNameNodeRpc().getBlockLocations(filePath.toString(), 0, fileLen);
  StripedFileTestUtil.verifyLocatedStripedBlocks(lbs, groupSize - 1);
}