Use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.
The class TestBlockReplacement, method testBlockMoveAcrossStorageInSameNode.
@Test
public void testBlockMoveAcrossStorageInSameNode() throws Exception {
final Configuration conf = new HdfsConfiguration();
// create only one datanode in the cluster to verify movement within
// datanode.
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).storageTypes(new StorageType[] { StorageType.DISK, StorageType.ARCHIVE }).build();
try {
cluster.waitActive();
final DistributedFileSystem dfs = cluster.getFileSystem();
final Path file = new Path("/testBlockMoveAcrossStorageInSameNode/file");
DFSTestUtil.createFile(dfs, file, 1024, (short) 1, 1024);
LocatedBlocks locatedBlocks = dfs.getClient().getLocatedBlocks(file.toString(), 0);
// get the first located block
LocatedBlock locatedBlock = locatedBlocks.get(0);
ExtendedBlock block = locatedBlock.getBlock();
DatanodeInfo[] locations = locatedBlock.getLocations();
assertEquals(1, locations.length);
StorageType[] storageTypes = locatedBlock.getStorageTypes();
// current block should be written to DISK
assertTrue(storageTypes[0] == StorageType.DISK);
DatanodeInfo source = locations[0];
// move the block to ARCHIVE by using the same DatanodeInfo for source, proxy
// and destination so that the movement happens within the datanode
assertTrue(replaceBlock(block, source, source, source, StorageType.ARCHIVE, Status.SUCCESS));
// wait till the namenode is notified of the block movement
Thread.sleep(3000);
locatedBlocks = dfs.getClient().getLocatedBlocks(file.toString(), 0);
// re-fetch the first located block after the move
locatedBlock = locatedBlocks.get(0);
assertEquals("Storage should be only one", 1, locatedBlock.getLocations().length);
assertTrue("Block should be moved to ARCHIVE", locatedBlock.getStorageTypes()[0] == StorageType.ARCHIVE);
} finally {
cluster.shutdown();
}
}
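The replaceBlock(...) call above (and in the other TestBlockReplacement snippets on this page) refers to a private helper defined elsewhere in the test class. As a rough illustration, a hypothetical sketch of such a helper is shown below: it connects to the destination datanode, issues a DataTransferProtocol replaceBlock op, and compares the returned status with the expected one. The helper name, the 60-second timeout, the five-argument Sender#replaceBlock call and the PBHelperClient.vintPrefixed usage are assumptions that vary across Hadoop versions; this is not the actual test code.
// Hypothetical sketch only; assumed imports: java.net.Socket, java.io.DataInputStream,
// java.io.DataOutputStream, org.apache.hadoop.net.NetUtils,
// org.apache.hadoop.hdfs.protocol.datatransfer.Sender,
// org.apache.hadoop.hdfs.protocolPB.PBHelperClient,
// org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto,
// org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.
private boolean replaceBlockSketch(ExtendedBlock block, DatanodeInfo source,
    DatanodeInfo sourceProxy, DatanodeInfo destination,
    StorageType targetStorageType, Status expectedStatus) throws IOException {
  try (Socket sock = new Socket()) {
    // connect to the destination datanode's data transfer port
    sock.connect(NetUtils.createSocketAddr(destination.getXferAddr()), 60 * 1000);
    DataOutputStream out = new DataOutputStream(sock.getOutputStream());
    // issue the replaceBlock op; this five-argument form is an assumption and
    // newer releases take additional parameters
    new Sender(out).replaceBlock(block, targetStorageType,
        BlockTokenSecretManager.DUMMY_TOKEN, source.getDatanodeUuid(), sourceProxy);
    out.flush();
    // read the op response and compare its status against the expected one
    DataInputStream in = new DataInputStream(sock.getInputStream());
    BlockOpResponseProto response =
        BlockOpResponseProto.parseFrom(PBHelperClient.vintPrefixed(in));
    return response.getStatus() == expectedStatus;
  }
}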
Use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.
The class TestBlockReplacement, method testDeletedBlockWhenAddBlockIsInEdit.
/**
* The standby namenode should not queue a delete-block request while the
* corresponding add-block request is still in edit log segments that have
* yet to be read.
* @throws Exception
*/
@Test
public void testDeletedBlockWhenAddBlockIsInEdit() throws Exception {
Configuration conf = new HdfsConfiguration();
cluster = new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(1).build();
DFSClient client = null;
try {
cluster.waitActive();
assertEquals("Number of namenodes is not 2", 2, cluster.getNumNameNodes());
// Transitioning the namenode 0 to active.
cluster.transitionToActive(0);
assertTrue("Namenode 0 should be in active state", cluster.getNameNode(0).isActiveState());
assertTrue("Namenode 1 should be in standby state", cluster.getNameNode(1).isStandbyState());
// Trigger heartbeat to mark DatanodeStorageInfo#heartbeatedSinceFailover
// to true.
DataNodeTestUtils.triggerHeartbeat(cluster.getDataNodes().get(0));
FileSystem fs = cluster.getFileSystem(0);
// Trigger blockReport to mark DatanodeStorageInfo#blockContentsStale
// to false.
cluster.getDataNodes().get(0).triggerBlockReport(new BlockReportOptions.Factory().setIncremental(false).build());
Path fileName = new Path("/tmp.txt");
// create a file with one block
DFSTestUtil.createFile(fs, fileName, 10L, (short) 1, 1234L);
DFSTestUtil.waitReplication(fs, fileName, (short) 1);
client = new DFSClient(cluster.getFileSystem(0).getUri(), conf);
List<LocatedBlock> locatedBlocks = client.getNamenode().getBlockLocations("/tmp.txt", 0, 10L).getLocatedBlocks();
assertTrue(locatedBlocks.size() == 1);
assertTrue(locatedBlocks.get(0).getLocations().length == 1);
// add a second datanode to the cluster
cluster.startDataNodes(conf, 1, true, null, null, null, null);
assertEquals("Number of datanodes should be 2", 2, cluster.getDataNodes().size());
DataNode dn0 = cluster.getDataNodes().get(0);
DataNode dn1 = cluster.getDataNodes().get(1);
String activeNNBPId = cluster.getNamesystem(0).getBlockPoolId();
DatanodeDescriptor sourceDnDesc = NameNodeAdapter.getDatanode(cluster.getNamesystem(0), dn0.getDNRegistrationForBP(activeNNBPId));
DatanodeDescriptor destDnDesc = NameNodeAdapter.getDatanode(cluster.getNamesystem(0), dn1.getDNRegistrationForBP(activeNNBPId));
ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
LOG.info("replaceBlock: " + replaceBlock(block, (DatanodeInfo) sourceDnDesc, (DatanodeInfo) sourceDnDesc, (DatanodeInfo) destDnDesc));
// Wait for the FsDatasetAsyncDiskService to delete the block
for (int tries = 0; tries < 20; tries++) {
Thread.sleep(1000);
// Trigger the deletion block report so the deleted block is reported
// to the namenode
DataNodeTestUtils.triggerDeletionReport(cluster.getDataNodes().get(0));
locatedBlocks = client.getNamenode().getBlockLocations("/tmp.txt", 0, 10L).getLocatedBlocks();
// If the block was deleted and is now on only 1 datanode, break out
if (locatedBlocks.get(0).getLocations().length == 1) {
break;
}
}
cluster.transitionToStandby(0);
cluster.transitionToActive(1);
assertTrue("Namenode 1 should be in active state", cluster.getNameNode(1).isActiveState());
assertTrue("Namenode 0 should be in standby state", cluster.getNameNode(0).isStandbyState());
client.close();
// Opening a new client for new active namenode
client = new DFSClient(cluster.getFileSystem(1).getUri(), conf);
List<LocatedBlock> locatedBlocks1 = client.getNamenode().getBlockLocations("/tmp.txt", 0, 10L).getLocatedBlocks();
assertEquals(1, locatedBlocks1.size());
assertEquals("The block should be only on 1 datanode ", 1, locatedBlocks1.get(0).getLocations().length);
} finally {
IOUtils.cleanup(null, client);
cluster.shutdown();
}
}
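The fixed-interval Thread.sleep polling loop above can also be expressed with GenericTestUtils.waitFor, the same utility used by testCorruptBlockHandling further down this page. Below is a hypothetical rewrite of that wait, not the original test code; the dfsClient alias and the 1-second/20-second intervals are illustrative assumptions.
// Hypothetical sketch: wait until the deleted replica disappears from the
// namenode's view instead of looping with fixed sleeps.
final DFSClient dfsClient = client;
GenericTestUtils.waitFor(new Supplier<Boolean>() {
  @Override
  public Boolean get() {
    try {
      // push pending block deletions to the namenode, then re-check locations
      DataNodeTestUtils.triggerDeletionReport(cluster.getDataNodes().get(0));
      return dfsClient.getNamenode().getBlockLocations("/tmp.txt", 0, 10L)
          .getLocatedBlocks().get(0).getLocations().length == 1;
    } catch (IOException e) {
      return false;
    }
  }
}, 1000, 20000);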
Use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.
The class TestBlockReplacement, method testBlockReplacementWithPinnedBlocks.
/**
* Test to verify that copying a pinned block to a different destination
* datanode fails with the error code Status.ERROR_BLOCK_PINNED.
*/
@Test(timeout = 90000)
public void testBlockReplacementWithPinnedBlocks() throws Exception {
final Configuration conf = new HdfsConfiguration();
// create a three-datanode cluster where each datanode has DISK and ARCHIVE
// storage types.
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).storageTypes(new StorageType[] { StorageType.DISK, StorageType.ARCHIVE }).build();
try {
cluster.waitActive();
final DistributedFileSystem dfs = cluster.getFileSystem();
String fileName = "/testBlockReplacementWithPinnedBlocks/file";
final Path file = new Path(fileName);
DFSTestUtil.createFile(dfs, file, 1024, (short) 1, 1024);
LocatedBlock lb = dfs.getClient().getLocatedBlocks(fileName, 0).get(0);
DatanodeInfo[] oldNodes = lb.getLocations();
assertEquals("Wrong block locations", oldNodes.length, 1);
DatanodeInfo source = oldNodes[0];
ExtendedBlock b = lb.getBlock();
DatanodeInfo[] datanodes = dfs.getDataNodeStats();
DatanodeInfo destin = null;
for (DatanodeInfo datanodeInfo : datanodes) {
// choose different destination node
if (!oldNodes[0].equals(datanodeInfo)) {
destin = datanodeInfo;
break;
}
}
assertNotNull("Failed to choose destination datanode!", destin);
assertFalse("Source and destin datanode should be different", source.equals(destin));
// Mock FsDatasetSpi#getPinning to show that the block is pinned.
for (int i = 0; i < cluster.getDataNodes().size(); i++) {
DataNode dn = cluster.getDataNodes().get(i);
LOG.info("Simulate block pinning in datanode " + dn);
DataNodeTestUtils.mockDatanodeBlkPinning(dn, true);
}
// Block movement to a different datanode should fail as the block is
// pinned.
assertTrue("Status code mismatches!", replaceBlock(b, source, source, destin, StorageType.ARCHIVE, Status.ERROR_BLOCK_PINNED));
} finally {
cluster.shutdown();
}
}
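DataNodeTestUtils.mockDatanodeBlkPinning used above is a test utility; the heart of such a mock is stubbing FsDatasetSpi#getPinning so that every block looks pinned. The snippet below is a hypothetical sketch with Mockito, not the actual utility; the method name is invented, and injecting the spy back into the running DataNode (which DataNodeTestUtils handles internally) is deliberately omitted.
// Hypothetical sketch: build a spy of the datanode's dataset whose getPinning()
// always reports true, so block replacement sees every block as pinned.
static FsDatasetSpi<?> pinnedDatasetSpy(DataNode dn) throws IOException {
  FsDatasetSpi<?> spy = Mockito.spy(dn.getFSDataset());
  Mockito.doReturn(true).when(spy).getPinning(Mockito.any(ExtendedBlock.class));
  // The spy still has to replace the datanode's real dataset; that injection
  // step is internal test plumbing and is not shown here.
  return spy;
}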
Use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.
The class TestBlockScanner, method testCorruptBlockHandling.
@Test(timeout = 120000)
public void testCorruptBlockHandling() throws Exception {
Configuration conf = new Configuration();
conf.setLong(DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, 100L);
conf.set(INTERNAL_VOLUME_SCANNER_SCAN_RESULT_HANDLER, TestScanResultHandler.class.getName());
final TestContext ctx = new TestContext(conf, 1);
final int NUM_EXPECTED_BLOCKS = 5;
final int CORRUPT_INDEX = 3;
ctx.createFiles(0, NUM_EXPECTED_BLOCKS, 4);
ExtendedBlock badBlock = ctx.getFileBlock(0, CORRUPT_INDEX);
ctx.cluster.corruptBlockOnDataNodes(badBlock);
final TestScanResultHandler.Info info = TestScanResultHandler.getInfo(ctx.volumes.get(0));
synchronized (info) {
info.shouldRun = true;
info.notify();
}
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
synchronized (info) {
return info.blocksScanned == NUM_EXPECTED_BLOCKS;
}
}
}, 3, 30000);
synchronized (info) {
assertTrue(info.badBlocks.contains(badBlock));
for (int i = 0; i < NUM_EXPECTED_BLOCKS; i++) {
if (i != CORRUPT_INDEX) {
ExtendedBlock block = ctx.getFileBlock(0, i);
assertTrue(info.goodBlocks.contains(block));
}
}
}
ctx.close();
}
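TestScanResultHandler, wired in above through the INTERNAL_VOLUME_SCANNER_SCAN_RESULT_HANDLER key, is a test-local subclass of VolumeScanner.ScanResultHandler whose shared Info object the test synchronizes on. Roughly, such a handler only needs to override handle(block, exception) to record which blocks scanned clean and which did not. The class below is a simplified hypothetical sketch, not the real handler: the class name and static sets are invented, and the real handler keeps richer per-volume state, notifies waiters, and decides whether corrupt blocks get reported back through the scanner.
// Hypothetical sketch of a scan-result handler that records good and bad blocks.
// Assumed imports: java.io.IOException, java.util.Set, java.util.concurrent.ConcurrentHashMap.
public static class RecordingScanResultHandler extends VolumeScanner.ScanResultHandler {
  static final Set<ExtendedBlock> GOOD = ConcurrentHashMap.newKeySet();
  static final Set<ExtendedBlock> BAD = ConcurrentHashMap.newKeySet();

  @Override
  public void handle(ExtendedBlock block, IOException e) {
    // a null exception means the block scanned clean; otherwise it is corrupt
    if (e == null) {
      GOOD.add(block);
    } else {
      BAD.add(block);
    }
  }
}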
Use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.
The class TestReplicationPolicyWithUpgradeDomain, method testVerifyBlockPlacement.
/**
* Test block placement verification.
* @throws Exception
*/
@Test
public void testVerifyBlockPlacement() throws Exception {
LocatedBlock locatedBlock;
BlockPlacementStatus status;
ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L));
List<DatanodeStorageInfo> set = new ArrayList<>();
// 2 upgrade domains (not enough), 2 racks (enough)
set.clear();
set.add(storages[0]);
set.add(storages[1]);
set.add(storages[4]);
locatedBlock = BlockManager.newLocatedBlock(b, set.toArray(new DatanodeStorageInfo[set.size()]), 0, false);
status = replicator.verifyBlockPlacement(locatedBlock.getLocations(), set.size());
assertFalse(status.isPlacementPolicySatisfied());
// 3 upgrade domains (enough), 2 racks (enough)
set.clear();
set.add(storages[0]);
set.add(storages[1]);
set.add(storages[5]);
locatedBlock = BlockManager.newLocatedBlock(b, set.toArray(new DatanodeStorageInfo[set.size()]), 0, false);
status = replicator.verifyBlockPlacement(locatedBlock.getLocations(), set.size());
assertTrue(status.isPlacementPolicySatisfied());
// 3 upgrade domains (enough), 1 rack (not enough)
set.clear();
set.add(storages[0]);
set.add(storages[1]);
set.add(storages[2]);
locatedBlock = BlockManager.newLocatedBlock(b, set.toArray(new DatanodeStorageInfo[set.size()]), 0, false);
status = replicator.verifyBlockPlacement(locatedBlock.getLocations(), set.size());
assertFalse(status.isPlacementPolicySatisfied());
assertFalse(status.getErrorDescription().contains("upgrade domain"));
// 2 upgrade domains (not enough), 3 racks (enough)
set.clear();
set.add(storages[0]);
set.add(storages[5]);
set.add(storages[8]);
locatedBlock = BlockManager.newLocatedBlock(b, set.toArray(new DatanodeStorageInfo[set.size()]), 0, false);
status = replicator.verifyBlockPlacement(locatedBlock.getLocations(), set.size());
assertFalse(status.isPlacementPolicySatisfied());
assertTrue(status.getErrorDescription().contains("upgrade domain"));
// 3 upgrade domains (enough), 3 racks (enough)
set.clear();
set.add(storages[0]);
set.add(storages[4]);
set.add(storages[8]);
locatedBlock = BlockManager.newLocatedBlock(b, set.toArray(new DatanodeStorageInfo[set.size()]), 0, false);
status = replicator.verifyBlockPlacement(locatedBlock.getLocations(), set.size());
assertTrue(status.isPlacementPolicySatisfied());
// 3 upgrade domains (enough), 3 racks (enough), 4 replicas
set.clear();
set.add(storages[0]);
set.add(storages[1]);
set.add(storages[5]);
set.add(storages[8]);
locatedBlock = BlockManager.newLocatedBlock(b, set.toArray(new DatanodeStorageInfo[set.size()]), 0, false);
status = replicator.verifyBlockPlacement(locatedBlock.getLocations(), set.size());
assertTrue(status.isPlacementPolicySatisfied());
// 2 upgrade domains (not enough), 3 racks (enough), 4 replicas
set.clear();
set.add(storages[0]);
set.add(storages[3]);
set.add(storages[5]);
set.add(storages[8]);
locatedBlock = BlockManager.newLocatedBlock(b, set.toArray(new DatanodeStorageInfo[set.size()]), 0, false);
status = replicator.verifyBlockPlacement(locatedBlock.getLocations(), set.size());
assertFalse(status.isPlacementPolicySatisfied());
}
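Every snippet on this page ultimately revolves around ExtendedBlock, which simply pairs a block pool id with an ordinary Block. As a small self-contained illustration of the API used in testVerifyBlockPlacement above (the literal values are arbitrary):
// Illustrative only: build an ExtendedBlock for a fake pool and read its fields.
Block local = new Block(12345L, 0L, 1L);              // blockId, numBytes, generationStamp
ExtendedBlock eb = new ExtendedBlock("fake-pool", local);
System.out.println(eb.getBlockPoolId());              // fake-pool
System.out.println(eb.getBlockId());                  // 12345
System.out.println(eb.getNumBytes());                 // 0
System.out.println(eb.getGenerationStamp());          // 1
System.out.println(eb.getLocalBlock().equals(local)); // true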