Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.
From class TestFSImage, method testSupportBlockGroup.
/**
 * Ensure that FSImage supports BlockGroup.
 */
@Test(timeout = 60000)
public void testSupportBlockGroup() throws Exception {
  final short GROUP_SIZE = (short) (testECPolicy.getNumDataUnits()
      + testECPolicy.getNumParityUnits());
  final int BLOCK_SIZE = 8 * 1024 * 1024;
  Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  DFSTestUtil.enableAllECPolicies(conf);
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(GROUP_SIZE).build();
    cluster.waitActive();
    DistributedFileSystem fs = cluster.getFileSystem();
    Path parentDir = new Path("/ec-10-4");
    Path childDir = new Path(parentDir, "ec-3-2");
    ErasureCodingPolicy ec32Policy = ErasureCodingPolicyManager
        .getPolicyByID(HdfsConstants.RS_3_2_POLICY_ID);
    // Create directories and files
    fs.mkdirs(parentDir);
    fs.mkdirs(childDir);
    fs.setErasureCodingPolicy(parentDir, testECPolicy.getName());
    fs.setErasureCodingPolicy(childDir, ec32Policy.getName());
    Path file_10_4 = new Path(parentDir, "striped_file_10_4");
    Path file_3_2 = new Path(childDir, "striped_file_3_2");
    // Write content to files
    byte[] bytes = StripedFileTestUtil.generateBytes(BLOCK_SIZE);
    DFSTestUtil.writeFile(fs, file_10_4, new String(bytes));
    DFSTestUtil.writeFile(fs, file_3_2, new String(bytes));
    // Save namespace and restart NameNode
    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    fs.saveNamespace();
    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    cluster.restartNameNodes();
    fs = cluster.getFileSystem();
    assertTrue(fs.exists(file_10_4));
    assertTrue(fs.exists(file_3_2));
    // check the information of file_10_4
    FSNamesystem fsn = cluster.getNamesystem();
    INodeFile inode = fsn.dir.getINode(file_10_4.toString()).asFile();
    assertTrue(inode.isStriped());
    assertEquals(testECPolicy.getId(), inode.getErasureCodingPolicyID());
    BlockInfo[] blks = inode.getBlocks();
    assertEquals(1, blks.length);
    assertTrue(blks[0].isStriped());
    assertEquals(testECPolicy.getId(),
        fs.getErasureCodingPolicy(file_10_4).getId());
    assertEquals(testECPolicy.getId(),
        ((BlockInfoStriped) blks[0]).getErasureCodingPolicy().getId());
    assertEquals(testECPolicy.getNumDataUnits(),
        ((BlockInfoStriped) blks[0]).getDataBlockNum());
    assertEquals(testECPolicy.getNumParityUnits(),
        ((BlockInfoStriped) blks[0]).getParityBlockNum());
    byte[] content = DFSTestUtil.readFileAsBytes(fs, file_10_4);
    assertArrayEquals(bytes, content);
    // check the information of file_3_2
    inode = fsn.dir.getINode(file_3_2.toString()).asFile();
    assertTrue(inode.isStriped());
    assertEquals(ErasureCodingPolicyManager
        .getPolicyByID(HdfsConstants.RS_3_2_POLICY_ID).getId(),
        inode.getErasureCodingPolicyID());
    blks = inode.getBlocks();
    assertEquals(1, blks.length);
    assertTrue(blks[0].isStriped());
    assertEquals(ec32Policy.getId(),
        fs.getErasureCodingPolicy(file_3_2).getId());
    assertEquals(ec32Policy.getNumDataUnits(),
        ((BlockInfoStriped) blks[0]).getDataBlockNum());
    assertEquals(ec32Policy.getNumParityUnits(),
        ((BlockInfoStriped) blks[0]).getParityBlockNum());
    content = DFSTestUtil.readFileAsBytes(fs, file_3_2);
    assertArrayEquals(bytes, content);
    // check the EC policy on parent Dir
    ErasureCodingPolicy ecPolicy =
        fsn.getErasureCodingPolicy(parentDir.toString());
    assertNotNull(ecPolicy);
    assertEquals(testECPolicy.getId(), ecPolicy.getId());
    // check the EC policy on child Dir
    ecPolicy = fsn.getErasureCodingPolicy(childDir.toString());
    assertNotNull(ecPolicy);
    assertEquals(ec32Policy.getId(), ecPolicy.getId());
    // check the EC policy on root directory
    ecPolicy = fsn.getErasureCodingPolicy("/");
    assertNull(ecPolicy);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
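The BlockInfo surface exercised above is small: isStriped() on the blocks returned by INodeFile#getBlocks(), plus the BlockInfoStriped accessors getErasureCodingPolicy(), getDataBlockNum() and getParityBlockNum(). A minimal sketch of a helper built on those same accessors (the name blockGroupWidth is illustrative, not part of the test):

// A minimal sketch, assuming blk was taken from an erasure-coded file's
// INodeFile#getBlocks(), so the cast to BlockInfoStriped is safe.
private static short blockGroupWidth(BlockInfo blk) {
  BlockInfoStriped striped = (BlockInfoStriped) blk;
  // Group width = data units + parity units, e.g. 10 + 4 = 14 for RS-10-4.
  return (short) (striped.getDataBlockNum() + striped.getParityBlockNum());
}

For the RS-10-4 file above this would return the same value as the GROUP_SIZE constant computed at the top of the test.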
Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.
From class TestCommitBlockSynchronization, method testCommitBlockSynchronizationWithClose.
@Test
public void testCommitBlockSynchronizationWithClose() throws IOException {
  INodeFile file = mockFileUnderConstruction();
  Block block = new Block(blockId, length, genStamp);
  FSNamesystem namesystemSpy = makeNameSystemSpy(block, file);
  DatanodeID[] newTargets = new DatanodeID[0];
  ExtendedBlock lastBlock = new ExtendedBlock();
  namesystemSpy.commitBlockSynchronization(lastBlock, genStamp, length,
      true, false, newTargets, null);
  // Repeat the call to make sure it returns true
  namesystemSpy.commitBlockSynchronization(lastBlock, genStamp, length,
      true, false, newTargets, null);
  BlockInfo completedBlockInfo = new BlockInfoContiguous(block, (short) 1);
  completedBlockInfo.setBlockCollectionId(file.getId());
  completedBlockInfo.setGenerationStamp(genStamp);
  doReturn(completedBlockInfo).when(namesystemSpy)
      .getStoredBlock(any(Block.class));
  doReturn(completedBlockInfo).when(file).getLastBlock();
  namesystemSpy.commitBlockSynchronization(lastBlock, genStamp, length,
      true, false, newTargets, null);
}
Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.
From class TestCommitBlockSynchronization, method testCommitBlockSynchronization.
@Test
public void testCommitBlockSynchronization() throws IOException {
  INodeFile file = mockFileUnderConstruction();
  Block block = new Block(blockId, length, genStamp);
  FSNamesystem namesystemSpy = makeNameSystemSpy(block, file);
  DatanodeID[] newTargets = new DatanodeID[0];
  ExtendedBlock lastBlock = new ExtendedBlock();
  namesystemSpy.commitBlockSynchronization(lastBlock, genStamp, length,
      false, false, newTargets, null);
  // Repeat the call to make sure it does not throw
  namesystemSpy.commitBlockSynchronization(lastBlock, genStamp, length,
      false, false, newTargets, null);
  // Simulate 'completing' the block.
  BlockInfo completedBlockInfo = new BlockInfoContiguous(block, (short) 1);
  completedBlockInfo.setBlockCollectionId(file.getId());
  completedBlockInfo.setGenerationStamp(genStamp);
  doReturn(completedBlockInfo).when(namesystemSpy)
      .getStoredBlock(any(Block.class));
  doReturn(completedBlockInfo).when(file).getLastBlock();
  // Repeat the call to make sure it does not throw
  namesystemSpy.commitBlockSynchronization(lastBlock, genStamp, length,
      false, false, newTargets, null);
}
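Both tests simulate the block reaching the COMPLETE state by stubbing a freshly built BlockInfoContiguous into the spied FSNamesystem and the mocked INodeFile. A condensed sketch of that pattern, reusing the blockId, length and genStamp fields and the Mockito doReturn/any helpers already used by the test class:

// Build a 'completed' single-replica block and attach it to the file under test.
BlockInfo completed = new BlockInfoContiguous(new Block(blockId, length, genStamp), (short) 1);
completed.setBlockCollectionId(file.getId());
completed.setGenerationStamp(genStamp);
// Every stored-block lookup and last-block query now resolves to the completed block,
// so the repeated commitBlockSynchronization() call is expected to succeed quietly
// rather than throw.
doReturn(completed).when(namesystemSpy).getStoredBlock(any(Block.class));
doReturn(completed).when(file).getLastBlock();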
Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.
From class TestINodeFile, method createINodeFiles.
/**
 * Creates the required number of files with one block each.
 * @param nCount Number of INodes to create
 * @param fileNamePrefix Prefix used to name the created files
 * @return Array of INode files
 */
private INodeFile[] createINodeFiles(int nCount, String fileNamePrefix) {
  if (nCount <= 0) {
    return new INodeFile[1];
  }
  replication = 3;
  preferredBlockSize = 128 * 1024 * 1024;
  INodeFile[] iNodes = new INodeFile[nCount];
  for (int i = 0; i < nCount; i++) {
    iNodes[i] = new INodeFile(i, null, perm, 0L, 0L, null, replication,
        preferredBlockSize);
    iNodes[i].setLocalName(DFSUtil.string2Bytes(fileNamePrefix + i));
    BlockInfo newblock = new BlockInfoContiguous(replication);
    iNodes[i].addBlock(newblock);
  }
  return iNodes;
}
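A hypothetical call site for this helper (the local name inodes and the "file" prefix are illustrative only, not part of the original test):

// Create five single-block files named file0 ... file4 and inspect their blocks.
INodeFile[] inodes = createINodeFiles(5, "file");
for (INodeFile inode : inodes) {
  BlockInfo[] blocks = inode.getBlocks();
  assertEquals(1, blocks.length);     // each file was given exactly one block
  assertFalse(blocks[0].isStriped()); // BlockInfoContiguous, not a striped block group
}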
Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.
From class TestFsck, method testBlockIdCKDecommission.
/**
 * Test for blockIdCK with datanode decommission.
 */
@Test
public void testBlockIdCKDecommission() throws Exception {
  final short replFactor = 1;
  short numDn = 2;
  final long blockSize = 512;
  boolean checkDecommissionInProgress = false;
  String[] racks = { "/rack1", "/rack2" };
  String[] hosts = { "host1", "host2" };
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 2);
  DistributedFileSystem dfs;
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDn)
      .hosts(hosts).racks(racks).build();
  assertNotNull("Failed Cluster Creation", cluster);
  cluster.waitClusterUp();
  dfs = cluster.getFileSystem();
  assertNotNull("Failed to get FileSystem", dfs);
  DFSTestUtil util = new DFSTestUtil.Builder()
      .setName(getClass().getSimpleName()).setNumFiles(1).build();
  // create files
  final String pathString = new String("/testfile");
  final Path path = new Path(pathString);
  util.createFile(dfs, path, 1024, replFactor, 1000L);
  util.waitReplication(dfs, path, replFactor);
  StringBuilder sb = new StringBuilder();
  for (LocatedBlock lb : util.getAllBlocks(dfs, path)) {
    sb.append(lb.getBlock().getLocalBlock().getBlockName() + " ");
  }
  String[] bIds = sb.toString().split(" ");
  // make sure datanode that has replica is fine before decommission
  String outStr = runFsck(conf, 0, true, "/", "-blockId", bIds[0]);
  System.out.println(outStr);
  assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
  // decommission datanode
  FSNamesystem fsn = cluster.getNameNode().getNamesystem();
  BlockManager bm = fsn.getBlockManager();
  ExtendedBlock eb = util.getFirstBlock(dfs, path);
  BlockCollection bc = null;
  try {
    fsn.writeLock();
    BlockInfo bi = bm.getStoredBlock(eb.getLocalBlock());
    bc = fsn.getBlockCollection(bi);
  } finally {
    fsn.writeUnlock();
  }
  DatanodeDescriptor dn = bc.getBlocks()[0].getDatanode(0);
  bm.getDatanodeManager().getDecomManager().startDecommission(dn);
  String dnName = dn.getXferAddr();
  // wait for decommission start
  DatanodeInfo datanodeInfo = null;
  int count = 0;
  do {
    Thread.sleep(2000);
    for (DatanodeInfo info : dfs.getDataNodeStats()) {
      if (dnName.equals(info.getXferAddr())) {
        datanodeInfo = info;
      }
    }
    // check decommissioning only once
    if (!checkDecommissionInProgress && datanodeInfo != null
        && datanodeInfo.isDecommissionInProgress()) {
      String fsckOut = runFsck(conf, 3, true, "/", "-blockId", bIds[0]);
      assertTrue(fsckOut.contains(NamenodeFsck.DECOMMISSIONING_STATUS));
      checkDecommissionInProgress = true;
    }
  } while (datanodeInfo != null && !datanodeInfo.isDecommissioned());
  // check decommissioned
  String fsckOut = runFsck(conf, 2, true, "/", "-blockId", bIds[0]);
  assertTrue(fsckOut.contains(NamenodeFsck.DECOMMISSIONED_STATUS));
}
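The decommission path above hinges on resolving a block back to the file that owns it through the block manager. A condensed sketch of that lookup, reusing the fsn, bm and eb locals from the test (the owner variable is illustrative):

// Map a block back to its owning BlockCollection; the namesystem write lock guards the lookup.
fsn.writeLock();
try {
  BlockInfo stored = bm.getStoredBlock(eb.getLocalBlock());
  BlockCollection owner = fsn.getBlockCollection(stored);
  // owner.getBlocks()[0].getDatanode(0) then yields the DatanodeDescriptor to decommission.
} finally {
  fsn.writeUnlock();
}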