Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped in project hadoop by apache.
From class TestAddStripedBlocks, method testGetLocatedStripedBlocks.
@Test
public void testGetLocatedStripedBlocks() throws Exception {
  final Path file = new Path("/file1");
  // create a file and flush one checksum chunk of data; the file (and thus
  // its last block) stays under construction until the stream is closed
  FSDataOutputStream out = null;
  try {
    out = dfs.create(file, (short) 1);
    writeAndFlushStripedOutputStream(
        (DFSStripedOutputStream) out.getWrappedStream(),
        DFS_BYTES_PER_CHECKSUM_DEFAULT);
    FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
    INodeFile fileNode = fsdir.getINode4Write(file.toString()).asFile();
    BlockInfoStriped lastBlk = (BlockInfoStriped) fileNode.getLastBlock();
    DatanodeInfo[] expectedDNs = DatanodeStorageInfo.toDatanodeInfos(
        lastBlk.getUnderConstructionFeature().getExpectedStorageLocations());
    byte[] indices = lastBlk.getUnderConstructionFeature().getBlockIndices();
    // the located blocks returned to the client should match the NameNode's
    // expected storage locations and block indices
    LocatedBlocks blks = dfs.getClient().getLocatedBlocks(file.toString(), 0L);
    Assert.assertEquals(1, blks.locatedBlockCount());
    LocatedBlock lblk = blks.get(0);
    Assert.assertTrue(lblk instanceof LocatedStripedBlock);
    DatanodeInfo[] datanodes = lblk.getLocations();
    byte[] blockIndices = ((LocatedStripedBlock) lblk).getBlockIndices();
    Assert.assertEquals(groupSize, datanodes.length);
    Assert.assertEquals(groupSize, blockIndices.length);
    Assert.assertArrayEquals(indices, blockIndices);
    Assert.assertArrayEquals(expectedDNs, datanodes);
  } finally {
    IOUtils.cleanup(null, out);
  }
}
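For reference, a minimal sketch (a hypothetical helper, not part of the test) of how a client can tell striped located blocks apart from contiguous ones, using the same LocatedStripedBlock accessors exercised above; imports for LocatedBlocks, LocatedBlock, LocatedStripedBlock, and DatanodeInfo are assumed:

// Hypothetical helper: print which internal block index each storage holds.
static void printBlockLayout(LocatedBlocks blocks) {
  for (LocatedBlock lb : blocks.getLocatedBlocks()) {
    if (lb instanceof LocatedStripedBlock) {
      LocatedStripedBlock slb = (LocatedStripedBlock) lb;
      byte[] indices = slb.getBlockIndices();
      DatanodeInfo[] locs = slb.getLocations();
      // location i holds the internal block whose index is indices[i]
      for (int i = 0; i < locs.length; i++) {
        System.out.println(locs[i] + " -> internal block #" + indices[i]);
      }
    } else {
      System.out.println(
          "contiguous block with " + lb.getLocations().length + " replicas");
    }
  }
}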
Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped in project hadoop by apache.
From class TestFSEditLogLoader, method testUpdateStripedBlocks.
@Test
public void testUpdateStripedBlocks() throws IOException {
  // start a cluster
  Configuration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
      testECPolicy.getName());
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(9).build();
    cluster.waitActive();
    DistributedFileSystem fs = cluster.getFileSystem();
    FSNamesystem fns = cluster.getNamesystem();
    String testDir = "/ec";
    String testFile = "testfile_002";
    String testFilePath = testDir + "/" + testFile;
    String clientName = "testUser2";
    String clientMachine = "testMachine2";
    long blkId = 1;
    long blkNumBytes = 1024;
    long timestamp = 1426222918;
    short blockNum = (short) testECPolicy.getNumDataUnits();
    short parityNum = (short) testECPolicy.getNumParityUnits();
    // set the erasure coding policy on the directory
    fs.mkdir(new Path(testDir), new FsPermission("755"));
    fs.getClient().getNamenode().setErasureCodingPolicy(testDir,
        testECPolicy.getName());
    // create a file with striped blocks
    Path p = new Path(testFilePath);
    DFSTestUtil.createFile(fs, p, 0, (short) 1, 1);
    BlockInfoStriped stripedBlk = new BlockInfoStriped(
        new Block(blkId, blkNumBytes, timestamp), testECPolicy);
    INodeFile file = (INodeFile) fns.getFSDirectory().getINode(testFilePath);
    file.toUnderConstruction(clientName, clientMachine);
    file.addBlock(stripedBlk);
    fns.getEditLog().logAddBlock(testFilePath, file);
    TestINodeFile.toCompleteFile(file);
    fns.enterSafeMode(false);
    fns.saveNamespace(0, 0);
    fns.leaveSafeMode(false);
    // update the last block's length and generation stamp
    long newBlkNumBytes = 1024 * 8;
    long newTimestamp = 1426222918 + 3600;
    file.toUnderConstruction(clientName, clientMachine);
    file.getLastBlock().setNumBytes(newBlkNumBytes);
    file.getLastBlock().setGenerationStamp(newTimestamp);
    fns.getEditLog().logUpdateBlocks(testFilePath, file, true);
    TestINodeFile.toCompleteFile(file);
    // After the NameNode restarts, if the loaded block carries the updated
    // size and generation stamp, the edit log has been applied correctly
    // on top of the saved fsimage.
    cluster.restartNameNodes();
    cluster.waitActive();
    fns = cluster.getNamesystem();
    INodeFile inodeLoaded = (INodeFile) fns.getFSDirectory()
        .getINode(testFilePath);
    assertTrue(inodeLoaded.isStriped());
    BlockInfo[] blks = inodeLoaded.getBlocks();
    assertEquals(1, blks.length);
    assertTrue(blks[0].isStriped());
    assertEquals(blkId, blks[0].getBlockId());
    assertEquals(newBlkNumBytes, blks[0].getNumBytes());
    assertEquals(newTimestamp, blks[0].getGenerationStamp());
    assertEquals(blockNum, ((BlockInfoStriped) blks[0]).getDataBlockNum());
    assertEquals(parityNum, ((BlockInfoStriped) blks[0]).getParityBlockNum());
    cluster.shutdown();
    cluster = null;
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
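The data/parity counts asserted at the end come straight from the policy passed to the BlockInfoStriped constructor. A minimal sketch of that relationship, using the same constructor as above (the block id, length, and generation stamp are arbitrary):

// A BlockInfoStriped carries its schema: data/parity counts come from the
// erasure coding policy it was constructed with.
ErasureCodingPolicy policy = StripedFileTestUtil.getDefaultECPolicy();
BlockInfoStriped group =
    new BlockInfoStriped(new Block(1L, 1024L, 1001L), policy);
assertTrue(group.isStriped());
assertEquals(policy.getNumDataUnits(), group.getDataBlockNum());
assertEquals(policy.getNumParityUnits(), group.getParityBlockNum());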
Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped in project hadoop by apache.
From class TestFSImage, method testSupportBlockGroup.
/**
 * Ensure that FSImage supports BlockGroup.
 */
@Test(timeout = 60000)
public void testSupportBlockGroup() throws Exception {
  final short GROUP_SIZE = (short) (testECPolicy.getNumDataUnits()
      + testECPolicy.getNumParityUnits());
  final int BLOCK_SIZE = 8 * 1024 * 1024;
  Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  DFSTestUtil.enableAllECPolicies(conf);
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(GROUP_SIZE).build();
    cluster.waitActive();
    DistributedFileSystem fs = cluster.getFileSystem();
    Path parentDir = new Path("/ec-10-4");
    Path childDir = new Path(parentDir, "ec-3-2");
    ErasureCodingPolicy ec32Policy = ErasureCodingPolicyManager
        .getPolicyByID(HdfsConstants.RS_3_2_POLICY_ID);
    // Create directories and files
    fs.mkdirs(parentDir);
    fs.mkdirs(childDir);
    fs.setErasureCodingPolicy(parentDir, testECPolicy.getName());
    fs.setErasureCodingPolicy(childDir, ec32Policy.getName());
    Path file_10_4 = new Path(parentDir, "striped_file_10_4");
    Path file_3_2 = new Path(childDir, "striped_file_3_2");
    // Write content to files
    byte[] bytes = StripedFileTestUtil.generateBytes(BLOCK_SIZE);
    DFSTestUtil.writeFile(fs, file_10_4, new String(bytes));
    DFSTestUtil.writeFile(fs, file_3_2, new String(bytes));
    // Save namespace and restart NameNode
    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    fs.saveNamespace();
    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    cluster.restartNameNodes();
    fs = cluster.getFileSystem();
    assertTrue(fs.exists(file_10_4));
    assertTrue(fs.exists(file_3_2));
    // check the information of file_10_4
    FSNamesystem fsn = cluster.getNamesystem();
    INodeFile inode = fsn.dir.getINode(file_10_4.toString()).asFile();
    assertTrue(inode.isStriped());
    assertEquals(testECPolicy.getId(), inode.getErasureCodingPolicyID());
    BlockInfo[] blks = inode.getBlocks();
    assertEquals(1, blks.length);
    assertTrue(blks[0].isStriped());
    assertEquals(testECPolicy.getId(),
        fs.getErasureCodingPolicy(file_10_4).getId());
    assertEquals(testECPolicy.getId(),
        ((BlockInfoStriped) blks[0]).getErasureCodingPolicy().getId());
    assertEquals(testECPolicy.getNumDataUnits(),
        ((BlockInfoStriped) blks[0]).getDataBlockNum());
    assertEquals(testECPolicy.getNumParityUnits(),
        ((BlockInfoStriped) blks[0]).getParityBlockNum());
    byte[] content = DFSTestUtil.readFileAsBytes(fs, file_10_4);
    assertArrayEquals(bytes, content);
    // check the information of file_3_2
    inode = fsn.dir.getINode(file_3_2.toString()).asFile();
    assertTrue(inode.isStriped());
    assertEquals(ec32Policy.getId(), inode.getErasureCodingPolicyID());
    blks = inode.getBlocks();
    assertEquals(1, blks.length);
    assertTrue(blks[0].isStriped());
    assertEquals(ec32Policy.getId(), fs.getErasureCodingPolicy(file_3_2).getId());
    assertEquals(ec32Policy.getNumDataUnits(),
        ((BlockInfoStriped) blks[0]).getDataBlockNum());
    assertEquals(ec32Policy.getNumParityUnits(),
        ((BlockInfoStriped) blks[0]).getParityBlockNum());
    content = DFSTestUtil.readFileAsBytes(fs, file_3_2);
    assertArrayEquals(bytes, content);
    // check the EC policy on the parent dir
    ErasureCodingPolicy ecPolicy =
        fsn.getErasureCodingPolicy(parentDir.toString());
    assertNotNull(ecPolicy);
    assertEquals(testECPolicy.getId(), ecPolicy.getId());
    // check the EC policy on the child dir
    ecPolicy = fsn.getErasureCodingPolicy(childDir.toString());
    assertNotNull(ecPolicy);
    assertEquals(ec32Policy.getId(), ecPolicy.getId());
    // the root directory has no EC policy of its own
    ecPolicy = fsn.getErasureCodingPolicy("/");
    assertNull(ecPolicy);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
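The nested-directory assertions above reflect EC policy inheritance: a file or directory with no policy of its own resolves to the policy of the nearest ancestor that has one. A minimal sketch of that behavior, assuming a DistributedFileSystem fs and the testECPolicy from above are in scope (the paths are hypothetical):

// The child directory sets no policy, so it inherits the parent's.
fs.mkdirs(new Path("/warm/logs"));
fs.setErasureCodingPolicy(new Path("/warm"), testECPolicy.getName());
ErasureCodingPolicy effective =
    fs.getErasureCodingPolicy(new Path("/warm/logs"));
assertEquals(testECPolicy.getName(), effective.getName());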
Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped in project hadoop by apache.
From class TestFSImage, method testSaveAndLoadStripedINodeFile.
private void testSaveAndLoadStripedINodeFile(FSNamesystem fsn,
    Configuration conf, boolean isUC) throws IOException {
  // Construct an INode with striped blocks for saving and loading
  fsn.setErasureCodingPolicy("/", testECPolicy.getName(), false);
  long id = 123456789;
  byte[] name = "testSaveAndLoadInodeFile_testfile".getBytes();
  PermissionStatus permissionStatus = new PermissionStatus("testuser_a",
      "testuser_groups", new FsPermission((short) 0755)); // octal: rwxr-xr-x
  long mtime = 1426222916 - 3600;
  long atime = 1426222916;
  BlockInfoContiguous[] blocks = new BlockInfoContiguous[0];
  byte erasureCodingPolicyID = testECPolicy.getId();
  long preferredBlockSize = 128 * 1024 * 1024;
  INodeFile file = new INodeFile(id, name, permissionStatus, mtime, atime,
      blocks, null, erasureCodingPolicyID, preferredBlockSize, (byte) 0,
      BlockType.STRIPED);
  ByteArrayOutputStream bs = new ByteArrayOutputStream();
  // Construct striped blocks for the INode
  BlockInfoStriped[] stripedBlocks = new BlockInfoStriped[3];
  long stripedBlkId = 10000001;
  long timestamp = mtime + 3600;
  for (int i = 0; i < stripedBlocks.length; i++) {
    stripedBlocks[i] = new BlockInfoStriped(
        new Block(stripedBlkId + i, preferredBlockSize, timestamp),
        testECPolicy);
    file.addBlock(stripedBlocks[i]);
  }
  final String client = "testClient";
  final String clientMachine = "testClientMachine";
  final String path = "testUnderConstructionPath";
  // Save the INode to a byte array
  DataOutput out = new DataOutputStream(bs);
  if (isUC) {
    file.toUnderConstruction(client, clientMachine);
    FSImageSerialization.writeINodeUnderConstruction((DataOutputStream) out,
        file, path);
  } else {
    FSImageSerialization.writeINodeFile(file, out, false);
  }
  DataInput in = new DataInputStream(
      new ByteArrayInputStream(bs.toByteArray()));
  // Load the INode back from the byte array
  INodeFile fileByLoaded;
  if (isUC) {
    fileByLoaded = FSImageSerialization.readINodeUnderConstruction(in, fsn,
        fsn.getFSImage().getLayoutVersion());
  } else {
    fileByLoaded = (INodeFile) new FSImageFormat.Loader(conf, fsn)
        .loadINodeWithLocalName(false, in, false);
  }
  assertEquals(id, fileByLoaded.getId());
  assertArrayEquals(isUC ? path.getBytes() : name,
      fileByLoaded.getLocalName().getBytes());
  assertEquals(permissionStatus.getUserName(),
      fileByLoaded.getPermissionStatus().getUserName());
  assertEquals(permissionStatus.getGroupName(),
      fileByLoaded.getPermissionStatus().getGroupName());
  assertEquals(permissionStatus.getPermission(),
      fileByLoaded.getPermissionStatus().getPermission());
  assertEquals(mtime, fileByLoaded.getModificationTime());
  assertEquals(isUC ? mtime : atime, fileByLoaded.getAccessTime());
  // TODO for striped blocks, we currently save and load them as contiguous
  // blocks to/from legacy fsimage
  assertEquals(3, fileByLoaded.getBlocks().length);
  assertEquals(preferredBlockSize, fileByLoaded.getPreferredBlockSize());
  assertEquals(file.getFileReplication(), fileByLoaded.getFileReplication());
  if (isUC) {
    assertEquals(client,
        fileByLoaded.getFileUnderConstructionFeature().getClientName());
    assertEquals(clientMachine,
        fileByLoaded.getFileUnderConstructionFeature().getClientMachine());
  }
}
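This is a private helper rather than a test; a caller needs a live FSNamesystem, so a wrapper @Test would typically spin up a MiniDFSCluster around it. A hypothetical caller sketch, following the cluster-lifecycle pattern used elsewhere on this page:

@Test
public void testSaveAndLoadStripedINodeFile() throws IOException {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    // exercise the finalized-file path; pass true for under-construction
    testSaveAndLoadStripedINodeFile(cluster.getNamesystem(), conf, false);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}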
Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped in project hadoop by apache.
From class TestReconstructStripedBlocks, method doTestMissingStripedBlock.
/**
 * Start GROUP_SIZE + 1 datanodes and inject striped blocks into the first
 * GROUP_SIZE of them. Then mark numOfBusy datanodes as busy and remove
 * numOfMissed datanodes, and trigger the BlockManager to compute
 * reconstruction work (all of which will be scheduled on the last, empty
 * datanode). Finally, verify the reconstruction work of that last datanode.
 */
private void doTestMissingStripedBlock(int numOfMissed, int numOfBusy)
    throws Exception {
  Configuration conf = new HdfsConfiguration();
  initConf(conf);
  conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
      StripedFileTestUtil.getDefaultECPolicy().getName());
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(groupSize + 1).build();
  try {
    cluster.waitActive();
    final int numBlocks = 4;
    DFSTestUtil.createStripedFile(cluster, filePath, dirPath, numBlocks, 1, true);
    // all blocks are located on the first GROUP_SIZE DNs; the last DN is
    // empty because of how the util function createStripedFile injects blocks
    // make sure the file is complete in the NN
    final INodeFile fileNode = cluster.getNamesystem().getFSDirectory()
        .getINode4Write(filePath.toString()).asFile();
    assertFalse(fileNode.isUnderConstruction());
    assertTrue(fileNode.isStriped());
    BlockInfo[] blocks = fileNode.getBlocks();
    assertEquals(numBlocks, blocks.length);
    for (BlockInfo blk : blocks) {
      assertTrue(blk.isStriped());
      assertTrue(blk.isComplete());
      assertEquals(cellSize * dataBlocks, blk.getNumBytes());
      final BlockInfoStriped sb = (BlockInfoStriped) blk;
      assertEquals(groupSize, sb.numNodes());
    }
    final BlockManager bm = cluster.getNamesystem().getBlockManager();
    BlockInfo firstBlock = fileNode.getBlocks()[0];
    DatanodeStorageInfo[] storageInfos = bm.getStorages(firstBlock);
    // make numOfBusy nodes busy by queuing more replication work than
    // maxReplicationStreams allows
    int i = 0;
    for (; i < numOfBusy; i++) {
      DatanodeDescriptor busyNode = storageInfos[i].getDatanodeDescriptor();
      for (int j = 0; j < maxReplicationStreams + 1; j++) {
        BlockManagerTestUtil.addBlockToBeReplicated(busyNode, new Block(j),
            new DatanodeStorageInfo[] { storageInfos[0] });
      }
    }
    // make numOfMissed internal blocks missing by removing their datanodes
    for (; i < numOfBusy + numOfMissed; i++) {
      DatanodeDescriptor missedNode = storageInfos[i].getDatanodeDescriptor();
      assertEquals(numBlocks, missedNode.numBlocks());
      bm.getDatanodeManager().removeDatanode(missedNode);
    }
    BlockManagerTestUtil.getComputedDatanodeWork(bm);
    // all the reconstruction work will be scheduled on the last DN
    DataNode lastDn = cluster.getDataNodes().get(groupSize);
    DatanodeDescriptor last =
        bm.getDatanodeManager().getDatanode(lastDn.getDatanodeId());
    assertEquals("Counting the number of outstanding EC tasks", numBlocks,
        last.getNumberOfBlocksToBeErasureCoded());
    List<BlockECReconstructionInfo> reconstruction =
        last.getErasureCodeCommand(numBlocks);
    for (BlockECReconstructionInfo info : reconstruction) {
      assertEquals(1, info.getTargetDnInfos().length);
      assertEquals(last, info.getTargetDnInfos()[0]);
      assertEquals(info.getSourceDnInfos().length,
          info.getLiveBlockIndices().length);
      if (groupSize - numOfMissed == dataBlocks) {
        // This is a QUEUE_HIGHEST_PRIORITY block, so busy DNs are chosen
        // as well, to guarantee NUM_DATA_BLOCKS source DNs for the
        // reconstruction work.
        assertEquals(dataBlocks, info.getSourceDnInfos().length);
      } else {
        // The block is not at the highest priority, so busy DNs are not
        // used as sources.
        assertEquals(groupSize - numOfMissed - numOfBusy,
            info.getSourceDnInfos().length);
      }
    }
  } finally {
    cluster.shutdown();
  }
}
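Hypothetical invocations of this helper (the real @Test wrappers are not shown on this page). With an RS(6,3)-style default policy, missing 3 of the 9 internal blocks leaves exactly dataBlocks live replicas and exercises the QUEUE_HIGHEST_PRIORITY branch of the assertion above, while missing one block exercises the ordinary branch:

@Test
public void testMissingStripedBlock() throws Exception {
  // one missed DN, no busy DNs: 8 of 9 internal blocks remain
  doTestMissingStripedBlock(1, 0);
}

@Test
public void testMissingStripedBlockWithBusyNode() throws Exception {
  // three missed DNs: only dataBlocks internal blocks remain, so the
  // busy DN must also be used as a reconstruction source
  doTestMissingStripedBlock(3, 1);
}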