Use of org.apache.hadoop.hdfs.server.namenode.INodeFile in project hadoop by apache.
From the class TestFileAppend4, method testAppendInsufficientLocations.
/**
 * Test that an append with no locations fails with an exception
 * showing insufficient locations.
 */
@Test(timeout = 60000)
public void testAppendInsufficientLocations() throws Exception {
  Configuration conf = new Configuration();
  // lower heartbeat interval for fast recognition of DN
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 3000);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
  DistributedFileSystem fileSystem = null;
  try {
    // create a file with replication 2
    fileSystem = cluster.getFileSystem();
    Path f = new Path("/testAppend");
    FSDataOutputStream create = fileSystem.create(f, (short) 2);
    create.write("/testAppend".getBytes());
    create.close();
    // Check for replications
    DFSTestUtil.waitReplication(fileSystem, f, (short) 2);
    // Shut down all DNs that have the last block location for the file
    LocatedBlocks lbs = fileSystem.dfs.getNamenode()
        .getBlockLocations("/testAppend", 0, Long.MAX_VALUE);
    List<DataNode> dnsOfCluster = cluster.getDataNodes();
    DatanodeInfo[] dnsWithLocations = lbs.getLastLocatedBlock().getLocations();
    for (DataNode dn : dnsOfCluster) {
      for (DatanodeInfo loc : dnsWithLocations) {
        if (dn.getDatanodeId().equals(loc)) {
          dn.shutdown();
          DFSTestUtil.waitForDatanodeDeath(dn);
        }
      }
    }
    // Wait till 0 replication is recognized
    DFSTestUtil.waitReplication(fileSystem, f, (short) 0);
    // Append to the file; the DNs still alive after the shutdown above do not
    // have the block.
    try {
      fileSystem.append(f);
      fail("Append should fail because insufficient locations");
    } catch (IOException e) {
      LOG.info("Expected exception: ", e);
    }
    FSDirectory dir = cluster.getNamesystem().getFSDirectory();
    final INodeFile inode = INodeFile.valueOf(
        dir.getINode("/testAppend"), "/testAppend");
    assertTrue("File should remain closed", !inode.isUnderConstruction());
  } finally {
    if (null != fileSystem) {
      fileSystem.close();
    }
    cluster.shutdown();
  }
}
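For context, the closing assertion leans on INodeFile.valueOf, which acts as a checked down-cast: it returns the inode only when the path resolves to a regular file. A minimal hedged sketch of its failure mode, reusing the test's fileSystem and FSDirectory variables ("/someDir" is a hypothetical path, not part of the original test):

// Hedged sketch, not part of the original test. INodeFile.valueOf throws
// java.io.FileNotFoundException when the inode is null or not a regular file,
// so it doubles as a type check. "/someDir" is a hypothetical directory.
fileSystem.mkdirs(new Path("/someDir"));
try {
  INodeFile.valueOf(dir.getINode("/someDir"), "/someDir");
  fail("valueOf should reject a directory inode");
} catch (FileNotFoundException fnfe) {
  LOG.info("Expected exception: ", fnfe);
}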
Use of org.apache.hadoop.hdfs.server.namenode.INodeFile in project hadoop by apache.
From the class DFSTestUtil, method createStripedFile.
/**
 * Creates the metadata of a file in striped layout. This method only
 * manipulates the NameNode state without injecting data to DataNodes.
 * Periodic heartbeats should be disabled before using this.
 * @param cluster MiniDFSCluster whose NameNode state is manipulated
 * @param file Path of the file to create
 * @param dir Parent path of the file
 * @param numBlocks Number of striped block groups to add to the file
 * @param numStripesPerBlk Number of striped cells in each block
 * @param toMkdir whether to create {@code dir} and set the erasure coding
 *                policy on it
 * @param ecPolicy erasure coding policy applied to the created file; a null
 *                 value means using the default erasure coding policy
 */
public static void createStripedFile(MiniDFSCluster cluster, Path file,
    Path dir, int numBlocks, int numStripesPerBlk, boolean toMkdir,
    ErasureCodingPolicy ecPolicy) throws Exception {
  DistributedFileSystem dfs = cluster.getFileSystem();
  // If outer test already set EC policy, dir should be left as null
  if (toMkdir) {
    assert dir != null;
    dfs.mkdirs(dir);
    try {
      dfs.getClient().setErasureCodingPolicy(dir.toString(), ecPolicy.getName());
    } catch (IOException e) {
      if (!e.getMessage().contains("non-empty directory")) {
        throw e;
      }
    }
  }
  cluster.getNameNodeRpc().create(file.toString(), new FsPermission((short) 0755),
      dfs.getClient().getClientName(),
      new EnumSetWritable<>(EnumSet.of(CreateFlag.CREATE)), false, (short) 1,
      128 * 1024 * 1024L, null);
  FSNamesystem ns = cluster.getNamesystem();
  FSDirectory fsdir = ns.getFSDirectory();
  INodeFile fileNode = fsdir.getINode4Write(file.toString()).asFile();
  ExtendedBlock previous = null;
  for (int i = 0; i < numBlocks; i++) {
    Block newBlock = addBlockToFile(true, cluster.getDataNodes(), dfs, ns,
        file.toString(), fileNode, dfs.getClient().getClientName(), previous,
        numStripesPerBlk, 0);
    previous = new ExtendedBlock(ns.getBlockPoolId(), newBlock);
  }
  dfs.getClient().namenode.complete(file.toString(),
      dfs.getClient().getClientName(), previous, fileNode.getId());
}
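A hedged usage sketch (not from the original page): with toMkdir set to false the helper skips the mkdir/setErasureCodingPolicy branch entirely, so it assumes the caller has already created the directory and set its erasure coding policy, which is the mode in which a null ecPolicy is safe:

// Hedged usage sketch; the paths are illustrative. "/ec" is assumed to exist
// with an erasure coding policy already set, so toMkdir=false / ecPolicy=null
// bypass the directory setup above.
Path ecDir = new Path("/ec");
Path ecFile = new Path(ecDir, "striped-file");
// NameNode metadata only: 2 striped block groups, 4 stripes per block group.
DFSTestUtil.createStripedFile(cluster, ecFile, ecDir, 2, 4, false, null);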
Use of org.apache.hadoop.hdfs.server.namenode.INodeFile in project hadoop by apache.
From the class TestBlockManager, method addCorruptBlockOnNodes.
private BlockInfo addCorruptBlockOnNodes(long blockId,
    List<DatanodeDescriptor> nodes) throws IOException {
  long inodeId = ++mockINodeId;
  final INodeFile bc = TestINodeFile.createINodeFile(inodeId);
  BlockInfo blockInfo = blockOnNodes(blockId, nodes);
  blockInfo.setReplication((short) 3);
  blockInfo.setBlockCollectionId(inodeId);
  Mockito.doReturn(bc).when(fsn).getBlockCollection(inodeId);
  bm.blocksMap.addBlockCollection(blockInfo, bc);
  bm.markBlockReplicasAsCorrupt(blockInfo, blockInfo,
      blockInfo.getGenerationStamp() + 1, blockInfo.getNumBytes(),
      new DatanodeStorageInfo[] { nodes.get(0).getStorageInfos()[0] });
  return blockInfo;
}
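A hedged sketch of how such a helper is typically asserted against (not from the original page; "nodes" stands for the test's existing List<DatanodeDescriptor> fixture, and it assumes the package-private BlockManager#countNodes tally is visible from the test's package):

// Hedged sketch, not from the original page. "nodes" is assumed to be the
// test's existing List<DatanodeDescriptor>.
BlockInfo corrupt = addCorruptBlockOnNodes(42L, nodes);
// markBlockReplicasAsCorrupt flagged only the first node's replica above,
// so exactly one replica should now be counted as corrupt.
assertEquals(1, bm.countNodes(corrupt).corruptReplicas());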
Use of org.apache.hadoop.hdfs.server.namenode.INodeFile in project hadoop by apache.
From the class TestBlockManager, method addBlockToBM.
private BlockInfo addBlockToBM(long blkId) {
  Block block = new Block(blkId);
  BlockInfo blockInfo = new BlockInfoContiguous(block, (short) 3);
  long inodeId = ++mockINodeId;
  final INodeFile bc = TestINodeFile.createINodeFile(inodeId);
  bm.blocksMap.addBlockCollection(blockInfo, bc);
  blockInfo.setBlockCollectionId(inodeId);
  doReturn(bc).when(fsn).getBlockCollection(inodeId);
  return blockInfo;
}
Use of org.apache.hadoop.hdfs.server.namenode.INodeFile in project hadoop by apache.
From the class TestBlockManager, method addUcBlockToBM.
private BlockInfo addUcBlockToBM(long blkId) {
  Block block = new Block(blkId);
  BlockInfo blockInfo = new BlockInfoContiguous(block, (short) 3);
  blockInfo.convertToBlockUnderConstruction(UNDER_CONSTRUCTION, null);
  long inodeId = ++mockINodeId;
  final INodeFile bc = TestINodeFile.createINodeFile(inodeId);
  blockInfo.setBlockCollectionId(inodeId);
  bm.blocksMap.addBlockCollection(blockInfo, bc);
  doReturn(bc).when(fsn).getBlockCollection(inodeId);
  return blockInfo;
}
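The only difference from addBlockToBM is the convertToBlockUnderConstruction call, which moves the block out of the COMPLETE state. A hedged sketch of the observable difference (not from the original page; UNDER_CONSTRUCTION is the statically imported HdfsServerConstants.BlockUCState constant used above):

// Hedged sketch contrasting the two helpers above.
BlockInfo complete = addBlockToBM(1L);
BlockInfo uc = addUcBlockToBM(2L);
assertTrue(complete.isComplete());   // no under-construction feature attached
assertFalse(uc.isComplete());
assertEquals(UNDER_CONSTRUCTION, uc.getBlockUCState());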