
Example 11 with INodeFile

Use of org.apache.hadoop.hdfs.server.namenode.INodeFile in project hadoop by apache.

From class TestBlockManager, method addUcBlockToBM.

private BlockInfo addUcBlockToBM(long blkId) {
    Block block = new Block(blkId);
    BlockInfo blockInfo = new BlockInfoContiguous(block, (short) 3);
    // Put the block into the under-construction state before registering it
    // (UNDER_CONSTRUCTION is a static import of BlockUCState.UNDER_CONSTRUCTION).
    blockInfo.convertToBlockUnderConstruction(UNDER_CONSTRUCTION, null);
    long inodeId = ++mockINodeId;
    final INodeFile bc = TestINodeFile.createINodeFile(inodeId);
    blockInfo.setBlockCollectionId(inodeId);
    bm.blocksMap.addBlockCollection(blockInfo, bc);
    // Stub the namesystem so lookups by inode id return the mock file.
    doReturn(bc).when(fsn).getBlockCollection(inodeId);
    return blockInfo;
}
Also used: ReceivedDeletedBlockInfo (org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo), Block (org.apache.hadoop.hdfs.protocol.Block), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), TestINodeFile (org.apache.hadoop.hdfs.server.namenode.TestINodeFile), INodeFile (org.apache.hadoop.hdfs.server.namenode.INodeFile)
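
A minimal sketch of how this helper might be exercised; the test name and assertions below are assumptions for illustration, not code from TestBlockManager.

@Test
public void testAddUcBlock() {
    BlockInfo ucBlock = addUcBlockToBM(42L);
    // The helper should leave the block under construction ...
    assertFalse(ucBlock.isComplete());
    // ... and carrying the replication it was constructed with.
    assertEquals(3, ucBlock.getReplication());
}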

Example 12 with INodeFile

Use of org.apache.hadoop.hdfs.server.namenode.INodeFile in project hadoop by apache.

From class TestSnapshotDeletion, method testDeleteCurrentFileDirectory.

/**
   * Test deleting a directory which is a descendant of a snapshottable
   * directory. In the test we need to cover the following cases:
   * 
   * <pre>
   * 1. Delete current INodeFile/INodeDirectory without taking any snapshot.
   * 2. Delete current INodeFile/INodeDirectory while snapshots have been taken 
   *    on ancestor(s).
   * 3. Delete current INodeFileWithSnapshot.
   * 4. Delete current INodeDirectoryWithSnapshot.
   * </pre>
   */
@Test(timeout = 300000)
public void testDeleteCurrentFileDirectory() throws Exception {
    // create a folder which will be deleted before taking snapshots
    Path deleteDir = new Path(subsub, "deleteDir");
    Path deleteFile = new Path(deleteDir, "deleteFile");
    // create a directory that we will not change during the whole process.
    Path noChangeDirParent = new Path(sub, "noChangeDirParent");
    Path noChangeDir = new Path(noChangeDirParent, "noChangeDir");
    // create a file that we will not change in the future
    Path noChangeFile = new Path(noChangeDir, "noChangeFile");
    DFSTestUtil.createFile(hdfs, deleteFile, BLOCKSIZE, REPLICATION, seed);
    DFSTestUtil.createFile(hdfs, noChangeFile, BLOCKSIZE, REPLICATION, seed);
    // we will change this file's metadata in the future
    Path metaChangeFile1 = new Path(subsub, "metaChangeFile1");
    DFSTestUtil.createFile(hdfs, metaChangeFile1, BLOCKSIZE, REPLICATION, seed);
    // another file, created under noChangeDir, whose metadata will be changed
    Path metaChangeFile2 = new Path(noChangeDir, "metaChangeFile2");
    DFSTestUtil.createFile(hdfs, metaChangeFile2, BLOCKSIZE, REPLICATION, seed);
    // Case 1: delete deleteDir before taking snapshots
    hdfs.setQuota(dir, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
    checkQuotaUsageComputation(dir, 10, BLOCKSIZE * REPLICATION * 4);
    hdfs.delete(deleteDir, true);
    checkQuotaUsageComputation(dir, 8, BLOCKSIZE * REPLICATION * 3);
    // create snapshot s0
    SnapshotTestHelper.createSnapshot(hdfs, dir, "s0");
    // after creating snapshot s0, create a directory tempdir under dir and
    // then delete tempdir immediately
    Path tempDir = new Path(dir, "tempdir");
    Path tempFile = new Path(tempDir, "tempfile");
    DFSTestUtil.createFile(hdfs, tempFile, BLOCKSIZE, REPLICATION, seed);
    final INodeFile temp = TestSnapshotBlocksMap.assertBlockCollection(tempFile.toString(), 1, fsdir, blockmanager);
    BlockInfo[] blocks = temp.getBlocks();
    hdfs.delete(tempDir, true);
    // check dir's quota usage
    checkQuotaUsageComputation(dir, 8, BLOCKSIZE * REPLICATION * 3);
    // check blocks of tempFile
    for (BlockInfo b : blocks) {
        assertEquals(INVALID_INODE_ID, b.getBlockCollectionId());
    }
    // make a change: create a new file under subsub
    Path newFileAfterS0 = new Path(subsub, "newFile");
    DFSTestUtil.createFile(hdfs, newFileAfterS0, BLOCKSIZE, REPLICATION, seed);
    // further change: change the replication factor of metaChangeFile1 and metaChangeFile2
    hdfs.setReplication(metaChangeFile1, REPLICATION_1);
    hdfs.setReplication(metaChangeFile2, REPLICATION_1);
    // create snapshot s1
    SnapshotTestHelper.createSnapshot(hdfs, dir, "s1");
    // check dir's quota usage
    checkQuotaUsageComputation(dir, 9L, BLOCKSIZE * REPLICATION * 4);
    // get two snapshots for later use
    Snapshot snapshot0 = fsdir.getINode(dir.toString()).asDirectory().getSnapshot(DFSUtil.string2Bytes("s0"));
    Snapshot snapshot1 = fsdir.getINode(dir.toString()).asDirectory().getSnapshot(DFSUtil.string2Bytes("s1"));
    // Case 2 + Case 3: delete noChangeDirParent, noChangeFile, and
    // metaChangeFile2. Note that when we directly delete a directory, the 
    // directory will be converted to an INodeDirectoryWithSnapshot. To make
    // sure the deletion goes through an INodeDirectory, we delete the parent
    // of noChangeDir
    hdfs.delete(noChangeDirParent, true);
    // during the deletion a diff is added for metaChangeFile2 as its snapshot
    // copy for s1; diffs are also added for both sub and noChangeDirParent
    checkQuotaUsageComputation(dir, 9L, BLOCKSIZE * REPLICATION * 4);
    // check the snapshot copy of noChangeDir 
    Path snapshotNoChangeDir = SnapshotTestHelper.getSnapshotPath(dir, "s1", sub.getName() + "/" + noChangeDirParent.getName() + "/" + noChangeDir.getName());
    INodeDirectory snapshotNode = (INodeDirectory) fsdir.getINode(snapshotNoChangeDir.toString());
    // should still be an INodeDirectory
    assertEquals(INodeDirectory.class, snapshotNode.getClass());
    ReadOnlyList<INode> children = snapshotNode.getChildrenList(Snapshot.CURRENT_STATE_ID);
    // check 2 children: noChangeFile and metaChangeFile2
    assertEquals(2, children.size());
    INode noChangeFileSCopy = children.get(1);
    assertEquals(noChangeFile.getName(), noChangeFileSCopy.getLocalName());
    assertEquals(INodeFile.class, noChangeFileSCopy.getClass());
    TestSnapshotBlocksMap.assertBlockCollection(new Path(snapshotNoChangeDir, noChangeFileSCopy.getLocalName()).toString(), 1, fsdir, blockmanager);
    INodeFile metaChangeFile2SCopy = children.get(0).asFile();
    assertEquals(metaChangeFile2.getName(), metaChangeFile2SCopy.getLocalName());
    assertTrue(metaChangeFile2SCopy.isWithSnapshot());
    assertFalse(metaChangeFile2SCopy.isUnderConstruction());
    TestSnapshotBlocksMap.assertBlockCollection(new Path(snapshotNoChangeDir, metaChangeFile2SCopy.getLocalName()).toString(), 1, fsdir, blockmanager);
    // check the replication factor of metaChangeFile2SCopy
    assertEquals(REPLICATION_1, metaChangeFile2SCopy.getFileReplication(Snapshot.CURRENT_STATE_ID));
    assertEquals(REPLICATION_1, metaChangeFile2SCopy.getFileReplication(snapshot1.getId()));
    assertEquals(REPLICATION, metaChangeFile2SCopy.getFileReplication(snapshot0.getId()));
    // Case 4: delete directory sub
    // before deleting sub, we first create a new file under sub
    Path newFile = new Path(sub, "newFile");
    DFSTestUtil.createFile(hdfs, newFile, BLOCKSIZE, REPLICATION, seed);
    final INodeFile newFileNode = TestSnapshotBlocksMap.assertBlockCollection(newFile.toString(), 1, fsdir, blockmanager);
    blocks = newFileNode.getBlocks();
    checkQuotaUsageComputation(dir, 10L, BLOCKSIZE * REPLICATION * 5);
    hdfs.delete(sub, true);
    // during the deletion diffs are added for subsub and metaChangeFile1,
    // and newFile is removed
    checkQuotaUsageComputation(dir, 9L, BLOCKSIZE * REPLICATION * 4);
    for (BlockInfo b : blocks) {
        assertEquals(INVALID_INODE_ID, b.getBlockCollectionId());
    }
    // make sure the whole subtree of sub is stored correctly in snapshot
    Path snapshotSub = SnapshotTestHelper.getSnapshotPath(dir, "s1", sub.getName());
    INodeDirectory snapshotNode4Sub = fsdir.getINode(snapshotSub.toString()).asDirectory();
    assertTrue(snapshotNode4Sub.isWithSnapshot());
    // the snapshot copy of sub has only one child subsub.
    // newFile should have been destroyed
    assertEquals(1, snapshotNode4Sub.getChildrenList(Snapshot.CURRENT_STATE_ID).size());
    // but it should have two children, subsub and noChangeDirParent, when s1 was taken
    assertEquals(2, snapshotNode4Sub.getChildrenList(snapshot1.getId()).size());
    // check the snapshot copy of subsub, which is contained in the subtree of
    // sub's snapshot copy
    INode snapshotNode4Subsub = snapshotNode4Sub.getChildrenList(Snapshot.CURRENT_STATE_ID).get(0);
    assertTrue(snapshotNode4Subsub.asDirectory().isWithSnapshot());
    assertTrue(snapshotNode4Sub == snapshotNode4Subsub.getParent());
    // check the children of subsub
    INodeDirectory snapshotSubsubDir = (INodeDirectory) snapshotNode4Subsub;
    children = snapshotSubsubDir.getChildrenList(Snapshot.CURRENT_STATE_ID);
    assertEquals(2, children.size());
    assertEquals(metaChangeFile1.getName(), children.get(0).getLocalName());
    assertEquals(newFileAfterS0.getName(), children.get(1).getLocalName());
    // only one child before snapshot s0 
    children = snapshotSubsubDir.getChildrenList(snapshot0.getId());
    assertEquals(1, children.size());
    INode child = children.get(0);
    assertEquals(metaChangeFile1.getName(), child.getLocalName());
    // check snapshot copy of metaChangeFile1
    INodeFile metaChangeFile1SCopy = child.asFile();
    assertTrue(metaChangeFile1SCopy.isWithSnapshot());
    assertFalse(metaChangeFile1SCopy.isUnderConstruction());
    assertEquals(REPLICATION_1, metaChangeFile1SCopy.getFileReplication(Snapshot.CURRENT_STATE_ID));
    assertEquals(REPLICATION_1, metaChangeFile1SCopy.getFileReplication(snapshot1.getId()));
    assertEquals(REPLICATION, metaChangeFile1SCopy.getFileReplication(snapshot0.getId()));
}
Also used: Path (org.apache.hadoop.fs.Path), INodeDirectory (org.apache.hadoop.hdfs.server.namenode.INodeDirectory), INode (org.apache.hadoop.hdfs.server.namenode.INode), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), INodeFile (org.apache.hadoop.hdfs.server.namenode.INodeFile), Test (org.junit.Test)
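
The checkQuotaUsageComputation helper is what ties the namespace and diskspace numbers together in the test above. Its implementation is not shown on this page; the sketch below is a hypothetical shape, under the assumption that it recomputes quota usage for the subtree rooted at the given path and compares both counters.

// Hypothetical shape of the helper (an assumption, not the actual test code).
private void checkQuotaUsageComputation(final Path dirPath,
        final long expectedNs, final long expectedDs) throws IOException {
    INodeDirectory dirNode = fsdir.getINode(dirPath.toString()).asDirectory();
    QuotaCounts counts =
        dirNode.computeQuotaUsage(fsdir.getBlockStoragePolicySuite());
    assertEquals(expectedNs, counts.getNameSpace());    // inode count
    assertEquals(expectedDs, counts.getStorageSpace()); // bytes * replication
}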

Example 13 with INodeFile

Use of org.apache.hadoop.hdfs.server.namenode.INodeFile in project hadoop by apache.

From class TestSnapshotReplication, method checkFileReplication.

/**
   * Check the replication of a given file.
   *
   * @param file The given file
   * @param replication The expected replication number
   * @param blockReplication The expected replication number for the block
   * @throws Exception
   */
private void checkFileReplication(Path file, short replication, short blockReplication) throws Exception {
    // Get the FileStatus of the given file and check its replication factor.
    // Note that the replication number in FileStatus is derived from
    // INodeFile#getFileReplication().
    short fileReplication = hdfs.getFileStatus(file).getReplication();
    assertEquals(replication, fileReplication);
    // Check the replication factor recorded on each of the file's blocks
    INode inode = fsdir.getINode(file.toString());
    assertTrue(inode instanceof INodeFile);
    for (BlockInfo b : inode.asFile().getBlocks()) {
        assertEquals(blockReplication, b.getReplication());
    }
}
Also used: INode (org.apache.hadoop.hdfs.server.namenode.INode), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), INodeFile (org.apache.hadoop.hdfs.server.namenode.INodeFile)
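
For context, a hedged sketch of a typical driver for this checker; the snapshot name and replication delta are assumptions, and file1, hdfs, dir, and REPLICATION are taken to come from the test fixture.

// Assumed driver: take a snapshot, bump the live file's replication, and
// verify that the file-level and block-level values follow.
SnapshotTestHelper.createSnapshot(hdfs, dir, "s0");
hdfs.setReplication(file1, (short) (REPLICATION + 1));
// Block replication tracks the maximum across the live file and its snapshots.
checkFileReplication(file1, (short) (REPLICATION + 1), (short) (REPLICATION + 1));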

Example 14 with INodeFile

Use of org.apache.hadoop.hdfs.server.namenode.INodeFile in project hadoop by apache.

From class TestFileAppend4, method testAppendInsufficientLocations.

/**
   * Test that an append with no locations fails with an exception
   * showing insufficient locations.
   */
@Test(timeout = 60000)
public void testAppendInsufficientLocations() throws Exception {
    Configuration conf = new Configuration();
    // lower the heartbeat and recheck intervals so dead DNs are recognized quickly
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
    conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 3000);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
    DistributedFileSystem fileSystem = null;
    try {
        // create a file with replication 2
        fileSystem = cluster.getFileSystem();
        Path f = new Path("/testAppend");
        FSDataOutputStream create = fileSystem.create(f, (short) 2);
        create.write("/testAppend".getBytes());
        create.close();
        // wait for the file to reach replication 2
        DFSTestUtil.waitReplication(fileSystem, f, (short) 2);
        // Shut down all DNs that have the last block location for the file
        LocatedBlocks lbs = fileSystem.dfs.getNamenode().getBlockLocations("/testAppend", 0, Long.MAX_VALUE);
        List<DataNode> dnsOfCluster = cluster.getDataNodes();
        DatanodeInfo[] dnsWithLocations = lbs.getLastLocatedBlock().getLocations();
        for (DataNode dn : dnsOfCluster) {
            for (DatanodeInfo loc : dnsWithLocations) {
                if (dn.getDatanodeId().equals(loc)) {
                    dn.shutdown();
                    DFSTestUtil.waitForDatanodeDeath(dn);
                }
            }
        }
        // Wait till 0 replication is recognized
        DFSTestUtil.waitReplication(fileSystem, f, (short) 0);
        // Append to the file; at this point the remaining live DNs do not
        // have the block, so the append cannot find enough locations.
        try {
            fileSystem.append(f);
            fail("Append should fail because insufficient locations");
        } catch (IOException e) {
            LOG.info("Expected exception: ", e);
        }
        FSDirectory dir = cluster.getNamesystem().getFSDirectory();
        final INodeFile inode = INodeFile.valueOf(dir.getINode("/testAppend"), "/testAppend");
        assertTrue("File should remain closed", !inode.isUnderConstruction());
    } finally {
        if (null != fileSystem) {
            fileSystem.close();
        }
        cluster.shutdown();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), Configuration (org.apache.hadoop.conf.Configuration), LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks), FSDirectory (org.apache.hadoop.hdfs.server.namenode.FSDirectory), IOException (java.io.IOException), INodeFile (org.apache.hadoop.hdfs.server.namenode.INodeFile), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), Test (org.junit.Test)
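
The INodeFile.valueOf call above is the usual guard for insisting that a path resolves to a regular file. A minimal sketch of its failure mode; the directory path here is hypothetical.

// Assumes java.io.FileNotFoundException is imported; /someDir is made up.
try {
    INodeFile.valueOf(dir.getINode("/someDir"), "/someDir");
    fail("valueOf should reject an inode that is not a regular file");
} catch (FileNotFoundException fnfe) {
    // expected: the inode resolves to a directory, not a file
}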

Example 15 with INodeFile

Use of org.apache.hadoop.hdfs.server.namenode.INodeFile in project hadoop by apache.

From class TestBlockManager, method addBlockOnNodes.

private BlockInfo addBlockOnNodes(long blockId, List<DatanodeDescriptor> nodes) {
    long inodeId = ++mockINodeId;
    final INodeFile bc = TestINodeFile.createINodeFile(inodeId);
    // Place the block on the given nodes and tie it to the mock file's inode.
    BlockInfo blockInfo = blockOnNodes(blockId, nodes);
    blockInfo.setReplication((short) 3);
    blockInfo.setBlockCollectionId(inodeId);
    // Stub the namesystem so lookups by inode id return the mock file.
    Mockito.doReturn(bc).when(fsn).getBlockCollection(inodeId);
    bm.blocksMap.addBlockCollection(blockInfo, bc);
    return blockInfo;
}
Also used: ReceivedDeletedBlockInfo (org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo), TestINodeFile (org.apache.hadoop.hdfs.server.namenode.TestINodeFile), INodeFile (org.apache.hadoop.hdfs.server.namenode.INodeFile)
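
A hedged sketch of a possible caller; the helper name and assertion are assumptions, not code from TestBlockManager. It places a block on a set of datanodes and confirms the BlocksMap sees one storage per node.

private void assertBlockPlacedOn(List<DatanodeDescriptor> nodes, long blkId) {
    BlockInfo stored = addBlockOnNodes(blkId, nodes);
    // Each supplied descriptor should contribute exactly one storage/replica.
    assertEquals(nodes.size(), bm.blocksMap.numNodes(stored));
}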

Aggregations

INodeFile (org.apache.hadoop.hdfs.server.namenode.INodeFile): 35
Path (org.apache.hadoop.fs.Path): 24
Test (org.junit.Test): 23
BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo): 14
INodeDirectory (org.apache.hadoop.hdfs.server.namenode.INodeDirectory): 10
INodesInPath (org.apache.hadoop.hdfs.server.namenode.INodesInPath): 9
INode (org.apache.hadoop.hdfs.server.namenode.INode): 7
FileStatus (org.apache.hadoop.fs.FileStatus): 6
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 6
INodeReference (org.apache.hadoop.hdfs.server.namenode.INodeReference): 5
TestINodeFile (org.apache.hadoop.hdfs.server.namenode.TestINodeFile): 5
IOException (java.io.IOException): 4
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 4
Block (org.apache.hadoop.hdfs.protocol.Block): 4
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 4
FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem): 4
ReceivedDeletedBlockInfo (org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo): 4
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 3
FSDirectory (org.apache.hadoop.hdfs.server.namenode.FSDirectory): 3
DirectoryDiff (org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff): 3