Example 1 with BlockInfoContiguous

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous in project hadoop by apache.

From the class TestFSImage, method testHasNonEcBlockUsingStripedIDForLoadUCFile.

@Test
public void testHasNonEcBlockUsingStripedIDForLoadUCFile() throws IOException {
    // start a cluster
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(9).build();
        cluster.waitActive();
        DistributedFileSystem fs = cluster.getFileSystem();
        FSNamesystem fns = cluster.getNamesystem();
        String testDir = "/test_block_manager";
        String testFile = "testfile_loaducfile";
        String testFilePath = testDir + "/" + testFile;
        String clientName = "testUser_loaducfile";
        String clientMachine = "testMachine_loaducfile";
        // -1 has the sign bit set, which places it in the ID range reserved
        // for striped (erasure-coded) block groups; see the note after this
        // example.
        long blkId = -1;
        long blkNumBytes = 1024;
        long timestamp = 1426222918;
        fs.mkdir(new Path(testDir), new FsPermission("755"));
        Path p = new Path(testFilePath);
        DFSTestUtil.createFile(fs, p, 0, (short) 1, 1);
        BlockInfoContiguous cBlk = new BlockInfoContiguous(new Block(blkId, blkNumBytes, timestamp), (short) 3);
        INodeFile file = (INodeFile) fns.getFSDirectory().getINode(testFilePath);
        file.toUnderConstruction(clientName, clientMachine);
        file.addBlock(cBlk);
        fns.enterSafeMode(false);
        fns.saveNamespace(0, 0);
        cluster.restartNameNodes();
        cluster.waitActive();
        fns = cluster.getNamesystem();
        assertTrue(fns.getBlockManager().hasNonEcBlockUsingStripedID());
        cluster.shutdown();
        cluster = null;
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path(org.apache.hadoop.fs.Path) BlockInfoContiguous(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) Block(org.apache.hadoop.hdfs.protocol.Block) FsPermission(org.apache.hadoop.fs.permission.FsPermission) Test(org.junit.Test)
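
Why blkId = -1 matters in these tests: striped (erasure-coded) block group IDs are allocated from the negative ID range, so a contiguous block constructed with a negative ID collides with that range, and the NameNode records this on image load via BlockManager#hasNonEcBlockUsingStripedID(). A minimal sketch, assuming BlockIdManager#isStripedBlockID checks the striped ID range as described:

import org.apache.hadoop.hdfs.server.blockmanagement.BlockIdManager;

// Striped block group IDs have the sign bit set, i.e. they are negative,
// so -1 looks like a striped ID even though the block is contiguous.
long blkId = -1;
boolean looksStriped = BlockIdManager.isStripedBlockID(blkId); // true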

Example 2 with BlockInfoContiguous

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous in project hadoop by apache.

From the class TestFSImage, method testHasNonEcBlockUsingStripedIDForLoadFile.

@Test
public void testHasNonEcBlockUsingStripedIDForLoadFile() throws IOException {
    // start a cluster
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(9).build();
        cluster.waitActive();
        DistributedFileSystem fs = cluster.getFileSystem();
        FSNamesystem fns = cluster.getNamesystem();
        String testDir = "/test_block_manager";
        String testFile = "testfile_loadfile";
        String testFilePath = testDir + "/" + testFile;
        String clientName = "testUser_loadfile";
        String clientMachine = "testMachine_loadfile";
        long blkId = -1;
        long blkNumBytes = 1024;
        long timestamp = 1426222918;
        fs.mkdir(new Path(testDir), new FsPermission("755"));
        Path p = new Path(testFilePath);
        DFSTestUtil.createFile(fs, p, 0, (short) 1, 1);
        BlockInfoContiguous cBlk = new BlockInfoContiguous(new Block(blkId, blkNumBytes, timestamp), (short) 3);
        INodeFile file = (INodeFile) fns.getFSDirectory().getINode(testFilePath);
        file.toUnderConstruction(clientName, clientMachine);
        file.addBlock(cBlk);
        TestINodeFile.toCompleteFile(file);
        fns.enterSafeMode(false);
        fns.saveNamespace(0, 0);
        cluster.restartNameNodes();
        cluster.waitActive();
        fns = cluster.getNamesystem();
        assertTrue(fns.getBlockManager().hasNonEcBlockUsingStripedID());
        // after the file holding the non-EC block with a striped ID is
        // deleted, hasNonEcBlockUsingStripedID should be false on reload
        fs = cluster.getFileSystem();
        fs.delete(p, false);
        fns.enterSafeMode(false);
        fns.saveNamespace(0, 0);
        cluster.restartNameNodes();
        cluster.waitActive();
        fns = cluster.getNamesystem();
        assertFalse(fns.getBlockManager().hasNonEcBlockUsingStripedID());
        cluster.shutdown();
        cluster = null;
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path(org.apache.hadoop.fs.Path) BlockInfoContiguous(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) Block(org.apache.hadoop.hdfs.protocol.Block) FsPermission(org.apache.hadoop.fs.permission.FsPermission) Test(org.junit.Test)

Example 3 with BlockInfoContiguous

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous in project hadoop by apache.

From the class TestFSImage, method testHasNonEcBlockUsingStripedIDForLoadSnapshot.

@Test
public void testHasNonEcBlockUsingStripedIDForLoadSnapshot() throws IOException {
    // start a cluster
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(9).build();
        cluster.waitActive();
        DistributedFileSystem fs = cluster.getFileSystem();
        FSNamesystem fns = cluster.getNamesystem();
        String testDir = "/test_block_manager";
        String testFile = "testfile_loadSnapshot";
        String testFilePath = testDir + "/" + testFile;
        String clientName = "testUser_loadSnapshot";
        String clientMachine = "testMachine_loadSnapshot";
        long blkId = -1;
        long blkNumBytes = 1024;
        long timestamp = 1426222918;
        Path d = new Path(testDir);
        fs.mkdir(d, new FsPermission("755"));
        fs.allowSnapshot(d);
        Path p = new Path(testFilePath);
        DFSTestUtil.createFile(fs, p, 0, (short) 1, 1);
        BlockInfoContiguous cBlk = new BlockInfoContiguous(new Block(blkId, blkNumBytes, timestamp), (short) 3);
        INodeFile file = (INodeFile) fns.getFSDirectory().getINode(testFilePath);
        file.toUnderConstruction(clientName, clientMachine);
        file.addBlock(cBlk);
        TestINodeFile.toCompleteFile(file);
        fs.createSnapshot(d, "testHasNonEcBlockUsingStripeID");
        fs.truncate(p, 0);
        fns.enterSafeMode(false);
        fns.saveNamespace(0, 0);
        cluster.restartNameNodes();
        cluster.waitActive();
        fns = cluster.getNamesystem();
        assertTrue(fns.getBlockManager().hasNonEcBlockUsingStripedID());
        cluster.shutdown();
        cluster = null;
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path(org.apache.hadoop.fs.Path) BlockInfoContiguous(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) Block(org.apache.hadoop.hdfs.protocol.Block) FsPermission(org.apache.hadoop.fs.permission.FsPermission) Test(org.junit.Test)

Example 4 with BlockInfoContiguous

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous in project hadoop by apache.

From the class FSDirTruncateOp, method prepareFileForTruncate.

/**
   * Convert current INode to UnderConstruction. Recreate lease. Create new
   * block for the truncated copy. Schedule truncation of the replicas.
   *
   * @param fsn namespace
   * @param iip inodes in the path containing the file
   * @param leaseHolder lease holder
   * @param clientMachine client machine info
   * @param lastBlockDelta last block delta size
   * @param newBlock new block
   * @return the new block; it is written to the edit log and passed back
   *         into this method when the edit is replayed on load.
   * @throws IOException
   */
@VisibleForTesting
static Block prepareFileForTruncate(FSNamesystem fsn, INodesInPath iip,
        String leaseHolder, String clientMachine, long lastBlockDelta,
        Block newBlock) throws IOException {
    assert fsn.hasWriteLock();
    INodeFile file = iip.getLastINode().asFile();
    assert !file.isStriped();
    file.recordModification(iip.getLatestSnapshotId());
    file.toUnderConstruction(leaseHolder, clientMachine);
    assert file.isUnderConstruction() : "inode should be under construction.";
    fsn.getLeaseManager().addLease(file.getFileUnderConstructionFeature().getClientName(), file.getId());
    boolean shouldRecoverNow = (newBlock == null);
    BlockInfo oldBlock = file.getLastBlock();
    boolean shouldCopyOnTruncate = shouldCopyOnTruncate(fsn, file, oldBlock);
    if (newBlock == null) {
        newBlock = (shouldCopyOnTruncate)
            ? fsn.createNewBlock(BlockType.CONTIGUOUS)
            : new Block(oldBlock.getBlockId(), oldBlock.getNumBytes(),
                fsn.nextGenerationStamp(
                    fsn.getBlockManager().isLegacyBlock(oldBlock)));
    }
    final BlockInfo truncatedBlockUC;
    BlockManager blockManager = fsn.getFSDirectory().getBlockManager();
    if (shouldCopyOnTruncate) {
        // Add new truncateBlock into blocksMap and
        // use oldBlock as a source for copy-on-truncate recovery
        truncatedBlockUC = new BlockInfoContiguous(newBlock, file.getPreferredBlockReplication());
        truncatedBlockUC.convertToBlockUnderConstruction(BlockUCState.UNDER_CONSTRUCTION, blockManager.getStorages(oldBlock));
        truncatedBlockUC.setNumBytes(oldBlock.getNumBytes() - lastBlockDelta);
        truncatedBlockUC.getUnderConstructionFeature().setTruncateBlock(oldBlock);
        file.setLastBlock(truncatedBlockUC);
        blockManager.addBlockCollection(truncatedBlockUC, file);
        NameNode.stateChangeLog.debug("BLOCK* prepareFileForTruncate: Scheduling copy-on-truncate to new" + " size {}  new block {} old block {}", truncatedBlockUC.getNumBytes(), newBlock, oldBlock);
    } else {
        // Use new generation stamp for in-place truncate recovery
        blockManager.convertLastBlockToUnderConstruction(file, lastBlockDelta);
        oldBlock = file.getLastBlock();
        assert !oldBlock.isComplete() : "oldBlock should be under construction";
        BlockUnderConstructionFeature uc = oldBlock.getUnderConstructionFeature();
        uc.setTruncateBlock(new Block(oldBlock));
        uc.getTruncateBlock().setNumBytes(oldBlock.getNumBytes() - lastBlockDelta);
        uc.getTruncateBlock().setGenerationStamp(newBlock.getGenerationStamp());
        truncatedBlockUC = oldBlock;
        NameNode.stateChangeLog.debug("BLOCK* prepareFileForTruncate: " + "{} Scheduling in-place block truncate to new size {}", uc, uc.getTruncateBlock().getNumBytes());
    }
    if (shouldRecoverNow) {
        truncatedBlockUC.getUnderConstructionFeature().initializeBlockRecovery(truncatedBlockUC, newBlock.getGenerationStamp());
    }
    return newBlock;
}
Also used: BlockInfoContiguous(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous) BlockUnderConstructionFeature(org.apache.hadoop.hdfs.server.blockmanagement.BlockUnderConstructionFeature) BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) BlockManager(org.apache.hadoop.hdfs.server.blockmanagement.BlockManager) Block(org.apache.hadoop.hdfs.protocol.Block) VisibleForTesting(com.google.common.annotations.VisibleForTesting)
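
The helper above is NameNode-internal; a client reaches it through the public truncate API. A minimal sketch under a MiniDFSCluster setup like the tests above (path and sizes hypothetical):

DistributedFileSystem fs = cluster.getFileSystem();
Path p = new Path("/test_block_manager/testfile_truncate"); // hypothetical file
DFSTestUtil.createFile(fs, p, 1024, (short) 3, 1);
// truncate() returns false when the last block must be shortened: in that
// case prepareFileForTruncate has converted it to under-construction and
// scheduled replica recovery, and the new length becomes visible only
// after recovery completes.
boolean doneImmediately = fs.truncate(p, 512);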

Example 5 with BlockInfoContiguous

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous in project hadoop by apache.

From the class TestCommitBlockSynchronization, method makeNameSystemSpy.

private FSNamesystem makeNameSystemSpy(Block block, INodeFile file) throws IOException {
    Configuration conf = new Configuration();
    FSEditLog editlog = mock(FSEditLog.class);
    FSImage image = new FSImage(conf);
    Whitebox.setInternalState(image, "editLog", editlog);
    final DatanodeStorageInfo[] targets = {};
    FSNamesystem namesystem = new FSNamesystem(conf, image);
    namesystem.setImageLoaded(true);
    // FSNamesystem's isFileDeleted() method will return false on this file
    if (file.getParent() == null) {
        INodeDirectory mparent = mock(INodeDirectory.class);
        INodeDirectory parent = new INodeDirectory(mparent.getId(), new byte[0], mparent.getPermissionStatus(), mparent.getAccessTime());
        parent.setLocalName(new byte[0]);
        parent.addChild(file);
        file.setParent(parent);
    }
    namesystem.dir.getINodeMap().put(file);
    FSNamesystem namesystemSpy = spy(namesystem);
    BlockInfo blockInfo = new BlockInfoContiguous(block, (short) 1);
    blockInfo.convertToBlockUnderConstruction(HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, targets);
    blockInfo.setBlockCollectionId(file.getId());
    blockInfo.setGenerationStamp(genStamp);
    blockInfo.getUnderConstructionFeature().initializeBlockRecovery(blockInfo, genStamp);
    doReturn(blockInfo).when(file).removeLastBlock(any(Block.class));
    doReturn(true).when(file).isUnderConstruction();
    doReturn(new BlockInfoContiguous[1]).when(file).getBlocks();
    doReturn(blockInfo).when(namesystemSpy).getStoredBlock(any(Block.class));
    doReturn(blockInfo).when(file).getLastBlock();
    doNothing().when(namesystemSpy).closeFileCommitBlocks(any(String.class), any(INodeFile.class), any(BlockInfo.class));
    doReturn(mock(FSEditLog.class)).when(namesystemSpy).getEditLog();
    return namesystemSpy;
}
Also used: BlockInfoContiguous(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous) DatanodeStorageInfo(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo) Configuration(org.apache.hadoop.conf.Configuration) BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) Block(org.apache.hadoop.hdfs.protocol.Block)
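
For context, a spy built this way is typically exercised by invoking commitBlockSynchronization on it, simulating a primary datanode reporting the outcome of block recovery. A hedged sketch, assuming an INodeFile mock and numeric values (blockId, length, genStamp) defined elsewhere in the test class:

INodeFile file = mockFileUnderConstruction();       // hypothetical mock helper
Block block = new Block(blockId, length, genStamp); // hypothetical values
FSNamesystem namesystemSpy = makeNameSystemSpy(block, file);
DatanodeID[] newTargets = new DatanodeID[0];
ExtendedBlock lastBlock = new ExtendedBlock();
// Report a completed recovery without closing or deleting the file.
namesystemSpy.commitBlockSynchronization(lastBlock, genStamp, length,
        false, false, newTargets, null);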

Aggregations

BlockInfoContiguous (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous): 19 uses
Block (org.apache.hadoop.hdfs.protocol.Block): 17 uses
BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo): 11 uses
Test (org.junit.Test): 9 uses
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 7 uses
Configuration (org.apache.hadoop.conf.Configuration): 6 uses
Path (org.apache.hadoop.fs.Path): 6 uses
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 5 uses
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 5 uses
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 5 uses
BlockInfoStriped (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped): 4 uses
PermissionStatus (org.apache.hadoop.fs.permission.PermissionStatus): 3 uses
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 3 uses
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 3 uses
IOException (java.io.IOException): 2 uses
DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID): 2 uses
BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager): 2 uses
VisibleForTesting (com.google.common.annotations.VisibleForTesting): 1 use
ByteArrayInputStream (java.io.ByteArrayInputStream): 1 use
ByteArrayOutputStream (java.io.ByteArrayOutputStream): 1 use