Example 6 with BlockInfoContiguous

use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous in project hadoop by apache.

the class FSEditLogLoader method updateBlocks.

/**
   * Update in-memory data structures with new block information.
   * @throws IOException
   */
private void updateBlocks(FSDirectory fsDir, BlockListUpdatingOp op, INodesInPath iip, INodeFile file, ErasureCodingPolicy ecPolicy) throws IOException {
    // Update its block list
    BlockInfo[] oldBlocks = file.getBlocks();
    Block[] newBlocks = op.getBlocks();
    String path = op.getPath();
    // Are we only updating the last block's gen stamp?
    boolean isGenStampUpdate = oldBlocks.length == newBlocks.length;
    // First, update blocks in common
    for (int i = 0; i < oldBlocks.length && i < newBlocks.length; i++) {
        BlockInfo oldBlock = oldBlocks[i];
        Block newBlock = newBlocks[i];
        boolean isLastBlock = i == newBlocks.length - 1;
        if (oldBlock.getBlockId() != newBlock.getBlockId() || (oldBlock.getGenerationStamp() != newBlock.getGenerationStamp() && !(isGenStampUpdate && isLastBlock))) {
            throw new IOException("Mismatched block IDs or generation stamps, " + "attempting to replace block " + oldBlock + " with " + newBlock + " as block # " + i + "/" + newBlocks.length + " of " + path);
        }
        oldBlock.setNumBytes(newBlock.getNumBytes());
        boolean changeMade = oldBlock.getGenerationStamp() != newBlock.getGenerationStamp();
        oldBlock.setGenerationStamp(newBlock.getGenerationStamp());
        if (!oldBlock.isComplete() && (!isLastBlock || op.shouldCompleteLastBlock())) {
            changeMade = true;
            fsNamesys.getBlockManager().forceCompleteBlock(oldBlock);
        }
        if (changeMade) {
            // The state or gen-stamp of the block has changed. So, we may be
            // able to process some messages from datanodes that we previously
            // were unable to process.
            fsNamesys.getBlockManager().processQueuedMessagesForBlock(newBlock);
        }
    }
    if (newBlocks.length < oldBlocks.length) {
        // We're removing a block from the file, e.g. abandonBlock(...)
        if (!file.isUnderConstruction()) {
            throw new IOException("Trying to remove a block from file " + path + " which is not under construction.");
        }
        if (newBlocks.length != oldBlocks.length - 1) {
            throw new IOException("Trying to remove more than one block from file " + path);
        }
        Block oldBlock = oldBlocks[oldBlocks.length - 1];
        boolean removed = FSDirWriteFileOp.unprotectedRemoveBlock(fsDir, path, iip, file, oldBlock);
        if (!removed && !(op instanceof UpdateBlocksOp)) {
            throw new IOException("Trying to delete non-existant block " + oldBlock);
        }
    } else if (newBlocks.length > oldBlocks.length) {
        final boolean isStriped = ecPolicy != null;
        // We're adding blocks
        for (int i = oldBlocks.length; i < newBlocks.length; i++) {
            Block newBlock = newBlocks[i];
            final BlockInfo newBI;
            if (!op.shouldCompleteLastBlock()) {
                // TODO: shouldn't this only be true for the last block?
                // what about an old-version fsync() where fsync isn't called
                // until several blocks in?
                if (isStriped) {
                    newBI = new BlockInfoStriped(newBlock, ecPolicy);
                } else {
                    newBI = new BlockInfoContiguous(newBlock, file.getPreferredBlockReplication());
                }
                newBI.convertToBlockUnderConstruction(BlockUCState.UNDER_CONSTRUCTION, null);
            } else {
                // OP_CLOSE should add finalized blocks. This code path
                // is only executed when loading edits written by prior
                // versions of Hadoop. Current versions always log
                // OP_ADD operations as each block is allocated.
                if (isStriped) {
                    newBI = new BlockInfoStriped(newBlock, ecPolicy);
                } else {
                    newBI = new BlockInfoContiguous(newBlock, file.getFileReplication());
                }
            }
            fsNamesys.getBlockManager().addBlockCollectionWithCheck(newBI, file);
            file.addBlock(newBI);
            fsNamesys.getBlockManager().processQueuedMessagesForBlock(newBlock);
        }
    }
}
Also used: BlockInfoStriped(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped) BlockInfoContiguous(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous) BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) UpdateBlocksOp(org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateBlocksOp) Block(org.apache.hadoop.hdfs.protocol.Block) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) IOException(java.io.IOException)
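The two allocation branches above differ only in which BlockInfo subclass they construct. A minimal sketch of that striped-vs-contiguous decision, factored into a standalone helper (the helper name newBlockInfo is hypothetical; the constructors and the non-null-ecPolicy convention are exactly the ones used above):

private static BlockInfo newBlockInfo(Block blk, ErasureCodingPolicy ecPolicy, short replication) {
    // A non-null erasure-coding policy selects the striped representation;
    // otherwise the block is contiguous (replicated) with the given replication.
    if (ecPolicy != null) {
        return new BlockInfoStriped(blk, ecPolicy);
    }
    return new BlockInfoContiguous(blk, replication);
}

With such a helper, the under-construction and completed branches of the loop would differ only in the replication argument (getPreferredBlockReplication() versus getFileReplication()) and in the subsequent convertToBlockUnderConstruction call.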

Example 7 with BlockInfoContiguous

use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous in project hadoop by apache.

the class FSEditLogLoader method addNewBlock.

/**
   * Add a new block into the given INodeFile
   */
private void addNewBlock(AddBlockOp op, INodeFile file, ErasureCodingPolicy ecPolicy) throws IOException {
    BlockInfo[] oldBlocks = file.getBlocks();
    Block pBlock = op.getPenultimateBlock();
    Block newBlock = op.getLastBlock();
    if (pBlock != null) {
        // the penultimate block is not null
        assert oldBlocks != null && oldBlocks.length > 0;
        // compare pBlock with the last block of oldBlocks
        BlockInfo oldLastBlock = oldBlocks[oldBlocks.length - 1];
        if (oldLastBlock.getBlockId() != pBlock.getBlockId() || oldLastBlock.getGenerationStamp() != pBlock.getGenerationStamp()) {
            throw new IOException("Mismatched block IDs or generation stamps for the old last block of file " + op.getPath() + ", the old last block is " + oldLastBlock + ", and the block read from editlog is " + pBlock);
        }
        oldLastBlock.setNumBytes(pBlock.getNumBytes());
        if (!oldLastBlock.isComplete()) {
            fsNamesys.getBlockManager().forceCompleteBlock(oldLastBlock);
            fsNamesys.getBlockManager().processQueuedMessagesForBlock(pBlock);
        }
    } else {
        // the penultimate block is null
        Preconditions.checkState(oldBlocks == null || oldBlocks.length == 0);
    }
    // add the new block
    final BlockInfo newBlockInfo;
    boolean isStriped = ecPolicy != null;
    if (isStriped) {
        newBlockInfo = new BlockInfoStriped(newBlock, ecPolicy);
    } else {
        newBlockInfo = new BlockInfoContiguous(newBlock, file.getPreferredBlockReplication());
    }
    newBlockInfo.convertToBlockUnderConstruction(BlockUCState.UNDER_CONSTRUCTION, null);
    fsNamesys.getBlockManager().addBlockCollectionWithCheck(newBlockInfo, file);
    file.addBlock(newBlockInfo);
    fsNamesys.getBlockManager().processQueuedMessagesForBlock(newBlock);
}
Also used: BlockInfoStriped(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped) BlockInfoContiguous(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous) BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) Block(org.apache.hadoop.hdfs.protocol.Block) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) IOException(java.io.IOException)
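The guard at the top of addNewBlock is the loader's consistency check: the penultimate block recorded in the OP_ADD_BLOCK entry must match the file's current last block before the new block is appended. A hedged sketch of that comparison in isolation (the helper name checkPenultimateBlock is hypothetical; the comparison is the one above):

private static void checkPenultimateBlock(BlockInfo oldLastBlock, Block pBlock, String path) throws IOException {
    // The block id and generation stamp read from the edit log must match the
    // in-memory last block; any mismatch means the edit log and the namespace
    // have diverged and replay cannot continue safely.
    if (oldLastBlock.getBlockId() != pBlock.getBlockId()
            || oldLastBlock.getGenerationStamp() != pBlock.getGenerationStamp()) {
        throw new IOException("Mismatched block IDs or generation stamps for the old last block of file "
                + path + ": in-memory block is " + oldLastBlock
                + ", block read from the edit log is " + pBlock);
    }
}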

Example 8 with BlockInfoContiguous

use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous in project hadoop by apache.

the class FSImageSerialization method readINodeUnderConstruction.

// Helper function that reads in an INodeUnderConstruction
// from the input stream
//
static INodeFile readINodeUnderConstruction(DataInput in, FSNamesystem fsNamesys, int imgVersion) throws IOException {
    byte[] name = readBytes(in);
    long inodeId = NameNodeLayoutVersion.supports(LayoutVersion.Feature.ADD_INODE_ID, imgVersion) ? in.readLong() : fsNamesys.dir.allocateNewInodeId();
    short blockReplication = in.readShort();
    long modificationTime = in.readLong();
    long preferredBlockSize = in.readLong();
    int numBlocks = in.readInt();
    final BlockInfoContiguous[] blocksContiguous = new BlockInfoContiguous[numBlocks];
    Block blk = new Block();
    int i = 0;
    for (; i < numBlocks - 1; i++) {
        blk.readFields(in);
        blocksContiguous[i] = new BlockInfoContiguous(blk, blockReplication);
    }
    // last block is UNDER_CONSTRUCTION
    if (numBlocks > 0) {
        blk.readFields(in);
        blocksContiguous[i] = new BlockInfoContiguous(blk, blockReplication);
        blocksContiguous[i].convertToBlockUnderConstruction(BlockUCState.UNDER_CONSTRUCTION, null);
    }
    PermissionStatus perm = PermissionStatus.read(in);
    String clientName = readString(in);
    String clientMachine = readString(in);
    // We previously stored locations for the last block, now we
    // just record that there are none
    int numLocs = in.readInt();
    assert numLocs == 0 : "Unexpected block locations";
    // Images in the pre-protobuf format will not have the lazyPersist flag,
    // so it is safe to pass false always.
    INodeFile file = new INodeFile(inodeId, name, perm, modificationTime, modificationTime, blocksContiguous, blockReplication, preferredBlockSize);
    file.toUnderConstruction(clientName, clientMachine);
    return file;
}
Also used: BlockInfoContiguous(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous) Block(org.apache.hadoop.hdfs.protocol.Block) PermissionStatus(org.apache.hadoop.fs.permission.PermissionStatus)
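Two details of the reader above are easy to miss: a single scratch Block instance is reused for every readFields call, which is safe only because the BlockInfoContiguous constructor copies the block's id, length, and generation stamp; and only the last block is converted to UNDER_CONSTRUCTION. A hedged sketch of the block-reading core as a standalone helper (the name readBlocks is hypothetical; the calls are the ones above):

private static BlockInfoContiguous[] readBlocks(DataInput in, int numBlocks, short replication) throws IOException {
    BlockInfoContiguous[] blocks = new BlockInfoContiguous[numBlocks];
    // One scratch Block is reused across iterations; each BlockInfoContiguous
    // copies its fields, so the reuse does not alias earlier entries.
    Block blk = new Block();
    for (int i = 0; i < numBlocks; i++) {
        blk.readFields(in);
        blocks[i] = new BlockInfoContiguous(blk, replication);
    }
    if (numBlocks > 0) {
        // Only the last block of a file under construction is not finalized.
        blocks[numBlocks - 1].convertToBlockUnderConstruction(BlockUCState.UNDER_CONSTRUCTION, null);
    }
    return blocks;
}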

Example 9 with BlockInfoContiguous

use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous in project hadoop by apache.

the class TestFSEditLogLoader method testHasNonEcBlockUsingStripedIDForUpdateBlocks.

@Test
public void testHasNonEcBlockUsingStripedIDForUpdateBlocks() throws IOException {
    // start a cluster
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(9).build();
        cluster.waitActive();
        DistributedFileSystem fs = cluster.getFileSystem();
        FSNamesystem fns = cluster.getNamesystem();
        String testDir = "/test_block_manager";
        String testFile = "testfile_002";
        String testFilePath = testDir + "/" + testFile;
        String clientName = "testUser2";
        String clientMachine = "testMachine1";
        long blkId = 100;
        long blkNumBytes = 1024;
        long timestamp = 1426222918;
        fs.mkdir(new Path(testDir), new FsPermission("755"));
        Path p = new Path(testFilePath);
        DFSTestUtil.createFile(fs, p, 0, (short) 1, 1);
        BlockInfoContiguous cBlk = new BlockInfoContiguous(new Block(blkId, blkNumBytes, timestamp), (short) 3);
        INodeFile file = (INodeFile) fns.getFSDirectory().getINode(testFilePath);
        file.toUnderConstruction(clientName, clientMachine);
        file.addBlock(cBlk);
        TestINodeFile.toCompleteFile(file);
        long newBlkNumBytes = 1024 * 8;
        long newTimestamp = 1426222918 + 3600;
        file.toUnderConstruction(clientName, clientMachine);
        file.getLastBlock().setBlockId(-100);
        file.getLastBlock().setNumBytes(newBlkNumBytes);
        file.getLastBlock().setGenerationStamp(newTimestamp);
        fns.getEditLog().logUpdateBlocks(testFilePath, file, true);
        TestINodeFile.toCompleteFile(file);
        cluster.restartNameNodes();
        cluster.waitActive();
        fns = cluster.getNamesystem();
        assertTrue(fns.getBlockManager().hasNonEcBlockUsingStripedID());
        cluster.shutdown();
        cluster = null;
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path(org.apache.hadoop.fs.Path) BlockInfoContiguous(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) Block(org.apache.hadoop.hdfs.protocol.Block) FsPermission(org.apache.hadoop.fs.permission.FsPermission) Test(org.junit.Test)
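The test hinges on the striped-block ID convention: erasure-coded blocks draw their IDs from the negative long range, so forcing the contiguous last block's ID to -100 makes the replayed OP_UPDATE_BLOCKS entry look like a legacy non-EC block occupying a striped ID, which the BlockManager records and exposes through hasNonEcBlockUsingStripedID(). A small illustration, assuming BlockIdManager.isStripedBlockID (which tests whether an ID falls in the striped range):

// Striped block IDs occupy the negative range, contiguous IDs the positive one.
boolean looksStriped = BlockIdManager.isStripedBlockID(-100L);    // true: triggers the flag on replay
boolean looksContiguous = !BlockIdManager.isStripedBlockID(100L); // true: an ordinary block ID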

Example 10 with BlockInfoContiguous

use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous in project hadoop by apache.

the class TestFSEditLogLoader method testHasNonEcBlockUsingStripedIDForAddBlock.

@Test
public void testHasNonEcBlockUsingStripedIDForAddBlock() throws IOException {
    // start a cluster
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(9).build();
        cluster.waitActive();
        DistributedFileSystem fs = cluster.getFileSystem();
        FSNamesystem fns = cluster.getNamesystem();
        String testDir = "/test_block_manager";
        String testFile = "testfile_addblock";
        String testFilePath = testDir + "/" + testFile;
        String clientName = "testUser_addblock";
        String clientMachine = "testMachine_addblock";
        long blkId = -1;
        long blkNumBytes = 1024;
        long timestamp = 1426222918;
        fs.mkdir(new Path(testDir), new FsPermission("755"));
        Path p = new Path(testFilePath);
        // check whether the hasNonEcBlockUsingStripedID flag is set
        // after loading an addBlock edit log
        DFSTestUtil.createFile(fs, p, 0, (short) 1, 1);
        BlockInfoContiguous cBlk = new BlockInfoContiguous(new Block(blkId, blkNumBytes, timestamp), (short) 3);
        INodeFile file = (INodeFile) fns.getFSDirectory().getINode(testFilePath);
        file.toUnderConstruction(clientName, clientMachine);
        file.addBlock(cBlk);
        fns.getEditLog().logAddBlock(testFilePath, file);
        TestINodeFile.toCompleteFile(file);
        cluster.restartNameNodes();
        cluster.waitActive();
        fns = cluster.getNamesystem();
        assertTrue(fns.getBlockManager().hasNonEcBlockUsingStripedID());
        cluster.shutdown();
        cluster = null;
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path(org.apache.hadoop.fs.Path) BlockInfoContiguous(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) Block(org.apache.hadoop.hdfs.protocol.Block) FsPermission(org.apache.hadoop.fs.permission.FsPermission) Test(org.junit.Test)
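Both tests follow the same MiniDFSCluster lifecycle idiom: shut the cluster down inside the try block and null the reference on success, so the finally clause only performs cleanup on failure paths and never shuts the cluster down twice. The idiom in isolation (test body elided):

MiniDFSCluster cluster = null;
try {
    cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).numDataNodes(9).build();
    cluster.waitActive();
    // ... exercise the namenode, restart it, assert on the reloaded state ...
    cluster.shutdown();
    // Success path: null the reference so the finally clause is a no-op.
    cluster = null;
} finally {
    if (cluster != null) {
        // Failure path: make sure the cluster is torn down.
        cluster.shutdown();
    }
}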

Aggregations

BlockInfoContiguous (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous) 19
Block (org.apache.hadoop.hdfs.protocol.Block) 17
BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) 11
Test (org.junit.Test) 9
FsPermission (org.apache.hadoop.fs.permission.FsPermission) 7
Configuration (org.apache.hadoop.conf.Configuration) 6
Path (org.apache.hadoop.fs.Path) 6
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem) 5
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration) 5
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster) 5
BlockInfoStriped (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped) 4
PermissionStatus (org.apache.hadoop.fs.permission.PermissionStatus) 3
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock) 3
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock) 3
IOException (java.io.IOException) 2
DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID) 2
BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager) 2
VisibleForTesting (com.google.common.annotations.VisibleForTesting) 1
ByteArrayInputStream (java.io.ByteArrayInputStream) 1
ByteArrayOutputStream (java.io.ByteArrayOutputStream) 1