
Example 26 with BlockManager

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager in project hadoop by apache.

The class FSDirAttrOp, method unprotectedSetReplication:

static BlockInfo[] unprotectedSetReplication(FSDirectory fsd, INodesInPath iip, short replication) throws QuotaExceededException, UnresolvedLinkException, SnapshotAccessControlException, UnsupportedActionException {
    assert fsd.hasWriteLock();
    final BlockManager bm = fsd.getBlockManager();
    final INode inode = iip.getLastINode();
    if (inode == null || !inode.isFile() || inode.asFile().isStriped()) {
        // TODO: changing replication is not yet supported for striped-layout files
        return null;
    }
    INodeFile file = inode.asFile();
    // Make sure the directory has sufficient quota for the new replication
    short oldBR = file.getPreferredBlockReplication();
    long size = file.computeFileSize(true, true);
    // When replication increases, charge the quota first so a violation fails early
    if (oldBR < replication) {
        fsd.updateCount(iip, 0L, size, oldBR, replication, true);
    }
    file.setFileReplication(replication, iip.getLatestSnapshotId());
    short targetReplication = (short) Math.max(replication, file.getPreferredBlockReplication());
    if (oldBR > replication) {
        fsd.updateCount(iip, 0L, size, oldBR, targetReplication, true);
    }
    for (BlockInfo b : file.getBlocks()) {
        bm.setReplication(oldBR, targetReplication, b);
    }
    if (oldBR != -1) {
        if (oldBR > targetReplication) {
            FSDirectory.LOG.info("Decreasing replication from {} to {} for {}", oldBR, targetReplication, iip.getPath());
        } else {
            FSDirectory.LOG.info("Increasing replication from {} to {} for {}", oldBR, targetReplication, iip.getPath());
        }
    }
    return file.getBlocks();
}
Also used: BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo)
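
For context, this NameNode-side method is reached through the public FileSystem#setReplication API. A minimal client-side sketch, assuming a running HDFS; the path and replication factor below are illustrative, not taken from the example:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SetReplicationExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        try (FileSystem fs = FileSystem.get(conf)) {
            // Hypothetical file; replace with a real HDFS path.
            Path file = new Path("/tmp/example.txt");
            // Returns true if the file exists and the change was applied;
            // on HDFS this eventually reaches FSDirAttrOp.unprotectedSetReplication.
            boolean changed = fs.setReplication(file, (short) 2);
            System.out.println("Replication changed: " + changed);
        }
    }
}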

Example 27 with BlockManager

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager in project hadoop by apache.

The class FSDirectory, method updateReplicationFactor:

/**
   * Tell the block manager to update the replication factors when a delete
   * happens. Deleting a file or a snapshot might decrease the replication
   * factor of the blocks as the blocks are always replicated to the highest
   * replication factor among all snapshots.
   */
void updateReplicationFactor(Collection<UpdatedReplicationInfo> blocks) {
    BlockManager bm = getBlockManager();
    for (UpdatedReplicationInfo e : blocks) {
        BlockInfo b = e.block();
        bm.setReplication(b.getReplication(), e.targetReplication(), b);
    }
}
Also used: BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), UpdatedReplicationInfo (org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo.UpdatedReplicationInfo)
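
To see why a delete can lower a replication factor, recall that blocks keep the highest factor referenced by any snapshot. A hedged sketch, assuming a DistributedFileSystem and permission to allow snapshots; the directory, file, and snapshot names are illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class SnapshotReplicationExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        try (DistributedFileSystem dfs =
                (DistributedFileSystem) FileSystem.get(conf)) {
            Path dir = new Path("/data");      // hypothetical snapshottable dir
            dfs.allowSnapshot(dir);            // requires superuser
            dfs.createSnapshot(dir, "s1");     // pins the current replication
            dfs.setReplication(new Path(dir, "f"), (short) 1);
            // The blocks of /data/f stay at the old, higher factor until the
            // snapshot holding that factor is deleted; the delete path then
            // calls updateReplicationFactor to lower it.
            dfs.deleteSnapshot(dir, "s1");
        }
    }
}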

Example 28 with BlockManager

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager in project hadoop by apache.

The class FSDirStatAndListingOp, method getBlockLocations:

/**
   * Get block locations within the specified range.
   * @see ClientProtocol#getBlockLocations(String, long, long)
   * @throws IOException
   */
static GetBlockLocationsResult getBlockLocations(FSDirectory fsd, FSPermissionChecker pc, String src, long offset, long length, boolean needBlockToken) throws IOException {
    Preconditions.checkArgument(offset >= 0, "Negative offset is not supported. File: " + src);
    Preconditions.checkArgument(length >= 0, "Negative length is not supported. File: " + src);
    BlockManager bm = fsd.getBlockManager();
    fsd.readLock();
    try {
        final INodesInPath iip = fsd.resolvePath(pc, src, DirOp.READ);
        src = iip.getPath();
        final INodeFile inode = INodeFile.valueOf(iip.getLastINode(), src);
        if (fsd.isPermissionEnabled()) {
            fsd.checkPathAccess(pc, iip, FsAction.READ);
            fsd.checkUnreadableBySuperuser(pc, iip);
        }
        final long fileSize = iip.isSnapshot() ? inode.computeFileSize(iip.getPathSnapshotId()) : inode.computeFileSizeNotIncludingLastUcBlock();
        boolean isUc = inode.isUnderConstruction();
        if (iip.isSnapshot()) {
            // if src indicates a snapshot file, we need to make sure the returned
            // blocks do not exceed the size of the snapshot file.
            length = Math.min(length, fileSize - offset);
            isUc = false;
        }
        final FileEncryptionInfo feInfo = FSDirEncryptionZoneOp.getFileEncryptionInfo(fsd, iip);
        final ErasureCodingPolicy ecPolicy = FSDirErasureCodingOp.unprotectedGetErasureCodingPolicy(fsd.getFSNamesystem(), iip);
        final LocatedBlocks blocks = bm.createLocatedBlocks(inode.getBlocks(iip.getPathSnapshotId()), fileSize, isUc, offset, length, needBlockToken, iip.isSnapshot(), feInfo, ecPolicy);
        final long now = now();
        boolean updateAccessTime = fsd.isAccessTimeSupported() && !iip.isSnapshot() && now > inode.getAccessTime() + fsd.getAccessTimePrecision();
        return new GetBlockLocationsResult(updateAccessTime, blocks);
    } finally {
        fsd.readUnlock();
    }
}
Also used: BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager), ErasureCodingPolicy (org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy), LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks), FileEncryptionInfo (org.apache.hadoop.fs.FileEncryptionInfo)
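
From the client side, this lookup is exercised through FileSystem#getFileBlockLocations. A minimal sketch with a hypothetical path; the non-negative offset and length mirror the Preconditions checks above:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class BlockLocationsExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        try (FileSystem fs = FileSystem.get(conf)) {
            Path file = new Path("/tmp/example.txt"); // hypothetical path
            FileStatus status = fs.getFileStatus(file);
            // Offset and length must be non-negative, as enforced server-side.
            BlockLocation[] locs =
                fs.getFileBlockLocations(status, 0, status.getLen());
            for (BlockLocation loc : locs) {
                System.out.println(loc); // hosts, offset, and length per block
            }
        }
    }
}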

Example 29 with BlockManager

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager in project hadoop by apache.

The class NameNodeRpcServer, method blockReport:

// DatanodeProtocol
@Override
public DatanodeCommand blockReport(final DatanodeRegistration nodeReg, String poolId, final StorageBlockReport[] reports, final BlockReportContext context) throws IOException {
    checkNNStartup();
    verifyRequest(nodeReg);
    if (blockStateChangeLog.isDebugEnabled()) {
        blockStateChangeLog.debug("*BLOCK* NameNode.blockReport: " + "from " + nodeReg + ", reports.length=" + reports.length);
    }
    final BlockManager bm = namesystem.getBlockManager();
    boolean noStaleStorages = false;
    for (int r = 0; r < reports.length; r++) {
        final BlockListAsLongs blocks = reports[r].getBlocks();
        //
        // BlockManager.processReport accumulates information of prior calls
        // for the same node and storage, so the value returned by the last
        // call of this loop is the final updated value for noStaleStorages.
        //
        final int index = r;
        noStaleStorages = bm.runBlockOp(new Callable<Boolean>() {

            @Override
            public Boolean call() throws IOException {
                return bm.processReport(nodeReg, reports[index].getStorage(), blocks, context);
            }
        });
        metrics.incrStorageBlockReportOps();
    }
    bm.removeBRLeaseIfNeeded(nodeReg, context);
    BlockManagerFaultInjector.getInstance().incomingBlockReportRpc(nodeReg, context);
    if (nn.getFSImage().isUpgradeFinalized() && !namesystem.isRollingUpgrade() && !nn.isStandbyState() && noStaleStorages) {
        return new FinalizeCommand(poolId);
    }
    return null;
}
Also used: BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager), BlockListAsLongs (org.apache.hadoop.hdfs.protocol.BlockListAsLongs), FinalizeCommand (org.apache.hadoop.hdfs.server.protocol.FinalizeCommand), Callable (java.util.concurrent.Callable)
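
A stylistic aside: on Java 8+ the anonymous Callable above can be written as a lambda with identical behavior, since runBlockOp accepts a Callable and Callable#call may throw a checked exception. The loop body would then read:

noStaleStorages = bm.runBlockOp(
    () -> bm.processReport(nodeReg, reports[index].getStorage(), blocks, context));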

Example 30 with BlockManager

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager in project hadoop by apache.

The class DFSTestUtil, method getExpectedPrimaryNode:

/**
   * @return the node which is expected to run the recovery of the
   * given block, which is known to be under construction inside the
   * given NameNode.
   */
public static DatanodeDescriptor getExpectedPrimaryNode(NameNode nn, ExtendedBlock blk) {
    BlockManager bm0 = nn.getNamesystem().getBlockManager();
    BlockInfo storedBlock = bm0.getStoredBlock(blk.getLocalBlock());
    assertTrue("Block " + blk + " should be under construction, " + "got: " + storedBlock, !storedBlock.isComplete());
    // We expect that the replica with the most recent heart beat will be
    // the one to be in charge of the synchronization / recovery protocol.
    final DatanodeStorageInfo[] storages = storedBlock.getUnderConstructionFeature().getExpectedStorageLocations();
    DatanodeStorageInfo expectedPrimary = storages[0];
    long mostRecentLastUpdate = expectedPrimary.getDatanodeDescriptor().getLastUpdateMonotonic();
    for (int i = 1; i < storages.length; i++) {
        final long lastUpdate = storages[i].getDatanodeDescriptor().getLastUpdateMonotonic();
        if (lastUpdate > mostRecentLastUpdate) {
            expectedPrimary = storages[i];
            mostRecentLastUpdate = lastUpdate;
        }
    }
    return expectedPrimary.getDatanodeDescriptor();
}
Also used: DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo), BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), ReceivedDeletedBlockInfo (org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo)
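
A hedged usage sketch for this helper inside a MiniDFSCluster-based test; the file name and write size are illustrative, and the output stream is kept open so the last block remains under construction, as the assertion in the helper requires:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;

public class ExpectedPrimaryExample {
    public static void main(String[] args) throws Exception {
        MiniDFSCluster cluster =
            new MiniDFSCluster.Builder(new Configuration()).numDataNodes(3).build();
        try {
            cluster.waitActive();
            Path p = new Path("/uc-file"); // hypothetical test file
            // Keep the stream open so the last block stays under construction.
            try (FSDataOutputStream out = cluster.getFileSystem().create(p)) {
                out.write(new byte[1024]);
                out.hflush(); // make the data visible without closing the file
                ExtendedBlock blk =
                    DFSTestUtil.getFirstBlock(cluster.getFileSystem(), p);
                DatanodeDescriptor primary =
                    DFSTestUtil.getExpectedPrimaryNode(cluster.getNameNode(), blk);
                System.out.println("Expected recovery primary: " + primary);
            }
        } finally {
            cluster.shutdown();
        }
    }
}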

Aggregations

BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager): 47
Test (org.junit.Test): 33
Path (org.apache.hadoop.fs.Path): 21
BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo): 13
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 12
IOException (java.io.IOException): 11
Configuration (org.apache.hadoop.conf.Configuration): 11
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 11
DatanodeDescriptor (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor): 11
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 10
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 9
Block (org.apache.hadoop.hdfs.protocol.Block): 8
FileNotFoundException (java.io.FileNotFoundException): 7
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 7
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 7
LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock): 7
FileSystem (org.apache.hadoop.fs.FileSystem): 6
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 6
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 6
DatanodeManager (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager): 6