
Example 41 with BlockManager

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager in project hadoop by apache.

The class FSDirectory, method updateReplicationFactor.

/**
   * Tell the block manager to update the replication factors when a delete
   * happens. Deleting a file or a snapshot may decrease the replication
   * factor of its blocks, since blocks are always replicated at the highest
   * replication factor among all snapshots.
   */
void updateReplicationFactor(Collection<UpdatedReplicationInfo> blocks) {
    BlockManager bm = getBlockManager();
    for (UpdatedReplicationInfo e : blocks) {
        BlockInfo b = e.block();
        bm.setReplication(b.getReplication(), e.targetReplication(), b);
    }
}
Also used : BlockManager(org.apache.hadoop.hdfs.server.blockmanagement.BlockManager) BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) UpdatedReplicationInfo(org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo.UpdatedReplicationInfo)
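The collection of UpdatedReplicationInfo entries is built by the delete path and is not shown here. A minimal, hypothetical client-side sketch of the behavior the javadoc describes, assuming a MiniDFSCluster test setup (paths, sizes, and snapshot names are illustrative): blocks keep the highest replication requested by any snapshot, and removing the snapshot lets the block manager lower it.

Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
try {
    DistributedFileSystem dfs = cluster.getFileSystem();
    Path dir = new Path("/repl");
    Path file = new Path(dir, "f");
    dfs.mkdirs(dir);
    // create a 1 KB file with replication 3
    DFSTestUtil.createFile(dfs, file, 1024L, (short) 3, 0L);
    dfs.allowSnapshot(dir);
    // the snapshot pins the blocks at replication 3
    dfs.createSnapshot(dir, "s1");
    // the live file now asks for 1, but the blocks stay at 3
    dfs.setReplication(file, (short) 1);
    // deleting the snapshot is what drives updateReplicationFactor,
    // so the blocks can drop to replication 1
    dfs.deleteSnapshot(dir, "s1");
} finally {
    cluster.shutdown();
}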

Example 42 with BlockManager

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager in project hadoop by apache.

The class FSDirStatAndListingOp, method getBlockLocations.

/**
   * Get block locations within the specified range.
   * @see ClientProtocol#getBlockLocations(String, long, long)
   * @throws IOException
   */
static GetBlockLocationsResult getBlockLocations(FSDirectory fsd, FSPermissionChecker pc, String src, long offset, long length, boolean needBlockToken) throws IOException {
    Preconditions.checkArgument(offset >= 0, "Negative offset is not supported. File: " + src);
    Preconditions.checkArgument(length >= 0, "Negative length is not supported. File: " + src);
    BlockManager bm = fsd.getBlockManager();
    fsd.readLock();
    try {
        final INodesInPath iip = fsd.resolvePath(pc, src, DirOp.READ);
        src = iip.getPath();
        final INodeFile inode = INodeFile.valueOf(iip.getLastINode(), src);
        if (fsd.isPermissionEnabled()) {
            fsd.checkPathAccess(pc, iip, FsAction.READ);
            fsd.checkUnreadableBySuperuser(pc, iip);
        }
        final long fileSize = iip.isSnapshot() ? inode.computeFileSize(iip.getPathSnapshotId()) : inode.computeFileSizeNotIncludingLastUcBlock();
        boolean isUc = inode.isUnderConstruction();
        if (iip.isSnapshot()) {
            // if src indicates a snapshot file, we need to make sure the returned
            // blocks do not exceed the size of the snapshot file.
            length = Math.min(length, fileSize - offset);
            isUc = false;
        }
        final FileEncryptionInfo feInfo = FSDirEncryptionZoneOp.getFileEncryptionInfo(fsd, iip);
        final ErasureCodingPolicy ecPolicy = FSDirErasureCodingOp.unprotectedGetErasureCodingPolicy(fsd.getFSNamesystem(), iip);
        final LocatedBlocks blocks = bm.createLocatedBlocks(inode.getBlocks(iip.getPathSnapshotId()), fileSize, isUc, offset, length, needBlockToken, iip.isSnapshot(), feInfo, ecPolicy);
        final long now = now();
        boolean updateAccessTime = fsd.isAccessTimeSupported() && !iip.isSnapshot() && now > inode.getAccessTime() + fsd.getAccessTimePrecision();
        return new GetBlockLocationsResult(updateAccessTime, blocks);
    } finally {
        fsd.readUnlock();
    }
}
Also used : BlockManager(org.apache.hadoop.hdfs.server.blockmanagement.BlockManager) ErasureCodingPolicy(org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) FileEncryptionInfo(org.apache.hadoop.fs.FileEncryptionInfo)
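For context, this is the NameNode-side implementation behind ClientProtocol#getBlockLocations. A minimal, hypothetical caller-side sketch that ends up in this code path, assuming an already-configured FileSystem (the path and range are illustrative):

FileSystem fs = FileSystem.get(conf);
Path p = new Path("/foo/dir/file");
FileStatus stat = fs.getFileStatus(p);
// request locations covering the whole file; offset and length must be >= 0,
// matching the Preconditions checks above
BlockLocation[] locs = fs.getFileBlockLocations(stat, 0, stat.getLen());
for (BlockLocation loc : locs) {
    // each BlockLocation prints as offset,length,host1,host2,...
    System.out.println(loc);
}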

Example 43 with BlockManager

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager in project hadoop by apache.

The class NameNodeRpcServer, method blockReport.

// DatanodeProtocol
@Override
public DatanodeCommand blockReport(final DatanodeRegistration nodeReg, String poolId, final StorageBlockReport[] reports, final BlockReportContext context) throws IOException {
    checkNNStartup();
    verifyRequest(nodeReg);
    if (blockStateChangeLog.isDebugEnabled()) {
        blockStateChangeLog.debug("*BLOCK* NameNode.blockReport: " + "from " + nodeReg + ", reports.length=" + reports.length);
    }
    final BlockManager bm = namesystem.getBlockManager();
    boolean noStaleStorages = false;
    for (int r = 0; r < reports.length; r++) {
        final BlockListAsLongs blocks = reports[r].getBlocks();
        //
        // BlockManager.processReport accumulates information of prior calls
        // for the same node and storage, so the value returned by the last
        // call of this loop is the final updated value for noStaleStorages.
        //
        final int index = r;
        noStaleStorages = bm.runBlockOp(new Callable<Boolean>() {

            @Override
            public Boolean call() throws IOException {
                return bm.processReport(nodeReg, reports[index].getStorage(), blocks, context);
            }
        });
        metrics.incrStorageBlockReportOps();
    }
    bm.removeBRLeaseIfNeeded(nodeReg, context);
    BlockManagerFaultInjector.getInstance().incomingBlockReportRpc(nodeReg, context);
    if (nn.getFSImage().isUpgradeFinalized() && !namesystem.isRollingUpgrade() && !nn.isStandbyState() && noStaleStorages) {
        return new FinalizeCommand(poolId);
    }
    return null;
}
Also used : BlockManager(org.apache.hadoop.hdfs.server.blockmanagement.BlockManager) BlockListAsLongs(org.apache.hadoop.hdfs.protocol.BlockListAsLongs) FinalizeCommand(org.apache.hadoop.hdfs.server.protocol.FinalizeCommand) Callable(java.util.concurrent.Callable)
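The anonymous Callable that wraps processReport can also be written as a lambda on Java 8+; a sketch of the equivalent loop body (same variables as above, behavior unchanged):

final int index = r;
noStaleStorages = bm.runBlockOp(
    () -> bm.processReport(nodeReg, reports[index].getStorage(), blocks, context));
metrics.incrStorageBlockReportOps();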

Example 44 with BlockManager

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager in project hadoop by apache.

The class DFSTestUtil, method getExpectedPrimaryNode.

/**
   * @return the node which is expected to run the recovery of the
   * given block, which is known to be under construction inside the
   * given NameNode.
   */
public static DatanodeDescriptor getExpectedPrimaryNode(NameNode nn, ExtendedBlock blk) {
    BlockManager bm0 = nn.getNamesystem().getBlockManager();
    BlockInfo storedBlock = bm0.getStoredBlock(blk.getLocalBlock());
    assertTrue("Block " + blk + " should be under construction, " + "got: " + storedBlock, !storedBlock.isComplete());
    // We expect that the replica with the most recent heartbeat will be
    // the one in charge of the synchronization / recovery protocol.
    final DatanodeStorageInfo[] storages = storedBlock.getUnderConstructionFeature().getExpectedStorageLocations();
    DatanodeStorageInfo expectedPrimary = storages[0];
    long mostRecentLastUpdate = expectedPrimary.getDatanodeDescriptor().getLastUpdateMonotonic();
    for (int i = 1; i < storages.length; i++) {
        final long lastUpdate = storages[i].getDatanodeDescriptor().getLastUpdateMonotonic();
        if (lastUpdate > mostRecentLastUpdate) {
            expectedPrimary = storages[i];
            mostRecentLastUpdate = lastUpdate;
        }
    }
    return expectedPrimary.getDatanodeDescriptor();
}
Also used : DatanodeStorageInfo(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo) BlockManager(org.apache.hadoop.hdfs.server.blockmanagement.BlockManager) BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) ReceivedDeletedBlockInfo(org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo)
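A hypothetical test-side sketch of how this helper might be used, assuming a running MiniDFSCluster and its DistributedFileSystem (file name and sizes are illustrative): keep a block under construction with hflush, then ask which replica is expected to lead recovery.

DistributedFileSystem fs = cluster.getFileSystem();
Path p = new Path("/uc-file");
FSDataOutputStream out = fs.create(p, (short) 3);
try {
    out.write(new byte[4096]);
    // hflush makes the block visible while the file stays under construction
    out.hflush();
    ExtendedBlock blk = DFSTestUtil.getFirstBlock(fs, p);
    DatanodeDescriptor primary =
        DFSTestUtil.getExpectedPrimaryNode(cluster.getNameNode(), blk);
    System.out.println("expected recovery primary: " + primary);
} finally {
    out.close();
}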

Example 45 with BlockManager

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager in project hadoop by apache.

The class TestFileCreation, method testFileCreationWithOverwrite.

/**
   * 1. Check that the blocks of the old file are cleaned up after creating
   *    with overwrite
   * 2. Restart the NN, check the file
   * 3. Save a new checkpoint and restart the NN, check the file
   */
@Test(timeout = 120000)
public void testFileCreationWithOverwrite() throws Exception {
    Configuration conf = new Configuration();
    conf.setInt("dfs.blocksize", blockSize);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    DistributedFileSystem dfs = cluster.getFileSystem();
    try {
        dfs.mkdirs(new Path("/foo/dir"));
        String file = "/foo/dir/file";
        Path filePath = new Path(file);
        // Case 1: Create the file with overwrite, and check that the blocks of
        // the old file are cleaned up after creating with overwrite
        NameNode nn = cluster.getNameNode();
        FSNamesystem fsn = NameNodeAdapter.getNamesystem(nn);
        BlockManager bm = fsn.getBlockManager();
        FSDataOutputStream out = dfs.create(filePath);
        byte[] oldData = AppendTestUtil.randomBytes(seed, fileSize);
        try {
            out.write(oldData);
        } finally {
            out.close();
        }
        LocatedBlocks oldBlocks = NameNodeAdapter.getBlockLocations(nn, file, 0, fileSize);
        assertBlocks(bm, oldBlocks, true);
        out = dfs.create(filePath, true);
        byte[] newData = AppendTestUtil.randomBytes(seed, fileSize);
        try {
            out.write(newData);
        } finally {
            out.close();
        }
        dfs.deleteOnExit(filePath);
        LocatedBlocks newBlocks = NameNodeAdapter.getBlockLocations(nn, file, 0, fileSize);
        assertBlocks(bm, newBlocks, true);
        assertBlocks(bm, oldBlocks, false);
        FSDataInputStream in = dfs.open(filePath);
        byte[] result = null;
        try {
            result = readAll(in);
        } finally {
            in.close();
        }
        Assert.assertArrayEquals(newData, result);
        // Case 2: Restart NN, check the file
        cluster.restartNameNode();
        nn = cluster.getNameNode();
        in = dfs.open(filePath);
        try {
            result = readAll(in);
        } finally {
            in.close();
        }
        Assert.assertArrayEquals(newData, result);
        // Case 3: Save new checkpoint and restart NN, check the file
        NameNodeAdapter.enterSafeMode(nn, false);
        NameNodeAdapter.saveNamespace(nn);
        cluster.restartNameNode();
        nn = cluster.getNameNode();
        in = dfs.open(filePath);
        try {
            result = readAll(in);
        } finally {
            in.close();
        }
        Assert.assertArrayEquals(newData, result);
    } finally {
        if (dfs != null) {
            dfs.close();
        }
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) NameNode(org.apache.hadoop.hdfs.server.namenode.NameNode) Configuration(org.apache.hadoop.conf.Configuration) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) BlockManager(org.apache.hadoop.hdfs.server.blockmanagement.BlockManager) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) FSNamesystem(org.apache.hadoop.hdfs.server.namenode.FSNamesystem) Test(org.junit.Test)
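The assertBlocks helper called above is a private method of TestFileCreation and is not part of this snippet. A hypothetical sketch of what such a check could look like (names are illustrative, not necessarily the actual helper): every block in the located list either is, or is no longer, known to the BlockManager.

private static void assertBlocks(BlockManager bm, LocatedBlocks lbs,
        boolean exist) {
    for (LocatedBlock lb : lbs.getLocatedBlocks()) {
        BlockInfo stored = bm.getStoredBlock(lb.getBlock().getLocalBlock());
        if (exist) {
            Assert.assertNotNull(stored);
        } else {
            Assert.assertNull(stored);
        }
    }
}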

Aggregations

BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager): 47 usages
Test (org.junit.Test): 33 usages
Path (org.apache.hadoop.fs.Path): 21 usages
BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo): 13 usages
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 12 usages
IOException (java.io.IOException): 11 usages
Configuration (org.apache.hadoop.conf.Configuration): 11 usages
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 11 usages
DatanodeDescriptor (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor): 11 usages
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 10 usages
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 9 usages
Block (org.apache.hadoop.hdfs.protocol.Block): 8 usages
FileNotFoundException (java.io.FileNotFoundException): 7 usages
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 7 usages
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 7 usages
LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock): 7 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 6 usages
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 6 usages
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 6 usages
DatanodeManager (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager): 6 usages