
Example 31 with Block

use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.

the class BlockManager method reportDiffSorted.

private void reportDiffSorted(DatanodeStorageInfo storageInfo,
        Iterable<BlockReportReplica> newReport,
        Collection<BlockInfoToAdd> toAdd,          // add to DatanodeDescriptor
        Collection<BlockInfo> toRemove,            // remove from DatanodeDescriptor
        Collection<Block> toInvalidate,            // should be removed from DN
        Collection<BlockToMarkCorrupt> toCorrupt,  // add to corrupt replicas list
        Collection<StatefulBlockInfo> toUC) {      // add to under-construction list
    // Both the reported replicas and the storage's stored blocks must be
    // sorted by block ID for the single-pass diff below to work.
    Iterator<BlockInfo> storageBlocksIterator = storageInfo.getBlockIterator();
    DatanodeDescriptor dn = storageInfo.getDatanodeDescriptor();
    BlockInfo storageBlock = null;
    for (BlockReportReplica replica : newReport) {
        long replicaID = replica.getBlockId();
        if (BlockIdManager.isStripedBlockID(replicaID) && (!hasNonEcBlockUsingStripedID || !blocksMap.containsBlock(replica))) {
            replicaID = BlockIdManager.convertToStripedID(replicaID);
        }
        ReplicaState reportedState = replica.getState();
        if (LOG.isDebugEnabled()) {
            LOG.debug("Reported block " + replica + " on " + dn + " size " + replica.getNumBytes() + " replicaState = " + reportedState);
        }
        if (shouldPostponeBlocksFromFuture && isGenStampInFuture(replica)) {
            queueReportedBlock(storageInfo, replica, reportedState, QUEUE_REASON_FUTURE_GENSTAMP);
            continue;
        }
        if (storageBlock == null && storageBlocksIterator.hasNext()) {
            storageBlock = storageBlocksIterator.next();
        }
        do {
            int cmp;
            if (storageBlock == null || (cmp = Long.compare(replicaID, storageBlock.getBlockId())) < 0) {
                // Check if block is available in NN but not yet on this storage
                BlockInfo nnBlock = blocksMap.getStoredBlock(new Block(replicaID));
                if (nnBlock != null) {
                    reportDiffSortedInner(storageInfo, replica, reportedState, nnBlock, toAdd, toCorrupt, toUC);
                } else {
                    // Replica not found anywhere so it should be invalidated
                    toInvalidate.add(new Block(replica));
                }
                break;
            } else if (cmp == 0) {
                // Replica matched the current storage block
                reportDiffSortedInner(storageInfo, replica, reportedState, storageBlock, toAdd, toCorrupt, toUC);
                storageBlock = null;
            } else {
                // Remove all stored blocks with IDs lower than replica
                do {
                    toRemove.add(storageBlock);
                    storageBlock = storageBlocksIterator.hasNext() ? storageBlocksIterator.next() : null;
                } while (storageBlock != null && Long.compare(replicaID, storageBlock.getBlockId()) > 0);
            }
        } while (storageBlock != null);
    }
    // Iterate any remaining blocks that have not been reported and remove them
    while (storageBlocksIterator.hasNext()) {
        toRemove.add(storageBlocksIterator.next());
    }
}
Also used: ReportedBlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.PendingDataNodeMessages.ReportedBlockInfo), ReceivedDeletedBlockInfo(org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo), BlockReportReplica(org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica), Block(org.apache.hadoop.hdfs.protocol.Block), CachedBlock(org.apache.hadoop.hdfs.server.namenode.CachedBlock), LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock), LocatedStripedBlock(org.apache.hadoop.hdfs.protocol.LocatedStripedBlock), ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock), ReplicaState(org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState), StoredReplicaState(org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas.StoredReplicaState)
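
The method is a single-pass merge join over two ID-sorted sequences: the replicas in the incoming report and the blocks already recorded for the storage. A minimal standalone sketch of the same pattern, using plain long IDs instead of HDFS types (all names here are illustrative, not Hadoop APIs):

import java.util.Iterator;
import java.util.List;

class SortedDiff {
    // Walk two ascending ID sequences once: IDs only in `reported` are
    // additions, IDs only in `stored` are removals, matches are kept.
    static void diff(Iterator<Long> reported, Iterator<Long> stored,
                     List<Long> toAdd, List<Long> toRemove) {
        Long s = stored.hasNext() ? stored.next() : null;
        while (reported.hasNext()) {
            long r = reported.next();
            // Stored IDs below the reported one were never reported: remove.
            while (s != null && s < r) {
                toRemove.add(s);
                s = stored.hasNext() ? stored.next() : null;
            }
            if (s != null && s == r) {
                s = stored.hasNext() ? stored.next() : null; // match, keep
            } else {
                toAdd.add(r); // reported but not yet stored
            }
        }
        // Anything left in `stored` was never reported: remove.
        while (s != null) {
            toRemove.add(s);
            s = stored.hasNext() ? stored.next() : null;
        }
    }
}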

Example 32 with Block

use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.

the class ErasureCodingWork method createReplicationWork.

private void createReplicationWork(int sourceIndex, DatanodeStorageInfo target) {
    BlockInfoStriped stripedBlk = (BlockInfoStriped) getBlock();
    final byte blockIndex = liveBlockIndicies[sourceIndex];
    final DatanodeDescriptor source = getSrcNodes()[sourceIndex];
    final long internBlkLen = StripedBlockUtil.getInternalBlockLength(stripedBlk.getNumBytes(), stripedBlk.getCellSize(), stripedBlk.getDataBlockNum(), blockIndex);
    final Block targetBlk = new Block(stripedBlk.getBlockId() + blockIndex, internBlkLen, stripedBlk.getGenerationStamp());
    source.addBlockToBeReplicated(targetBlk, new DatanodeStorageInfo[] { target });
    if (BlockManager.LOG.isDebugEnabled()) {
        BlockManager.LOG.debug("Add replication task from source {} to " + "target {} for EC block {}", source, target, targetBlk);
    }
}
Also used: ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock), Block(org.apache.hadoop.hdfs.protocol.Block)
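
The targetBlk construction relies on the striped-ID scheme: a block group ID reserves its low 4 bits for the internal block index, so an internal block's ID is groupId + blockIndex. A quick illustration with a made-up group ID (the 4-bit reservation is the erasure-coding convention; the concrete values are hypothetical):

// Striped block group IDs are negative and keep their low 4 bits zero,
// leaving room for up to 16 internal blocks: internal ID = groupId + index.
long groupId = Long.MIN_VALUE + 16;        // hypothetical group ID, low 4 bits clear
for (byte index = 0; index < 9; index++) { // e.g. RS-6-3: 6 data + 3 parity blocks
    System.out.println("index " + index + " -> internal ID " + (groupId + index));
}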

Example 33 with Block

use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.

the class TestDFSShell method getMaterializedReplicas.

private static List<MaterializedReplica> getMaterializedReplicas(MiniDFSCluster cluster) throws IOException {
    List<MaterializedReplica> replicas = new ArrayList<>();
    String poolId = cluster.getNamesystem().getBlockPoolId();
    List<Map<DatanodeStorage, BlockListAsLongs>> blocks = cluster.getAllBlockReports(poolId);
    for (int i = 0; i < blocks.size(); i++) {
        Map<DatanodeStorage, BlockListAsLongs> map = blocks.get(i);
        for (Map.Entry<DatanodeStorage, BlockListAsLongs> e : map.entrySet()) {
            for (Block b : e.getValue()) {
                replicas.add(cluster.getMaterializedReplica(i, new ExtendedBlock(poolId, b)));
            }
        }
    }
    return replicas;
}
Also used: MaterializedReplica(org.apache.hadoop.hdfs.server.datanode.FsDatasetTestUtils.MaterializedReplica), ArrayList(java.util.ArrayList), ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock), StringContains.containsString(org.hamcrest.core.StringContains.containsString), DatanodeStorage(org.apache.hadoop.hdfs.server.protocol.DatanodeStorage), BlockListAsLongs(org.apache.hadoop.hdfs.protocol.BlockListAsLongs), Block(org.apache.hadoop.hdfs.protocol.Block), Map(java.util.Map)
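
A common next step in such tests is to tamper with the returned replicas on disk, for example to force checksum failures on a later read. A sketch, assuming the corruptData() helper on FsDatasetTestUtils.MaterializedReplica:

// Corrupt every materialized replica so subsequent reads must detect it
// (assumes MaterializedReplica.corruptData(); check FsDatasetTestUtils).
for (MaterializedReplica replica : getMaterializedReplicas(cluster)) {
    replica.corruptData();
}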

Example 34 with Block

use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.

the class TestAddOverReplicatedStripedBlocks method testProcessOverReplicatedAndMissingStripedBlock.

// This test is going to be rewritten in HDFS-10854. Ignoring this test
// temporarily as it fails with the fix for HDFS-10301.
@Ignore
@Test
public void testProcessOverReplicatedAndMissingStripedBlock() throws Exception {
    long fileLen = cellSize * dataBlocks;
    DFSTestUtil.createStripedFile(cluster, filePath, null, 1, stripesPerBlock, false);
    LocatedBlocks lbs = cluster.getNameNodeRpc().getBlockLocations(filePath.toString(), 0, fileLen);
    LocatedStripedBlock bg = (LocatedStripedBlock) (lbs.get(0));
    long gs = bg.getBlock().getGenerationStamp();
    String bpid = bg.getBlock().getBlockPoolId();
    long groupId = bg.getBlock().getBlockId();
    Block blk = new Block(groupId, blockSize, gs);
    // only inject GROUP_SIZE - 1 blocks, so there is one block missing
    for (int i = 0; i < groupSize - 1; i++) {
        blk.setBlockId(groupId + i);
        cluster.injectBlocks(i, Arrays.asList(blk), bpid);
    }
    cluster.triggerBlockReports();
    // Let an internal block be over-replicated with 2 redundant blocks.
    // Therefore the number of internal blocks exceeds GROUP_SIZE. (5 data
    // blocks + 3 parity blocks + 2 redundant blocks > GROUP_SIZE)
    blk.setBlockId(groupId + 2);
    cluster.injectBlocks(numDNs - 3, Arrays.asList(blk), bpid);
    cluster.injectBlocks(numDNs - 2, Arrays.asList(blk), bpid);
    // update blocksMap
    cluster.triggerBlockReports();
    Thread.sleep(2000);
    // add to invalidates
    cluster.triggerHeartbeats();
    // datanode delete block
    cluster.triggerHeartbeats();
    // update blocksMap
    cluster.triggerBlockReports();
    // Since one block is missing, over-replicated blocks will not be
    // deleted until reconstruction happens.
    lbs = cluster.getNameNodeRpc().getBlockLocations(filePath.toString(), 0, fileLen);
    bg = (LocatedStripedBlock) (lbs.get(0));
    assertEquals(groupSize + 1, bg.getBlockIndices().length);
    assertEquals(groupSize + 1, bg.getLocations().length);
    BitSet set = new BitSet(groupSize);
    for (byte index : bg.getBlockIndices()) {
        set.set(index);
    }
    Assert.assertFalse(set.get(groupSize - 1));
    for (int i = 0; i < groupSize - 1; i++) {
        assertTrue(set.get(i));
    }
}
Also used: LocatedStripedBlock(org.apache.hadoop.hdfs.protocol.LocatedStripedBlock), LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks), BitSet(java.util.BitSet), Block(org.apache.hadoop.hdfs.protocol.Block), Ignore(org.junit.Ignore), Test(org.junit.Test)
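
The closing BitSet assertions verify exactly which internal indices were reported. The same check can be phrased with BitSet.nextClearBit, a compact variant of the loop above:

BitSet seen = new BitSet(groupSize);
for (byte index : bg.getBlockIndices()) {
    seen.set(index);
}
// The first clear bit is the single internal index with no reported replica.
assertEquals(groupSize - 1, seen.nextClearBit(0));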

Example 35 with Block

use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.

the class TestAddOverReplicatedStripedBlocks method testProcessOverReplicatedSBSmallerThanFullBlocks.

@Test
public void testProcessOverReplicatedSBSmallerThanFullBlocks() throws Exception {
    // Create an EC file that doesn't fill the internal blocks completely.
    int fileLen = cellSize * (dataBlocks - 1);
    byte[] content = new byte[fileLen];
    DFSTestUtil.writeFile(fs, filePath, new String(content));
    LocatedBlocks lbs = cluster.getNameNodeRpc().getBlockLocations(filePath.toString(), 0, fileLen);
    LocatedStripedBlock bg = (LocatedStripedBlock) (lbs.get(0));
    long gs = bg.getBlock().getGenerationStamp();
    String bpid = bg.getBlock().getBlockPoolId();
    long groupId = bg.getBlock().getBlockId();
    Block blk = new Block(groupId, blockSize, gs);
    cluster.triggerBlockReports();
    List<DatanodeInfo> infos = Arrays.asList(bg.getLocations());
    // Let an internal block be over-replicated with (numDNs - GROUP_SIZE + 1)
    // redundant blocks, so the number of internal blocks exceeds GROUP_SIZE.
    blk.setBlockId(groupId);
    List<DataNode> dataNodeList = cluster.getDataNodes();
    for (int i = 0; i < numDNs; i++) {
        if (!infos.contains(dataNodeList.get(i).getDatanodeId())) {
            cluster.injectBlocks(i, Arrays.asList(blk), bpid);
            System.out.println("XXX: inject block into datanode " + i);
        }
    }
    // update blocksMap
    cluster.triggerBlockReports();
    // add to invalidates
    cluster.triggerHeartbeats();
    // datanode delete block
    cluster.triggerHeartbeats();
    // update blocksMap
    cluster.triggerBlockReports();
    // verify that all internal blocks exist
    lbs = cluster.getNameNodeRpc().getBlockLocations(filePath.toString(), 0, fileLen);
    StripedFileTestUtil.verifyLocatedStripedBlocks(lbs, groupSize - 1);
}
Also used: LocatedStripedBlock(org.apache.hadoop.hdfs.protocol.LocatedStripedBlock), DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo), DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode), LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks), Block(org.apache.hadoop.hdfs.protocol.Block), Test(org.junit.Test)
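
The expected count of groupSize - 1 follows from the file length: fileLen = cellSize * (dataBlocks - 1) fills one cell in each of the first dataBlocks - 1 data blocks, so the last data block stays empty and the group carries one fewer internal block. The arithmetic with illustrative RS-6-3 numbers (the cell size here is made up):

int cellSize = 64 * 1024;                  // illustrative cell size
int dataBlocks = 6, parityBlocks = 3;      // RS-6-3 layout
int fileLen = cellSize * (dataBlocks - 1); // fills data blocks 0..4 with one cell each
int nonEmptyData = (fileLen + cellSize - 1) / cellSize; // = dataBlocks - 1 = 5
int internalBlocks = nonEmptyData + parityBlocks;       // = groupSize - 1 = 8
System.out.println("internal blocks in group: " + internalBlocks);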

Aggregations

Block (org.apache.hadoop.hdfs.protocol.Block): 155 usages
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 79 usages
Test (org.junit.Test): 77 usages
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 74 usages
Path (org.apache.hadoop.fs.Path): 28 usages
LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock): 26 usages
IOException (java.io.IOException): 24 usages
BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo): 22 usages
Configuration (org.apache.hadoop.conf.Configuration): 20 usages
ReceivedDeletedBlockInfo (org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo): 18 usages
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 17 usages
BlockInfoContiguous (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous): 17 usages
CachedBlock (org.apache.hadoop.hdfs.server.namenode.CachedBlock): 17 usages
BlockInfoStriped (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped): 15 usages
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 14 usages
ArrayList (java.util.ArrayList): 12 usages
RecoveringBlock (org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock): 11 usages
DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage): 11 usages
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 10 usages
DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration): 10 usages