Example 11 with RecoveringBlock

Use of org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock in project hadoop by apache.

From the class SimulatedFSDataset, method initReplicaRecovery:

@Override
public ReplicaRecoveryInfo initReplicaRecovery(RecoveringBlock rBlock) throws IOException {
    ExtendedBlock b = rBlock.getBlock();
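    // Look up the simulated replica in this block pool's map.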
    final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
    BInfo binfo = map.get(b.getLocalBlock());
    if (binfo == null) {
        throw new IOException("No such Block " + b);
    }
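    // Report the replica's id, on-disk length, generation stamp and state so
    // the recovery coordinator can choose a consistent recovery length.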
    return new ReplicaRecoveryInfo(binfo.getBlockId(), binfo.getBytesOnDisk(),
        binfo.getGenerationStamp(),
        binfo.isFinalized() ? ReplicaState.FINALIZED : ReplicaState.RBW);
}
Also used : ReplicaRecoveryInfo(org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) Block(org.apache.hadoop.hdfs.protocol.Block) RecoveringBlock(org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock) IOException(java.io.IOException)
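
A minimal calling sketch, not taken from the Hadoop sources: how a recovery initiator might drive this method. The dataset parameter and the block values are illustrative assumptions.

ReplicaRecoveryInfo startRecovery(SimulatedFSDataset dataset) throws IOException {
    // Illustrative block: pool "bp-1", block id 42, 1024 bytes, gen stamp 100.
    ExtendedBlock block = new ExtendedBlock("bp-1", 42L, 1024L, 100L);
    // Recover under the new generation stamp 101; peer locations are not
    // needed for this local call, so an empty array is passed.
    RecoveringBlock rBlock = new RecoveringBlock(block, new DatanodeInfo[0], 101L);
    // Throws IOException when the dataset holds no replica for the block.
    return dataset.initReplicaRecovery(rBlock);
}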

Example 12 with RecoveringBlock

Use of org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock in project hadoop by apache.

From the class DatanodeManager, method getBlockRecoveryCommand:

private BlockRecoveryCommand getBlockRecoveryCommand(String blockPoolId, DatanodeDescriptor nodeinfo) {
    BlockInfo[] blocks = nodeinfo.getLeaseRecoveryCommand(Integer.MAX_VALUE);
    if (blocks == null) {
        return null;
    }
    BlockRecoveryCommand brCommand = new BlockRecoveryCommand(blocks.length);
    for (BlockInfo b : blocks) {
        BlockUnderConstructionFeature uc = b.getUnderConstructionFeature();
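        // Any block queued for lease recovery is still under construction.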
        assert uc != null;
        final DatanodeStorageInfo[] storages = uc.getExpectedStorageLocations();
        // Skip stale nodes during recovery
        final List<DatanodeStorageInfo> recoveryLocations = new ArrayList<>(storages.length);
        for (DatanodeStorageInfo storage : storages) {
            if (!storage.getDatanodeDescriptor().isStale(staleInterval)) {
                recoveryLocations.add(storage);
            }
        }
        // If we are performing a truncate recovery, then set the recovery
        // fields to the old block.
        boolean truncateRecovery = uc.getTruncateBlock() != null;
        boolean copyOnTruncateRecovery = truncateRecovery && uc.getTruncateBlock().getBlockId() != b.getBlockId();
        ExtendedBlock primaryBlock = (copyOnTruncateRecovery)
            ? new ExtendedBlock(blockPoolId, uc.getTruncateBlock())
            : new ExtendedBlock(blockPoolId, b);
        // If we only get 1 replica after eliminating stale nodes, choose all
        // replicas for recovery and let the primary data node handle failures.
        DatanodeInfo[] recoveryInfos;
        if (recoveryLocations.size() > 1) {
            if (recoveryLocations.size() != storages.length) {
                LOG.info("Skipped stale nodes for recovery : " + (storages.length - recoveryLocations.size()));
            }
            recoveryInfos = DatanodeStorageInfo.toDatanodeInfos(recoveryLocations);
        } else {
            // If too many replicas are stale, then choose all replicas to
            // participate in block recovery.
            recoveryInfos = DatanodeStorageInfo.toDatanodeInfos(storages);
        }
        RecoveringBlock rBlock;
        if (truncateRecovery) {
            Block recoveryBlock = (copyOnTruncateRecovery) ? b : uc.getTruncateBlock();
            rBlock = new RecoveringBlock(primaryBlock, recoveryInfos, recoveryBlock);
        } else {
            rBlock = new RecoveringBlock(primaryBlock, recoveryInfos, uc.getBlockRecoveryId());
            if (b.isStriped()) {
                rBlock = new RecoveringStripedBlock(rBlock, uc.getBlockIndices(),
                    ((BlockInfoStriped) b).getErasureCodingPolicy());
            }
        }
        brCommand.add(rBlock);
    }
    return brCommand;
}
Also used : RecoveringStripedBlock(org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringStripedBlock) RecoveringBlock(org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock) CachedBlock(org.apache.hadoop.hdfs.server.namenode.CachedBlock)
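
A hedged sketch of the consuming side, not taken from the source: the command built above reaches the chosen datanode in a heartbeat response, and its blocks are handed to a BlockRecoveryWorker (see Example 13). The handle method and the worker parameter are illustrative assumptions.

void handle(BlockRecoveryCommand brCommand, BlockRecoveryWorker worker) {
    // getRecoveringBlocks() returns the blocks added via brCommand.add(rBlock).
    Collection<RecoveringBlock> blocks = brCommand.getRecoveringBlocks();
    worker.recoverBlocks("NameNode heartbeat", blocks);
}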

Example 13 with RecoveringBlock

Use of org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock in project hadoop by apache.

From the class BlockRecoveryWorker, method recoverBlocks:

public Daemon recoverBlocks(final String who, final Collection<RecoveringBlock> blocks) {
    Daemon d = new Daemon(datanode.threadGroup, new Runnable() {

        @Override
        public void run() {
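            // Recover each block independently; a failure is logged and does
            // not abort the remaining recoveries.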
            for (RecoveringBlock b : blocks) {
                try {
                    logRecoverBlock(who, b);
                    if (b.isStriped()) {
                        new RecoveryTaskStriped((RecoveringStripedBlock) b).recover();
                    } else {
                        new RecoveryTaskContiguous(b).recover();
                    }
                } catch (IOException e) {
                    LOG.warn("recoverBlocks FAILED: " + b, e);
                }
            }
        }
    });
    d.start();
    return d;
}
Also used : Daemon(org.apache.hadoop.util.Daemon) RecoveringBlock(org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock) IOException(java.io.IOException)
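
A usage sketch under the assumption that the caller wants to wait for completion: org.apache.hadoop.util.Daemon extends Thread, so the returned daemon can simply be joined. The worker and blocks names are illustrative.

// Start recovery of all blocks and wait until every one has been attempted.
Daemon d = worker.recoverBlocks("test-harness", blocks);
try {
    // run() iterates the whole collection, so join() returns only after
    // each RecoveringBlock has been processed (or its failure logged).
    d.join();
} catch (InterruptedException e) {
    Thread.currentThread().interrupt();
}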

Example 14 with RecoveringBlock

Use of org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock in project hadoop by apache.

From the class PBHelper, method convert:

public static RecoveringBlock convert(RecoveringBlockProto b) {
    LocatedBlock lb = PBHelperClient.convertLocatedBlockProto(b.getBlock());
    RecoveringBlock rBlock;
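    // A truncate recovery carries the old block to recover to; otherwise the
    // proto carries only the new generation stamp for the recovery.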
    if (b.hasTruncateBlock()) {
        rBlock = new RecoveringBlock(lb.getBlock(), lb.getLocations(), PBHelperClient.convert(b.getTruncateBlock()));
    } else {
        rBlock = new RecoveringBlock(lb.getBlock(), lb.getLocations(), b.getNewGenStamp());
    }
    if (b.hasEcPolicy()) {
        assert b.hasBlockIndices();
        byte[] indices = b.getBlockIndices().toByteArray();
        rBlock = new RecoveringStripedBlock(rBlock, indices, PBHelperClient.convertErasureCodingPolicy(b.getEcPolicy()));
    }
    return rBlock;
}
Also used : RecoveringStripedBlock(org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringStripedBlock) RecoveringBlock(org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock)
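
A hedged round-trip sketch, not from the source, exercising the hasTruncateBlock() branch above; the block values are illustrative.

// A truncate-recovery block survives the proto round trip.
ExtendedBlock blk = new ExtendedBlock("bp-1", 7L, 100L, 2L);
// The truncate target shares the block id but has the shorter post-truncate length.
Block truncateBlock = new Block(7L, 50L, 2L);
RecoveringBlock before = new RecoveringBlock(blk, new DatanodeInfo[0], truncateBlock);
RecoveringBlock after = PBHelper.convert(PBHelper.convert(before));
// getNewBlock() returns the recovery target restored from proto.getTruncateBlock().
assert after.getNewBlock() != null;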

Example 15 with RecoveringBlock

Use of org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock in project hadoop by apache.

From the class TestPBHelper, method testConvertRecoveringBlock:

@Test
public void testConvertRecoveringBlock() {
    DatanodeInfo di1 = DFSTestUtil.getLocalDatanodeInfo();
    DatanodeInfo di2 = DFSTestUtil.getLocalDatanodeInfo();
    DatanodeInfo[] dnInfo = new DatanodeInfo[] { di1, di2 };
    RecoveringBlock b = new RecoveringBlock(getExtendedBlock(), dnInfo, 3);
    RecoveringBlockProto bProto = PBHelper.convert(b);
    RecoveringBlock b1 = PBHelper.convert(bProto);
    assertEquals(b.getBlock(), b1.getBlock());
    DatanodeInfo[] dnInfo1 = b1.getLocations();
    assertEquals(dnInfo.length, dnInfo1.length);
    for (int i = 0; i < dnInfo.length; i++) {
        compare(dnInfo[i], dnInfo1[i]);
    }
}
Also used : DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) RecoveringBlock(org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock) RecoveringBlockProto(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto) Test(org.junit.Test)
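
A companion sketch, not in the source, for the striped branch of the conversion. It reuses dnInfo and getExtendedBlock() from the test above and assumes the Hadoop 3.x helper SystemErasureCodingPolicies for obtaining a policy; the block indices are illustrative.

// The striped variant keeps its indices and erasure coding policy across
// the proto round trip.
RecoveringBlock base = new RecoveringBlock(getExtendedBlock(), dnInfo, 3);
RecoveringStripedBlock striped = new RecoveringStripedBlock(base,
    new byte[] { 0, 1 }, SystemErasureCodingPolicies.getPolicies().get(0));
RecoveringBlock restored = PBHelper.convert(PBHelper.convert(striped));
assertTrue(restored instanceof RecoveringStripedBlock);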

Aggregations

RecoveringBlock (org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock): 20
Test (org.junit.Test): 10
IOException (java.io.IOException): 8
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 8
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 6
ReplicaRecoveryInfo (org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo): 5
DatanodeInfoBuilder (org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder): 4
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 4
InterDatanodeProtocol (org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol): 4
ArrayList (java.util.ArrayList): 3
Path (org.apache.hadoop.fs.Path): 3
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 3
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 3
BlockRecord (org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.BlockRecord): 3
RecoveryInProgressException (org.apache.hadoop.hdfs.protocol.RecoveryInProgressException): 2
RecoveringBlockProto (org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto): 2
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 2
BlockRecoveryCommand (org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand): 2
RecoveringStripedBlock (org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringStripedBlock): 2
DatanodeProtocol (org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol): 2