Search in sources :

Example 1 with RecoveringStripedBlock

use of org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringStripedBlock in project hadoop by apache.

The class PBHelper, method convert.

/**
 * Converts a {@link RecoveringBlock} into its protobuf representation.
 *
 * @param b the recovering block to convert; may be null
 * @return the corresponding {@link RecoveringBlockProto}, or null if
 *         {@code b} is null
 */
public static RecoveringBlockProto convert(RecoveringBlock b) {
    if (b == null) {
        return null;
    }
    LocatedBlockProto lb = PBHelperClient.convertLocatedBlock(b);
    RecoveringBlockProto.Builder builder = RecoveringBlockProto.newBuilder();
    builder.setBlock(lb).setNewGenStamp(b.getNewGenerationStamp());
    // A non-null "new block" indicates a truncate recovery; carry the
    // truncate target along in the proto. (Braces added: single-statement
    // ifs must still be braced per code style.)
    if (b.getNewBlock() != null) {
        builder.setTruncateBlock(PBHelperClient.convert(b.getNewBlock()));
    }
    // Striped (erasure-coded) blocks additionally carry the EC policy and
    // the per-replica block indices.
    if (b instanceof RecoveringStripedBlock) {
        RecoveringStripedBlock sb = (RecoveringStripedBlock) b;
        builder.setEcPolicy(PBHelperClient.convertErasureCodingPolicy(
            sb.getErasureCodingPolicy()));
        builder.setBlockIndices(PBHelperClient.getByteString(sb.getBlockIndices()));
    }
    return builder.build();
}
Also used : LocatedBlockProto(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto) RecoveringStripedBlock(org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringStripedBlock) RecoveringBlockProto(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto)

Example 2 with RecoveringStripedBlock

use of org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringStripedBlock in project hadoop by apache.

The class TestBlockRecovery, method testSafeLength.

/**
 * Verifies that {@code RecoveryTaskStriped#getSafeLength} computes the
 * expected safe length for each hard-coded block-length scenario in
 * {@code blockLengthsSuite}.
 */
@Test(timeout = 60000)
public void testSafeLength() throws Exception {
    // hard coded policy to work with hard coded test suite
    ErasureCodingPolicy ecPolicy = ErasureCodingPolicyManager.getSystemPolicies()[0];
    RecoveringStripedBlock rBlockStriped =
        new RecoveringStripedBlock(rBlock, new byte[9], ecPolicy);
    BlockRecoveryWorker recoveryWorker = new BlockRecoveryWorker(dn);
    BlockRecoveryWorker.RecoveryTaskStriped recoveryTask =
        recoveryWorker.new RecoveryTaskStriped(rBlockStriped);
    for (int caseIdx = 0; caseIdx < blockLengthsSuite.length; caseIdx++) {
        final int[] lengths = blockLengthsSuite[caseIdx][0];
        final int expectedSafeLength = blockLengthsSuite[caseIdx][1][0];
        // Build a sync list with one replica record per configured length.
        final Map<Long, BlockRecord> syncList = new HashMap<>();
        long replicaId = 0;
        for (int len : lengths) {
            ReplicaRecoveryInfo info = new ReplicaRecoveryInfo(replicaId, len, 0, null);
            syncList.put(replicaId, new BlockRecord(null, null, info));
            replicaId++;
        }
        Assert.assertEquals("BLOCK_LENGTHS_SUITE[" + caseIdx + "]",
            expectedSafeLength, recoveryTask.getSafeLength(syncList));
    }
}
Also used : RecoveringStripedBlock(org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringStripedBlock) HashMap(java.util.HashMap) ReplicaRecoveryInfo(org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo) BlockRecord(org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.BlockRecord) ErasureCodingPolicy(org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy) Matchers.anyLong(org.mockito.Matchers.anyLong) Test(org.junit.Test)

Example 3 with RecoveringStripedBlock

use of org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringStripedBlock in project hadoop by apache.

The class DatanodeManager, method getBlockRecoveryCommand.

/**
 * Builds a {@link BlockRecoveryCommand} covering every block on the given
 * datanode that is pending lease recovery, preferring non-stale replica
 * locations where enough of them exist.
 *
 * @param blockPoolId the block pool the blocks belong to
 * @param nodeinfo the datanode chosen as primary for the recovery
 * @return a command listing the blocks to recover, or null if the node has
 *         no pending lease-recovery blocks
 */
private BlockRecoveryCommand getBlockRecoveryCommand(String blockPoolId, DatanodeDescriptor nodeinfo) {
    BlockInfo[] blocks = nodeinfo.getLeaseRecoveryCommand(Integer.MAX_VALUE);
    if (blocks == null) {
        return null;
    }
    BlockRecoveryCommand brCommand = new BlockRecoveryCommand(blocks.length);
    for (BlockInfo b : blocks) {
        // Lease-recovery candidates are under construction by definition.
        BlockUnderConstructionFeature uc = b.getUnderConstructionFeature();
        assert uc != null;
        final DatanodeStorageInfo[] storages = uc.getExpectedStorageLocations();
        // Skip stale nodes during recovery
        final List<DatanodeStorageInfo> recoveryLocations = new ArrayList<>(storages.length);
        for (DatanodeStorageInfo storage : storages) {
            if (!storage.getDatanodeDescriptor().isStale(staleInterval)) {
                recoveryLocations.add(storage);
            }
        }
        // If we are performing a truncate recovery then set recovery fields
        // to old block.
        boolean truncateRecovery = uc.getTruncateBlock() != null;
        // Copy-on-truncate: the truncate block has a different id from the
        // original block, so the truncated data lives under a new block.
        boolean copyOnTruncateRecovery = truncateRecovery && uc.getTruncateBlock().getBlockId() != b.getBlockId();
        ExtendedBlock primaryBlock = (copyOnTruncateRecovery) ? new ExtendedBlock(blockPoolId, uc.getTruncateBlock()) : new ExtendedBlock(blockPoolId, b);
        // If we only get 1 replica after eliminating stale nodes, choose all
        // replicas for recovery and let the primary data node handle failures.
        DatanodeInfo[] recoveryInfos;
        if (recoveryLocations.size() > 1) {
            if (recoveryLocations.size() != storages.length) {
                LOG.info("Skipped stale nodes for recovery : " + (storages.length - recoveryLocations.size()));
            }
            recoveryInfos = DatanodeStorageInfo.toDatanodeInfos(recoveryLocations);
        } else {
            // If too many replicas are stale, then choose all replicas to
            // participate in block recovery.
            recoveryInfos = DatanodeStorageInfo.toDatanodeInfos(storages);
        }
        RecoveringBlock rBlock;
        if (truncateRecovery) {
            // Copy-on-truncate recovers the original block; in-place truncate
            // recovers the truncate block itself.
            Block recoveryBlock = (copyOnTruncateRecovery) ? b : uc.getTruncateBlock();
            rBlock = new RecoveringBlock(primaryBlock, recoveryInfos, recoveryBlock);
        } else {
            rBlock = new RecoveringBlock(primaryBlock, recoveryInfos, uc.getBlockRecoveryId());
            // Striped (erasure-coded) blocks also need per-replica block
            // indices and the EC policy so the primary can reconstruct.
            if (b.isStriped()) {
                rBlock = new RecoveringStripedBlock(rBlock, uc.getBlockIndices(), ((BlockInfoStriped) b).getErasureCodingPolicy());
            }
        }
        brCommand.add(rBlock);
    }
    return brCommand;
}
Also used : RecoveringStripedBlock(org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringStripedBlock) RecoveringBlock(org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock) CachedBlock(org.apache.hadoop.hdfs.server.namenode.CachedBlock) RecoveringStripedBlock(org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringStripedBlock) RecoveringBlock(org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock)

Example 4 with RecoveringStripedBlock

use of org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringStripedBlock in project hadoop by apache.

The class PBHelper, method convert.

/**
 * Converts a {@link RecoveringBlockProto} back into a
 * {@link RecoveringBlock}, producing a {@link RecoveringStripedBlock}
 * when erasure-coding metadata is present in the proto.
 *
 * @param b the protobuf message to convert
 * @return the reconstructed recovering block
 */
public static RecoveringBlock convert(RecoveringBlockProto b) {
    final LocatedBlock lb = PBHelperClient.convertLocatedBlockProto(b.getBlock());
    // Truncate recovery carries the truncate target; otherwise the proto
    // carries the new generation stamp.
    RecoveringBlock rBlock = b.hasTruncateBlock()
        ? new RecoveringBlock(lb.getBlock(), lb.getLocations(),
            PBHelperClient.convert(b.getTruncateBlock()))
        : new RecoveringBlock(lb.getBlock(), lb.getLocations(), b.getNewGenStamp());
    if (b.hasEcPolicy()) {
        // EC policy and block indices always travel together for striped blocks.
        assert b.hasBlockIndices();
        rBlock = new RecoveringStripedBlock(rBlock,
            b.getBlockIndices().toByteArray(),
            PBHelperClient.convertErasureCodingPolicy(b.getEcPolicy()));
    }
    return rBlock;
}
Also used : RecoveringStripedBlock(org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringStripedBlock) RecoveringBlock(org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock)

Aggregations

RecoveringStripedBlock (org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringStripedBlock)4 RecoveringBlock (org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock)2 HashMap (java.util.HashMap)1 ErasureCodingPolicy (org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy)1 LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock)1 LocatedBlockProto (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto)1 RecoveringBlockProto (org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto)1 BlockRecord (org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.BlockRecord)1 CachedBlock (org.apache.hadoop.hdfs.server.namenode.CachedBlock)1 ReplicaRecoveryInfo (org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo)1 Test (org.junit.Test)1 Matchers.anyLong (org.mockito.Matchers.anyLong)1