Use of org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock in project hadoop by apache.
The class SimulatedFSDataset, method initReplicaRecovery:
@Override
public ReplicaRecoveryInfo initReplicaRecovery(RecoveringBlock rBlock)
    throws IOException {
  ExtendedBlock b = rBlock.getBlock();
  final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
  BInfo binfo = map.get(b.getLocalBlock());
  if (binfo == null) {
    throw new IOException("No such Block " + b);
  }
  return new ReplicaRecoveryInfo(binfo.getBlockId(), binfo.getBytesOnDisk(),
      binfo.getGenerationStamp(),
      binfo.isFinalized() ? ReplicaState.FINALIZED : ReplicaState.RBW);
}
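For context, here is a minimal sketch, not taken from the Hadoop sources, of how a test might drive this method; the fsdataset variable, the block-pool id "BP-test" and the block values are hypothetical, and the block is assumed to have already been added to the simulated dataset.

// Hypothetical test-style call; "fsdataset" is assumed to be a
// SimulatedFSDataset that already holds the block created below.
ExtendedBlock blk = new ExtendedBlock("BP-test", new Block(1L, 0L, 100L));
RecoveringBlock rBlock =
    new RecoveringBlock(blk, new DatanodeInfo[0], blk.getGenerationStamp() + 1);
ReplicaRecoveryInfo info = fsdataset.initReplicaRecovery(rBlock);
// info reports the replica's id, bytes on disk, generation stamp and
// original state (FINALIZED or RBW in the simulated dataset).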
Use of org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock in project hadoop by apache.
The class DatanodeManager, method getBlockRecoveryCommand:
private BlockRecoveryCommand getBlockRecoveryCommand(String blockPoolId,
    DatanodeDescriptor nodeinfo) {
  BlockInfo[] blocks = nodeinfo.getLeaseRecoveryCommand(Integer.MAX_VALUE);
  if (blocks == null) {
    return null;
  }
  BlockRecoveryCommand brCommand = new BlockRecoveryCommand(blocks.length);
  for (BlockInfo b : blocks) {
    BlockUnderConstructionFeature uc = b.getUnderConstructionFeature();
    assert uc != null;
    final DatanodeStorageInfo[] storages = uc.getExpectedStorageLocations();
    // Skip stale nodes during recovery
    final List<DatanodeStorageInfo> recoveryLocations =
        new ArrayList<>(storages.length);
    for (DatanodeStorageInfo storage : storages) {
      if (!storage.getDatanodeDescriptor().isStale(staleInterval)) {
        recoveryLocations.add(storage);
      }
    }
    // If we are performing a truncate recovery, then set the recovery fields
    // to the old block.
    boolean truncateRecovery = uc.getTruncateBlock() != null;
    boolean copyOnTruncateRecovery = truncateRecovery
        && uc.getTruncateBlock().getBlockId() != b.getBlockId();
    ExtendedBlock primaryBlock = (copyOnTruncateRecovery)
        ? new ExtendedBlock(blockPoolId, uc.getTruncateBlock())
        : new ExtendedBlock(blockPoolId, b);
    // If we only get 1 replica after eliminating stale nodes, choose all
    // replicas for recovery and let the primary data node handle failures.
    DatanodeInfo[] recoveryInfos;
    if (recoveryLocations.size() > 1) {
      if (recoveryLocations.size() != storages.length) {
        LOG.info("Skipped stale nodes for recovery: "
            + (storages.length - recoveryLocations.size()));
      }
      recoveryInfos = DatanodeStorageInfo.toDatanodeInfos(recoveryLocations);
    } else {
      // If too many replicas are stale, then choose all replicas to
      // participate in block recovery.
      recoveryInfos = DatanodeStorageInfo.toDatanodeInfos(storages);
    }
    RecoveringBlock rBlock;
    if (truncateRecovery) {
      Block recoveryBlock = (copyOnTruncateRecovery) ? b : uc.getTruncateBlock();
      rBlock = new RecoveringBlock(primaryBlock, recoveryInfos, recoveryBlock);
    } else {
      rBlock = new RecoveringBlock(primaryBlock, recoveryInfos,
          uc.getBlockRecoveryId());
      if (b.isStriped()) {
        rBlock = new RecoveringStripedBlock(rBlock, uc.getBlockIndices(),
            ((BlockInfoStriped) b).getErasureCodingPolicy());
      }
    }
    brCommand.add(rBlock);
  }
  return brCommand;
}
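As a hedged illustration, not part of DatanodeManager, the command assembled above could be consumed roughly like this; here cmd stands for a non-null result of the method and LOG for an arbitrary logger.

// Hypothetical consumer of the BlockRecoveryCommand built above.
for (RecoveringBlock rb : cmd.getRecoveringBlocks()) {
  LOG.info("Recovering " + rb.getBlock() + " on " + rb.getLocations().length
      + " datanodes, new generation stamp " + rb.getNewGenerationStamp());
}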
Use of org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock in project hadoop by apache.
The class BlockRecoveryWorker, method recoverBlocks:
public Daemon recoverBlocks(final String who,
    final Collection<RecoveringBlock> blocks) {
  Daemon d = new Daemon(datanode.threadGroup, new Runnable() {
    @Override
    public void run() {
      for (RecoveringBlock b : blocks) {
        try {
          logRecoverBlock(who, b);
          if (b.isStriped()) {
            new RecoveryTaskStriped((RecoveringStripedBlock) b).recover();
          } else {
            new RecoveryTaskContiguous(b).recover();
          }
        } catch (IOException e) {
          LOG.warn("recoverBlocks FAILED: " + b, e);
        }
      }
    }
  });
  d.start();
  return d;
}
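A brief usage sketch, assuming a test-style caller: recoverBlocks returns the started Daemon, so a caller can wait until every recovery task in the batch has run. Here worker and blocks are hypothetical, a BlockRecoveryWorker and the Collection<RecoveringBlock> taken from a BlockRecoveryCommand.

// Hypothetical call site; waits for all recovery tasks to complete.
Daemon d = worker.recoverBlocks("NameNode at nn-host:8020", blocks);
d.join();  // Daemon extends Thread, so join() blocks until run() returns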
Use of org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock in project hadoop by apache.
The class PBHelper, method convert:
public static RecoveringBlock convert(RecoveringBlockProto b) {
  LocatedBlock lb = PBHelperClient.convertLocatedBlockProto(b.getBlock());
  RecoveringBlock rBlock;
  if (b.hasTruncateBlock()) {
    rBlock = new RecoveringBlock(lb.getBlock(), lb.getLocations(),
        PBHelperClient.convert(b.getTruncateBlock()));
  } else {
    rBlock = new RecoveringBlock(lb.getBlock(), lb.getLocations(),
        b.getNewGenStamp());
  }
  if (b.hasEcPolicy()) {
    assert b.hasBlockIndices();
    byte[] indices = b.getBlockIndices().toByteArray();
    rBlock = new RecoveringStripedBlock(rBlock, indices,
        PBHelperClient.convertErasureCodingPolicy(b.getEcPolicy()));
  }
  return rBlock;
}
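As a complementary sketch, not taken from PBHelper, the truncate branch above can be exercised with a round trip through the proto form; extBlk, locs and the Block arguments are hypothetical placeholders. The test below performs the same round trip for the plain generation-stamp case.

// Hypothetical round trip exercising the hasTruncateBlock() branch above.
Block truncated = new Block(blockId, newLength, genStamp);  // assumed values
RecoveringBlock rb = new RecoveringBlock(extBlk, locs, truncated);
RecoveringBlockProto proto = PBHelper.convert(rb);  // RecoveringBlock -> proto
RecoveringBlock back = PBHelper.convert(proto);     // proto -> RecoveringBlock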
Use of org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock in project hadoop by apache.
The class TestPBHelper, method testConvertRecoveringBlock:
@Test
public void testConvertRecoveringBlock() {
  DatanodeInfo di1 = DFSTestUtil.getLocalDatanodeInfo();
  DatanodeInfo di2 = DFSTestUtil.getLocalDatanodeInfo();
  DatanodeInfo[] dnInfo = new DatanodeInfo[] { di1, di2 };
  RecoveringBlock b = new RecoveringBlock(getExtendedBlock(), dnInfo, 3);
  RecoveringBlockProto bProto = PBHelper.convert(b);
  RecoveringBlock b1 = PBHelper.convert(bProto);
  assertEquals(b.getBlock(), b1.getBlock());
  DatanodeInfo[] dnInfo1 = b1.getLocations();
  assertEquals(dnInfo.length, dnInfo1.length);
  for (int i = 0; i < dnInfo.length; i++) {
    // Compare each location pairwise.
    compare(dnInfo[i], dnInfo1[i]);
  }
}