Use of org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringStripedBlock in project hadoop by apache.
The class PBHelper, method convert (RecoveringBlock to RecoveringBlockProto).
public static RecoveringBlockProto convert(RecoveringBlock b) {
  if (b == null) {
    return null;
  }
  LocatedBlockProto lb = PBHelperClient.convertLocatedBlock(b);
  RecoveringBlockProto.Builder builder = RecoveringBlockProto.newBuilder();
  builder.setBlock(lb).setNewGenStamp(b.getNewGenerationStamp());
  if (b.getNewBlock() != null) {
    // A non-null new block means this is a truncate recovery.
    builder.setTruncateBlock(PBHelperClient.convert(b.getNewBlock()));
  }
  if (b instanceof RecoveringStripedBlock) {
    // Striped (erasure-coded) blocks additionally carry the EC policy
    // and the per-storage block indices.
    RecoveringStripedBlock sb = (RecoveringStripedBlock) b;
    builder.setEcPolicy(PBHelperClient.convertErasureCodingPolicy(
        sb.getErasureCodingPolicy()));
    builder.setBlockIndices(PBHelperClient.getByteString(sb.getBlockIndices()));
  }
  return builder.build();
}
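For orientation, a minimal usage sketch of this serializer. The names rBlock, blockIndices, and ecPolicy are hypothetical; in production the striped block is built by DatanodeManager.getBlockRecoveryCommand (shown below). The assertions rely only on the striped-only proto fields this converter sets.

// Sketch: serialize a striped recovery block for the wire format.
// rBlock, blockIndices and ecPolicy are assumed to exist.
RecoveringBlock striped =
    new RecoveringStripedBlock(rBlock, blockIndices, ecPolicy);
RecoveringBlockProto proto = PBHelper.convert(striped);
// A RecoveringStripedBlock input populates the striped-only fields.
assert proto.hasEcPolicy() && proto.hasBlockIndices();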
Use of org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringStripedBlock in project hadoop by apache.
The class TestBlockRecovery, method testSafeLength.
@Test(timeout = 60000)
public void testSafeLength() throws Exception {
  // Hard-coded policy to match the hard-coded test suite.
  ErasureCodingPolicy ecPolicy =
      ErasureCodingPolicyManager.getSystemPolicies()[0];
  RecoveringStripedBlock rBlockStriped =
      new RecoveringStripedBlock(rBlock, new byte[9], ecPolicy);
  BlockRecoveryWorker recoveryWorker = new BlockRecoveryWorker(dn);
  BlockRecoveryWorker.RecoveryTaskStriped recoveryTask =
      recoveryWorker.new RecoveryTaskStriped(rBlockStriped);
  for (int i = 0; i < blockLengthsSuite.length; i++) {
    int[] blockLengths = blockLengthsSuite[i][0];
    int safeLength = blockLengthsSuite[i][1][0];
    // Build a sync list with one replica record per reported length.
    Map<Long, BlockRecord> syncList = new HashMap<>();
    for (int id = 0; id < blockLengths.length; id++) {
      ReplicaRecoveryInfo rInfo =
          new ReplicaRecoveryInfo(id, blockLengths[id], 0, null);
      syncList.put((long) id, new BlockRecord(null, null, rInfo));
    }
    Assert.assertEquals("BLOCK_LENGTHS_SUITE[" + i + "]",
        safeLength, recoveryTask.getSafeLength(syncList));
  }
}
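The fixture shape this test relies on is implicit; the indexing convention below is derived directly from the loop above (case 0 used as the example):

// blockLengthsSuite is assumed to be an int[][][] fixture:
//   blockLengthsSuite[i][0]    -> reported lengths of the internal blocks
//   blockLengthsSuite[i][1][0] -> expected safe length for case i
int[] lengths = blockLengthsSuite[0][0];     // replica lengths, case 0
int expected  = blockLengthsSuite[0][1][0];  // expected safe length, case 0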
Use of org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringStripedBlock in project hadoop by apache.
The class DatanodeManager, method getBlockRecoveryCommand.
private BlockRecoveryCommand getBlockRecoveryCommand(String blockPoolId,
    DatanodeDescriptor nodeinfo) {
  BlockInfo[] blocks = nodeinfo.getLeaseRecoveryCommand(Integer.MAX_VALUE);
  if (blocks == null) {
    return null;
  }
  BlockRecoveryCommand brCommand = new BlockRecoveryCommand(blocks.length);
  for (BlockInfo b : blocks) {
    BlockUnderConstructionFeature uc = b.getUnderConstructionFeature();
    assert uc != null;
    final DatanodeStorageInfo[] storages = uc.getExpectedStorageLocations();
    // Skip stale nodes during recovery.
    final List<DatanodeStorageInfo> recoveryLocations =
        new ArrayList<>(storages.length);
    for (DatanodeStorageInfo storage : storages) {
      if (!storage.getDatanodeDescriptor().isStale(staleInterval)) {
        recoveryLocations.add(storage);
      }
    }
    // If we are performing a truncate recovery, then set recovery fields
    // to the old block.
    boolean truncateRecovery = uc.getTruncateBlock() != null;
    boolean copyOnTruncateRecovery = truncateRecovery
        && uc.getTruncateBlock().getBlockId() != b.getBlockId();
    ExtendedBlock primaryBlock = (copyOnTruncateRecovery)
        ? new ExtendedBlock(blockPoolId, uc.getTruncateBlock())
        : new ExtendedBlock(blockPoolId, b);
    // If we only get 1 replica after eliminating stale nodes, choose all
    // replicas for recovery and let the primary data node handle failures.
    DatanodeInfo[] recoveryInfos;
    if (recoveryLocations.size() > 1) {
      if (recoveryLocations.size() != storages.length) {
        LOG.info("Skipped stale nodes for recovery : "
            + (storages.length - recoveryLocations.size()));
      }
      recoveryInfos = DatanodeStorageInfo.toDatanodeInfos(recoveryLocations);
    } else {
      // If too many replicas are stale, then choose all replicas to
      // participate in block recovery.
      recoveryInfos = DatanodeStorageInfo.toDatanodeInfos(storages);
    }
    RecoveringBlock rBlock;
    if (truncateRecovery) {
      Block recoveryBlock = (copyOnTruncateRecovery) ? b : uc.getTruncateBlock();
      rBlock = new RecoveringBlock(primaryBlock, recoveryInfos, recoveryBlock);
    } else {
      rBlock = new RecoveringBlock(primaryBlock, recoveryInfos,
          uc.getBlockRecoveryId());
      if (b.isStriped()) {
        // Wrap with striped-specific metadata (block indices + EC policy).
        rBlock = new RecoveringStripedBlock(rBlock, uc.getBlockIndices(),
            ((BlockInfoStriped) b).getErasureCodingPolicy());
      }
    }
    brCommand.add(rBlock);
  }
  return brCommand;
}
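The stale-node fallback in the middle of this method is the subtle part. As a hedged, standalone distillation of that rule (a hypothetical helper written for illustration, not part of Hadoop):

import java.util.List;

// Prefer non-stale storages; but if filtering would leave at most one
// candidate, fall back to all storages and let the primary datanode
// handle individual failures.
static <T> List<T> chooseRecoveryLocations(List<T> allStorages,
    List<T> freshStorages) {
  return freshStorages.size() > 1 ? freshStorages : allStorages;
}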
Use of org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringStripedBlock in project hadoop by apache.
The class PBHelper, method convert (RecoveringBlockProto to RecoveringBlock).
public static RecoveringBlock convert(RecoveringBlockProto b) {
  LocatedBlock lb = PBHelperClient.convertLocatedBlockProto(b.getBlock());
  RecoveringBlock rBlock;
  if (b.hasTruncateBlock()) {
    // Truncate recovery: the proto carries the block to truncate to.
    rBlock = new RecoveringBlock(lb.getBlock(), lb.getLocations(),
        PBHelperClient.convert(b.getTruncateBlock()));
  } else {
    rBlock = new RecoveringBlock(lb.getBlock(), lb.getLocations(),
        b.getNewGenStamp());
  }
  if (b.hasEcPolicy()) {
    // A striped block must carry its block indices alongside the policy.
    assert b.hasBlockIndices();
    byte[] indices = b.getBlockIndices().toByteArray();
    rBlock = new RecoveringStripedBlock(rBlock, indices,
        PBHelperClient.convertErasureCodingPolicy(b.getEcPolicy()));
  }
  return rBlock;
}
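Together with the serializer above, this gives a proto round trip. A sketch of the invariant one would expect to hold, test-style, where striped is the block from the earlier sketch:

// Round-trip sketch: striped identity and EC policy should survive
// conversion to RecoveringBlockProto and back.
RecoveringBlock back = PBHelper.convert(PBHelper.convert(striped));
assert back instanceof RecoveringStripedBlock;
RecoveringStripedBlock sb = (RecoveringStripedBlock) back;
assert sb.getErasureCodingPolicy().getName()
    .equals(striped.getErasureCodingPolicy().getName());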