Example usage of org.apache.hadoop.hdfs.server.datanode.ReplicaBuilder in the Apache Hadoop project.
From the class FsDatasetImpl, method initReplicaRecoveryImpl.
/**
 * Prepares a replica of the given block for recovery under the given
 * recovery id. Returns {@code null} when no replica exists for the block,
 * otherwise converts (or updates) the replica to RUR state and returns its
 * {@link ReplicaRecoveryInfo}.
 *
 * @param bpid block pool id the block belongs to
 * @param map replica map to look the block up in (and to register the RUR
 *        replica with)
 * @param block the block being recovered
 * @param recoveryId the new recovery id; must be strictly greater than the
 *        replica's generation stamp and any in-progress recovery id
 * @throws IOException on inconsistent replica state
 * @throws MustStopExistingWriter if a live writer holds the replica and
 *         must be stopped before recovery can proceed
 */
static ReplicaRecoveryInfo initReplicaRecoveryImpl(String bpid, ReplicaMap map, Block block, long recoveryId) throws IOException, MustStopExistingWriter {
  final ReplicaInfo replica = map.get(bpid, block.getBlockId());
  LOG.info("initReplicaRecovery: " + block + ", recoveryId=" + recoveryId + ", replica=" + replica);

  // No replica for this block on this node: nothing to recover.
  if (replica == null) {
    return null;
  }

  // A replica still being written (TEMPORARY or RBW) needs its writer
  // stopped before recovery may continue.
  final ReplicaState state = replica.getState();
  if (state == ReplicaState.TEMPORARY || state == ReplicaState.RBW) {
    final ReplicaInPipeline pipeline = (ReplicaInPipeline) replica;
    if (!pipeline.attemptToSetWriter(null, Thread.currentThread())) {
      throw new MustStopExistingWriter(pipeline);
    }
    // Invariant check: bytes on disk must cover the visible length.
    if (replica.getBytesOnDisk() < replica.getVisibleLength()) {
      throw new IOException("THIS IS NOT SUPPOSED TO HAPPEN:" + " getBytesOnDisk() < getVisibleLength(), rip=" + replica);
    }
    // Verify the replica's on-disk files are consistent.
    checkReplicaFiles(replica);
  }

  // The replica's generation stamp must not be behind the block's.
  if (replica.getGenerationStamp() < block.getGenerationStamp()) {
    throw new IOException("replica.getGenerationStamp() < block.getGenerationStamp(), block=" + block + ", replica=" + replica);
  }

  // The recovery id must be strictly newer than the generation stamp.
  if (replica.getGenerationStamp() >= recoveryId) {
    throw new IOException("THIS IS NOT SUPPOSED TO HAPPEN:" + " replica.getGenerationStamp() >= recoveryId = " + recoveryId + ", block=" + block + ", replica=" + replica);
  }

  if (replica.getState() != ReplicaState.RUR) {
    // First recovery attempt: build an RUR replica from the existing one,
    // stamp it with the recovery id, and register it in the map.
    final ReplicaInfo converted = new ReplicaBuilder(ReplicaState.RUR)
        .from(replica)
        .setRecoveryId(recoveryId)
        .build();
    map.add(bpid, converted);
    LOG.info("initReplicaRecovery: changing replica state for " + block + " from " + replica.getState() + " to " + converted.getState());
    return converted.createInfo();
  }

  // Already under recovery: a newer recovery id supersedes the old one;
  // an equal or older id means another recovery is still in progress.
  final ReplicaInfo underRecovery = replica;
  if (underRecovery.getRecoveryID() >= recoveryId) {
    throw new RecoveryInProgressException("rur.getRecoveryID() >= recoveryId = " + recoveryId + ", block=" + block + ", rur=" + underRecovery);
  }
  final long previousRecoveryId = underRecovery.getRecoveryID();
  underRecovery.setRecoveryID(recoveryId);
  LOG.info("initReplicaRecovery: update recovery id for " + block + " from " + previousRecoveryId + " to " + recoveryId);
  return underRecovery.createInfo();
}
Example usage of org.apache.hadoop.hdfs.server.datanode.ReplicaBuilder in the Apache Hadoop project.
From the class FsDatasetImplTestUtils, method createReplicaWaitingToBeRecovered.
/**
 * Creates a replica in RWR (replica-waiting-to-be-recovered) state for the
 * given block on the given volume, registers it in the dataset's volume map,
 * and returns it.
 *
 * @param volume the volume to create the replica on; must be an
 *        {@link FsVolumeImpl} (the cast will throw otherwise)
 * @param eb the block (with block pool id) to create the replica for
 * @return the newly created RWR replica
 * @throws IOException if the RBW file cannot be created on the volume
 */
@Override
public Replica createReplicaWaitingToBeRecovered(FsVolumeSpi volume, ExtendedBlock eb) throws IOException {
  final FsVolumeImpl vol = (FsVolumeImpl) volume;
  final String bpid = eb.getBlockPoolId();
  final Block block = eb.getLocalBlock();
  // Reuse `block` rather than calling eb.getLocalBlock() a second time.
  // Note: createRbwFile creates the on-disk file; its parent directory
  // becomes the replica's directory.
  final ReplicaInfo rwr = new ReplicaBuilder(ReplicaState.RWR)
      .setBlock(block)
      .setFsVolume(volume)
      .setDirectoryToUse(vol.createRbwFile(bpid, block).getParentFile())
      .build();
  dataset.volumeMap.add(bpid, rwr);
  return rwr;
}
Aggregations