Use of org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException in project hadoop by apache.
The class FsDatasetImpl, method updateReplicaUnderRecovery.
// FsDatasetSpi
@Override
public Replica updateReplicaUnderRecovery(final ExtendedBlock oldBlock,
    final long recoveryId, final long newBlockId, final long newlength)
    throws IOException {
  try (AutoCloseableLock lock = datasetLock.acquire()) {
    // get replica
    final String bpid = oldBlock.getBlockPoolId();
    final ReplicaInfo replica = volumeMap.get(bpid, oldBlock.getBlockId());
    LOG.info("updateReplica: " + oldBlock + ", recoveryId=" + recoveryId
        + ", length=" + newlength + ", replica=" + replica);

    // check replica
    if (replica == null) {
      throw new ReplicaNotFoundException(oldBlock);
    }

    // check replica state
    if (replica.getState() != ReplicaState.RUR) {
      throw new IOException(
          "replica.getState() != " + ReplicaState.RUR + ", replica=" + replica);
    }

    // check replica's bytes on disk
    if (replica.getBytesOnDisk() != oldBlock.getNumBytes()) {
      throw new IOException("THIS IS NOT SUPPOSED TO HAPPEN:"
          + " replica.getBytesOnDisk() != block.getNumBytes(), block="
          + oldBlock + ", replica=" + replica);
    }

    // check replica files before update
    checkReplicaFiles(replica);

    // update replica
    final ReplicaInfo finalized = updateReplicaUnderRecovery(
        oldBlock.getBlockPoolId(), replica, recoveryId, newBlockId, newlength);

    boolean copyTruncate = newBlockId != oldBlock.getBlockId();
    if (!copyTruncate) {
      assert finalized.getBlockId() == oldBlock.getBlockId()
          && finalized.getGenerationStamp() == recoveryId
          && finalized.getNumBytes() == newlength
          : "Replica information mismatched: oldBlock=" + oldBlock
              + ", recoveryId=" + recoveryId + ", newlength=" + newlength
              + ", newBlockId=" + newBlockId + ", finalized=" + finalized;
    } else {
      assert finalized.getBlockId() == oldBlock.getBlockId()
          && finalized.getGenerationStamp() == oldBlock.getGenerationStamp()
          && finalized.getNumBytes() == oldBlock.getNumBytes()
          : "Finalized and old information mismatched: oldBlock=" + oldBlock
              + ", genStamp=" + oldBlock.getGenerationStamp()
              + ", len=" + oldBlock.getNumBytes() + ", finalized=" + finalized;
    }

    // check replica files after update
    checkReplicaFiles(finalized);
    return finalized;
  }
}
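The two assert branches hinge on one decision: the recovery is a "copy-truncate" when the coordinator assigned a new block ID, in which case the original replica is returned unchanged and the truncated data lives under the new ID; otherwise the replica is truncated in place and refreshed with the recovery ID and new length. Below is a minimal, self-contained sketch of that decision; the class and method names are hypothetical illustrations, not Hadoop code.

// Standalone sketch of the copy-truncate decision in updateReplicaUnderRecovery.
// All names here are made up; only the decision logic mirrors the snippet above.
public class CopyTruncateSketch {

  // A truncate recovery is "copy-truncate" when it targets a new block ID,
  // so the original replica is preserved and the truncated copy is finalized
  // under the new ID.
  static boolean isCopyTruncate(long oldBlockId, long newBlockId) {
    return newBlockId != oldBlockId;
  }

  public static void main(String[] args) {
    System.out.println(isCopyTruncate(1001L, 1001L)); // false: in-place truncate
    System.out.println(isCopyTruncate(1001L, 1002L)); // true: copy under new ID
  }
}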
Use of org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException in project hadoop by apache.
The class FsDatasetImpl, method recoverRbwImpl.
private ReplicaHandler recoverRbwImpl(ReplicaInPipeline rbw, ExtendedBlock b,
    long newGS, long minBytesRcvd, long maxBytesRcvd) throws IOException {
  try (AutoCloseableLock lock = datasetLock.acquire()) {
    // check generation stamp
    long replicaGenerationStamp = rbw.getGenerationStamp();
    if (replicaGenerationStamp < b.getGenerationStamp()
        || replicaGenerationStamp > newGS) {
      throw new ReplicaNotFoundException(
          ReplicaNotFoundException.UNEXPECTED_GS_REPLICA + b
              + ". Expected GS range is [" + b.getGenerationStamp()
              + ", " + newGS + "].");
    }

    // check replica length
    long bytesAcked = rbw.getBytesAcked();
    long numBytes = rbw.getNumBytes();
    if (bytesAcked < minBytesRcvd || numBytes > maxBytesRcvd) {
      throw new ReplicaNotFoundException("Unmatched length replica " + rbw
          + ": BytesAcked = " + bytesAcked + " BytesRcvd = " + numBytes
          + " are not in the range of [" + minBytesRcvd + ", "
          + maxBytesRcvd + "].");
    }

    FsVolumeReference ref = rbw.getReplicaInfo().getVolume().obtainReference();
    try {
      // Truncate the potentially corrupt portion: any corrupt data written
      // after the acked length can go unnoticed.
      if (numBytes > bytesAcked) {
        rbw.getReplicaInfo().truncateBlock(bytesAcked);
        rbw.setNumBytes(bytesAcked);
        rbw.setLastChecksumAndDataLen(bytesAcked, null);
      }
      // bump the replica's generation stamp to newGS
      rbw.getReplicaInfo().bumpReplicaGS(newGS);
    } catch (IOException e) {
      IOUtils.cleanup(null, ref);
      throw e;
    }
    return new ReplicaHandler(rbw, ref);
  }
}
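The generation-stamp check above defines a recovery window: the RBW replica is usable only if its GS lies in [b.getGenerationStamp(), newGS]. The following self-contained sketch models just that window check; the class and method names are hypothetical and carry no Hadoop dependencies.

// Standalone model of the generation-stamp window check in recoverRbwImpl.
public class GsWindowSketch {

  // The replica is recoverable only if its GS lies in [blockGs, newGs]:
  // below the window it is stale; above it, a newer recovery already ran.
  static boolean inRecoveryWindow(long replicaGs, long blockGs, long newGs) {
    return replicaGs >= blockGs && replicaGs <= newGs;
  }

  public static void main(String[] args) {
    System.out.println(inRecoveryWindow(5L, 3L, 7L)); // true
    System.out.println(inRecoveryWindow(2L, 3L, 7L)); // false: stale replica
    System.out.println(inRecoveryWindow(8L, 3L, 7L)); // false: GS beyond newGS
  }
}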
Use of org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException in project hadoop by apache.
The class FsDatasetImpl, method moveBlockAcrossVolumes.
/**
 * Moves a given block from one volume to another volume. This is used by
 * the disk balancer.
 *
 * @param block the block to move
 * @param destination the volume to move the block to
 * @return the old replica info
 * @throws IOException if the move fails, including ReplicaNotFoundException
 *         when the replica is missing or not yet finalized
 */
@Override
public ReplicaInfo moveBlockAcrossVolumes(ExtendedBlock block,
    FsVolumeSpi destination) throws IOException {
  ReplicaInfo replicaInfo = getReplicaInfo(block);
  if (replicaInfo.getState() != ReplicaState.FINALIZED) {
    throw new ReplicaNotFoundException(
        ReplicaNotFoundException.UNFINALIZED_REPLICA + block);
  }
  FsVolumeReference volumeRef = null;
  try (AutoCloseableLock lock = datasetLock.acquire()) {
    volumeRef = destination.obtainReference();
  }
  try {
    moveBlock(block, replicaInfo, volumeRef);
  } finally {
    if (volumeRef != null) {
      volumeRef.close();
    }
  }
  return replicaInfo;
}
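Note the reference lifecycle: the destination volume is pinned under the dataset lock and released in a finally block, so the pin is dropped even if the move throws. A dependency-free sketch of that acquire-then-always-release pattern, with all names hypothetical:

// Standalone sketch of the volume-reference lifecycle in moveBlockAcrossVolumes.
public class VolumeRefSketch {

  // Stand-in for FsVolumeReference: a closeable pin on a volume.
  static class VolumeRef implements AutoCloseable {
    @Override
    public void close() {
      System.out.println("reference released");
    }
  }

  static void moveBlock(VolumeRef ref) {
    // A real move could throw; the caller's finally still releases the pin.
    System.out.println("block moved");
  }

  public static void main(String[] args) {
    VolumeRef ref = null;
    try {
      ref = new VolumeRef(); // acquired while holding the dataset lock
      moveBlock(ref);
    } finally {
      if (ref != null) {
        ref.close(); // always release, even if the move failed
      }
    }
  }
}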
Use of org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException in project hadoop by apache.
The class MiniDFSCluster, method corruptBlockOnDataNodesHelper.
private int corruptBlockOnDataNodesHelper(ExtendedBlock block,
    boolean deleteBlockFile) throws IOException {
  int blocksCorrupted = 0;
  for (DataNode dn : getDataNodes()) {
    try {
      MaterializedReplica replica =
          getFsDatasetTestUtils(dn).getMaterializedReplica(block);
      if (deleteBlockFile) {
        replica.deleteData();
      } else {
        replica.corruptData();
      }
      blocksCorrupted++;
    } catch (ReplicaNotFoundException e) {
      // Ignore: this DataNode does not hold a replica of the block.
    }
  }
  return blocksCorrupted;
}
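For context, a test would normally reach this helper through MiniDFSCluster's public corruption methods. The sketch below assumes a Hadoop test classpath; corruptBlockOnDataNodes and the DFSTestUtil helpers are used here as commonly seen in HDFS tests, but their exact signatures in this version are an assumption, not verified.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

public class CorruptBlockExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    try {
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();
      Path file = new Path("/corruptTest");
      DFSTestUtil.createFile(fs, file, 1024L, (short) 3, 0L);
      ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, file);
      // Corrupts the block data on every DataNode holding a replica;
      // DataNodes without one throw ReplicaNotFoundException and are skipped,
      // which is exactly the branch handled in corruptBlockOnDataNodesHelper.
      int corrupted = cluster.corruptBlockOnDataNodes(block);
      System.out.println("corrupted " + corrupted + " replicas");
    } finally {
      cluster.shutdown();
    }
  }
}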