Example 11 with ReplicaNotFoundException

Use of org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException in project hadoop by apache.

From class FsDatasetImpl, method updateReplicaUnderRecovery.

// FsDatasetSpi
@Override
public Replica updateReplicaUnderRecovery(final ExtendedBlock oldBlock, final long recoveryId, final long newBlockId, final long newlength) throws IOException {
    try (AutoCloseableLock lock = datasetLock.acquire()) {
        //get replica
        final String bpid = oldBlock.getBlockPoolId();
        final ReplicaInfo replica = volumeMap.get(bpid, oldBlock.getBlockId());
        LOG.info("updateReplica: " + oldBlock + ", recoveryId=" + recoveryId + ", length=" + newlength + ", replica=" + replica);
        //check replica
        if (replica == null) {
            throw new ReplicaNotFoundException(oldBlock);
        }
        //check replica state
        if (replica.getState() != ReplicaState.RUR) {
            throw new IOException("replica.getState() != " + ReplicaState.RUR + ", replica=" + replica);
        }
        //check replica's byte on disk
        if (replica.getBytesOnDisk() != oldBlock.getNumBytes()) {
            throw new IOException("THIS IS NOT SUPPOSED TO HAPPEN:" + " replica.getBytesOnDisk() != block.getNumBytes(), block=" + oldBlock + ", replica=" + replica);
        }
        //check replica files before update
        checkReplicaFiles(replica);
        //update replica
        final ReplicaInfo finalized = updateReplicaUnderRecovery(oldBlock.getBlockPoolId(), replica, recoveryId, newBlockId, newlength);
        boolean copyTruncate = newBlockId != oldBlock.getBlockId();
        if (!copyTruncate) {
            assert finalized.getBlockId() == oldBlock.getBlockId()
                && finalized.getGenerationStamp() == recoveryId
                && finalized.getNumBytes() == newlength
                : "Replica information mismatched: oldBlock=" + oldBlock
                    + ", recoveryId=" + recoveryId + ", newlength=" + newlength
                    + ", newBlockId=" + newBlockId + ", finalized=" + finalized;
        } else {
            assert finalized.getBlockId() == oldBlock.getBlockId()
                && finalized.getGenerationStamp() == oldBlock.getGenerationStamp()
                && finalized.getNumBytes() == oldBlock.getNumBytes()
                : "Finalized and old information mismatched: oldBlock=" + oldBlock
                    + ", genStamp=" + oldBlock.getGenerationStamp()
                    + ", len=" + oldBlock.getNumBytes() + ", finalized=" + finalized;
        }
        //check replica files after update
        checkReplicaFiles(finalized);
        return finalized;
    }
}
Also used : ReplicaInfo(org.apache.hadoop.hdfs.server.datanode.ReplicaInfo) AutoCloseableLock(org.apache.hadoop.util.AutoCloseableLock) ReplicaNotFoundException(org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException) IOException(java.io.IOException) MultipleIOException(org.apache.hadoop.io.MultipleIOException)
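For context, callers hit the ReplicaNotFoundException path above when the block pool / block id pair is not in the volume map. Below is a minimal caller-side sketch based only on the FsDatasetSpi signature shown above; the helper name and the idea of mapping the exception to a boolean are illustrative, not Hadoop code.

import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;

public class UpdateReplicaSketch {
    // Hypothetical helper: attempts the update and reports whether the
    // replica under recovery was present on this dataset.
    static boolean tryUpdate(FsDatasetSpi<?> dataset, ExtendedBlock oldBlock,
                             long recoveryId, long newBlockId, long newLength)
            throws IOException {
        try {
            dataset.updateReplicaUnderRecovery(oldBlock, recoveryId, newBlockId, newLength);
            return true;
        } catch (ReplicaNotFoundException e) {
            // The block was not found in the volume map for its block pool.
            return false;
        }
    }
}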

Example 12 with ReplicaNotFoundException

Use of org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException in project hadoop by apache.

From class FsDatasetImpl, method recoverRbwImpl.

private ReplicaHandler recoverRbwImpl(ReplicaInPipeline rbw, ExtendedBlock b, long newGS, long minBytesRcvd, long maxBytesRcvd) throws IOException {
    try (AutoCloseableLock lock = datasetLock.acquire()) {
        // check generation stamp
        long replicaGenerationStamp = rbw.getGenerationStamp();
        if (replicaGenerationStamp < b.getGenerationStamp() || replicaGenerationStamp > newGS) {
            throw new ReplicaNotFoundException(ReplicaNotFoundException.UNEXPECTED_GS_REPLICA + b + ". Expected GS range is [" + b.getGenerationStamp() + ", " + newGS + "].");
        }
        // check replica length
        long bytesAcked = rbw.getBytesAcked();
        long numBytes = rbw.getNumBytes();
        if (bytesAcked < minBytesRcvd || numBytes > maxBytesRcvd) {
            throw new ReplicaNotFoundException("Unmatched length replica " + rbw + ": BytesAcked = " + bytesAcked + " BytesRcvd = " + numBytes + " are not in the range of [" + minBytesRcvd + ", " + maxBytesRcvd + "].");
        }
        FsVolumeReference ref = rbw.getReplicaInfo().getVolume().obtainReference();
        try {
            // Truncate the potentially corrupt portion: data written after the
            // acked length was never acknowledged to the client, so any
            // corruption there can go unnoticed. Roll back to the acked length.
            if (numBytes > bytesAcked) {
                rbw.getReplicaInfo().truncateBlock(bytesAcked);
                rbw.setNumBytes(bytesAcked);
                rbw.setLastChecksumAndDataLen(bytesAcked, null);
            }
            // bump the replica's generation stamp to newGS
            rbw.getReplicaInfo().bumpReplicaGS(newGS);
        } catch (IOException e) {
            IOUtils.cleanup(null, ref);
            throw e;
        }
        return new ReplicaHandler(rbw, ref);
    }
}
Also used : FsVolumeReference(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference) AutoCloseableLock(org.apache.hadoop.util.AutoCloseableLock) ReplicaNotFoundException(org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException) IOException(java.io.IOException) MultipleIOException(org.apache.hadoop.io.MultipleIOException) ReplicaHandler(org.apache.hadoop.hdfs.server.datanode.ReplicaHandler)
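The two guards above are what turn an out-of-range generation stamp or replica length into a ReplicaNotFoundException. Here is a standalone sketch of the same checks on plain long values; the class and method names are illustrative, not part of the Hadoop API.

import org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException;

public class RecoverRbwChecksSketch {
    // Mirrors the generation-stamp window check: the replica's GS must lie
    // in [blockGS, newGS] for recovery to proceed.
    static void checkGenerationStamp(long replicaGS, long blockGS, long newGS)
            throws ReplicaNotFoundException {
        if (replicaGS < blockGS || replicaGS > newGS) {
            throw new ReplicaNotFoundException("unexpected GS " + replicaGS
                    + ", expected range [" + blockGS + ", " + newGS + "]");
        }
    }

    // Mirrors the length check: the acked bytes must reach minBytesRcvd and
    // the bytes on disk must not exceed maxBytesRcvd.
    static void checkLength(long bytesAcked, long numBytes,
                            long minBytesRcvd, long maxBytesRcvd)
            throws ReplicaNotFoundException {
        if (bytesAcked < minBytesRcvd || numBytes > maxBytesRcvd) {
            throw new ReplicaNotFoundException("unmatched length: acked="
                    + bytesAcked + ", onDisk=" + numBytes
                    + ", expected range [" + minBytesRcvd + ", " + maxBytesRcvd + "]");
        }
    }
}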

Example 13 with ReplicaNotFoundException

Use of org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException in project hadoop by apache.

From class FsDatasetImpl, method moveBlockAcrossVolumes.

/**
   * Moves a given block from one volume to another volume. This is used by disk
   * balancer.
   *
   * @param block       - ExtendedBlock
   * @param destination - Destination volume
   * @return Old replica info
   */
@Override
public ReplicaInfo moveBlockAcrossVolumes(ExtendedBlock block, FsVolumeSpi destination) throws IOException {
    ReplicaInfo replicaInfo = getReplicaInfo(block);
    if (replicaInfo.getState() != ReplicaState.FINALIZED) {
        throw new ReplicaNotFoundException(ReplicaNotFoundException.UNFINALIZED_REPLICA + block);
    }
    FsVolumeReference volumeRef = null;
    try (AutoCloseableLock lock = datasetLock.acquire()) {
        volumeRef = destination.obtainReference();
    }
    try {
        moveBlock(block, replicaInfo, volumeRef);
    } finally {
        if (volumeRef != null) {
            volumeRef.close();
        }
    }
    return replicaInfo;
}
Also used : FsVolumeReference(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference) ReplicaInfo(org.apache.hadoop.hdfs.server.datanode.ReplicaInfo) ReplicaNotFoundException(org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException) AutoCloseableLock(org.apache.hadoop.util.AutoCloseableLock)
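As a usage sketch, a disk-balancer-style caller picks a destination volume and treats ReplicaNotFoundException as "nothing to move". This assumes moveBlockAcrossVolumes is exposed through FsDatasetSpi, as the @Override above suggests; the helper below is illustrative, not Hadoop code.

import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;

public class MoveBlockSketch {
    // Hypothetical helper: returns true if the block was moved, false if it
    // is missing or not yet finalized on this DataNode.
    static boolean tryMove(FsDatasetSpi<?> dataset, ExtendedBlock block,
                           FsVolumeSpi destination) throws IOException {
        try {
            dataset.moveBlockAcrossVolumes(block, destination);
            return true;
        } catch (ReplicaNotFoundException e) {
            // Thrown both for a missing replica and for an unfinalized one.
            return false;
        }
    }
}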

Example 14 with ReplicaNotFoundException

Use of org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException in project hadoop by apache.

From class MiniDFSCluster, method corruptBlockOnDataNodesHelper.

private int corruptBlockOnDataNodesHelper(ExtendedBlock block, boolean deleteBlockFile) throws IOException {
    int blocksCorrupted = 0;
    for (DataNode dn : getDataNodes()) {
        try {
            MaterializedReplica replica = getFsDatasetTestUtils(dn).getMaterializedReplica(block);
            if (deleteBlockFile) {
                replica.deleteData();
            } else {
                replica.corruptData();
            }
            blocksCorrupted++;
        } catch (ReplicaNotFoundException e) {
            // Ignore: this DataNode does not hold a replica of the block.
        }
    }
    return blocksCorrupted;
}
Also used : DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) MaterializedReplica(org.apache.hadoop.hdfs.server.datanode.FsDatasetTestUtils.MaterializedReplica) ReplicaNotFoundException(org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException)
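A test typically reaches this helper through MiniDFSCluster's public corrupt-block API. Here is a rough test-driver sketch, assuming a public corruptBlockOnDataNodes(ExtendedBlock) wrapper delegates to the helper above and that the DFSTestUtil helpers behave as named; the file path and sizes are arbitrary.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

public class CorruptBlockSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
                .numDataNodes(3).build();
        try {
            cluster.waitActive();
            DistributedFileSystem fs = cluster.getFileSystem();
            Path file = new Path("/corrupt-test");
            // Create a small replicated file and grab its first block.
            DFSTestUtil.createFile(fs, file, 1024L, (short) 3, 0L);
            ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, file);
            // Assumed public wrapper around corruptBlockOnDataNodesHelper;
            // returns how many replicas were actually corrupted. DataNodes
            // without a replica are skipped via ReplicaNotFoundException.
            int corrupted = cluster.corruptBlockOnDataNodes(block);
            System.out.println("corrupted replicas: " + corrupted);
        } finally {
            cluster.shutdown();
        }
    }
}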

Aggregations

ReplicaNotFoundException (org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException): 14
ReplicaInfo (org.apache.hadoop.hdfs.server.datanode.ReplicaInfo): 10
IOException (java.io.IOException): 9
AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock): 8
MultipleIOException (org.apache.hadoop.io.MultipleIOException): 7
ReplicaInPipeline (org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline): 4
FsVolumeReference (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference): 4
ReplicaAlreadyExistsException (org.apache.hadoop.hdfs.server.datanode.ReplicaAlreadyExistsException): 3
File (java.io.File): 2
ReplicaHandler (org.apache.hadoop.hdfs.server.datanode.ReplicaHandler): 2
EOFException (java.io.EOFException): 1
FileNotFoundException (java.io.FileNotFoundException): 1
InterruptedIOException (java.io.InterruptedIOException): 1
RandomAccessFile (java.io.RandomAccessFile): 1
LinkedList (java.util.LinkedList): 1
DfsClientConf (org.apache.hadoop.hdfs.client.impl.DfsClientConf): 1
BlockLocalPathInfo (org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo): 1
ClientDatanodeProtocol (org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol): 1
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 1
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 1