Example 31 with ReplicaInfo

Use of org.apache.hadoop.hdfs.server.datanode.ReplicaInfo in project hadoop by apache: class FsDatasetImpl, method getTmpInputStreams.

/**
   * Returns open input streams for the block file and its metadata file,
   * positioned at the given offsets. The caller is responsible for closing
   * the returned ReplicaInputStreams, which also releases the volume
   * reference acquired below.
   */
// FsDatasetSpi
@Override
public ReplicaInputStreams getTmpInputStreams(ExtendedBlock b, long blkOffset, long metaOffset) throws IOException {
    try (AutoCloseableLock lock = datasetLock.acquire()) {
        ReplicaInfo info = getReplicaInfo(b);
        FsVolumeReference ref = info.getVolume().obtainReference();
        try {
            InputStream blockInStream = info.getDataInputStream(blkOffset);
            try {
                InputStream metaInStream = info.getMetadataInputStream(metaOffset);
                return new ReplicaInputStreams(blockInStream, metaInStream, ref, datanode.getFileIoProvider());
            } catch (IOException e) {
                IOUtils.cleanup(null, blockInStream);
                throw e;
            }
        } catch (IOException e) {
            IOUtils.cleanup(null, ref);
            throw e;
        }
    }
}
Also used: ReplicaInputStreams (org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaInputStreams), FsVolumeReference (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference), ReplicaInfo (org.apache.hadoop.hdfs.server.datanode.ReplicaInfo), LengthInputStream (org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream), FileInputStream (java.io.FileInputStream), InputStream (java.io.InputStream), AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock), IOException (java.io.IOException), MultipleIOException (org.apache.hadoop.io.MultipleIOException)
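
A minimal caller sketch, assuming a hypothetical helper method; the ReplicaInputStreams accessors used here (getDataIn, getChecksumIn) are its standard getters, and ReplicaInputStreams is Closeable, so try-with-resources closes both streams and releases the FsVolumeReference acquired above:

// Hypothetical caller: read the head of a replica and its metadata file.
// Closing the ReplicaInputStreams releases the block stream, the checksum
// stream, and the volume reference in one step.
void readTmpReplica(FsDatasetImpl dataset, ExtendedBlock b) throws IOException {
    try (ReplicaInputStreams streams =
            dataset.getTmpInputStreams(b, 0 /* blkOffset */, 0 /* metaOffset */)) {
        byte[] buf = new byte[4096];
        int n = streams.getDataIn().read(buf);
        // ... verify buf[0..n) against the checksums from streams.getChecksumIn()
    }
}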

Example 32 with ReplicaInfo

Use of org.apache.hadoop.hdfs.server.datanode.ReplicaInfo in project hadoop by apache: class FsDatasetImpl, method recoverCheck.

private ReplicaInfo recoverCheck(ExtendedBlock b, long newGS, long expectedBlockLen) throws IOException, MustStopExistingWriter {
    ReplicaInfo replicaInfo = getReplicaInfo(b.getBlockPoolId(), b.getBlockId());
    // check state
    if (replicaInfo.getState() != ReplicaState.FINALIZED && replicaInfo.getState() != ReplicaState.RBW) {
        throw new ReplicaNotFoundException(ReplicaNotFoundException.UNFINALIZED_AND_NONRBW_REPLICA + replicaInfo);
    }
    // check generation stamp
    long replicaGenerationStamp = replicaInfo.getGenerationStamp();
    if (replicaGenerationStamp < b.getGenerationStamp() || replicaGenerationStamp > newGS) {
        throw new ReplicaNotFoundException(ReplicaNotFoundException.UNEXPECTED_GS_REPLICA + replicaGenerationStamp + ". Expected GS range is [" + b.getGenerationStamp() + ", " + newGS + "].");
    }
    // stop the previous writer before checking the replica's length
    long replicaLen = replicaInfo.getNumBytes();
    if (replicaInfo.getState() == ReplicaState.RBW) {
        ReplicaInPipeline rbw = (ReplicaInPipeline) replicaInfo;
        if (!rbw.attemptToSetWriter(null, Thread.currentThread())) {
            throw new MustStopExistingWriter(rbw);
        }
        // check length: bytesRcvd, bytesOnDisk, and bytesAcked should be the same
        if (replicaLen != rbw.getBytesOnDisk() || replicaLen != rbw.getBytesAcked()) {
            throw new ReplicaAlreadyExistsException("RBW replica " + replicaInfo + ": bytesRcvd(" + rbw.getNumBytes() + "), bytesOnDisk(" + rbw.getBytesOnDisk() + "), and bytesAcked(" + rbw.getBytesAcked() + ") are not the same.");
        }
    }
    // check block length
    if (replicaLen != expectedBlockLen) {
        throw new IOException("Corrupted replica " + replicaInfo + " with a length of " + replicaLen + " expected length is " + expectedBlockLen);
    }
    return replicaInfo;
}
Also used: ReplicaInfo (org.apache.hadoop.hdfs.server.datanode.ReplicaInfo), ReplicaNotFoundException (org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException), ReplicaAlreadyExistsException (org.apache.hadoop.hdfs.server.datanode.ReplicaAlreadyExistsException), IOException (java.io.IOException), MultipleIOException (org.apache.hadoop.io.MultipleIOException), ReplicaInPipeline (org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline)
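
A note on MustStopExistingWriter: recoverCheck throws it rather than joining the stale writer thread while the dataset lock is held. Its callers retry in a loop and stop the writer outside the lock; a simplified sketch of that pattern, assuming the surrounding FsDatasetImpl fields:

// Simplified caller-side retry loop: stop the stale writer without
// holding the dataset lock, then re-run the check under the lock.
while (true) {
    try {
        try (AutoCloseableLock lock = datasetLock.acquire()) {
            ReplicaInfo replicaInfo = recoverCheck(b, newGS, expectedBlockLen);
            // ... bump the generation stamp and finish the recovery ...
            return;
        }
    } catch (MustStopExistingWriter e) {
        // Interrupt and join the previous writer thread outside the lock.
        e.getReplicaInPipeline().stopWriter(
                datanode.getDnConf().getXceiverStopTimeout());
    }
}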

Example 33 with ReplicaInfo

Use of org.apache.hadoop.hdfs.server.datanode.ReplicaInfo in project hadoop by apache: class FsDatasetImpl, method getBlockInputStream.

// FsDatasetSpi
@Override
public InputStream getBlockInputStream(ExtendedBlock b, long seekOffset) throws IOException {
    ReplicaInfo info;
    synchronized (this) {
        info = volumeMap.get(b.getBlockPoolId(), b.getLocalBlock());
    }
    if (info != null && info.getVolume().isTransientStorage()) {
        ramDiskReplicaTracker.touch(b.getBlockPoolId(), b.getBlockId());
        datanode.getMetrics().incrRamDiskBlocksReadHits();
    }
    if (info != null && info.blockDataExists()) {
        return info.getDataInputStream(seekOffset);
    } else {
        throw new IOException("No data exists for block " + b);
    }
}
Also used: ReplicaInfo (org.apache.hadoop.hdfs.server.datanode.ReplicaInfo), IOException (java.io.IOException), MultipleIOException (org.apache.hadoop.io.MultipleIOException)
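
A short hypothetical caller; getBlockInputStream applies the seek offset itself, so the caller only reads forward and closes the stream:

// Hypothetical caller: copy a replica's data starting at a given offset.
void copyBlockData(FsDatasetImpl dataset, ExtendedBlock b, long offset,
        OutputStream out) throws IOException {
    try (InputStream in = dataset.getBlockInputStream(b, offset)) {
        // org.apache.hadoop.io.IOUtils; 'false' leaves 'out' open for the caller
        IOUtils.copyBytes(in, out, 4096, false);
    }
}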

Example 34 with ReplicaInfo

Use of org.apache.hadoop.hdfs.server.datanode.ReplicaInfo in project hadoop by apache: class FsDatasetImpl, method convertTemporaryToRbw.

// FsDatasetSpi
@Override
public ReplicaInPipeline convertTemporaryToRbw(final ExtendedBlock b) throws IOException {
    try (AutoCloseableLock lock = datasetLock.acquire()) {
        final long blockId = b.getBlockId();
        final long expectedGs = b.getGenerationStamp();
        final long visible = b.getNumBytes();
        LOG.info("Convert " + b + " from Temporary to RBW, visible length=" + visible);
        final ReplicaInfo temp;
        {
            // get replica
            final ReplicaInfo r = volumeMap.get(b.getBlockPoolId(), blockId);
            if (r == null) {
                throw new ReplicaNotFoundException(ReplicaNotFoundException.NON_EXISTENT_REPLICA + b);
            }
            // check the replica's state
            if (r.getState() != ReplicaState.TEMPORARY) {
                throw new ReplicaAlreadyExistsException("r.getState() != ReplicaState.TEMPORARY, r=" + r);
            }
            temp = r;
        }
        // check generation stamp
        if (temp.getGenerationStamp() != expectedGs) {
            throw new ReplicaAlreadyExistsException("temp.getGenerationStamp() != expectedGs = " + expectedGs + ", temp=" + temp);
        }
        // TODO: check writer?
        // set writer to the current thread
        // temp.setWriter(Thread.currentThread());
        // check length
        final long numBytes = temp.getNumBytes();
        if (numBytes < visible) {
            throw new IOException(numBytes + " = numBytes < visible = " + visible + ", temp=" + temp);
        }
        // check volume
        final FsVolumeImpl v = (FsVolumeImpl) temp.getVolume();
        if (v == null) {
            throw new IOException("r.getVolume() = null, temp=" + temp);
        }
        final ReplicaInPipeline rbw = v.convertTemporaryToRbw(b, temp);
        if (rbw.getState() != ReplicaState.RBW) {
            throw new IOException("Expected replica state: " + ReplicaState.RBW + " obtained " + rbw.getState() + " for converting block " + b.getBlockId());
        }
        // overwrite the RBW in the volume map
        volumeMap.add(b.getBlockPoolId(), rbw.getReplicaInfo());
        return rbw;
    }
}
Also used: ReplicaInfo (org.apache.hadoop.hdfs.server.datanode.ReplicaInfo), AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock), ReplicaNotFoundException (org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException), ReplicaAlreadyExistsException (org.apache.hadoop.hdfs.server.datanode.ReplicaAlreadyExistsException), IOException (java.io.IOException), MultipleIOException (org.apache.hadoop.io.MultipleIOException), ReplicaInPipeline (org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline)
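
For context, a hedged sketch of where this conversion sits in a replica's lifecycle: a TEMPORARY replica (created for block transfers) is promoted to RBW so a write pipeline can take it over, after which the caller registers itself as the writer. The surrounding steps are illustrative, not the exact DataNode call site:

// Illustrative pipeline step: promote the temporary replica, then claim
// it for the current thread before streaming further bytes.
ReplicaInPipeline rbw = dataset.convertTemporaryToRbw(block);
// convertTemporaryToRbw has verified RBW state and re-registered the
// replica in the volume map under the same block pool id.
if (!rbw.attemptToSetWriter(null, Thread.currentThread())) {
    throw new IOException("Another writer already owns " + rbw);
}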

Example 35 with ReplicaInfo

Use of org.apache.hadoop.hdfs.server.datanode.ReplicaInfo in project hadoop by apache: class FsDatasetImpl, method updateReplicaUnderRecovery.

// FsDatasetSpi
@Override
public Replica updateReplicaUnderRecovery(final ExtendedBlock oldBlock, final long recoveryId, final long newBlockId, final long newlength) throws IOException {
    try (AutoCloseableLock lock = datasetLock.acquire()) {
        // get replica
        final String bpid = oldBlock.getBlockPoolId();
        final ReplicaInfo replica = volumeMap.get(bpid, oldBlock.getBlockId());
        LOG.info("updateReplica: " + oldBlock + ", recoveryId=" + recoveryId + ", length=" + newlength + ", replica=" + replica);
        // check replica
        if (replica == null) {
            throw new ReplicaNotFoundException(oldBlock);
        }
        // check replica state
        if (replica.getState() != ReplicaState.RUR) {
            throw new IOException("replica.getState() != " + ReplicaState.RUR + ", replica=" + replica);
        }
        // check the replica's bytes on disk
        if (replica.getBytesOnDisk() != oldBlock.getNumBytes()) {
            throw new IOException("THIS IS NOT SUPPOSED TO HAPPEN:" + " replica.getBytesOnDisk() != block.getNumBytes(), block=" + oldBlock + ", replica=" + replica);
        }
        // check replica files before update
        checkReplicaFiles(replica);
        // update replica
        final ReplicaInfo finalized = updateReplicaUnderRecovery(oldBlock.getBlockPoolId(), replica, recoveryId, newBlockId, newlength);
        boolean copyTruncate = newBlockId != oldBlock.getBlockId();
        if (!copyTruncate) {
            assert finalized.getBlockId() == oldBlock.getBlockId() && finalized.getGenerationStamp() == recoveryId && finalized.getNumBytes() == newlength : "Replica information mismatched: oldBlock=" + oldBlock + ", recoveryId=" + recoveryId + ", newlength=" + newlength + ", newBlockId=" + newBlockId + ", finalized=" + finalized;
        } else {
            assert finalized.getBlockId() == oldBlock.getBlockId() && finalized.getGenerationStamp() == oldBlock.getGenerationStamp() && finalized.getNumBytes() == oldBlock.getNumBytes() : "Finalized and old information mismatched: oldBlock=" + oldBlock + ", genStamp=" + oldBlock.getGenerationStamp() + ", len=" + oldBlock.getNumBytes() + ", finalized=" + finalized;
        }
        // check replica files after update
        checkReplicaFiles(finalized);
        return finalized;
    }
}
Also used: ReplicaInfo (org.apache.hadoop.hdfs.server.datanode.ReplicaInfo), AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock), ReplicaNotFoundException (org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException), IOException (java.io.IOException), MultipleIOException (org.apache.hadoop.io.MultipleIOException)
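
How this method is reached, hedged and simplified: during NameNode-coordinated block recovery, the primary DataNode agrees on a length across the surviving replicas and then asks each DataNode (via InterDatanodeProtocol) to commit its replica-under-recovery. Variable names below are illustrative:

// Illustrative commit step of block recovery. When newBlockId equals the
// old block id, the replica is truncated and finalized in place; a
// different id takes the copy-truncate path the asserts above distinguish.
Replica finalized = dataset.updateReplicaUnderRecovery(
        oldBlock,                // replica state before recovery
        recoveryId,              // new generation stamp issued by the NameNode
        oldBlock.getBlockId(),   // same id => in-place truncate
        newLength);              // length agreed across replicas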

Aggregations

ReplicaInfo (org.apache.hadoop.hdfs.server.datanode.ReplicaInfo): 48 usages
AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock): 27 usages
IOException (java.io.IOException): 19 usages
MultipleIOException (org.apache.hadoop.io.MultipleIOException): 16 usages
File (java.io.File): 11 usages
ReplicaInPipeline (org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline): 10 usages
ReplicaNotFoundException (org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException): 10 usages
RandomAccessFile (java.io.RandomAccessFile): 7 usages
ReplicaBuilder (org.apache.hadoop.hdfs.server.datanode.ReplicaBuilder): 7 usages
FsVolumeReference (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference): 7 usages
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 6 usages
ReplicaAlreadyExistsException (org.apache.hadoop.hdfs.server.datanode.ReplicaAlreadyExistsException): 5 usages
Block (org.apache.hadoop.hdfs.protocol.Block): 4 usages
ReplicaHandler (org.apache.hadoop.hdfs.server.datanode.ReplicaHandler): 4 usages
FileInputStream (java.io.FileInputStream): 3 usages
FileNotFoundException (java.io.FileNotFoundException): 3 usages
ArrayList (java.util.ArrayList): 3 usages
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 3 usages
Test (org.junit.Test): 3 usages
HashMap (java.util.HashMap): 2 usages