Example 16 with ReplicaInfo

Use of org.apache.hadoop.hdfs.server.datanode.ReplicaInfo in project hadoop by apache.

The class FsDatasetImpl, method append().

// FsDatasetSpi
@Override
public ReplicaHandler append(ExtendedBlock b, long newGS, long expectedBlockLen) throws IOException {
    try (AutoCloseableLock lock = datasetLock.acquire()) {
        // check the validity of the parameter
        if (newGS < b.getGenerationStamp()) {
            throw new IOException("The new generation stamp " + newGS + " should be greater than the replica " + b + "'s generation stamp");
        }
        ReplicaInfo replicaInfo = getReplicaInfo(b);
        LOG.info("Appending to " + replicaInfo);
        if (replicaInfo.getState() != ReplicaState.FINALIZED) {
            throw new ReplicaNotFoundException(ReplicaNotFoundException.UNFINALIZED_REPLICA + b);
        }
        if (replicaInfo.getNumBytes() != expectedBlockLen) {
            throw new IOException("Corrupted replica " + replicaInfo + " with a length of " + replicaInfo.getNumBytes() + " expected length is " + expectedBlockLen);
        }
        // take a reference on the volume so it cannot be removed mid-append
        FsVolumeReference ref = replicaInfo.getVolume().obtainReference();
        ReplicaInPipeline replica = null;
        try {
            replica = append(b.getBlockPoolId(), replicaInfo, newGS, b.getNumBytes());
        } catch (IOException e) {
            // release the volume reference if the append could not be started
            IOUtils.cleanup(null, ref);
            throw e;
        }
        return new ReplicaHandler(replica, ref);
    }
}
Also used: FsVolumeReference (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference), ReplicaInfo (org.apache.hadoop.hdfs.server.datanode.ReplicaInfo), AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock), ReplicaNotFoundException (org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException), IOException (java.io.IOException), MultipleIOException (org.apache.hadoop.io.MultipleIOException), ReplicaInPipeline (org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline), ReplicaHandler (org.apache.hadoop.hdfs.server.datanode.ReplicaHandler)
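
For context, here is a minimal, hypothetical caller-side sketch of the method above. ReplicaHandler is Closeable, and closing it releases the volume reference taken inside append; the dataset variable and the block values below are placeholders, not part of the Hadoop snippet:

// Hypothetical usage, assuming an FsDatasetSpi<?> instance named "dataset".
ExtendedBlock block = new ExtendedBlock("BP-1234-example", 1001L, 512L, 100L);
// Append with a bumped generation stamp (101 > 100) and the expected length.
try (ReplicaHandler handler = dataset.append(block, 101L, 512L)) {
    // handler.getReplica() exposes the in-pipeline replica to write through;
    // closing the handler releases the FsVolumeReference obtained in append.
}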

Example 17 with ReplicaInfo

Use of org.apache.hadoop.hdfs.server.datanode.ReplicaInfo in project hadoop by apache.

The class FsDatasetImpl, method cacheBlock().

/**
   * Asynchronously attempts to cache a single block via {@link FsDatasetCache}.
   */
private void cacheBlock(String bpid, long blockId) {
    FsVolumeImpl volume;
    String blockFileName;
    long length, genstamp;
    Executor volumeExecutor;
    try (AutoCloseableLock lock = datasetLock.acquire()) {
        ReplicaInfo info = volumeMap.get(bpid, blockId);
        boolean success = false;
        try {
            if (info == null) {
                LOG.warn("Failed to cache block with id " + blockId + ", pool " + bpid + ": ReplicaInfo not found.");
                return;
            }
            if (info.getState() != ReplicaState.FINALIZED) {
                LOG.warn("Failed to cache block with id " + blockId + ", pool " + bpid + ": replica is not finalized; it is in state " + info.getState());
                return;
            }
            try {
                volume = (FsVolumeImpl) info.getVolume();
                if (volume == null) {
                    LOG.warn("Failed to cache block with id " + blockId + ", pool " + bpid + ": volume not found.");
                    return;
                }
            } catch (ClassCastException e) {
                LOG.warn("Failed to cache block with id " + blockId + ": volume was not an instance of FsVolumeImpl.");
                return;
            }
            if (volume.isTransientStorage()) {
                LOG.warn("Caching not supported on block with id " + blockId + " since the volume is backed by RAM.");
                return;
            }
            success = true;
        } finally {
            if (!success) {
                cacheManager.numBlocksFailedToCache.incrementAndGet();
            }
        }
        // capture everything needed for caching while still holding the lock
        blockFileName = info.getBlockURI().toString();
        length = info.getVisibleLength();
        genstamp = info.getGenerationStamp();
        volumeExecutor = volume.getCacheExecutor();
    }
    // submit the actual caching work outside the dataset lock
    cacheManager.cacheBlock(blockId, bpid, blockFileName, length, genstamp, volumeExecutor);
}
Also used: Executor (java.util.concurrent.Executor), ReplicaInfo (org.apache.hadoop.hdfs.server.datanode.ReplicaInfo), AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock)
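
Note the shape of cacheBlock: everything needed for caching is captured while the dataset lock is held, and the expensive work is handed to a per-volume executor only after the lock is released, so slow disk I/O never blocks other dataset operations. A generic sketch of the same copy-under-lock pattern (the lookup and executor methods are illustrative placeholders, not Hadoop API):

// Gather cheap metadata under the lock, then do I/O with the lock released.
final String fileName;
final Executor executor;
try (AutoCloseableLock l = datasetLock.acquire()) {
    fileName = lookupBlockFile();       // hypothetical metadata lookup
    executor = chooseVolumeExecutor();  // hypothetical executor selection
}
// The task runs without the dataset lock held.
executor.execute(() -> cacheFile(fileName));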

Example 18 with ReplicaInfo

Use of org.apache.hadoop.hdfs.server.datanode.ReplicaInfo in project hadoop by apache.

The class FsDatasetImpl, method recoverClose().

// FsDatasetSpi
@Override
public Replica recoverClose(ExtendedBlock b, long newGS, long expectedBlockLen) throws IOException {
    LOG.info("Recover failed close " + b);
    // retry until any conflicting writer has been stopped
    while (true) {
        try {
            try (AutoCloseableLock lock = datasetLock.acquire()) {
                // check replica's state
                ReplicaInfo replicaInfo = recoverCheck(b, newGS, expectedBlockLen);
                // bump the replica's GS
                replicaInfo.bumpReplicaGS(newGS);
                // finalize the replica if RBW
                if (replicaInfo.getState() == ReplicaState.RBW) {
                    finalizeReplica(b.getBlockPoolId(), replicaInfo);
                }
                return replicaInfo;
            }
        } catch (MustStopExistingWriter e) {
            e.getReplicaInPipeline().stopWriter(datanode.getDnConf().getXceiverStopTimeout());
        }
    }
}
Also used: ReplicaInfo (org.apache.hadoop.hdfs.server.datanode.ReplicaInfo), AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock)
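
A hypothetical caller-side sketch (the dataset variable and block values are placeholders): pipeline recovery bumps the generation stamp and, if the replica was still being written to (RBW), finalizes it in the same call:

ExtendedBlock b = new ExtendedBlock("BP-1234-example", 1002L, 1024L, 100L);
// After this call the replica carries generation stamp 101 and is FINALIZED.
Replica recovered = dataset.recoverClose(b, 101L, 1024L);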

Example 19 with ReplicaInfo

Use of org.apache.hadoop.hdfs.server.datanode.ReplicaInfo in project hadoop by apache.

The class FsDatasetImpl, method contains().

// FsDatasetSpi
@Override
public boolean contains(final ExtendedBlock block) {
    try (AutoCloseableLock lock = datasetLock.acquire()) {
        final long blockId = block.getLocalBlock().getBlockId();
        final String bpid = block.getBlockPoolId();
        final ReplicaInfo r = volumeMap.get(bpid, blockId);
        return (r != null && r.blockDataExists());
    }
}
Also used: ReplicaInfo (org.apache.hadoop.hdfs.server.datanode.ReplicaInfo), AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock)
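
Since contains only consults the in-memory volumeMap and checks that the block data still exists on disk, it is cheap enough to use in test assertions. A hypothetical JUnit-style usage (dataset and block values are placeholders):

ExtendedBlock b = new ExtendedBlock("BP-1234-example", 1003L, 0L, 100L);
// org.junit.Assert.assertTrue is assumed to be statically imported.
assertTrue("replica should be present in this dataset", dataset.contains(b));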

Example 20 with ReplicaInfo

Use of org.apache.hadoop.hdfs.server.datanode.ReplicaInfo in project hadoop by apache.

The class FsDatasetImpl, method recoverRbw().

// FsDatasetSpi
@Override
public ReplicaHandler recoverRbw(ExtendedBlock b, long newGS, long minBytesRcvd, long maxBytesRcvd) throws IOException {
    LOG.info("Recover RBW replica " + b);
    while (true) {
        try {
            try (AutoCloseableLock lock = datasetLock.acquire()) {
                ReplicaInfo replicaInfo = getReplicaInfo(b.getBlockPoolId(), b.getBlockId());
                // check the replica's state
                if (replicaInfo.getState() != ReplicaState.RBW) {
                    throw new ReplicaNotFoundException(ReplicaNotFoundException.NON_RBW_REPLICA + replicaInfo);
                }
                ReplicaInPipeline rbw = (ReplicaInPipeline) replicaInfo;
                // take over as the writer; if another thread is still writing,
                // it must be stopped outside the lock before retrying
                if (!rbw.attemptToSetWriter(null, Thread.currentThread())) {
                    throw new MustStopExistingWriter(rbw);
                }
                LOG.info("At " + datanode.getDisplayName() + ", Recovering " + rbw);
                return recoverRbwImpl(rbw, b, newGS, minBytesRcvd, maxBytesRcvd);
            }
        } catch (MustStopExistingWriter e) {
            e.getReplicaInPipeline().stopWriter(datanode.getDnConf().getXceiverStopTimeout());
        }
    }
}
Also used: ReplicaInfo (org.apache.hadoop.hdfs.server.datanode.ReplicaInfo), AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock), ReplicaNotFoundException (org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException), ReplicaInPipeline (org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline)
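
A hypothetical caller-side sketch (placeholders as before): when a write pipeline is re-established, the DataNode recovers the RBW replica under a new generation stamp, with bounds on how many bytes the replica may currently hold, and resumes writing through the returned handler:

ExtendedBlock b = new ExtendedBlock("BP-1234-example", 1004L, 0L, 100L);
// The replica's current length is expected to fall within
// [minBytesRcvd, maxBytesRcvd], here [0, 4096].
try (ReplicaHandler handler = dataset.recoverRbw(b, 101L, 0L, 4096L)) {
    // resume writing via handler.getReplica()
}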

Aggregations

ReplicaInfo (org.apache.hadoop.hdfs.server.datanode.ReplicaInfo): 48 uses
AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock): 27 uses
IOException (java.io.IOException): 19 uses
MultipleIOException (org.apache.hadoop.io.MultipleIOException): 16 uses
File (java.io.File): 11 uses
ReplicaInPipeline (org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline): 10 uses
ReplicaNotFoundException (org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException): 10 uses
RandomAccessFile (java.io.RandomAccessFile): 7 uses
ReplicaBuilder (org.apache.hadoop.hdfs.server.datanode.ReplicaBuilder): 7 uses
FsVolumeReference (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference): 7 uses
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 6 uses
ReplicaAlreadyExistsException (org.apache.hadoop.hdfs.server.datanode.ReplicaAlreadyExistsException): 5 uses
Block (org.apache.hadoop.hdfs.protocol.Block): 4 uses
ReplicaHandler (org.apache.hadoop.hdfs.server.datanode.ReplicaHandler): 4 uses
FileInputStream (java.io.FileInputStream): 3 uses
FileNotFoundException (java.io.FileNotFoundException): 3 uses
ArrayList (java.util.ArrayList): 3 uses
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 3 uses
Test (org.junit.Test): 3 uses
HashMap (java.util.HashMap): 2 uses