Example 21 with ReplicaInfo

Use of org.apache.hadoop.hdfs.server.datanode.ReplicaInfo in project hadoop by apache.

The class FsDatasetImpl, method createTemporary.

// FsDatasetSpi
@Override
public ReplicaHandler createTemporary(StorageType storageType, ExtendedBlock b) throws IOException {
    long startTimeMs = Time.monotonicNow();
    long writerStopTimeoutMs = datanode.getDnConf().getXceiverStopTimeout();
    ReplicaInfo lastFoundReplicaInfo = null;
    do {
        try (AutoCloseableLock lock = datasetLock.acquire()) {
            ReplicaInfo currentReplicaInfo = volumeMap.get(b.getBlockPoolId(), b.getBlockId());
            if (currentReplicaInfo == lastFoundReplicaInfo) {
                if (lastFoundReplicaInfo != null) {
                    invalidate(b.getBlockPoolId(), new Block[] { lastFoundReplicaInfo });
                }
                FsVolumeReference ref = volumes.getNextVolume(storageType, b.getNumBytes());
                FsVolumeImpl v = (FsVolumeImpl) ref.getVolume();
                ReplicaInPipeline newReplicaInfo;
                try {
                    newReplicaInfo = v.createTemporary(b);
                } catch (IOException e) {
                    IOUtils.cleanup(null, ref);
                    throw e;
                }
                volumeMap.add(b.getBlockPoolId(), newReplicaInfo.getReplicaInfo());
                return new ReplicaHandler(newReplicaInfo, ref);
            } else {
                // An existing TEMPORARY or RBW replica with an older generation stamp
                // may be superseded; any other state is a genuine conflict.
                if (!(currentReplicaInfo.getGenerationStamp() < b.getGenerationStamp()
                        && (currentReplicaInfo.getState() == ReplicaState.TEMPORARY
                            || currentReplicaInfo.getState() == ReplicaState.RBW))) {
                    throw new ReplicaAlreadyExistsException("Block " + b
                        + " already exists in state " + currentReplicaInfo.getState()
                        + " and thus cannot be created.");
                }
                lastFoundReplicaInfo = currentReplicaInfo;
            }
        }
        // Hang too long, just bail out. This is not supposed to happen.
        long writerStopMs = Time.monotonicNow() - startTimeMs;
        if (writerStopMs > writerStopTimeoutMs) {
            LOG.warn("Unable to stop existing writer for block " + b + " after " + writerStopMs + " miniseconds.");
            throw new IOException("Unable to stop existing writer for block " + b + " after " + writerStopMs + " miniseconds.");
        }
        // Stop the previous writer
        ((ReplicaInPipeline) lastFoundReplicaInfo).stopWriter(writerStopTimeoutMs);
    } while (true);
}
Also used : FsVolumeReference(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference) ReplicaInfo(org.apache.hadoop.hdfs.server.datanode.ReplicaInfo) AutoCloseableLock(org.apache.hadoop.util.AutoCloseableLock) ReplicaAlreadyExistsException(org.apache.hadoop.hdfs.server.datanode.ReplicaAlreadyExistsException) IOException(java.io.IOException) MultipleIOException(org.apache.hadoop.io.MultipleIOException) ReplicaInPipeline(org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline) ReplicaHandler(org.apache.hadoop.hdfs.server.datanode.ReplicaHandler)
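
For reference, a hypothetical caller would hold the returned ReplicaHandler and close it to release the volume reference. This is a minimal sketch, not the DataNode's actual write path; 'dataset' and 'block' are assumed to be in scope.

ReplicaHandler handler = dataset.createTemporary(StorageType.DEFAULT, block);
try {
    ReplicaInPipeline replica = handler.getReplica();
    // ... stream incoming bytes into the temporary replica ...
} finally {
    handler.close(); // releases the FsVolumeReference acquired by createTemporary
}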

Example 22 with ReplicaInfo

Use of org.apache.hadoop.hdfs.server.datanode.ReplicaInfo in project hadoop by apache.

The class FsVolumeImpl, method moveBlockToTmpLocation.

public ReplicaInfo moveBlockToTmpLocation(ExtendedBlock block, ReplicaInfo replicaInfo,
        int smallBufferSize, Configuration conf) throws IOException {
    // Copy the block file and its meta file into this volume's tmp directory.
    File[] blockFiles = FsDatasetImpl.copyBlockFiles(block.getBlockId(),
        block.getGenerationStamp(), replicaInfo, getTmpDir(block.getBlockPoolId()),
        replicaInfo.isOnTransientStorage(), smallBufferSize, conf);
    ReplicaInfo newReplicaInfo = new ReplicaBuilder(ReplicaState.TEMPORARY)
        .setBlockId(replicaInfo.getBlockId())
        .setGenerationStamp(replicaInfo.getGenerationStamp())
        .setFsVolume(this)
        .setDirectoryToUse(blockFiles[0].getParentFile())
        .setBytesToReserve(0)
        .build();
    // blockFiles[0] is the copied meta file; blockFiles[1] is the copied block file.
    newReplicaInfo.setNumBytes(blockFiles[1].length());
    return newReplicaInfo;
}
Also used : ReplicaInfo(org.apache.hadoop.hdfs.server.datanode.ReplicaInfo) ReplicaBuilder(org.apache.hadoop.hdfs.server.datanode.ReplicaBuilder) RandomAccessFile(java.io.RandomAccessFile) File(java.io.File)
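
This copy-to-tmp step is only half of a cross-volume move: the dataset later moves the temporary replica to its final location. A minimal, hypothetical sketch of the first half, assuming 'targetVolume', 'block', 'source', and 'conf' are in scope:

int smallBufferSize = 512; // assumed value; real code derives it from the configuration
ReplicaInfo tmpReplica = targetVolume.moveBlockToTmpLocation(
    block, source, smallBufferSize, conf);
// The dataset would then finalize tmpReplica and update its replica map.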

Example 23 with ReplicaInfo

Use of org.apache.hadoop.hdfs.server.datanode.ReplicaInfo in project hadoop by apache.

The class FsDatasetImpl, method getTmpInputStreams.

/**
   * Returns handles to the block file and its metadata file
   */
// FsDatasetSpi
@Override
public ReplicaInputStreams getTmpInputStreams(ExtendedBlock b, long blkOffset, long metaOffset) throws IOException {
    try (AutoCloseableLock lock = datasetLock.acquire()) {
        ReplicaInfo info = getReplicaInfo(b);
        FsVolumeReference ref = info.getVolume().obtainReference();
        try {
            InputStream blockInStream = info.getDataInputStream(blkOffset);
            try {
                InputStream metaInStream = info.getMetadataInputStream(metaOffset);
                return new ReplicaInputStreams(blockInStream, metaInStream, ref, datanode.getFileIoProvider());
            } catch (IOException e) {
                IOUtils.cleanup(null, blockInStream);
                throw e;
            }
        } catch (IOException e) {
            IOUtils.cleanup(null, ref);
            throw e;
        }
    }
}
Also used : ReplicaInputStreams(org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaInputStreams) FsVolumeReference(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference) ReplicaInfo(org.apache.hadoop.hdfs.server.datanode.ReplicaInfo) LengthInputStream(org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream) FileInputStream(java.io.FileInputStream) InputStream(java.io.InputStream) AutoCloseableLock(org.apache.hadoop.util.AutoCloseableLock) IOException(java.io.IOException) MultipleIOException(org.apache.hadoop.io.MultipleIOException)
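
Because ReplicaInputStreams is Closeable, a hypothetical caller can lean on try-with-resources to release both streams and the volume reference in one place ('dataset' and 'block' assumed in scope):

try (ReplicaInputStreams streams = dataset.getTmpInputStreams(block, 0, 0)) {
    // read block bytes and checksum bytes through the wrapped streams
}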

Example 24 with ReplicaInfo

Use of org.apache.hadoop.hdfs.server.datanode.ReplicaInfo in project hadoop by apache.

The class FsDatasetImpl, method recoverCheck.

private ReplicaInfo recoverCheck(ExtendedBlock b, long newGS, long expectedBlockLen) throws IOException, MustStopExistingWriter {
    ReplicaInfo replicaInfo = getReplicaInfo(b.getBlockPoolId(), b.getBlockId());
    // check state
    if (replicaInfo.getState() != ReplicaState.FINALIZED && replicaInfo.getState() != ReplicaState.RBW) {
        throw new ReplicaNotFoundException(ReplicaNotFoundException.UNFINALIZED_AND_NONRBW_REPLICA + replicaInfo);
    }
    // check generation stamp
    long replicaGenerationStamp = replicaInfo.getGenerationStamp();
    if (replicaGenerationStamp < b.getGenerationStamp() || replicaGenerationStamp > newGS) {
        throw new ReplicaNotFoundException(ReplicaNotFoundException.UNEXPECTED_GS_REPLICA
            + replicaGenerationStamp + ". Expected GS range is ["
            + b.getGenerationStamp() + ", " + newGS + "].");
    }
    // stop the previous writer before checking the replica's length
    long replicaLen = replicaInfo.getNumBytes();
    if (replicaInfo.getState() == ReplicaState.RBW) {
        ReplicaInPipeline rbw = (ReplicaInPipeline) replicaInfo;
        if (!rbw.attemptToSetWriter(null, Thread.currentThread())) {
            throw new MustStopExistingWriter(rbw);
        }
        // check length: bytesRcvd, bytesOnDisk, and bytesAcked should be the same
        if (replicaLen != rbw.getBytesOnDisk() || replicaLen != rbw.getBytesAcked()) {
            throw new ReplicaAlreadyExistsException("RBW replica " + replicaInfo + "bytesRcvd(" + rbw.getNumBytes() + "), bytesOnDisk(" + rbw.getBytesOnDisk() + "), and bytesAcked(" + rbw.getBytesAcked() + ") are not the same.");
        }
    }
    // check block length
    if (replicaLen != expectedBlockLen) {
        throw new IOException("Corrupted replica " + replicaInfo + " with a length of " + replicaLen + " expected length is " + expectedBlockLen);
    }
    return replicaInfo;
}
Also used : ReplicaInfo(org.apache.hadoop.hdfs.server.datanode.ReplicaInfo) ReplicaNotFoundException(org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException) ReplicaAlreadyExistsException(org.apache.hadoop.hdfs.server.datanode.ReplicaAlreadyExistsException) IOException(java.io.IOException) MultipleIOException(org.apache.hadoop.io.MultipleIOException) ReplicaInPipeline(org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline)
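
The MustStopExistingWriter escape hatch implies a retry loop in the caller. A sketch of that pattern, modeled on the recovery methods in the same class (the getReplicaInPipeline accessor and the timeout lookup are assumptions about surrounding code):

while (true) {
    try {
        ReplicaInfo replicaInfo = recoverCheck(b, newGS, expectedBlockLen);
        // ... proceed with append or close recovery using replicaInfo ...
        return replicaInfo;
    } catch (MustStopExistingWriter e) {
        // Stop the writer that currently owns the replica, then retry the check.
        e.getReplicaInPipeline().stopWriter(datanode.getDnConf().getXceiverStopTimeout());
    }
}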

Example 25 with ReplicaInfo

Use of org.apache.hadoop.hdfs.server.datanode.ReplicaInfo in project hadoop by apache.

The class FsDatasetImpl, method getBlockInputStream.

// FsDatasetSpi
@Override
public InputStream getBlockInputStream(ExtendedBlock b, long seekOffset) throws IOException {
    ReplicaInfo info;
    synchronized (this) {
        info = volumeMap.get(b.getBlockPoolId(), b.getLocalBlock());
    }
    if (info != null && info.getVolume().isTransientStorage()) {
        ramDiskReplicaTracker.touch(b.getBlockPoolId(), b.getBlockId());
        datanode.getMetrics().incrRamDiskBlocksReadHits();
    }
    if (info != null && info.blockDataExists()) {
        return info.getDataInputStream(seekOffset);
    } else {
        throw new IOException("No data exists for block " + b);
    }
}
Also used : ReplicaInfo(org.apache.hadoop.hdfs.server.datanode.ReplicaInfo) IOException(java.io.IOException) MultipleIOException(org.apache.hadoop.io.MultipleIOException)
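
A hypothetical read loop over the returned stream ('dataset' and 'block' assumed in scope):

try (InputStream in = dataset.getBlockInputStream(block, 0)) {
    byte[] buf = new byte[4096];
    int n;
    while ((n = in.read(buf)) != -1) {
        // process n bytes of block data
    }
}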

Aggregations

ReplicaInfo (org.apache.hadoop.hdfs.server.datanode.ReplicaInfo): 48
AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock): 27
IOException (java.io.IOException): 19
MultipleIOException (org.apache.hadoop.io.MultipleIOException): 16
File (java.io.File): 11
ReplicaInPipeline (org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline): 10
ReplicaNotFoundException (org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException): 10
RandomAccessFile (java.io.RandomAccessFile): 7
ReplicaBuilder (org.apache.hadoop.hdfs.server.datanode.ReplicaBuilder): 7
FsVolumeReference (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference): 7
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 6
ReplicaAlreadyExistsException (org.apache.hadoop.hdfs.server.datanode.ReplicaAlreadyExistsException): 5
Block (org.apache.hadoop.hdfs.protocol.Block): 4
ReplicaHandler (org.apache.hadoop.hdfs.server.datanode.ReplicaHandler): 4
FileInputStream (java.io.FileInputStream): 3
FileNotFoundException (java.io.FileNotFoundException): 3
ArrayList (java.util.ArrayList): 3
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 3
Test (org.junit.Test): 3
HashMap (java.util.HashMap): 2