
Example 6 with ReplicaInPipeline

use of org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline in project hadoop by apache.

the class FsDatasetImpl method createTemporary.

// FsDatasetSpi
@Override
public ReplicaHandler createTemporary(StorageType storageType, ExtendedBlock b) throws IOException {
    long startTimeMs = Time.monotonicNow();
    long writerStopTimeoutMs = datanode.getDnConf().getXceiverStopTimeout();
    ReplicaInfo lastFoundReplicaInfo = null;
    do {
        try (AutoCloseableLock lock = datasetLock.acquire()) {
            ReplicaInfo currentReplicaInfo = volumeMap.get(b.getBlockPoolId(), b.getBlockId());
            if (currentReplicaInfo == lastFoundReplicaInfo) {
                if (lastFoundReplicaInfo != null) {
                    invalidate(b.getBlockPoolId(), new Block[] { lastFoundReplicaInfo });
                }
                FsVolumeReference ref = volumes.getNextVolume(storageType, b.getNumBytes());
                FsVolumeImpl v = (FsVolumeImpl) ref.getVolume();
                ReplicaInPipeline newReplicaInfo;
                try {
                    newReplicaInfo = v.createTemporary(b);
                } catch (IOException e) {
                    IOUtils.cleanup(null, ref);
                    throw e;
                }
                volumeMap.add(b.getBlockPoolId(), newReplicaInfo.getReplicaInfo());
                return new ReplicaHandler(newReplicaInfo, ref);
            } else {
                if (!(currentReplicaInfo.getGenerationStamp() < b.getGenerationStamp() && (currentReplicaInfo.getState() == ReplicaState.TEMPORARY || currentReplicaInfo.getState() == ReplicaState.RBW))) {
                    throw new ReplicaAlreadyExistsException("Block " + b + " already exists in state " + currentReplicaInfo.getState() + " and thus cannot be created.");
                }
                lastFoundReplicaInfo = currentReplicaInfo;
            }
        }
        // Hang too long, just bail out. This is not supposed to happen.
        long writerStopMs = Time.monotonicNow() - startTimeMs;
        if (writerStopMs > writerStopTimeoutMs) {
            LOG.warn("Unable to stop existing writer for block " + b + " after " + writerStopMs + " milliseconds.");
            throw new IOException("Unable to stop existing writer for block " + b + " after " + writerStopMs + " milliseconds.");
        }
        // Stop the previous writer
        ((ReplicaInPipeline) lastFoundReplicaInfo).stopWriter(writerStopTimeoutMs);
    } while (true);
}
Also used: FsVolumeReference (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference), ReplicaInfo (org.apache.hadoop.hdfs.server.datanode.ReplicaInfo), AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock), ReplicaAlreadyExistsException (org.apache.hadoop.hdfs.server.datanode.ReplicaAlreadyExistsException), IOException (java.io.IOException), MultipleIOException (org.apache.hadoop.io.MultipleIOException), ReplicaInPipeline (org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline), ReplicaHandler (org.apache.hadoop.hdfs.server.datanode.ReplicaHandler)
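
The ReplicaHandler returned here bundles the new ReplicaInPipeline with the FsVolumeReference pinned for it. A minimal caller sketch under that pairing (the names dataset and b are placeholders, not from this snippet; ReplicaHandler implementing Closeable so that closing it releases the volume reference is an assumption taken from its usage elsewhere in Hadoop):

// Hedged caller sketch; `dataset` and `b` are placeholder names.
// Closing the handler is assumed to release the FsVolumeReference that
// createTemporary acquired, so try-with-resources cleans up on any path.
try (ReplicaHandler handler = dataset.createTemporary(StorageType.DEFAULT, b)) {
    ReplicaInPipeline replica = handler.getReplica();
    // the replica is now in TEMPORARY state and present in the volume map;
    // the caller can open its output streams and start writing block data
}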

Example 7 with ReplicaInPipeline

use of org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline in project hadoop by apache.

the class TestWriteToReplica method testWriteToTemporary.

private void testWriteToTemporary(FsDatasetImpl dataSet, ExtendedBlock[] blocks) throws IOException {
    try {
        dataSet.createTemporary(StorageType.DEFAULT, blocks[FINALIZED]);
        Assert.fail("Should not have created a temporary replica that was " + "finalized " + blocks[FINALIZED]);
    } catch (ReplicaAlreadyExistsException e) {
        // expected: the block already has a FINALIZED replica
    }
    try {
        dataSet.createTemporary(StorageType.DEFAULT, blocks[TEMPORARY]);
        Assert.fail("Should not have created a replica that had been created as " + "temporary " + blocks[TEMPORARY]);
    } catch (ReplicaAlreadyExistsException e) {
        // expected: the block already has a TEMPORARY replica
    }
    try {
        dataSet.createTemporary(StorageType.DEFAULT, blocks[RBW]);
        Assert.fail("Should not have created a replica that had been created as RBW " + blocks[RBW]);
    } catch (ReplicaAlreadyExistsException e) {
        // expected: the block already has an RBW replica
    }
    try {
        dataSet.createTemporary(StorageType.DEFAULT, blocks[RWR]);
        Assert.fail("Should not have created a replica that was waiting to be " + "recovered " + blocks[RWR]);
    } catch (ReplicaAlreadyExistsException e) {
        // expected: the block already has an RWR replica
    }
    try {
        dataSet.createTemporary(StorageType.DEFAULT, blocks[RUR]);
        Assert.fail("Should not have created a replica that was under recovery " + blocks[RUR]);
    } catch (ReplicaAlreadyExistsException e) {
        // expected: the block already has a replica under recovery
    }
    // creating a temporary replica for a brand-new block must succeed
    dataSet.createTemporary(StorageType.DEFAULT, blocks[NON_EXISTENT]);
    try {
        dataSet.createTemporary(StorageType.DEFAULT, blocks[NON_EXISTENT]);
        Assert.fail("Should not have created a replica that had already been " + "created " + blocks[NON_EXISTENT]);
    } catch (Exception e) {
        Assert.assertTrue(e.getMessage().contains(blocks[NON_EXISTENT].getBlockName()));
        Assert.assertTrue(e instanceof ReplicaAlreadyExistsException);
    }
    long newGenStamp = blocks[NON_EXISTENT].getGenerationStamp() * 10;
    blocks[NON_EXISTENT].setGenerationStamp(newGenStamp);
    try {
        ReplicaInPipeline replicaInfo = dataSet.createTemporary(StorageType.DEFAULT, blocks[NON_EXISTENT]).getReplica();
        Assert.assertEquals(newGenStamp, replicaInfo.getGenerationStamp());
        Assert.assertEquals(blocks[NON_EXISTENT].getBlockId(), replicaInfo.getBlockId());
    } catch (ReplicaAlreadyExistsException e) {
        Assert.fail("createTemporary should have allowed the block with a newer " + "generation stamp to be created " + blocks[NON_EXISTENT]);
    }
}
Also used: ReplicaAlreadyExistsException (org.apache.hadoop.hdfs.server.datanode.ReplicaAlreadyExistsException), ReplicaInPipeline (org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline), DiskOutOfSpaceException (org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException), IOException (java.io.IOException), ReplicaNotFoundException (org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException)
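
The five try/fail/catch blocks above repeat one pattern; a hedged JUnit 4 sketch of a helper that would collapse them (the helper is invented for illustration and is not part of TestWriteToReplica):

// Hypothetical helper, not in the Hadoop test: asserts that createTemporary
// rejects a block that already has a replica in any state.
private static void expectReplicaAlreadyExists(FsDatasetImpl dataSet,
        ExtendedBlock block, String description) throws IOException {
    try {
        dataSet.createTemporary(StorageType.DEFAULT, block);
        Assert.fail("Should not have created a temporary replica for a block that "
            + description + " " + block);
    } catch (ReplicaAlreadyExistsException expected) {
        // expected: the existing replica blocks re-creation
    }
}

Each check above then shrinks to a single call such as expectReplicaAlreadyExists(dataSet, blocks[FINALIZED], "was finalized").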

Example 8 with ReplicaInPipeline

use of org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline in project hadoop by apache.

the class FsDatasetImpl method recoverCheck.

private ReplicaInfo recoverCheck(ExtendedBlock b, long newGS, long expectedBlockLen) throws IOException, MustStopExistingWriter {
    ReplicaInfo replicaInfo = getReplicaInfo(b.getBlockPoolId(), b.getBlockId());
    // check state
    if (replicaInfo.getState() != ReplicaState.FINALIZED && replicaInfo.getState() != ReplicaState.RBW) {
        throw new ReplicaNotFoundException(ReplicaNotFoundException.UNFINALIZED_AND_NONRBW_REPLICA + replicaInfo);
    }
    // check generation stamp
    long replicaGenerationStamp = replicaInfo.getGenerationStamp();
    if (replicaGenerationStamp < b.getGenerationStamp() || replicaGenerationStamp > newGS) {
        throw new ReplicaNotFoundException(ReplicaNotFoundException.UNEXPECTED_GS_REPLICA + replicaGenerationStamp + ". Expected GS range is [" + b.getGenerationStamp() + ", " + newGS + "].");
    }
    // stop the previous writer before checking the replica's length
    long replicaLen = replicaInfo.getNumBytes();
    if (replicaInfo.getState() == ReplicaState.RBW) {
        ReplicaInPipeline rbw = (ReplicaInPipeline) replicaInfo;
        if (!rbw.attemptToSetWriter(null, Thread.currentThread())) {
            throw new MustStopExistingWriter(rbw);
        }
        // check length: bytesRcvd, bytesOnDisk, and bytesAcked should be the same
        if (replicaLen != rbw.getBytesOnDisk() || replicaLen != rbw.getBytesAcked()) {
            throw new ReplicaAlreadyExistsException("RBW replica " + replicaInfo + ": bytesRcvd(" + rbw.getNumBytes() + "), bytesOnDisk(" + rbw.getBytesOnDisk() + "), and bytesAcked(" + rbw.getBytesAcked() + ") are not the same.");
        }
    }
    // check block length
    if (replicaLen != expectedBlockLen) {
        throw new IOException("Corrupted replica " + replicaInfo + " with a length of " + replicaLen + "; expected length is " + expectedBlockLen);
    }
    return replicaInfo;
}
Also used: ReplicaInfo (org.apache.hadoop.hdfs.server.datanode.ReplicaInfo), ReplicaNotFoundException (org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException), ReplicaAlreadyExistsException (org.apache.hadoop.hdfs.server.datanode.ReplicaAlreadyExistsException), IOException (java.io.IOException), MultipleIOException (org.apache.hadoop.io.MultipleIOException), ReplicaInPipeline (org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline)
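
recoverCheck deliberately throws MustStopExistingWriter instead of blocking while the dataset lock is held. A hedged sketch of the caller-side retry loop (the getReplicaInPipeline() accessor on MustStopExistingWriter is an assumption inferred from this pattern, and datanode is the enclosing FsDatasetImpl field):

// Hedged sketch of driving recoverCheck: stop the competing writer outside
// the lock, then retry until the checks pass or an IOException escapes.
// MustStopExistingWriter.getReplicaInPipeline() is an assumed accessor.
private ReplicaInfo recoverCheckWithRetry(ExtendedBlock b, long newGS,
        long expectedBlockLen) throws IOException {
    while (true) {
        try {
            return recoverCheck(b, newGS, expectedBlockLen);
        } catch (MustStopExistingWriter e) {
            // interrupt and join the current writer thread, bounded by the
            // configured xceiver stop timeout, before re-running the checks
            e.getReplicaInPipeline().stopWriter(datanode.getDnConf().getXceiverStopTimeout());
        }
    }
}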

Example 9 with ReplicaInPipeline

use of org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline in project hadoop by apache.

the class FsDatasetImpl method convertTemporaryToRbw.

// FsDatasetSpi
@Override
public ReplicaInPipeline convertTemporaryToRbw(final ExtendedBlock b) throws IOException {
    try (AutoCloseableLock lock = datasetLock.acquire()) {
        final long blockId = b.getBlockId();
        final long expectedGs = b.getGenerationStamp();
        final long visible = b.getNumBytes();
        LOG.info("Convert " + b + " from Temporary to RBW, visible length=" + visible);
        final ReplicaInfo temp;
        {
            // get replica
            final ReplicaInfo r = volumeMap.get(b.getBlockPoolId(), blockId);
            if (r == null) {
                throw new ReplicaNotFoundException(ReplicaNotFoundException.NON_EXISTENT_REPLICA + b);
            }
            // check the replica's state
            if (r.getState() != ReplicaState.TEMPORARY) {
                throw new ReplicaAlreadyExistsException("r.getState() != ReplicaState.TEMPORARY, r=" + r);
            }
            temp = r;
        }
        // check generation stamp
        if (temp.getGenerationStamp() != expectedGs) {
            throw new ReplicaAlreadyExistsException("temp.getGenerationStamp() != expectedGs = " + expectedGs + ", temp=" + temp);
        }
        // TODO: check writer?
        // set writer to the current thread
        // temp.setWriter(Thread.currentThread());
        // check length
        final long numBytes = temp.getNumBytes();
        if (numBytes < visible) {
            throw new IOException(numBytes + " = numBytes < visible = " + visible + ", temp=" + temp);
        }
        // check volume
        final FsVolumeImpl v = (FsVolumeImpl) temp.getVolume();
        if (v == null) {
            throw new IOException("r.getVolume() = null, temp=" + temp);
        }
        final ReplicaInPipeline rbw = v.convertTemporaryToRbw(b, temp);
        if (rbw.getState() != ReplicaState.RBW) {
            throw new IOException("Expected replica state: " + ReplicaState.RBW + " obtained " + rbw.getState() + " for converting block " + b.getBlockId());
        }
        // overwrite the RBW in the volume map
        volumeMap.add(b.getBlockPoolId(), rbw.getReplicaInfo());
        return rbw;
    }
}
Also used: ReplicaInfo (org.apache.hadoop.hdfs.server.datanode.ReplicaInfo), AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock), ReplicaNotFoundException (org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException), ReplicaAlreadyExistsException (org.apache.hadoop.hdfs.server.datanode.ReplicaAlreadyExistsException), IOException (java.io.IOException), MultipleIOException (org.apache.hadoop.io.MultipleIOException), ReplicaInPipeline (org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline)
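
Together with Example 6 this gives the temporary-to-RBW lifecycle; a hedged end-to-end sketch (dataset and b are placeholder names, not from the source):

// Hedged lifecycle sketch: the replica starts as TEMPORARY and is promoted
// to RBW once the block joins a write pipeline. b.getNumBytes() (the visible
// length) must not exceed the bytes the temporary replica has received.
try (ReplicaHandler handler = dataset.createTemporary(StorageType.DEFAULT, b)) {
    // ... write the first b.getNumBytes() bytes of the block ...
    ReplicaInPipeline rbw = dataset.convertTemporaryToRbw(b);
    assert rbw.getState() == ReplicaState.RBW;
    // the volume map now maps the block id to the RBW replica
}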

Example 10 with ReplicaInPipeline

use of org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline in project hadoop by apache.

the class FsDatasetImpl method updateReplicaUnderRecovery.

private ReplicaInfo updateReplicaUnderRecovery(String bpid, ReplicaInfo rur, long recoveryId, long newBlockId, long newlength) throws IOException {
    // check recovery id
    if (rur.getRecoveryID() != recoveryId) {
        throw new IOException("rur.getRecoveryID() != recoveryId = " + recoveryId + ", rur=" + rur);
    }
    boolean copyOnTruncate = newBlockId > 0L && rur.getBlockId() != newBlockId;
    // bump rur's GS to be recovery id
    if (!copyOnTruncate) {
        rur.bumpReplicaGS(recoveryId);
    }
    // update length
    if (rur.getNumBytes() < newlength) {
        throw new IOException("rur.getNumBytes() < newlength = " + newlength + ", rur=" + rur);
    }
    if (rur.getNumBytes() > newlength) {
        if (!copyOnTruncate) {
            rur.breakHardLinksIfNeeded();
            rur.truncateBlock(newlength);
            // update RUR with the new length
            rur.setNumBytes(newlength);
        } else {
            // Copying block to a new block with new blockId.
            // Not truncating original block.
            FsVolumeImpl volume = (FsVolumeImpl) rur.getVolume();
            ReplicaInPipeline newReplicaInfo = volume.updateRURCopyOnTruncate(rur, bpid, newBlockId, recoveryId, newlength);
            if (newReplicaInfo.getState() != ReplicaState.RBW) {
                throw new IOException("Append on block " + rur.getBlockId() + " returned a replica of state " + newReplicaInfo.getState() + "; expected RBW");
            }
            newReplicaInfo.setNumBytes(newlength);
            volumeMap.add(bpid, newReplicaInfo.getReplicaInfo());
            finalizeReplica(bpid, newReplicaInfo.getReplicaInfo());
        }
    }
    // finalize the block
    return finalizeReplica(bpid, rur);
}
Also used: IOException (java.io.IOException), MultipleIOException (org.apache.hadoop.io.MultipleIOException), ReplicaInPipeline (org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline)
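
The copyOnTruncate flag decides between truncating in place and copying to a new block; a short illustration with invented values (the block ids and recovery id below are made up):

// Invented values: rur.getBlockId() == 17, and a truncate recovery asked for
// newBlockId == 42 with recoveryId == 1005.
boolean copyOnTruncate = 42L > 0L && 17L != 42L;   // true -> copy path
// copy path:     block 17 is left untouched; its data is copied into new
//                block 42, truncated to newlength, then finalized
// in-place path: (newBlockId <= 0 or equal to 17) block 17's generation
//                stamp is bumped to 1005 and its file truncated to newlength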

Aggregations

ReplicaInPipeline (org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline) 14
IOException (java.io.IOException) 12
ReplicaInfo (org.apache.hadoop.hdfs.server.datanode.ReplicaInfo) 10
MultipleIOException (org.apache.hadoop.io.MultipleIOException) 10
AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock) 9
ReplicaAlreadyExistsException (org.apache.hadoop.hdfs.server.datanode.ReplicaAlreadyExistsException) 5
ReplicaNotFoundException (org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException) 5
ReplicaHandler (org.apache.hadoop.hdfs.server.datanode.ReplicaHandler) 4
FsVolumeReference (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference) 4
DiskOutOfSpaceException (org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException) 2
File (java.io.File) 1
ClosedChannelException (java.nio.channels.ClosedChannelException) 1
ArrayList (java.util.ArrayList) 1
Random (java.util.Random) 1
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean) 1
Configuration (org.apache.hadoop.conf.Configuration) 1
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream) 1
Path (org.apache.hadoop.fs.Path) 1
ExtendedBlockId (org.apache.hadoop.hdfs.ExtendedBlockId) 1
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo) 1