Example 6 with ReplicaNotFoundException

Use of org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException in project hadoop by apache.

From class FsDatasetImpl, method recoverRbw.

// FsDatasetSpi
@Override
public ReplicaHandler recoverRbw(ExtendedBlock b, long newGS, long minBytesRcvd, long maxBytesRcvd) throws IOException {
    LOG.info("Recover RBW replica " + b);
    while (true) {
        try {
            try (AutoCloseableLock lock = datasetLock.acquire()) {
                ReplicaInfo replicaInfo = getReplicaInfo(b.getBlockPoolId(), b.getBlockId());
                // check the replica's state
                if (replicaInfo.getState() != ReplicaState.RBW) {
                    throw new ReplicaNotFoundException(ReplicaNotFoundException.NON_RBW_REPLICA + replicaInfo);
                }
                ReplicaInPipeline rbw = (ReplicaInPipeline) replicaInfo;
                if (!rbw.attemptToSetWriter(null, Thread.currentThread())) {
                    throw new MustStopExistingWriter(rbw);
                }
                LOG.info("At " + datanode.getDisplayName() + ", Recovering " + rbw);
                return recoverRbwImpl(rbw, b, newGS, minBytesRcvd, maxBytesRcvd);
            }
        } catch (MustStopExistingWriter e) {
            e.getReplicaInPipeline().stopWriter(datanode.getDnConf().getXceiverStopTimeout());
        }
    }
}
Also used: ReplicaInfo (org.apache.hadoop.hdfs.server.datanode.ReplicaInfo), AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock), ReplicaNotFoundException (org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException), ReplicaInPipeline (org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline)
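
The pattern worth noting here is the two-phase retry: the writer slot is claimed while the dataset lock is held, but the conflicting writer is stopped only after the lock is released, and then the whole attempt is retried. Below is a minimal, hypothetical sketch of the same pattern using plain JDK primitives; WriterTakeover, doRecovery, and the 60-second timeout are illustrative stand-ins, not Hadoop APIs.

import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.ReentrantLock;

class WriterTakeover {
    private final ReentrantLock lock = new ReentrantLock();
    private final AtomicReference<Thread> writer = new AtomicReference<>();

    void recover() throws InterruptedException {
        final Thread me = Thread.currentThread();
        while (true) {
            Thread other = null;
            lock.lock();
            try {
                // Claim the writer slot if it is free or already ours
                // (cf. ReplicaInPipeline#attemptToSetWriter(null, me)).
                if (writer.compareAndSet(null, me) || writer.get() == me) {
                    doRecovery();
                    return;
                }
                other = writer.get();
            } finally {
                lock.unlock();
            }
            // Stop the conflicting writer *outside* the lock, so a writer
            // blocked on the lock cannot deadlock with us, then retry
            // (cf. stopWriter(getXceiverStopTimeout()) in the catch above).
            if (other != null) {
                other.interrupt();
                other.join(60_000);
                writer.compareAndSet(other, null);
            }
        }
    }

    private void doRecovery() { /* replica recovery work would go here */ }
}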

Example 7 with ReplicaNotFoundException

Use of org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException in project hadoop by apache.

From class TestWriteToReplica, method testAppend.

private void testAppend(String bpid, FsDatasetSpi<?> dataSet, ExtendedBlock[] blocks) throws IOException {
    long newGS = blocks[FINALIZED].getGenerationStamp() + 1;
    final FsVolumeSpi v = dataSet.getVolume(blocks[FINALIZED]);
    if (v instanceof FsVolumeImpl) {
        FsVolumeImpl fvi = (FsVolumeImpl) v;
        long available = fvi.getCapacity() - fvi.getDfsUsed();
        long expectedLen = blocks[FINALIZED].getNumBytes();
        try {
            // exhaust the volume's remaining space so the append below must fail
            fvi.onBlockFileDeletion(bpid, -available);
            blocks[FINALIZED].setNumBytes(expectedLen + 100);
            dataSet.append(blocks[FINALIZED], newGS, expectedLen);
            Assert.fail("Should not have space to append to a FINALIZED replica " + blocks[FINALIZED]);
        } catch (DiskOutOfSpaceException e) {
            Assert.assertTrue(e.getMessage().startsWith("Insufficient space for appending to "));
        }
        // release the claimed space and restore the original block length
        fvi.onBlockFileDeletion(bpid, available);
        blocks[FINALIZED].setNumBytes(expectedLen);
    }
    newGS = blocks[RBW].getGenerationStamp() + 1;
    dataSet.append(blocks[FINALIZED], newGS,
        blocks[FINALIZED].getNumBytes()); // successful
    blocks[FINALIZED].setGenerationStamp(newGS);
    try {
        dataSet.append(blocks[TEMPORARY], blocks[TEMPORARY].getGenerationStamp() + 1, blocks[TEMPORARY].getNumBytes());
        Assert.fail("Should not have appended to a temporary replica " + blocks[TEMPORARY]);
    } catch (ReplicaNotFoundException e) {
        Assert.assertEquals(ReplicaNotFoundException.UNFINALIZED_REPLICA + blocks[TEMPORARY], e.getMessage());
    }
    try {
        dataSet.append(blocks[RBW], blocks[RBW].getGenerationStamp() + 1, blocks[RBW].getNumBytes());
        Assert.fail("Should not have appended to an RBW replica" + blocks[RBW]);
    } catch (ReplicaNotFoundException e) {
        Assert.assertEquals(ReplicaNotFoundException.UNFINALIZED_REPLICA + blocks[RBW], e.getMessage());
    }
    try {
        dataSet.append(blocks[RWR], blocks[RWR].getGenerationStamp() + 1, blocks[RWR].getNumBytes());
        Assert.fail("Should not have appended to an RWR replica " + blocks[RWR]);
    } catch (ReplicaNotFoundException e) {
        Assert.assertEquals(ReplicaNotFoundException.UNFINALIZED_REPLICA + blocks[RWR], e.getMessage());
    }
    try {
        dataSet.append(blocks[RUR], blocks[RUR].getGenerationStamp() + 1, blocks[RUR].getNumBytes());
        Assert.fail("Should not have appended to an RUR replica" + blocks[RUR]);
    } catch (ReplicaNotFoundException e) {
        Assert.assertEquals(ReplicaNotFoundException.UNFINALIZED_REPLICA + blocks[RUR], e.getMessage());
    }
    try {
        dataSet.append(blocks[NON_EXISTENT], blocks[NON_EXISTENT].getGenerationStamp(), blocks[NON_EXISTENT].getNumBytes());
        Assert.fail("Should not have appended to a non-existent replica " + blocks[NON_EXISTENT]);
    } catch (ReplicaNotFoundException e) {
        Assert.assertEquals(ReplicaNotFoundException.NON_EXISTENT_REPLICA + blocks[NON_EXISTENT], e.getMessage());
    }
    newGS = blocks[FINALIZED].getGenerationStamp() + 1;
    dataSet.recoverAppend(blocks[FINALIZED], newGS,
        blocks[FINALIZED].getNumBytes()); // successful
    blocks[FINALIZED].setGenerationStamp(newGS);
    try {
        dataSet.recoverAppend(blocks[TEMPORARY], blocks[TEMPORARY].getGenerationStamp() + 1, blocks[TEMPORARY].getNumBytes());
        Assert.fail("Should not have appended to a temporary replica " + blocks[TEMPORARY]);
    } catch (ReplicaNotFoundException e) {
        Assert.assertTrue(e.getMessage().startsWith(ReplicaNotFoundException.UNFINALIZED_AND_NONRBW_REPLICA));
    }
    newGS = blocks[RBW].getGenerationStamp() + 1;
    dataSet.recoverAppend(blocks[RBW], newGS, blocks[RBW].getNumBytes());
    blocks[RBW].setGenerationStamp(newGS);
    try {
        dataSet.recoverAppend(blocks[RWR], blocks[RWR].getGenerationStamp() + 1, blocks[RWR].getNumBytes());
        Assert.fail("Should not have appended to an RWR replica " + blocks[RWR]);
    } catch (ReplicaNotFoundException e) {
        Assert.assertTrue(e.getMessage().startsWith(ReplicaNotFoundException.UNFINALIZED_AND_NONRBW_REPLICA));
    }
    try {
        dataSet.recoverAppend(blocks[RUR], blocks[RUR].getGenerationStamp() + 1, blocks[RUR].getNumBytes());
        Assert.fail("Should not have appended to an RUR replica" + blocks[RUR]);
    } catch (ReplicaNotFoundException e) {
        Assert.assertTrue(e.getMessage().startsWith(ReplicaNotFoundException.UNFINALIZED_AND_NONRBW_REPLICA));
    }
    try {
        dataSet.recoverAppend(blocks[NON_EXISTENT], blocks[NON_EXISTENT].getGenerationStamp(), blocks[NON_EXISTENT].getNumBytes());
        Assert.fail("Should not have appended to a non-existent replica " + blocks[NON_EXISTENT]);
    } catch (ReplicaNotFoundException e) {
        Assert.assertTrue(e.getMessage().startsWith(ReplicaNotFoundException.NON_EXISTENT_REPLICA));
    }
}
Also used: DiskOutOfSpaceException (org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException), ReplicaNotFoundException (org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException), FsVolumeSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi)
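
Every negative case above uses the JUnit 4 try/Assert.fail/catch idiom. On JUnit 4.13 or later the same check can be written with Assert.assertThrows, which fails the test automatically when no exception is thrown. A sketch of the TEMPORARY case, reusing the dataSet and blocks fixtures from the test above:

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThrows;

// Equivalent to the first try/fail/catch block: appending to a TEMPORARY
// replica must raise ReplicaNotFoundException with the UNFINALIZED message.
ReplicaNotFoundException e = assertThrows(ReplicaNotFoundException.class,
    () -> dataSet.append(blocks[TEMPORARY],
        blocks[TEMPORARY].getGenerationStamp() + 1,
        blocks[TEMPORARY].getNumBytes()));
assertEquals(ReplicaNotFoundException.UNFINALIZED_REPLICA + blocks[TEMPORARY],
    e.getMessage());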

Example 8 with ReplicaNotFoundException

Use of org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException in project hadoop by apache.

From class FsDatasetImplTestUtils, method getMaterializedReplica.

/**
   * Return a materialized replica from the FsDatasetImpl.
   */
@Override
public MaterializedReplica getMaterializedReplica(ExtendedBlock block) throws ReplicaNotFoundException {
    File blockFile;
    try {
        ReplicaInfo r = dataset.getReplicaInfo(block);
        blockFile = new File(r.getBlockURI());
    } catch (IOException e) {
        LOG.error("Block file for " + block + " does not existed:", e);
        throw new ReplicaNotFoundException(block);
    }
    File metaFile = FsDatasetUtil.getMetaFile(blockFile, block.getGenerationStamp());
    return new FsDatasetImplMaterializedReplica(blockFile, metaFile);
}
Also used: ReplicaInfo (org.apache.hadoop.hdfs.server.datanode.ReplicaInfo), ReplicaNotFoundException (org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException), IOException (java.io.IOException), RandomAccessFile (java.io.RandomAccessFile), File (java.io.File)
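
FsDatasetUtil.getMetaFile derives the checksum file's path from the block file and the generation stamp; DataNode meta files follow the <blockfile>_<genstamp>.meta naming convention. A simplified, hypothetical re-derivation of that rule (not the Hadoop implementation, and metaFileFor is an illustrative name):

import java.io.File;

// Sketch of the meta-file naming rule used on DataNode disks:
// a block file "blk_<id>" with generation stamp G has its checksums
// in "blk_<id>_<G>.meta" in the same directory.
static File metaFileFor(File blockFile, long generationStamp) {
    return new File(blockFile.getParentFile(),
        blockFile.getName() + "_" + generationStamp + ".meta");
}

// e.g. metaFileFor(new File("/data/current/blk_1073741825"), 1001)
//      -> /data/current/blk_1073741825_1001.meta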

Example 9 with ReplicaNotFoundException

Use of org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException in project hadoop by apache.

From class FsDatasetImpl, method recoverCheck.

private ReplicaInfo recoverCheck(ExtendedBlock b, long newGS, long expectedBlockLen) throws IOException, MustStopExistingWriter {
    ReplicaInfo replicaInfo = getReplicaInfo(b.getBlockPoolId(), b.getBlockId());
    // check state
    if (replicaInfo.getState() != ReplicaState.FINALIZED && replicaInfo.getState() != ReplicaState.RBW) {
        throw new ReplicaNotFoundException(ReplicaNotFoundException.UNFINALIZED_AND_NONRBW_REPLICA + replicaInfo);
    }
    // check generation stamp
    long replicaGenerationStamp = replicaInfo.getGenerationStamp();
    if (replicaGenerationStamp < b.getGenerationStamp() || replicaGenerationStamp > newGS) {
        throw new ReplicaNotFoundException(ReplicaNotFoundException.UNEXPECTED_GS_REPLICA + replicaGenerationStamp + ". Expected GS range is [" + b.getGenerationStamp() + ", " + newGS + "].");
    }
    // stop the previous writer before checking the replica's length
    long replicaLen = replicaInfo.getNumBytes();
    if (replicaInfo.getState() == ReplicaState.RBW) {
        ReplicaInPipeline rbw = (ReplicaInPipeline) replicaInfo;
        if (!rbw.attemptToSetWriter(null, Thread.currentThread())) {
            throw new MustStopExistingWriter(rbw);
        }
        // check length: bytesRcvd, bytesOnDisk, and bytesAcked should be the same
        if (replicaLen != rbw.getBytesOnDisk() || replicaLen != rbw.getBytesAcked()) {
            throw new ReplicaAlreadyExistsException("RBW replica " + replicaInfo + "bytesRcvd(" + rbw.getNumBytes() + "), bytesOnDisk(" + rbw.getBytesOnDisk() + "), and bytesAcked(" + rbw.getBytesAcked() + ") are not the same.");
        }
    }
    // check block length
    if (replicaLen != expectedBlockLen) {
        throw new IOException("Corrupted replica " + replicaInfo + " with a length of " + replicaLen + " expected length is " + expectedBlockLen);
    }
    return replicaInfo;
}
Also used: ReplicaInfo (org.apache.hadoop.hdfs.server.datanode.ReplicaInfo), ReplicaNotFoundException (org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException), ReplicaAlreadyExistsException (org.apache.hadoop.hdfs.server.datanode.ReplicaAlreadyExistsException), IOException (java.io.IOException), MultipleIOException (org.apache.hadoop.io.MultipleIOException), ReplicaInPipeline (org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline)
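
recoverCheck accepts a replica only when three conditions hold: its state is FINALIZED or RBW, its generation stamp lies in the closed interval [b.getGenerationStamp(), newGS], and its length matches expectedBlockLen. A small hypothetical predicate (inRecoveryWindow is an illustrative name, not a Hadoop API) restates the generation-stamp window:

// Hypothetical helper restating the GS check in recoverCheck: the stored
// replica's GS must be no older than the block's GS and no newer than the
// recovery's target GS, i.e. replicaGS in [blockGS, newGS].
static boolean inRecoveryWindow(long replicaGS, long blockGS, long newGS) {
    return blockGS <= replicaGS && replicaGS <= newGS;
}

// inRecoveryWindow(2001, 2001, 2002) -> true   (replica at the block's GS)
// inRecoveryWindow(2003, 2001, 2002) -> false  (replica newer than target GS)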

Example 10 with ReplicaNotFoundException

Use of org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException in project hadoop by apache.

From class FsDatasetImpl, method convertTemporaryToRbw.

// FsDatasetSpi
@Override
public ReplicaInPipeline convertTemporaryToRbw(final ExtendedBlock b) throws IOException {
    try (AutoCloseableLock lock = datasetLock.acquire()) {
        final long blockId = b.getBlockId();
        final long expectedGs = b.getGenerationStamp();
        final long visible = b.getNumBytes();
        LOG.info("Convert " + b + " from Temporary to RBW, visible length=" + visible);
        final ReplicaInfo temp;
        {
            // get replica
            final ReplicaInfo r = volumeMap.get(b.getBlockPoolId(), blockId);
            if (r == null) {
                throw new ReplicaNotFoundException(ReplicaNotFoundException.NON_EXISTENT_REPLICA + b);
            }
            // check the replica's state
            if (r.getState() != ReplicaState.TEMPORARY) {
                throw new ReplicaAlreadyExistsException("r.getState() != ReplicaState.TEMPORARY, r=" + r);
            }
            temp = r;
        }
        // check generation stamp
        if (temp.getGenerationStamp() != expectedGs) {
            throw new ReplicaAlreadyExistsException("temp.getGenerationStamp() != expectedGs = " + expectedGs + ", temp=" + temp);
        }
        // TODO: check writer?
        // set writer to the current thread
        // temp.setWriter(Thread.currentThread());
        // check length
        final long numBytes = temp.getNumBytes();
        if (numBytes < visible) {
            throw new IOException(numBytes + " = numBytes < visible = " + visible + ", temp=" + temp);
        }
        // check volume
        final FsVolumeImpl v = (FsVolumeImpl) temp.getVolume();
        if (v == null) {
            throw new IOException("r.getVolume() = null, temp=" + temp);
        }
        final ReplicaInPipeline rbw = v.convertTemporaryToRbw(b, temp);
        if (rbw.getState() != ReplicaState.RBW) {
            throw new IOException("Expected replica state: " + ReplicaState.RBW + " obtained " + rbw.getState() + " for converting block " + b.getBlockId());
        }
        // overwrite the RBW in the volume map
        volumeMap.add(b.getBlockPoolId(), rbw.getReplicaInfo());
        return rbw;
    }
}
Also used: ReplicaInfo (org.apache.hadoop.hdfs.server.datanode.ReplicaInfo), AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock), ReplicaNotFoundException (org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException), ReplicaAlreadyExistsException (org.apache.hadoop.hdfs.server.datanode.ReplicaAlreadyExistsException), IOException (java.io.IOException), MultipleIOException (org.apache.hadoop.io.MultipleIOException), ReplicaInPipeline (org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline)
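
The method is effectively a guarded state transition, TEMPORARY to RBW, for a replica that was created by block transfer and is then adopted into a client write pipeline. A hedged usage sketch; the pool ID, block ID, length, and generation stamp below are made-up values, and dataset stands for any FsDatasetSpi implementation:

// Hypothetical call site: promote a transferred (TEMPORARY) replica to RBW
// before it joins a write pipeline. All numeric values are illustrative.
ExtendedBlock b = new ExtendedBlock("BP-1", 1073741825L, 1024L, 1001L);
ReplicaInPipeline rbw = dataset.convertTemporaryToRbw(b);
assert rbw.getState() == ReplicaState.RBW; // guaranteed by the check above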

Aggregations

ReplicaNotFoundException (org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException): 14 uses
ReplicaInfo (org.apache.hadoop.hdfs.server.datanode.ReplicaInfo): 10 uses
IOException (java.io.IOException): 9 uses
AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock): 8 uses
MultipleIOException (org.apache.hadoop.io.MultipleIOException): 7 uses
ReplicaInPipeline (org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline): 4 uses
FsVolumeReference (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference): 4 uses
ReplicaAlreadyExistsException (org.apache.hadoop.hdfs.server.datanode.ReplicaAlreadyExistsException): 3 uses
File (java.io.File): 2 uses
ReplicaHandler (org.apache.hadoop.hdfs.server.datanode.ReplicaHandler): 2 uses
EOFException (java.io.EOFException): 1 use
FileNotFoundException (java.io.FileNotFoundException): 1 use
InterruptedIOException (java.io.InterruptedIOException): 1 use
RandomAccessFile (java.io.RandomAccessFile): 1 use
LinkedList (java.util.LinkedList): 1 use
DfsClientConf (org.apache.hadoop.hdfs.client.impl.DfsClientConf): 1 use
BlockLocalPathInfo (org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo): 1 use
ClientDatanodeProtocol (org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol): 1 use
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 1 use
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 1 use