Search in sources:

Example 1 with ReplicaInPipeline

Use of org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline in project hadoop by apache, in class FsDatasetImpl, method createRbw.

// FsDatasetSpi
@Override
public ReplicaHandler createRbw(StorageType storageType, ExtendedBlock b, boolean allowLazyPersist) throws IOException {
    try (AutoCloseableLock lock = datasetLock.acquire()) {
        ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(), b.getBlockId());
        if (replicaInfo != null) {
            throw new ReplicaAlreadyExistsException("Block " + b + " already exists in state " + replicaInfo.getState() + " and thus cannot be created.");
        }
        // create a new block
        FsVolumeReference ref = null;
        // Use ramdisk only if the block size is a multiple of the OS page size.
        // This simplifies reservation for partially used replicas significantly.
        if (allowLazyPersist && lazyWriter != null && b.getNumBytes() % cacheManager.getOsPageSize() == 0 && reserveLockedMemory(b.getNumBytes())) {
            try {
                // First try to place the block on a transient volume.
                ref = volumes.getNextTransientVolume(b.getNumBytes());
                datanode.getMetrics().incrRamDiskBlocksWrite();
            } catch (DiskOutOfSpaceException de) {
            // Ignore the exception since we just fall back to persistent storage.
            } finally {
                if (ref == null) {
                    cacheManager.release(b.getNumBytes());
                }
            }
        }
        if (ref == null) {
            ref = volumes.getNextVolume(storageType, b.getNumBytes());
        }
        FsVolumeImpl v = (FsVolumeImpl) ref.getVolume();
        if (allowLazyPersist && !v.isTransientStorage()) {
            datanode.getMetrics().incrRamDiskBlocksWriteFallback();
        }
        ReplicaInPipeline newReplicaInfo;
        try {
            newReplicaInfo = v.createRbw(b);
            if (newReplicaInfo.getReplicaInfo().getState() != ReplicaState.RBW) {
                throw new IOException("CreateRBW returned a replica of state " + newReplicaInfo.getReplicaInfo().getState() + " for block " + b.getBlockId());
            }
        } catch (IOException e) {
            IOUtils.cleanup(null, ref);
            throw e;
        }
        volumeMap.add(b.getBlockPoolId(), newReplicaInfo.getReplicaInfo());
        return new ReplicaHandler(newReplicaInfo, ref);
    }
}
Also used: DiskOutOfSpaceException (org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException), FsVolumeReference (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference), ReplicaInfo (org.apache.hadoop.hdfs.server.datanode.ReplicaInfo), AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock), ReplicaAlreadyExistsException (org.apache.hadoop.hdfs.server.datanode.ReplicaAlreadyExistsException), IOException (java.io.IOException), MultipleIOException (org.apache.hadoop.io.MultipleIOException), ReplicaInPipeline (org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline), ReplicaHandler (org.apache.hadoop.hdfs.server.datanode.ReplicaHandler)
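
The lazy-persist branch in createRbw follows a reserve/try/release shape: locked memory is reserved up front, a transient volume is attempted, and the reservation is released in the finally block whenever no volume reference was obtained, before falling back to ordinary persistent storage. A minimal, self-contained sketch of that pattern is below; the Quota and Store interfaces and the place helper are hypothetical stand-ins for cacheManager, the transient volumes, and the persistent volumes, not Hadoop APIs.

import java.io.IOException;

class ReserveTryFallback {

    // Hypothetical stand-in for the locked-memory accounting (cacheManager).
    interface Quota {
        boolean reserve(long bytes);
        void release(long bytes);
    }

    // Hypothetical stand-in for a set of volumes that can host a new replica.
    interface Store {
        AutoCloseable allocate(long bytes) throws IOException;
    }

    // Reserve quota, try the preferred store, undo the reservation if the
    // preferred store could not take the block, then fall back.
    static AutoCloseable place(long bytes, Quota quota, Store preferred, Store fallback)
            throws IOException {
        AutoCloseable ref = null;
        if (quota.reserve(bytes)) {
            try {
                ref = preferred.allocate(bytes);
            } catch (IOException e) {
                // Out of space on the preferred store; fall through to the fallback.
            } finally {
                if (ref == null) {
                    quota.release(bytes);
                }
            }
        }
        if (ref == null) {
            ref = fallback.allocate(bytes);
        }
        return ref;
    }
}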

Example 2 with ReplicaInPipeline

Use of org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline in project hadoop by apache, in class FsDatasetImpl, method invalidate.

/**
   * We're informed that a block is no longer valid. Delete it.
   */
// FsDatasetSpi
@Override
public void invalidate(String bpid, Block[] invalidBlks) throws IOException {
    final List<String> errors = new ArrayList<String>();
    for (int i = 0; i < invalidBlks.length; i++) {
        final ReplicaInfo removing;
        final FsVolumeImpl v;
        try (AutoCloseableLock lock = datasetLock.acquire()) {
            final ReplicaInfo info = volumeMap.get(bpid, invalidBlks[i]);
            if (info == null) {
                ReplicaInfo infoByBlockId = volumeMap.get(bpid, invalidBlks[i].getBlockId());
                if (infoByBlockId == null) {
                    // It is okay if the block is not found -- it
                    // may be deleted earlier.
                    LOG.info("Failed to delete replica " + invalidBlks[i] + ": ReplicaInfo not found.");
                } else {
                    errors.add("Failed to delete replica " + invalidBlks[i] + ": GenerationStamp not matched, existing replica is " + Block.toString(infoByBlockId));
                }
                continue;
            }
            v = (FsVolumeImpl) info.getVolume();
            if (v == null) {
                errors.add("Failed to delete replica " + invalidBlks[i] + ". No volume for replica " + info);
                continue;
            }
            try {
                File blockFile = new File(info.getBlockURI());
                if (blockFile != null && blockFile.getParentFile() == null) {
                    errors.add("Failed to delete replica " + invalidBlks[i] + ". Parent not found for block file: " + blockFile);
                    continue;
                }
            } catch (IllegalArgumentException e) {
                LOG.warn("Parent directory check failed; replica " + info + " is not backed by a local file");
            }
            removing = volumeMap.remove(bpid, invalidBlks[i]);
            addDeletingBlock(bpid, removing.getBlockId());
            if (LOG.isDebugEnabled()) {
                LOG.debug("Block file " + removing.getBlockURI() + " is to be deleted");
            }
            if (removing instanceof ReplicaInPipeline) {
                ((ReplicaInPipeline) removing).releaseAllBytesReserved();
            }
        }
        if (v.isTransientStorage()) {
            RamDiskReplica replicaInfo = ramDiskReplicaTracker.getReplica(bpid, invalidBlks[i].getBlockId());
            if (replicaInfo != null) {
                if (!replicaInfo.getIsPersisted()) {
                    datanode.getMetrics().incrRamDiskBlocksDeletedBeforeLazyPersisted();
                }
                ramDiskReplicaTracker.discardReplica(replicaInfo.getBlockPoolId(), replicaInfo.getBlockId(), true);
            }
        }
        // If a DFSClient has the replica in its cache of short-circuit file
        // descriptors (and the client is using ShortCircuitShm), invalidate it.
        datanode.getShortCircuitRegistry().processBlockInvalidation(new ExtendedBlockId(invalidBlks[i].getBlockId(), bpid));
        // If the block is cached, start uncaching it.
        cacheManager.uncacheBlock(bpid, invalidBlks[i].getBlockId());
        // Delete the block asynchronously to make sure it can be done fast
        // enough. It is okay to unlink the block file before the uncache
        // operation finishes.
        try {
            asyncDiskService.deleteAsync(v.obtainReference(), removing, new ExtendedBlock(bpid, invalidBlks[i]), dataStorage.getTrashDirectoryForReplica(bpid, removing));
        } catch (ClosedChannelException e) {
            LOG.warn("Volume " + v + " is closed, ignore the deletion task for " + "block " + invalidBlks[i]);
        }
    }
    if (!errors.isEmpty()) {
        StringBuilder b = new StringBuilder("Failed to delete ").append(errors.size()).append(" (out of ").append(invalidBlks.length).append(") replica(s):");
        for (int i = 0; i < errors.size(); i++) {
            b.append("\n").append(i).append(") ").append(errors.get(i));
        }
        throw new IOException(b.toString());
    }
}
Also used: ClosedChannelException (java.nio.channels.ClosedChannelException), ReplicaInfo (org.apache.hadoop.hdfs.server.datanode.ReplicaInfo), ExtendedBlockId (org.apache.hadoop.hdfs.ExtendedBlockId), ArrayList (java.util.ArrayList), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), IOException (java.io.IOException), MultipleIOException (org.apache.hadoop.io.MultipleIOException), AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock), ReplicaInPipeline (org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline), File (java.io.File), RamDiskReplica (org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaTracker.RamDiskReplica)
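
invalidate deliberately keeps going when an individual block cannot be deleted: per-block failures are appended to an errors list, the loop continues, and a single IOException summarizing all failures is thrown at the end. A short sketch of that accumulate-then-throw pattern, with a hypothetical Deleter interface in place of the real per-block deletion logic:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

class BatchDelete {

    // Hypothetical per-item operation; throws on failure.
    interface Deleter {
        void deleteOne(String id) throws IOException;
    }

    // Try every item, remember the failures, and report them all at once so a
    // single bad item does not stop the remaining items from being processed.
    static void deleteAll(List<String> ids, Deleter deleter) throws IOException {
        final List<String> errors = new ArrayList<>();
        for (String id : ids) {
            try {
                deleter.deleteOne(id);
            } catch (IOException e) {
                errors.add("Failed to delete " + id + ": " + e.getMessage());
            }
        }
        if (!errors.isEmpty()) {
            StringBuilder b = new StringBuilder("Failed to delete ")
                .append(errors.size()).append(" (out of ").append(ids.size())
                .append(") item(s):");
            for (int i = 0; i < errors.size(); i++) {
                b.append("\n").append(i).append(") ").append(errors.get(i));
            }
            throw new IOException(b.toString());
        }
    }
}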

Example 3 with ReplicaInPipeline

Use of org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline in project hadoop by apache, in class FsDatasetImpl, method recoverAppend.

// FsDatasetSpi
@Override
public ReplicaHandler recoverAppend(ExtendedBlock b, long newGS, long expectedBlockLen) throws IOException {
    LOG.info("Recover failed append to " + b);
    while (true) {
        try {
            try (AutoCloseableLock lock = datasetLock.acquire()) {
                ReplicaInfo replicaInfo = recoverCheck(b, newGS, expectedBlockLen);
                FsVolumeReference ref = replicaInfo.getVolume().obtainReference();
                ReplicaInPipeline replica;
                try {
                    // change the replica's state/gs etc.
                    if (replicaInfo.getState() == ReplicaState.FINALIZED) {
                        replica = append(b.getBlockPoolId(), replicaInfo, newGS, b.getNumBytes());
                    } else {
                        // RBW replica: reuse it after bumping the generation stamp.
                        replicaInfo.bumpReplicaGS(newGS);
                        replica = (ReplicaInPipeline) replicaInfo;
                    }
                } catch (IOException e) {
                    IOUtils.cleanup(null, ref);
                    throw e;
                }
                return new ReplicaHandler(replica, ref);
            }
        } catch (MustStopExistingWriter e) {
            e.getReplicaInPipeline().stopWriter(datanode.getDnConf().getXceiverStopTimeout());
        }
    }
}
Also used: FsVolumeReference (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference), ReplicaInfo (org.apache.hadoop.hdfs.server.datanode.ReplicaInfo), AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock), IOException (java.io.IOException), MultipleIOException (org.apache.hadoop.io.MultipleIOException), ReplicaInPipeline (org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline), ReplicaHandler (org.apache.hadoop.hdfs.server.datanode.ReplicaHandler)
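
recoverAppend (and recoverRbw in Example 5) are wrapped in a while (true) loop: when the replica still has an active writer, a MustStopExistingWriter escapes the locked section, the existing writer is stopped outside the lock via stopWriter(timeout), and the whole operation is retried. The sketch below shows only that retry shape; the Op interface and StopNeeded exception are hypothetical stand-ins for the dataset call and MustStopExistingWriter.

import java.io.IOException;

class StopWriterRetry {

    // Hypothetical equivalent of MustStopExistingWriter: carries the thread
    // that currently owns the replica and has to be stopped first.
    static class StopNeeded extends Exception {
        final Thread writer;
        StopNeeded(Thread writer) { this.writer = writer; }
    }

    // Hypothetical stand-in for the locked dataset operation.
    interface Op<T> {
        T run() throws IOException, StopNeeded;
    }

    // Retry the operation until it succeeds; each time it reports a competing
    // writer, interrupt and wait for that writer outside the lock, then retry.
    static <T> T runWithRetry(Op<T> op, long joinTimeoutMs)
            throws IOException, InterruptedException {
        while (true) {
            try {
                return op.run();
            } catch (StopNeeded e) {
                e.writer.interrupt();
                e.writer.join(joinTimeoutMs); // roughly what stopWriter(timeout) does
            }
        }
    }
}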

Example 4 with ReplicaInPipeline

Use of org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline in project hadoop by apache, in class FsDatasetImpl, method append.

// FsDatasetSpi
@Override
public ReplicaHandler append(ExtendedBlock b, long newGS, long expectedBlockLen) throws IOException {
    try (AutoCloseableLock lock = datasetLock.acquire()) {
        // check the validity of the parameter
        if (newGS < b.getGenerationStamp()) {
            throw new IOException("The new generation stamp " + newGS + " should be greater than the replica " + b + "'s generation stamp");
        }
        ReplicaInfo replicaInfo = getReplicaInfo(b);
        LOG.info("Appending to " + replicaInfo);
        if (replicaInfo.getState() != ReplicaState.FINALIZED) {
            throw new ReplicaNotFoundException(ReplicaNotFoundException.UNFINALIZED_REPLICA + b);
        }
        if (replicaInfo.getNumBytes() != expectedBlockLen) {
            throw new IOException("Corrupted replica " + replicaInfo + " with a length of " + replicaInfo.getNumBytes() + " expected length is " + expectedBlockLen);
        }
        FsVolumeReference ref = replicaInfo.getVolume().obtainReference();
        ReplicaInPipeline replica = null;
        try {
            replica = append(b.getBlockPoolId(), replicaInfo, newGS, b.getNumBytes());
        } catch (IOException e) {
            IOUtils.cleanup(null, ref);
            throw e;
        }
        return new ReplicaHandler(replica, ref);
    }
}
Also used: FsVolumeReference (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference), ReplicaInfo (org.apache.hadoop.hdfs.server.datanode.ReplicaInfo), AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock), ReplicaNotFoundException (org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException), IOException (java.io.IOException), MultipleIOException (org.apache.hadoop.io.MultipleIOException), ReplicaInPipeline (org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline), ReplicaHandler (org.apache.hadoop.hdfs.server.datanode.ReplicaHandler)
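
Every method above takes the dataset lock with try (AutoCloseableLock lock = datasetLock.acquire()) { ... }, so the lock is released on every exit path, including exceptions. The sketch below shows how such a try-with-resources lock wrapper can be built over ReentrantLock; it is an illustration of the idiom, not the source of org.apache.hadoop.util.AutoCloseableLock.

import java.util.concurrent.locks.ReentrantLock;

// A lock usable in try-with-resources: acquire() locks and returns this,
// close() unlocks, so leaving the try block always releases the lock.
class CloseableLock implements AutoCloseable {

    private final ReentrantLock lock = new ReentrantLock();

    public CloseableLock acquire() {
        lock.lock();
        return this;
    }

    @Override
    public void close() {
        lock.unlock();
    }
}

class CloseableLockDemo {

    private final CloseableLock datasetLock = new CloseableLock();

    void update() {
        try (CloseableLock l = datasetLock.acquire()) {
            // mutate shared state under the lock
        } // lock released here, even if the body throws
    }
}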

Example 5 with ReplicaInPipeline

Use of org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline in project hadoop by apache, in class FsDatasetImpl, method recoverRbw.

// FsDatasetSpi
@Override
public ReplicaHandler recoverRbw(ExtendedBlock b, long newGS, long minBytesRcvd, long maxBytesRcvd) throws IOException {
    LOG.info("Recover RBW replica " + b);
    while (true) {
        try {
            try (AutoCloseableLock lock = datasetLock.acquire()) {
                ReplicaInfo replicaInfo = getReplicaInfo(b.getBlockPoolId(), b.getBlockId());
                // check the replica's state
                if (replicaInfo.getState() != ReplicaState.RBW) {
                    throw new ReplicaNotFoundException(ReplicaNotFoundException.NON_RBW_REPLICA + replicaInfo);
                }
                ReplicaInPipeline rbw = (ReplicaInPipeline) replicaInfo;
                if (!rbw.attemptToSetWriter(null, Thread.currentThread())) {
                    throw new MustStopExistingWriter(rbw);
                }
                LOG.info("At " + datanode.getDisplayName() + ", Recovering " + rbw);
                return recoverRbwImpl(rbw, b, newGS, minBytesRcvd, maxBytesRcvd);
            }
        } catch (MustStopExistingWriter e) {
            e.getReplicaInPipeline().stopWriter(datanode.getDnConf().getXceiverStopTimeout());
        }
    }
}
Also used: ReplicaInfo (org.apache.hadoop.hdfs.server.datanode.ReplicaInfo), AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock), ReplicaNotFoundException (org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException), ReplicaInPipeline (org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline)
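
recoverRbw claims the replica for the recovering thread with rbw.attemptToSetWriter(null, Thread.currentThread()): the takeover only succeeds if no writer is currently registered; otherwise the caller throws MustStopExistingWriter, stops that writer, and retries. A plausible sketch of such an ownership handoff using an AtomicReference compare-and-set (the actual ReplicaInPipeline implementation may differ):

import java.util.concurrent.atomic.AtomicReference;

// Tracks which thread currently owns the write pipeline for a replica.
class WriterOwner {

    private final AtomicReference<Thread> writer = new AtomicReference<>();

    // Atomically replace the expected previous writer with the new one.
    // Returns false if some other thread still owns the replica, in which
    // case the caller has to stop that thread and try again.
    boolean attemptToSetWriter(Thread prev, Thread next) {
        return writer.compareAndSet(prev, next);
    }

    Thread currentWriter() {
        return writer.get();
    }
}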

Aggregations

ReplicaInPipeline (org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline): 14
IOException (java.io.IOException): 12
ReplicaInfo (org.apache.hadoop.hdfs.server.datanode.ReplicaInfo): 10
MultipleIOException (org.apache.hadoop.io.MultipleIOException): 10
AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock): 9
ReplicaAlreadyExistsException (org.apache.hadoop.hdfs.server.datanode.ReplicaAlreadyExistsException): 5
ReplicaNotFoundException (org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException): 5
ReplicaHandler (org.apache.hadoop.hdfs.server.datanode.ReplicaHandler): 4
FsVolumeReference (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference): 4
DiskOutOfSpaceException (org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException): 2
File (java.io.File): 1
ClosedChannelException (java.nio.channels.ClosedChannelException): 1
ArrayList (java.util.ArrayList): 1
Random (java.util.Random): 1
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 1
Configuration (org.apache.hadoop.conf.Configuration): 1
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 1
Path (org.apache.hadoop.fs.Path): 1
ExtendedBlockId (org.apache.hadoop.hdfs.ExtendedBlockId): 1
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 1