Example 11 with AutoCloseableLock

use of org.apache.hadoop.util.AutoCloseableLock in project hadoop by apache.

the class FsDatasetImpl method moveBlock.

/**
   * Moves a block from a given volume to another.
   *
   * @param block       - Extended Block
   * @param replicaInfo - ReplicaInfo
   * @param volumeRef   - Volume Ref - Closed by caller.
   * @return newReplicaInfo
   * @throws IOException
   */
private ReplicaInfo moveBlock(ExtendedBlock block, ReplicaInfo replicaInfo, FsVolumeReference volumeRef) throws IOException {
    FsVolumeImpl targetVolume = (FsVolumeImpl) volumeRef.getVolume();
    // Copy files to temp dir first
    ReplicaInfo newReplicaInfo = targetVolume.moveBlockToTmpLocation(block, replicaInfo, smallBufferSize, conf);
    // Finalize the copied files
    newReplicaInfo = finalizeReplica(block.getBlockPoolId(), newReplicaInfo);
    try (AutoCloseableLock lock = datasetLock.acquire()) {
        // Increment numBlocks here because the block was moved without the BPS being aware of it.
        FsVolumeImpl volume = (FsVolumeImpl) newReplicaInfo.getVolume();
        volume.incrNumBlocks(block.getBlockPoolId());
    }
    removeOldReplica(replicaInfo, newReplicaInfo, block.getBlockPoolId());
    return newReplicaInfo;
}
Also used : ReplicaInfo(org.apache.hadoop.hdfs.server.datanode.ReplicaInfo) AutoCloseableLock(org.apache.hadoop.util.AutoCloseableLock)
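
Every example on this page relies on the same idiom: datasetLock.acquire() takes the lock and returns the AutoCloseableLock itself, so a try-with-resources statement releases it when the block exits, even on an exception. Below is a minimal, self-contained sketch of that idiom, assuming the no-arg AutoCloseableLock constructor that wraps a ReentrantLock; the BlockCounter class and its fields are illustrative, not part of FsDatasetImpl.

import org.apache.hadoop.util.AutoCloseableLock;

public class BlockCounter {

    // Wraps a ReentrantLock; acquire() locks it and returns the lock object.
    private final AutoCloseableLock datasetLock = new AutoCloseableLock();

    private long numBlocks;

    public void incrNumBlocks() {
        try (AutoCloseableLock lock = datasetLock.acquire()) {
            numBlocks++;
        } // close() releases the lock here, even if the body throws
    }

    public long getNumBlocks() {
        try (AutoCloseableLock lock = datasetLock.acquire()) {
            return numBlocks;
        }
    }
}

Compared with a synchronized block, this keeps the lock object explicit, so it can be shared across methods and classes as FsDatasetImpl does with datasetLock, while still guaranteeing release on every exit path.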

Example 12 with AutoCloseableLock

use of org.apache.hadoop.util.AutoCloseableLock in project hadoop by apache.

the class FsDatasetImpl method getBlockLocalPathInfo.

// FsDatasetSpi
@Override
public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block) throws IOException {
    try (AutoCloseableLock lock = datasetLock.acquire()) {
        final Replica replica = volumeMap.get(block.getBlockPoolId(), block.getBlockId());
        if (replica == null) {
            throw new ReplicaNotFoundException(block);
        }
        if (replica.getGenerationStamp() < block.getGenerationStamp()) {
            throw new IOException("Replica generation stamp < block generation stamp, block=" + block + ", replica=" + replica);
        } else if (replica.getGenerationStamp() > block.getGenerationStamp()) {
            block.setGenerationStamp(replica.getGenerationStamp());
        }
    }
    ReplicaInfo r = getBlockReplica(block);
    File blockFile = new File(r.getBlockURI());
    File metaFile = new File(r.getMetadataURI());
    BlockLocalPathInfo info = new BlockLocalPathInfo(block, blockFile.getAbsolutePath(), metaFile.toString());
    return info;
}
Also used : ReplicaInfo(org.apache.hadoop.hdfs.server.datanode.ReplicaInfo) AutoCloseableLock(org.apache.hadoop.util.AutoCloseableLock) ReplicaNotFoundException(org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException) BlockLocalPathInfo(org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo) IOException(java.io.IOException) MultipleIOException(org.apache.hadoop.io.MultipleIOException) RamDiskReplica(org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaTracker.RamDiskReplica) Replica(org.apache.hadoop.hdfs.server.datanode.Replica) File(java.io.File)

Example 13 with AutoCloseableLock

use of org.apache.hadoop.util.AutoCloseableLock in project hadoop by apache.

the class FsDatasetImpl method createRbw.

// FsDatasetSpi
@Override
public ReplicaHandler createRbw(StorageType storageType, ExtendedBlock b, boolean allowLazyPersist) throws IOException {
    try (AutoCloseableLock lock = datasetLock.acquire()) {
        ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(), b.getBlockId());
        if (replicaInfo != null) {
            throw new ReplicaAlreadyExistsException("Block " + b + " already exists in state " + replicaInfo.getState() + " and thus cannot be created.");
        }
        // create a new block
        FsVolumeReference ref = null;
        // Use RAM disk only if the block size is a multiple of the OS page size
        // and the required locked memory can be reserved; this simplifies
        // reservation accounting for partially used replicas.
        if (allowLazyPersist && lazyWriter != null && b.getNumBytes() % cacheManager.getOsPageSize() == 0 && reserveLockedMemory(b.getNumBytes())) {
            try {
                // First try to place the block on a transient volume.
                ref = volumes.getNextTransientVolume(b.getNumBytes());
                datanode.getMetrics().incrRamDiskBlocksWrite();
            } catch (DiskOutOfSpaceException de) {
            // Ignore the exception since we just fall back to persistent storage.
            } finally {
                if (ref == null) {
                    cacheManager.release(b.getNumBytes());
                }
            }
        }
        if (ref == null) {
            ref = volumes.getNextVolume(storageType, b.getNumBytes());
        }
        FsVolumeImpl v = (FsVolumeImpl) ref.getVolume();
        if (allowLazyPersist && !v.isTransientStorage()) {
            datanode.getMetrics().incrRamDiskBlocksWriteFallback();
        }
        ReplicaInPipeline newReplicaInfo;
        try {
            newReplicaInfo = v.createRbw(b);
            if (newReplicaInfo.getReplicaInfo().getState() != ReplicaState.RBW) {
                throw new IOException("CreateRBW returned a replica of state " + newReplicaInfo.getReplicaInfo().getState() + " for block " + b.getBlockId());
            }
        } catch (IOException e) {
            IOUtils.cleanup(null, ref);
            throw e;
        }
        volumeMap.add(b.getBlockPoolId(), newReplicaInfo.getReplicaInfo());
        return new ReplicaHandler(newReplicaInfo, ref);
    }
}
Also used : DiskOutOfSpaceException(org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException) FsVolumeReference(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference) ReplicaInfo(org.apache.hadoop.hdfs.server.datanode.ReplicaInfo) AutoCloseableLock(org.apache.hadoop.util.AutoCloseableLock) ReplicaAlreadyExistsException(org.apache.hadoop.hdfs.server.datanode.ReplicaAlreadyExistsException) IOException(java.io.IOException) MultipleIOException(org.apache.hadoop.io.MultipleIOException) ReplicaInPipeline(org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline) ReplicaHandler(org.apache.hadoop.hdfs.server.datanode.ReplicaHandler)

Example 14 with AutoCloseableLock

use of org.apache.hadoop.util.AutoCloseableLock in project hadoop by apache.

the class FsDatasetImpl method invalidate.

/**
   * We're informed that a block is no longer valid. Delete it.
   */
// FsDatasetSpi
@Override
public void invalidate(String bpid, Block[] invalidBlks) throws IOException {
    final List<String> errors = new ArrayList<String>();
    for (int i = 0; i < invalidBlks.length; i++) {
        final ReplicaInfo removing;
        final FsVolumeImpl v;
        try (AutoCloseableLock lock = datasetLock.acquire()) {
            final ReplicaInfo info = volumeMap.get(bpid, invalidBlks[i]);
            if (info == null) {
                ReplicaInfo infoByBlockId = volumeMap.get(bpid, invalidBlks[i].getBlockId());
                if (infoByBlockId == null) {
                    // It is okay if the block is not found -- it
                    // may have been deleted earlier.
                    LOG.info("Failed to delete replica " + invalidBlks[i] + ": ReplicaInfo not found.");
                } else {
                    errors.add("Failed to delete replica " + invalidBlks[i] + ": GenerationStamp not matched, existing replica is " + Block.toString(infoByBlockId));
                }
                continue;
            }
            v = (FsVolumeImpl) info.getVolume();
            if (v == null) {
                errors.add("Failed to delete replica " + invalidBlks[i] + ". No volume for replica " + info);
                continue;
            }
            try {
                File blockFile = new File(info.getBlockURI());
                if (blockFile != null && blockFile.getParentFile() == null) {
                    errors.add("Failed to delete replica " + invalidBlks[i] + ". Parent not found for block file: " + blockFile);
                    continue;
                }
            } catch (IllegalArgumentException e) {
                LOG.warn("Parent directory check failed; replica " + info + " is not backed by a local file");
            }
            removing = volumeMap.remove(bpid, invalidBlks[i]);
            addDeletingBlock(bpid, removing.getBlockId());
            if (LOG.isDebugEnabled()) {
                LOG.debug("Block file " + removing.getBlockURI() + " is to be deleted");
            }
            if (removing instanceof ReplicaInPipeline) {
                ((ReplicaInPipeline) removing).releaseAllBytesReserved();
            }
        }
        if (v.isTransientStorage()) {
            RamDiskReplica replicaInfo = ramDiskReplicaTracker.getReplica(bpid, invalidBlks[i].getBlockId());
            if (replicaInfo != null) {
                if (!replicaInfo.getIsPersisted()) {
                    datanode.getMetrics().incrRamDiskBlocksDeletedBeforeLazyPersisted();
                }
                ramDiskReplicaTracker.discardReplica(replicaInfo.getBlockPoolId(), replicaInfo.getBlockId(), true);
            }
        }
        // If a DFSClient has the replica in its cache of short-circuit file
        // descriptors (and the client is using ShortCircuitShm), invalidate it.
        datanode.getShortCircuitRegistry().processBlockInvalidation(new ExtendedBlockId(invalidBlks[i].getBlockId(), bpid));
        // If the block is cached, start uncaching it.
        cacheManager.uncacheBlock(bpid, invalidBlks[i].getBlockId());
        // Delete the block file asynchronously; it is okay to unlink it before
        // the uncache operation finishes.
        try {
            asyncDiskService.deleteAsync(v.obtainReference(), removing, new ExtendedBlock(bpid, invalidBlks[i]), dataStorage.getTrashDirectoryForReplica(bpid, removing));
        } catch (ClosedChannelException e) {
            LOG.warn("Volume " + v + " is closed, ignore the deletion task for " + "block " + invalidBlks[i]);
        }
    }
    if (!errors.isEmpty()) {
        StringBuilder b = new StringBuilder("Failed to delete ").append(errors.size()).append(" (out of ").append(invalidBlks.length).append(") replica(s):");
        for (int i = 0; i < errors.size(); i++) {
            b.append("\n").append(i).append(") ").append(errors.get(i));
        }
        throw new IOException(b.toString());
    }
}
Also used : ClosedChannelException(java.nio.channels.ClosedChannelException) ReplicaInfo(org.apache.hadoop.hdfs.server.datanode.ReplicaInfo) ExtendedBlockId(org.apache.hadoop.hdfs.ExtendedBlockId) ArrayList(java.util.ArrayList) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) IOException(java.io.IOException) MultipleIOException(org.apache.hadoop.io.MultipleIOException) AutoCloseableLock(org.apache.hadoop.util.AutoCloseableLock) ReplicaInPipeline(org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline) File(java.io.File) RamDiskReplica(org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaTracker.RamDiskReplica)
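
A point worth noting in the invalidate example above: the dataset lock is held only while the in-memory volumeMap is consulted and mutated, and the actual file deletion is handed off to asyncDiskService outside the lock. A rough sketch of that shape, with illustrative names (AsyncInvalidator, blockMap, asyncDeleter) that are not FsDatasetImpl APIs:

import java.io.File;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import org.apache.hadoop.util.AutoCloseableLock;

public class AsyncInvalidator {

    private final AutoCloseableLock datasetLock = new AutoCloseableLock();
    // Illustrative stand-in for volumeMap: block id -> block file path.
    private final Map<Long, String> blockMap = new HashMap<>();
    private final ExecutorService asyncDeleter = Executors.newSingleThreadExecutor();

    public void invalidate(long blockId) {
        final String path;
        // Hold the lock only for the fast, in-memory removal.
        try (AutoCloseableLock lock = datasetLock.acquire()) {
            path = blockMap.remove(blockId);
        }
        if (path != null) {
            // Slow disk I/O is scheduled outside the lock, mirroring how
            // FsDatasetImpl.invalidate hands deletion to asyncDiskService.
            asyncDeleter.execute(() -> new File(path).delete());
        }
    }
}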

Example 15 with AutoCloseableLock

use of org.apache.hadoop.util.AutoCloseableLock in project hadoop by apache.

the class FsDatasetImpl method recoverAppend.

// FsDatasetSpi
@Override
public ReplicaHandler recoverAppend(ExtendedBlock b, long newGS, long expectedBlockLen) throws IOException {
    LOG.info("Recover failed append to " + b);
    while (true) {
        try {
            try (AutoCloseableLock lock = datasetLock.acquire()) {
                ReplicaInfo replicaInfo = recoverCheck(b, newGS, expectedBlockLen);
                FsVolumeReference ref = replicaInfo.getVolume().obtainReference();
                ReplicaInPipeline replica;
                try {
                    // change the replica's state/gs etc.
                    if (replicaInfo.getState() == ReplicaState.FINALIZED) {
                        replica = append(b.getBlockPoolId(), replicaInfo, newGS, b.getNumBytes());
                    } else {
                        //RBW
                        replicaInfo.bumpReplicaGS(newGS);
                        replica = (ReplicaInPipeline) replicaInfo;
                    }
                } catch (IOException e) {
                    IOUtils.cleanup(null, ref);
                    throw e;
                }
                return new ReplicaHandler(replica, ref);
            }
        } catch (MustStopExistingWriter e) {
            e.getReplicaInPipeline().stopWriter(datanode.getDnConf().getXceiverStopTimeout());
        }
    }
}
Also used : FsVolumeReference(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference) ReplicaInfo(org.apache.hadoop.hdfs.server.datanode.ReplicaInfo) AutoCloseableLock(org.apache.hadoop.util.AutoCloseableLock) IOException(java.io.IOException) MultipleIOException(org.apache.hadoop.io.MultipleIOException) ReplicaInPipeline(org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline) ReplicaHandler(org.apache.hadoop.hdfs.server.datanode.ReplicaHandler)
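
recoverAppend wraps the locked section in a while (true) loop: if recoverCheck finds an active writer, a MustStopExistingWriter exception escapes the lock, the writer is stopped outside the lock, and the whole operation is retried. A simplified sketch of that retry pattern, using a hypothetical StopWriterException and a plain Thread in place of the real DataNode writer classes:

import org.apache.hadoop.util.AutoCloseableLock;

public class RetryingRecovery {

    /** Hypothetical exception carrying the writer thread that must be stopped first. */
    static class StopWriterException extends Exception {
        final Thread writer;
        StopWriterException(Thread writer) {
            this.writer = writer;
        }
    }

    private final AutoCloseableLock datasetLock = new AutoCloseableLock();

    public String recover(long blockId) throws InterruptedException {
        while (true) {
            try {
                // All shared-state checks and updates happen under the lock.
                try (AutoCloseableLock lock = datasetLock.acquire()) {
                    return doRecoverUnderLock(blockId);
                }
            } catch (StopWriterException e) {
                // Stop the conflicting writer outside the lock, then retry.
                e.writer.interrupt();
                e.writer.join();
            }
        }
    }

    // Placeholder for the locked recovery step (recoverCheck/append in the real code).
    private String doRecoverUnderLock(long blockId) throws StopWriterException {
        return "recovered-" + blockId;
    }
}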

Aggregations

AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock): 44
ReplicaInfo (org.apache.hadoop.hdfs.server.datanode.ReplicaInfo): 27
IOException (java.io.IOException): 23
MultipleIOException (org.apache.hadoop.io.MultipleIOException): 17
FsVolumeReference (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference): 10
ReplicaInPipeline (org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline): 9
ReplicaNotFoundException (org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException): 8
File (java.io.File): 7
ReplicaHandler (org.apache.hadoop.hdfs.server.datanode.ReplicaHandler): 5
FsVolumeSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi): 5
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 4
ReplicaAlreadyExistsException (org.apache.hadoop.hdfs.server.datanode.ReplicaAlreadyExistsException): 4
RecoveringBlock (org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock): 4
DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage): 4
ArrayList (java.util.ArrayList): 3
HashMap (java.util.HashMap): 3
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 3
Block (org.apache.hadoop.hdfs.protocol.Block): 3
BlockListAsLongs (org.apache.hadoop.hdfs.protocol.BlockListAsLongs): 3
FsDatasetSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi): 3