
Example 6 with AutoCloseableLock

Use of org.apache.hadoop.util.AutoCloseableLock in project hadoop by apache.

The class FsDatasetImpl, method moveBlockAcrossStorage.

/**
   * Move block files from one storage to another storage.
   * @return the old ReplicaInfo
   * @throws IOException if the replica is unfinalized, corrupted, or the move fails
   */
@Override
public ReplicaInfo moveBlockAcrossStorage(ExtendedBlock block, StorageType targetStorageType) throws IOException {
    ReplicaInfo replicaInfo = getReplicaInfo(block);
    if (replicaInfo.getState() != ReplicaState.FINALIZED) {
        throw new ReplicaNotFoundException(ReplicaNotFoundException.UNFINALIZED_REPLICA + block);
    }
    if (replicaInfo.getNumBytes() != block.getNumBytes()) {
        throw new IOException("Corrupted replica " + replicaInfo + " with a length of " + replicaInfo.getNumBytes() + " expected length is " + block.getNumBytes());
    }
    if (replicaInfo.getVolume().getStorageType() == targetStorageType) {
        throw new ReplicaAlreadyExistsException("Replica " + replicaInfo + " already exists on storage " + targetStorageType);
    }
    if (replicaInfo.isOnTransientStorage()) {
        // Block movement from RAM_DISK is handled by the LazyPersist mechanism
        throw new IOException("Replica " + replicaInfo + " cannot be moved from storageType : " + replicaInfo.getVolume().getStorageType());
    }
    FsVolumeReference volumeRef = null;
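    // Pick a target volume of the requested storage type while holding the
    // dataset lock, so the volume set cannot change during selection.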
    try (AutoCloseableLock lock = datasetLock.acquire()) {
        volumeRef = volumes.getNextVolume(targetStorageType, block.getNumBytes());
    }
    try {
        moveBlock(block, replicaInfo, volumeRef);
    } finally {
        if (volumeRef != null) {
            volumeRef.close();
        }
    }
    // Return the old replica so the caller can replace it and reschedule scanning.
    return replicaInfo;
}
Also used: FsVolumeReference(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference) ReplicaInfo(org.apache.hadoop.hdfs.server.datanode.ReplicaInfo) ReplicaNotFoundException(org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException) AutoCloseableLock(org.apache.hadoop.util.AutoCloseableLock) ReplicaAlreadyExistsException(org.apache.hadoop.hdfs.server.datanode.ReplicaAlreadyExistsException) IOException(java.io.IOException) MultipleIOException(org.apache.hadoop.io.MultipleIOException)
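
The pattern in this example relies on AutoCloseableLock implementing AutoCloseable: try-with-resources releases the lock on every exit path, including exceptions. Below is a minimal standalone sketch of the idiom, assuming the no-argument ReentrantLock-backed constructor (the Counter class and its fields are illustrative, not part of Hadoop):

import org.apache.hadoop.util.AutoCloseableLock;

public class Counter {
    // acquire() takes the underlying lock and returns this same object,
    // so the implicit close() at the end of the try block releases it.
    private final AutoCloseableLock lock = new AutoCloseableLock();

    private long value;

    public void increment(long delta) {
        try (AutoCloseableLock l = lock.acquire()) {
            // Mutation guarded by the lock.
            value += delta;
        }
        // The lock is released here even if the body threw.
    }
}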

Example 7 with AutoCloseableLock

Use of org.apache.hadoop.util.AutoCloseableLock in project hadoop by apache.

The class FsVolumeImpl, method incDfsUsedAndNumBlocks.

void incDfsUsedAndNumBlocks(String bpid, long value) {
    try (AutoCloseableLock lock = dataset.acquireDatasetLock()) {
        BlockPoolSlice bp = bpSlices.get(bpid);
        if (bp != null) {
            bp.incDfsUsed(value);
            bp.incrNumBlocks();
        }
    }
}
Also used: AutoCloseableLock(org.apache.hadoop.util.AutoCloseableLock)
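
Unlike the FsDatasetImpl examples, FsVolumeImpl does not own the lock; it borrows the dataset-wide lock through acquireDatasetLock(), so volume counters are updated under the same mutual exclusion as the rest of the dataset state. A hedged sketch of that accessor pattern (the Dataset class name and field are assumptions for illustration):

import org.apache.hadoop.util.AutoCloseableLock;

class Dataset {
    private final AutoCloseableLock datasetLock = new AutoCloseableLock();

    // Return the lock already acquired, so the caller's
    // try-with-resources block both holds and releases it.
    AutoCloseableLock acquireDatasetLock() {
        return datasetLock.acquire();
    }
}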

Example 8 with AutoCloseableLock

Use of org.apache.hadoop.util.AutoCloseableLock in project hadoop by apache.

The class FsDatasetImpl, method finalizeReplica.

private ReplicaInfo finalizeReplica(String bpid, ReplicaInfo replicaInfo) throws IOException {
    try (AutoCloseableLock lock = datasetLock.acquire()) {
        ReplicaInfo newReplicaInfo = null;
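        // RUR means replica-under-recovery. If the original replica it wraps
        // is already finalized, reuse that replica instead of finalizing again.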
        if (replicaInfo.getState() == ReplicaState.RUR && replicaInfo.getOriginalReplica().getState() == ReplicaState.FINALIZED) {
            newReplicaInfo = replicaInfo.getOriginalReplica();
        } else {
            FsVolumeImpl v = (FsVolumeImpl) replicaInfo.getVolume();
            if (v == null) {
                throw new IOException("No volume for block " + replicaInfo);
            }
            newReplicaInfo = v.addFinalizedBlock(bpid, replicaInfo, replicaInfo, replicaInfo.getBytesReserved());
            if (v.isTransientStorage()) {
                releaseLockedMemory(replicaInfo.getOriginalBytesReserved() - replicaInfo.getNumBytes(), false);
                ramDiskReplicaTracker.addReplica(bpid, replicaInfo.getBlockId(), v, replicaInfo.getNumBytes());
                datanode.getMetrics().addRamDiskBytesWrite(replicaInfo.getNumBytes());
            }
        }
        assert newReplicaInfo.getState() == ReplicaState.FINALIZED : "Replica should be finalized";
        volumeMap.add(bpid, newReplicaInfo);
        return newReplicaInfo;
    }
}
Also used: ReplicaInfo(org.apache.hadoop.hdfs.server.datanode.ReplicaInfo) AutoCloseableLock(org.apache.hadoop.util.AutoCloseableLock) IOException(java.io.IOException) MultipleIOException(org.apache.hadoop.io.MultipleIOException)

Example 9 with AutoCloseableLock

Use of org.apache.hadoop.util.AutoCloseableLock in project hadoop by apache.

The class FsDatasetImpl, method moveBlock.

/**
   * Moves a block from a given volume to another.
   *
   * @param block       the extended block to move
   * @param replicaInfo the replica backing the block
   * @param volumeRef   reference to the target volume; closed by the caller
   * @return the new ReplicaInfo on the target volume
   * @throws IOException if the copy or finalization fails
   */
private ReplicaInfo moveBlock(ExtendedBlock block, ReplicaInfo replicaInfo, FsVolumeReference volumeRef) throws IOException {
    FsVolumeImpl targetVolume = (FsVolumeImpl) volumeRef.getVolume();
    // Copy files to temp dir first
    ReplicaInfo newReplicaInfo = targetVolume.moveBlockToTmpLocation(block, replicaInfo, smallBufferSize, conf);
    // Finalize the copied files
    newReplicaInfo = finalizeReplica(block.getBlockPoolId(), newReplicaInfo);
    try (AutoCloseableLock lock = datasetLock.acquire()) {
        // Increment numBlocks here because the block was moved without the
        // BlockPoolSlice (BPS) being notified
        FsVolumeImpl volume = (FsVolumeImpl) newReplicaInfo.getVolume();
        volume.incrNumBlocks(block.getBlockPoolId());
    }
    removeOldReplica(replicaInfo, newReplicaInfo, block.getBlockPoolId());
    return newReplicaInfo;
}
Also used: ReplicaInfo(org.apache.hadoop.hdfs.server.datanode.ReplicaInfo) AutoCloseableLock(org.apache.hadoop.util.AutoCloseableLock)

Example 10 with AutoCloseableLock

Use of org.apache.hadoop.util.AutoCloseableLock in project hadoop by apache.

The class FsDatasetImpl, method getBlockLocalPathInfo.

// FsDatasetSpi
@Override
public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block) throws IOException {
    try (AutoCloseableLock lock = datasetLock.acquire()) {
        final Replica replica = volumeMap.get(block.getBlockPoolId(), block.getBlockId());
        if (replica == null) {
            throw new ReplicaNotFoundException(block);
        }
        if (replica.getGenerationStamp() < block.getGenerationStamp()) {
            throw new IOException("Replica generation stamp < block generation stamp, block=" + block + ", replica=" + replica);
        } else if (replica.getGenerationStamp() > block.getGenerationStamp()) {
            block.setGenerationStamp(replica.getGenerationStamp());
        }
    }
    ReplicaInfo r = getBlockReplica(block);
    File blockFile = new File(r.getBlockURI());
    File metaFile = new File(r.getMetadataURI());
    BlockLocalPathInfo info = new BlockLocalPathInfo(block, blockFile.getAbsolutePath(), metaFile.toString());
    return info;
}
Also used: ReplicaInfo(org.apache.hadoop.hdfs.server.datanode.ReplicaInfo) AutoCloseableLock(org.apache.hadoop.util.AutoCloseableLock) ReplicaNotFoundException(org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException) BlockLocalPathInfo(org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo) IOException(java.io.IOException) MultipleIOException(org.apache.hadoop.io.MultipleIOException) RamDiskReplica(org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaTracker.RamDiskReplica) Replica(org.apache.hadoop.hdfs.server.datanode.Replica) File(java.io.File)
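
This method backs local (short-circuit) reads: the replica is validated and the block's generation stamp synchronized under the dataset lock, and the on-disk paths are resolved afterwards. A hypothetical caller sketch, assuming BlockLocalPathInfo's getBlockPath() and getMetaPath() accessors (the LocalReadSketch class and printLocalPaths helper are illustrative only):

import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;

class LocalReadSketch {
    // Resolve the local block and metadata file paths for a replica so a
    // client on the same host could open them directly.
    static void printLocalPaths(FsDatasetSpi<?> dataset, ExtendedBlock block) throws IOException {
        BlockLocalPathInfo info = dataset.getBlockLocalPathInfo(block);
        System.out.println("block file: " + info.getBlockPath());
        System.out.println("meta file : " + info.getMetaPath());
    }
}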

Aggregations

AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock): 44
ReplicaInfo (org.apache.hadoop.hdfs.server.datanode.ReplicaInfo): 27
IOException (java.io.IOException): 23
MultipleIOException (org.apache.hadoop.io.MultipleIOException): 17
FsVolumeReference (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference): 10
ReplicaInPipeline (org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline): 9
ReplicaNotFoundException (org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException): 8
File (java.io.File): 7
ReplicaHandler (org.apache.hadoop.hdfs.server.datanode.ReplicaHandler): 5
FsVolumeSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi): 5
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 4
ReplicaAlreadyExistsException (org.apache.hadoop.hdfs.server.datanode.ReplicaAlreadyExistsException): 4
RecoveringBlock (org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock): 4
DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage): 4
ArrayList (java.util.ArrayList): 3
HashMap (java.util.HashMap): 3
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 3
Block (org.apache.hadoop.hdfs.protocol.Block): 3
BlockListAsLongs (org.apache.hadoop.hdfs.protocol.BlockListAsLongs): 3
FsDatasetSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi): 3