
Example 36 with AutoCloseableLock

Use of org.apache.hadoop.util.AutoCloseableLock in project hadoop by apache.

Class FsDatasetImpl, method finalizeBlock.

//
// REMIND - mjc - eventually we should have a timeout system
// in place to clean up block files left by abandoned clients.
// We should have some timer in place, so that if a blockfile
// is created but non-valid, and has been idle for >48 hours,
// we can GC it safely.
//
/**
   * Complete the block write!
   */
// FsDatasetSpi
@Override
public void finalizeBlock(ExtendedBlock b) throws IOException {
    try (AutoCloseableLock lock = datasetLock.acquire()) {
        if (Thread.interrupted()) {
            // Don't allow data modifications from interrupted threads
            throw new IOException("Cannot finalize block from Interrupted Thread");
        }
        ReplicaInfo replicaInfo = getReplicaInfo(b);
        if (replicaInfo.getState() == ReplicaState.FINALIZED) {
            // been opened for append but never modified
            return;
        }
        finalizeReplica(b.getBlockPoolId(), replicaInfo);
    }
}
Also used: ReplicaInfo(org.apache.hadoop.hdfs.server.datanode.ReplicaInfo), AutoCloseableLock(org.apache.hadoop.util.AutoCloseableLock), IOException(java.io.IOException), MultipleIOException(org.apache.hadoop.io.MultipleIOException)
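
Every example on this page follows the same idiom: datasetLock.acquire() takes the lock and returns the lock object itself, so the try-with-resources block releases it on every exit path, including exceptions and early returns such as the FINALIZED short-circuit above. A minimal, self-contained sketch of that idiom (the class name, counter field, and method below are made up for illustration):

import org.apache.hadoop.util.AutoCloseableLock;

public class DatasetLockSketch {
    // Single lock guarding all mutations of this object's state,
    // mirroring FsDatasetImpl's datasetLock.
    private final AutoCloseableLock datasetLock = new AutoCloseableLock();
    private long finalizedBlocks;

    public void finalizeOneBlock() {
        // acquire() blocks until the lock is held and returns it, so the
        // try-with-resources block unlocks automatically on every exit path.
        try (AutoCloseableLock lock = datasetLock.acquire()) {
            finalizedBlocks++;
        }
    }

    public static void main(String[] args) {
        DatasetLockSketch sketch = new DatasetLockSketch();
        sketch.finalizeOneBlock();
        System.out.println("blocks finalized under the lock: " + sketch.finalizedBlocks);
    }
}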

Example 37 with AutoCloseableLock

Use of org.apache.hadoop.util.AutoCloseableLock in project hadoop by apache.

Class FsDatasetImpl, method recoverRbwImpl.

private ReplicaHandler recoverRbwImpl(ReplicaInPipeline rbw, ExtendedBlock b, long newGS, long minBytesRcvd, long maxBytesRcvd) throws IOException {
    try (AutoCloseableLock lock = datasetLock.acquire()) {
        // check generation stamp
        long replicaGenerationStamp = rbw.getGenerationStamp();
        if (replicaGenerationStamp < b.getGenerationStamp() || replicaGenerationStamp > newGS) {
            throw new ReplicaNotFoundException(ReplicaNotFoundException.UNEXPECTED_GS_REPLICA + b + ". Expected GS range is [" + b.getGenerationStamp() + ", " + newGS + "].");
        }
        // check replica length
        long bytesAcked = rbw.getBytesAcked();
        long numBytes = rbw.getNumBytes();
        if (bytesAcked < minBytesRcvd || numBytes > maxBytesRcvd) {
            throw new ReplicaNotFoundException("Unmatched length replica " + rbw + ": BytesAcked = " + bytesAcked + " BytesRcvd = " + numBytes + " are not in the range of [" + minBytesRcvd + ", " + maxBytesRcvd + "].");
        }
        FsVolumeReference ref = rbw.getReplicaInfo().getVolume().obtainReference();
        try {
            // Truncate the potentially corrupt portion: any corrupt data written
            // after the acked length can go unnoticed.
            if (numBytes > bytesAcked) {
                rbw.getReplicaInfo().truncateBlock(bytesAcked);
                rbw.setNumBytes(bytesAcked);
                rbw.setLastChecksumAndDataLen(bytesAcked, null);
            }
            // bump the replica's generation stamp to newGS
            rbw.getReplicaInfo().bumpReplicaGS(newGS);
        } catch (IOException e) {
            IOUtils.cleanup(null, ref);
            throw e;
        }
        return new ReplicaHandler(rbw, ref);
    }
}
Also used: FsVolumeReference(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference), AutoCloseableLock(org.apache.hadoop.util.AutoCloseableLock), ReplicaNotFoundException(org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException), IOException(java.io.IOException), MultipleIOException(org.apache.hadoop.io.MultipleIOException), ReplicaHandler(org.apache.hadoop.hdfs.server.datanode.ReplicaHandler)
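
One detail worth calling out in recoverRbwImpl: the volume reference is obtained inside the lock, and if anything between obtaining it and constructing the ReplicaHandler throws, the reference is closed before the exception propagates; otherwise ownership of the reference passes to the handler. A generic, self-contained sketch of that hand-off, with placeholder Ref and Handler types standing in for FsVolumeReference and ReplicaHandler (nothing below is Hadoop API):

import java.io.Closeable;
import java.io.IOException;

public class ReferenceHandoffSketch {
    // Placeholder for a closeable resource such as FsVolumeReference.
    interface Ref extends Closeable { }

    // Placeholder for the object that takes ownership, such as ReplicaHandler.
    static class Handler {
        final Ref ref;
        Handler(Ref ref) { this.ref = ref; }
    }

    static Handler recover(Ref ref) throws IOException {
        try {
            riskyUpdate(); // e.g. truncate the replica and bump its generation stamp
        } catch (IOException e) {
            // Release the reference before rethrowing so it is never leaked.
            try {
                ref.close();
            } catch (IOException suppressed) {
                e.addSuppressed(suppressed);
            }
            throw e;
        }
        // On success, ownership of the reference passes to the handler.
        return new Handler(ref);
    }

    static void riskyUpdate() throws IOException {
        // May throw in the real code; succeeds in this sketch.
    }

    public static void main(String[] args) throws IOException {
        Handler handler = recover(() -> System.out.println("reference closed"));
        System.out.println("handler holds reference: " + (handler.ref != null));
    }
}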

Example 38 with AutoCloseableLock

Use of org.apache.hadoop.util.AutoCloseableLock in project hadoop by apache.

Class FsDatasetImpl, method onCompleteLazyPersist.

@Override
public void onCompleteLazyPersist(String bpId, long blockId, long creationTime, File[] savedFiles, FsVolumeImpl targetVolume) {
    try (AutoCloseableLock lock = datasetLock.acquire()) {
        ramDiskReplicaTracker.recordEndLazyPersist(bpId, blockId, savedFiles);
        targetVolume.incDfsUsedAndNumBlocks(bpId, savedFiles[0].length() + savedFiles[1].length());
        // Update metrics (ignore the metadata file size)
        datanode.getMetrics().incrRamDiskBlocksLazyPersisted();
        datanode.getMetrics().incrRamDiskBytesLazyPersisted(savedFiles[1].length());
        datanode.getMetrics().addRamDiskBlocksLazyPersistWindowMs(Time.monotonicNow() - creationTime);
        if (LOG.isDebugEnabled()) {
            LOG.debug("LazyWriter: Finish persisting RamDisk block: " + " block pool Id: " + bpId + " block id: " + blockId + " to block file " + savedFiles[1] + " and meta file " + savedFiles[0] + " on target volume " + targetVolume);
        }
    }
}
Also used: AutoCloseableLock(org.apache.hadoop.util.AutoCloseableLock)

Example 39 with AutoCloseableLock

Use of org.apache.hadoop.util.AutoCloseableLock in project hadoop by apache.

Class FsDatasetImpl, method moveBlockAcrossVolumes.

/**
   * Moves a given block from one volume to another volume. This is used by disk
   * balancer.
   *
   * @param block       - ExtendedBlock
   * @param destination - Destination volume
   * @return Old replica info
   */
@Override
public ReplicaInfo moveBlockAcrossVolumes(ExtendedBlock block, FsVolumeSpi destination) throws IOException {
    ReplicaInfo replicaInfo = getReplicaInfo(block);
    if (replicaInfo.getState() != ReplicaState.FINALIZED) {
        throw new ReplicaNotFoundException(ReplicaNotFoundException.UNFINALIZED_REPLICA + block);
    }
    FsVolumeReference volumeRef = null;
    try (AutoCloseableLock lock = datasetLock.acquire()) {
        volumeRef = destination.obtainReference();
    }
    try {
        moveBlock(block, replicaInfo, volumeRef);
    } finally {
        if (volumeRef != null) {
            volumeRef.close();
        }
    }
    return replicaInfo;
}
Also used: FsVolumeReference(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference), ReplicaInfo(org.apache.hadoop.hdfs.server.datanode.ReplicaInfo), ReplicaNotFoundException(org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException), AutoCloseableLock(org.apache.hadoop.util.AutoCloseableLock)
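
Also notable in moveBlockAcrossVolumes: the dataset lock is held only long enough to pin the destination volume with obtainReference(); the actual block move runs outside the lock, and the reference is released in a finally block. A small sketch of that shape, assuming only the AutoCloseableLock calls already shown above (the VolumeRef type and copyBlock step are stand-ins, not Hadoop classes):

import org.apache.hadoop.util.AutoCloseableLock;

public class ShortCriticalSectionSketch {
    private final AutoCloseableLock datasetLock = new AutoCloseableLock();

    // Stand-in for FsVolumeReference: a handle that must be released when done.
    static class VolumeRef implements AutoCloseable {
        @Override
        public void close() {
            System.out.println("volume reference released");
        }
    }

    VolumeRef obtainReference() {
        return new VolumeRef();
    }

    void copyBlock(VolumeRef destination) {
        // Potentially slow I/O; deliberately done without holding the dataset lock.
        System.out.println("copying block to destination volume");
    }

    public void moveBlock() {
        VolumeRef volumeRef = null;
        // Hold the lock only to pin the destination volume...
        try (AutoCloseableLock lock = datasetLock.acquire()) {
            volumeRef = obtainReference();
        }
        // ...then do the slow copy outside the lock and always release the reference.
        try {
            copyBlock(volumeRef);
        } finally {
            if (volumeRef != null) {
                volumeRef.close();
            }
        }
    }

    public static void main(String[] args) {
        new ShortCriticalSectionSketch().moveBlock();
    }
}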

Example 40 with AutoCloseableLock

Use of org.apache.hadoop.util.AutoCloseableLock in project hadoop by apache.

Class BlockPoolSlice, method readReplicasFromCache.

private boolean readReplicasFromCache(ReplicaMap volumeMap, final RamDiskReplicaTracker lazyWriteReplicaMap) {
    ReplicaMap tmpReplicaMap = new ReplicaMap(new AutoCloseableLock());
    File replicaFile = new File(currentDir, REPLICA_CACHE_FILE);
    // Check whether the file exists or not.
    if (!replicaFile.exists()) {
        LOG.info("Replica Cache file: " + replicaFile.getPath() + " doesn't exist ");
        return false;
    }
    long fileLastModifiedTime = replicaFile.lastModified();
    if (System.currentTimeMillis() > fileLastModifiedTime + replicaCacheExpiry) {
        LOG.info("Replica Cache file: " + replicaFile.getPath() + " has gone stale");
        // Just to make findbugs happy
        if (!replicaFile.delete()) {
            LOG.info("Replica Cache file: " + replicaFile.getPath() + " cannot be deleted");
        }
        return false;
    }
    FileInputStream inputStream = null;
    try {
        inputStream = fileIoProvider.getFileInputStream(volume, replicaFile);
        BlockListAsLongs blocksList = BlockListAsLongs.readFrom(inputStream, maxDataLength);
        if (blocksList == null) {
            return false;
        }
        for (BlockReportReplica replica : blocksList) {
            switch(replica.getState()) {
                case FINALIZED:
                    addReplicaToReplicasMap(replica, tmpReplicaMap, lazyWriteReplicaMap, true);
                    break;
                case RUR:
                case RBW:
                case RWR:
                    addReplicaToReplicasMap(replica, tmpReplicaMap, lazyWriteReplicaMap, false);
                    break;
                default:
                    break;
            }
        }
        inputStream.close();
        // Parsing succeeded, so it is now safe to add the replicas into
        // volumeMap; if an exception is thrown instead, the catch block below
        // falls back to scanning all the files on disk.
        for (Iterator<ReplicaInfo> iter = tmpReplicaMap.replicas(bpid).iterator(); iter.hasNext(); ) {
            ReplicaInfo info = iter.next();
            // We use a lightweight GSet to store replicaInfo; we need to remove
            // it from one GSet before adding it to another.
            iter.remove();
            volumeMap.add(bpid, info);
        }
        LOG.info("Successfully read replica from cache file : " + replicaFile.getPath());
        return true;
    } catch (Exception e) {
        // On any exception, fall back to reading from disk:
        // log the error and return false.
        LOG.info("Exception occurred while reading the replicas cache file: " + replicaFile.getPath(), e);
        return false;
    } finally {
        if (!fileIoProvider.delete(volume, replicaFile)) {
            LOG.info("Failed to delete replica cache file: " + replicaFile.getPath());
        }
        // close the inputStream
        IOUtils.closeStream(inputStream);
    }
}
Also used: ReplicaInfo(org.apache.hadoop.hdfs.server.datanode.ReplicaInfo), AutoCloseableLock(org.apache.hadoop.util.AutoCloseableLock), BlockListAsLongs(org.apache.hadoop.hdfs.protocol.BlockListAsLongs), BlockReportReplica(org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica), RandomAccessFile(java.io.RandomAccessFile), File(java.io.File), FileInputStream(java.io.FileInputStream), IOException(java.io.IOException), FileNotFoundException(java.io.FileNotFoundException), DiskErrorException(org.apache.hadoop.util.DiskChecker.DiskErrorException)
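
The cache-reading code above parses everything into a private temporary ReplicaMap first, then moves the entries into the shared volumeMap one at a time (removing before adding, because the lightweight GSet cannot hold an entry in two sets at once), and deletes the cache file in finally whether or not parsing succeeded. A simplified sketch of just the stage-then-publish part, using a plain HashMap guarded by an AutoCloseableLock in place of ReplicaMap (the class, method, and value types here are illustrative only):

import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;

import org.apache.hadoop.util.AutoCloseableLock;

public class StageThenPublishSketch {
    private final AutoCloseableLock mapLock = new AutoCloseableLock();
    private final Map<Long, String> volumeMap = new HashMap<>();

    // Parse into a private temporary map first, so a half-read cache file
    // never leaves partial state in the shared map.
    public boolean loadFromCache(Map<Long, String> parsedReplicas) {
        Map<Long, String> tmp = new HashMap<>(parsedReplicas);
        try (AutoCloseableLock lock = mapLock.acquire()) {
            // Move entries one by one, mirroring the remove-before-add step the
            // real code needs because its GSet disallows dual membership.
            Iterator<Map.Entry<Long, String>> it = tmp.entrySet().iterator();
            while (it.hasNext()) {
                Map.Entry<Long, String> entry = it.next();
                it.remove();
                volumeMap.put(entry.getKey(), entry.getValue());
            }
        }
        return true;
    }

    public static void main(String[] args) {
        StageThenPublishSketch sketch = new StageThenPublishSketch();
        sketch.loadFromCache(Map.of(1L, "FINALIZED", 2L, "RBW"));
        System.out.println("replicas published: " + sketch.volumeMap.size());
    }
}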

Aggregations

AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock): 44
ReplicaInfo (org.apache.hadoop.hdfs.server.datanode.ReplicaInfo): 27
IOException (java.io.IOException): 23
MultipleIOException (org.apache.hadoop.io.MultipleIOException): 17
FsVolumeReference (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference): 10
ReplicaInPipeline (org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline): 9
ReplicaNotFoundException (org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException): 8
File (java.io.File): 7
ReplicaHandler (org.apache.hadoop.hdfs.server.datanode.ReplicaHandler): 5
FsVolumeSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi): 5
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 4
ReplicaAlreadyExistsException (org.apache.hadoop.hdfs.server.datanode.ReplicaAlreadyExistsException): 4
RecoveringBlock (org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock): 4
DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage): 4
ArrayList (java.util.ArrayList): 3
HashMap (java.util.HashMap): 3
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 3
Block (org.apache.hadoop.hdfs.protocol.Block): 3
BlockListAsLongs (org.apache.hadoop.hdfs.protocol.BlockListAsLongs): 3
FsDatasetSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi): 3