Use of org.apache.hadoop.util.AutoCloseableLock in project hadoop by apache.
The class FsDatasetImpl, method finalizeBlock.
//
// REMIND - mjc - eventually we should have a timeout system
// in place to clean up block files left by abandoned clients.
// We should have some timer in place, so that if a blockfile
// is created but non-valid, and has been idle for >48 hours,
// we can GC it safely.
//
/**
 * Complete the block write!
 */
// FsDatasetSpi
@Override
public void finalizeBlock(ExtendedBlock b) throws IOException {
  try (AutoCloseableLock lock = datasetLock.acquire()) {
    if (Thread.interrupted()) {
      // Don't allow data modifications from interrupted threads
      throw new IOException("Cannot finalize block from Interrupted Thread");
    }
    ReplicaInfo replicaInfo = getReplicaInfo(b);
    if (replicaInfo.getState() == ReplicaState.FINALIZED) {
      // this is legal, when recovery happens on a file that has
      // been opened for append but never modified
      return;
    }
    finalizeReplica(b.getBlockPoolId(), replicaInfo);
  }
}
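All of the snippets on this page rely on the same locking idiom. The sketch below shows it in isolation; the class and its fields are illustrative and not part of the Hadoop source, and only AutoCloseableLock itself (its no-argument constructor, acquire() and close()) is assumed from hadoop-common. acquire() takes the underlying lock and returns the AutoCloseableLock, so try-with-resources releases it via close() even when the body throws.

import org.apache.hadoop.util.AutoCloseableLock;

public class DatasetLockSketch {
  // hypothetical stand-in for FsDatasetImpl's datasetLock field
  private final AutoCloseableLock datasetLock = new AutoCloseableLock();
  private long finalizedBlocks;

  public void recordFinalizedBlock() {
    // acquire() blocks until the lock is held and returns the lock itself,
    // so the implicit close() at the end of this block releases it, even
    // if the body throws
    try (AutoCloseableLock lock = datasetLock.acquire()) {
      finalizedBlocks++;
    }
  }
}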
Use of org.apache.hadoop.util.AutoCloseableLock in project hadoop by apache.
The class FsDatasetImpl, method recoverRbwImpl.
private ReplicaHandler recoverRbwImpl(ReplicaInPipeline rbw, ExtendedBlock b,
    long newGS, long minBytesRcvd, long maxBytesRcvd) throws IOException {
  try (AutoCloseableLock lock = datasetLock.acquire()) {
    // check generation stamp
    long replicaGenerationStamp = rbw.getGenerationStamp();
    if (replicaGenerationStamp < b.getGenerationStamp()
        || replicaGenerationStamp > newGS) {
      throw new ReplicaNotFoundException(
          ReplicaNotFoundException.UNEXPECTED_GS_REPLICA + b
              + ". Expected GS range is [" + b.getGenerationStamp()
              + ", " + newGS + "].");
    }
    // check replica length
    long bytesAcked = rbw.getBytesAcked();
    long numBytes = rbw.getNumBytes();
    if (bytesAcked < minBytesRcvd || numBytes > maxBytesRcvd) {
      throw new ReplicaNotFoundException("Unmatched length replica " + rbw
          + ": BytesAcked = " + bytesAcked + " BytesRcvd = " + numBytes
          + " are not in the range of [" + minBytesRcvd + ", "
          + maxBytesRcvd + "].");
    }
    FsVolumeReference ref = rbw.getReplicaInfo().getVolume().obtainReference();
    try {
      // Truncate the potentially corrupt portion.
      // If the source was client and the last node in the pipeline was lost,
      // any corrupt data written after the acked length can go unnoticed.
      if (numBytes > bytesAcked) {
        rbw.getReplicaInfo().truncateBlock(bytesAcked);
        rbw.setNumBytes(bytesAcked);
        rbw.setLastChecksumAndDataLen(bytesAcked, null);
      }
      // bump the replica's generation stamp to newGS
      rbw.getReplicaInfo().bumpReplicaGS(newGS);
    } catch (IOException e) {
      IOUtils.cleanup(null, ref);
      throw e;
    }
    return new ReplicaHandler(rbw, ref);
  }
}
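The two validations above accept a replica only when its generation stamp and its length fall inside explicit ranges. The helpers below are hypothetical, not part of FsDatasetImpl; they merely restate those checks with illustrative names to make the accepted intervals explicit.

final class RecoverRbwChecks {
  static boolean generationStampInRange(long replicaGS, long blockGS, long newGS) {
    // the replica's generation stamp must lie in the closed range [blockGS, newGS]
    return replicaGS >= blockGS && replicaGS <= newGS;
  }

  static boolean replicaLengthInRange(long bytesAcked, long bytesOnDisk,
      long minBytesRcvd, long maxBytesRcvd) {
    // acknowledged bytes must reach the requested minimum and the received
    // bytes must not exceed the requested maximum
    return bytesAcked >= minBytesRcvd && bytesOnDisk <= maxBytesRcvd;
  }
}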
Use of org.apache.hadoop.util.AutoCloseableLock in project hadoop by apache.
The class FsDatasetImpl, method onCompleteLazyPersist.
@Override
public void onCompleteLazyPersist(String bpId, long blockId, long creationTime,
    File[] savedFiles, FsVolumeImpl targetVolume) {
  try (AutoCloseableLock lock = datasetLock.acquire()) {
    ramDiskReplicaTracker.recordEndLazyPersist(bpId, blockId, savedFiles);
    targetVolume.incDfsUsedAndNumBlocks(bpId,
        savedFiles[0].length() + savedFiles[1].length());
    // Update metrics (ignore the metadata file size)
    datanode.getMetrics().incrRamDiskBlocksLazyPersisted();
    datanode.getMetrics().incrRamDiskBytesLazyPersisted(savedFiles[1].length());
    datanode.getMetrics().addRamDiskBlocksLazyPersistWindowMs(
        Time.monotonicNow() - creationTime);
    if (LOG.isDebugEnabled()) {
      LOG.debug("LazyWriter: Finish persisting RamDisk block: "
          + " block pool Id: " + bpId + " block id: " + blockId
          + " to block file " + savedFiles[1] + " and meta file " + savedFiles[0]
          + " on target volume " + targetVolume);
    }
  }
}
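The lazy-persist latency metric above is computed with org.apache.hadoop.util.Time.monotonicNow(). The small sketch below is illustrative (the class and the sleep are stand-ins; only Time.monotonicNow() is assumed from hadoop-common) and shows the same elapsed-time pattern, which is preferred over System.currentTimeMillis() for durations because it is unaffected by wall-clock adjustments.

import org.apache.hadoop.util.Time;

public class PersistWindowSketch {
  public static void main(String[] args) throws InterruptedException {
    long creationTime = Time.monotonicNow();
    Thread.sleep(10); // stand-in for the lazy-persist work
    long windowMs = Time.monotonicNow() - creationTime;
    System.out.println("lazy persist window: " + windowMs + " ms");
  }
}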
Use of org.apache.hadoop.util.AutoCloseableLock in project hadoop by apache.
The class FsDatasetImpl, method moveBlockAcrossVolumes.
/**
 * Moves a given block from one volume to another volume. This is used by disk
 * balancer.
 *
 * @param block - ExtendedBlock
 * @param destination - Destination volume
 * @return Old replica info
 */
@Override
public ReplicaInfo moveBlockAcrossVolumes(ExtendedBlock block, FsVolumeSpi destination)
    throws IOException {
  ReplicaInfo replicaInfo = getReplicaInfo(block);
  if (replicaInfo.getState() != ReplicaState.FINALIZED) {
    throw new ReplicaNotFoundException(
        ReplicaNotFoundException.UNFINALIZED_REPLICA + block);
  }
  FsVolumeReference volumeRef = null;
  try (AutoCloseableLock lock = datasetLock.acquire()) {
    volumeRef = destination.obtainReference();
  }
  try {
    moveBlock(block, replicaInfo, volumeRef);
  } finally {
    if (volumeRef != null) {
      volumeRef.close();
    }
  }
  return replicaInfo;
}
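Note the scope of the lock here: the dataset lock is held only long enough to obtain the volume reference, while the copy performed by moveBlock runs outside the lock and the reference is released in a finally block. The sketch below restates that shape under stated assumptions: only AutoCloseableLock is taken from hadoop-common, and the Ref type is a hypothetical stand-in for FsVolumeReference, not the real API.

import org.apache.hadoop.util.AutoCloseableLock;

public class MoveUnderReferenceSketch {
  // hypothetical stand-in for FsVolumeReference; closing it releases the volume
  interface Ref extends AutoCloseable {
    @Override
    void close();
  }

  private final AutoCloseableLock datasetLock = new AutoCloseableLock();

  void moveWithReference(Ref destinationRef, Runnable copyWork) {
    Ref ref = null;
    // hold the dataset lock only long enough to pin the destination volume
    try (AutoCloseableLock lock = datasetLock.acquire()) {
      ref = destinationRef;
    }
    try {
      // the long-running copy runs outside the dataset lock
      copyWork.run();
    } finally {
      if (ref != null) {
        ref.close();
      }
    }
  }
}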
Use of org.apache.hadoop.util.AutoCloseableLock in project hadoop by apache.
The class BlockPoolSlice, method readReplicasFromCache.
private boolean readReplicasFromCache(ReplicaMap volumeMap,
    final RamDiskReplicaTracker lazyWriteReplicaMap) {
  ReplicaMap tmpReplicaMap = new ReplicaMap(new AutoCloseableLock());
  File replicaFile = new File(currentDir, REPLICA_CACHE_FILE);
  // Check whether the file exists or not.
  if (!replicaFile.exists()) {
    LOG.info("Replica Cache file: " + replicaFile.getPath() + " doesn't exist ");
    return false;
  }
  long fileLastModifiedTime = replicaFile.lastModified();
  if (System.currentTimeMillis() > fileLastModifiedTime + replicaCacheExpiry) {
    LOG.info("Replica Cache file: " + replicaFile.getPath() + " has gone stale");
    // Just to make findbugs happy
    if (!replicaFile.delete()) {
      LOG.info("Replica Cache file: " + replicaFile.getPath() + " cannot be deleted");
    }
    return false;
  }
  FileInputStream inputStream = null;
  try {
    inputStream = fileIoProvider.getFileInputStream(volume, replicaFile);
    BlockListAsLongs blocksList = BlockListAsLongs.readFrom(inputStream, maxDataLength);
    if (blocksList == null) {
      return false;
    }
    for (BlockReportReplica replica : blocksList) {
      switch (replica.getState()) {
      case FINALIZED:
        addReplicaToReplicasMap(replica, tmpReplicaMap, lazyWriteReplicaMap, true);
        break;
      case RUR:
      case RBW:
      case RWR:
        addReplicaToReplicasMap(replica, tmpReplicaMap, lazyWriteReplicaMap, false);
        break;
      default:
        break;
      }
    }
    inputStream.close();
    // Now it is safe to add the replicas into volumeMap.
    // In case of any exception during parsing this cache file, fall back
    // to scan all the files on disk.
    for (Iterator<ReplicaInfo> iter = tmpReplicaMap.replicas(bpid).iterator();
        iter.hasNext(); ) {
      ReplicaInfo info = iter.next();
      // We use a lightweight GSet to store replicaInfo, we need to remove
      // it from one GSet before adding to another.
      iter.remove();
      volumeMap.add(bpid, info);
    }
    LOG.info("Successfully read replica from cache file : " + replicaFile.getPath());
    return true;
  } catch (Exception e) {
    // Any exception we need to revert back to read from disk
    // Log the error and return false
    LOG.info("Exception occured while reading the replicas cache file: "
        + replicaFile.getPath(), e);
    return false;
  } finally {
    if (!fileIoProvider.delete(volume, replicaFile)) {
      LOG.info("Failed to delete replica cache file: " + replicaFile.getPath());
    }
    // close the inputStream
    IOUtils.closeStream(inputStream);
  }
}
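The early exit above trusts the cached replica list only while it is younger than replicaCacheExpiry, comparing the file's last-modified time against the current wall-clock time. The helper below is hypothetical, not part of BlockPoolSlice, and simply restates that staleness check with illustrative names.

import java.io.File;

final class ReplicaCacheCheck {
  // true when the cache file was last written longer ago than the expiry window
  static boolean isStale(File replicaCacheFile, long replicaCacheExpiryMs) {
    return System.currentTimeMillis()
        > replicaCacheFile.lastModified() + replicaCacheExpiryMs;
  }
}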