Use of org.apache.hadoop.util.AutoCloseableLock in project hadoop by apache.
The class FsDatasetImpl, method onCompleteLazyPersist.
@Override
public void onCompleteLazyPersist(String bpId, long blockId,
    long creationTime, File[] savedFiles, FsVolumeImpl targetVolume) {
  try (AutoCloseableLock lock = datasetLock.acquire()) {
    ramDiskReplicaTracker.recordEndLazyPersist(bpId, blockId, savedFiles);
    targetVolume.incDfsUsedAndNumBlocks(bpId,
        savedFiles[0].length() + savedFiles[1].length());
    // Update metrics (ignore the metadata file size)
    datanode.getMetrics().incrRamDiskBlocksLazyPersisted();
    datanode.getMetrics().incrRamDiskBytesLazyPersisted(savedFiles[1].length());
    datanode.getMetrics().addRamDiskBlocksLazyPersistWindowMs(
        Time.monotonicNow() - creationTime);
    if (LOG.isDebugEnabled()) {
      LOG.debug("LazyWriter: Finish persisting RamDisk block: "
          + " block pool Id: " + bpId + " block id: " + blockId
          + " to block file " + savedFiles[1] + " and meta file " + savedFiles[0]
          + " on target volume " + targetVolume);
    }
  }
}
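Every call site in this section uses the same idiom: acquire() takes the underlying lock and returns the AutoCloseableLock itself, so the implicit close() at the end of the try-with-resources block releases the lock whether the body completes normally or throws. A minimal sketch of that idiom; the LockUsageSketch class and guardedCounter field are illustrative, not from Hadoop:

import org.apache.hadoop.util.AutoCloseableLock;

public class LockUsageSketch {
  // The no-arg constructor wraps a ReentrantLock (the same constructor
  // used by "new AutoCloseableLock()" in readReplicasFromCache below).
  private final AutoCloseableLock datasetLock = new AutoCloseableLock();
  private long guardedCounter; // hypothetical state protected by the lock

  public void increment() {
    // acquire() locks and returns this same instance, so close() runs
    // automatically when the try block exits, even on an exception.
    try (AutoCloseableLock lock = datasetLock.acquire()) {
      guardedCounter++;
    }
  }
}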
Use of org.apache.hadoop.util.AutoCloseableLock in project hadoop by apache.
The class FsDatasetImpl, method moveBlockAcrossVolumes.
/**
* Moves a given block from one volume to another. This is used by the
* disk balancer.
*
* @param block - ExtendedBlock
* @param destination - Destination volume
* @return Old replica info
*/
@Override
public ReplicaInfo moveBlockAcrossVolumes(ExtendedBlock block,
    FsVolumeSpi destination) throws IOException {
  ReplicaInfo replicaInfo = getReplicaInfo(block);
  if (replicaInfo.getState() != ReplicaState.FINALIZED) {
    throw new ReplicaNotFoundException(
        ReplicaNotFoundException.UNFINALIZED_REPLICA + block);
  }
  FsVolumeReference volumeRef = null;
  try (AutoCloseableLock lock = datasetLock.acquire()) {
    volumeRef = destination.obtainReference();
  }
  try {
    moveBlock(block, replicaInfo, volumeRef);
  } finally {
    if (volumeRef != null) {
      volumeRef.close();
    }
  }
  return replicaInfo;
}
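Note the design choice here: the dataset lock is held only long enough to obtain a reference that pins the destination volume, while the expensive copy in moveBlock runs outside the lock, and the reference is released in finally. A hedged sketch of that pin-under-lock, work-outside-it pattern; Pin, pinDestination, and slowCopy are illustrative stand-ins, not Hadoop APIs:

import java.io.Closeable;
import java.io.IOException;
import org.apache.hadoop.util.AutoCloseableLock;

public class PinThenWorkSketch {
  /** Illustrative stand-in for FsVolumeReference: keeps a resource alive until closed. */
  interface Pin extends Closeable {
  }

  private final AutoCloseableLock datasetLock = new AutoCloseableLock();

  private Pin pinDestination() {
    return () -> { }; // hypothetical no-op pin
  }

  private void slowCopy() throws IOException {
    // stands in for moveBlock(): heavy disk I/O that must not hold the lock
  }

  public void move() throws IOException {
    Pin pin;
    // Hold the lock only while pinning, so other dataset operations
    // are not blocked behind the disk I/O below.
    try (AutoCloseableLock lock = datasetLock.acquire()) {
      pin = pinDestination();
    }
    try {
      slowCopy();
    } finally {
      pin.close(); // release the pin even if the copy fails
    }
  }
}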
Use of org.apache.hadoop.util.AutoCloseableLock in project hadoop by apache.
The class BlockPoolSlice, method readReplicasFromCache.
private boolean readReplicasFromCache(ReplicaMap volumeMap,
    final RamDiskReplicaTracker lazyWriteReplicaMap) {
  ReplicaMap tmpReplicaMap = new ReplicaMap(new AutoCloseableLock());
  File replicaFile = new File(currentDir, REPLICA_CACHE_FILE);
  // Check whether the file exists or not.
  if (!replicaFile.exists()) {
    LOG.info("Replica Cache file: " + replicaFile.getPath() + " doesn't exist");
    return false;
  }
  long fileLastModifiedTime = replicaFile.lastModified();
  if (System.currentTimeMillis() > fileLastModifiedTime + replicaCacheExpiry) {
    LOG.info("Replica Cache file: " + replicaFile.getPath() + " has gone stale");
    // Just to make findbugs happy
    if (!replicaFile.delete()) {
      LOG.info("Replica Cache file: " + replicaFile.getPath()
          + " cannot be deleted");
    }
    return false;
  }
  FileInputStream inputStream = null;
  try {
    inputStream = fileIoProvider.getFileInputStream(volume, replicaFile);
    BlockListAsLongs blocksList =
        BlockListAsLongs.readFrom(inputStream, maxDataLength);
    if (blocksList == null) {
      return false;
    }
    for (BlockReportReplica replica : blocksList) {
      switch (replica.getState()) {
      case FINALIZED:
        addReplicaToReplicasMap(replica, tmpReplicaMap, lazyWriteReplicaMap,
            true);
        break;
      case RUR:
      case RBW:
      case RWR:
        addReplicaToReplicasMap(replica, tmpReplicaMap, lazyWriteReplicaMap,
            false);
        break;
      default:
        break;
      }
    }
    inputStream.close();
    // Now it is safe to add the replicas into volumeMap. In case of any
    // exception during parsing of this cache file, fall back to scanning
    // all the files on disk.
    for (Iterator<ReplicaInfo> iter =
        tmpReplicaMap.replicas(bpid).iterator(); iter.hasNext(); ) {
      ReplicaInfo info = iter.next();
      // We use a lightweight GSet to store replicaInfo; it must be removed
      // from one GSet before being added to another.
      iter.remove();
      volumeMap.add(bpid, info);
    }
    LOG.info("Successfully read replicas from cache file: "
        + replicaFile.getPath());
    return true;
  } catch (Exception e) {
    // On any exception, fall back to reading from disk:
    // log the error and return false.
    LOG.info("Exception occurred while reading the replicas cache file: "
        + replicaFile.getPath(), e);
    return false;
  } finally {
    if (!fileIoProvider.delete(volume, replicaFile)) {
      LOG.info("Failed to delete replica cache file: " + replicaFile.getPath());
    }
    // Close the input stream.
    IOUtils.closeStream(inputStream);
  }
}
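The lifecycle here is worth calling out: the cache file is single-use. Staleness is checked against lastModified plus replicaCacheExpiry before reading, and the finally block deletes the file whether or not parsing succeeded, so a corrupt or stale cache can never be read twice; any failure simply falls back to a full disk scan. A condensed sketch of that read-once lifecycle; the class name and the five-minute expiry are illustrative, not Hadoop configuration:

import java.io.File;

public class ReadOnceCacheSketch {
  private static final long EXPIRY_MS = 5L * 60 * 1000; // assumed expiry window

  /** Returns true if the cache was fresh and parsed; deletes it either way. */
  public boolean loadIfFresh(File cacheFile) {
    if (!cacheFile.exists()) {
      return false; // no cache: the caller scans the disk instead
    }
    // Same staleness test as above: now > lastModified + expiry.
    if (System.currentTimeMillis() > cacheFile.lastModified() + EXPIRY_MS) {
      if (!cacheFile.delete()) {
        System.err.println("could not delete stale cache " + cacheFile);
      }
      return false;
    }
    try {
      // ... parse the file into an in-memory map ...
      return true;
    } catch (Exception e) {
      return false; // any parse failure falls back to a disk scan
    } finally {
      // Single-use: delete whether or not the read succeeded, so a
      // corrupt file is never re-read on the next startup.
      if (!cacheFile.delete()) {
        System.err.println("could not delete cache " + cacheFile);
      }
    }
  }
}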
Use of org.apache.hadoop.util.AutoCloseableLock in project hadoop by apache.
The class ReplicaMap, method add.
/**
* Add a replica's meta information into the map
*
* @param bpid block pool id
* @param replicaInfo a replica's meta information
* @return previous meta information of the replica
* @throws IllegalArgumentException if the input parameter is null
*/
ReplicaInfo add(String bpid, ReplicaInfo replicaInfo) {
  checkBlockPool(bpid);
  checkBlock(replicaInfo);
  try (AutoCloseableLock l = lock.acquire()) {
    FoldedTreeSet<ReplicaInfo> set = map.get(bpid);
    if (set == null) {
      // Add an entry for the block pool if it does not exist already
      set = new FoldedTreeSet<>();
      map.put(bpid, set);
    }
    return set.addOrReplace(replicaInfo);
  }
}
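This method combines lock-guarded access to the backing map with lazy creation of the per-block-pool set on first use; addOrReplace then returns the previous entry for the block, or null if there was none, which is the "previous meta information" in the javadoc. A self-contained sketch of the same guarded lazy-initialization idiom, with String pool ids and a TreeSet<Long> standing in for FoldedTreeSet<ReplicaInfo>:

import java.util.HashMap;
import java.util.Map;
import java.util.TreeSet;
import org.apache.hadoop.util.AutoCloseableLock;

public class GuardedMapSketch {
  private final AutoCloseableLock lock = new AutoCloseableLock();
  private final Map<String, TreeSet<Long>> map = new HashMap<>();

  public boolean add(String poolId, long blockId) {
    try (AutoCloseableLock l = lock.acquire()) {
      // Create the per-pool set on first use, just as add() above creates
      // a FoldedTreeSet for a block pool id it has not seen before.
      return map.computeIfAbsent(poolId, k -> new TreeSet<>()).add(blockId);
    }
  }
}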