Use of org.apache.hadoop.hdfs.server.datanode.ReplicaInfo in project hadoop by apache.
The class BlockPoolSlice, method activateSavedReplica.
/**
* Move a persisted replica from lazypersist directory to a subdirectory
* under finalized.
*/
ReplicaInfo activateSavedReplica(ReplicaInfo replicaInfo,
    RamDiskReplica replicaState) throws IOException {
  File metaFile = replicaState.getSavedMetaFile();
  File blockFile = replicaState.getSavedBlockFile();
  final long blockId = replicaInfo.getBlockId();
  final File blockDir = DatanodeUtil.idToBlockDir(finalizedDir, blockId);
  final File targetBlockFile = new File(blockDir, blockFile.getName());
  final File targetMetaFile = new File(blockDir, metaFile.getName());
  fileIoProvider.moveFile(volume, blockFile, targetBlockFile);
  FsDatasetImpl.LOG.info("Moved " + blockFile + " to " + targetBlockFile);
  fileIoProvider.moveFile(volume, metaFile, targetMetaFile);
  FsDatasetImpl.LOG.info("Moved " + metaFile + " to " + targetMetaFile);
  ReplicaInfo newReplicaInfo = new ReplicaBuilder(ReplicaState.FINALIZED)
      .setBlockId(blockId)
      .setLength(replicaInfo.getBytesOnDisk())
      .setGenerationStamp(replicaInfo.getGenerationStamp())
      .setFsVolume(replicaState.getLazyPersistVolume())
      .setDirectoryToUse(targetBlockFile.getParentFile())
      .build();
  return newReplicaInfo;
}
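The target subdirectory here comes from DatanodeUtil.idToBlockDir, which derives a two-level path from bits of the block id. A minimal standalone sketch of that layout computation, assuming the 32 x 32 "subdir" scheme used by recent DataNode disk layouts (the bit offsets here are illustrative, not authoritative):

import java.io.File;

public class BlockDirSketch {
  // Sketch: bits of the block id select one of 32 x 32 "subdir" buckets
  // under the finalized root, spreading blocks across directories.
  static File idToBlockDir(File finalizedRoot, long blockId) {
    int d1 = (int) ((blockId >> 16) & 0x1F);
    int d2 = (int) ((blockId >> 8) & 0x1F);
    return new File(finalizedRoot,
        "subdir" + d1 + File.separator + "subdir" + d2);
  }

  public static void main(String[] args) {
    System.out.println(
        idToBlockDir(new File("/data/current/finalized"), 1073741825L));
  }
}

Because the mapping is a pure function of the block id, any component that knows the finalized root can recompute a block's directory without a lookup table.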
Use of org.apache.hadoop.hdfs.server.datanode.ReplicaInfo in project hadoop by apache.
The class BlockPoolSlice, method addReplicaToReplicasMap.
private void addReplicaToReplicasMap(Block block, ReplicaMap volumeMap,
    final RamDiskReplicaTracker lazyWriteReplicaMap,
    boolean isFinalized) throws IOException {
  ReplicaInfo newReplica = null;
  long blockId = block.getBlockId();
  long genStamp = block.getGenerationStamp();
  if (isFinalized) {
    newReplica = new ReplicaBuilder(ReplicaState.FINALIZED)
        .setBlockId(blockId)
        .setLength(block.getNumBytes())
        .setGenerationStamp(genStamp)
        .setFsVolume(volume)
        .setDirectoryToUse(DatanodeUtil.idToBlockDir(finalizedDir, blockId))
        .build();
  } else {
    File file = new File(rbwDir, block.getBlockName());
    boolean loadRwr = true;
    // The hidden restart meta file sits next to the rbw block file.
    File restartMeta = new File(file.getParent() + File.separator
        + "." + file.getName() + ".restart");
    Scanner sc = null;
    try {
      sc = new Scanner(restartMeta, "UTF-8");
      // The restart meta file exists.
      if (sc.hasNextLong() && (sc.nextLong() > timer.now())) {
        // It didn't expire. Load the replica as an RBW.
        // We don't know the expected block length, so just use 0
        // and don't reserve any more space for writes.
        newReplica = new ReplicaBuilder(ReplicaState.RBW)
            .setBlockId(blockId)
            .setLength(validateIntegrityAndSetLength(file, genStamp))
            .setGenerationStamp(genStamp)
            .setFsVolume(volume)
            .setDirectoryToUse(file.getParentFile())
            .setWriterThread(null)
            .setBytesToReserve(0)
            .build();
        loadRwr = false;
      }
      sc.close();
      if (!fileIoProvider.delete(volume, restartMeta)) {
        FsDatasetImpl.LOG.warn("Failed to delete restart meta file: "
            + restartMeta.getPath());
      }
    } catch (FileNotFoundException fnfe) {
      // Nothing to do here; the restart meta file does not exist.
    } finally {
      if (sc != null) {
        sc.close();
      }
    }
    // The restart meta file doesn't exist or has expired.
    if (loadRwr) {
      ReplicaBuilder builder = new ReplicaBuilder(ReplicaState.RWR)
          .setBlockId(blockId)
          .setLength(validateIntegrityAndSetLength(file, genStamp))
          .setGenerationStamp(genStamp)
          .setFsVolume(volume)
          .setDirectoryToUse(file.getParentFile());
      newReplica = builder.build();
    }
  }
  ReplicaInfo oldReplica = volumeMap.get(bpid, newReplica.getBlockId());
  if (oldReplica == null) {
    volumeMap.add(bpid, newReplica);
  } else {
    // We have multiple replicas of the same block so decide which one
    // to keep.
    newReplica = resolveDuplicateReplicas(newReplica, oldReplica, volumeMap);
  }
  // If we are retaining a replica on transient storage, make sure it is
  // in the lazyWriteReplicaMap so it can be persisted eventually.
  if (newReplica.getVolume().isTransientStorage()) {
    lazyWriteReplicaMap.addReplica(bpid, blockId,
        (FsVolumeImpl) newReplica.getVolume(), 0);
  } else {
    lazyWriteReplicaMap.discardReplica(bpid, blockId, false);
  }
  if (oldReplica == null) {
    incrNumBlocks();
  }
}
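The restart window above is encoded as a single absolute expiry timestamp that the Scanner reads as a long. A minimal, self-contained sketch of that check, assuming the one-long file format the loop implies (the file name, helper name, and 50-second window here are illustrative, not taken from a real datanode):

import java.io.File;
import java.io.FileNotFoundException;
import java.io.PrintWriter;
import java.util.Scanner;

public class RestartMetaDemo {
  // Returns true if the restart meta file exists and its expiry
  // timestamp (a single long, in milliseconds) is still in the future.
  static boolean isWithinRestartWindow(File restartMeta, long nowMillis)
      throws FileNotFoundException {
    try (Scanner sc = new Scanner(restartMeta, "UTF-8")) {
      return sc.hasNextLong() && sc.nextLong() > nowMillis;
    }
  }

  public static void main(String[] args) throws Exception {
    // Illustrative file and window; a real datanode writes this file
    // next to the rbw block file during a graceful restart.
    File meta = File.createTempFile("demo-block", ".restart");
    try (PrintWriter out = new PrintWriter(meta, "UTF-8")) {
      out.println(System.currentTimeMillis() + 50_000L); // 50 s window
    }
    System.out.println("load as RBW? "
        + isWithinRestartWindow(meta, System.currentTimeMillis()));
    meta.delete();
  }
}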
Use of org.apache.hadoop.hdfs.server.datanode.ReplicaInfo in project hadoop by apache.
The class TestFsDatasetImpl, method testDeletingBlocks.
@Test
public void testDeletingBlocks() throws IOException {
  HdfsConfiguration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    cluster.waitActive();
    DataNode dn = cluster.getDataNodes().get(0);
    FsDatasetSpi<?> ds = DataNodeTestUtils.getFSDataset(dn);
    ds.addBlockPool(BLOCKPOOL, conf);
    FsVolumeImpl vol;
    try (FsDatasetSpi.FsVolumeReferences volumes = ds.getFsVolumeReferences()) {
      vol = (FsVolumeImpl) volumes.get(0);
    }
    ExtendedBlock eb;
    ReplicaInfo info;
    List<Block> blockList = new ArrayList<>();
    // Create and invalidate 63 finalized replicas; they should still be
    // reported as deleting shortly afterwards.
    for (int i = 1; i <= 63; i++) {
      eb = new ExtendedBlock(BLOCKPOOL, i, 1, 1000 + i);
      cluster.getFsDatasetTestUtils(0).createFinalizedReplica(eb);
      blockList.add(eb.getLocalBlock());
    }
    ds.invalidate(BLOCKPOOL, blockList.toArray(new Block[0]));
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      // Nothing to do.
    }
    assertTrue(ds.isDeletingBlock(BLOCKPOOL, blockList.get(0).getBlockId()));
    // Invalidating one more block brings the total to 64; once that
    // deletion completes, the block should no longer be reported as
    // deleting.
    blockList.clear();
    eb = new ExtendedBlock(BLOCKPOOL, 64, 1, 1064);
    cluster.getFsDatasetTestUtils(0).createFinalizedReplica(eb);
    blockList.add(eb.getLocalBlock());
    ds.invalidate(BLOCKPOOL, blockList.toArray(new Block[0]));
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      // Nothing to do.
    }
    assertFalse(ds.isDeletingBlock(BLOCKPOOL, blockList.get(0).getBlockId()));
  } finally {
    cluster.shutdown();
  }
}
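The fixed Thread.sleep(1000) calls make this test timing-sensitive: on a slow machine the asynchronous deletion may not have progressed within a second. One alternative is a small bounded poll loop like the sketch below (a generic helper written for illustration, not part of the test utilities shown here):

import java.util.function.BooleanSupplier;

public final class WaitUtil {
  // Polls the condition every 100 ms until it holds or the timeout
  // elapses, failing the test with an AssertionError on timeout.
  static void waitUntil(BooleanSupplier condition, long timeoutMillis)
      throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMillis;
    while (!condition.getAsBoolean()) {
      if (System.currentTimeMillis() > deadline) {
        throw new AssertionError(
            "condition not met within " + timeoutMillis + " ms");
      }
      Thread.sleep(100);
    }
  }
}

The second assertion above could then become, for example, waitUntil(() -> !ds.isDeletingBlock(BLOCKPOOL, blockId), 10_000L), which passes as soon as the deletion finishes instead of after a fixed delay.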
Use of org.apache.hadoop.hdfs.server.datanode.ReplicaInfo in project hadoop by apache.
The class ReplicaMap, method initBlockPool.
void initBlockPool(String bpid) {
  checkBlockPool(bpid);
  try (AutoCloseableLock l = lock.acquire()) {
    FoldedTreeSet<ReplicaInfo> set = map.get(bpid);
    if (set == null) {
      // Add an entry for the block pool if it does not already exist.
      set = new FoldedTreeSet<>();
      map.put(bpid, set);
    }
  }
}
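The lock-guarded get-then-put above is the classic insert-if-absent pattern. If the map did not need the shared AutoCloseableLock for other invariants, the same effect could be achieved with computeIfAbsent on a concurrent map; a simplified sketch, with invented stand-in types rather than the real FoldedTreeSet:

import java.util.Map;
import java.util.TreeSet;
import java.util.concurrent.ConcurrentHashMap;

public class PoolMapSketch {
  // Simplified stand-in: block pool id -> set of replica block ids.
  private final Map<String, TreeSet<Long>> map = new ConcurrentHashMap<>();

  // Atomically creates the per-pool set on first use; later calls for
  // the same bpid are no-ops, matching initBlockPool's idempotence.
  void initBlockPool(String bpid) {
    map.computeIfAbsent(bpid, k -> new TreeSet<>());
  }
}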
Use of org.apache.hadoop.hdfs.server.datanode.ReplicaInfo in project hadoop by apache.
The class ReplicaMap, method remove.
/**
 * Remove from the map the replica whose id and generation stamp match
 * the input block's.
 * @param bpid block pool id
 * @param block block with its id as the key
 * @return the removed replica's meta information
 * @throws IllegalArgumentException if the input block is null
 */
ReplicaInfo remove(String bpid, Block block) {
  checkBlockPool(bpid);
  checkBlock(block);
  try (AutoCloseableLock l = lock.acquire()) {
    FoldedTreeSet<ReplicaInfo> set = map.get(bpid);
    if (set != null) {
      ReplicaInfo replicaInfo =
          set.get(block.getBlockId(), LONG_AND_BLOCK_COMPARATOR);
      if (replicaInfo != null &&
          block.getGenerationStamp() == replicaInfo.getGenerationStamp()) {
        return set.removeAndGet(replicaInfo);
      }
    }
  }
  return null;
}
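The generation-stamp check means a remove with a stale stamp is a no-op, which protects a newer replica stored under the same block id. A toy model of that contract, with a plain HashMap and invented types standing in for the real FoldedTreeSet and ReplicaInfo:

import java.util.HashMap;
import java.util.Map;

// Toy stand-ins for illustration only; the real ReplicaMap keeps a
// FoldedTreeSet<ReplicaInfo> per block pool behind a shared lock.
class Replica {
  final long blockId;
  final long genStamp;
  Replica(long blockId, long genStamp) {
    this.blockId = blockId;
    this.genStamp = genStamp;
  }
}

class MiniReplicaMap {
  private final Map<Long, Replica> byId = new HashMap<>();

  void add(Replica r) {
    byId.put(r.blockId, r);
  }

  // Removes only when both the block id and the generation stamp match,
  // mirroring the guarded remove above.
  Replica remove(long blockId, long genStamp) {
    Replica r = byId.get(blockId);
    if (r != null && r.genStamp == genStamp) {
      return byId.remove(blockId);
    }
    return null; // stale generation stamp: keep the newer replica
  }
}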