
Example 1 with ReplicaInfo

Use of org.apache.hadoop.hdfs.server.datanode.ReplicaInfo in project hadoop by apache.

Class BlockPoolSlice, method activateSavedReplica.

/**
   * Move a persisted replica from lazypersist directory to a subdirectory
   * under finalized.
   */
ReplicaInfo activateSavedReplica(ReplicaInfo replicaInfo, RamDiskReplica replicaState) throws IOException {
    File metaFile = replicaState.getSavedMetaFile();
    File blockFile = replicaState.getSavedBlockFile();
    final long blockId = replicaInfo.getBlockId();
    final File blockDir = DatanodeUtil.idToBlockDir(finalizedDir, blockId);
    final File targetBlockFile = new File(blockDir, blockFile.getName());
    final File targetMetaFile = new File(blockDir, metaFile.getName());
    fileIoProvider.moveFile(volume, blockFile, targetBlockFile);
    FsDatasetImpl.LOG.info("Moved " + blockFile + " to " + targetBlockFile);
    fileIoProvider.moveFile(volume, metaFile, targetMetaFile);
    FsDatasetImpl.LOG.info("Moved " + metaFile + " to " + targetMetaFile);
    ReplicaInfo newReplicaInfo = new ReplicaBuilder(ReplicaState.FINALIZED)
        .setBlockId(blockId)
        .setLength(replicaInfo.getBytesOnDisk())
        .setGenerationStamp(replicaInfo.getGenerationStamp())
        .setFsVolume(replicaState.getLazyPersistVolume())
        .setDirectoryToUse(targetBlockFile.getParentFile())
        .build();
    return newReplicaInfo;
}
Also used: ReplicaInfo (org.apache.hadoop.hdfs.server.datanode.ReplicaInfo), ReplicaBuilder (org.apache.hadoop.hdfs.server.datanode.ReplicaBuilder), RandomAccessFile (java.io.RandomAccessFile), File (java.io.File)
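
The target directory above comes from DatanodeUtil.idToBlockDir, which hashes the block id into a two-level subdirectory tree so that no single finalized directory accumulates an unbounded number of files. The sketch below re-creates the idea; the shift/mask constants and the "subdir" prefix mirror the DataNode's on-disk layout but are reproduced from memory, not quoted from the upstream class.

import java.io.File;

public class BlockDirSketch {

    // Illustrative re-creation of DatanodeUtil.idToBlockDir: two 5-bit
    // slices of the block id select one of 32 x 32 subdirectories under
    // finalized/, keeping per-directory file counts bounded.
    static File idToBlockDir(File finalizedDir, long blockId) {
        int d1 = (int) ((blockId >> 16) & 0x1F);
        int d2 = (int) ((blockId >> 8) & 0x1F);
        return new File(finalizedDir,
            "subdir" + d1 + File.separator + "subdir" + d2);
    }

    public static void main(String[] args) {
        // Prints a path of the form .../finalized/subdirM/subdirN.
        System.out.println(idToBlockDir(
            new File("/data/dn/current/finalized"), 1073741825L));
    }
}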

Example 2 with ReplicaInfo

Use of org.apache.hadoop.hdfs.server.datanode.ReplicaInfo in project hadoop by apache.

Class BlockPoolSlice, method addReplicaToReplicasMap.

private void addReplicaToReplicasMap(Block block, ReplicaMap volumeMap, final RamDiskReplicaTracker lazyWriteReplicaMap, boolean isFinalized) throws IOException {
    ReplicaInfo newReplica = null;
    long blockId = block.getBlockId();
    long genStamp = block.getGenerationStamp();
    if (isFinalized) {
        newReplica = new ReplicaBuilder(ReplicaState.FINALIZED)
            .setBlockId(blockId)
            .setLength(block.getNumBytes())
            .setGenerationStamp(genStamp)
            .setFsVolume(volume)
            .setDirectoryToUse(DatanodeUtil.idToBlockDir(finalizedDir, blockId))
            .build();
    } else {
        File file = new File(rbwDir, block.getBlockName());
        boolean loadRwr = true;
        // Restart meta file left behind by a graceful DataNode restart; its
        // single long token is the deadline (ms) until which this RBW replica
        // may be reloaded as RBW instead of being demoted to RWR.
        File restartMeta = new File(file.getParent() + File.pathSeparator + "." + file.getName() + ".restart");
        Scanner sc = null;
        try {
            sc = new Scanner(restartMeta, "UTF-8");
            // The restart meta file exists
            if (sc.hasNextLong() && (sc.nextLong() > timer.now())) {
                // It didn't expire. Load the replica as a RBW.
                // We don't know the expected block length, so just use 0
                // and don't reserve any more space for writes.
                newReplica = new ReplicaBuilder(ReplicaState.RBW)
                    .setBlockId(blockId)
                    .setLength(validateIntegrityAndSetLength(file, genStamp))
                    .setGenerationStamp(genStamp)
                    .setFsVolume(volume)
                    .setDirectoryToUse(file.getParentFile())
                    .setWriterThread(null)
                    .setBytesToReserve(0)
                    .build();
                loadRwr = false;
            }
            sc.close();
            if (!fileIoProvider.delete(volume, restartMeta)) {
                FsDatasetImpl.LOG.warn("Failed to delete restart meta file: " + restartMeta.getPath());
            }
        } catch (FileNotFoundException fnfe) {
            // Nothing to do here: the restart meta file does not exist.
        } finally {
            if (sc != null) {
                sc.close();
            }
        }
        // Restart meta doesn't exist or expired.
        if (loadRwr) {
            newReplica = new ReplicaBuilder(ReplicaState.RWR)
                .setBlockId(blockId)
                .setLength(validateIntegrityAndSetLength(file, genStamp))
                .setGenerationStamp(genStamp)
                .setFsVolume(volume)
                .setDirectoryToUse(file.getParentFile())
                .build();
        }
    }
    ReplicaInfo oldReplica = volumeMap.get(bpid, newReplica.getBlockId());
    if (oldReplica == null) {
        volumeMap.add(bpid, newReplica);
    } else {
        // We have multiple replicas of the same block so decide which one
        // to keep.
        newReplica = resolveDuplicateReplicas(newReplica, oldReplica, volumeMap);
    }
    // If we are retaining a replica on transient storage, make sure it is
    // in the lazyWriteReplicaMap so it can be persisted eventually.
    if (newReplica.getVolume().isTransientStorage()) {
        lazyWriteReplicaMap.addReplica(bpid, blockId, (FsVolumeImpl) newReplica.getVolume(), 0);
    } else {
        lazyWriteReplicaMap.discardReplica(bpid, blockId, false);
    }
    if (oldReplica == null) {
        incrNumBlocks();
    }
}
Also used: Scanner (java.util.Scanner), ReplicaInfo (org.apache.hadoop.hdfs.server.datanode.ReplicaInfo), ReplicaBuilder (org.apache.hadoop.hdfs.server.datanode.ReplicaBuilder), FileNotFoundException (java.io.FileNotFoundException), RandomAccessFile (java.io.RandomAccessFile), File (java.io.File)
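
The Scanner logic above consumes a ".restart" file whose first token is an absolute deadline in milliseconds: if the DataNode comes back before the deadline, the replica is reloaded as RBW; otherwise execution falls through to the RWR path. For orientation, here is a minimal sketch of the producing side; the helper name and the new File(parent, name) construction are illustrative, not Hadoop's code (the upstream excerpt builds the path with File.pathSeparator).

import java.io.File;
import java.io.FileWriter;
import java.io.IOException;

public class RestartMetaSketch {

    // Hypothetical writer for the file the reader above expects: a single
    // decimal long giving the wall-clock time (ms) until which an RBW
    // replica may survive a DataNode restart as RBW.
    static void writeRestartMeta(File blockFile, long gracePeriodMs)
            throws IOException {
        File restartMeta = new File(blockFile.getParentFile(),
            "." + blockFile.getName() + ".restart");
        try (FileWriter out = new FileWriter(restartMeta)) {
            // Scanner.hasNextLong() tokenizes on whitespace, so one
            // decimal long is the whole format.
            out.write(Long.toString(System.currentTimeMillis() + gracePeriodMs));
        }
    }
}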

Example 3 with ReplicaInfo

Use of org.apache.hadoop.hdfs.server.datanode.ReplicaInfo in project hadoop by apache.

Class TestFsDatasetImpl, method testDeletingBlocks.

@Test
public void testDeletingBlocks() throws IOException {
    HdfsConfiguration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
        cluster.waitActive();
        DataNode dn = cluster.getDataNodes().get(0);
        FsDatasetSpi<?> ds = DataNodeTestUtils.getFSDataset(dn);
        ds.addBlockPool(BLOCKPOOL, conf);
        FsVolumeImpl vol;
        try (FsDatasetSpi.FsVolumeReferences volumes = ds.getFsVolumeReferences()) {
            vol = (FsVolumeImpl) volumes.get(0);
        }
        ExtendedBlock eb;
        ReplicaInfo info;
        List<Block> blockList = new ArrayList<>();
        for (int i = 1; i <= 63; i++) {
            eb = new ExtendedBlock(BLOCKPOOL, i, 1, 1000 + i);
            cluster.getFsDatasetTestUtils(0).createFinalizedReplica(eb);
            blockList.add(eb.getLocalBlock());
        }
        ds.invalidate(BLOCKPOOL, blockList.toArray(new Block[0]));
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
            // Nothing to do
        }
        assertTrue(ds.isDeletingBlock(BLOCKPOOL, blockList.get(0).getBlockId()));
        blockList.clear();
        eb = new ExtendedBlock(BLOCKPOOL, 64, 1, 1064);
        cluster.getFsDatasetTestUtils(0).createFinalizedReplica(eb);
        blockList.add(eb.getLocalBlock());
        ds.invalidate(BLOCKPOOL, blockList.toArray(new Block[0]));
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
            // Nothing to do
        }
        assertFalse(ds.isDeletingBlock(BLOCKPOOL, blockList.get(0).getBlockId()));
    } finally {
        cluster.shutdown();
    }
}
Also used: MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), ReplicaInfo (org.apache.hadoop.hdfs.server.datanode.ReplicaInfo), FsDatasetSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), ArrayList (java.util.ArrayList), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), FsVolumeReferences (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), Block (org.apache.hadoop.hdfs.protocol.Block), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), Test (org.junit.Test)
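
The fixed Thread.sleep(1000) above makes the test timing-dependent: it passes only if the async deletion service happens to be in the expected state after exactly one second. A bounded poll is a more robust pattern; the helper below is a generic sketch, not part of the Hadoop test utilities.

import java.util.concurrent.TimeoutException;
import java.util.function.BooleanSupplier;

public final class WaitUtil {

    private WaitUtil() {
    }

    // Poll a condition until it holds or the deadline passes. Replaces a
    // fixed sleep/assert pair, e.g.:
    //   waitFor(() -> ds.isDeletingBlock(BLOCKPOOL, id), 10_000);
    static void waitFor(BooleanSupplier condition, long timeoutMs)
            throws InterruptedException, TimeoutException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (!condition.getAsBoolean()) {
            if (System.currentTimeMillis() > deadline) {
                throw new TimeoutException(
                    "condition not met within " + timeoutMs + " ms");
            }
            Thread.sleep(50);
        }
    }
}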

Example 4 with ReplicaInfo

Use of org.apache.hadoop.hdfs.server.datanode.ReplicaInfo in project hadoop by apache.

Class ReplicaMap, method initBlockPool.

void initBlockPool(String bpid) {
    checkBlockPool(bpid);
    try (AutoCloseableLock l = lock.acquire()) {
        FoldedTreeSet<ReplicaInfo> set = map.get(bpid);
        if (set == null) {
            // Add an entry for block pool if it does not exist already
            set = new FoldedTreeSet<>();
            map.put(bpid, set);
        }
    }
}
Also used: ReplicaInfo (org.apache.hadoop.hdfs.server.datanode.ReplicaInfo), AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock)
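
initBlockPool takes the map's lock through try-with-resources: AutoCloseableLock.acquire() returns the lock object itself, and close() releases it on every exit path, exceptions included. Below is a minimal sketch of the pattern, assuming the usual wrap-a-ReentrantLock approach; it is not the upstream class itself.

import java.util.concurrent.locks.ReentrantLock;

public class AutoCloseableLockSketch implements AutoCloseable {

    private final ReentrantLock lock = new ReentrantLock();

    // acquire() blocks until the lock is held, then returns this so the
    // caller can bind it in a try-with-resources statement.
    public AutoCloseableLockSketch acquire() {
        lock.lock();
        return this;
    }

    // close() runs automatically when the try block exits, on any path.
    @Override
    public void close() {
        lock.unlock();
    }

    public static void main(String[] args) {
        AutoCloseableLockSketch l = new AutoCloseableLockSketch();
        try (AutoCloseableLockSketch held = l.acquire()) {
            System.out.println("critical section, lock held: " + held);
        }
    }
}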

Example 5 with ReplicaInfo

Use of org.apache.hadoop.hdfs.server.datanode.ReplicaInfo in project hadoop by apache.

Class ReplicaMap, method remove.

/**
   * Remove the replica's meta information from the map if it matches
   * the input block's id and generation stamp.
   * @param bpid block pool id
   * @param block block with its id as the key
   * @return the removed replica's meta information
   * @throws IllegalArgumentException if the input block is null
   */
ReplicaInfo remove(String bpid, Block block) {
    checkBlockPool(bpid);
    checkBlock(block);
    try (AutoCloseableLock l = lock.acquire()) {
        FoldedTreeSet<ReplicaInfo> set = map.get(bpid);
        if (set != null) {
            ReplicaInfo replicaInfo = set.get(block.getBlockId(), LONG_AND_BLOCK_COMPARATOR);
            if (replicaInfo != null && block.getGenerationStamp() == replicaInfo.getGenerationStamp()) {
                return set.removeAndGet(replicaInfo);
            }
        }
    }
    return null;
}
Also used: ReplicaInfo (org.apache.hadoop.hdfs.server.datanode.ReplicaInfo), AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock)
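
remove looks the replica up by block id alone (which is what LONG_AND_BLOCK_COMPARATOR enables on the FoldedTreeSet) and only removes it when the generation stamp also matches, so a caller holding a stale generation stamp cannot evict a newer replica. The sketch below reproduces the same two-step check on a plain TreeMap; the class and field names are illustrative, not Hadoop's.

import java.util.TreeMap;

public class ReplicaMapSketch {

    // Stand-in for ReplicaInfo; only the fields the lookup needs.
    static final class Replica {
        final long blockId;
        final long genStamp;

        Replica(long blockId, long genStamp) {
            this.blockId = blockId;
            this.genStamp = genStamp;
        }
    }

    // Keyed by block id, mirroring the id-only lookup in Example 5.
    private final TreeMap<Long, Replica> map = new TreeMap<>();

    void add(Replica r) {
        map.put(r.blockId, r);
    }

    // Locate by id first, then require a matching generation stamp
    // before actually removing; otherwise leave the entry in place.
    Replica remove(long blockId, long genStamp) {
        Replica r = map.get(blockId);
        if (r != null && r.genStamp == genStamp) {
            return map.remove(blockId);
        }
        return null;
    }
}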

Aggregations

ReplicaInfo (org.apache.hadoop.hdfs.server.datanode.ReplicaInfo): 48 usages
AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock): 27 usages
IOException (java.io.IOException): 19 usages
MultipleIOException (org.apache.hadoop.io.MultipleIOException): 16 usages
File (java.io.File): 11 usages
ReplicaInPipeline (org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline): 10 usages
ReplicaNotFoundException (org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException): 10 usages
RandomAccessFile (java.io.RandomAccessFile): 7 usages
ReplicaBuilder (org.apache.hadoop.hdfs.server.datanode.ReplicaBuilder): 7 usages
FsVolumeReference (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference): 7 usages
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 6 usages
ReplicaAlreadyExistsException (org.apache.hadoop.hdfs.server.datanode.ReplicaAlreadyExistsException): 5 usages
Block (org.apache.hadoop.hdfs.protocol.Block): 4 usages
ReplicaHandler (org.apache.hadoop.hdfs.server.datanode.ReplicaHandler): 4 usages
FileInputStream (java.io.FileInputStream): 3 usages
FileNotFoundException (java.io.FileNotFoundException): 3 usages
ArrayList (java.util.ArrayList): 3 usages
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 3 usages
Test (org.junit.Test): 3 usages
HashMap (java.util.HashMap): 2 usages