Example 6 with BlockReportReplica

Use of org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica in the Apache Hadoop project.

From the class BlockManager, the method processReport:

private Collection<Block> processReport(final DatanodeStorageInfo storageInfo, final BlockListAsLongs report, BlockReportContext context) throws IOException {
    // Normal case:
    // Modify the (block-->datanode) map, according to the difference
    // between the old and new block report.
    //
    Collection<BlockInfoToAdd> toAdd = new LinkedList<>();
    Collection<BlockInfo> toRemove = new TreeSet<>();
    Collection<Block> toInvalidate = new LinkedList<>();
    Collection<BlockToMarkCorrupt> toCorrupt = new LinkedList<>();
    Collection<StatefulBlockInfo> toUC = new LinkedList<>();
    boolean sorted = false;
    String strBlockReportId = "";
    if (context != null) {
        sorted = context.isSorted();
        strBlockReportId = Long.toHexString(context.getReportId());
    }
    Iterable<BlockReportReplica> sortedReport;
    if (!sorted) {
        blockLog.warn("BLOCK* processReport 0x{}: Report from the DataNode ({}) " + "is unsorted. This will cause overhead on the NameNode " + "which needs to sort the Full BR. Please update the " + "DataNode to the same version of Hadoop HDFS as the " + "NameNode ({}).", strBlockReportId, storageInfo.getDatanodeDescriptor().getDatanodeUuid(), VersionInfo.getVersion());
        Set<BlockReportReplica> set = new FoldedTreeSet<>();
        for (BlockReportReplica iblk : report) {
            set.add(new BlockReportReplica(iblk));
        }
        sortedReport = set;
    } else {
        sortedReport = report;
    }
    reportDiffSorted(storageInfo, sortedReport, toAdd, toRemove, toInvalidate, toCorrupt, toUC);
    DatanodeDescriptor node = storageInfo.getDatanodeDescriptor();
    // Process the blocks on each queue
    for (StatefulBlockInfo b : toUC) {
        addStoredBlockUnderConstruction(b, storageInfo);
    }
    for (BlockInfo b : toRemove) {
        removeStoredBlock(b, node);
    }
    int numBlocksLogged = 0;
    for (BlockInfoToAdd b : toAdd) {
        addStoredBlock(b.stored, b.reported, storageInfo, null, numBlocksLogged < maxNumBlocksToLog);
        numBlocksLogged++;
    }
    if (numBlocksLogged > maxNumBlocksToLog) {
        blockLog.info("BLOCK* processReport 0x{}: logged info for {} of {} " + "reported.", strBlockReportId, maxNumBlocksToLog, numBlocksLogged);
    }
    for (Block b : toInvalidate) {
        addToInvalidates(b, node);
    }
    for (BlockToMarkCorrupt b : toCorrupt) {
        markBlockAsCorrupt(b, storageInfo, node);
    }
    return toInvalidate;
}
Also used: BlockReportReplica (org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica), FoldedTreeSet (org.apache.hadoop.hdfs.util.FoldedTreeSet), LinkedList (java.util.LinkedList), TreeSet (java.util.TreeSet), ReportedBlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.PendingDataNodeMessages.ReportedBlockInfo), ReceivedDeletedBlockInfo (org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo), Block (org.apache.hadoop.hdfs.protocol.Block), CachedBlock (org.apache.hadoop.hdfs.server.namenode.CachedBlock), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock)
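
The report handed to processReport is iterable as BlockReportReplica instances, which is what makes the state-based bookkeeping above possible. The following is a minimal, self-contained sketch — the summarize helper and its counters are hypothetical, not Hadoop API — that decodes a long-encoded report with BlockListAsLongs.decodeLongs and tallies replicas by state:

import java.util.List;

import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica;

public class BlockReportSummarySketch {
    // Decode a long-encoded full block report and tally replicas by state.
    static void summarize(List<Long> encodedReport) {
        BlockListAsLongs report = BlockListAsLongs.decodeLongs(encodedReport);
        int finalized = 0;
        int other = 0;
        for (BlockReportReplica replica : report) {
            // BlockReportReplica extends Block, so the block id, length and
            // generation stamp are available on the replica directly.
            switch (replica.getState()) {
                case FINALIZED:
                    finalized++;
                    break;
                default:
                    other++;
                    break;
            }
        }
        System.out.println("finalized=" + finalized + ", other=" + other
            + ", total=" + report.getNumberOfBlocks());
    }
}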

Example 7 with BlockReportReplica

Use of org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica in the Apache Hadoop project.

From the class BlockPoolSlice, the method readReplicasFromCache:

private boolean readReplicasFromCache(ReplicaMap volumeMap, final RamDiskReplicaTracker lazyWriteReplicaMap) {
    ReplicaMap tmpReplicaMap = new ReplicaMap(new AutoCloseableLock());
    File replicaFile = new File(currentDir, REPLICA_CACHE_FILE);
    // Check whether the file exists or not.
    if (!replicaFile.exists()) {
        LOG.info("Replica Cache file: " + replicaFile.getPath() + " doesn't exist ");
        return false;
    }
    long fileLastModifiedTime = replicaFile.lastModified();
    if (System.currentTimeMillis() > fileLastModifiedTime + replicaCacheExpiry) {
        LOG.info("Replica Cache file: " + replicaFile.getPath() + " has gone stale");
        // Just to make findbugs happy
        if (!replicaFile.delete()) {
            LOG.info("Replica Cache file: " + replicaFile.getPath() + " cannot be deleted");
        }
        return false;
    }
    FileInputStream inputStream = null;
    try {
        inputStream = fileIoProvider.getFileInputStream(volume, replicaFile);
        BlockListAsLongs blocksList = BlockListAsLongs.readFrom(inputStream, maxDataLength);
        if (blocksList == null) {
            return false;
        }
        for (BlockReportReplica replica : blocksList) {
            switch (replica.getState()) {
                case FINALIZED:
                    addReplicaToReplicasMap(replica, tmpReplicaMap, lazyWriteReplicaMap, true);
                    break;
                case RUR:
                case RBW:
                case RWR:
                    addReplicaToReplicasMap(replica, tmpReplicaMap, lazyWriteReplicaMap, false);
                    break;
                default:
                    break;
            }
        }
        inputStream.close();
        // Now it is safe to add the replicas into volumeMap. In case of any
        // exception while parsing this cache file, fall back
        // to scan all the files on disk.
        for (Iterator<ReplicaInfo> iter = tmpReplicaMap.replicas(bpid).iterator(); iter.hasNext(); ) {
            ReplicaInfo info = iter.next();
            // We use a lightweight GSet to store replicaInfo, we need to remove
            // it from one GSet before adding to another.
            iter.remove();
            volumeMap.add(bpid, info);
        }
        LOG.info("Successfully read replica from cache file : " + replicaFile.getPath());
        return true;
    } catch (Exception e) {
        // On any exception, log the error and return false so the caller
        // falls back to reading the replicas from disk.
        LOG.info("Exception occurred while reading the replicas cache file: " + replicaFile.getPath(), e);
        return false;
    } finally {
        if (!fileIoProvider.delete(volume, replicaFile)) {
            LOG.info("Failed to delete replica cache file: " + replicaFile.getPath());
        }
        // close the inputStream
        IOUtils.closeStream(inputStream);
    }
}
Also used: ReplicaInfo (org.apache.hadoop.hdfs.server.datanode.ReplicaInfo), AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock), BlockListAsLongs (org.apache.hadoop.hdfs.protocol.BlockListAsLongs), BlockReportReplica (org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica), RandomAccessFile (java.io.RandomAccessFile), File (java.io.File), FileInputStream (java.io.FileInputStream), IOException (java.io.IOException), FileNotFoundException (java.io.FileNotFoundException), DiskErrorException (org.apache.hadoop.util.DiskChecker.DiskErrorException)
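
readReplicasFromCache is the read half of a save/restore cycle: on shutdown the DataNode persists the block list, and on restart it reads the cache back instead of scanning every replica file. Below is a minimal sketch of that round trip, assuming only the writeTo and readFrom methods already visible above; the roundTrip helper and its arguments are illustrative, not Hadoop API:

import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.InputStream;
import java.io.OutputStream;

import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica;

public class ReplicaCacheRoundTripSketch {
    // Persist a block list as BlockPoolSlice does on shutdown, then read it
    // back the way readReplicasFromCache() does on restart.
    static void roundTrip(BlockListAsLongs blocks, File cacheFile,
                          int maxDataLength) throws Exception {
        try (OutputStream out = new FileOutputStream(cacheFile)) {
            // Serialize the report to the stream.
            blocks.writeTo(out);
        }
        try (InputStream in = new FileInputStream(cacheFile)) {
            BlockListAsLongs restored =
                BlockListAsLongs.readFrom(in, maxDataLength);
            if (restored == null) {
                // Unreadable or truncated cache: the DataNode would fall
                // back to scanning the replica files on disk.
                return;
            }
            for (BlockReportReplica replica : restored) {
                System.out.println(replica.getBlockName()
                    + " state=" + replica.getState());
            }
        }
    }
}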

Example 8 with BlockReportReplica

Use of org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica in the Apache Hadoop project.

From the class TestFileCorruption, the method testFileCorruption:

/** Check that DFS handles corrupted blocks properly. */
@Test
public void testFileCorruption() throws Exception {
    MiniDFSCluster cluster = null;
    DFSTestUtil util = new DFSTestUtil.Builder().setName("TestFileCorruption").setNumFiles(20).build();
    try {
        Configuration conf = new HdfsConfiguration();
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
        FileSystem fs = cluster.getFileSystem();
        util.createFiles(fs, "/srcdat");
        // Now deliberately remove the blocks
        String bpid = cluster.getNamesystem().getBlockPoolId();
        DataNode dn = cluster.getDataNodes().get(2);
        Map<DatanodeStorage, BlockListAsLongs> blockReports = dn.getFSDataset().getBlockReports(bpid);
        assertTrue("Blocks do not exist on data-dir", !blockReports.isEmpty());
        for (BlockListAsLongs report : blockReports.values()) {
            for (BlockReportReplica brr : report) {
                LOG.info("Deliberately removing block {}", brr.getBlockName());
                cluster.getFsDatasetTestUtils(2).getMaterializedReplica(new ExtendedBlock(bpid, brr)).deleteData();
            }
        }
        assertTrue("Corrupted replicas not handled properly.", util.checkFiles(fs, "/srcdat"));
        util.cleanup(fs, "/srcdat");
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Configuration (org.apache.hadoop.conf.Configuration), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), BlockReportReplica (org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), FileSystem (org.apache.hadoop.fs.FileSystem), DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage), BlockListAsLongs (org.apache.hadoop.hdfs.protocol.BlockListAsLongs), Test (org.junit.Test)
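
Because BlockReportReplica is a Block subclass, a test can turn each reported replica into an ExtendedBlock for a given block pool, which is exactly how the deletion loop above locates on-disk replica data. A minimal sketch — the printExtendedBlocks helper is hypothetical:

import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

public class ReportToExtendedBlockSketch {
    // Map every replica in a report to an ExtendedBlock in the given block
    // pool, mirroring the deletion loop in the test above.
    static void printExtendedBlocks(String bpid, BlockListAsLongs report) {
        for (BlockReportReplica brr : report) {
            // BlockReportReplica extends Block, so it can seed an
            // ExtendedBlock directly.
            ExtendedBlock eb = new ExtendedBlock(bpid, brr);
            System.out.println(eb);
        }
    }
}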
