
Example 6 with BlockManager

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager in project hadoop by apache.

The class TestFsck, method testBlockIdCKMaintenance.

/**
   * Test for blockIdCK with datanode maintenance.
   */
@Test(timeout = 90000)
public void testBlockIdCKMaintenance() throws Exception {
    final short replFactor = 2;
    short numDn = 2;
    final long blockSize = 512;
    String[] hosts = { "host1", "host2" };
    String[] racks = { "/rack1", "/rack2" };
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, replFactor);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY, replFactor);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAINTENANCE_REPLICATION_MIN_KEY, replFactor);
    DistributedFileSystem dfs;
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDn).hosts(hosts).racks(racks).build();
    assertNotNull("Failed Cluster Creation", cluster);
    cluster.waitClusterUp();
    dfs = cluster.getFileSystem();
    assertNotNull("Failed to get FileSystem", dfs);
    DFSTestUtil util = new DFSTestUtil.Builder().setName(getClass().getSimpleName()).setNumFiles(1).build();
    // create a test file
    final String pathString = "/testfile";
    final Path path = new Path(pathString);
    util.createFile(dfs, path, 1024, replFactor, 1000L);
    util.waitReplication(dfs, path, replFactor);
    StringBuilder sb = new StringBuilder();
    for (LocatedBlock lb : util.getAllBlocks(dfs, path)) {
        sb.append(lb.getBlock().getLocalBlock().getBlockName()).append(" ");
    }
    String[] bIds = sb.toString().split(" ");
    // make sure the datanode that holds the replica is healthy before maintenance
    String outStr = runFsck(conf, 0, true, "/", "-maintenance", "-blockId", bIds[0]);
    System.out.println(outStr);
    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
    FSNamesystem fsn = cluster.getNameNode().getNamesystem();
    BlockManager bm = fsn.getBlockManager();
    DatanodeManager dnm = bm.getDatanodeManager();
    DatanodeDescriptor dn = dnm.getDatanode(cluster.getDataNodes().get(0).getDatanodeId());
    bm.getDatanodeManager().getDecomManager().startMaintenance(dn, Long.MAX_VALUE);
    final String dnName = dn.getXferAddr();
    // wait for the node to transition to ENTERING_MAINTENANCE
    GenericTestUtils.waitFor(new Supplier<Boolean>() {

        @Override
        public Boolean get() {
            DatanodeInfo datanodeInfo = null;
            try {
                for (DatanodeInfo info : dfs.getDataNodeStats()) {
                    if (dnName.equals(info.getXferAddr())) {
                        datanodeInfo = info;
                    }
                }
                if (datanodeInfo != null && datanodeInfo.isEnteringMaintenance()) {
                    String fsckOut = runFsck(conf, 5, false, "/", "-maintenance", "-blockId", bIds[0]);
                    assertTrue(fsckOut.contains(NamenodeFsck.ENTERING_MAINTENANCE_STATUS));
                    return true;
                }
            } catch (Exception e) {
                LOG.warn("Unexpected exception: " + e);
                return false;
            }
            return false;
        }
    }, 500, 30000);
    // Start 3rd DataNode
    cluster.startDataNodes(conf, 1, true, null, new String[] { "/rack3" }, new String[] { "host3" }, null, false);
    // Wait for the 1st node to reach the IN_MAINTENANCE state
    GenericTestUtils.waitFor(new Supplier<Boolean>() {

        @Override
        public Boolean get() {
            try {
                DatanodeInfo datanodeInfo = null;
                for (DatanodeInfo info : dfs.getDataNodeStats()) {
                    if (dnName.equals(info.getXferAddr())) {
                        datanodeInfo = info;
                    }
                }
                if (datanodeInfo != null && datanodeInfo.isInMaintenance()) {
                    return true;
                }
            } catch (Exception e) {
                LOG.warn("Unexpected exception: " + e);
                return false;
            }
            return false;
        }
    }, 500, 30000);
    // check that the in-maintenance node is reported
    String fsckOut = runFsck(conf, 4, false, "/", "-maintenance", "-blockId", bIds[0]);
    assertTrue(fsckOut.contains(NamenodeFsck.IN_MAINTENANCE_STATUS));
    // check that in-maintenance nodes are not printed when not requested
    fsckOut = runFsck(conf, 4, false, "/", "-blockId", bIds[0]);
    assertFalse(fsckOut.contains(NamenodeFsck.IN_MAINTENANCE_STATUS));
}
Also used : Path(org.apache.hadoop.fs.Path) DFSTestUtil(org.apache.hadoop.hdfs.DFSTestUtil) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) Matchers.anyString(org.mockito.Matchers.anyString) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) IOException(java.io.IOException) ChecksumException(org.apache.hadoop.fs.ChecksumException) TimeoutException(java.util.concurrent.TimeoutException) UnresolvedLinkException(org.apache.hadoop.fs.UnresolvedLinkException) FileNotFoundException(java.io.FileNotFoundException) AccessControlException(org.apache.hadoop.security.AccessControlException) DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor) DatanodeManager(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager) BlockManager(org.apache.hadoop.hdfs.server.blockmanagement.BlockManager) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Test(org.junit.Test)
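A condensed sketch of the maintenance-entry pattern this test exercises, assuming a running MiniDFSCluster named cluster and a DistributedFileSystem named dfs as in the example above; the lambda form of the wait loop is an idiomatic alternative to the anonymous Supplier and behaves the same:

// Condensed sketch of the maintenance-entry pattern from the test above.
// Assumes a running MiniDFSCluster `cluster` and DistributedFileSystem `dfs`.
FSNamesystem fsn = cluster.getNameNode().getNamesystem();
BlockManager bm = fsn.getBlockManager();
DatanodeManager dnm = bm.getDatanodeManager();
DatanodeDescriptor dn =
    dnm.getDatanode(cluster.getDataNodes().get(0).getDatanodeId());
// Long.MAX_VALUE keeps the node in maintenance until explicitly stopped.
dnm.getDecomManager().startMaintenance(dn, Long.MAX_VALUE);
final String dnName = dn.getXferAddr();
// Poll until the NameNode reports the node as IN_MAINTENANCE.
GenericTestUtils.waitFor(() -> {
    try {
        for (DatanodeInfo info : dfs.getDataNodeStats()) {
            if (dnName.equals(info.getXferAddr()) && info.isInMaintenance()) {
                return true;
            }
        }
    } catch (IOException e) {
        // transient failure; retry on the next poll
    }
    return false;
}, 500, 30000);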

Example 7 with BlockManager

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager in project hadoop by apache.

The class TestFsck, method testFsckWithMaintenanceReplicas.

/**
   * Test that blocks on maintenance hosts are not shown as missing.
   */
@Test(timeout = 90000)
public void testFsckWithMaintenanceReplicas() throws Exception {
    final short replFactor = 2;
    short numDn = 2;
    final long blockSize = 512;
    String[] hosts = { "host1", "host2" };
    String[] racks = { "/rack1", "/rack2" };
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, replFactor);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY, replFactor);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAINTENANCE_REPLICATION_MIN_KEY, replFactor);
    DistributedFileSystem dfs;
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDn).hosts(hosts).racks(racks).build();
    assertNotNull("Failed Cluster Creation", cluster);
    cluster.waitClusterUp();
    dfs = cluster.getFileSystem();
    assertNotNull("Failed to get FileSystem", dfs);
    DFSTestUtil util = new DFSTestUtil.Builder().setName(getClass().getSimpleName()).setNumFiles(1).build();
    // create a test file
    final String testFile = "/testfile";
    final Path path = new Path(testFile);
    util.createFile(dfs, path, 1024, replFactor, 1000L);
    util.waitReplication(dfs, path, replFactor);
    StringBuilder sb = new StringBuilder();
    for (LocatedBlock lb : util.getAllBlocks(dfs, path)) {
        sb.append(lb.getBlock().getLocalBlock().getBlockName()).append(" ");
    }
    String[] bIds = sb.toString().split(" ");
    // make sure the datanode that holds the replica is healthy before maintenance
    String outStr = runFsck(conf, 0, true, testFile);
    System.out.println(outStr);
    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
    FSNamesystem fsn = cluster.getNameNode().getNamesystem();
    BlockManager bm = fsn.getBlockManager();
    DatanodeManager dnm = bm.getDatanodeManager();
    DatanodeDescriptor dn = dnm.getDatanode(cluster.getDataNodes().get(0).getDatanodeId());
    bm.getDatanodeManager().getDecomManager().startMaintenance(dn, Long.MAX_VALUE);
    final String dnName = dn.getXferAddr();
    // wait for the node to transition to ENTERING_MAINTENANCE
    GenericTestUtils.waitFor(new Supplier<Boolean>() {

        @Override
        public Boolean get() {
            DatanodeInfo datanodeInfo = null;
            try {
                for (DatanodeInfo info : dfs.getDataNodeStats()) {
                    if (dnName.equals(info.getXferAddr())) {
                        datanodeInfo = info;
                    }
                }
                if (datanodeInfo != null && datanodeInfo.isEnteringMaintenance()) {
                    // verify fsck returns Healthy status
                    String fsckOut = runFsck(conf, 0, true, testFile, "-maintenance");
                    assertTrue(fsckOut.contains(NamenodeFsck.HEALTHY_STATUS));
                    return true;
                }
            } catch (Exception e) {
                LOG.warn("Unexpected exception: " + e);
                return false;
            }
            return false;
        }
    }, 500, 30000);
    // Start a 3rd DataNode and wait for the node to reach the IN_MAINTENANCE state
    cluster.startDataNodes(conf, 1, true, null, new String[] { "/rack3" }, new String[] { "host3" }, null, false);
    GenericTestUtils.waitFor(new Supplier<Boolean>() {

        @Override
        public Boolean get() {
            DatanodeInfo datanodeInfo = null;
            try {
                for (DatanodeInfo info : dfs.getDataNodeStats()) {
                    if (dnName.equals(info.getXferAddr())) {
                        datanodeInfo = info;
                    }
                }
                if (datanodeInfo != null && datanodeInfo.isInMaintenance()) {
                    return true;
                }
            } catch (Exception e) {
                LOG.warn("Unexpected exception: " + e);
                return false;
            }
            return false;
        }
    }, 500, 30000);
    // verify fsck returns Healthy status
    String fsckOut = runFsck(conf, 0, true, testFile, "-maintenance");
    assertTrue(fsckOut.contains(NamenodeFsck.HEALTHY_STATUS));
    // verify fsck returns Healthy status even without maintenance option
    fsckOut = runFsck(conf, 0, true, testFile);
    assertTrue(fsckOut.contains(NamenodeFsck.HEALTHY_STATUS));
}
Also used : Path(org.apache.hadoop.fs.Path) DFSTestUtil(org.apache.hadoop.hdfs.DFSTestUtil) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) Matchers.anyString(org.mockito.Matchers.anyString) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) IOException(java.io.IOException) ChecksumException(org.apache.hadoop.fs.ChecksumException) TimeoutException(java.util.concurrent.TimeoutException) UnresolvedLinkException(org.apache.hadoop.fs.UnresolvedLinkException) FileNotFoundException(java.io.FileNotFoundException) AccessControlException(org.apache.hadoop.security.AccessControlException) DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor) DatanodeManager(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager) BlockManager(org.apache.hadoop.hdfs.server.blockmanagement.BlockManager) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Test(org.junit.Test)
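runFsck is a private helper of TestFsck rather than a public API. A plausible reconstruction of what such a helper does, based on the standard DFSck tool (the helper body itself is an assumption; conf is the test's Configuration, and DFSck lives in org.apache.hadoop.hdfs.tools):

// Sketch: drive fsck programmatically and capture its output, which is
// roughly what a runFsck-style helper needs to do.
ByteArrayOutputStream bout = new ByteArrayOutputStream();
PrintStream out = new PrintStream(bout, true);
int errCode = ToolRunner.run(new DFSck(conf, out),
    new String[] { "/testfile", "-maintenance" });
String output = bout.toString();
assertEquals("fsck should exit with code 0", 0, errCode);
assertTrue(output.contains(NamenodeFsck.HEALTHY_STATUS));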

Example 8 with BlockManager

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager in project hadoop by apache.

The class FSDirAppendOp, method appendFile.

/**
   * Append to an existing file.
   * <p>
   *
   * The method returns the last block of the file if this is a partial block,
   * which can still be used for writing more data. The client uses the
   * returned block locations to form the data pipeline for this block.<br>
   * The {@link LocatedBlock} will be null if the last block is full.
   * The client then allocates a new block with the next call using
   * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#addBlock}.
   * <p>
   *
   * For description of parameters and exceptions thrown see
   * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#append}
   *
   * @param fsn namespace
   * @param srcArg path name
   * @param pc permission checker to check fs permission
   * @param holder client name
   * @param clientMachine client machine info
   * @param newBlock if the data is appended to a new block
   * @param logRetryCache whether to record RPC ids in editlog for retry cache
   *                      rebuilding
   * @return the last block with status
   */
static LastBlockWithStatus appendFile(final FSNamesystem fsn, final String srcArg, final FSPermissionChecker pc, final String holder, final String clientMachine, final boolean newBlock, final boolean logRetryCache) throws IOException {
    assert fsn.hasWriteLock();
    final LocatedBlock lb;
    final FSDirectory fsd = fsn.getFSDirectory();
    final INodesInPath iip;
    fsd.writeLock();
    try {
        iip = fsd.resolvePath(pc, srcArg, DirOp.WRITE);
        // Verify that the destination does not exist as a directory already
        final INode inode = iip.getLastINode();
        final String path = iip.getPath();
        if (inode != null && inode.isDirectory()) {
            throw new FileAlreadyExistsException("Cannot append to directory " + path + "; already exists as a directory.");
        }
        if (fsd.isPermissionEnabled()) {
            fsd.checkPathAccess(pc, iip, FsAction.WRITE);
        }
        if (inode == null) {
            throw new FileNotFoundException("Failed to append to non-existent file " + path + " for client " + clientMachine);
        }
        final INodeFile file = INodeFile.valueOf(inode, path, true);
        // not support appending file with striped blocks
        if (file.isStriped()) {
            throw new UnsupportedOperationException("Cannot append to files with striped block " + path);
        }
        BlockManager blockManager = fsd.getBlockManager();
        final BlockStoragePolicy lpPolicy = blockManager.getStoragePolicy("LAZY_PERSIST");
        if (lpPolicy != null && lpPolicy.getId() == file.getStoragePolicyID()) {
            throw new UnsupportedOperationException("Cannot append to lazy persist file " + path);
        }
        // Opening an existing file for append - may need to recover lease.
        fsn.recoverLeaseInternal(RecoverLeaseOp.APPEND_FILE, iip, path, holder, clientMachine, false);
        final BlockInfo lastBlock = file.getLastBlock();
        // Check that the block has at least minimum replication.
        if (lastBlock != null) {
            if (lastBlock.getBlockUCState() == BlockUCState.COMMITTED) {
                throw new RetriableException(new NotReplicatedYetException("append: lastBlock=" + lastBlock + " of src=" + path + " is COMMITTED but not yet COMPLETE."));
            } else if (lastBlock.isComplete() && !blockManager.isSufficientlyReplicated(lastBlock)) {
                throw new IOException("append: lastBlock=" + lastBlock + " of src=" + path + " is not sufficiently replicated yet.");
            }
        }
        lb = prepareFileForAppend(fsn, iip, holder, clientMachine, newBlock, true, logRetryCache);
    } catch (IOException ie) {
        NameNode.stateChangeLog.warn("DIR* NameSystem.append: " + ie.getMessage());
        throw ie;
    } finally {
        fsd.writeUnlock();
    }
    HdfsFileStatus stat = FSDirStatAndListingOp.getFileInfo(fsd, iip);
    if (lb != null) {
        NameNode.stateChangeLog.debug("DIR* NameSystem.appendFile: file {} for {} at {} block {} block" + " size {}", srcArg, holder, clientMachine, lb.getBlock(), lb.getBlock().getNumBytes());
    }
    return new LastBlockWithStatus(lb, stat);
}
Also used : FileAlreadyExistsException(org.apache.hadoop.fs.FileAlreadyExistsException) LastBlockWithStatus(org.apache.hadoop.hdfs.protocol.LastBlockWithStatus) FileNotFoundException(java.io.FileNotFoundException) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) IOException(java.io.IOException) BlockManager(org.apache.hadoop.hdfs.server.blockmanagement.BlockManager) BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) BlockStoragePolicy(org.apache.hadoop.hdfs.protocol.BlockStoragePolicy) RetriableException(org.apache.hadoop.ipc.RetriableException)

Example 9 with BlockManager

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager in project hadoop by apache.

The class FSDirTruncateOp, method prepareFileForTruncate.

/**
   * Convert current INode to UnderConstruction. Recreate lease. Create new
   * block for the truncated copy. Schedule truncation of the replicas.
   *
   * @param fsn namespace
   * @param iip inodes in the path containing the file
   * @param leaseHolder lease holder
   * @param clientMachine client machine info
   * @param lastBlockDelta last block delta size
   * @param newBlock new block
   * @return the returned block will be written to editLog and passed back
   *         into this method upon loading.
   * @throws IOException
   */
@VisibleForTesting
static Block prepareFileForTruncate(FSNamesystem fsn, INodesInPath iip, String leaseHolder, String clientMachine, long lastBlockDelta, Block newBlock) throws IOException {
    assert fsn.hasWriteLock();
    INodeFile file = iip.getLastINode().asFile();
    assert !file.isStriped();
    file.recordModification(iip.getLatestSnapshotId());
    file.toUnderConstruction(leaseHolder, clientMachine);
    assert file.isUnderConstruction() : "inode should be under construction.";
    fsn.getLeaseManager().addLease(file.getFileUnderConstructionFeature().getClientName(), file.getId());
    boolean shouldRecoverNow = (newBlock == null);
    BlockInfo oldBlock = file.getLastBlock();
    boolean shouldCopyOnTruncate = shouldCopyOnTruncate(fsn, file, oldBlock);
    if (newBlock == null) {
        newBlock = (shouldCopyOnTruncate) ? fsn.createNewBlock(BlockType.CONTIGUOUS) : new Block(oldBlock.getBlockId(), oldBlock.getNumBytes(), fsn.nextGenerationStamp(fsn.getBlockManager().isLegacyBlock(oldBlock)));
    }
    final BlockInfo truncatedBlockUC;
    BlockManager blockManager = fsn.getFSDirectory().getBlockManager();
    if (shouldCopyOnTruncate) {
        // Add new truncateBlock into blocksMap and
        // use oldBlock as a source for copy-on-truncate recovery
        truncatedBlockUC = new BlockInfoContiguous(newBlock, file.getPreferredBlockReplication());
        truncatedBlockUC.convertToBlockUnderConstruction(BlockUCState.UNDER_CONSTRUCTION, blockManager.getStorages(oldBlock));
        truncatedBlockUC.setNumBytes(oldBlock.getNumBytes() - lastBlockDelta);
        truncatedBlockUC.getUnderConstructionFeature().setTruncateBlock(oldBlock);
        file.setLastBlock(truncatedBlockUC);
        blockManager.addBlockCollection(truncatedBlockUC, file);
        NameNode.stateChangeLog.debug("BLOCK* prepareFileForTruncate: Scheduling copy-on-truncate to new" + " size {}  new block {} old block {}", truncatedBlockUC.getNumBytes(), newBlock, oldBlock);
    } else {
        // Use new generation stamp for in-place truncate recovery
        blockManager.convertLastBlockToUnderConstruction(file, lastBlockDelta);
        oldBlock = file.getLastBlock();
        assert !oldBlock.isComplete() : "oldBlock should be under construction";
        BlockUnderConstructionFeature uc = oldBlock.getUnderConstructionFeature();
        uc.setTruncateBlock(new Block(oldBlock));
        uc.getTruncateBlock().setNumBytes(oldBlock.getNumBytes() - lastBlockDelta);
        uc.getTruncateBlock().setGenerationStamp(newBlock.getGenerationStamp());
        truncatedBlockUC = oldBlock;
        NameNode.stateChangeLog.debug("BLOCK* prepareFileForTruncate: " + "{} Scheduling in-place block truncate to new size {}", uc, uc.getTruncateBlock().getNumBytes());
    }
    if (shouldRecoverNow) {
        truncatedBlockUC.getUnderConstructionFeature().initializeBlockRecovery(truncatedBlockUC, newBlock.getGenerationStamp());
    }
    return newBlock;
}
Also used : BlockInfoContiguous(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous) BlockUnderConstructionFeature(org.apache.hadoop.hdfs.server.blockmanagement.BlockUnderConstructionFeature) BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) BlockManager(org.apache.hadoop.hdfs.server.blockmanagement.BlockManager) Block(org.apache.hadoop.hdfs.protocol.Block) VisibleForTesting(com.google.common.annotations.VisibleForTesting)
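Similarly, a minimal client-side sketch of the truncate call that drives this code path; the path and new length are hypothetical. FileSystem#truncate returns true when the truncate completes immediately and false when the block recovery scheduled above must finish first:

Configuration conf = new Configuration();
DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
// Truncating to a length inside the last block triggers prepareFileForTruncate.
boolean done = dfs.truncate(new Path("/user/demo/data.bin"), 1000L);
if (!done) {
    // Block recovery is in progress; the file stays under construction
    // until the last block is cut down to the requested length.
    System.out.println("truncate pending block recovery");
}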

Example 10 with BlockManager

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager in project hadoop by apache.

The class TestMaintenanceState, method checkFile.

/**
  * Verify that the number of replicas is as expected for each block in
  * the given file.
  *
  * @return - null if no failure found, else an error message string.
  */
static String checkFile(FSNamesystem ns, FileSystem fileSys, Path name, int repl, DatanodeInfo expectedExcludedNode, DatanodeInfo expectedMaintenanceNode) throws IOException {
    // need a raw stream
    assertTrue("Not HDFS:" + fileSys.getUri(), fileSys instanceof DistributedFileSystem);
    HdfsDataInputStream dis = (HdfsDataInputStream) fileSys.open(name);
    BlockManager bm = ns.getBlockManager();
    Collection<LocatedBlock> dinfo = dis.getAllBlocks();
    String output;
    for (LocatedBlock blk : dinfo) {
        // for each block
        DatanodeInfo[] nodes = blk.getLocations();
        for (int j = 0; j < nodes.length; j++) {
            // for each replica
            if (expectedExcludedNode != null && nodes[j].equals(expectedExcludedNode)) {
                //excluded node must not be in LocatedBlock.
                output = "For block " + blk.getBlock() + " replica on " + nodes[j] + " found in LocatedBlock.";
                LOG.info(output);
                return output;
            } else {
                if (nodes[j].isInMaintenance()) {
                    //IN_MAINTENANCE node must not be in LocatedBlock.
                    output = "For block " + blk.getBlock() + " replica on " + nodes[j] + " which is in maintenance state.";
                    LOG.info(output);
                    return output;
                }
            }
        }
        if (repl != nodes.length) {
            output = "Wrong number of replicas for block " + blk.getBlock() + ": expected " + repl + ", got " + nodes.length + " ,";
            for (int j = 0; j < nodes.length; j++) {
                // for each replica
                output += nodes[j] + ",";
            }
            output += "pending block # " + ns.getPendingReplicationBlocks() + " ,";
            output += "under replicated # " + ns.getUnderReplicatedBlocks() + " ,";
            if (expectedExcludedNode != null) {
                output += "excluded node " + expectedExcludedNode;
            }
            LOG.info(output);
            return output;
        }
        // Verify it has the expected maintenance node
        Iterator<DatanodeStorageInfo> storageInfoIter = bm.getStorages(blk.getBlock().getLocalBlock()).iterator();
        List<DatanodeInfo> maintenanceNodes = new ArrayList<>();
        while (storageInfoIter.hasNext()) {
            DatanodeInfo node = storageInfoIter.next().getDatanodeDescriptor();
            if (node.isMaintenance()) {
                maintenanceNodes.add(node);
            }
        }
        if (expectedMaintenanceNode != null) {
            if (!maintenanceNodes.contains(expectedMaintenanceNode)) {
                output = "No maintenance replica on " + expectedMaintenanceNode;
                LOG.info(output);
                return output;
            }
        } else {
            if (maintenanceNodes.size() != 0) {
                output = "Has maintenance replica(s)";
                LOG.info(output);
                return output;
            }
        }
    }
    return null;
}
Also used : DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) ArrayList(java.util.ArrayList) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) DatanodeStorageInfo(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo) BlockManager(org.apache.hadoop.hdfs.server.blockmanagement.BlockManager) HdfsDataInputStream(org.apache.hadoop.hdfs.client.HdfsDataInputStream)
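A usage sketch for checkFile; every name except checkFile itself is hypothetical. A null return means each block had the expected replica count, no replica was reported on an excluded or in-maintenance node, and the expected maintenance replica was present in the BlockManager's storages:

// Sketch: assumes a test with a MiniDFSCluster `cluster`, an open
// FileSystem `fileSys`, a file at `filePath` with `repl` live replicas,
// and a node `maintenanceNode` already moved into maintenance.
String err = checkFile(cluster.getNamesystem(), fileSys, filePath,
    repl, null /* no excluded node */, maintenanceNode);
assertNull(err, err);  // a non-null result carries the failure description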

Aggregations

BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager): 47
Test (org.junit.Test): 33
Path (org.apache.hadoop.fs.Path): 21
BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo): 13
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 12
IOException (java.io.IOException): 11
Configuration (org.apache.hadoop.conf.Configuration): 11
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 11
DatanodeDescriptor (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor): 11
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 10
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 9
Block (org.apache.hadoop.hdfs.protocol.Block): 8
FileNotFoundException (java.io.FileNotFoundException): 7
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 7
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 7
LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock): 7
FileSystem (org.apache.hadoop.fs.FileSystem): 6
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 6
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 6
DatanodeManager (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager): 6