
Example 51 with LocatedBlocks

use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.

the class TestAddStripedBlocks method testAllocateBlockId.

/**
   * Make sure the IDs of striped blocks do not conflict
   */
@Test
public void testAllocateBlockId() throws Exception {
    Path testPath = new Path("/testfile");
    // create a file, which allocates a new block
    DFSTestUtil.writeFile(dfs, testPath, "hello, world!");
    LocatedBlocks lb = dfs.getClient().getLocatedBlocks(testPath.toString(), 0);
    final long firstId = lb.get(0).getBlock().getBlockId();
    // delete the file
    dfs.delete(testPath, true);
    // allocate a new block, and make sure the new block's id does not conflict
    // with the previous one
    DFSTestUtil.writeFile(dfs, testPath, "hello again");
    lb = dfs.getClient().getLocatedBlocks(testPath.toString(), 0);
    final long secondId = lb.get(0).getBlock().getBlockId();
    Assert.assertEquals(firstId + HdfsServerConstants.MAX_BLOCKS_IN_GROUP, secondId);
}
Also used : Path(org.apache.hadoop.fs.Path) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) Test(org.junit.Test)
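
The assertion on the last line holds because each striped block group reserves a contiguous range of MAX_BLOCKS_IN_GROUP IDs, one slot per internal block, so the next allocated group ID starts exactly one range later. Below is a minimal sketch of that layout; the class and method names are hypothetical helpers for illustration, not HDFS APIs.

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;

// Hypothetical helpers illustrating the striped block ID layout the test relies on.
class StripedBlockIdSketch {

    // Internal block IDs within a group are contiguous, starting at the group ID.
    static long internalBlockId(long blockGroupId, int indexInGroup) {
        return blockGroupId + indexInGroup;
    }

    // The next group's ID begins after the current group's reserved range, which
    // is why the test expects secondId == firstId + MAX_BLOCKS_IN_GROUP.
    static long nextBlockGroupId(long blockGroupId) {
        return blockGroupId + HdfsServerConstants.MAX_BLOCKS_IN_GROUP;
    }
}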

Example 52 with LocatedBlocks

use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.

the class TestReconstructStripedBlocks method testCountLiveReplicas.

/**
   * Make sure the NN can detect the scenario where there are enough
   * internal blocks (>= 9 by default) but a data or parity block is still
   * missing.
   */
@Test
public void testCountLiveReplicas() throws Exception {
    final HdfsConfiguration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY, false);
    conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, StripedFileTestUtil.getDefaultECPolicy().getName());
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(groupSize + 2).build();
    cluster.waitActive();
    DistributedFileSystem fs = cluster.getFileSystem();
    try {
        fs.mkdirs(dirPath);
        fs.setErasureCodingPolicy(dirPath, StripedFileTestUtil.getDefaultECPolicy().getName());
        DFSTestUtil.createFile(fs, filePath, cellSize * dataBlocks * 2, (short) 1, 0L);
        // stop a dn
        LocatedBlocks blks = fs.getClient().getLocatedBlocks(filePath.toString(), 0);
        LocatedStripedBlock block = (LocatedStripedBlock) blks.getLastLocatedBlock();
        DatanodeInfo dnToStop = block.getLocations()[0];
        MiniDFSCluster.DataNodeProperties dnProp = cluster.stopDataNode(dnToStop.getXferAddr());
        cluster.setDataNodeDead(dnToStop);
        // wait for reconstruction to happen
        DFSTestUtil.waitForReplication(fs, filePath, groupSize, 15 * 1000);
        // bring the dn back: 10 internal blocks now
        cluster.restartDataNode(dnProp);
        cluster.waitActive();
        // stop another dn: 9 internal blocks remain, but they cover only 8 distinct indices
        dnToStop = block.getLocations()[1];
        cluster.stopDataNode(dnToStop.getXferAddr());
        cluster.setDataNodeDead(dnToStop);
        // the namenode currently tracks the missing block; restart it so that state must be rebuilt
        cluster.restartNameNode(true);
        for (DataNode dn : cluster.getDataNodes()) {
            DataNodeTestUtils.triggerBlockReport(dn);
        }
        FSNamesystem fsn = cluster.getNamesystem();
        BlockManager bm = fsn.getBlockManager();
        // wait for 3 cycles of the redundancy monitor
        Thread.sleep(3000);
        for (DataNode dn : cluster.getDataNodes()) {
            DataNodeTestUtils.triggerHeartbeat(dn);
        }
        // check if NN can detect the missing internal block and finish the
        // reconstruction
        StripedFileTestUtil.waitForReconstructionFinished(filePath, fs, groupSize);
        boolean reconstructed = false;
        for (int i = 0; i < 5; i++) {
            NumberReplicas num = null;
            fsn.readLock();
            try {
                BlockInfo blockInfo = cluster.getNamesystem().getFSDirectory().getINode4Write(filePath.toString()).asFile().getLastBlock();
                num = bm.countNodes(blockInfo);
            } finally {
                fsn.readUnlock();
            }
            if (num.liveReplicas() >= groupSize) {
                reconstructed = true;
                break;
            } else {
                Thread.sleep(1000);
            }
        }
        Assert.assertTrue(reconstructed);
        blks = fs.getClient().getLocatedBlocks(filePath.toString(), 0);
        block = (LocatedStripedBlock) blks.getLastLocatedBlock();
        BitSet bitSet = new BitSet(groupSize);
        for (byte index : block.getBlockIndices()) {
            bitSet.set(index);
        }
        for (int i = 0; i < groupSize; i++) {
            Assert.assertTrue(bitSet.get(i));
        }
    } finally {
        cluster.shutdown();
    }
}
Also used : DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) BitSet(java.util.BitSet) NumberReplicas(org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) LocatedStripedBlock(org.apache.hadoop.hdfs.protocol.LocatedStripedBlock) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) BlockManager(org.apache.hadoop.hdfs.server.blockmanagement.BlockManager) BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) Test(org.junit.Test)
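
The closing loop verifies that, after reconstruction, the reported internal blocks cover every index from 0 to groupSize - 1. The same check as a standalone helper; a sketch with an illustrative class name, assuming only that getBlockIndices() yields one byte per reported internal block, as in the test above.

import java.util.BitSet;

// Illustrative helper: true iff every index in [0, groupSize) appears in `indices`.
class IndexCoverageSketch {

    static boolean coversAllIndices(byte[] indices, int groupSize) {
        BitSet seen = new BitSet(groupSize);
        for (byte index : indices) {
            seen.set(index);
        }
        // a clear bit below groupSize means some internal block is still missing
        return seen.nextClearBit(0) >= groupSize;
    }
}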

Example 53 with LocatedBlocks

use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.

the class TestUpgradeDomainBlockPlacementPolicy method testPlacementAfterDecommission.

@Test(timeout = 300000)
public void testPlacementAfterDecommission() throws Exception {
    final long fileSize = DEFAULT_BLOCK_SIZE * 5;
    final String testFile = "/testfile";
    final Path path = new Path(testFile);
    DFSTestUtil.createFile(cluster.getFileSystem(), path, fileSize, REPLICATION_FACTOR, 1000L);
    // Decommission some nodes and wait until decommissions have finished.
    refreshDatanodeAdminProperties2();
    GenericTestUtils.waitFor(new Supplier<Boolean>() {

        @Override
        public Boolean get() {
            boolean successful = true;
            LocatedBlocks locatedBlocks;
            try {
                locatedBlocks = cluster.getFileSystem().getClient().getLocatedBlocks(path.toString(), 0, fileSize);
            } catch (IOException ioe) {
                return false;
            }
            for (LocatedBlock block : locatedBlocks.getLocatedBlocks()) {
                Set<DatanodeInfo> locs = new HashSet<>();
                for (DatanodeInfo datanodeInfo : block.getLocations()) {
                    if (datanodeInfo.getAdminState() == DatanodeInfo.AdminStates.NORMAL) {
                        locs.add(datanodeInfo);
                    }
                }
                for (DatanodeID datanodeID : expectedDatanodeIDs) {
                    successful = successful && locs.contains(datanodeID);
                }
            }
            return successful;
        }
    }, 1000, 60000);
    // Verify block placement policy of each block.
    LocatedBlocks locatedBlocks;
    locatedBlocks = cluster.getFileSystem().getClient().getLocatedBlocks(path.toString(), 0, fileSize);
    for (LocatedBlock block : locatedBlocks.getLocatedBlocks()) {
        BlockPlacementStatus status = cluster.getNamesystem().getBlockManager().getBlockPlacementPolicy().verifyBlockPlacement(block.getLocations(), REPLICATION_FACTOR);
        assertTrue(status.isPlacementPolicySatisfied());
    }
}
Also used : Path(org.apache.hadoop.fs.Path) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) HashSet(java.util.HashSet) Set(java.util.Set) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) IOException(java.io.IOException) BlockPlacementStatus(org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementStatus) DatanodeID(org.apache.hadoop.hdfs.protocol.DatanodeID) Test(org.junit.Test)
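
Since Java 8, the same wait can be written with a lambda. This is a sketch assuming the GenericTestUtils.waitFor(Supplier<Boolean>, int, int) overload used above; note that Hadoop's Supplier import has moved between Guava and java.util.function across releases, which is one reason the anonymous-class form in the test is the portable one. The variables path, fileSize, and expectedDatanodeIDs come from the surrounding test.

GenericTestUtils.waitFor(() -> {
    try {
        LocatedBlocks lbs = cluster.getFileSystem().getClient()
            .getLocatedBlocks(path.toString(), 0, fileSize);
        for (LocatedBlock blk : lbs.getLocatedBlocks()) {
            Set<DatanodeInfo> live = new HashSet<>();
            for (DatanodeInfo dn : blk.getLocations()) {
                if (dn.getAdminState() == DatanodeInfo.AdminStates.NORMAL) {
                    live.add(dn);
                }
            }
            // DatanodeInfo extends DatanodeID, so contains() matches across the types
            if (!live.containsAll(expectedDatanodeIDs)) {
                return false;
            }
        }
        return true;
    } catch (IOException ioe) {
        return false;
    }
}, 1000, 60000);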

Example 54 with LocatedBlocks

use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.

the class TestUpgradeDomainBlockPlacementPolicy method testPlacement.

@Test
public void testPlacement() throws Exception {
    final long fileSize = DEFAULT_BLOCK_SIZE * 5;
    final String testFile = "/testfile";
    final Path path = new Path(testFile);
    DFSTestUtil.createFile(cluster.getFileSystem(), path, fileSize, REPLICATION_FACTOR, 1000L);
    LocatedBlocks locatedBlocks = cluster.getFileSystem().getClient().getLocatedBlocks(path.toString(), 0, fileSize);
    for (LocatedBlock block : locatedBlocks.getLocatedBlocks()) {
        Set<DatanodeInfo> locs = new HashSet<>();
        for (DatanodeInfo datanodeInfo : block.getLocations()) {
            if (datanodeInfo.getAdminState() == DatanodeInfo.AdminStates.NORMAL) {
                locs.add(datanodeInfo);
            }
        }
        for (DatanodeID datanodeID : expectedDatanodeIDs) {
            assertTrue(locs.contains(datanodeID));
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) DatanodeID(org.apache.hadoop.hdfs.protocol.DatanodeID) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) HashSet(java.util.HashSet) Test(org.junit.Test)
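
What the placement policy ultimately enforces is spread across upgrade domains. A small sketch for inspecting that spread per block; the class name is illustrative, and it assumes DatanodeInfo.getUpgradeDomain() as introduced with the upgrade-domain feature, which may return null when no domain is configured for a datanode.

import java.util.HashSet;
import java.util.Set;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;

// Illustrative helper: the distinct upgrade domains hosting a block's replicas.
class UpgradeDomainSketch {

    static Set<String> upgradeDomainsOf(LocatedBlock block) {
        Set<String> domains = new HashSet<>();
        for (DatanodeInfo dn : block.getLocations()) {
            // getUpgradeDomain() may be null if the cluster defines no domains
            String domain = dn.getUpgradeDomain();
            if (domain != null) {
                domains.add(domain);
            }
        }
        return domains;
    }
}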

Example 55 with LocatedBlocks

use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hbase by apache.

the class TestBlockReorder method testFromDFS.

private void testFromDFS(DistributedFileSystem dfs, String src, int repCount, String localhost) throws Exception {
    // Repeat multiple times, since the replica order is random
    for (int i = 0; i < 10; i++) {
        LocatedBlocks l;
        // The NN gets the block list asynchronously, so we may need multiple tries to get the list
        final long max = System.currentTimeMillis() + 10000;
        boolean done;
        do {
            Assert.assertTrue("Can't get enouth replica.", System.currentTimeMillis() < max);
            l = getNamenode(dfs.getClient()).getBlockLocations(src, 0, 1);
            Assert.assertNotNull("Can't get block locations for " + src, l);
            Assert.assertNotNull(l.getLocatedBlocks());
            Assert.assertTrue(l.getLocatedBlocks().size() > 0);
            done = true;
            for (int y = 0; y < l.getLocatedBlocks().size() && done; y++) {
                done = (l.get(y).getLocations().length == repCount);
            }
        } while (!done);
        for (int y = 0; y < l.getLocatedBlocks().size(); y++) {
            Assert.assertEquals(localhost, l.get(y).getLocations()[repCount - 1].getHostName());
        }
    }
}
Also used : LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks)
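
The deadline-driven retry in testFromDFS is a recurring pattern across these tests. Here is a generic version as a sketch; the class and method names are hypothetical, and unlike the loop above it sleeps between attempts instead of spinning.

import java.util.function.Supplier;

// Hypothetical utility: polls `condition` until it holds or the deadline passes.
class DeadlinePollSketch {

    static boolean poll(Supplier<Boolean> condition, long timeoutMillis,
                        long intervalMillis) throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMillis;
        while (System.currentTimeMillis() < deadline) {
            if (condition.get()) {
                return true;
            }
            Thread.sleep(intervalMillis);
        }
        return false;
    }
}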

Aggregations

LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 118 uses
Test (org.junit.Test): 67 uses
Path (org.apache.hadoop.fs.Path): 65 uses
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 52 uses
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 33 uses
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 32 uses
Configuration (org.apache.hadoop.conf.Configuration): 29 uses
IOException (java.io.IOException): 20 uses
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 20 uses
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 20 uses
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 18 uses
FileSystem (org.apache.hadoop.fs.FileSystem): 17 uses
LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock): 17 uses
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 13 uses
Block (org.apache.hadoop.hdfs.protocol.Block): 11 uses
InetSocketAddress (java.net.InetSocketAddress): 10 uses
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 10 uses
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 9 uses
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 7 uses
BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager): 7 uses
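
The counts above show the recurring recipe in these examples: a MiniDFSCluster, a Path, and a client call that returns LocatedBlocks. A minimal end-to-end sketch of that recipe, using only APIs that appear in the examples above:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

public class LocatedBlocksExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
            .numDataNodes(3).build();
        try {
            cluster.waitActive();
            DistributedFileSystem dfs = cluster.getFileSystem();
            Path path = new Path("/example");
            DFSTestUtil.writeFile(dfs, path, "hello, world!");
            // fetch the file's block locations starting at offset 0
            LocatedBlocks blocks =
                dfs.getClient().getLocatedBlocks(path.toString(), 0);
            for (LocatedBlock block : blocks.getLocatedBlocks()) {
                System.out.println(block.getBlock() + " on "
                    + block.getLocations().length + " datanode(s)");
            }
        } finally {
            cluster.shutdown();
        }
    }
}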