Example 6 with LocatedBlocks

Use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.

Class TestBlockMissingException, method testBlockMissingException.

/**
   * Tests that reading a file whose first block has been deleted on all
   * datanodes raises a BlockMissingException.
   */
@Test
public void testBlockMissingException() throws Exception {
    LOG.info("Test testBlockMissingException started.");
    long blockSize = 1024L;
    int numBlocks = 4;
    conf = new HdfsConfiguration();
    // Set short retry timeouts so this test runs faster
    conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
    try {
        dfs = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATANODES).build();
        dfs.waitActive();
        fileSys = dfs.getFileSystem();
        Path file1 = new Path("/user/dhruba/raidtest/file1");
        createOldFile(fileSys, file1, 1, numBlocks, blockSize);
    // Extract block locations from the file system; wait till the file is closed.
    LocatedBlocks locations = fileSys.dfs.getNamenode().getBlockLocations(file1.toString(), 0, numBlocks * blockSize);
    // Remove the first block of the file.
        LOG.info("Remove first block of file");
        dfs.corruptBlockOnDataNodesByDeletingBlockFile(locations.get(0).getBlock());
        // validate that the system throws BlockMissingException
        validateFile(fileSys, file1);
    } finally {
        if (fileSys != null)
            fileSys.close();
        if (dfs != null)
            dfs.shutdown();
    }
    LOG.info("Test testBlockMissingException completed.");
}
Also used : Path(org.apache.hadoop.fs.Path) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) Test(org.junit.Test)
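
The validateFile helper invoked above is not shown on this page. As a rough sketch of what such a helper could look like, assuming it simply reads the file end to end and asserts that the deleted block surfaces as a BlockMissingException (the body below is illustrative, not the actual helper from TestBlockMissingException):

private void validateFile(FileSystem fileSys, Path name) throws IOException {
    // Illustrative sketch: stream through the whole file and expect the
    // client to raise BlockMissingException for the deleted block.
    FSDataInputStream stm = fileSys.open(name);
    byte[] b = new byte[4096];
    boolean gotException = false;
    try {
        while (stm.read(b) >= 0) {
            // keep reading until EOF or exception
        }
    } catch (BlockMissingException e) {
        // Expected: the first block was deleted on all datanodes.
        gotException = true;
    } finally {
        stm.close();
    }
    assertTrue("Expected BlockMissingException", gotException);
}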

Example 7 with LocatedBlocks

Use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.

Class TestSafeModeWithStripedFile, method doTest.

/**
   * This utility writes a small block group whose size is given by the caller,
   * then writes another two full-stripe block groups. It then shuts down all
   * DNs and restarts them one by one, verifying the safemode status at each
   * step.
   *
   * @param smallSize file size of the small block group
   * @param minStorages minimum number of storages the block needs in order to be safe
   */
private void doTest(int smallSize, int minStorages) throws IOException {
    FileSystem fs = cluster.getFileSystem();
    // add 1 block
    byte[] data = StripedFileTestUtil.generateBytes(smallSize);
    Path smallFilePath = new Path("/testStripedFile_" + smallSize);
    DFSTestUtil.writeFile(fs, smallFilePath, data);
    // If we only have 1 block, NN won't enter safemode in the first place
    // because the threshold is 0 blocks.
    // So we need to add another 2 blocks.
    int bigSize = blockSize * dataBlocks * 2;
    Path bigFilePath = new Path("/testStripedFile_" + bigSize);
    data = StripedFileTestUtil.generateBytes(bigSize);
    DFSTestUtil.writeFile(fs, bigFilePath, data);
    // now we have 3 blocks. NN needs 2 blocks to reach the threshold 0.9 of
    // total blocks 3.
    // stopping all DNs
    List<MiniDFSCluster.DataNodeProperties> dnprops = Lists.newArrayList();
    LocatedBlocks lbs = cluster.getNameNodeRpc().getBlockLocations(smallFilePath.toString(), 0, smallSize);
    DatanodeInfo[] locations = lbs.get(0).getLocations();
    for (DatanodeInfo loc : locations) {
        // keep the DNs that have smallFile in the head of dnprops
        dnprops.add(cluster.stopDataNode(loc.getName()));
    }
    for (int i = 0; i < numDNs - locations.length; i++) {
        dnprops.add(cluster.stopDataNode(0));
    }
    cluster.restartNameNode(0);
    NameNode nn = cluster.getNameNode();
    assertTrue(cluster.getNameNode().isInSafeMode());
    assertEquals(0, NameNodeAdapter.getSafeModeSafeBlocks(nn));
    // The block of smallFile hasn't reached minStorages yet,
    // so the safe blocks count doesn't increment.
    for (int i = 0; i < minStorages - 1; i++) {
        cluster.restartDataNode(dnprops.remove(0));
        cluster.waitActive();
        cluster.triggerBlockReports();
        assertEquals(0, NameNodeAdapter.getSafeModeSafeBlocks(nn));
    }
    // The block of smallFile reaches minStorages,
    // so the safe blocks count increments.
    cluster.restartDataNode(dnprops.remove(0));
    cluster.waitActive();
    cluster.triggerBlockReports();
    assertEquals(1, NameNodeAdapter.getSafeModeSafeBlocks(nn));
    // The 2 blocks of bigFile need dataBlocks storages to be safe.
    for (int i = minStorages; i < dataBlocks - 1; i++) {
        cluster.restartDataNode(dnprops.remove(0));
        cluster.waitActive();
        cluster.triggerBlockReports();
        assertTrue(nn.isInSafeMode());
    }
    cluster.restartDataNode(dnprops.remove(0));
    cluster.waitActive();
    cluster.triggerBlockReports();
    assertFalse(nn.isInSafeMode());
}
Also used : Path(org.apache.hadoop.fs.Path) NameNode(org.apache.hadoop.hdfs.server.namenode.NameNode) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) FileSystem(org.apache.hadoop.fs.FileSystem) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks)
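
The safe-block arithmetic in this test depends on the namenode's safemode threshold. As a hedged aside (the two keys below are standard HDFS properties in DFSConfigKeys; the values are illustrative, not necessarily what this test's setup uses), a test can pin the threshold so the expected counts are deterministic:

// Illustrative configuration sketch, not taken from this test's setup.
Configuration conf = new HdfsConfiguration();
// Fraction of blocks that must be "safe" before the NN leaves safemode.
conf.setFloat(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, 0.9f);
// Minimal number of reported storages for a block to count as safe.
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY, 1);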

Example 8 with LocatedBlocks

Use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.

Class TestReplication, method waitForBlockReplication.

private void waitForBlockReplication(String filename, ClientProtocol namenode, int expected, long maxWaitSec, boolean isUnderConstruction, boolean noOverReplication) throws IOException {
    long start = Time.monotonicNow();
    // Wait for all the blocks to be replicated.
    LOG.info("Checking for block replication for " + filename);
    while (true) {
        boolean replOk = true;
        LocatedBlocks blocks = namenode.getBlockLocations(filename, 0, Long.MAX_VALUE);
        for (Iterator<LocatedBlock> iter = blocks.getLocatedBlocks().iterator(); iter.hasNext(); ) {
            LocatedBlock block = iter.next();
            if (isUnderConstruction && !iter.hasNext()) {
                // do not check the last block
                break;
            }
            int actual = block.getLocations().length;
            if (noOverReplication) {
                assertTrue(actual <= expected);
            }
            if (actual < expected) {
                LOG.info("Not enough replicas for " + block.getBlock() + " yet. Expecting " + expected + ", got " + actual + ".");
                replOk = false;
                break;
            }
        }
        if (replOk) {
            return;
        }
        if (maxWaitSec > 0 && (Time.monotonicNow() - start) > (maxWaitSec * 1000)) {
            throw new IOException("Timedout while waiting for all blocks to " + " be replicated for " + filename);
        }
        try {
            Thread.sleep(500);
        } catch (InterruptedException ignored) {
        }
    }
}
Also used : LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) IOException(java.io.IOException)
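
A call site for this helper might look like the following sketch (the path and parameter values are made up for illustration):

// Wait up to 60 seconds for every block of the file to reach replication
// factor 3; also assert no block is over-replicated along the way.
waitForBlockReplication("/user/test/repl-file", namenode, 3, 60, false, true);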

Example 9 with LocatedBlocks

Use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.

Class TestReplication, method checkFile.

/* Check that at least two replicas share a rack and at least two replicas are on different racks. */
private void checkFile(FileSystem fileSys, Path name, int repl) throws IOException {
    Configuration conf = fileSys.getConf();
    ClientProtocol namenode = NameNodeProxies.createProxy(conf, fileSys.getUri(), ClientProtocol.class).getProxy();
    waitForBlockReplication(name.toString(), namenode, Math.min(numDatanodes, repl), -1);
    LocatedBlocks locations = namenode.getBlockLocations(name.toString(), 0, Long.MAX_VALUE);
    FileStatus stat = fileSys.getFileStatus(name);
    BlockLocation[] blockLocations = fileSys.getFileBlockLocations(stat, 0L, Long.MAX_VALUE);
    // verify that rack locations match
    assertEquals(blockLocations.length, locations.locatedBlockCount());
    for (int i = 0; i < blockLocations.length; i++) {
        LocatedBlock blk = locations.get(i);
        DatanodeInfo[] datanodes = blk.getLocations();
        String[] topologyPaths = blockLocations[i].getTopologyPaths();
        assertEquals(datanodes.length, topologyPaths.length);
        for (int j = 0; j < topologyPaths.length; j++) {
            boolean found = false;
            for (int k = 0; k < racks.length; k++) {
                if (topologyPaths[j].startsWith(racks[k])) {
                    found = true;
                    break;
                }
            }
            assertTrue(found);
        }
    }
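    // Placement check: at least one pair of replicas must share a rack
    // (isOnSameRack) and at least one pair must be on different racks
    // (isNotOnSameRack); both flags start true so that blocks with too
    // few replicas to compare pass vacuously.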
    boolean isOnSameRack = true, isNotOnSameRack = true;
    for (LocatedBlock blk : locations.getLocatedBlocks()) {
        DatanodeInfo[] datanodes = blk.getLocations();
        if (datanodes.length <= 1)
            break;
        if (datanodes.length == 2) {
            isNotOnSameRack = !(datanodes[0].getNetworkLocation().equals(datanodes[1].getNetworkLocation()));
            break;
        }
        isOnSameRack = false;
        isNotOnSameRack = false;
        for (int i = 0; i < datanodes.length - 1; i++) {
            LOG.info("datanode " + i + ": " + datanodes[i]);
            boolean onRack = false;
            for (int j = i + 1; j < datanodes.length; j++) {
                if (datanodes[i].getNetworkLocation().equals(datanodes[j].getNetworkLocation())) {
                    onRack = true;
                }
            }
            if (onRack) {
                isOnSameRack = true;
            }
            if (!onRack) {
                isNotOnSameRack = true;
            }
            if (isOnSameRack && isNotOnSameRack)
                break;
        }
        if (!isOnSameRack || !isNotOnSameRack)
            break;
    }
    assertTrue(isOnSameRack);
    assertTrue(isNotOnSameRack);
}
Also used : DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) FileStatus(org.apache.hadoop.fs.FileStatus) Configuration(org.apache.hadoop.conf.Configuration) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) BlockLocation(org.apache.hadoop.fs.BlockLocation) ClientProtocol(org.apache.hadoop.hdfs.protocol.ClientProtocol)
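
The rack comparisons above hinge on DatanodeInfo.getNetworkLocation(), which returns the node's rack path (for example "/rack1"). A minimal predicate capturing the comparison that checkFile inlines could look like this (the helper name is hypothetical):

// Hypothetical convenience predicate: two datanodes share a rack iff
// their network locations (rack paths such as "/rack1") are equal.
private static boolean onSameRack(DatanodeInfo a, DatanodeInfo b) {
    return a.getNetworkLocation().equals(b.getNetworkLocation());
}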

Example 10 with LocatedBlocks

Use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.

Class TestSmallBlock, method checkFile.

private void checkFile(DistributedFileSystem fileSys, Path name) throws IOException {
    BlockLocation[] locations = fileSys.getFileBlockLocations(fileSys.getFileStatus(name), 0, fileSize);
    assertEquals("Number of blocks", fileSize, locations.length);
    FSDataInputStream stm = fileSys.open(name);
    byte[] expected = new byte[fileSize];
    if (simulatedStorage) {
        LocatedBlocks lbs = fileSys.getClient().getLocatedBlocks(name.toString(), 0, fileSize);
        DFSTestUtil.fillExpectedBuf(lbs, expected);
    } else {
        Random rand = new Random(seed);
        rand.nextBytes(expected);
    }
    // Sanity check: read the file back and compare with the expected bytes.
    byte[] actual = new byte[fileSize];
    stm.readFully(0, actual);
    checkAndEraseData(actual, 0, expected, "Read Sanity Test");
    stm.close();
}
Also used : Random(java.util.Random) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) BlockLocation(org.apache.hadoop.fs.BlockLocation)
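
The checkAndEraseData helper is not shown on this page. A plausible sketch, assuming it compares the bytes read against the expected buffer and zeroes them afterwards so a reused buffer cannot mask a later mismatch (illustrative; the real helper in TestSmallBlock may differ):

private void checkAndEraseData(byte[] actual, int from, byte[] expected, String message) {
    for (int i = 0; i < actual.length; i++) {
        // Compare each byte read against the expected buffer at the same offset.
        assertEquals(message + " byte " + (from + i) + " differs",
            expected[from + i], actual[i]);
        // Erase the checked byte so stale data cannot pass a later check.
        actual[i] = 0;
    }
}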

Aggregations

LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 118 usages
Test (org.junit.Test): 67 usages
Path (org.apache.hadoop.fs.Path): 65 usages
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 52 usages
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 33 usages
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 32 usages
Configuration (org.apache.hadoop.conf.Configuration): 29 usages
IOException (java.io.IOException): 20 usages
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 20 usages
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 20 usages
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 18 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 17 usages
LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock): 17 usages
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 13 usages
Block (org.apache.hadoop.hdfs.protocol.Block): 11 usages
InetSocketAddress (java.net.InetSocketAddress): 10 usages
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 10 usages
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 9 usages
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 7 usages
BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager): 7 usages