
Example 6 with BlockLocation

Use of org.apache.hadoop.fs.BlockLocation in project hadoop by apache.

From the class TestWebHDFS, the method testGetFileBlockLocationsBackwardsCompatibility:

@Test
public void testGetFileBlockLocationsBackwardsCompatibility() throws Exception {
    final Configuration conf = WebHdfsTestUtil.createConf();
    final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*";
    HttpServer2 http = null;
    try {
        http = HttpServerFunctionalTest.createTestServer(conf);
        http.addServlet("test", pathSpec, MockWebHdfsServlet.class);
        http.start();
        // Write the address back to the configuration so that
        // WebHdfsFileSystem can connect to the mock server
        conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "localhost:" + http.getConnectorAddress(0).getPort());
        final WebHdfsFileSystem webFS = WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME);
        WebHdfsFileSystem spyFs = spy(webFS);
        BlockLocation[] locations = spyFs.getFileBlockLocations(new Path("p"), 0, 100);
        // Verify result
        assertEquals(1, locations.length);
        assertEquals(121, locations[0].getLength());
        // Verify the fallback. The method should be called exactly 2 times:
        // the 1st call issues GETFILEBLOCKLOCATIONS and finds it is not supported,
        // the 2nd call falls back to GET_FILE_BLOCK_LOCATIONS.
        verify(spyFs, times(2)).getFileBlockLocations(any(), any(), anyLong(), anyLong());
        // A subsequent call should surface an exception from the mock servlet.
        try {
            spyFs.getFileBlockLocations(new Path("p"), 0, 100);
        } catch (Exception e) {
            assertTrue(e instanceof IOException);
            assertEquals(e.getMessage(), MockWebHdfsServlet.RANDOM_EXCEPTION_MSG);
            // In total, this method has now been called 3 times
            verify(spyFs, times(3)).getFileBlockLocations(any(), any(), anyLong(), anyLong());
        }
    } finally {
        if (http != null) {
            http.stop();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), IOException (java.io.IOException), HttpServer2 (org.apache.hadoop.http.HttpServer2), BlockLocation (org.apache.hadoop.fs.BlockLocation), RetriableException (org.apache.hadoop.ipc.RetriableException), SocketException (java.net.SocketException), SocketTimeoutException (java.net.SocketTimeoutException), JSONException (org.codehaus.jettison.json.JSONException), ServletException (javax.servlet.ServletException), URISyntaxException (java.net.URISyntaxException), EOFException (java.io.EOFException), RemoteException (org.apache.hadoop.ipc.RemoteException), AccessControlException (org.apache.hadoop.security.AccessControlException), Test (org.junit.Test), HttpServerFunctionalTest (org.apache.hadoop.http.HttpServerFunctionalTest)
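
For comparison outside the test harness, the same block-location lookup can be done against a live cluster with nothing but the public FileSystem API. The sketch below is hypothetical and not part of the Hadoop sources shown here; the host, port, and file path are placeholders, and 9870 is only the default NameNode HTTP port in Hadoop 3.x (2.x defaults to 50070).

import java.net.URI;
import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WebHdfsBlockLocations {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Placeholder NameNode address; adjust to the actual WebHDFS endpoint.
        FileSystem webFs = FileSystem.get(URI.create("webhdfs://namenode-host:9870"), conf);
        FileStatus status = webFs.getFileStatus(new Path("/some/existing/file"));
        // Ask for the locations covering every byte range of the file.
        BlockLocation[] locations = webFs.getFileBlockLocations(status, 0, status.getLen());
        for (BlockLocation loc : locations) {
            System.out.println("offset=" + loc.getOffset() + " length=" + loc.getLength()
                + " hosts=" + Arrays.toString(loc.getHosts()));
        }
    }
}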

Example 7 with BlockLocation

Use of org.apache.hadoop.fs.BlockLocation in project hadoop by apache.

From the class TestWebHdfsFileSystemContract, the method testGetFileBlockLocations:

// The following are new tests (i.e. not overriding the superclass methods)
public void testGetFileBlockLocations() throws IOException {
    final String f = "/test/testGetFileBlockLocations";
    createFile(path(f));
    final BlockLocation[] computed = fs.getFileBlockLocations(new Path(f), 0L, 1L);
    final BlockLocation[] expected = cluster.getFileSystem().getFileBlockLocations(new Path(f), 0L, 1L);
    assertEquals(expected.length, computed.length);
    for (int i = 0; i < computed.length; i++) {
        assertEquals(expected[i].toString(), computed[i].toString());
        // Check names
        String[] names1 = expected[i].getNames();
        String[] names2 = computed[i].getNames();
        Arrays.sort(names1);
        Arrays.sort(names2);
        Assert.assertArrayEquals("Names differ", names1, names2);
        // Check topology
        String[] topos1 = expected[i].getTopologyPaths();
        String[] topos2 = computed[i].getTopologyPaths();
        Arrays.sort(topos1);
        Arrays.sort(topos2);
        Assert.assertArrayEquals("Topology differs", topos1, topos2);
    }
}
Also used: Path (org.apache.hadoop.fs.Path), BlockLocation (org.apache.hadoop.fs.BlockLocation)
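
Beyond comparing toString() output, each BlockLocation exposes the datanode hostnames, host:port names, and network topology paths directly. A minimal sketch of reading those fields, assuming fs is an already-configured FileSystem, the file exists, and the surrounding method may throw IOException (all assumptions, not taken from the test above):

BlockLocation[] locs = fs.getFileBlockLocations(new Path("/some/file"), 0L, Long.MAX_VALUE);
for (BlockLocation loc : locs) {
    // Each entry describes one block range of the file.
    System.out.println("offset=" + loc.getOffset() + " length=" + loc.getLength());
    // Datanode hostnames holding a replica of this block range.
    System.out.println("hosts=" + Arrays.toString(loc.getHosts()));
    // host:port pairs identifying the datanodes.
    System.out.println("names=" + Arrays.toString(loc.getNames()));
    // Network topology paths of the form /rack/host:port.
    System.out.println("topology=" + Arrays.toString(loc.getTopologyPaths()));
}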

Example 8 with BlockLocation

Use of org.apache.hadoop.fs.BlockLocation in project hadoop by apache.

From the class TestSaslDataTransfer, the method doTest:

/**
   * Tests DataTransferProtocol with the given client configuration.
   *
   * @param conf client configuration
   * @throws IOException if there is an I/O error
   */
private void doTest(HdfsConfiguration conf) throws IOException {
    fs = FileSystem.get(cluster.getURI(), conf);
    FileSystemTestHelper.createFile(fs, PATH, NUM_BLOCKS, BLOCK_SIZE);
    assertArrayEquals(FileSystemTestHelper.getFileData(NUM_BLOCKS, BLOCK_SIZE), DFSTestUtil.readFile(fs, PATH).getBytes("UTF-8"));
    BlockLocation[] blockLocations = fs.getFileBlockLocations(PATH, 0, Long.MAX_VALUE);
    assertNotNull(blockLocations);
    assertEquals(NUM_BLOCKS, blockLocations.length);
    for (BlockLocation blockLocation : blockLocations) {
        assertNotNull(blockLocation.getHosts());
        assertEquals(3, blockLocation.getHosts().length);
    }
}
Also used: BlockLocation (org.apache.hadoop.fs.BlockLocation)

Example 9 with BlockLocation

Use of org.apache.hadoop.fs.BlockLocation in project hadoop by apache.

From the class TestHostsFiles, the method testHostsExcludeInUI:

@Test
public void testHostsExcludeInUI() throws Exception {
    Configuration conf = getConf();
    short REPLICATION_FACTOR = 2;
    final Path filePath = new Path("/testFile");
    HostsFileWriter hostsFileWriter = new HostsFileWriter();
    hostsFileWriter.initialize(conf, "temp/decommission");
    // Four datanodes spread across two racks
    String[] racks = { "/rack1", "/rack1", "/rack2", "/rack2" };
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(racks.length).racks(racks).build();
    final FSNamesystem ns = cluster.getNameNode().getNamesystem();
    try {
        // Create a file with one block
        final FileSystem fs = cluster.getFileSystem();
        DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
        ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
        DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
        // Decommission one of the hosts holding the block; this should cause
        // the block to be replicated to another host on the same rack,
        // otherwise the rack policy would be violated.
        BlockLocation[] locs = fs.getFileBlockLocations(fs.getFileStatus(filePath), 0, Long.MAX_VALUE);
        String name = locs[0].getNames()[0];
        LOG.info("adding '" + name + "' to decommission");
        hostsFileWriter.initExcludeHost(name);
        ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
        DFSTestUtil.waitForDecommission(fs, name);
        // Check the block still has sufficient # replicas across racks
        DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        ObjectName mxbeanName = new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
        String nodes = (String) mbs.getAttribute(mxbeanName, "LiveNodes");
        assertTrue("Live nodes should contain the decommissioned node", nodes.contains("Decommissioned"));
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
        hostsFileWriter.cleanup();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), Configuration (org.apache.hadoop.conf.Configuration), HostsFileWriter (org.apache.hadoop.hdfs.util.HostsFileWriter), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), BlockLocation (org.apache.hadoop.fs.BlockLocation), ObjectName (javax.management.ObjectName), FileSystem (org.apache.hadoop.fs.FileSystem), MBeanServer (javax.management.MBeanServer), Test (org.junit.Test)

Example 10 with BlockLocation

Use of org.apache.hadoop.fs.BlockLocation in project hadoop by apache.

From the class TestBlocksWithNotEnoughRacks, the method testNodeDecomissionRespectsRackPolicy:

/*
   * Test that rack policy is still respected when blocks are replicated
   * due to node decommissioning.
   */
@Test
public void testNodeDecomissionRespectsRackPolicy() throws Exception {
    Configuration conf = getConf();
    short REPLICATION_FACTOR = 2;
    final Path filePath = new Path("/testFile");
    HostsFileWriter hostsFileWriter = new HostsFileWriter();
    hostsFileWriter.initialize(conf, "temp/decommission");
    // Four datanodes spread across two racks
    String[] racks = { "/rack1", "/rack1", "/rack2", "/rack2" };
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(racks.length).racks(racks).build();
    final FSNamesystem ns = cluster.getNameNode().getNamesystem();
    try {
        // Create a file with one block
        final FileSystem fs = cluster.getFileSystem();
        DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
        ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
        DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
        // Decommission one of the hosts holding the block; this should cause
        // the block to be replicated to another host on the same rack,
        // otherwise the rack policy would be violated.
        BlockLocation[] locs = fs.getFileBlockLocations(fs.getFileStatus(filePath), 0, Long.MAX_VALUE);
        String name = locs[0].getNames()[0];
        hostsFileWriter.initExcludeHost(name);
        ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
        DFSTestUtil.waitForDecommission(fs, name);
        // Check the block still has sufficient # replicas across racks
        DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
    } finally {
        cluster.shutdown();
        hostsFileWriter.cleanup();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), Configuration (org.apache.hadoop.conf.Configuration), HostsFileWriter (org.apache.hadoop.hdfs.util.HostsFileWriter), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), BlockLocation (org.apache.hadoop.fs.BlockLocation), FileSystem (org.apache.hadoop.fs.FileSystem), FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem), Test (org.junit.Test)

Aggregations

BlockLocation (org.apache.hadoop.fs.BlockLocation): 87 uses
Path (org.apache.hadoop.fs.Path): 41 uses
FileStatus (org.apache.hadoop.fs.FileStatus): 30 uses
Test (org.junit.Test): 29 uses
FileSystem (org.apache.hadoop.fs.FileSystem): 16 uses
Configuration (org.apache.hadoop.conf.Configuration): 14 uses
ArrayList (java.util.ArrayList): 13 uses
IOException (java.io.IOException): 10 uses
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 10 uses
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 7 uses
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 7 uses
InetSocketAddress (java.net.InetSocketAddress): 5 uses
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 5 uses
LocatedFileStatus (org.apache.hadoop.fs.LocatedFileStatus): 5 uses
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 5 uses
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 5 uses
IgfsBlockLocation (org.apache.ignite.igfs.IgfsBlockLocation): 5 uses
IgfsPath (org.apache.ignite.igfs.IgfsPath): 5 uses
HashMap (java.util.HashMap): 4 uses
Random (java.util.Random): 4 uses
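
The aggregation above shows that BlockLocation is most often paired with FileSystem, Path, and file status types. One common pattern skips the separate getFileBlockLocations call entirely: FileSystem.listFiles returns LocatedFileStatus objects that already carry their block locations. A minimal sketch of that pattern, assuming a default-loaded Configuration and an existing directory (the directory path is a placeholder):

import java.io.IOException;
import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public class ListBlockLocations {
    public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.get(new Configuration());
        // Recursively list files; each LocatedFileStatus already carries its block locations.
        RemoteIterator<LocatedFileStatus> it = fs.listFiles(new Path("/some/dir"), true);
        while (it.hasNext()) {
            LocatedFileStatus status = it.next();
            for (BlockLocation loc : status.getBlockLocations()) {
                System.out.println(status.getPath() + " offset=" + loc.getOffset()
                    + " length=" + loc.getLength()
                    + " hosts=" + Arrays.toString(loc.getHosts()));
            }
        }
    }
}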