
Example 26 with NamenodeProtocols

use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in project hadoop by apache.

the class TestDataNodeVolumeFailure method triggerFailure.

/**
   * Access each block of the file on its second DataNode until one of the
   * reads fails, indicating that the volume failure has been triggered.
   * @param path the file whose blocks are read
   * @param size the length of the file in bytes
   * @throws IOException
   */
private void triggerFailure(String path, long size) throws IOException {
    NamenodeProtocols nn = cluster.getNameNodeRpc();
    List<LocatedBlock> locatedBlocks = nn.getBlockLocations(path, 0, size).getLocatedBlocks();
    for (LocatedBlock lb : locatedBlocks) {
        // Pick the second replica's DataNode (index 1) and try to read the block.
        DatanodeInfo dinfo = lb.getLocations()[1];
        ExtendedBlock b = lb.getBlock();
        try {
            accessBlock(dinfo, lb);
        } catch (IOException e) {
            System.out.println("Failure triggered on block: " + b.getBlockId() + "; the corresponding volume should be removed by now");
            break;
        }
    }
}
Also used : NamenodeProtocols(org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) IOException(java.io.IOException)
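
For context, a sketch of how a helper like this is typically driven: the test writes a small replicated file and then calls triggerFailure on it. The file name, size, and replication factor below are illustrative assumptions, not taken from the original test; DFSTestUtil.createFile is Hadoop's standard test utility for creating files.

final long FILE_SIZE = 512L;  // assumed size, small enough for one block
Path testFile = new Path("/test/volume-failure.dat");  // assumed path
// Replication 2 ensures every block has a second DataNode to fail against.
DFSTestUtil.createFile(fs, testFile, FILE_SIZE, (short) 2, 1L);
triggerFailure(testFile.toUri().getPath(), FILE_SIZE);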

Example 27 with NamenodeProtocols

use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in project hadoop by apache.

the class TestAddBlockRetry method testAddBlockRetryShouldReturnBlockWithLocations.

/*
   * Since the NameNode does not persist block locations, an addBlock()
   * retried after a NameNode restart should re-select the locations and
   * return them to the client. See HDFS-5257.
   */
@Test
public void testAddBlockRetryShouldReturnBlockWithLocations() throws Exception {
    final String src = "/testAddBlockRetryShouldReturnBlockWithLocations";
    NamenodeProtocols nameNodeRpc = cluster.getNameNodeRpc();
    // create file
    nameNodeRpc.create(src, FsPermission.getFileDefault(), "clientName", new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true, (short) 3, 1024, null);
    // start first addBlock()
    LOG.info("Starting first addBlock for " + src);
    LocatedBlock lb1 = nameNodeRpc.addBlock(src, "clientName", null, null, HdfsConstants.GRANDFATHER_INODE_ID, null, null);
    assertTrue("Block locations should be present", lb1.getLocations().length > 0);
    // Restart the NameNode, then retry addBlock() with the same arguments.
    cluster.restartNameNode();
    nameNodeRpc = cluster.getNameNodeRpc();
    LocatedBlock lb2 = nameNodeRpc.addBlock(src, "clientName", null, null, HdfsConstants.GRANDFATHER_INODE_ID, null, null);
    assertEquals("Blocks are not equal", lb1.getBlock(), lb2.getBlock());
    assertTrue("Wrong locations with retry", lb2.getLocations().length > 0);
}
Also used : CreateFlag(org.apache.hadoop.fs.CreateFlag) NamenodeProtocols(org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) Test(org.junit.Test)
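
In a complete client write path, the block returned by addBlock() would be streamed to the DataNodes and the file then closed with complete(). The test stops after the location checks, but a minimal sketch of that closing step, assuming the standard ClientProtocol.complete signature, would look like this:

// Hypothetical continuation: close the file once the last block is written.
// complete() can return false while the block is still under-replicated,
// so real clients call it in a retry loop.
boolean closed = nameNodeRpc.complete(src, "clientName", lb2.getBlock(), HdfsConstants.GRANDFATHER_INODE_ID);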

Example 28 with NamenodeProtocols

use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in project hadoop by apache.

the class TestNamenodeRetryCache method testUpdatePipelineWithFailOver.

/**
   * Make sure a retried call does not hang because of the exception thrown
   * by the first call.
   */
@Test(timeout = 60000)
public void testUpdatePipelineWithFailOver() throws Exception {
    cluster.shutdown();
    nnRpc = null;
    filesystem = null;
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(1).build();
    cluster.waitActive();
    NamenodeProtocols ns0 = cluster.getNameNodeRpc(0);
    ExtendedBlock oldBlock = new ExtendedBlock();
    ExtendedBlock newBlock = new ExtendedBlock();
    DatanodeID[] newNodes = new DatanodeID[2];
    String[] newStorages = new String[2];
    newCall();
    try {
        ns0.updatePipeline("testClient", oldBlock, newBlock, newNodes, newStorages);
        fail("Expect StandbyException from the updatePipeline call");
    } catch (StandbyException e) {
        // expected: both NameNodes start out in standby state
        GenericTestUtils.assertExceptionContains(HAServiceState.STANDBY.toString(), e);
    }
    cluster.transitionToActive(0);
    try {
        ns0.updatePipeline("testClient", oldBlock, newBlock, newNodes, newStorages);
    } catch (IOException e) {
    // Ignored; the point is that the retried call returns instead of hanging.
    }
}
Also used : NamenodeProtocols(org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols) DatanodeID(org.apache.hadoop.hdfs.protocol.DatanodeID) StandbyException(org.apache.hadoop.ipc.StandbyException) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) IOException(java.io.IOException) Test(org.junit.Test)
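
The newCall() helper invoked above is not part of this snippet. In the test class it assigns a fresh RPC call ID so that the NameNode's retry cache treats the next request as a new call rather than a retry of the previous one. A rough reconstruction, assuming the three-argument Client.setCallIdAndRetryCount overload in recent Hadoop releases (older releases have a two-argument form):

// Hypothetical reconstruction of newCall(): advance the RPC call ID with a
// retry count of 0, so the server sees a brand-new call, not a retry.
private static int callId = 100;  // starting value is an assumption

static void newCall() {
    Client.setCallIdAndRetryCount(++callId, 0, null);
}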

Example 29 with NamenodeProtocols

use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in project hadoop by apache.

the class TestStartup method checkNameSpace.

private void checkNameSpace(Configuration conf) throws IOException {
    NameNode namenode = new NameNode(conf);
    NamenodeProtocols nnRpc = namenode.getRpcServer();
    assertTrue(nnRpc.getFileInfo("/test").isDir());
    // Enter safe mode, then save the namespace; timeWindow=0 and txGap=0
    // force an unconditional save.
    nnRpc.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
    nnRpc.saveNamespace(0, 0);
    namenode.stop();
    namenode.join();
    namenode.joinHttpServer();
}
Also used : NamenodeProtocols(org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols)
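
A sketch of how a caller might exercise this helper: running it twice verifies that the namespace saved by the first call is intact once the NameNode comes back up. The configuration key is the standard one; the name directory path is an assumption:

// Illustrative driver (the directory is an assumption, not from the test).
Configuration conf = new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
        new File(baseDir, "name").getAbsolutePath());
checkNameSpace(conf);  // asserts "/test" exists, saves the namespace, stops the NN
checkNameSpace(conf);  // restarts from the saved image; "/test" must still exist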

Example 30 with NamenodeProtocols

use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in project hadoop by apache.

the class TestStartup method testNNRestart.

/**
   * Tests that the hosts include list may contain host names. After the
   * NameNode restarts, DataNodes that are still alive should have no
   * trouble registering again.
   */
@Test
public void testNNRestart() throws IOException, InterruptedException {
    MiniDFSCluster cluster = null;
    // heartbeat interval in seconds
    int HEARTBEAT_INTERVAL = 1;
    HostsFileWriter hostsFileWriter = new HostsFileWriter();
    hostsFileWriter.initialize(config, "work-dir/restartnn");
    byte[] b = { 127, 0, 0, 1 };
    InetAddress inetAddress = InetAddress.getByAddress(b);
    hostsFileWriter.initIncludeHosts(new String[] { inetAddress.getHostName() });
    int numDatanodes = 1;
    try {
        cluster = new MiniDFSCluster.Builder(config).numDataNodes(numDatanodes).setupHostsFile(true).build();
        cluster.waitActive();
        cluster.restartNameNode();
        NamenodeProtocols nn = cluster.getNameNodeRpc();
        assertNotNull(nn);
        assertTrue(cluster.isDataNodeUp());
        DatanodeInfo[] info = nn.getDatanodeReport(DatanodeReportType.LIVE);
        // Wait up to five heartbeat intervals for the DataNode to re-register.
        for (int i = 0; i < 5 && info.length != numDatanodes; i++) {
            Thread.sleep(HEARTBEAT_INTERVAL * 1000);
            info = nn.getDatanodeReport(DatanodeReportType.LIVE);
        }
        assertEquals("Number of live nodes should be " + numDatanodes, numDatanodes, info.length);
    } catch (IOException e) {
        fail(StringUtils.stringifyException(e));
        throw e;
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
        hostsFileWriter.cleanup();
    }
}
Also used : NamenodeProtocols(org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) HostsFileWriter(org.apache.hadoop.hdfs.util.HostsFileWriter) IOException(java.io.IOException) InetAddress(java.net.InetAddress) Test(org.junit.Test)
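
The fixed five-iteration polling loop above works, but the same wait can be expressed more directly with GenericTestUtils.waitFor from org.apache.hadoop.test, which polls a condition until a timeout. A sketch; the 30-second timeout is an assumption:

// Poll every heartbeat interval until the DataNode re-registers,
// giving up (with a TimeoutException) after 30 seconds.
GenericTestUtils.waitFor(() -> {
    try {
        return nn.getDatanodeReport(DatanodeReportType.LIVE).length == numDatanodes;
    } catch (IOException e) {
        return false;
    }
}, HEARTBEAT_INTERVAL * 1000, 30 * 1000);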

Aggregations

NamenodeProtocols (org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols): 54 uses
Test (org.junit.Test): 45 uses
IOException (java.io.IOException): 24 uses
Configuration (org.apache.hadoop.conf.Configuration): 21 uses
Path (org.apache.hadoop.fs.Path): 19 uses
FileSystem (org.apache.hadoop.fs.FileSystem): 16 uses
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 15 uses
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 12 uses
RemoteException (org.apache.hadoop.ipc.RemoteException): 10 uses
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 9 uses
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 9 uses
File (java.io.File): 8 uses
FileNotFoundException (java.io.FileNotFoundException): 8 uses
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 8 uses
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 7 uses
StandbyException (org.apache.hadoop.ipc.StandbyException): 7 uses
EOFException (java.io.EOFException): 6 uses
ConnectException (java.net.ConnectException): 6 uses
URISyntaxException (java.net.URISyntaxException): 6 uses
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 6 uses