Search in sources :

Example 46 with NameNode

use of org.apache.hadoop.hdfs.server.namenode.NameNode in project hadoop by apache.

the class TestDFSHAAdminMiniCluster method testTransitionToActiveWhenOtherNamenodeisActive.

/**
   * Test that two namenodes cannot both be transitioned to the active state,
   * and that -transitionToActive with the --forceactive switch succeeds even
   * when the other namenode is down and cannot be queried.
   * @throws Exception if a cluster operation or tool invocation fails
   */
@Test
public void testTransitionToActiveWhenOtherNamenodeisActive() throws Exception {
    NameNode nn1 = cluster.getNameNode(0);
    NameNode nn2 = cluster.getNameNode(1);
    // Make sure both namenodes start out in standby state.
    ensureStandby(nn1, 0);
    ensureStandby(nn2, 1);
    assertTrue(nn1.isStandbyState());
    assertTrue(nn2.isStandbyState());
    // Trigger the transition to active for both namenodes; at most one may win.
    runTool("-transitionToActive", "nn1");
    runTool("-transitionToActive", "nn2");
    assertFalse("Both namenodes cannot be active", nn1.isActiveState() && nn2.isActiveState());
    /*  Deliberately shut down nn1: this makes HAAdmin#isOtherTargetNodeActive
        throw an Exception, and transitionToActive for nn2 with the
        --forceactive switch must still succeed despite that Exception.  */
    cluster.shutdownNameNode(0);
    ensureStandby(nn2, 1);
    // Only nn2 remains; it must be in standby before the forced transition.
    assertTrue(nn2.isStandbyState());
    assertFalse(cluster.isNameNodeUp(0));
    runTool("-transitionToActive", "nn2", "--forceactive");
    assertTrue("Namenode nn2 should be active", nn2.isActiveState());
}

/**
   * Transition the namenode at the given cluster index to standby unless it
   * already reports the STANDBY state (or no state at all).
   */
private void ensureStandby(NameNode nn, int nnIndex) throws Exception {
    if (nn.getState() != null && !nn.getState().equals(HAServiceState.STANDBY.name())) {
        cluster.transitionToStandby(nnIndex);
    }
}
Also used : NameNode(org.apache.hadoop.hdfs.server.namenode.NameNode) Test(org.junit.Test)

Example 47 with NameNode

use of org.apache.hadoop.hdfs.server.namenode.NameNode in project hadoop by apache.

the class TestDFSZKFailoverController method waitForHAState.

/**
   * Block until the namenode at index {@code nnidx} reports the given HA
   * service state over RPC, polling every 50 ms for at most 15 seconds.
   * @throws TimeoutException if the state is not reached within the deadline
   * @throws InterruptedException if the waiting thread is interrupted
   */
private void waitForHAState(int nnidx, final HAServiceState state) throws TimeoutException, InterruptedException {
    final NameNode target = cluster.getNameNode(nnidx);
    GenericTestUtils.waitFor(new Supplier<Boolean>() {

        @Override
        public Boolean get() {
            HAServiceState current;
            try {
                current = target.getRpcServer().getServiceStatus().getState();
            } catch (Exception e) {
                // The RPC may fail transiently while the namenode is
                // transitioning; treat that as "not there yet" and retry.
                e.printStackTrace();
                return false;
            }
            return current == state;
        }
    }, 50, 15000);
}
Also used : NameNode(org.apache.hadoop.hdfs.server.namenode.NameNode) TimeoutException(java.util.concurrent.TimeoutException)

Example 48 with NameNode

use of org.apache.hadoop.hdfs.server.namenode.NameNode in project hadoop by apache.

the class JspHelper method getTokenUGI.

/**
   * Decode a delegation token from its URL-safe string form, optionally bind
   * it to the namenode service address derived from the request, verify it
   * against the namenode found in the servlet context (if any), and return a
   * UGI for the token's owner with the token attached.
   * @throws IOException if the token cannot be decoded or fails verification
   */
private static UserGroupInformation getTokenUGI(ServletContext context, HttpServletRequest request, String tokenString, Configuration conf) throws IOException {
    final Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>();
    token.decodeFromUrlString(tokenString);
    final InetSocketAddress serviceAddress = getNNServiceAddress(context, request);
    if (serviceAddress != null) {
        SecurityUtil.setTokenService(token, serviceAddress);
        token.setKind(DelegationTokenIdentifier.HDFS_DELEGATION_KIND);
    }
    // Re-materialize the identifier carried inside the token's identifier bytes.
    final DelegationTokenIdentifier id = new DelegationTokenIdentifier();
    id.readFields(new DataInputStream(new ByteArrayInputStream(token.getIdentifier())));
    if (context != null) {
        final NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context);
        if (nn != null) {
            // Verify the token.
            nn.getNamesystem().verifyToken(id, token.getPassword());
        }
    }
    final UserGroupInformation ugi = id.getUser();
    ugi.addToken(token);
    return ugi;
}
Also used : NameNode(org.apache.hadoop.hdfs.server.namenode.NameNode) DelegationTokenIdentifier(org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier) ByteArrayInputStream(java.io.ByteArrayInputStream) InetSocketAddress(java.net.InetSocketAddress) Token(org.apache.hadoop.security.token.Token) DataInputStream(java.io.DataInputStream) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation)

Example 49 with NameNode

use of org.apache.hadoop.hdfs.server.namenode.NameNode in project hadoop by apache.

the class TestMRCredentials method test.

/**
   * Run a distributed job and verify that the TokenCache (delegation tokens
   * for the configured namenodes) is available to it.
   * @throws IOException if cluster configuration fails
   */
@Test
public void test() throws IOException {
    // make sure JT starts
    Configuration jobConf = new JobConf(mrCluster.getConfig());
    // provide namenode names for the job to get the delegation tokens for
    NameNode nn = dfsCluster.getNameNode();
    URI nnUri = DFSUtilClient.getNNUri(nn.getNameNodeAddress());
    // Deliberately a comma-separated list (the same URI twice) to exercise
    // parsing of multiple namenode entries.
    jobConf.set(JobContext.JOB_NAMENODES, nnUri + "," + nnUri);
    jobConf.set("mapreduce.job.credentials.json", "keys.json");
    // using argument to pass the file name
    String[] args = { "-m", "1", "-r", "1", "-mt", "1", "-rt", "1" };
    int res = -1;
    try {
        res = ToolRunner.run(jobConf, new CredentialsTestJob(), args);
    } catch (Exception e) {
        System.out.println("Job failed with " + e.getLocalizedMessage());
        e.printStackTrace(System.out);
        fail("Job failed");
    }
    // JUnit's assertEquals signature is (message, expected, actual):
    // the expected value 0 must come before the actual result.
    assertEquals("dist job res is not 0", 0, res);
}
Also used : NameNode(org.apache.hadoop.hdfs.server.namenode.NameNode) Configuration(org.apache.hadoop.conf.Configuration) JobConf(org.apache.hadoop.mapred.JobConf) URI(java.net.URI) IOException(java.io.IOException) FileNotFoundException(java.io.FileNotFoundException) Test(org.junit.Test)

Example 50 with NameNode

use of org.apache.hadoop.hdfs.server.namenode.NameNode in project hadoop by apache.

the class MiniDFSCluster method restartNameNode.

/**
   * Restart the namenode at a given index. Optionally wait for the cluster
   * to become active.
   * @param nnIndex index of the namenode to restart
   * @param waitActive whether to block until the cluster is up and active
   * @param args explicit startup arguments; when present they replace the
   *             previously recorded startup option
   * @throws IOException if the namenode fails to restart
   */
public synchronized void restartNameNode(int nnIndex, boolean waitActive, String... args) throws IOException {
    final NameNodeInfo info = getNN(nnIndex);
    StartupOption startOpt = info.startOpt;
    shutdownNameNode(nnIndex);
    // Explicit args override (and clear) the recorded startup option;
    // otherwise synthesize the args from the previously recorded option.
    if (args.length == 0) {
        args = createArgs(startOpt);
    } else {
        startOpt = null;
    }
    info.nameNode = NameNode.createNameNode(args, info.conf);
    info.setStartOpt(startOpt);
    if (waitActive) {
        waitClusterUp();
        LOG.info("Restarted the namenode");
        waitActive();
    }
}
Also used : NameNode(org.apache.hadoop.hdfs.server.namenode.NameNode) StartupOption(org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption)

Aggregations

NameNode (org.apache.hadoop.hdfs.server.namenode.NameNode)65 Test (org.junit.Test)44 Configuration (org.apache.hadoop.conf.Configuration)28 Path (org.apache.hadoop.fs.Path)22 MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster)17 FileSystem (org.apache.hadoop.fs.FileSystem)15 FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream)9 DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode)8 File (java.io.File)7 DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem)7 DatanodeProtocolClientSideTranslatorPB (org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB)7 HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration)6 LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks)6 IOException (java.io.IOException)5 DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo)5 FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream)4 BlockTokenSecretManager (org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager)4 BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager)4 DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration)4 NamenodeProtocols (org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols)4