Search in sources:

Example 1 with DelegationTokenSecretManager

Use of org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager in the Apache Hadoop project.

From the class TestWebHdfsUrl, method getWebHdfsFileSystem.

private WebHdfsFileSystem getWebHdfsFileSystem(UserGroupInformation ugi, Configuration conf) throws IOException {
    // On secure clusters, pre-load the UGI with a WebHDFS delegation token so
    // the returned filesystem can authenticate without fetching a fresh one.
    if (UserGroupInformation.isSecurityEnabled()) {
        final DelegationTokenIdentifier identifier =
            new DelegationTokenIdentifier(new Text(ugi.getUserName()), null, null);
        final FSNamesystem mockNamesystem = mock(FSNamesystem.class);
        // One-day update/lifetime/renew/removal intervals keep the token
        // valid for the entire test run.
        final DelegationTokenSecretManager secretManager =
            new DelegationTokenSecretManager(86400000, 86400000, 86400000, 86400000, mockNamesystem);
        secretManager.startThreads();
        final Token<DelegationTokenIdentifier> webToken =
            new Token<DelegationTokenIdentifier>(identifier, secretManager);
        webToken.setKind(WebHdfsConstants.WEBHDFS_TOKEN_KIND);
        // Bind the token to this filesystem's service address (host:port).
        SecurityUtil.setTokenService(webToken, NetUtils.createSocketAddr(uri.getAuthority()));
        ugi.addToken(webToken);
    }
    return (WebHdfsFileSystem) FileSystem.get(uri, conf);
}
Also used : DelegationTokenSecretManager(org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager) DelegationTokenIdentifier(org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier) Text(org.apache.hadoop.io.Text) Token(org.apache.hadoop.security.token.Token) FSNamesystem(org.apache.hadoop.hdfs.server.namenode.FSNamesystem)

Example 2 with DelegationTokenSecretManager

Use of org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager in the Apache Hadoop project.

From the class TestSecurityTokenEditLog, method testEditsForCancelOnTokenExpire.

@Test(timeout = 10000)
public void testEditsForCancelOnTokenExpire() throws IOException, InterruptedException {
    final long renewPeriodMs = 2000;
    final Configuration conf = new Configuration();
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
    conf.setLong(DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, renewPeriodMs);
    conf.setLong(DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, renewPeriodMs * 2);
    final Text renewerName = new Text(UserGroupInformation.getCurrentUser().getUserName());
    // Mock the image/edit log so we can verify which token ops get logged.
    final FSImage image = mock(FSImage.class);
    final FSEditLog editLog = mock(FSEditLog.class);
    doReturn(editLog).when(image).getEditLog();
    final FSNamesystem namesystem = new FSNamesystem(conf, image);
    final DelegationTokenSecretManager secretManager = namesystem.getDelegationTokenSecretManager();
    try {
        secretManager.startThreads();
        // Issue two tokens; each issuance must be recorded in the edit log.
        final Token<DelegationTokenIdentifier> firstToken = namesystem.getDelegationToken(renewerName);
        final Token<DelegationTokenIdentifier> secondToken = namesystem.getDelegationToken(renewerName);
        final DelegationTokenIdentifier firstId = firstToken.decodeIdentifier();
        final DelegationTokenIdentifier secondId = secondToken.decodeIdentifier();
        verify(editLog, times(1)).logGetDelegationToken(eq(firstId), anyLong());
        verify(editLog, times(1)).logGetDelegationToken(eq(secondId), anyLong());
        // The DTSM scan interval is not configurable, so the test sleeps and
        // then stops/starts the threads to force an expired-token scan.
        // Halfway to expiry, renew only the second token.
        Thread.sleep(renewPeriodMs / 2);
        namesystem.renewDelegationToken(secondToken);
        verify(editLog, times(1)).logRenewDelegationToken(eq(secondId), anyLong());
        // Force a scan and give it a little time to complete.
        secretManager.stopThreads();
        secretManager.startThreads();
        Thread.sleep(250);
        // Neither token has reached its expiration yet.
        verify(editLog, times(0)).logCancelDelegationToken(eq(firstId));
        verify(editLog, times(0)).logCancelDelegationToken(eq(secondId));
        // Sleep past the expiration of the non-renewed first token.
        Thread.sleep(renewPeriodMs / 2);
        secretManager.stopThreads();
        secretManager.startThreads();
        Thread.sleep(250);
        // Expiry of the un-renewed token must be logged as an implicit cancel.
        verify(editLog, times(1)).logCancelDelegationToken(eq(firstId));
        verify(editLog, times(0)).logCancelDelegationToken(eq(secondId));
        // Sleep past the (extended) expiration of the renewed second token.
        Thread.sleep(renewPeriodMs / 2);
        secretManager.stopThreads();
        secretManager.startThreads();
        Thread.sleep(250);
        // By now both expirations must have produced cancel records.
        verify(editLog, times(1)).logCancelDelegationToken(eq(firstId));
        verify(editLog, times(1)).logCancelDelegationToken(eq(secondId));
    } finally {
        secretManager.stopThreads();
    }
}
Also used : DelegationTokenSecretManager(org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) Configuration(org.apache.hadoop.conf.Configuration) DelegationTokenIdentifier(org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier) Text(org.apache.hadoop.io.Text) Test(org.junit.Test)

Example 3 with DelegationTokenSecretManager

Use of org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager in the Apache Hadoop project.

From the class TestDelegationToken, method testDTManagerInSafeMode.

/**
   * Regression test for HDFS-2579: the delegation token secret manager
   * writes to the edit log, so it must only run while the NameNode is
   * out of safe mode (nothing may be written to the edit log while the
   * NN is in safe mode).
   */
@Test
public void testDTManagerInSafeMode() throws Exception {
    cluster.startDataNodes(config, 1, true, StartupOption.REGULAR, null);
    FileSystem fs = cluster.getFileSystem();
    // Create a handful of files so the restarted NN has blocks to wait for.
    for (int fileNum = 0; fileNum < 5; fileNum++) {
        DFSTestUtil.createFile(fs, new Path("/test-" + fileNum), 100, (short) 1, 1L);
    }
    cluster.getConfiguration(0).setInt(DFSConfigKeys.DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_KEY, 500);
    // A long safemode extension keeps the NN in safe mode after restart.
    cluster.getConfiguration(0).setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 30000);
    cluster.setWaitSafeMode(false);
    cluster.restartNameNode();
    NameNode nameNode = cluster.getNameNode();
    assertTrue(nameNode.isInSafeMode());
    DelegationTokenSecretManager secretManager = NameNodeAdapter.getDtSecretManager(nameNode.getNamesystem());
    assertFalse("Secret manager should not run in safe mode", secretManager.isRunning());
    // Leaving safe mode should start the secret manager...
    NameNodeAdapter.leaveSafeMode(nameNode);
    assertTrue("Secret manager should start when safe mode is exited", secretManager.isRunning());
    LOG.info("========= entering safemode again");
    // ...and manually re-entering safe mode should stop it again.
    NameNodeAdapter.enterSafeMode(nameNode, false);
    assertFalse("Secret manager should stop again when safe mode " + "is manually entered", secretManager.isRunning());
    // Set the cluster to leave safemode quickly on its own.
    cluster.getConfiguration(0).setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 0);
    cluster.setWaitSafeMode(true);
    cluster.restartNameNode();
    nameNode = cluster.getNameNode();
    secretManager = NameNodeAdapter.getDtSecretManager(nameNode.getNamesystem());
    assertFalse(nameNode.isInSafeMode());
    assertTrue(secretManager.isRunning());
}
Also used : Path(org.apache.hadoop.fs.Path) NameNode(org.apache.hadoop.hdfs.server.namenode.NameNode) DelegationTokenSecretManager(org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager) FileSystem(org.apache.hadoop.fs.FileSystem) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) WebHdfsFileSystem(org.apache.hadoop.hdfs.web.WebHdfsFileSystem) Test(org.junit.Test)

Example 4 with DelegationTokenSecretManager

Use of org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager in the Apache Hadoop project.

From the class TestDataNodeUGIProvider, method getWebHdfsFileSystem.

private WebHdfsFileSystem getWebHdfsFileSystem(UserGroupInformation ugi, Configuration conf, List<Token<DelegationTokenIdentifier>> tokens) throws IOException {
    // On secure clusters, attach two WebHDFS delegation tokens to the UGI
    // and expose them to the caller through the supplied {@code tokens} list.
    if (UserGroupInformation.isSecurityEnabled()) {
        final DelegationTokenIdentifier identifier =
            new DelegationTokenIdentifier(new Text(ugi.getUserName()), null, null);
        final FSNamesystem mockNamesystem = mock(FSNamesystem.class);
        // One-day update/lifetime/renew/removal intervals keep the tokens
        // valid for the entire test run.
        final DelegationTokenSecretManager secretManager =
            new DelegationTokenSecretManager(86400000, 86400000, 86400000, 86400000, mockNamesystem);
        secretManager.startThreads();
        // Both tokens share the same identifier and secret manager; mint each
        // one, bind it to this filesystem's service address, and register it.
        for (int i = 0; i < 2; i++) {
            final Token<DelegationTokenIdentifier> token =
                new Token<DelegationTokenIdentifier>(identifier, secretManager);
            SecurityUtil.setTokenService(token, NetUtils.createSocketAddr(uri.getAuthority()));
            token.setKind(WebHdfsConstants.WEBHDFS_TOKEN_KIND);
            tokens.add(token);
            ugi.addToken(token);
        }
    }
    return (WebHdfsFileSystem) FileSystem.get(uri, conf);
}
Also used : DelegationTokenSecretManager(org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager) DelegationTokenIdentifier(org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier) Text(org.apache.hadoop.io.Text) Token(org.apache.hadoop.security.token.Token) WebHdfsFileSystem(org.apache.hadoop.hdfs.web.WebHdfsFileSystem) FSNamesystem(org.apache.hadoop.hdfs.server.namenode.FSNamesystem)

Example 5 with DelegationTokenSecretManager

Use of org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager in the Apache Hadoop project.

From the class TestWebHDFSForHA, method testClientFailoverWhenStandbyNNHasStaleCredentials.

/**
 * After a failover the old active (now standby) NN still holds delegation
 * token state, but a token lookup against it must surface a StandbyException
 * to the client (HDFS-6475) rather than a generic SecurityException, so the
 * client knows to retry against the other NameNode.
 */
@Test
public void testClientFailoverWhenStandbyNNHasStaleCredentials() throws IOException {
    Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
    MiniDFSCluster cluster = null;
    WebHdfsFileSystem fs = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo).numDataNodes(0).build();
        HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);
        cluster.waitActive();
        fs = (WebHdfsFileSystem) FileSystem.get(WEBHDFS_URI, conf);
        // Fetch a delegation token from NN0 while it is active.
        cluster.transitionToActive(0);
        Token<?> token = fs.getDelegationToken(null);
        final DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
        identifier.readFields(new DataInputStream(new ByteArrayInputStream(token.getIdentifier())));
        // Fail over so NN0 becomes standby while still holding the token state.
        cluster.transitionToStandby(0);
        cluster.transitionToActive(1);
        final DelegationTokenSecretManager secretManager = NameNodeAdapter.getDtSecretManager(cluster.getNamesystem(0));
        ExceptionHandler eh = new ExceptionHandler();
        eh.initResponse(mock(HttpServletResponse.class));
        Response resp = null;
        try {
            secretManager.retrievePassword(identifier);
            // FIX: previously, if no exception was thrown here the test fell
            // through with resp == null and died on an NPE below instead of
            // reporting a clear failure. (Assert.fail throws AssertionError,
            // which is not caught by the IOException handler.)
            Assert.fail("retrievePassword() on the standby NN should have thrown InvalidToken");
        } catch (IOException e) {
            // Mimic the UserProvider class logic (server side) by throwing
            // SecurityException here
            Assert.assertTrue(e instanceof SecretManager.InvalidToken);
            resp = eh.toResponse(new SecurityException(e));
        }
        // The Response (resp) below is what the server will send to client
        //
        // BEFORE HDFS-6475 fix, the resp.entity is
        //     {"RemoteException":{"exception":"SecurityException",
        //      "javaClassName":"java.lang.SecurityException",
        //      "message":"Failed to obtain user group information:
        //      org.apache.hadoop.security.token.SecretManager$InvalidToken:
        //        StandbyException"}}
        // AFTER the fix, the resp.entity is
        //     {"RemoteException":{"exception":"StandbyException",
        //      "javaClassName":"org.apache.hadoop.ipc.StandbyException",
        //      "message":"Operation category READ is not supported in
        //       state standby"}}
        //
        // Mimic the client side logic by parsing the response from server
        //
        Map<?, ?> m = (Map<?, ?>) JSON.parse(resp.getEntity().toString());
        RemoteException re = JsonUtilClient.toRemoteException(m);
        Exception unwrapped = re.unwrapRemoteException(StandbyException.class);
        Assert.assertTrue(unwrapped instanceof StandbyException);
    } finally {
        IOUtils.cleanup(null, fs);
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) DelegationTokenIdentifier(org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier) HttpServletResponse(javax.servlet.http.HttpServletResponse) IOException(java.io.IOException) DataInputStream(java.io.DataInputStream) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) StandbyException(org.apache.hadoop.ipc.StandbyException) IOException(java.io.IOException) RemoteException(org.apache.hadoop.ipc.RemoteException) ExceptionHandler(org.apache.hadoop.hdfs.web.resources.ExceptionHandler) HttpServletResponse(javax.servlet.http.HttpServletResponse) Response(javax.ws.rs.core.Response) DelegationTokenSecretManager(org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager) StandbyException(org.apache.hadoop.ipc.StandbyException) ByteArrayInputStream(java.io.ByteArrayInputStream) RemoteException(org.apache.hadoop.ipc.RemoteException) HashMap(java.util.HashMap) Map(java.util.Map) Test(org.junit.Test)

Aggregations

DelegationTokenSecretManager (org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager)6 DelegationTokenIdentifier (org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier)5 Text (org.apache.hadoop.io.Text)3 Test (org.junit.Test)3 ByteArrayInputStream (java.io.ByteArrayInputStream)2 DataInputStream (java.io.DataInputStream)2 Configuration (org.apache.hadoop.conf.Configuration)2 FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem)2 WebHdfsFileSystem (org.apache.hadoop.hdfs.web.WebHdfsFileSystem)2 Token (org.apache.hadoop.security.token.Token)2 VisibleForTesting (com.google.common.annotations.VisibleForTesting)1 IOException (java.io.IOException)1 HashMap (java.util.HashMap)1 Map (java.util.Map)1 HttpServletResponse (javax.servlet.http.HttpServletResponse)1 Response (javax.ws.rs.core.Response)1 FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream)1 FileSystem (org.apache.hadoop.fs.FileSystem)1 Path (org.apache.hadoop.fs.Path)1 DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem)1