Example 21 with InvalidToken

Use of org.apache.hadoop.security.token.SecretManager.InvalidToken in project hadoop by apache.

From the class TestSaslRPC, method testErrorMessage.

@Test
public void testErrorMessage() throws Exception {
    BadTokenSecretManager sm = new BadTokenSecretManager();
    final Server server = setupTestServer(conf, 5, sm);
    boolean succeeded = false;
    try {
        doDigestRpc(server, sm);
    } catch (ServiceException e) {
        assertTrue(e.getCause() instanceof RemoteException);
        RemoteException re = (RemoteException) e.getCause();
        LOG.info("LOGGING MESSAGE: " + re.getLocalizedMessage());
        assertEquals(ERROR_MESSAGE, re.getLocalizedMessage());
        assertTrue(re.unwrapRemoteException() instanceof InvalidToken);
        succeeded = true;
    }
    assertTrue(succeeded);
}
Also used: SaslServer(javax.security.sasl.SaslServer) ServiceException(com.google.protobuf.ServiceException) InvalidToken(org.apache.hadoop.security.token.SecretManager.InvalidToken) Test(org.junit.Test)
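
The BadTokenSecretManager used above is a test helper defined elsewhere in TestSaslRPC. A minimal sketch of the pattern it follows is a SecretManager whose password lookup always throws InvalidToken, so that SASL negotiation fails and ERROR_MESSAGE travels back to the client; the body below illustrates that pattern rather than reproducing the Hadoop source verbatim:

public static class BadTokenSecretManager extends TestTokenSecretManager {

    @Override
    public byte[] retrievePassword(TestTokenIdentifier id) throws InvalidToken {
        // Reject every token. The RPC server wraps this InvalidToken in a
        // RemoteException, which surfaces as the ServiceException cause
        // checked in the test above.
        throw new InvalidToken(ERROR_MESSAGE);
    }
}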

Example 22 with InvalidToken

Use of org.apache.hadoop.security.token.SecretManager.InvalidToken in project hadoop by apache.

From the class TestEnhancedByteBufferAccess, method testZeroCopyMmapCache.

@Test
public void testZeroCopyMmapCache() throws Exception {
    HdfsConfiguration conf = initZeroCopyTest();
    MiniDFSCluster cluster = null;
    final Path TEST_PATH = new Path("/a");
    final int TEST_FILE_LENGTH = 5 * BLOCK_SIZE;
    final int RANDOM_SEED = 23453;
    final String CONTEXT = "testZeroCopyMmapCacheContext";
    FSDataInputStream fsIn = null;
    ByteBuffer[] results = { null, null, null, null };
    DistributedFileSystem fs = null;
    conf.set(HdfsClientConfigKeys.DFS_CLIENT_CONTEXT, CONTEXT);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, TEST_PATH, TEST_FILE_LENGTH, (short) 1, RANDOM_SEED);
    try {
        DFSTestUtil.waitReplication(fs, TEST_PATH, (short) 1);
    } catch (InterruptedException e) {
        Assert.fail("unexpected InterruptedException during " + "waitReplication: " + e);
    } catch (TimeoutException e) {
        Assert.fail("unexpected TimeoutException during " + "waitReplication: " + e);
    }
    fsIn = fs.open(TEST_PATH);
    byte[] original = new byte[TEST_FILE_LENGTH];
    IOUtils.readFully(fsIn, original, 0, TEST_FILE_LENGTH);
    fsIn.close();
    fsIn = fs.open(TEST_PATH);
    final ShortCircuitCache cache = ClientContext.get(CONTEXT, conf).getShortCircuitCache();
    cache.accept(new CountingVisitor(0, 5, 5, 0));
    results[0] = fsIn.read(null, BLOCK_SIZE, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
    fsIn.seek(0);
    results[1] = fsIn.read(null, BLOCK_SIZE, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
    // The mmap should be of the first block of the file.
    final ExtendedBlock firstBlock = DFSTestUtil.getFirstBlock(fs, TEST_PATH);
    cache.accept(new CacheVisitor() {

        @Override
        public void visit(int numOutstandingMmaps, Map<ExtendedBlockId, ShortCircuitReplica> replicas, Map<ExtendedBlockId, InvalidToken> failedLoads, LinkedMap evictable, LinkedMap evictableMmapped) {
            ShortCircuitReplica replica = replicas.get(new ExtendedBlockId(firstBlock.getBlockId(), firstBlock.getBlockPoolId()));
            Assert.assertNotNull(replica);
            Assert.assertTrue(replica.hasMmap());
            // The replica should not yet be evictable, since we have it open.
            Assert.assertNull(replica.getEvictableTimeNs());
        }
    });
    // Read more blocks.
    results[2] = fsIn.read(null, BLOCK_SIZE, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
    results[3] = fsIn.read(null, BLOCK_SIZE, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
    // we should have 3 mmaps, 1 evictable
    cache.accept(new CountingVisitor(3, 5, 2, 0));
    // After we close the cursors, the mmaps should be evictable for a brief
    // period of time; then they should be closed (we're using a very quick
    // timeout).
    for (ByteBuffer buffer : results) {
        if (buffer != null) {
            fsIn.releaseBuffer(buffer);
        }
    }
    fsIn.close();
    GenericTestUtils.waitFor(new Supplier<Boolean>() {

        public Boolean get() {
            final MutableBoolean finished = new MutableBoolean(false);
            cache.accept(new CacheVisitor() {

                @Override
                public void visit(int numOutstandingMmaps, Map<ExtendedBlockId, ShortCircuitReplica> replicas, Map<ExtendedBlockId, InvalidToken> failedLoads, LinkedMap evictable, LinkedMap evictableMmapped) {
                    finished.setValue(evictableMmapped.isEmpty());
                }
            });
            return finished.booleanValue();
        }
    }, 10, 60000);
    cache.accept(new CountingVisitor(0, -1, -1, -1));
    fs.close();
    cluster.shutdown();
}
Also used: ExtendedBlockId(org.apache.hadoop.hdfs.ExtendedBlockId) LinkedMap(org.apache.commons.collections.map.LinkedMap) CacheVisitor(org.apache.hadoop.hdfs.shortcircuit.ShortCircuitCache.CacheVisitor) MutableBoolean(org.apache.commons.lang.mutable.MutableBoolean) TimeoutException(java.util.concurrent.TimeoutException) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) ShortCircuitCache(org.apache.hadoop.hdfs.shortcircuit.ShortCircuitCache) ByteBuffer(java.nio.ByteBuffer) ShortCircuitReplica(org.apache.hadoop.hdfs.shortcircuit.ShortCircuitReplica) InvalidToken(org.apache.hadoop.security.token.SecretManager.InvalidToken) Map(java.util.Map) Test(org.junit.Test)
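
CountingVisitor is a helper defined in the same test class. Judging from the call sites above, its four constructor arguments are expected counts for outstanding mmaps, replicas, evictable entries, and evictable mmapped entries, with -1 apparently meaning "skip this check" (see the final CountingVisitor(0, -1, -1, -1) call). A plausible sketch under those assumptions:

private static class CountingVisitor implements CacheVisitor {
    private final int expectedNumOutstandingMmaps;
    private final int expectedNumReplicas;
    private final int expectedNumEvictable;
    private final int expectedNumMmapedEvictable;

    CountingVisitor(int expectedNumOutstandingMmaps, int expectedNumReplicas,
            int expectedNumEvictable, int expectedNumMmapedEvictable) {
        this.expectedNumOutstandingMmaps = expectedNumOutstandingMmaps;
        this.expectedNumReplicas = expectedNumReplicas;
        this.expectedNumEvictable = expectedNumEvictable;
        this.expectedNumMmapedEvictable = expectedNumMmapedEvictable;
    }

    @Override
    public void visit(int numOutstandingMmaps,
            Map<ExtendedBlockId, ShortCircuitReplica> replicas,
            Map<ExtendedBlockId, InvalidToken> failedLoads,
            LinkedMap evictable, LinkedMap evictableMmapped) {
        // A negative expected value means the caller does not care about
        // that particular count.
        if (expectedNumOutstandingMmaps >= 0) {
            Assert.assertEquals(expectedNumOutstandingMmaps, numOutstandingMmaps);
        }
        if (expectedNumReplicas >= 0) {
            Assert.assertEquals(expectedNumReplicas, replicas.size());
        }
        if (expectedNumEvictable >= 0) {
            Assert.assertEquals(expectedNumEvictable, evictable.size());
        }
        if (expectedNumMmapedEvictable >= 0) {
            Assert.assertEquals(expectedNumMmapedEvictable, evictableMmapped.size());
        }
    }
}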

Example 23 with InvalidToken

Use of org.apache.hadoop.security.token.SecretManager.InvalidToken in project hadoop by apache.

From the class TestWebHDFS, method testWebHdfsReadRetries.

@Test(timeout = 90000)
public void testWebHdfsReadRetries() throws Exception {
    // ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
    final Configuration conf = WebHdfsTestUtil.createConf();
    final Path dir = new Path("/testWebHdfsReadRetries");
    conf.setBoolean(HdfsClientConfigKeys.Retry.POLICY_ENABLED_KEY, true);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_MIN_DATANODES_KEY, 1);
    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024 * 512);
    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
    final short numDatanodes = 1;
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
    try {
        cluster.waitActive();
        final FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME);
        //create a file
        final long length = 1L << 20;
        final Path file1 = new Path(dir, "testFile");
        DFSTestUtil.createFile(fs, file1, length, numDatanodes, 20120406L);
        //get file status and check that it was written properly.
        final FileStatus s1 = fs.getFileStatus(file1);
        assertEquals("Write failed for file " + file1, length, s1.getLen());
        // Ensure file can be read through WebHdfsInputStream
        FSDataInputStream in = fs.open(file1);
        assertTrue("Input stream is not an instance of class WebHdfsInputStream", in.getWrappedStream() instanceof WebHdfsInputStream);
        int count = 0;
        for (; in.read() != -1; count++) ;
        assertEquals("Read failed for file " + file1, s1.getLen(), count);
        assertEquals("Sghould not be able to read beyond end of file", in.read(), -1);
        in.close();
        try {
            in.read();
            fail("Read after close should have failed");
        } catch (IOException ioe) {
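            // expected: reads after close should fail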
        }
        WebHdfsFileSystem wfs = (WebHdfsFileSystem) fs;
        // Read should not be retried if AccessControlException is encountered.
        String msg = "ReadRetries: Test Access Control Exception";
        testReadRetryExceptionHelper(wfs, file1, new AccessControlException(msg), msg, false, 1);
        // Retry policy should be invoked if IOExceptions are thrown.
        msg = "ReadRetries: Test SocketTimeoutException";
        testReadRetryExceptionHelper(wfs, file1, new SocketTimeoutException(msg), msg, true, 5);
        msg = "ReadRetries: Test SocketException";
        testReadRetryExceptionHelper(wfs, file1, new SocketException(msg), msg, true, 5);
        msg = "ReadRetries: Test EOFException";
        testReadRetryExceptionHelper(wfs, file1, new EOFException(msg), msg, true, 5);
        msg = "ReadRetries: Test Generic IO Exception";
        testReadRetryExceptionHelper(wfs, file1, new IOException(msg), msg, true, 5);
        // If InvalidToken exception occurs, WebHdfs only retries if the
        // delegation token was replaced. Do that twice, then verify by checking
        // the number of times it tried.
        WebHdfsFileSystem spyfs = spy(wfs);
        when(spyfs.replaceExpiredDelegationToken()).thenReturn(true, true, false);
        msg = "ReadRetries: Test Invalid Token Exception";
        testReadRetryExceptionHelper(spyfs, file1, new InvalidToken(msg), msg, false, 3);
    } finally {
        cluster.shutdown();
    }
}
Also used: Path(org.apache.hadoop.fs.Path) SocketException(java.net.SocketException) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) FileStatus(org.apache.hadoop.fs.FileStatus) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) AccessControlException(org.apache.hadoop.security.AccessControlException) IOException(java.io.IOException) SocketTimeoutException(java.net.SocketTimeoutException) FileSystem(org.apache.hadoop.fs.FileSystem) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) EOFException(java.io.EOFException) InvalidToken(org.apache.hadoop.security.token.SecretManager.InvalidToken) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) WebHdfsInputStream(org.apache.hadoop.hdfs.web.WebHdfsFileSystem.WebHdfsInputStream) Test(org.junit.Test) HttpServerFunctionalTest(org.apache.hadoop.http.HttpServerFunctionalTest)
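
testReadRetryExceptionHelper (defined elsewhere in TestWebHDFS) injects the given exception into a read and asserts how many attempts follow. The decision the test exercises can be summarized in an illustrative sketch; the real logic lives inside WebHdfsFileSystem's WebHdfsInputStream and differs in detail, though replaceExpiredDelegationToken() is the actual method the test stubs:

// Illustrative only: how the test's expected attempt counts map onto
// a retry decision. Not the actual WebHdfsInputStream code.
private static boolean shouldRetry(IOException ioe, WebHdfsFileSystem fs)
        throws IOException {
    if (ioe instanceof AccessControlException) {
        // Never retried: the test expects exactly 1 attempt.
        return false;
    }
    if (ioe instanceof InvalidToken) {
        // Retried only while a replacement delegation token can be fetched.
        // The spy above returns true, true, false, so the test expects
        // 3 attempts before the read finally fails.
        return fs.replaceExpiredDelegationToken();
    }
    // Other IOExceptions (socket timeouts, EOF, generic IO) go through
    // the configured retry policy; the test expects 5 attempts.
    return true;
}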

Example 24 with InvalidToken

Use of org.apache.hadoop.security.token.SecretManager.InvalidToken in project hadoop by apache.

From the class TestWebHdfsTokens, method validateLazyTokenFetch.

private void validateLazyTokenFetch(final Configuration clusterConf) throws Exception {
    final String testUser = "DummyUser";
    UserGroupInformation ugi = UserGroupInformation.createUserForTesting(testUser, new String[] { "supergroup" });
    WebHdfsFileSystem fs = ugi.doAs(new PrivilegedExceptionAction<WebHdfsFileSystem>() {

        @Override
        public WebHdfsFileSystem run() throws IOException {
            return spy((WebHdfsFileSystem) FileSystem.newInstance(uri, clusterConf));
        }
    });
    // verify token ops don't get a token
    Assert.assertNull(fs.getRenewToken());
    Token<?> token = fs.getDelegationToken(null);
    fs.renewDelegationToken(token);
    fs.cancelDelegationToken(token);
    verify(fs, never()).getDelegationToken();
    verify(fs, never()).replaceExpiredDelegationToken();
    verify(fs, never()).setDelegationToken(any());
    Assert.assertNull(fs.getRenewToken());
    reset(fs);
    // verify first non-token op gets a token
    final Path p = new Path("/f");
    fs.create(p, (short) 1).close();
    verify(fs, times(1)).getDelegationToken();
    verify(fs, never()).replaceExpiredDelegationToken();
    verify(fs, times(1)).getDelegationToken(anyString());
    verify(fs, times(1)).setDelegationToken(any());
    token = fs.getRenewToken();
    Assert.assertNotNull(token);
    Assert.assertEquals(testUser, getTokenOwner(token));
    Assert.assertEquals(fs.getTokenKind(), token.getKind());
    reset(fs);
    // verify prior token is reused
    fs.getFileStatus(p);
    verify(fs, times(1)).getDelegationToken();
    verify(fs, never()).replaceExpiredDelegationToken();
    verify(fs, never()).getDelegationToken(anyString());
    verify(fs, never()).setDelegationToken(any());
    Token<?> token2 = fs.getRenewToken();
    Assert.assertNotNull(token2);
    Assert.assertEquals(fs.getTokenKind(), token.getKind());
    Assert.assertSame(token, token2);
    reset(fs);
    // verify renew of expired token fails w/o getting a new token
    token = fs.getRenewToken();
    fs.cancelDelegationToken(token);
    try {
        fs.renewDelegationToken(token);
        Assert.fail("should have failed");
    } catch (InvalidToken it) {
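        // expected: renewing a cancelled token should fail with InvalidToken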
    } catch (Exception ex) {
        Assert.fail("wrong exception:" + ex);
    }
    verify(fs, never()).getDelegationToken();
    verify(fs, never()).replaceExpiredDelegationToken();
    verify(fs, never()).getDelegationToken(anyString());
    verify(fs, never()).setDelegationToken(any());
    token2 = fs.getRenewToken();
    Assert.assertNotNull(token2);
    Assert.assertEquals(fs.getTokenKind(), token.getKind());
    Assert.assertSame(token, token2);
    reset(fs);
    // verify cancel of expired token fails w/o getting a new token
    try {
        fs.cancelDelegationToken(token);
        Assert.fail("should have failed");
    } catch (InvalidToken it) {
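        // expected: cancelling an already-cancelled token should fail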
    } catch (Exception ex) {
        Assert.fail("wrong exception:" + ex);
    }
    verify(fs, never()).getDelegationToken();
    verify(fs, never()).replaceExpiredDelegationToken();
    verify(fs, never()).getDelegationToken(anyString());
    verify(fs, never()).setDelegationToken(any());
    token2 = fs.getRenewToken();
    Assert.assertNotNull(token2);
    Assert.assertEquals(fs.getTokenKind(), token.getKind());
    Assert.assertSame(token, token2);
    reset(fs);
    // verify an expired token is replaced with a new token
    InputStream is = fs.open(p);
    is.read();
    is.close();
    // first bad, then good
    verify(fs, times(2)).getDelegationToken();
    verify(fs, times(1)).replaceExpiredDelegationToken();
    verify(fs, times(1)).getDelegationToken(null);
    verify(fs, times(1)).setDelegationToken(any());
    token2 = fs.getRenewToken();
    Assert.assertNotNull(token2);
    Assert.assertNotSame(token, token2);
    Assert.assertEquals(fs.getTokenKind(), token.getKind());
    Assert.assertEquals(testUser, getTokenOwner(token2));
    reset(fs);
    // verify with open because it's a little different in how it
    // opens connections
    fs.cancelDelegationToken(fs.getRenewToken());
    is = fs.open(p);
    is.read();
    is.close();
    // first bad, then good
    verify(fs, times(2)).getDelegationToken();
    verify(fs, times(1)).replaceExpiredDelegationToken();
    verify(fs, times(1)).getDelegationToken(null);
    verify(fs, times(1)).setDelegationToken(any());
    token2 = fs.getRenewToken();
    Assert.assertNotNull(token2);
    Assert.assertNotSame(token, token2);
    Assert.assertEquals(fs.getTokenKind(), token.getKind());
    Assert.assertEquals(testUser, getTokenOwner(token2));
    reset(fs);
    // verify fs close cancels the token
    fs.close();
    verify(fs, never()).getDelegationToken();
    verify(fs, never()).replaceExpiredDelegationToken();
    verify(fs, never()).getDelegationToken(anyString());
    verify(fs, never()).setDelegationToken(any());
    verify(fs, times(1)).cancelDelegationToken(eq(token2));
    // add a token to ugi for a new fs, verify it uses that token
    token = fs.getDelegationToken(null);
    ugi.addToken(token);
    fs = ugi.doAs(new PrivilegedExceptionAction<WebHdfsFileSystem>() {

        @Override
        public WebHdfsFileSystem run() throws IOException {
            return spy((WebHdfsFileSystem) FileSystem.newInstance(uri, clusterConf));
        }
    });
    Assert.assertNull(fs.getRenewToken());
    fs.getFileStatus(new Path("/"));
    verify(fs, times(1)).getDelegationToken();
    verify(fs, never()).replaceExpiredDelegationToken();
    verify(fs, never()).getDelegationToken(anyString());
    verify(fs, times(1)).setDelegationToken(eq(token));
    token2 = fs.getRenewToken();
    Assert.assertNotNull(token2);
    Assert.assertEquals(fs.getTokenKind(), token.getKind());
    Assert.assertSame(token, token2);
    reset(fs);
    // verify it reuses the prior ugi token
    fs.getFileStatus(new Path("/"));
    verify(fs, times(1)).getDelegationToken();
    verify(fs, never()).replaceExpiredDelegationToken();
    verify(fs, never()).getDelegationToken(anyString());
    verify(fs, never()).setDelegationToken(any());
    token2 = fs.getRenewToken();
    Assert.assertNotNull(token2);
    Assert.assertEquals(fs.getTokenKind(), token.getKind());
    Assert.assertSame(token, token2);
    reset(fs);
    // verify an expired ugi token is NOT replaced with a new token
    fs.cancelDelegationToken(token);
    for (int i = 0; i < 2; i++) {
        try {
            fs.getFileStatus(new Path("/"));
            Assert.fail("didn't fail");
        } catch (InvalidToken it) {
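            // expected: the ugi token is not replaced, so the operation fails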
        } catch (Exception ex) {
            Assert.fail("wrong exception:" + ex);
        }
        verify(fs, times(1)).getDelegationToken();
        verify(fs, times(1)).replaceExpiredDelegationToken();
        verify(fs, never()).getDelegationToken(anyString());
        verify(fs, never()).setDelegationToken(any());
        token2 = fs.getRenewToken();
        Assert.assertNotNull(token2);
        Assert.assertEquals(fs.getTokenKind(), token.getKind());
        Assert.assertSame(token, token2);
        reset(fs);
    }
    // verify fs close does NOT cancel the ugi token
    fs.close();
    verify(fs, never()).getDelegationToken();
    verify(fs, never()).replaceExpiredDelegationToken();
    verify(fs, never()).getDelegationToken(anyString());
    verify(fs, never()).setDelegationToken(any());
    verify(fs, never()).cancelDelegationToken(any(Token.class));
}
Also used: Path(org.apache.hadoop.fs.Path) InputStream(java.io.InputStream) InvalidToken(org.apache.hadoop.security.token.SecretManager.InvalidToken) Token(org.apache.hadoop.security.token.Token) IOException(java.io.IOException) PrivilegedExceptionAction(java.security.PrivilegedExceptionAction) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation)
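
As a usage note, the "must throw InvalidToken" pattern repeated three times above could be factored into a small helper. The name expectInvalidToken is hypothetical (introduced here for illustration, not part of the Hadoop test):

// Hypothetical refactoring sketch, not Hadoop source.
private static void expectInvalidToken(java.util.concurrent.Callable<?> op) {
    try {
        op.call();
        Assert.fail("should have failed");
    } catch (InvalidToken it) {
        // expected
    } catch (Exception ex) {
        Assert.fail("wrong exception:" + ex);
    }
}

// Usage: expectInvalidToken(() -> fs.renewDelegationToken(token));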

Example 25 with InvalidToken

Use of org.apache.hadoop.security.token.SecretManager.InvalidToken in project hadoop by apache.

From the class TestDelegationToken, method testDelegationToken.

@SuppressWarnings("deprecation")
@Test
public void testDelegationToken() throws Exception {
    final JobClient client;
    client = user1.doAs(new PrivilegedExceptionAction<JobClient>() {

        @Override
        public JobClient run() throws Exception {
            return new JobClient(cluster.createJobConf());
        }
    });
    final JobClient bobClient;
    bobClient = user2.doAs(new PrivilegedExceptionAction<JobClient>() {

        @Override
        public JobClient run() throws Exception {
            return new JobClient(cluster.createJobConf());
        }
    });
    final Token<DelegationTokenIdentifier> token = client.getDelegationToken(new Text(user1.getUserName()));
    DataInputBuffer inBuf = new DataInputBuffer();
    byte[] bytes = token.getIdentifier();
    inBuf.reset(bytes, bytes.length);
    DelegationTokenIdentifier ident = new DelegationTokenIdentifier();
    ident.readFields(inBuf);
    assertEquals("alice", ident.getUser().getUserName());
    long createTime = ident.getIssueDate();
    long maxTime = ident.getMaxDate();
    long currentTime = System.currentTimeMillis();
    System.out.println("create time: " + createTime);
    System.out.println("current time: " + currentTime);
    System.out.println("max time: " + maxTime);
    assertTrue("createTime < current", createTime < currentTime);
    assertTrue("current < maxTime", currentTime < maxTime);
    // renew should work as user alice
    user1.doAs(new PrivilegedExceptionAction<Void>() {

        @Override
        public Void run() throws Exception {
            client.renewDelegationToken(token);
            client.renewDelegationToken(token);
            return null;
        }
    });
    // bob should fail to renew
    user2.doAs(new PrivilegedExceptionAction<Void>() {

        @Override
        public Void run() throws Exception {
            try {
                bobClient.renewDelegationToken(token);
                Assert.fail("bob renew");
            } catch (AccessControlException ace) {
            // PASS
            }
            return null;
        }
    });
    // bob should fail to cancel
    user2.doAs(new PrivilegedExceptionAction<Void>() {

        @Override
        public Void run() throws Exception {
            try {
                bobClient.cancelDelegationToken(token);
                Assert.fail("bob cancel");
            } catch (AccessControlException ace) {
            // PASS
            }
            return null;
        }
    });
    // alice should be able to cancel but only cancel once
    user1.doAs(new PrivilegedExceptionAction<Void>() {

        @Override
        public Void run() throws Exception {
            client.cancelDelegationToken(token);
            try {
                client.cancelDelegationToken(token);
                Assert.fail("second alice cancel");
            } catch (InvalidToken it) {
            // PASS
            }
            return null;
        }
    });
}
Also used: AccessControlException(org.apache.hadoop.security.AccessControlException) Text(org.apache.hadoop.io.Text) PrivilegedExceptionAction(java.security.PrivilegedExceptionAction) JobClient(org.apache.hadoop.mapred.JobClient) DataInputBuffer(org.apache.hadoop.io.DataInputBuffer) InvalidToken(org.apache.hadoop.security.token.SecretManager.InvalidToken) Test(org.junit.Test)
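
The identifier-decoding steps at the top of the test (getIdentifier(), DataInputBuffer.reset(), readFields()) are the standard Writable pattern for inspecting a token's payload. Extracted into a helper for clarity (decodeIdentifier is a name introduced here, not in the test):

private static DelegationTokenIdentifier decodeIdentifier(
        Token<DelegationTokenIdentifier> token) throws IOException {
    // A token carries its identifier as opaque bytes; the matching
    // identifier class deserializes them via Writable readFields.
    DataInputBuffer inBuf = new DataInputBuffer();
    byte[] bytes = token.getIdentifier();
    inBuf.reset(bytes, bytes.length);
    DelegationTokenIdentifier ident = new DelegationTokenIdentifier();
    ident.readFields(inBuf);
    return ident;
}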

Aggregations

InvalidToken (org.apache.hadoop.security.token.SecretManager.InvalidToken): 28
Test (org.junit.Test): 16
IOException (java.io.IOException): 14
DataInputStream (java.io.DataInputStream): 7
UserGroupInformation (org.apache.hadoop.security.UserGroupInformation): 7
Configuration (org.apache.hadoop.conf.Configuration): 6
Text (org.apache.hadoop.io.Text): 6
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 6
ByteArrayInputStream (java.io.ByteArrayInputStream): 5
YarnException (org.apache.hadoop.yarn.exceptions.YarnException): 5
ByteBuffer (java.nio.ByteBuffer): 4
Credentials (org.apache.hadoop.security.Credentials): 4
Token (org.apache.hadoop.security.token.Token): 4
PrivilegedExceptionAction (java.security.PrivilegedExceptionAction): 3
ExtendedBlockId (org.apache.hadoop.hdfs.ExtendedBlockId): 3
ShortCircuitCache (org.apache.hadoop.hdfs.shortcircuit.ShortCircuitCache): 3
AccessControlException (org.apache.hadoop.security.AccessControlException): 3
ContainerTokenIdentifier (org.apache.hadoop.yarn.security.ContainerTokenIdentifier): 3
EOFException (java.io.EOFException): 2
InetSocketAddress (java.net.InetSocketAddress): 2