
Example 1 with InvalidToken

Use of org.apache.hadoop.security.token.SecretManager.InvalidToken in project hadoop by apache.

From the class SaslRpcServer, method getIdentifier.

public static <T extends TokenIdentifier> T getIdentifier(String id, SecretManager<T> secretManager) throws InvalidToken {
    byte[] tokenId = decodeIdentifier(id);
    T tokenIdentifier = secretManager.createIdentifier();
    try {
        tokenIdentifier.readFields(new DataInputStream(new ByteArrayInputStream(tokenId)));
    } catch (IOException e) {
        throw (InvalidToken) new InvalidToken("Can't de-serialize tokenIdentifier").initCause(e);
    }
    return tokenIdentifier;
}
Also used: ByteArrayInputStream (java.io.ByteArrayInputStream), InvalidToken (org.apache.hadoop.security.token.SecretManager.InvalidToken), IOException (java.io.IOException), DataInputStream (java.io.DataInputStream)
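
A round-trip illustration may help. The sketch below is not from the Hadoop sources: DemoTokenIdentifier and DemoSecretManager are hypothetical minimal subclasses, and the base64 handling assumes commons-codec (the same codec SaslRpcServer uses in decodeIdentifier). A tampered or truncated identifier string surfaces as InvalidToken when readFields fails.

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import javax.crypto.SecretKey;
import org.apache.commons.codec.binary.Base64;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.SaslRpcServer;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.SecretManager;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.security.token.TokenIdentifier;

// Hypothetical identifier: a single owner field, serialized via Writable.
class DemoTokenIdentifier extends TokenIdentifier {

    static final Text KIND = new Text("DEMO_TOKEN");

    private final Text owner = new Text();

    DemoTokenIdentifier() {
    }

    DemoTokenIdentifier(String ownerName) {
        owner.set(ownerName);
    }

    @Override
    public Text getKind() {
        return KIND;
    }

    @Override
    public UserGroupInformation getUser() {
        return UserGroupInformation.createRemoteUser(owner.toString());
    }

    @Override
    public void write(DataOutput out) throws IOException {
        owner.write(out);
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        owner.readFields(in);
    }
}

// Hypothetical secret manager; getIdentifier only calls createIdentifier().
class DemoSecretManager extends SecretManager<DemoTokenIdentifier> {

    private final SecretKey key = generateSecret();

    @Override
    protected byte[] createPassword(DemoTokenIdentifier id) {
        return createPassword(id.getBytes(), key);
    }

    @Override
    public byte[] retrievePassword(DemoTokenIdentifier id) throws InvalidToken {
        return createPassword(id.getBytes(), key);
    }

    @Override
    public DemoTokenIdentifier createIdentifier() {
        return new DemoTokenIdentifier();
    }
}

Usage, inside any method: serialize and encode an identifier, then parse it back through getIdentifier.

    // Encode the way the SASL layer does, then decode; corruption => InvalidToken.
    String encoded = new String(Base64.encodeBase64(new DemoTokenIdentifier("alice").getBytes()), StandardCharsets.UTF_8);
    DemoTokenIdentifier parsed = SaslRpcServer.getIdentifier(encoded, new DemoSecretManager());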

Example 2 with InvalidToken

Use of org.apache.hadoop.security.token.SecretManager.InvalidToken in project hadoop by apache.

From the class DataXceiver, method run.

/**
   * Read/write data from/to the DataXceiverServer.
   */
@Override
public void run() {
    int opsProcessed = 0;
    Op op = null;
    try {
        synchronized (this) {
            xceiver = Thread.currentThread();
        }
        dataXceiverServer.addPeer(peer, Thread.currentThread(), this);
        peer.setWriteTimeout(datanode.getDnConf().socketWriteTimeout);
        InputStream input = socketIn;
        try {
            IOStreamPair saslStreams = datanode.saslServer.receive(peer, socketOut, socketIn, datanode.getXferAddress().getPort(), datanode.getDatanodeId());
            input = new BufferedInputStream(saslStreams.in, smallBufferSize);
            socketOut = saslStreams.out;
        } catch (InvalidMagicNumberException imne) {
            if (imne.isHandshake4Encryption()) {
                LOG.info("Failed to read expected encryption handshake from client " + "at " + peer.getRemoteAddressString() + ". Perhaps the client " + "is running an older version of Hadoop which does not support " + "encryption");
            } else {
                LOG.info("Failed to read expected SASL data transfer protection " + "handshake from client at " + peer.getRemoteAddressString() + ". Perhaps the client is running an older version of Hadoop " + "which does not support SASL data transfer protection");
            }
            return;
        }
        super.initialize(new DataInputStream(input));
        // We process requests in a loop, and stay around for a short timeout.
        // This optimistic behaviour allows the other end to reuse connections.
        // Setting keepalive timeout to 0 disables this behavior.
        do {
            updateCurrentThreadName("Waiting for operation #" + (opsProcessed + 1));
            try {
                if (opsProcessed != 0) {
                    assert dnConf.socketKeepaliveTimeout > 0;
                    peer.setReadTimeout(dnConf.socketKeepaliveTimeout);
                } else {
                    peer.setReadTimeout(dnConf.socketTimeout);
                }
                op = readOp();
            } catch (InterruptedIOException ignored) {
                // Time out while we wait for client rpc
                break;
            } catch (EOFException | ClosedChannelException e) {
                // Since we optimistically expect the next op, it's quite normal to
                // get EOF here.
                LOG.debug("Cached {} closing after {} ops.  " + "This message is usually benign.", peer, opsProcessed);
                break;
            } catch (IOException err) {
                incrDatanodeNetworkErrors();
                throw err;
            }
            // restore normal timeout
            if (opsProcessed != 0) {
                peer.setReadTimeout(dnConf.socketTimeout);
            }
            opStartTime = monotonicNow();
            processOp(op);
            ++opsProcessed;
        } while ((peer != null) && (!peer.isClosed() && dnConf.socketKeepaliveTimeout > 0));
    } catch (Throwable t) {
        String s = datanode.getDisplayName() + ":DataXceiver error processing " + ((op == null) ? "unknown" : op.name()) + " operation " + " src: " + remoteAddress + " dst: " + localAddress;
        if (op == Op.WRITE_BLOCK && t instanceof ReplicaAlreadyExistsException) {
            // For WRITE_BLOCK, it is okay if the replica already exists, since the
            // client and replication may write the same block to the same datanode
            // at the same time.
            if (LOG.isTraceEnabled()) {
                LOG.trace(s, t);
            } else {
                LOG.info(s + "; " + t);
            }
        } else if (op == Op.READ_BLOCK && t instanceof SocketTimeoutException) {
            String s1 = "Likely the client has stopped reading, disconnecting it";
            s1 += " (" + s + ")";
            if (LOG.isTraceEnabled()) {
                LOG.trace(s1, t);
            } else {
                LOG.info(s1 + "; " + t);
            }
        } else if (t instanceof InvalidToken) {
            // The InvalidToken exception has already been logged by the
            // checkAccess() method and this is not a server error.
            if (LOG.isTraceEnabled()) {
                LOG.trace(s, t);
            }
        } else {
            LOG.error(s, t);
        }
    } finally {
        collectThreadLocalStates();
        if (LOG.isDebugEnabled()) {
            LOG.debug(datanode.getDisplayName() + ":Number of active connections is: " + datanode.getXceiverCount());
        }
        updateCurrentThreadName("Cleaning up");
        if (peer != null) {
            dataXceiverServer.closePeer(peer);
            IOUtils.closeStream(in);
        }
    }
}
Also used: Op (org.apache.hadoop.hdfs.protocol.datatransfer.Op), InterruptedIOException (java.io.InterruptedIOException), ClosedChannelException (java.nio.channels.ClosedChannelException), BufferedInputStream (java.io.BufferedInputStream), DataInputStream (java.io.DataInputStream), FileInputStream (java.io.FileInputStream), InputStream (java.io.InputStream), InvalidMagicNumberException (org.apache.hadoop.hdfs.protocol.datatransfer.sasl.InvalidMagicNumberException), IOException (java.io.IOException), ByteString (com.google.protobuf.ByteString), SocketTimeoutException (java.net.SocketTimeoutException), IOStreamPair (org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair), EOFException (java.io.EOFException), InvalidToken (org.apache.hadoop.security.token.SecretManager.InvalidToken)
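
The branch that matters for this page is the InvalidToken one: an invalid block access token has already been reported to the client during access checking, so DataXceiver logs it at trace level instead of treating it as a server error. Below is a minimal standalone sketch of that classification pattern, assuming an slf4j Logger; the helper class and its name are illustrative, not Hadoop API.

import java.net.SocketTimeoutException;
import org.apache.hadoop.hdfs.protocol.datatransfer.Op;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.slf4j.Logger;

// Hypothetical helper mirroring the catch block above: client-caused failures
// are demoted to trace/info; everything else is logged as a server error.
final class XceiverErrorClassifier {

    static void log(Logger log, Op op, Throwable t, String context) {
        if (t instanceof InvalidToken) {
            // Already surfaced to the client by the access check; not a server error.
            log.trace(context, t);
        } else if (op == Op.READ_BLOCK && t instanceof SocketTimeoutException) {
            log.info("Likely the client has stopped reading, disconnecting it ({})", context);
        } else {
            log.error(context, t);
        }
    }
}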

Example 3 with InvalidToken

Use of org.apache.hadoop.security.token.SecretManager.InvalidToken in project hadoop by apache.

From the class TestDelegationToken, method testDelegationTokenSecretManager.

@Test
public void testDelegationTokenSecretManager() throws Exception {
    Token<DelegationTokenIdentifier> token = generateDelegationToken("SomeUser", "JobTracker");
    // Fake renewer should not be able to renew
    try {
        dtSecretManager.renewToken(token, "FakeRenewer");
        Assert.fail("should have failed");
    } catch (AccessControlException ace) {
        // PASS
    }
    dtSecretManager.renewToken(token, "JobTracker");
    DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
    byte[] tokenId = token.getIdentifier();
    identifier.readFields(new DataInputStream(new ByteArrayInputStream(tokenId)));
    Assert.assertTrue(null != dtSecretManager.retrievePassword(identifier));
    LOG.info("Sleep to expire the token");
    Thread.sleep(6000);
    // Token should be expired
    try {
        dtSecretManager.retrievePassword(identifier);
        // Should not come here
        Assert.fail("Token should have expired");
    } catch (InvalidToken e) {
        // Success
    }
    dtSecretManager.renewToken(token, "JobTracker");
    LOG.info("Sleep beyond the max lifetime");
    Thread.sleep(5000);
    try {
        dtSecretManager.renewToken(token, "JobTracker");
        Assert.fail("should have been expired");
    } catch (InvalidToken it) {
        // PASS
    }
}
Also used: DelegationTokenIdentifier (org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier), ByteArrayInputStream (java.io.ByteArrayInputStream), InvalidToken (org.apache.hadoop.security.token.SecretManager.InvalidToken), AccessControlException (org.apache.hadoop.security.AccessControlException), DataInputStream (java.io.DataInputStream), Test (org.junit.Test)
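
Nothing about the 6-second and 5-second sleeps above is magic: they only cross expiry boundaries because the cluster under test shortens the delegation token lifetimes. A sketch of the kind of setup this assumes (the DFSConfigKeys constants are real; the values and the helper method are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

// Assumed test setup: tokens expire ~5s after issue unless renewed, and can
// never live past ~10s in total, so the sleeps above push them over each limit.
static Configuration shortTokenLifetimeConf() {
    Configuration config = new HdfsConfiguration();
    config.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, 5000);
    config.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
    return config;
}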

Example 4 with InvalidToken

Use of org.apache.hadoop.security.token.SecretManager.InvalidToken in project hadoop by apache.

From the class TestShortCircuitCache, method testShmBasedStaleness.

@Test(timeout = 60000)
public void testShmBasedStaleness() throws Exception {
    BlockReaderTestUtil.enableShortCircuitShmTracing();
    TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
    Configuration conf = createShortCircuitConf("testShmBasedStaleness", sockDir);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    DistributedFileSystem fs = cluster.getFileSystem();
    final ShortCircuitCache cache = fs.getClient().getClientContext().getShortCircuitCache();
    String TEST_FILE = "/test_file";
    final int TEST_FILE_LEN = 8193;
    final int SEED = 0xFADED;
    DFSTestUtil.createFile(fs, new Path(TEST_FILE), TEST_FILE_LEN, (short) 1, SEED);
    FSDataInputStream fis = fs.open(new Path(TEST_FILE));
    int first = fis.read();
    final ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, new Path(TEST_FILE));
    Assert.assertTrue(first != -1);
    cache.accept(new CacheVisitor() {

        @Override
        public void visit(int numOutstandingMmaps, Map<ExtendedBlockId, ShortCircuitReplica> replicas, Map<ExtendedBlockId, InvalidToken> failedLoads, LinkedMap evictable, LinkedMap evictableMmapped) {
            ShortCircuitReplica replica = replicas.get(ExtendedBlockId.fromExtendedBlock(block));
            Assert.assertNotNull(replica);
            Assert.assertTrue(replica.getSlot().isValid());
        }
    });
    // Stop the DataNode.  This will close the socket keeping the client's
    // shared memory segment alive, and make the segment stale.
    cluster.getDataNodes().get(0).shutdown();
    cache.accept(new CacheVisitor() {

        @Override
        public void visit(int numOutstandingMmaps, Map<ExtendedBlockId, ShortCircuitReplica> replicas, Map<ExtendedBlockId, InvalidToken> failedLoads, LinkedMap evictable, LinkedMap evictableMmapped) {
            ShortCircuitReplica replica = replicas.get(ExtendedBlockId.fromExtendedBlock(block));
            Assert.assertNotNull(replica);
            Assert.assertFalse(replica.getSlot().isValid());
        }
    });
    cluster.shutdown();
    sockDir.close();
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), ExtendedBlockId (org.apache.hadoop.hdfs.ExtendedBlockId), DatanodeInfoBuilder (org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), LinkedMap (org.apache.commons.collections.map.LinkedMap), TemporarySocketDirectory (org.apache.hadoop.net.unix.TemporarySocketDirectory), CacheVisitor (org.apache.hadoop.hdfs.shortcircuit.ShortCircuitCache.CacheVisitor), InvalidToken (org.apache.hadoop.security.token.SecretManager.InvalidToken), FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream), Test (org.junit.Test)
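
The failedLoads parameter is where InvalidToken enters this API: the ShortCircuitCache records, per block, any fetch of short-circuit file descriptors that the DataNode rejected for an invalid access token. A hedged usage sketch built on the same CacheVisitor callback as above (the assertion is illustrative, not part of this test):

    cache.accept(new CacheVisitor() {

        @Override
        public void visit(int numOutstandingMmaps, Map<ExtendedBlockId, ShortCircuitReplica> replicas, Map<ExtendedBlockId, InvalidToken> failedLoads, LinkedMap evictable, LinkedMap evictableMmapped) {
            // No block should have failed to load because of a bad access token.
            Assert.assertTrue("unexpected token failures: " + failedLoads, failedLoads.isEmpty());
        }
    });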

Example 5 with InvalidToken

Use of org.apache.hadoop.security.token.SecretManager.InvalidToken in project hadoop by apache.

From the class TestAMRMClient, method testAMRMClientOnAMRMTokenRollOver.

@Test(timeout = 60000)
public void testAMRMClientOnAMRMTokenRollOver() throws YarnException, IOException {
    AMRMClient<ContainerRequest> amClient = null;
    try {
        AMRMTokenSecretManager amrmTokenSecretManager = yarnCluster.getResourceManager().getRMContext().getAMRMTokenSecretManager();
        // start am rm client
        amClient = AMRMClient.<ContainerRequest>createAMRMClient();
        amClient.init(conf);
        amClient.start();
        Long startTime = System.currentTimeMillis();
        amClient.registerApplicationMaster("Host", 10000, "");
        org.apache.hadoop.security.token.Token<AMRMTokenIdentifier> amrmToken_1 = getAMRMToken();
        Assert.assertNotNull(amrmToken_1);
        Assert.assertEquals(amrmToken_1.decodeIdentifier().getKeyId(), amrmTokenSecretManager.getMasterKey().getMasterKey().getKeyId());
        // In the meantime, the old AMRMToken should continue to work
        while (System.currentTimeMillis() - startTime < rolling_interval_sec * 1000) {
            amClient.allocate(0.1f);
            sleep(1000);
        }
        amClient.allocate(0.1f);
        org.apache.hadoop.security.token.Token<AMRMTokenIdentifier> amrmToken_2 = getAMRMToken();
        Assert.assertNotNull(amrmToken_2);
        Assert.assertEquals(amrmToken_2.decodeIdentifier().getKeyId(), amrmTokenSecretManager.getMasterKey().getMasterKey().getKeyId());
        Assert.assertNotEquals(amrmToken_1, amrmToken_2);
        // can do the allocate call with latest AMRMToken
        AllocateResponse response = amClient.allocate(0.1f);
        // Verify latest AMRMToken can be used to send allocation request.
        UserGroupInformation testUser1 = UserGroupInformation.createRemoteUser("testUser1");
        AMRMTokenIdentifierForTest newVersionTokenIdentifier = new AMRMTokenIdentifierForTest(amrmToken_2.decodeIdentifier(), "message");
        Assert.assertEquals("Message is changed after set to newVersionTokenIdentifier", "message", newVersionTokenIdentifier.getMessage());
        org.apache.hadoop.security.token.Token<AMRMTokenIdentifier> newVersionToken = new org.apache.hadoop.security.token.Token<AMRMTokenIdentifier>(newVersionTokenIdentifier.getBytes(), amrmTokenSecretManager.retrievePassword(newVersionTokenIdentifier), newVersionTokenIdentifier.getKind(), new Text());
        SecurityUtil.setTokenService(newVersionToken, yarnCluster.getResourceManager().getApplicationMasterService().getBindAddress());
        testUser1.addToken(newVersionToken);
        AllocateRequest request = Records.newRecord(AllocateRequest.class);
        request.setResponseId(response.getResponseId());
        testUser1.doAs(new PrivilegedAction<ApplicationMasterProtocol>() {

            @Override
            public ApplicationMasterProtocol run() {
                return (ApplicationMasterProtocol) YarnRPC.create(conf).getProxy(ApplicationMasterProtocol.class, yarnCluster.getResourceManager().getApplicationMasterService().getBindAddress(), conf);
            }
        }).allocate(request);
        // Wait until the roll-over completes; after that, the rolled-over
        // token can no longer be used to make an allocate call.
        while (true) {
            if (amrmToken_2.decodeIdentifier().getKeyId() != amrmTokenSecretManager.getCurrnetMasterKeyData().getMasterKey().getKeyId()) {
                if (amrmTokenSecretManager.getNextMasterKeyData() == null) {
                    break;
                } else if (amrmToken_2.decodeIdentifier().getKeyId() != amrmTokenSecretManager.getNextMasterKeyData().getMasterKey().getKeyId()) {
                    break;
                }
            }
            amClient.allocate(0.1f);
            sleep(1000);
        }
        try {
            UserGroupInformation testUser2 = UserGroupInformation.createRemoteUser("testUser2");
            SecurityUtil.setTokenService(amrmToken_2, yarnCluster.getResourceManager().getApplicationMasterService().getBindAddress());
            testUser2.addToken(amrmToken_2);
            testUser2.doAs(new PrivilegedAction<ApplicationMasterProtocol>() {

                @Override
                public ApplicationMasterProtocol run() {
                    return (ApplicationMasterProtocol) YarnRPC.create(conf).getProxy(ApplicationMasterProtocol.class, yarnCluster.getResourceManager().getApplicationMasterService().getBindAddress(), conf);
                }
            }).allocate(Records.newRecord(AllocateRequest.class));
            Assert.fail("The old Token should not work");
        } catch (Exception ex) {
            Assert.assertTrue(ex instanceof InvalidToken);
            Assert.assertTrue(ex.getMessage().contains("Invalid AMRMToken from " + amrmToken_2.decodeIdentifier().getApplicationAttemptId()));
        }
        amClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED, null, null);
    } finally {
        if (amClient != null && amClient.getServiceState() == STATE.STARTED) {
            amClient.stop();
        }
    }
}
Also used: AllocateRequest (org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest), InvalidToken (org.apache.hadoop.security.token.SecretManager.InvalidToken), Text (org.apache.hadoop.io.Text), ApplicationMasterProtocol (org.apache.hadoop.yarn.api.ApplicationMasterProtocol), AMRMTokenSecretManager (org.apache.hadoop.yarn.server.resourcemanager.security.AMRMTokenSecretManager), YarnException (org.apache.hadoop.yarn.exceptions.YarnException), IOException (java.io.IOException), InvalidContainerRequestException (org.apache.hadoop.yarn.client.api.InvalidContainerRequestException), AllocateResponse (org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse), AMRMTokenIdentifier (org.apache.hadoop.yarn.security.AMRMTokenIdentifier), PrivilegedAction (java.security.PrivilegedAction), ContainerRequest (org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest), UserGroupInformation (org.apache.hadoop.security.UserGroupInformation), Test (org.junit.Test)
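
For a roll-over to be observable within the test's 60-second timeout, the ResourceManager must be configured with a short master-key rolling interval; rolling_interval_sec is a field set elsewhere in the test class. A sketch of the assumed wiring (the YarnConfiguration constant is real; where and how it is set here is an assumption):

    // Assumed @Before setup: roll the AMRM token master key every
    // rolling_interval_sec seconds so a roll-over happens mid-test.
    conf.setLong(YarnConfiguration.RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS, rolling_interval_sec);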

Aggregations

InvalidToken (org.apache.hadoop.security.token.SecretManager.InvalidToken): 28 uses
Test (org.junit.Test): 16 uses
IOException (java.io.IOException): 14 uses
DataInputStream (java.io.DataInputStream): 7 uses
UserGroupInformation (org.apache.hadoop.security.UserGroupInformation): 7 uses
Configuration (org.apache.hadoop.conf.Configuration): 6 uses
Text (org.apache.hadoop.io.Text): 6 uses
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 6 uses
ByteArrayInputStream (java.io.ByteArrayInputStream): 5 uses
YarnException (org.apache.hadoop.yarn.exceptions.YarnException): 5 uses
ByteBuffer (java.nio.ByteBuffer): 4 uses
Credentials (org.apache.hadoop.security.Credentials): 4 uses
Token (org.apache.hadoop.security.token.Token): 4 uses
PrivilegedExceptionAction (java.security.PrivilegedExceptionAction): 3 uses
ExtendedBlockId (org.apache.hadoop.hdfs.ExtendedBlockId): 3 uses
ShortCircuitCache (org.apache.hadoop.hdfs.shortcircuit.ShortCircuitCache): 3 uses
AccessControlException (org.apache.hadoop.security.AccessControlException): 3 uses
ContainerTokenIdentifier (org.apache.hadoop.yarn.security.ContainerTokenIdentifier): 3 uses
EOFException (java.io.EOFException): 2 uses
InetSocketAddress (java.net.InetSocketAddress): 2 uses