
Example 26 with DataInputBuffer

use of org.apache.hadoop.io.DataInputBuffer in project gora by apache.

the class TestIOUtils method testNullFieldsWith.

private void testNullFieldsWith(Object... values) throws IOException {
    DataOutputBuffer out = new DataOutputBuffer();
    DataInputBuffer in = new DataInputBuffer();
    IOUtils.writeNullFieldsInfo(out, values);
    in.reset(out.getData(), out.getLength());
    boolean[] ret = IOUtils.readNullFieldsInfo(in);
    // assert that the decoded null-field flags match the written values
    assertEquals(values.length, ret.length);
    for (int i = 0; i < values.length; i++) {
        assertEquals(values[i] == null, ret[i]);
    }
}
Also used : DataInputBuffer(org.apache.hadoop.io.DataInputBuffer) DataOutputBuffer(org.apache.hadoop.io.DataOutputBuffer)
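
The round trip above is the core DataInputBuffer idiom that every example on this page repeats: serialize a Writable into a DataOutputBuffer, point a DataInputBuffer at the backing array with reset(getData(), getLength()), then call readFields() on an empty instance. A minimal, self-contained sketch of that pattern (the class name is made up for illustration):

import java.io.IOException;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;

public class DataInputBufferRoundTrip {
    public static void main(String[] args) throws IOException {
        Text original = new Text("hello, buffers");
        // Write the Writable into the growable in-memory output buffer.
        DataOutputBuffer out = new DataOutputBuffer();
        original.write(out);
        // Point an input buffer at the bytes just written. getData() returns
        // the whole backing array, so only the first getLength() bytes are valid.
        DataInputBuffer in = new DataInputBuffer();
        in.reset(out.getData(), out.getLength());
        // Deserialize into an empty instance of the same Writable type.
        Text copy = new Text();
        copy.readFields(in);
        System.out.println(copy);
    }
}

Passing out.getLength() to reset() matters because getData() exposes the whole backing array, which is usually larger than the data actually written.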

Example 27 with DataInputBuffer

use of org.apache.hadoop.io.DataInputBuffer in project hadoop by apache.

the class TestClientRMTokens method testDelegationToken.

@Test
public void testDelegationToken() throws IOException, InterruptedException {
    final YarnConfiguration conf = new YarnConfiguration();
    conf.set(YarnConfiguration.RM_PRINCIPAL, "testuser/localhost@apache.org");
    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
    UserGroupInformation.setConfiguration(conf);
    ResourceScheduler scheduler = createMockScheduler(conf);
    long initialInterval = 10000l;
    long maxLifetime = 20000l;
    long renewInterval = 10000l;
    RMDelegationTokenSecretManager rmDtSecretManager = createRMDelegationTokenSecretManager(initialInterval, maxLifetime, renewInterval);
    rmDtSecretManager.startThreads();
    LOG.info("Creating DelegationTokenSecretManager with initialInterval: " + initialInterval + ", maxLifetime: " + maxLifetime + ", renewInterval: " + renewInterval);
    final ClientRMService clientRMService = new ClientRMServiceForTest(conf, scheduler, rmDtSecretManager);
    clientRMService.init(conf);
    clientRMService.start();
    ApplicationClientProtocol clientRMWithDT = null;
    try {
        // Create a user for the renewer and fake the authentication-method
        UserGroupInformation loggedInUser = UserGroupInformation.createRemoteUser("testrenewer@APACHE.ORG");
        Assert.assertEquals("testrenewer", loggedInUser.getShortUserName());
        // Default realm is APACHE.ORG
        loggedInUser.setAuthenticationMethod(AuthenticationMethod.KERBEROS);
        org.apache.hadoop.yarn.api.records.Token token = getDelegationToken(loggedInUser, clientRMService, loggedInUser.getShortUserName());
        long tokenFetchTime = System.currentTimeMillis();
        LOG.info("Got delegation token at: " + tokenFetchTime);
        // Now try talking to RMService using the delegation token
        clientRMWithDT = getClientRMProtocolWithDT(token, clientRMService.getBindAddress(), "loginuser1", conf);
        GetNewApplicationRequest request = Records.newRecord(GetNewApplicationRequest.class);
        try {
            clientRMWithDT.getNewApplication(request);
        } catch (IOException e) {
            fail("Unexpected exception" + e);
        } catch (YarnException e) {
            fail("Unexpected exception" + e);
        }
        // Renew after 50% of token age.
        while (System.currentTimeMillis() < tokenFetchTime + initialInterval / 2) {
            Thread.sleep(500l);
        }
        long nextExpTime = renewDelegationToken(loggedInUser, clientRMService, token);
        long renewalTime = System.currentTimeMillis();
        LOG.info("Renewed token at: " + renewalTime + ", NextExpiryTime: " + nextExpTime);
        // Wait for first expiry, but before renewed expiry.
        while (System.currentTimeMillis() > tokenFetchTime + initialInterval && System.currentTimeMillis() < nextExpTime) {
            Thread.sleep(500l);
        }
        Thread.sleep(50l);
        // Valid token because of renewal.
        try {
            clientRMWithDT.getNewApplication(request);
        } catch (IOException e) {
            fail("Unexpected exception" + e);
        } catch (YarnException e) {
            fail("Unexpected exception" + e);
        }
        // Wait for expiry.
        while (System.currentTimeMillis() < renewalTime + renewInterval) {
            Thread.sleep(500l);
        }
        Thread.sleep(50l);
        LOG.info("At time: " + System.currentTimeMillis() + ", token should be invalid");
        // Token should have expired.      
        try {
            clientRMWithDT.getNewApplication(request);
            fail("Should not have succeeded with an expired token");
        } catch (Exception e) {
            assertEquals(InvalidToken.class.getName(), e.getClass().getName());
            assertTrue(e.getMessage().contains("is expired"));
        }
        // Stop the existing proxy, start another.
        if (clientRMWithDT != null) {
            RPC.stopProxy(clientRMWithDT);
            clientRMWithDT = null;
        }
        token = getDelegationToken(loggedInUser, clientRMService, loggedInUser.getShortUserName());
        tokenFetchTime = System.currentTimeMillis();
        LOG.info("Got delegation token at: " + tokenFetchTime);
        // Now try talking to RMService using the delegation token
        clientRMWithDT = getClientRMProtocolWithDT(token, clientRMService.getBindAddress(), "loginuser2", conf);
        request = Records.newRecord(GetNewApplicationRequest.class);
        try {
            clientRMWithDT.getNewApplication(request);
        } catch (IOException e) {
            fail("Unexpected exception" + e);
        } catch (YarnException e) {
            fail("Unexpected exception" + e);
        }
        cancelDelegationToken(loggedInUser, clientRMService, token);
        if (clientRMWithDT != null) {
            RPC.stopProxy(clientRMWithDT);
            clientRMWithDT = null;
        }
        // Creating a new connection.
        clientRMWithDT = getClientRMProtocolWithDT(token, clientRMService.getBindAddress(), "loginuser2", conf);
        LOG.info("Cancelled delegation token at: " + System.currentTimeMillis());
        // Verify cancellation worked.
        try {
            clientRMWithDT.getNewApplication(request);
            fail("Should not have succeeded with a cancelled delegation token");
        } catch (IOException e) {
        } catch (YarnException e) {
        }
        // Stop the existing proxy, start another.
        if (clientRMWithDT != null) {
            RPC.stopProxy(clientRMWithDT);
            clientRMWithDT = null;
        }
        token = getDelegationToken(loggedInUser, clientRMService, loggedInUser.getShortUserName());
        byte[] tokenIdentifierContent = token.getIdentifier().array();
        RMDelegationTokenIdentifier tokenIdentifier = new RMDelegationTokenIdentifier();
        DataInputBuffer dib = new DataInputBuffer();
        dib.reset(tokenIdentifierContent, tokenIdentifierContent.length);
        tokenIdentifier.readFields(dib);
        // Construct new version RMDelegationTokenIdentifier with additional field
        RMDelegationTokenIdentifierForTest newVersionTokenIdentifier = new RMDelegationTokenIdentifierForTest(tokenIdentifier, "message");
        Token<RMDelegationTokenIdentifier> newRMDTtoken = new Token<RMDelegationTokenIdentifier>(newVersionTokenIdentifier, rmDtSecretManager);
        org.apache.hadoop.yarn.api.records.Token newToken = BuilderUtils.newDelegationToken(newRMDTtoken.getIdentifier(), newRMDTtoken.getKind().toString(), newRMDTtoken.getPassword(), newRMDTtoken.getService().toString());
        // Now try talking to RMService using the new version delegation token
        clientRMWithDT = getClientRMProtocolWithDT(newToken, clientRMService.getBindAddress(), "loginuser3", conf);
        request = Records.newRecord(GetNewApplicationRequest.class);
        try {
            clientRMWithDT.getNewApplication(request);
        } catch (IOException e) {
            fail("Unexpected exception" + e);
        } catch (YarnException e) {
            fail("Unexpected exception" + e);
        }
    } finally {
        rmDtSecretManager.stopThreads();
        // TODO PRECOMMIT Close proxies.
        if (clientRMWithDT != null) {
            RPC.stopProxy(clientRMWithDT);
        }
    }
}
Also used : InvalidToken(org.apache.hadoop.security.token.SecretManager.InvalidToken) Token(org.apache.hadoop.security.token.Token) IOException(java.io.IOException) RMDelegationTokenIdentifier(org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier) ApplicationClientProtocol(org.apache.hadoop.yarn.api.ApplicationClientProtocol) YarnException(org.apache.hadoop.yarn.exceptions.YarnException) GetNewApplicationRequest(org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest) DataInputBuffer(org.apache.hadoop.io.DataInputBuffer) RMDelegationTokenSecretManager(org.apache.hadoop.yarn.server.resourcemanager.security.RMDelegationTokenSecretManager) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) ResourceScheduler(org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation) Test(org.junit.Test)

Example 28 with DataInputBuffer

use of org.apache.hadoop.io.DataInputBuffer in project hbase by apache.

the class Writables method getWritable.

/**
   * Set bytes into the passed Writable by calling its
   * {@link Writable#readFields(java.io.DataInput)}.
   * @param bytes serialized bytes
   * @param offset offset into array
   * @param length length of data
   * @param w An empty Writable (usually made by calling the null-arg
   * constructor).
   * @return The passed Writable after its readFields has been fed the
   * passed <code>bytes</code> array.
   * @throws IOException e
   * @throws IllegalArgumentException if passed a null or empty
   * <code>bytes</code> array, or a null Writable.
   */
public static Writable getWritable(final byte[] bytes, final int offset, final int length, final Writable w) throws IOException {
    if (bytes == null || length <= 0) {
        throw new IllegalArgumentException("Can't build a writable with empty " + "bytes array");
    }
    if (w == null) {
        throw new IllegalArgumentException("Writable cannot be null");
    }
    DataInputBuffer in = new DataInputBuffer();
    try {
        in.reset(bytes, offset, length);
        w.readFields(in);
        return w;
    } finally {
        in.close();
    }
}
Also used : DataInputBuffer(org.apache.hadoop.io.DataInputBuffer)
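
A short usage sketch for the method above, assuming the companion getBytes(Writable) helper and the two-argument getWritable(byte[], Writable) overload that sit alongside it in the same Writables class (the wrapper class here is illustrative):

import java.io.IOException;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.io.Text;

public class WritablesRoundTripExample {
    public static void main(String[] args) throws IOException {
        // Serialize a Text, then rehydrate an empty Text from those bytes.
        byte[] serialized = Writables.getBytes(new Text("row-key"));
        Text copy = (Text) Writables.getWritable(serialized, new Text());
        System.out.println(copy);
    }
}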

Example 29 with DataInputBuffer

use of org.apache.hadoop.io.DataInputBuffer in project hadoop by apache.

the class TestContainerManagerSecurity method getContainerTokenIdentifierFromToken.

private ContainerTokenIdentifier getContainerTokenIdentifierFromToken(Token containerToken) throws IOException {
    ContainerTokenIdentifier containerTokenIdentifier;
    containerTokenIdentifier = new ContainerTokenIdentifier();
    byte[] tokenIdentifierContent = containerToken.getIdentifier().array();
    DataInputBuffer dib = new DataInputBuffer();
    dib.reset(tokenIdentifierContent, tokenIdentifierContent.length);
    containerTokenIdentifier.readFields(dib);
    return containerTokenIdentifier;
}
Also used : DataInputBuffer(org.apache.hadoop.io.DataInputBuffer) ContainerTokenIdentifier(org.apache.hadoop.yarn.security.ContainerTokenIdentifier)
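
Examples 27, 29 and 30 all decode a YARN token identifier the same way: copy the raw identifier bytes out of the token, reset a DataInputBuffer over them, and let readFields() repopulate an empty identifier instance. A generic sketch of that pattern (this helper class and method are hypothetical, not part of the projects quoted above):

import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.Writable;

public final class TokenIdentifierDecoder {

    private TokenIdentifierDecoder() {
    }

    // Deserialize any Writable token identifier from the ByteBuffer
    // returned by token.getIdentifier().
    public static <T extends Writable> T decode(ByteBuffer identifier, T empty) throws IOException {
        // Copy the bytes without disturbing the caller's buffer position.
        byte[] bytes = new byte[identifier.remaining()];
        identifier.duplicate().get(bytes);
        DataInputBuffer dib = new DataInputBuffer();
        try {
            dib.reset(bytes, bytes.length);
            // readFields() fills in the fields of the empty identifier.
            empty.readFields(dib);
            return empty;
        } finally {
            dib.close();
        }
    }
}

For instance, getContainerTokenIdentifierFromToken above could be written as TokenIdentifierDecoder.decode(containerToken.getIdentifier(), new ContainerTokenIdentifier()).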

Example 30 with DataInputBuffer

use of org.apache.hadoop.io.DataInputBuffer in project hadoop by apache.

the class TestContainerManagerSecurity method testNMTokens.

private void testNMTokens(Configuration conf) throws Exception {
    NMTokenSecretManagerInRM nmTokenSecretManagerRM = yarnCluster.getResourceManager().getRMContext().getNMTokenSecretManager();
    NMTokenSecretManagerInNM nmTokenSecretManagerNM = yarnCluster.getNodeManager(0).getNMContext().getNMTokenSecretManager();
    RMContainerTokenSecretManager containerTokenSecretManager = yarnCluster.getResourceManager().getRMContext().getContainerTokenSecretManager();
    NodeManager nm = yarnCluster.getNodeManager(0);
    waitForNMToReceiveNMTokenKey(nmTokenSecretManagerNM, nm);
    // Both key ids should be equal.
    Assert.assertEquals(nmTokenSecretManagerNM.getCurrentKey().getKeyId(), nmTokenSecretManagerRM.getCurrentKey().getKeyId());
    /*
     * Below cases should be tested.
     * 1) If Invalid NMToken is used then it should be rejected.
     * 2) If valid NMToken but belonging to another Node is used then that
     * too should be rejected.
     * 3) NMToken for say appAttempt-1 is used for starting/stopping/retrieving
     * status for container with containerId for say appAttempt-2 should
     * be rejected.
     * 4) After start container call is successful nmtoken should have been
     * saved in NMTokenSecretManagerInNM.
     * 5) If start container call was successful (no matter if container is
     * still running or not), appAttempt->NMToken should be present in
     * NMTokenSecretManagerInNM's cache. Any future getContainerStatus call
     * for containerId belonging to that application attempt using
     * applicationAttempt's older nmToken should not get any invalid
     * nmToken error. (This can be best tested if we roll over NMToken
     * master key twice).
     */
    YarnRPC rpc = YarnRPC.create(conf);
    String user = "test";
    Resource r = Resource.newInstance(1024, 1);
    ApplicationId appId = ApplicationId.newInstance(1, 1);
    MockRMApp m = new MockRMApp(appId.getId(), appId.getClusterTimestamp(), RMAppState.NEW);
    yarnCluster.getResourceManager().getRMContext().getRMApps().put(appId, m);
    ApplicationAttemptId validAppAttemptId = ApplicationAttemptId.newInstance(appId, 1);
    ContainerId validContainerId = ContainerId.newContainerId(validAppAttemptId, 0);
    NodeId validNode = yarnCluster.getNodeManager(0).getNMContext().getNodeId();
    NodeId invalidNode = NodeId.newInstance("InvalidHost", 1234);
    org.apache.hadoop.yarn.api.records.Token validNMToken = nmTokenSecretManagerRM.createNMToken(validAppAttemptId, validNode, user);
    org.apache.hadoop.yarn.api.records.Token validContainerToken = containerTokenSecretManager.createContainerToken(validContainerId, 0, validNode, user, r, Priority.newInstance(10), 1234);
    ContainerTokenIdentifier identifier = BuilderUtils.newContainerTokenIdentifier(validContainerToken);
    Assert.assertEquals(Priority.newInstance(10), identifier.getPriority());
    Assert.assertEquals(1234, identifier.getCreationTime());
    StringBuilder sb;
    // testInvalidNMToken ... creating NMToken using different secret manager.
    NMTokenSecretManagerInRM tempManager = new NMTokenSecretManagerInRM(conf);
    tempManager.rollMasterKey();
    do {
        tempManager.rollMasterKey();
        tempManager.activateNextMasterKey();
    // Making sure key id is different.
    } while (tempManager.getCurrentKey().getKeyId() == nmTokenSecretManagerRM.getCurrentKey().getKeyId());
    // Testing that NM rejects the requests when we don't send any token.
    if (UserGroupInformation.isSecurityEnabled()) {
        sb = new StringBuilder("Client cannot authenticate via:[TOKEN]");
    } else {
        sb = new StringBuilder("SIMPLE authentication is not enabled.  Available:[TOKEN]");
    }
    String errorMsg = testStartContainer(rpc, validAppAttemptId, validNode, validContainerToken, null, true);
    Assert.assertTrue(errorMsg.contains(sb.toString()));
    org.apache.hadoop.yarn.api.records.Token invalidNMToken = tempManager.createNMToken(validAppAttemptId, validNode, user);
    sb = new StringBuilder("Given NMToken for application : ");
    sb.append(validAppAttemptId.toString()).append(" seems to have been generated illegally.");
    Assert.assertTrue(sb.toString().contains(testStartContainer(rpc, validAppAttemptId, validNode, validContainerToken, invalidNMToken, true)));
    // valid NMToken but belonging to other node
    invalidNMToken = nmTokenSecretManagerRM.createNMToken(validAppAttemptId, invalidNode, user);
    sb = new StringBuilder("Given NMToken for application : ");
    sb.append(validAppAttemptId).append(" is not valid for current node manager.expected : ").append(validNode.toString()).append(" found : ").append(invalidNode.toString());
    Assert.assertTrue(sb.toString().contains(testStartContainer(rpc, validAppAttemptId, validNode, validContainerToken, invalidNMToken, true)));
    // using correct tokens. nmtoken for app attempt should get saved.
    conf.setInt(YarnConfiguration.RM_CONTAINER_ALLOC_EXPIRY_INTERVAL_MS, 4 * 60 * 1000);
    validContainerToken = containerTokenSecretManager.createContainerToken(validContainerId, 0, validNode, user, r, Priority.newInstance(0), 0);
    Assert.assertTrue(testStartContainer(rpc, validAppAttemptId, validNode, validContainerToken, validNMToken, false).isEmpty());
    Assert.assertTrue(nmTokenSecretManagerNM.isAppAttemptNMTokenKeyPresent(validAppAttemptId));
    // using a new compatible version nmtoken, expect container can be started 
    // successfully.
    ApplicationAttemptId validAppAttemptId2 = ApplicationAttemptId.newInstance(appId, 2);
    ContainerId validContainerId2 = ContainerId.newContainerId(validAppAttemptId2, 0);
    org.apache.hadoop.yarn.api.records.Token validContainerToken2 = containerTokenSecretManager.createContainerToken(validContainerId2, 0, validNode, user, r, Priority.newInstance(0), 0);
    org.apache.hadoop.yarn.api.records.Token validNMToken2 = nmTokenSecretManagerRM.createNMToken(validAppAttemptId2, validNode, user);
    // First, get a new NMTokenIdentifier.
    NMTokenIdentifier newIdentifier = new NMTokenIdentifier();
    byte[] tokenIdentifierContent = validNMToken2.getIdentifier().array();
    DataInputBuffer dib = new DataInputBuffer();
    dib.reset(tokenIdentifierContent, tokenIdentifierContent.length);
    newIdentifier.readFields(dib);
    // Then, generate a new version NMTokenIdentifier (NMTokenIdentifierNewForTest)
    // with additional field of message.
    NMTokenIdentifierNewForTest newVersionIdentifier = new NMTokenIdentifierNewForTest(newIdentifier, "message");
    // check new version NMTokenIdentifier has correct info.
    Assert.assertEquals("The ApplicationAttemptId is changed after set to " + "newVersionIdentifier", validAppAttemptId2.getAttemptId(), newVersionIdentifier.getApplicationAttemptId().getAttemptId());
    Assert.assertEquals("The message is changed after set to newVersionIdentifier", "message", newVersionIdentifier.getMessage());
    Assert.assertEquals("The NodeId is changed after set to newVersionIdentifier", validNode, newVersionIdentifier.getNodeId());
    // create new Token based on new version NMTokenIdentifier.
    org.apache.hadoop.yarn.api.records.Token newVersionedNMToken = BaseNMTokenSecretManager.newInstance(nmTokenSecretManagerRM.retrievePassword(newVersionIdentifier), newVersionIdentifier);
    // Verify startContainer is successful and no exception is thrown.
    Assert.assertTrue(testStartContainer(rpc, validAppAttemptId2, validNode, validContainerToken2, newVersionedNMToken, false).isEmpty());
    Assert.assertTrue(nmTokenSecretManagerNM.isAppAttemptNMTokenKeyPresent(validAppAttemptId2));
    // Now let's wait till the container finishes and is removed from the node manager.
    waitForContainerToFinishOnNM(validContainerId);
    sb = new StringBuilder("Attempt to relaunch the same container with id ");
    sb.append(validContainerId);
    Assert.assertTrue(testStartContainer(rpc, validAppAttemptId, validNode, validContainerToken, validNMToken, true).contains(sb.toString()));
    // Container is removed from node manager's memory by this time.
    // trying to stop the container. It should not throw any exception.
    testStopContainer(rpc, validAppAttemptId, validNode, validContainerId, validNMToken, false);
    // Rolling over master key twice so that we can check whether older keys
    // are used for authentication.
    rollNMTokenMasterKey(nmTokenSecretManagerRM, nmTokenSecretManagerNM);
    // Key rolled over once.. rolling over again
    rollNMTokenMasterKey(nmTokenSecretManagerRM, nmTokenSecretManagerNM);
    // trying get container status. Now saved nmToken should be used for
    // authentication... It should complain saying container was recently
    // stopped.
    sb = new StringBuilder("Container ");
    sb.append(validContainerId);
    sb.append(" was recently stopped on node manager");
    Assert.assertTrue(testGetContainer(rpc, validAppAttemptId, validNode, validContainerId, validNMToken, true).contains(sb.toString()));
    // Now lets remove the container from nm-memory
    nm.getNodeStatusUpdater().clearFinishedContainersFromCache();
    // This should fail as container is removed from recently tracked finished
    // containers.
    sb = new StringBuilder("Container ");
    sb.append(validContainerId.toString());
    sb.append(" is not handled by this NodeManager");
    Assert.assertTrue(testGetContainer(rpc, validAppAttemptId, validNode, validContainerId, validNMToken, false).contains(sb.toString()));
    // using appAttempt-1 NMtoken for launching container for appAttempt-2 should
    // succeed.
    ApplicationAttemptId attempt2 = ApplicationAttemptId.newInstance(appId, 2);
    Token attempt1NMToken = nmTokenSecretManagerRM.createNMToken(validAppAttemptId, validNode, user);
    org.apache.hadoop.yarn.api.records.Token newContainerToken = containerTokenSecretManager.createContainerToken(ContainerId.newContainerId(attempt2, 1), 0, validNode, user, r, Priority.newInstance(0), 0);
    Assert.assertTrue(testStartContainer(rpc, attempt2, validNode, newContainerToken, attempt1NMToken, false).isEmpty());
}
Also used : NMTokenIdentifier(org.apache.hadoop.yarn.security.NMTokenIdentifier) MockRMApp(org.apache.hadoop.yarn.server.resourcemanager.rmapp.MockRMApp) Resource(org.apache.hadoop.yarn.api.records.Resource) NMTokenSecretManagerInNM(org.apache.hadoop.yarn.server.nodemanager.security.NMTokenSecretManagerInNM) InvalidToken(org.apache.hadoop.security.token.SecretManager.InvalidToken) Token(org.apache.hadoop.yarn.api.records.Token) YarnRPC(org.apache.hadoop.yarn.ipc.YarnRPC) ApplicationAttemptId(org.apache.hadoop.yarn.api.records.ApplicationAttemptId) NMTokenSecretManagerInRM(org.apache.hadoop.yarn.server.resourcemanager.security.NMTokenSecretManagerInRM) ContainerTokenIdentifier(org.apache.hadoop.yarn.security.ContainerTokenIdentifier) NodeManager(org.apache.hadoop.yarn.server.nodemanager.NodeManager) DataInputBuffer(org.apache.hadoop.io.DataInputBuffer) ContainerId(org.apache.hadoop.yarn.api.records.ContainerId) NodeId(org.apache.hadoop.yarn.api.records.NodeId) RMContainerTokenSecretManager(org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId)

Aggregations

DataInputBuffer (org.apache.hadoop.io.DataInputBuffer): 68 uses
Test (org.junit.Test): 37 uses
DataOutputBuffer (org.apache.hadoop.io.DataOutputBuffer): 36 uses
IOException (java.io.IOException): 16 uses
Text (org.apache.hadoop.io.Text): 10 uses
BufferedInputStream (java.io.BufferedInputStream): 8 uses
DataInputStream (java.io.DataInputStream): 8 uses
Random (java.util.Random): 8 uses
Configuration (org.apache.hadoop.conf.Configuration): 7 uses
Path (org.apache.hadoop.fs.Path): 7 uses
DataOutputStream (java.io.DataOutputStream): 6 uses
BufferedOutputStream (java.io.BufferedOutputStream): 5 uses
ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId): 5 uses
FileSystem (org.apache.hadoop.fs.FileSystem): 4 uses
BytesWritable (org.apache.hadoop.io.BytesWritable): 4 uses
InputStream (java.io.InputStream): 3 uses
HashMap (java.util.HashMap): 3 uses
RandomDatum (org.apache.hadoop.io.RandomDatum): 3 uses
InvalidToken (org.apache.hadoop.security.token.SecretManager.InvalidToken): 3 uses
ContainerId (org.apache.hadoop.yarn.api.records.ContainerId): 3 uses