Use of org.apache.hadoop.io.DataInputBuffer in project gora by apache.
The class TestIOUtils, method testNullFieldsWith:
private void testNullFieldsWith(Object... values) throws IOException {
  DataOutputBuffer out = new DataOutputBuffer();
  DataInputBuffer in = new DataInputBuffer();
  IOUtils.writeNullFieldsInfo(out, values);
  in.reset(out.getData(), out.getLength());
  boolean[] ret = IOUtils.readNullFieldsInfo(in);
  // assert that the decoded null-field flags mirror the inputs
  assertEquals(values.length, ret.length);
  for (int i = 0; i < values.length; i++) {
    assertEquals(values[i] == null, ret[i]);
  }
}
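The write-then-reset-then-read round trip above is the core DataInputBuffer pattern. Below is a minimal, self-contained sketch of the same idea using stock Hadoop Writables; the class name and values are illustrative, not taken from Gora.
import java.io.IOException;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;

public class DataInputBufferRoundTrip {
  public static void main(String[] args) throws IOException {
    // Serialize two Writables into an in-memory buffer.
    DataOutputBuffer out = new DataOutputBuffer();
    new Text("hello").write(out);
    new IntWritable(42).write(out);
    // Point a DataInputBuffer at the written bytes; reset() wraps them without copying.
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    // Read the fields back in the order they were written.
    Text text = new Text();
    text.readFields(in);
    IntWritable number = new IntWritable();
    number.readFields(in);
    System.out.println(text + " " + number.get());  // prints: hello 42
  }
}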
Use of org.apache.hadoop.io.DataInputBuffer in project hadoop by apache.
The class TestClientRMTokens, method testDelegationToken:
@Test
public void testDelegationToken() throws IOException, InterruptedException {
final YarnConfiguration conf = new YarnConfiguration();
conf.set(YarnConfiguration.RM_PRINCIPAL, "testuser/localhost@apache.org");
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
UserGroupInformation.setConfiguration(conf);
ResourceScheduler scheduler = createMockScheduler(conf);
long initialInterval = 10000L;
long maxLifetime = 20000L;
long renewInterval = 10000L;
RMDelegationTokenSecretManager rmDtSecretManager = createRMDelegationTokenSecretManager(initialInterval, maxLifetime, renewInterval);
rmDtSecretManager.startThreads();
LOG.info("Creating DelegationTokenSecretManager with initialInterval: " + initialInterval + ", maxLifetime: " + maxLifetime + ", renewInterval: " + renewInterval);
final ClientRMService clientRMService = new ClientRMServiceForTest(conf, scheduler, rmDtSecretManager);
clientRMService.init(conf);
clientRMService.start();
ApplicationClientProtocol clientRMWithDT = null;
try {
// Create a user for the renewer and fake the authentication method
UserGroupInformation loggedInUser = UserGroupInformation.createRemoteUser("testrenewer@APACHE.ORG");
Assert.assertEquals("testrenewer", loggedInUser.getShortUserName());
// Default realm is APACHE.ORG
loggedInUser.setAuthenticationMethod(AuthenticationMethod.KERBEROS);
org.apache.hadoop.yarn.api.records.Token token = getDelegationToken(loggedInUser, clientRMService, loggedInUser.getShortUserName());
long tokenFetchTime = System.currentTimeMillis();
LOG.info("Got delegation token at: " + tokenFetchTime);
// Now try talking to RMService using the delegation token
clientRMWithDT = getClientRMProtocolWithDT(token, clientRMService.getBindAddress(), "loginuser1", conf);
GetNewApplicationRequest request = Records.newRecord(GetNewApplicationRequest.class);
try {
clientRMWithDT.getNewApplication(request);
} catch (IOException e) {
fail("Unexpected exception" + e);
} catch (YarnException e) {
fail("Unexpected exception" + e);
}
// Renew after 50% of token age.
while (System.currentTimeMillis() < tokenFetchTime + initialInterval / 2) {
Thread.sleep(500L);
}
long nextExpTime = renewDelegationToken(loggedInUser, clientRMService, token);
long renewalTime = System.currentTimeMillis();
LOG.info("Renewed token at: " + renewalTime + ", NextExpiryTime: " + nextExpTime);
// Wait for first expiry, but before renewed expiry.
while (System.currentTimeMillis() > tokenFetchTime + initialInterval && System.currentTimeMillis() < nextExpTime) {
Thread.sleep(500L);
}
Thread.sleep(50L);
// Valid token because of renewal.
try {
clientRMWithDT.getNewApplication(request);
} catch (IOException e) {
fail("Unexpected exception" + e);
} catch (YarnException e) {
fail("Unexpected exception" + e);
}
// Wait for expiry.
while (System.currentTimeMillis() < renewalTime + renewInterval) {
Thread.sleep(500L);
}
Thread.sleep(50L);
LOG.info("At time: " + System.currentTimeMillis() + ", token should be invalid");
// Token should have expired.
try {
clientRMWithDT.getNewApplication(request);
fail("Should not have succeeded with an expired token");
} catch (Exception e) {
assertEquals(InvalidToken.class.getName(), e.getClass().getName());
assertTrue(e.getMessage().contains("is expired"));
}
// Stop the existing proxy, start another.
if (clientRMWithDT != null) {
RPC.stopProxy(clientRMWithDT);
clientRMWithDT = null;
}
token = getDelegationToken(loggedInUser, clientRMService, loggedInUser.getShortUserName());
tokenFetchTime = System.currentTimeMillis();
LOG.info("Got delegation token at: " + tokenFetchTime);
// Now try talking to RMService using the delegation token
clientRMWithDT = getClientRMProtocolWithDT(token, clientRMService.getBindAddress(), "loginuser2", conf);
request = Records.newRecord(GetNewApplicationRequest.class);
try {
clientRMWithDT.getNewApplication(request);
} catch (IOException e) {
fail("Unexpected exception" + e);
} catch (YarnException e) {
fail("Unexpected exception" + e);
}
cancelDelegationToken(loggedInUser, clientRMService, token);
if (clientRMWithDT != null) {
RPC.stopProxy(clientRMWithDT);
clientRMWithDT = null;
}
// Creating a new connection.
clientRMWithDT = getClientRMProtocolWithDT(token, clientRMService.getBindAddress(), "loginuser2", conf);
LOG.info("Cancelled delegation token at: " + System.currentTimeMillis());
// Verify cancellation worked.
try {
clientRMWithDT.getNewApplication(request);
fail("Should not have succeeded with a cancelled delegation token");
} catch (IOException e) {
} catch (YarnException e) {
}
// Stop the existing proxy, start another.
if (clientRMWithDT != null) {
RPC.stopProxy(clientRMWithDT);
clientRMWithDT = null;
}
token = getDelegationToken(loggedInUser, clientRMService, loggedInUser.getShortUserName());
byte[] tokenIdentifierContent = token.getIdentifier().array();
RMDelegationTokenIdentifier tokenIdentifier = new RMDelegationTokenIdentifier();
DataInputBuffer dib = new DataInputBuffer();
dib.reset(tokenIdentifierContent, tokenIdentifierContent.length);
tokenIdentifier.readFields(dib);
// Construct new version RMDelegationTokenIdentifier with additional field
RMDelegationTokenIdentifierForTest newVersionTokenIdentifier = new RMDelegationTokenIdentifierForTest(tokenIdentifier, "message");
Token<RMDelegationTokenIdentifier> newRMDTtoken = new Token<RMDelegationTokenIdentifier>(newVersionTokenIdentifier, rmDtSecretManager);
org.apache.hadoop.yarn.api.records.Token newToken = BuilderUtils.newDelegationToken(newRMDTtoken.getIdentifier(), newRMDTtoken.getKind().toString(), newRMDTtoken.getPassword(), newRMDTtoken.getService().toString());
// Now try talking to RMService using the new version delegation token
clientRMWithDT = getClientRMProtocolWithDT(newToken, clientRMService.getBindAddress(), "loginuser3", conf);
request = Records.newRecord(GetNewApplicationRequest.class);
try {
clientRMWithDT.getNewApplication(request);
} catch (IOException e) {
fail("Unexpected exception" + e);
} catch (YarnException e) {
fail("Unexpected exception" + e);
}
} finally {
rmDtSecretManager.stopThreads();
// TODO PRECOMMIT Close proxies.
if (clientRMWithDT != null) {
RPC.stopProxy(clientRMWithDT);
}
}
}
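The DataInputBuffer work near the end of this test (reading the fetched token's identifier bytes back into an RMDelegationTokenIdentifier) can be isolated into a small helper. The following is a hedged sketch, assuming org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier and the YARN records Token are on the classpath; the method name is illustrative.
// Decode the raw identifier bytes carried by a YARN delegation token.
private static RMDelegationTokenIdentifier decodeIdentifier(
    org.apache.hadoop.yarn.api.records.Token token) throws IOException {
  byte[] identifierBytes = token.getIdentifier().array();
  DataInputBuffer dib = new DataInputBuffer();
  dib.reset(identifierBytes, identifierBytes.length);  // wraps the array, no copy
  RMDelegationTokenIdentifier identifier = new RMDelegationTokenIdentifier();
  identifier.readFields(dib);
  return identifier;
}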
Use of org.apache.hadoop.io.DataInputBuffer in project hbase by apache.
The class Writables, method getWritable:
/**
* Set bytes into the passed Writable by calling its
* {@link Writable#readFields(java.io.DataInput)}.
* @param bytes serialized bytes
* @param offset offset into array
* @param length length of data
* @param w An empty Writable (usually made by calling the null-arg
* constructor).
* @return The passed Writable after its readFields has been called, fed
* by the passed <code>bytes</code> array
* @throws IOException if deserializing into <code>w</code> fails
* @throws IllegalArgumentException if passed a null or empty
* <code>bytes</code> array, or a null <code>w</code>
*/
public static Writable getWritable(final byte[] bytes, final int offset, final int length, final Writable w) throws IOException {
  if (bytes == null || length <= 0) {
    throw new IllegalArgumentException("Can't build a writable with empty " + "bytes array");
  }
  if (w == null) {
    throw new IllegalArgumentException("Writable cannot be null");
  }
  DataInputBuffer in = new DataInputBuffer();
  try {
    in.reset(bytes, offset, length);
    w.readFields(in);
    return w;
  } finally {
    in.close();
  }
}
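A hypothetical usage of the helper above; the input bytes are produced with a DataOutputBuffer so the whole round trip stays in memory, and the Text value is illustrative.
DataOutputBuffer out = new DataOutputBuffer();
new Text("row-key").write(out);
// getData() returns the (possibly larger) backing array, so pass getLength() explicitly.
Text copy = (Text) getWritable(out.getData(), 0, out.getLength(), new Text());
assert "row-key".equals(copy.toString());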
Use of org.apache.hadoop.io.DataInputBuffer in project hadoop by apache.
The class TestContainerManagerSecurity, method getContainerTokenIdentifierFromToken:
private ContainerTokenIdentifier getContainerTokenIdentifierFromToken(Token containerToken) throws IOException {
  ContainerTokenIdentifier containerTokenIdentifier = new ContainerTokenIdentifier();
  byte[] tokenIdentifierContent = containerToken.getIdentifier().array();
  DataInputBuffer dib = new DataInputBuffer();
  dib.reset(tokenIdentifierContent, tokenIdentifierContent.length);
  containerTokenIdentifier.readFields(dib);
  return containerTokenIdentifier;
}
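The three steps here (grab the identifier bytes, reset a DataInputBuffer over them, call readFields) are the same for every identifier type, so the method generalizes naturally. The following generic helper is a hedged sketch, not part of the Hadoop tests; the name is illustrative.
// Deserialize any Writable identifier from a YARN token's identifier bytes.
private static <T extends Writable> T readIdentifier(
    org.apache.hadoop.yarn.api.records.Token token, T identifier) throws IOException {
  byte[] bytes = token.getIdentifier().array();
  DataInputBuffer dib = new DataInputBuffer();
  dib.reset(bytes, bytes.length);
  identifier.readFields(dib);
  return identifier;
}
With it, the method above reduces to a one-liner: return readIdentifier(containerToken, new ContainerTokenIdentifier());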
Use of org.apache.hadoop.io.DataInputBuffer in project hadoop by apache.
The class TestContainerManagerSecurity, method testNMTokens:
private void testNMTokens(Configuration conf) throws Exception {
NMTokenSecretManagerInRM nmTokenSecretManagerRM = yarnCluster.getResourceManager().getRMContext().getNMTokenSecretManager();
NMTokenSecretManagerInNM nmTokenSecretManagerNM = yarnCluster.getNodeManager(0).getNMContext().getNMTokenSecretManager();
RMContainerTokenSecretManager containerTokenSecretManager = yarnCluster.getResourceManager().getRMContext().getContainerTokenSecretManager();
NodeManager nm = yarnCluster.getNodeManager(0);
waitForNMToReceiveNMTokenKey(nmTokenSecretManagerNM, nm);
// Both key ids should be equal.
Assert.assertEquals(nmTokenSecretManagerNM.getCurrentKey().getKeyId(), nmTokenSecretManagerRM.getCurrentKey().getKeyId());
/*
* Below cases should be tested.
* 1) If Invalid NMToken is used then it should be rejected.
* 2) If valid NMToken but belonging to another Node is used then that
* too should be rejected.
* 3) NMToken for say appAttempt-1 is used for starting/stopping/retrieving
* status for container with containerId for say appAttempt-2 should
* be rejected.
* 4) After start container call is successful nmtoken should have been
* saved in NMTokenSecretManagerInNM.
* 5) If start container call was successful (no matter if container is
* still running or not), appAttempt->NMToken should be present in
* NMTokenSecretManagerInNM's cache. Any future getContainerStatus call
* for containerId belonging to that application attempt using
* applicationAttempt's older nmToken should not get any invalid
* nmToken error. (This can be best tested if we roll over NMToken
* master key twice).
*/
YarnRPC rpc = YarnRPC.create(conf);
String user = "test";
Resource r = Resource.newInstance(1024, 1);
ApplicationId appId = ApplicationId.newInstance(1, 1);
MockRMApp m = new MockRMApp(appId.getId(), appId.getClusterTimestamp(), RMAppState.NEW);
yarnCluster.getResourceManager().getRMContext().getRMApps().put(appId, m);
ApplicationAttemptId validAppAttemptId = ApplicationAttemptId.newInstance(appId, 1);
ContainerId validContainerId = ContainerId.newContainerId(validAppAttemptId, 0);
NodeId validNode = yarnCluster.getNodeManager(0).getNMContext().getNodeId();
NodeId invalidNode = NodeId.newInstance("InvalidHost", 1234);
org.apache.hadoop.yarn.api.records.Token validNMToken = nmTokenSecretManagerRM.createNMToken(validAppAttemptId, validNode, user);
org.apache.hadoop.yarn.api.records.Token validContainerToken = containerTokenSecretManager.createContainerToken(validContainerId, 0, validNode, user, r, Priority.newInstance(10), 1234);
ContainerTokenIdentifier identifier = BuilderUtils.newContainerTokenIdentifier(validContainerToken);
Assert.assertEquals(Priority.newInstance(10), identifier.getPriority());
Assert.assertEquals(1234, identifier.getCreationTime());
StringBuilder sb;
// testInvalidNMToken ... creating NMToken using different secret manager.
NMTokenSecretManagerInRM tempManager = new NMTokenSecretManagerInRM(conf);
tempManager.rollMasterKey();
do {
tempManager.rollMasterKey();
tempManager.activateNextMasterKey();
// Making sure key id is different.
} while (tempManager.getCurrentKey().getKeyId() == nmTokenSecretManagerRM.getCurrentKey().getKeyId());
// Testing that NM rejects the requests when we don't send any token.
if (UserGroupInformation.isSecurityEnabled()) {
sb = new StringBuilder("Client cannot authenticate via:[TOKEN]");
} else {
sb = new StringBuilder("SIMPLE authentication is not enabled. Available:[TOKEN]");
}
String errorMsg = testStartContainer(rpc, validAppAttemptId, validNode, validContainerToken, null, true);
Assert.assertTrue(errorMsg.contains(sb.toString()));
org.apache.hadoop.yarn.api.records.Token invalidNMToken = tempManager.createNMToken(validAppAttemptId, validNode, user);
sb = new StringBuilder("Given NMToken for application : ");
sb.append(validAppAttemptId.toString()).append(" seems to have been generated illegally.");
Assert.assertTrue(sb.toString().contains(testStartContainer(rpc, validAppAttemptId, validNode, validContainerToken, invalidNMToken, true)));
// valid NMToken but belonging to other node
invalidNMToken = nmTokenSecretManagerRM.createNMToken(validAppAttemptId, invalidNode, user);
sb = new StringBuilder("Given NMToken for application : ");
sb.append(validAppAttemptId).append(" is not valid for current node manager.expected : ").append(validNode.toString()).append(" found : ").append(invalidNode.toString());
Assert.assertTrue(sb.toString().contains(testStartContainer(rpc, validAppAttemptId, validNode, validContainerToken, invalidNMToken, true)));
// Using correct tokens; the NMToken for the app attempt should get saved.
conf.setInt(YarnConfiguration.RM_CONTAINER_ALLOC_EXPIRY_INTERVAL_MS, 4 * 60 * 1000);
validContainerToken = containerTokenSecretManager.createContainerToken(validContainerId, 0, validNode, user, r, Priority.newInstance(0), 0);
Assert.assertTrue(testStartContainer(rpc, validAppAttemptId, validNode, validContainerToken, validNMToken, false).isEmpty());
Assert.assertTrue(nmTokenSecretManagerNM.isAppAttemptNMTokenKeyPresent(validAppAttemptId));
// using a new compatible version nmtoken, expect container can be started
// successfully.
ApplicationAttemptId validAppAttemptId2 = ApplicationAttemptId.newInstance(appId, 2);
ContainerId validContainerId2 = ContainerId.newContainerId(validAppAttemptId2, 0);
org.apache.hadoop.yarn.api.records.Token validContainerToken2 = containerTokenSecretManager.createContainerToken(validContainerId2, 0, validNode, user, r, Priority.newInstance(0), 0);
org.apache.hadoop.yarn.api.records.Token validNMToken2 = nmTokenSecretManagerRM.createNMToken(validAppAttemptId2, validNode, user);
// First, get a new NMTokenIdentifier.
NMTokenIdentifier newIdentifier = new NMTokenIdentifier();
byte[] tokenIdentifierContent = validNMToken2.getIdentifier().array();
DataInputBuffer dib = new DataInputBuffer();
dib.reset(tokenIdentifierContent, tokenIdentifierContent.length);
newIdentifier.readFields(dib);
// Then, generate a new version NMTokenIdentifier (NMTokenIdentifierNewForTest)
// with additional field of message.
NMTokenIdentifierNewForTest newVersionIdentifier = new NMTokenIdentifierNewForTest(newIdentifier, "message");
// check new version NMTokenIdentifier has correct info.
Assert.assertEquals("The ApplicationAttemptId is changed after set to " + "newVersionIdentifier", validAppAttemptId2.getAttemptId(), newVersionIdentifier.getApplicationAttemptId().getAttemptId());
Assert.assertEquals("The message is changed after set to newVersionIdentifier", "message", newVersionIdentifier.getMessage());
Assert.assertEquals("The NodeId is changed after set to newVersionIdentifier", validNode, newVersionIdentifier.getNodeId());
// create new Token based on new version NMTokenIdentifier.
org.apache.hadoop.yarn.api.records.Token newVersionedNMToken = BaseNMTokenSecretManager.newInstance(nmTokenSecretManagerRM.retrievePassword(newVersionIdentifier), newVersionIdentifier);
// Verify startContainer is successful and no exception is thrown.
Assert.assertTrue(testStartContainer(rpc, validAppAttemptId2, validNode, validContainerToken2, newVersionedNMToken, false).isEmpty());
Assert.assertTrue(nmTokenSecretManagerNM.isAppAttemptNMTokenKeyPresent(validAppAttemptId2));
// Now let's wait till the container finishes and is removed from the node manager.
waitForContainerToFinishOnNM(validContainerId);
sb = new StringBuilder("Attempt to relaunch the same container with id ");
sb.append(validContainerId);
Assert.assertTrue(testStartContainer(rpc, validAppAttemptId, validNode, validContainerToken, validNMToken, true).contains(sb.toString()));
// The container has been removed from the node manager's memory by this time.
// Trying to stop the container; it should not throw any exception.
testStopContainer(rpc, validAppAttemptId, validNode, validContainerId, validNMToken, false);
// Rolling over master key twice so that we can check whether older keys
// are used for authentication.
rollNMTokenMasterKey(nmTokenSecretManagerRM, nmTokenSecretManagerNM);
// Key rolled over once.. rolling over again
rollNMTokenMasterKey(nmTokenSecretManagerRM, nmTokenSecretManagerNM);
// Trying to get container status. Now the saved nmToken should be used for
// authentication... It should complain that the container was recently
// stopped.
sb = new StringBuilder("Container ");
sb.append(validContainerId);
sb.append(" was recently stopped on node manager");
Assert.assertTrue(testGetContainer(rpc, validAppAttemptId, validNode, validContainerId, validNMToken, true).contains(sb.toString()));
// Now let's remove the container from the NM's memory
nm.getNodeStatusUpdater().clearFinishedContainersFromCache();
// This should fail as container is removed from recently tracked finished
// containers.
sb = new StringBuilder("Container ");
sb.append(validContainerId.toString());
sb.append(" is not handled by this NodeManager");
Assert.assertTrue(testGetContainer(rpc, validAppAttemptId, validNode, validContainerId, validNMToken, false).contains(sb.toString()));
// Using appAttempt-1's NMToken for launching a container for appAttempt-2 should
// succeed.
ApplicationAttemptId attempt2 = ApplicationAttemptId.newInstance(appId, 2);
Token attempt1NMToken = nmTokenSecretManagerRM.createNMToken(validAppAttemptId, validNode, user);
org.apache.hadoop.yarn.api.records.Token newContainerToken = containerTokenSecretManager.createContainerToken(ContainerId.newContainerId(attempt2, 1), 0, validNode, user, r, Priority.newInstance(0), 0);
Assert.assertTrue(testStartContainer(rpc, attempt2, validNode, newContainerToken, attempt1NMToken, false).isEmpty());
}
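The DataInputBuffer step in the middle of this test (decoding a fresh NMToken into an NMTokenIdentifier before wrapping it in the new-version identifier) can be exercised in isolation. A hedged sketch follows, reusing only names and calls that already appear in the test above.
// Decode a freshly created NMToken and confirm the identifier round-trips.
org.apache.hadoop.yarn.api.records.Token nmToken =
    nmTokenSecretManagerRM.createNMToken(validAppAttemptId, validNode, user);
byte[] identifierBytes = nmToken.getIdentifier().array();
DataInputBuffer buffer = new DataInputBuffer();
buffer.reset(identifierBytes, identifierBytes.length);
NMTokenIdentifier decoded = new NMTokenIdentifier();
decoded.readFields(buffer);
Assert.assertEquals(validAppAttemptId, decoded.getApplicationAttemptId());
Assert.assertEquals(validNode, decoded.getNodeId());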