Use of org.apache.hadoop.security.token.TokenIdentifier in project alluxio (by Alluxio).
In class Client, method setupContainerLaunchContext:
/**
 * Populates the ApplicationMaster container launch context: launch command, local
 * resources, environment, and (when Kerberos security is enabled) delegation tokens.
 *
 * @throws IOException if local resources or delegation tokens cannot be obtained
 * @throws YarnException if the RM delegation token request fails
 */
private void setupContainerLaunchContext() throws IOException, YarnException {
// Arguments forwarded to the ApplicationMaster process on its command line.
Map<String, String> applicationMasterArgs = ImmutableMap.<String, String>of("-num_workers", Integer.toString(mNumWorkers), "-master_address", mMasterAddress, "-resource_path", mResourcePath);
final String amCommand = YarnUtils.buildCommand(YarnContainerType.APPLICATION_MASTER, applicationMasterArgs);
// Printed to stdout (not the logger) — presumably intended as CLI feedback; confirm.
System.out.println("ApplicationMaster command: " + amCommand);
mAmContainer.setCommands(Collections.singletonList(amCommand));
// Setup local resources
// Files under mResourcePath are localized into the AM container's working directory
// under these exact names; the setup script expects them there.
Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
localResources.put("alluxio.tar.gz", YarnUtils.createLocalResourceOfFile(mYarnConf, mResourcePath + "/alluxio.tar.gz"));
localResources.put("alluxio-yarn-setup.sh", YarnUtils.createLocalResourceOfFile(mYarnConf, mResourcePath + "/alluxio-yarn-setup.sh"));
localResources.put("alluxio.jar", YarnUtils.createLocalResourceOfFile(mYarnConf, mResourcePath + "/alluxio.jar"));
mAmContainer.setLocalResources(localResources);
// Setup CLASSPATH for ApplicationMaster
Map<String, String> appMasterEnv = new HashMap<String, String>();
setupAppMasterEnv(appMasterEnv);
mAmContainer.setEnvironment(appMasterEnv);
// Set up security tokens for launching our ApplicationMaster container.
if (UserGroupInformation.isSecurityEnabled()) {
Credentials credentials = new Credentials();
// The RM principal acts as the renewer for the HDFS delegation tokens.
String tokenRenewer = mYarnConf.get(YarnConfiguration.RM_PRINCIPAL);
if (tokenRenewer == null || tokenRenewer.length() == 0) {
throw new IOException("Can't get Master Kerberos principal for the RM to use as renewer");
}
org.apache.hadoop.fs.FileSystem fs = org.apache.hadoop.fs.FileSystem.get(mYarnConf);
// getting tokens for the default file-system.
final Token<?>[] tokens = fs.addDelegationTokens(tokenRenewer, credentials);
if (tokens != null) {
for (Token<?> token : tokens) {
LOG.info("Got dt for " + fs.getUri() + "; " + token);
}
}
// getting yarn resource manager token
org.apache.hadoop.conf.Configuration config = mYarnClient.getConfig();
// Convert the YARN-protocol token into a Hadoop Token keyed by the RM service address.
Token<TokenIdentifier> token = ConverterUtils.convertFromYarn(mYarnClient.getRMDelegationToken(new org.apache.hadoop.io.Text(tokenRenewer)), ClientRMProxy.getRMDelegationTokenService(config));
LOG.info("Added RM delegation token: " + token);
credentials.addToken(token.getService(), token);
// Serialize all collected credentials into the token buffer handed to the AM container.
DataOutputBuffer dob = new DataOutputBuffer();
credentials.writeTokenStorageToStream(dob);
// Wrap only the valid portion: getData() returns the whole backing array.
ByteBuffer buffer = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
mAmContainer.setTokens(buffer);
}
}
Use of org.apache.hadoop.security.token.TokenIdentifier in project apex-core (by Apache).
In class LaunchContainerRunnable, method getTokens:
/**
 * Serializes the tokens a launched container should receive: every token held by
 * {@code ugi} except the AM-&gt;RM token (which containers must not see), plus the
 * given Stram delegation token.
 *
 * @param ugi the user whose current credentials are copied
 * @param delegationToken Stram delegation token to add for the container
 * @return a ByteBuffer containing the serialized token storage
 * @throws RuntimeException wrapping any IOException from token serialization
 */
public static ByteBuffer getTokens(UserGroupInformation ugi, Token<StramDelegationTokenIdentifier> delegationToken) {
try {
Collection<Token<? extends TokenIdentifier>> tokens = ugi.getCredentials().getAllTokens();
Credentials credentials = new Credentials();
for (Token<? extends TokenIdentifier> token : tokens) {
// Exclude the AM->RM token so containers cannot talk to the ResourceManager.
if (!token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
credentials.addToken(token.getService(), token);
LOG.debug("Passing container token {}", token);
}
}
credentials.addToken(delegationToken.getService(), delegationToken);
DataOutputBuffer dataOutput = new DataOutputBuffer();
credentials.writeTokenStorageToStream(dataOutput);
// FIX: wrap only the valid bytes. DataOutputBuffer.getData() returns the entire
// backing array, which is usually larger than the number of bytes written;
// wrapping it whole would append garbage to the token stream.
ByteBuffer cTokenBuf = ByteBuffer.wrap(dataOutput.getData(), 0, dataOutput.getLength());
return cTokenBuf.duplicate();
} catch (IOException e) {
throw new RuntimeException("Error generating delegation token", e);
}
}
Use of org.apache.hadoop.security.token.TokenIdentifier in project samza (by Apache).
In class YarnContainerRunner, method startContainer:
/**
 * Runs a command as a process on the container. All binaries needed by the physical process are packaged in the URL
 * specified by packagePath.
 *
 * @param packagePath path of the archive to localize into the container
 * @param container the allocated YARN container to launch in
 * @param env environment variables for the launched process
 * @param cmd the shell command to run inside the container
 * @throws SamzaContainerLaunchException if resource lookup, credential
 *         serialization, or the NM start-container call fails
 */
private void startContainer(Path packagePath, Container container, Map<String, String> env, final String cmd) throws SamzaContainerLaunchException {
log.info("starting container {} {} {} {}", new Object[] { packagePath, container, env, cmd });
// TODO: SAMZA-1144 remove the customized approach for package resource and use the common one.
// But keep it now for backward compatibility.
// set the local package so that the containers and app master are provisioned with it
LocalResource packageResource = Records.newRecord(LocalResource.class);
URL packageUrl = ConverterUtils.getYarnUrlFromPath(packagePath);
FileStatus fileStatus;
try {
fileStatus = packagePath.getFileSystem(yarnConfiguration).getFileStatus(packagePath);
} catch (IOException ioe) {
log.error("IO Exception when accessing the package status from the filesystem", ioe);
throw new SamzaContainerLaunchException("IO Exception when accessing the package status from the filesystem");
}
packageResource.setResource(packageUrl);
log.info("set package Resource in YarnContainerRunner for {}", packageUrl);
packageResource.setSize(fileStatus.getLen());
packageResource.setTimestamp(fileStatus.getModificationTime());
packageResource.setType(LocalResourceType.ARCHIVE);
packageResource.setVisibility(LocalResourceVisibility.APPLICATION);
ByteBuffer allTokens;
// copy tokens (copied from dist shell example)
try {
Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
// FIX: remove the AM->RM token BEFORE serializing. The original code wrote the
// credentials to the buffer first and removed the token afterwards, so the
// AMRM token still leaked into the container's token stream.
Iterator<Token<? extends TokenIdentifier>> iter = credentials.getAllTokens().iterator();
while (iter.hasNext()) {
TokenIdentifier tokenIdentifier = iter.next().decodeIdentifier();
if (tokenIdentifier != null && tokenIdentifier.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
iter.remove();
}
}
DataOutputBuffer dob = new DataOutputBuffer();
credentials.writeTokenStorageToStream(dob);
allTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
} catch (IOException ioe) {
log.error("IOException when writing credentials.", ioe);
throw new SamzaContainerLaunchException("IO Exception when writing credentials to output buffer");
}
Map<String, LocalResource> localResourceMap = new HashMap<>();
localResourceMap.put("__package", packageResource);
// include the resources from the universal resource configurations
LocalizerResourceMapper resourceMapper = new LocalizerResourceMapper(new LocalizerResourceConfig(config), yarnConfiguration);
localResourceMap.putAll(resourceMapper.getResourceMap());
ContainerLaunchContext context = Records.newRecord(ContainerLaunchContext.class);
context.setEnvironment(env);
context.setTokens(allTokens.duplicate());
context.setCommands(new ArrayList<String>() {
{
add(cmd);
}
});
context.setLocalResources(localResourceMap);
log.debug("setting localResourceMap to {}", localResourceMap);
log.debug("setting context to {}", context);
// NOTE: the original created an unused StartContainerRequest here; removed as
// dead code — nmClient.startContainer takes the container and context directly.
try {
nmClient.startContainer(container, context);
} catch (YarnException ye) {
log.error("Received YarnException when starting container: " + container.getId(), ye);
throw new SamzaContainerLaunchException("Received YarnException when starting container: " + container.getId(), ye);
} catch (IOException ioe) {
log.error("Received IOException when starting container: " + container.getId(), ioe);
throw new SamzaContainerLaunchException("Received IOException when starting container: " + container.getId(), ioe);
}
}
Use of org.apache.hadoop.security.token.TokenIdentifier in project hive (by Apache).
In class TestHiveAccumuloHelper, method testOutputFormatSaslConfigurationWithKerberosToken:
@Test
public void testOutputFormatSaslConfigurationWithKerberosToken() throws Exception {
final JobConf jobConf = new JobConf();
final HiveAccumuloHelper helper = Mockito.mock(HiveAccumuloHelper.class);
final AuthenticationToken authToken = Mockito.mock(AuthenticationToken.class);
final Token hadoopToken = Mockito.mock(Token.class);
final AccumuloConnectionParameters cnxnParams = Mockito.mock(AccumuloConnectionParameters.class);
final Connector connector = Mockito.mock(Connector.class);
final String user = "bob";
final String instanceName = "accumulo";
final String zookeepers = "host1:2181,host2:2181,host3:2181";
UserGroupInformation ugi = UserGroupInformation.createUserForTesting(user, new String[0]);
// Call the real methods for these
Mockito.doCallRealMethod().when(helper).updateOutputFormatConfWithAccumuloToken(jobConf, ugi, cnxnParams);
Mockito.doCallRealMethod().when(helper).updateConfWithAccumuloToken(jobConf, ugi, cnxnParams, false);
// Return our mocked objects
Mockito.when(cnxnParams.getConnector()).thenReturn(connector);
Mockito.when(helper.getDelegationToken(connector)).thenReturn(authToken);
Mockito.when(helper.getHadoopToken(authToken)).thenReturn(hadoopToken);
// Stub AccumuloConnectionParameters actions
Mockito.when(cnxnParams.useSasl()).thenReturn(true);
Mockito.when(cnxnParams.getAccumuloUserName()).thenReturn(user);
Mockito.when(cnxnParams.getAccumuloInstanceName()).thenReturn(instanceName);
Mockito.when(cnxnParams.getZooKeepers()).thenReturn(zookeepers);
// We have kerberos credentials
Mockito.when(helper.hasKerberosCredentials(ugi)).thenReturn(true);
// Invoke the OutputFormat entrypoint
helper.updateOutputFormatConfWithAccumuloToken(jobConf, ugi, cnxnParams);
Mockito.verify(helper).setOutputFormatConnectorInfo(jobConf, user, authToken);
Mockito.verify(helper).mergeTokenIntoJobConf(jobConf, hadoopToken);
Mockito.verify(helper).addTokenFromUserToJobConf(ugi, jobConf);
// Make sure the token made it into the UGI
Collection<Token<? extends TokenIdentifier>> tokens = ugi.getTokens();
Assert.assertEquals(1, tokens.size());
Assert.assertEquals(hadoopToken, tokens.iterator().next());
}
Use of org.apache.hadoop.security.token.TokenIdentifier in project cdap (by caskdata).
In class YarnTokenUtils, method obtainToken:
/**
* Gets a Yarn delegation token and stores it in the given Credentials.
*
* @return the same Credentials instance as the one given in parameter.
*/
/**
 * Gets a Yarn delegation token and stores it in the given Credentials.
 *
 * @return the same Credentials instance as the one given in parameter.
 */
public static Credentials obtainToken(YarnConfiguration configuration, Credentials credentials) {
// Without Kerberos there is no delegation token to fetch.
if (!UserGroupInformation.isSecurityEnabled()) {
return credentials;
}
try {
YarnClient client = YarnClient.createYarnClient();
client.init(configuration);
client.start();
try {
Text renewer = new Text(UserGroupInformation.getCurrentUser().getShortUserName());
org.apache.hadoop.yarn.api.records.Token rmToken = client.getRMDelegationToken(renewer);
// TODO: The following logic should be replaced with call to ClientRMProxy.getRMDelegationTokenService after
// CDAP-4825 is resolved
List<String> serviceNames = new ArrayList<>();
if (HAUtil.isHAEnabled(configuration)) {
// With RM HA every RM host must appear in the token's service field.
// Work on a copy of the conf, since resolving each address requires
// mutating the RM_HA_ID key.
YarnConfiguration confCopy = new YarnConfiguration(configuration);
for (String rmId : HAUtil.getRMHAIds(configuration)) {
confCopy.set(YarnConfiguration.RM_HA_ID, rmId);
InetSocketAddress rmAddress = confCopy.getSocketAddr(YarnConfiguration.RM_ADDRESS, YarnConfiguration.DEFAULT_RM_ADDRESS, YarnConfiguration.DEFAULT_RM_PORT);
serviceNames.add(SecurityUtil.buildTokenService(rmAddress).toString());
}
} else {
// Single-RM deployment: one service entry.
serviceNames.add(SecurityUtil.buildTokenService(YarnUtils.getRMAddress(configuration)).toString());
}
Token<TokenIdentifier> delegationToken = ConverterUtils.convertFromYarn(rmToken, null);
delegationToken.setService(new Text(Joiner.on(',').join(serviceNames)));
credentials.addToken(new Text(delegationToken.getService()), delegationToken);
// OK to log, it won't log the credential, only information about the token.
LOG.debug("Added RM delegation token: {}", delegationToken);
} finally {
client.stop();
}
return credentials;
} catch (Exception e) {
throw Throwables.propagate(e);
}
}
Aggregations