Use of org.apache.accumulo.core.client.security.tokens.KerberosToken in project accumulo by apache.
The class MapReduceClientOpts, method getToken.
@Override
public AuthenticationToken getToken() {
  AuthenticationToken authToken = super.getToken();
  // MapReduce tasks cannot use the client's Kerberos credentials on remote servers,
  // so we need to request a delegation token and use that instead.
  if (authToken instanceof KerberosToken) {
    log.info("Received KerberosToken, fetching DelegationToken for MapReduce");
    final KerberosToken krbToken = (KerberosToken) authToken;
    try {
      UserGroupInformation user = UserGroupInformation.getCurrentUser();
      if (!user.hasKerberosCredentials()) {
        throw new IllegalStateException("Expected current user to have Kerberos credentials");
      }
      String newPrincipal = user.getUserName();
      log.info("Obtaining delegation token for {}", newPrincipal);
      setPrincipal(newPrincipal);
      Connector conn = getInstance().getConnector(newPrincipal, krbToken);
      // Do the explicit check to see if the user has the permission to get a delegation token
      if (!conn.securityOperations().hasSystemPermission(conn.whoami(), SystemPermission.OBTAIN_DELEGATION_TOKEN)) {
        log.error("{} doesn't have the {} SystemPermission necessary to obtain a delegation token."
            + " MapReduce tasks cannot automatically use the client's credentials on remote servers."
            + " Delegation tokens provide a means to run MapReduce without distributing the user's credentials.",
            user.getUserName(), SystemPermission.OBTAIN_DELEGATION_TOKEN.name());
        throw new IllegalStateException(conn.whoami() + " does not have permission to obtain a delegation token");
      }
      // Get the delegation token from Accumulo
      return conn.securityOperations().getDelegationToken(new DelegationTokenConfig());
    } catch (Exception e) {
      final String msg = "Failed to acquire DelegationToken for use with MapReduce";
      log.error(msg, e);
      throw new RuntimeException(msg, e);
    }
  }
  return authToken;
}
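For context, here is a short sketch of how a job driver might consume the token returned by getToken(). It is illustrative rather than project code: the opts variable, job name, and table name are assumed, while setConnectorInfo and setInputTableName belong to the Accumulo 1.x MapReduce API.

// Hypothetical driver wiring: under Kerberos, getToken() above returns a
// DelegationToken, which is safe to ship to remote MapReduce tasks.
Job job = Job.getInstance(new Configuration(), "accumulo-mr-job"); // assumed job name
AuthenticationToken token = opts.getToken(); // DelegationToken when Kerberos is in use
AccumuloInputFormat.setConnectorInfo(job, opts.getPrincipal(), token);
AccumuloInputFormat.setInputTableName(job, "mytable"); // assumed table name
job.setInputFormatClass(AccumuloInputFormat.class);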
Use of org.apache.accumulo.core.client.security.tokens.KerberosToken in project hive by apache.
The class HiveAccumuloTableInputFormat, method getSplits.
@Override
public InputSplit[] getSplits(JobConf jobConf, int numSplits) throws IOException {
  final AccumuloConnectionParameters accumuloParams = new AccumuloConnectionParameters(jobConf);
  final Instance instance = accumuloParams.getInstance();
  final ColumnMapper columnMapper;
  try {
    columnMapper = getColumnMapper(jobConf);
  } catch (TooManyAccumuloColumnsException e) {
    throw new IOException(e);
  }
  JobContext context = ShimLoader.getHadoopShims().newJobContext(Job.getInstance(jobConf));
  Path[] tablePaths = FileInputFormat.getInputPaths(context);
  try {
    Connector connector = null;
    // Need to get a Connector so we can look up the user's authorizations if not otherwise specified
    if (accumuloParams.useSasl()) {
      log.info("Current user: " + UserGroupInformation.getCurrentUser());
      // In a YARN/Tez task we no longer have the Kerberos credentials, so use the delegation token instead
      AuthenticationToken token = ConfiguratorBase.getAuthenticationToken(AccumuloInputFormat.class, jobConf);
      if (null != token && !jobConf.getCredentials().getAllTokens().isEmpty()) {
        // Convert the stub from the configuration back into a normal Token
        log.info("Found authentication token in Configuration: " + token);
        log.info("Job credential tokens: " + jobConf.getCredentials().getAllTokens());
        AuthenticationToken unwrappedToken = ConfiguratorBase.unwrapAuthenticationToken(jobConf, token);
        log.info("Converted authentication token from Configuration into: " + unwrappedToken);
        // If the token was not a stub, unwrapAuthenticationToken will return back the original
        // token (which we know is insufficient)
        if (unwrappedToken != token) {
          log.info("Creating Accumulo Connector with unwrapped delegation token");
          connector = instance.getConnector(accumuloParams.getAccumuloUserName(), unwrappedToken);
        } else {
          log.info("Job credentials did not contain delegation token, fetching new token");
        }
      }
      if (connector == null) {
        log.info("Obtaining Accumulo Connector using KerberosToken");
        // Construct a KerberosToken -- relies on ProxyUser configuration. Will be the client making
        // the request on top of the HS2 user. Accumulo will require proper proxy-user auth configs.
        connector = instance.getConnector(accumuloParams.getAccumuloUserName(), new KerberosToken(accumuloParams.getAccumuloUserName()));
      }
    } else {
      // Still in the local JVM, use the username+password or Kerberos credentials
      connector = accumuloParams.getConnector(instance);
    }
    final List<ColumnMapping> columnMappings = columnMapper.getColumnMappings();
    final List<IteratorSetting> iterators = predicateHandler.getIterators(jobConf, columnMapper);
    final Collection<Range> ranges = predicateHandler.getRanges(jobConf, columnMapper);
    // A non-null but empty set of ranges means the predicate pushdown matched nothing, while
    // AccumuloInputFormat would interpret an empty set as "scan the whole table". We don't want that.
    if (null != ranges && ranges.isEmpty()) {
      return new InputSplit[0];
    }
    // Set the relevant information in the Configuration for the AccumuloInputFormat
    configure(jobConf, instance, connector, accumuloParams, columnMapper, iterators, ranges);
    int numColumns = columnMappings.size();
    List<Integer> readColIds = ColumnProjectionUtils.getReadColumnIDs(jobConf);
    // Sanity check
    if (numColumns < readColIds.size())
      throw new IOException("Number of column mappings (" + numColumns + ") is less than the number of Hive table columns to read (" + readColIds.size() + ")");
    // Get splits from Accumulo
    InputSplit[] splits = accumuloInputFormat.getSplits(jobConf, numSplits);
    HiveAccumuloSplit[] hiveSplits = new HiveAccumuloSplit[splits.length];
    for (int i = 0; i < splits.length; i++) {
      RangeInputSplit ris = (RangeInputSplit) splits[i];
      ris.setLogLevel(Level.DEBUG);
      hiveSplits[i] = new HiveAccumuloSplit(ris, tablePaths[0]);
    }
    return hiveSplits;
  } catch (AccumuloException e) {
    log.error("Could not configure AccumuloInputFormat", e);
    throw new IOException(StringUtils.stringifyException(e));
  } catch (AccumuloSecurityException e) {
    log.error("Could not configure AccumuloInputFormat", e);
    throw new IOException(StringUtils.stringifyException(e));
  } catch (SerDeException e) {
    log.error("Could not configure AccumuloInputFormat", e);
    throw new IOException(StringUtils.stringifyException(e));
  }
}
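The stub unwrapping above is the consuming half of how Accumulo delegation tokens move through Hadoop jobs. Below is a hedged sketch of the producing half, assuming the Accumulo 1.x mapred API: when setConnectorInfo receives a DelegationToken, it stores a stub in the JobConf and places the real secret into the job's Credentials, which is what unwrapAuthenticationToken later reverses.

// Illustrative only: obtain a delegation token and register it with the job.
// The stub lands in the Configuration; the secret lands in the job credentials.
DelegationToken dt = connector.securityOperations().getDelegationToken(new DelegationTokenConfig());
AccumuloInputFormat.setConnectorInfo(jobConf, accumuloParams.getAccumuloUserName(), dt);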
Use of org.apache.accumulo.core.client.security.tokens.KerberosToken in project accumulo by apache.
The class ShellConfigIT, method experimentalPropTest.
@Test
public void experimentalPropTest() throws Exception {
  // ensure experimental props do not show up in config output unless set
  AuthenticationToken token = getAdminToken();
  File clientConfFile = null;
  switch (getClusterType()) {
    case MINI:
      MiniAccumuloClusterImpl mac = (MiniAccumuloClusterImpl) getCluster();
      clientConfFile = mac.getConfig().getClientConfFile();
      break;
    case STANDALONE:
      StandaloneAccumuloClusterConfiguration standaloneConf = (StandaloneAccumuloClusterConfiguration) getClusterConfiguration();
      clientConfFile = standaloneConf.getClientConfFile();
      break;
    default:
      Assert.fail("Unknown cluster type");
  }
  Assert.assertNotNull(clientConfFile);
  TestShell ts = null;
  if (token instanceof PasswordToken) {
    String passwd = new String(((PasswordToken) token).getPassword(), UTF_8);
    ts = new TestShell(getAdminPrincipal(), passwd, getCluster().getInstanceName(), getCluster().getZooKeepers(), clientConfFile);
  } else if (token instanceof KerberosToken) {
    ts = new TestShell(getAdminPrincipal(), null, getCluster().getInstanceName(), getCluster().getZooKeepers(), clientConfFile);
  } else {
    Assert.fail("Unknown token type");
  }
  assertTrue(Property.CRYPTO_CIPHER_KEY_ALGORITHM_NAME.isExperimental());
  String configOutput = ts.exec("config");
  assertTrue(configOutput.contains(PerTableVolumeChooser.TABLE_VOLUME_CHOOSER));
  assertFalse(configOutput.contains(Property.CRYPTO_CIPHER_KEY_ALGORITHM_NAME.getKey()));
}
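The closing assertions rely on experimental status being metadata on the Property enum itself, which the shell's config command evidently consults before printing. A minimal illustration using only the public Property API:

// Experimental status is carried by the enum, independent of any shell output.
Property p = Property.CRYPTO_CIPHER_KEY_ALGORITHM_NAME;
System.out.println(p.getKey() + " experimental=" + p.isExperimental());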
Use of org.apache.accumulo.core.client.security.tokens.KerberosToken in project accumulo by apache.
The class ThriftTransportKeyTest, method testSaslPrincipalIsSignificant.
@Test
public void testSaslPrincipalIsSignificant() throws IOException, InterruptedException {
  UserGroupInformation user1 = UserGroupInformation.createUserForTesting("user1", new String[0]);
  final KerberosToken token = EasyMock.createMock(KerberosToken.class);
  SaslConnectionParams saslParams1 = user1.doAs(new PrivilegedExceptionAction<SaslConnectionParams>() {
    @Override
    public SaslConnectionParams run() throws Exception {
      final ClientConfiguration clientConf = ClientConfiguration.loadDefault();
      // The primary is the first component of the principal
      final String primary = "accumulo";
      clientConf.withSasl(true, primary);
      assertEquals("true", clientConf.get(ClientProperty.INSTANCE_RPC_SASL_ENABLED));
      return new SaslConnectionParams(clientConf, token);
    }
  });
  UserGroupInformation user2 = UserGroupInformation.createUserForTesting("user2", new String[0]);
  SaslConnectionParams saslParams2 = user2.doAs(new PrivilegedExceptionAction<SaslConnectionParams>() {
    @Override
    public SaslConnectionParams run() throws Exception {
      final ClientConfiguration clientConf = ClientConfiguration.loadDefault();
      // The primary is the first component of the principal
      final String primary = "accumulo";
      clientConf.withSasl(true, primary);
      assertEquals("true", clientConf.get(ClientProperty.INSTANCE_RPC_SASL_ENABLED));
      return new SaslConnectionParams(clientConf, token);
    }
  });
  ThriftTransportKey ttk1 = new ThriftTransportKey(HostAndPort.fromParts("localhost", 9997), 1L, null, saslParams1),
      ttk2 = new ThriftTransportKey(HostAndPort.fromParts("localhost", 9997), 1L, null, saslParams2);
  assertNotEquals(ttk1, ttk2);
  assertNotEquals(ttk1.hashCode(), ttk2.hashCode());
}
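Why the inequality matters: ThriftTransportKey is used as the cache key for Accumulo's pooled Thrift connections, so if two principals produced equal keys, one user could be handed a transport authenticated as the other. Conversely, keys built from the same SASL parameters should compare equal; a small sketch reusing the objects from the test above (illustrative, not part of the test):

// Same host, timeout, and SASL params should map to the same pool slot.
ThriftTransportKey ttk1Copy = new ThriftTransportKey(HostAndPort.fromParts("localhost", 9997), 1L, null, saslParams1);
assertEquals(ttk1, ttk1Copy);
assertEquals(ttk1.hashCode(), ttk1Copy.hashCode());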
Use of org.apache.accumulo.core.client.security.tokens.KerberosToken in project accumulo by apache.
The class TracesResource, method getScanner.
protected Pair<Scanner, UserGroupInformation> getScanner() throws AccumuloException, AccumuloSecurityException {
  AccumuloConfiguration conf = Monitor.getContext().getConfiguration();
  final boolean saslEnabled = conf.getBoolean(Property.INSTANCE_RPC_SASL_ENABLED);
  UserGroupInformation traceUgi = null;
  final String principal;
  final AuthenticationToken at;
  Map<String, String> loginMap = conf.getAllPropertiesWithPrefix(Property.TRACE_TOKEN_PROPERTY_PREFIX);
  // May be null
  String keytab = loginMap.get(Property.TRACE_TOKEN_PROPERTY_PREFIX.getKey() + "keytab");
  if (keytab == null || keytab.length() == 0) {
    keytab = conf.getPath(Property.GENERAL_KERBEROS_KEYTAB);
  }
  if (saslEnabled && null != keytab) {
    principal = SecurityUtil.getServerPrincipal(conf.get(Property.TRACE_USER));
    try {
      traceUgi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(principal, keytab);
    } catch (IOException e) {
      throw new RuntimeException("Failed to login as trace user", e);
    }
  } else {
    principal = conf.get(Property.TRACE_USER);
  }
  if (!saslEnabled) {
    if (loginMap.isEmpty()) {
      Property p = Property.TRACE_PASSWORD;
      at = new PasswordToken(conf.get(p).getBytes(UTF_8));
    } else {
      Properties props = new Properties();
      int prefixLength = Property.TRACE_TOKEN_PROPERTY_PREFIX.getKey().length();
      for (Entry<String, String> entry : loginMap.entrySet()) {
        props.put(entry.getKey().substring(prefixLength), entry.getValue());
      }
      AuthenticationToken token = Property.createInstanceFromPropertyName(conf, Property.TRACE_TOKEN_TYPE, AuthenticationToken.class, new PasswordToken());
      token.init(props);
      at = token;
    }
  } else {
    at = null;
  }
  final String table = conf.get(Property.TRACE_TABLE);
  Scanner scanner;
  if (null != traceUgi) {
    try {
      scanner = traceUgi.doAs(new PrivilegedExceptionAction<Scanner>() {
        @Override
        public Scanner run() throws Exception {
          // Make the KerberosToken inside the doAs
          AuthenticationToken token = at;
          if (null == token) {
            token = new KerberosToken();
          }
          return getScanner(table, principal, token);
        }
      });
    } catch (IOException | InterruptedException e) {
      throw new RuntimeException("Failed to obtain scanner", e);
    }
  } else {
    if (null == at) {
      throw new AssertionError("AuthenticationToken should not be null");
    }
    scanner = getScanner(table, principal, at);
  }
  return new Pair<>(scanner, traceUgi);
}
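The UserGroupInformation is returned alongside the Scanner because a Scanner performs its RPCs lazily during iteration, so under Kerberos the caller must iterate inside the same login context. A hedged sketch of such a caller; the iteration body is hypothetical:

// Illustrative caller: keep scanning inside doAs when a trace UGI exists.
Pair<Scanner, UserGroupInformation> pair = getScanner();
final Scanner scanner = pair.getFirst();
UserGroupInformation ugi = pair.getSecond();
if (null != ugi) {
  ugi.doAs((PrivilegedAction<Void>) () -> {
    for (Entry<Key, Value> entry : scanner) {
      // process a trace row (hypothetical)
    }
    return null;
  });
} else {
  for (Entry<Key, Value> entry : scanner) {
    // process a trace row (hypothetical)
  }
}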