use of org.apache.accumulo.core.client.security.tokens.AuthenticationToken in project hive by apache.
the class HiveAccumuloTableInputFormat method getSplits.
@Override
public InputSplit[] getSplits(JobConf jobConf, int numSplits) throws IOException {
final AccumuloConnectionParameters accumuloParams = new AccumuloConnectionParameters(jobConf);
final Instance instance = accumuloParams.getInstance();
final ColumnMapper columnMapper;
try {
columnMapper = getColumnMapper(jobConf);
} catch (TooManyAccumuloColumnsException e) {
throw new IOException(e);
}
JobContext context = ShimLoader.getHadoopShims().newJobContext(Job.getInstance(jobConf));
Path[] tablePaths = FileInputFormat.getInputPaths(context);
try {
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
final Connector connector;
// Need to get a Connector so we can look up the user's authorizations if they are not otherwise specified
if (accumuloParams.useSasl() && !ugi.hasKerberosCredentials()) {
// In a YARN/Tez task we no longer have the Kerberos credentials, so use the delegation token instead
AuthenticationToken token = ConfiguratorBase.getAuthenticationToken(AccumuloInputFormat.class, jobConf);
// Convert the stub from the configuration back into a normal Token
// More reflection to support 1.6
token = helper.unwrapAuthenticationToken(jobConf, token);
connector = instance.getConnector(accumuloParams.getAccumuloUserName(), token);
} else {
// Still in the local JVM, use the username+password or Kerberos credentials
connector = accumuloParams.getConnector(instance);
}
final List<ColumnMapping> columnMappings = columnMapper.getColumnMappings();
final List<IteratorSetting> iterators = predicateHandler.getIterators(jobConf, columnMapper);
final Collection<Range> ranges = predicateHandler.getRanges(jobConf, columnMapper);
// An empty (but non-null) collection of ranges means the predicate matches no data; return zero splits here, since passing an empty range collection to the AccumuloInputFormat would scan the whole table.
if (null != ranges && ranges.isEmpty()) {
return new InputSplit[0];
}
// Set the relevant information in the Configuration for the AccumuloInputFormat
configure(jobConf, instance, connector, accumuloParams, columnMapper, iterators, ranges);
int numColumns = columnMappings.size();
List<Integer> readColIds = ColumnProjectionUtils.getReadColumnIDs(jobConf);
// Sanity check
if (numColumns < readColIds.size())
throw new IOException("Number of column mappings (" + numColumns + ")" + " numbers less than the hive table columns. (" + readColIds.size() + ")");
// get splits from Accumulo
InputSplit[] splits = accumuloInputFormat.getSplits(jobConf, numSplits);
HiveAccumuloSplit[] hiveSplits = new HiveAccumuloSplit[splits.length];
for (int i = 0; i < splits.length; i++) {
RangeInputSplit ris = (RangeInputSplit) splits[i];
hiveSplits[i] = new HiveAccumuloSplit(ris, tablePaths[0]);
}
return hiveSplits;
} catch (AccumuloException e) {
log.error("Could not configure AccumuloInputFormat", e);
throw new IOException(StringUtils.stringifyException(e));
} catch (AccumuloSecurityException e) {
log.error("Could not configure AccumuloInputFormat", e);
throw new IOException(StringUtils.stringifyException(e));
} catch (SerDeException e) {
log.error("Could not configure AccumuloInputFormat", e);
throw new IOException(StringUtils.stringifyException(e));
}
}
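For orientation, here is a minimal sketch (not taken from the Hive source) of driving this input format directly. The property keys are assumptions about what AccumuloConnectionParameters and the SerDe read from the JobConf; Hive normally fills them in from the table's SerDe properties, so treat the names and values below as placeholders.
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.accumulo.mr.HiveAccumuloSplit;
import org.apache.hadoop.hive.accumulo.mr.HiveAccumuloTableInputFormat;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;

static void printSplits() throws IOException {
  JobConf jobConf = new JobConf();
  jobConf.set("accumulo.instance.name", "accumulo");          // assumed key: instance name
  jobConf.set("accumulo.zookeepers", "zk1:2181,zk2:2181");     // assumed key: ZooKeeper quorum
  jobConf.set("accumulo.user.name", "hive");                   // assumed key: principal
  jobConf.set("accumulo.user.pass", "secret");                 // assumed key: password
  jobConf.set("accumulo.table.name", "hive_table");            // assumed key: Accumulo table
  jobConf.set("accumulo.columns.mapping", ":rowid,cf:value");  // assumed key: Hive-to-Accumulo column mapping
  // getSplits() reads the Hive table path from the standard MapReduce input path setting
  FileInputFormat.setInputPaths(jobConf, new Path("/warehouse/hive_table"));
  InputSplit[] splits = new HiveAccumuloTableInputFormat().getSplits(jobConf, 1);
  for (InputSplit split : splits) {
    // Each split wraps an Accumulo RangeInputSplit together with the Hive table path
    System.out.println((HiveAccumuloSplit) split);
  }
}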
use of org.apache.accumulo.core.client.security.tokens.AuthenticationToken in project hive by apache.
the class HiveAccumuloTableOutputFormat method configureAccumuloOutputFormat.
protected void configureAccumuloOutputFormat(JobConf job) throws IOException {
AccumuloConnectionParameters cnxnParams = getConnectionParams(job);
final String tableName = job.get(AccumuloSerDeParameters.TABLE_NAME);
// Make sure we actually got the table name
Preconditions.checkNotNull(tableName, "Expected Accumulo table name to be provided in job configuration");
// Set the necessary Accumulo information
try {
if (cnxnParams.useMockInstance()) {
setMockInstanceWithErrorChecking(job, cnxnParams.getAccumuloInstanceName());
} else {
// Accumulo instance name with ZK quorum
setZooKeeperInstanceWithErrorChecking(job, cnxnParams.getAccumuloInstanceName(), cnxnParams.getZooKeepers(), cnxnParams.useSasl());
}
// When SASL is enabled, make sure a token ends up in the job's credentials; the AccumuloOutputFormat will look for it there.
if (cnxnParams.useSasl()) {
UserGroupInformation ugi = getCurrentUser();
if (!hasKerberosCredentials(ugi)) {
getHelper().addTokenFromUserToJobConf(ugi, job);
} else {
// Still in the local JVM, can use Kerberos credentials
try {
Connector connector = cnxnParams.getConnector();
AuthenticationToken token = getHelper().getDelegationToken(connector);
// Send the DelegationToken down to the Configuration for Accumulo to use
setConnectorInfoWithErrorChecking(job, cnxnParams.getAccumuloUserName(), token);
// Convert the Accumulo token into a Hadoop Token
Token<? extends TokenIdentifier> accumuloToken = getHelper().getHadoopToken(token);
log.info("Adding Hadoop Token for Accumulo to Job's Credentials");
// Add the Hadoop token to the JobConf
getHelper().mergeTokenIntoJobConf(job, accumuloToken);
// Make sure the UGI contains the token too for good measure
if (!ugi.addToken(accumuloToken)) {
throw new IOException("Failed to add Accumulo Token to UGI");
}
} catch (AccumuloException | AccumuloSecurityException e) {
throw new IOException("Failed to acquire Accumulo DelegationToken", e);
}
}
} else {
setConnectorInfoWithErrorChecking(job, cnxnParams.getAccumuloUserName(), new PasswordToken(cnxnParams.getAccumuloPassword()));
}
// Set the table where we're writing this data
setDefaultAccumuloTableName(job, tableName);
} catch (AccumuloSecurityException e) {
log.error("Could not connect to Accumulo with provided credentials", e);
throw new IOException(e);
}
}
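For comparison, the non-SASL branch above boils down to the standard Accumulo 1.x mapred configuration calls. A minimal sketch without Hive's error-checking wrappers, using placeholder instance, credential, and table values:
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.ClientConfiguration;
import org.apache.accumulo.core.client.mapred.AccumuloOutputFormat;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;
import org.apache.hadoop.mapred.JobConf;

static void configurePasswordAuth(JobConf job) throws AccumuloSecurityException {
  // Point the output format at the Accumulo instance via its ZooKeeper quorum
  AccumuloOutputFormat.setZooKeeperInstance(job,
      ClientConfiguration.loadDefault().withInstance("accumulo").withZkHosts("zk1:2181"));
  // Authenticate with a username plus PasswordToken (the else-branch above)
  AccumuloOutputFormat.setConnectorInfo(job, "hive", new PasswordToken("secret"));
  // Mutations that don't name a table explicitly go to this default table
  AccumuloOutputFormat.setDefaultTableName(job, "hive_table");
}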
use of org.apache.accumulo.core.client.security.tokens.AuthenticationToken in project hive by apache.
the class HiveAccumuloHelper method getDelegationToken.
/**
* Obtain a DelegationToken from Accumulo in a backwards compatible manner.
*
* @param conn
* The Accumulo connector
* @return The DelegationToken instance
* @throws IOException
* If the token cannot be obtained
*/
public AuthenticationToken getDelegationToken(Connector conn) throws IOException {
try {
Class<?> clz = JavaUtils.loadClass(DELEGATION_TOKEN_CONFIG_CLASS_NAME);
// DelegationTokenConfig delegationTokenConfig = new DelegationTokenConfig();
Object delegationTokenConfig = clz.newInstance();
SecurityOperations secOps = conn.securityOperations();
Method getDelegationTokenMethod = secOps.getClass().getMethod(GET_DELEGATION_TOKEN_METHOD_NAME, clz);
// secOps.getDelegationToken(delegationTokenConfig)
return (AuthenticationToken) getDelegationTokenMethod.invoke(secOps, delegationTokenConfig);
} catch (Exception e) {
throw new IOException("Failed to obtain DelegationToken from Accumulo", e);
}
}
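The reflection is only there so the helper still compiles against Accumulo 1.6. On Accumulo 1.7+, where the delegation-token classes exist at compile time, the direct equivalent is roughly the following sketch (my paraphrase, not the Hive helper itself):
import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.admin.DelegationTokenConfig;
import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;

static AuthenticationToken getDelegationTokenDirectly(Connector conn)
    throws AccumuloException, AccumuloSecurityException {
  // Ask Accumulo to mint a delegation token for the Kerberos-authenticated caller
  return conn.securityOperations().getDelegationToken(new DelegationTokenConfig());
}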
use of org.apache.accumulo.core.client.security.tokens.AuthenticationToken in project YCSB by brianfrankcooper.
the class AccumuloClient method init.
@Override
public void init() throws DBException {
colFam = new Text(getProperties().getProperty("accumulo.columnFamily"));
inst = new ZooKeeperInstance(getProperties().getProperty("accumulo.instanceName"), getProperties().getProperty("accumulo.zooKeepers"));
try {
String principal = getProperties().getProperty("accumulo.username");
AuthenticationToken token = new PasswordToken(getProperties().getProperty("accumulo.password"));
connector = inst.getConnector(principal, token);
} catch (AccumuloException e) {
throw new DBException(e);
} catch (AccumuloSecurityException e) {
throw new DBException(e);
}
if (!(getProperties().getProperty("accumulo.pcFlag", "none").equals("none"))) {
System.err.println("Sorry, the ZK based producer/consumer implementation has been removed. " + "Please see YCSB issue #416 for work on adding a general solution to coordinated work.");
}
}
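A minimal sketch of the properties this init() reads, with placeholder values; the keys are taken directly from the snippet above.
import java.util.Properties;

static Properties accumuloClientProperties() {
  Properties props = new Properties();
  props.setProperty("accumulo.instanceName", "accumulo");        // ZooKeeperInstance name
  props.setProperty("accumulo.zooKeepers", "zk1:2181,zk2:2181"); // ZooKeeper quorum
  props.setProperty("accumulo.username", "ycsb");                // Accumulo principal
  props.setProperty("accumulo.password", "secret");              // wrapped in a PasswordToken
  props.setProperty("accumulo.columnFamily", "family");          // column family for YCSB fields
  return props;
}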
use of org.apache.accumulo.core.client.security.tokens.AuthenticationToken in project hive by apache.
the class AccumuloStorageHandler method configureJobConf.
@SuppressWarnings("deprecation")
@Override
public void configureJobConf(TableDesc tableDesc, JobConf jobConf) {
try {
Utils.addDependencyJars(jobConf, Tracer.class, Fate.class, Connector.class, Main.class, ZooKeeper.class, AccumuloStorageHandler.class);
} catch (IOException e) {
log.error("Could not add necessary Accumulo dependencies to classpath", e);
}
Properties tblProperties = tableDesc.getProperties();
AccumuloSerDeParameters serDeParams = null;
try {
serDeParams = new AccumuloSerDeParameters(jobConf, tblProperties, AccumuloSerDe.class.getName());
} catch (SerDeException e) {
log.error("Could not instantiate AccumuloSerDeParameters", e);
return;
}
try {
serDeParams.getRowIdFactory().addDependencyJars(jobConf);
} catch (IOException e) {
log.error("Could not add necessary dependencies for " + serDeParams.getRowIdFactory().getClass(), e);
}
// When using SASL, obtain a delegation token and attach it to the Job so that it gets passed down to the YARN/Tez task.
if (connectionParams.useSasl()) {
try {
// Obtain a delegation token from Accumulo
Connector conn = connectionParams.getConnector();
AuthenticationToken token = helper.getDelegationToken(conn);
// Register the connector info with both the input and output configurators via the MapReduce API. If it was already set, catch the error, log a debug message, and keep going.
try {
InputConfigurator.setConnectorInfo(AccumuloInputFormat.class, jobConf, connectionParams.getAccumuloUserName(), token);
} catch (IllegalStateException e) {
// The implementation balks when this method is invoked multiple times
log.debug("Ignoring IllegalArgumentException about re-setting connector information");
}
try {
OutputConfigurator.setConnectorInfo(AccumuloOutputFormat.class, jobConf, connectionParams.getAccumuloUserName(), token);
} catch (IllegalStateException e) {
// The implementation balks when this method is invoked multiple times
log.debug("Ignoring IllegalArgumentException about re-setting connector information");
}
// Convert the Accumulo token into a Hadoop Token
Token<? extends TokenIdentifier> accumuloToken = helper.getHadoopToken(token);
log.info("Adding Hadoop Token for Accumulo to Job's Credentials");
// Add the Hadoop token to the JobConf
helper.mergeTokenIntoJobConf(jobConf, accumuloToken);
} catch (Exception e) {
throw new RuntimeException("Failed to obtain DelegationToken for " + connectionParams.getAccumuloUserName(), e);
}
}
}
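getHadoopToken and mergeTokenIntoJobConf are not shown on this page; with the standard Hadoop security APIs, the merge step typically looks like the sketch below (an assumption about the helper's behaviour, not its actual code).
import java.io.IOException;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;

static void addTokenToJob(Token<? extends TokenIdentifier> accumuloToken, JobConf jobConf)
    throws IOException {
  // Place the token in the job's credentials so YARN ships it to the tasks
  jobConf.getCredentials().addToken(accumuloToken.getService(), accumuloToken);
  // Also attach it to the current UGI so code still running in this JVM can see it
  UserGroupInformation.getCurrentUser().addToken(accumuloToken);
}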