Use of org.apache.drill.common.exceptions.DrillRuntimeException in project drill by apache.
In class HiveStoragePlugin, method registerSchemas:
// Forced to synchronize this method to allow error recovery
// in the multi-threaded case. Can remove synchronized only
// by restructuring connections and cache to allow better
// recovery from failed secure connections.
@Override
public synchronized void registerSchemas(SchemaConfig schemaConfig, SchemaPlus parent) throws IOException {
  try {
    schemaFactory.registerSchemas(schemaConfig, parent);
    return;
    // Hack. We may need to retry the connection. But, we can't because
    // the retry logic is implemented in the very connection we need to
    // discard and rebuild. To work around, we discard the entire schema
    // factory, and all its invalid connections. Very crude, but the
    // easiest short-term solution until we refactor the code to do the
    // job properly. See DRILL-5510.
  } catch (Throwable e) {
    // Unwrap the exception to find the root cause.
    Throwable ex = e;
    for (;;) {
      // Case for failing on an invalid cached connection
      if (ex instanceof MetaException ||
          ex instanceof TTransportException) {
        break;
      }
      if (ex.getCause() == null || ex.getCause() == ex) {
        logger.error("Hive metastore register schemas failed", e);
        throw new DrillRuntimeException("Unknown Hive error", e);
      }
      ex = ex.getCause();
    }
  }
  try {
    schemaFactory.close();
  } catch (Throwable t) {
    // Ignore, we're in a bad state.
    logger.warn("Schema factory forced close failed, error ignored", t);
  }
  try {
    schemaFactory = new HiveSchemaFactory(this, name, hiveConf);
  } catch (ExecutionSetupException e) {
    throw new DrillRuntimeException(e);
  }
  // Try the schemas again. If this fails, just give up.
  schemaFactory.registerSchemas(schemaConfig, parent);
  logger.debug("Successfully recovered from a Hive metastore connection failure.");
}
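The recovery above hinges on unwrapping the cause chain to decide whether the failure is recoverable. Below is a minimal, self-contained sketch of that pattern as a reusable helper; CauseChain and containsCause are hypothetical names, not part of Drill.

// Hypothetical helper (not in Drill): walks a Throwable's cause chain looking
// for a given exception type, mirroring the unwrap loop in registerSchemas.
public final class CauseChain {

  private CauseChain() {}

  /** Returns true if any throwable in the cause chain is an instance of target. */
  public static boolean containsCause(Throwable e, Class<? extends Throwable> target) {
    for (Throwable ex = e; ex != null; ex = ex.getCause()) {
      if (target.isInstance(ex)) {
        return true;
      }
      if (ex.getCause() == ex) {
        // Guard against a self-referential cause, which would loop forever.
        break;
      }
    }
    return false;
  }
}

With such a helper, the classification above reduces to containsCause(e, MetaException.class) || containsCause(e, TTransportException.class).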
Use of org.apache.drill.common.exceptions.DrillRuntimeException in project drill by apache.
In class DrillHiveMetaStoreClient, method createClientWithAuthz:
/**
 * Create a DrillHiveMetaStoreClient for cases where:
 *   1. Drill impersonation is enabled and
 *   2. either storage based authorization (in the remote HiveMetaStore server) or SQL standard based
 *      authorization (in the Hive storage plugin) is enabled.
 * @param processUserMetaStoreClient MetaStoreClient of the process user. Useful for generating delegation tokens
 *                                   when SASL (KERBEROS or custom SASL implementations) is enabled.
 * @param hiveConf Conf including authorization configuration
 * @param userName User who is trying to access the Hive metadata
 * @return a DrillHiveMetaStoreClient that issues metastore requests as the given user
 * @throws MetaException if the client cannot be created
 */
public static DrillHiveMetaStoreClient createClientWithAuthz(final DrillHiveMetaStoreClient processUserMetaStoreClient,
    final HiveConf hiveConf, final String userName) throws MetaException {
  try {
    boolean delegationTokenGenerated = false;
    // UGI credentials to use for RPC communication with the Hive MetaStore server
    final UserGroupInformation ugiForRpc;
    if (!hiveConf.getBoolVar(ConfVars.HIVE_SERVER2_ENABLE_DOAS)) {
      // If user impersonation is disabled in the Hive storage plugin (not Drill impersonation), use the process
      // user UGI credentials.
      ugiForRpc = ImpersonationUtil.getProcessUserUGI();
    } else {
      ugiForRpc = ImpersonationUtil.createProxyUgi(userName);
      if (hiveConf.getBoolVar(ConfVars.METASTORE_USE_THRIFT_SASL)) {
        // When SASL is enabled for the proxy user, create a delegation token. Currently HiveMetaStoreClient can
        // create client transport for proxy users only when the authentication mechanism is DIGEST (through use
        // of delegation tokens).
        String delegationToken = processUserMetaStoreClient.getDelegationToken(userName, userName);
        try {
          Utils.setTokenStr(ugiForRpc, delegationToken, HiveClientWithAuthzWithCaching.DRILL2HMS_TOKEN);
        } catch (IOException e) {
          throw new DrillRuntimeException("Couldn't setup delegation token in the UGI for Hive MetaStoreClient", e);
        }
        delegationTokenGenerated = true;
      }
    }
    final HiveConf hiveConfForClient;
    if (delegationTokenGenerated) {
      hiveConfForClient = new HiveConf(hiveConf);
      hiveConfForClient.set("hive.metastore.token.signature", HiveClientWithAuthzWithCaching.DRILL2HMS_TOKEN);
    } else {
      hiveConfForClient = hiveConf;
    }
    return ugiForRpc.doAs(new PrivilegedExceptionAction<DrillHiveMetaStoreClient>() {
      @Override
      public DrillHiveMetaStoreClient run() throws Exception {
        return new HiveClientWithAuthzWithCaching(hiveConfForClient, ugiForRpc, userName);
      }
    });
  } catch (final Exception e) {
    throw new DrillRuntimeException("Failure setting up HiveMetaStore client.", e);
  }
}
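The two key moves here are building a proxy UGI for the query user and constructing the client inside doAs so its RPCs carry that user's credentials. Below is a stripped-down sketch of just that pattern, using standard Hadoop security APIs; the work inside run() is a placeholder, and ProxyUgiSketch is a hypothetical name.

import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.security.UserGroupInformation;

public class ProxyUgiSketch {

  // Runs a placeholder action as userName, impersonated on top of the
  // process (login) user's credentials.
  public static String runAsProxy(final String userName) throws Exception {
    UserGroupInformation proxyUgi =
        UserGroupInformation.createProxyUser(userName, UserGroupInformation.getLoginUser());
    return proxyUgi.doAs(new PrivilegedExceptionAction<String>() {
      @Override
      public String run() {
        // Any RPC issued here is authenticated as the proxy user.
        return "connected as " + userName;
      }
    });
  }
}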
Use of org.apache.drill.common.exceptions.DrillRuntimeException in project drill by apache.
In class HiveAuthorizationHelper, method authorize:
/* Helper method to check privileges */
private void authorize(final HiveOperationType hiveOpType, final List<HivePrivilegeObject> toRead,
    final List<HivePrivilegeObject> toWrite, final String cmd) throws HiveAccessControlException {
  try {
    HiveAuthzContext.Builder authzContextBuilder = new HiveAuthzContext.Builder();
    authzContextBuilder.setUserIpAddress("Not available");
    authzContextBuilder.setCommandString(cmd);
    authorizerV2.checkPrivileges(hiveOpType, toRead, toWrite, authzContextBuilder.build());
  } catch (final HiveAccessControlException e) {
    // Propagate the authorization failure as-is.
    throw e;
  } catch (final Exception e) {
    throw new DrillRuntimeException("Failed to use the Hive authorization components: " + e.getMessage(), e);
  }
}
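Callers build the HivePrivilegeObject lists before delegating to this helper. The sketch below shows what a read-table check might look like; authorizeReadTable is illustrative, and the three-argument HivePrivilegeObject constructor should be verified against the Hive version in use.

import java.util.Collections;
import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveOperationType;
import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject;
import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject.HivePrivilegeObjectType;

// Illustrative caller: check read access on dbName.tableName via the helper above.
public void authorizeReadTable(final String dbName, final String tableName) {
  HivePrivilegeObject toRead =
      new HivePrivilegeObject(HivePrivilegeObjectType.TABLE_OR_VIEW, dbName, tableName);
  authorize(HiveOperationType.QUERY, Collections.singletonList(toRead),
      Collections.<HivePrivilegeObject>emptyList(), "READ TABLE " + dbName + "." + tableName);
}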
Use of org.apache.drill.common.exceptions.DrillRuntimeException in project drill by apache.
In class HiveUtilities, method convertPartitionType:
/** Partition value is received in string format. Convert it into an appropriate object based on the type. */
public static Object convertPartitionType(TypeInfo typeInfo, String value, final String defaultPartitionValue) {
  if (typeInfo.getCategory() != Category.PRIMITIVE) {
    // In Hive only primitive types are allowed as partition column types.
    throw new DrillRuntimeException("Non-Primitive types are not allowed as partition column type in Hive, "
        + "but received one: " + typeInfo.getCategory());
  }
  if (defaultPartitionValue.equals(value)) {
    return null;
  }
  final PrimitiveCategory pCat = ((PrimitiveTypeInfo) typeInfo).getPrimitiveCategory();
  try {
    switch (pCat) {
      case BINARY:
        return value.getBytes();
      case BOOLEAN:
        return Boolean.parseBoolean(value);
      case DECIMAL: {
        DecimalTypeInfo decimalTypeInfo = (DecimalTypeInfo) typeInfo;
        return HiveDecimalUtils.enforcePrecisionScale(HiveDecimal.create(value),
            decimalTypeInfo.precision(), decimalTypeInfo.scale());
      }
      case DOUBLE:
        return Double.parseDouble(value);
      case FLOAT:
        return Float.parseFloat(value);
      case BYTE:
      case SHORT:
      case INT:
        return Integer.parseInt(value);
      case LONG:
        return Long.parseLong(value);
      case STRING:
      case VARCHAR:
        return value.getBytes();
      case CHAR:
        return value.trim().getBytes();
      case TIMESTAMP:
        return Timestamp.valueOf(value);
      case DATE:
        return Date.valueOf(value);
    }
  } catch (final Exception e) {
    // In Hive, partition values that can't be converted from string are considered to be NULL.
    logger.trace("Failed to interpret '{}' value from partition value string '{}'", pCat, value);
    return null;
  }
  throwUnsupportedHiveDataTypeError(pCat.toString());
  return null;
}
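To make the NULL semantics concrete, here is a hypothetical driver. It assumes HiveUtilities is importable from org.apache.drill.exec.store.hive and uses Hive's stock default partition name; both are assumptions to verify against your versions.

import org.apache.drill.exec.store.hive.HiveUtilities; // assumed package
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;

public class PartitionValueDemo {
  public static void main(String[] args) {
    // Hive's default sentinel (hive.exec.default.partition.name).
    final String defaultPartition = "__HIVE_DEFAULT_PARTITION__";
    // An ordinary value is parsed according to the primitive category.
    System.out.println(HiveUtilities.convertPartitionType(
        TypeInfoFactory.intTypeInfo, "42", defaultPartition));             // 42
    // The sentinel itself maps to SQL NULL.
    System.out.println(HiveUtilities.convertPartitionType(
        TypeInfoFactory.intTypeInfo, defaultPartition, defaultPartition)); // null
    // An unparseable value is also treated as NULL (traced, not thrown).
    System.out.println(HiveUtilities.convertPartitionType(
        TypeInfoFactory.intTypeInfo, "not-a-number", defaultPartition));   // null
  }
}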
Use of org.apache.drill.common.exceptions.DrillRuntimeException in project drill by apache.
In class HiveMetadataProvider, method splitInputWithUGI:
private List<InputSplitWrapper> splitInputWithUGI(final Properties properties, final StorageDescriptor sd,
    final Partition partition) throws Exception {
  watch.start();
  try {
    return ugi.doAs(new PrivilegedExceptionAction<List<InputSplitWrapper>>() {
      public List<InputSplitWrapper> run() throws Exception {
        final List<InputSplitWrapper> splits = Lists.newArrayList();
        final JobConf job = new JobConf(hiveConf);
        HiveUtilities.addConfToJob(job, properties);
        job.setInputFormat(HiveUtilities.getInputFormatClass(job, sd, hiveReadEntry.getTable()));
        final Path path = new Path(sd.getLocation());
        final FileSystem fs = path.getFileSystem(job);
        if (fs.exists(path)) {
          FileInputFormat.addInputPath(job, path);
          final InputFormat<?, ?> format = job.getInputFormat();
          for (final InputSplit split : format.getSplits(job, 1)) {
            splits.add(new InputSplitWrapper(split, partition));
          }
        }
        return splits;
      }
    });
  } catch (final InterruptedException | IOException e) {
    final String errMsg = String.format("Failed to create input splits: %s", e.getMessage());
    logger.error(errMsg, e);
    throw new DrillRuntimeException(errMsg, e);
  } finally {
    logger.trace("Took {} µs to get splits from {}", watch.elapsed(TimeUnit.NANOSECONDS) / 1000, sd.getLocation());
    watch.stop();
  }
}
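One detail worth copying is the timing idiom: start the watch before the guarded work and log it in finally so the measurement survives both success and failure. Below is a minimal standalone sketch with Guava's Stopwatch; SplitTiming and timedWork are hypothetical names.

import java.util.concurrent.TimeUnit;
import com.google.common.base.Stopwatch;

public class SplitTiming {

  private final Stopwatch watch = Stopwatch.createUnstarted();

  public void timedWork(Runnable work) {
    watch.start();
    try {
      work.run(); // stand-in for the ugi.doAs(...) call above
    } finally {
      // Log microseconds whether the work succeeded or threw.
      System.out.printf("Took %d µs%n", watch.elapsed(TimeUnit.NANOSECONDS) / 1000);
      watch.stop(); // elapsed time accumulates across cycles unless reset() is called
    }
  }
}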