Example 61 with DrillRuntimeException

use of org.apache.drill.common.exceptions.DrillRuntimeException in project drill by apache.

the class HiveStoragePlugin method registerSchemas.

// Forced to synchronize this method to allow error recovery
// in the multi-threaded case. Can remove synchronized only
// by restructuring connections and cache to allow better
// recovery from failed secure connections.
@Override
public synchronized void registerSchemas(SchemaConfig schemaConfig, SchemaPlus parent) throws IOException {
    try {
        schemaFactory.registerSchemas(schemaConfig, parent);
        return;
    // Hack. We may need to retry the connection. But, we can't because
    // the retry logic is implemented in the very connection we need to
    // discard and rebuild. To work around, we discard the entire schema
    // factory, and all its invalid connections. Very crude, but the
    // easiest short-term solution until we refactor the code to do the
    // job properly. See DRILL-5510.
    } catch (Throwable e) {
        // Unwrap exception
        Throwable ex = e;
        for (;;) {
            // Case for failing on an invalid cached connection
            if (ex instanceof MetaException ||
                    // Case for a new connection failing on stale or expired tokens.
                    ex instanceof TTransportException) {
                break;
            }
            if (ex.getCause() == null || ex.getCause() == ex) {
                logger.error("Hive metastore register schemas failed", e);
                throw new DrillRuntimeException("Unknown Hive error", e);
            }
            ex = ex.getCause();
        }
    }
    try {
        schemaFactory.close();
    } catch (Throwable t) {
        // Ignore, we're in a bad state.
        logger.warn("Schema factory forced close failed, error ignored", t);
    }
    try {
        schemaFactory = new HiveSchemaFactory(this, name, hiveConf);
    } catch (ExecutionSetupException e) {
        throw new DrillRuntimeException(e);
    }
    // Try the schemas again. If this fails, just give up.
    schemaFactory.registerSchemas(schemaConfig, parent);
    logger.debug("Successfully recovered from a Hive metastore connection failure.");
}
Also used : ExecutionSetupException(org.apache.drill.common.exceptions.ExecutionSetupException) TTransportException(org.apache.thrift.transport.TTransportException) DrillRuntimeException(org.apache.drill.common.exceptions.DrillRuntimeException) HiveSchemaFactory(org.apache.drill.exec.store.hive.schema.HiveSchemaFactory) MetaException(org.apache.hadoop.hive.metastore.api.MetaException)
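
The recovery idiom above — catch, tear down the cached factory, rebuild it, and retry exactly once — generalizes beyond Hive. The sketch below restates it with hypothetical Factory/create() names (none of them Drill API), assuming any failure may have poisoned the cached connections:

/** Minimal sketch of the recover-by-rebuild idiom; all names here are hypothetical. */
public class RetryOnceSketch {

    interface Factory {
        void register() throws Exception;
        void close();
    }

    // Stand-in for building a fresh factory with fresh connections.
    static Factory create() {
        return new Factory() {
            @Override public void register() { /* may throw on a stale connection */ }
            @Override public void close() { /* release connections */ }
        };
    }

    public static synchronized void registerWithRecovery() throws Exception {
        Factory factory = create();
        try {
            factory.register();
            return;
        } catch (Exception e) {
            try {
                // Discard the factory and all its possibly-invalid connections.
                factory.close();
            } catch (Exception ignored) {
                // Best effort; we're already in a bad state.
            }
        }
        factory = create();   // rebuild from scratch
        factory.register();   // retry once; if this fails, give up
    }
}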

Example 62 with DrillRuntimeException

use of org.apache.drill.common.exceptions.DrillRuntimeException in project drill by apache.

the class DrillHiveMetaStoreClient method createClientWithAuthz.

/**
   * Create a DrillHiveMetaStoreClient for cases where:
   *   1. Drill impersonation is enabled and
   *   2. either storage (in remote HiveMetaStore server) or SQL standard based authorization (in Hive storage plugin)
   *      is enabled
   * @param processUserMetaStoreClient MetaStoreClient of process user. Useful for generating the delegation tokens when
   *                                   SASL (KERBEROS or custom SASL implementations) is enabled.
   * @param hiveConf Conf including authorization configuration
   * @param userName User who is trying to access the Hive metadata
   * @return a DrillHiveMetaStoreClient that performs metastore access as the given user
   * @throws MetaException
   */
public static DrillHiveMetaStoreClient createClientWithAuthz(final DrillHiveMetaStoreClient processUserMetaStoreClient, final HiveConf hiveConf, final String userName) throws MetaException {
    try {
        boolean delegationTokenGenerated = false;
        // UGI credentials to use for RPC communication with Hive MetaStore server
        final UserGroupInformation ugiForRpc;
        if (!hiveConf.getBoolVar(ConfVars.HIVE_SERVER2_ENABLE_DOAS)) {
            // If the user impersonation is disabled in Hive storage plugin (not Drill impersonation), use the process
            // user UGI credentials.
            ugiForRpc = ImpersonationUtil.getProcessUserUGI();
        } else {
            ugiForRpc = ImpersonationUtil.createProxyUgi(userName);
            if (hiveConf.getBoolVar(ConfVars.METASTORE_USE_THRIFT_SASL)) {
                // When SASL is enabled, create a delegation token for the proxy user. Currently HiveMetaStoreClient
                // can create a client transport for proxy users only when the authentication mechanism is DIGEST
                // (through the use of delegation tokens).
                String delegationToken = processUserMetaStoreClient.getDelegationToken(userName, userName);
                try {
                    Utils.setTokenStr(ugiForRpc, delegationToken, HiveClientWithAuthzWithCaching.DRILL2HMS_TOKEN);
                } catch (IOException e) {
                    throw new DrillRuntimeException("Couldn't setup delegation token in the UGI for Hive MetaStoreClient", e);
                }
                delegationTokenGenerated = true;
            }
        }
        final HiveConf hiveConfForClient;
        if (delegationTokenGenerated) {
            hiveConfForClient = new HiveConf(hiveConf);
            hiveConfForClient.set("hive.metastore.token.signature", HiveClientWithAuthzWithCaching.DRILL2HMS_TOKEN);
        } else {
            hiveConfForClient = hiveConf;
        }
        return ugiForRpc.doAs(new PrivilegedExceptionAction<DrillHiveMetaStoreClient>() {

            @Override
            public DrillHiveMetaStoreClient run() throws Exception {
                return new HiveClientWithAuthzWithCaching(hiveConfForClient, ugiForRpc, userName);
            }
        });
    } catch (final Exception e) {
        throw new DrillRuntimeException("Failure setting up HiveMetaStore client.", e);
    }
}
Also used : HiveConf(org.apache.hadoop.hive.conf.HiveConf) IOException(java.io.IOException) DrillRuntimeException(org.apache.drill.common.exceptions.DrillRuntimeException) MetaException(org.apache.hadoop.hive.metastore.api.MetaException) UserException(org.apache.drill.common.exceptions.UserException) HiveAccessControlException(org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAccessControlException) UnknownDBException(org.apache.hadoop.hive.metastore.api.UnknownDBException) UnknownTableException(org.apache.hadoop.hive.metastore.api.UnknownTableException) TException(org.apache.thrift.TException) ExecutionException(java.util.concurrent.ExecutionException) InvalidOperationException(org.apache.hadoop.hive.metastore.api.InvalidOperationException) NoSuchObjectException(org.apache.hadoop.hive.metastore.api.NoSuchObjectException) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation)
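
The proxy-user mechanics above rest on Hadoop's UserGroupInformation API; ImpersonationUtil.createProxyUgi is in essence a wrapper around UserGroupInformation.createProxyUser. A minimal standalone sketch of the same doAs() pattern, with an illustrative user name:

import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.security.UserGroupInformation;

public class ProxyUgiSketch {
    public static void main(String[] args) throws Exception {
        // Build a proxy UGI for the query user on top of the process (login) user.
        UserGroupInformation ugi = UserGroupInformation.createProxyUser(
                "query_user", UserGroupInformation.getLoginUser());
        // Work done inside doAs() runs with the proxy user's identity.
        String current = ugi.doAs((PrivilegedExceptionAction<String>) () ->
                UserGroupInformation.getCurrentUser().getUserName());
        System.out.println(current); // prints "query_user"
    }
}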

Example 63 with DrillRuntimeException

use of org.apache.drill.common.exceptions.DrillRuntimeException in project drill by apache.

the class HiveAuthorizationHelper method authorize.

/* Helper method to check privileges */
private void authorize(final HiveOperationType hiveOpType, final List<HivePrivilegeObject> toRead, final List<HivePrivilegeObject> toWrite, final String cmd) throws HiveAccessControlException {
    try {
        HiveAuthzContext.Builder authzContextBuilder = new HiveAuthzContext.Builder();
        authzContextBuilder.setUserIpAddress("Not available");
        authzContextBuilder.setCommandString(cmd);
        authorizerV2.checkPrivileges(hiveOpType, toRead, toWrite, authzContextBuilder.build());
    } catch (final HiveAccessControlException e) {
        throw e;
    } catch (final Exception e) {
        throw new DrillRuntimeException("Failed to use the Hive authorization components: " + e.getMessage(), e);
    }
}
Also used : HiveAccessControlException(org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAccessControlException) HiveAuthzContext(org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzContext) DrillRuntimeException(org.apache.drill.common.exceptions.DrillRuntimeException) UserException(org.apache.drill.common.exceptions.UserException) HiveAuthzPluginException(org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzPluginException) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException)
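
A hypothetical call site for this helper, assuming Hive's authorization-plugin types are on the classpath; the database and table names are placeholders, not from the Drill source:

import java.util.Collections;
import java.util.List;
import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveOperationType;
import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject;
import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject.HivePrivilegeObjectType;

// Check that the session user may read db.tbl; no write objects are involved.
List<HivePrivilegeObject> toRead = Collections.singletonList(
        new HivePrivilegeObject(HivePrivilegeObjectType.TABLE_OR_VIEW, "db", "tbl"));
authorize(HiveOperationType.QUERY, toRead,
        Collections.<HivePrivilegeObject>emptyList(), "SELECT * FROM db.tbl");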

Example 64 with DrillRuntimeException

use of org.apache.drill.common.exceptions.DrillRuntimeException in project drill by apache.

the class HiveUtilities method convertPartitionType.

/** Partition value is received in string format. Convert it into appropriate object based on the type. */
public static Object convertPartitionType(TypeInfo typeInfo, String value, final String defaultPartitionValue) {
    if (typeInfo.getCategory() != Category.PRIMITIVE) {
        // In Hive only primitive types are allowed as partition column types.
        throw new DrillRuntimeException("Non-Primitive types are not allowed as partition column type in Hive, " + "but received one: " + typeInfo.getCategory());
    }
    if (defaultPartitionValue.equals(value)) {
        return null;
    }
    final PrimitiveCategory pCat = ((PrimitiveTypeInfo) typeInfo).getPrimitiveCategory();
    try {
        switch(pCat) {
            case BINARY:
                return value.getBytes();
            case BOOLEAN:
                return Boolean.parseBoolean(value);
            case DECIMAL:
                {
                    DecimalTypeInfo decimalTypeInfo = (DecimalTypeInfo) typeInfo;
                    return HiveDecimalUtils.enforcePrecisionScale(HiveDecimal.create(value), decimalTypeInfo.precision(), decimalTypeInfo.scale());
                }
            case DOUBLE:
                return Double.parseDouble(value);
            case FLOAT:
                return Float.parseFloat(value);
            case BYTE:
            case SHORT:
            case INT:
                return Integer.parseInt(value);
            case LONG:
                return Long.parseLong(value);
            case STRING:
            case VARCHAR:
                return value.getBytes();
            case CHAR:
                return value.trim().getBytes();
            case TIMESTAMP:
                return Timestamp.valueOf(value);
            case DATE:
                return Date.valueOf(value);
        }
    } catch (final Exception e) {
        // In Hive, partition values that can't be converted from string are considered to be NULL.
        logger.trace("Failed to interpret '{}' value from partition value string '{}'", pCat, value);
        return null;
    }
    throwUnsupportedHiveDataTypeError(pCat.toString());
    return null;
}
Also used : DecimalTypeInfo(org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo) DrillRuntimeException(org.apache.drill.common.exceptions.DrillRuntimeException) PrimitiveCategory(org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector.PrimitiveCategory) PrimitiveTypeInfo(org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo) UserException(org.apache.drill.common.exceptions.UserException) ExecutionSetupException(org.apache.drill.common.exceptions.ExecutionSetupException)
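
A brief usage sketch, assuming Hive's TypeInfoFactory and the stock default-partition marker __HIVE_DEFAULT_PARTITION__:

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;

// "42" parses to Integer.valueOf(42); the default-partition marker yields null,
// as does any value that fails to parse for the declared type.
Object v = HiveUtilities.convertPartitionType(
        TypeInfoFactory.intTypeInfo, "42", "__HIVE_DEFAULT_PARTITION__");
Object nil = HiveUtilities.convertPartitionType(
        TypeInfoFactory.intTypeInfo, "__HIVE_DEFAULT_PARTITION__", "__HIVE_DEFAULT_PARTITION__");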

Example 65 with DrillRuntimeException

use of org.apache.drill.common.exceptions.DrillRuntimeException in project drill by apache.

the class HiveMetadataProvider method splitInputWithUGI.

private List<InputSplitWrapper> splitInputWithUGI(final Properties properties, final StorageDescriptor sd, final Partition partition) throws Exception {
    watch.start();
    try {
        return ugi.doAs(new PrivilegedExceptionAction<List<InputSplitWrapper>>() {

            public List<InputSplitWrapper> run() throws Exception {
                final List<InputSplitWrapper> splits = Lists.newArrayList();
                final JobConf job = new JobConf(hiveConf);
                HiveUtilities.addConfToJob(job, properties);
                job.setInputFormat(HiveUtilities.getInputFormatClass(job, sd, hiveReadEntry.getTable()));
                final Path path = new Path(sd.getLocation());
                final FileSystem fs = path.getFileSystem(job);
                if (fs.exists(path)) {
                    FileInputFormat.addInputPath(job, path);
                    final InputFormat<?, ?> format = job.getInputFormat();
                    for (final InputSplit split : format.getSplits(job, 1)) {
                        splits.add(new InputSplitWrapper(split, partition));
                    }
                }
                return splits;
            }
        });
    } catch (final InterruptedException | IOException e) {
        final String errMsg = String.format("Failed to create input splits: %s", e.getMessage());
        logger.error(errMsg, e);
        throw new DrillRuntimeException(errMsg, e);
    } finally {
        logger.trace("Took {} µs to get splits from {}", watch.elapsed(TimeUnit.NANOSECONDS) / 1000, sd.getLocation());
        watch.stop();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) IOException(java.io.IOException) DrillRuntimeException(org.apache.drill.common.exceptions.DrillRuntimeException) InputFormat(org.apache.hadoop.mapred.InputFormat) FileInputFormat(org.apache.hadoop.mapred.FileInputFormat) FileSystem(org.apache.hadoop.fs.FileSystem) List(java.util.List) JobConf(org.apache.hadoop.mapred.JobConf) InputSplit(org.apache.hadoop.mapred.InputSplit)
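
Stripped of the UGI and partition plumbing, the split enumeration in the doAs() body reduces to the classic mapred-API sequence. A minimal standalone sketch with a hypothetical input path and TextInputFormat standing in for the table's real input format:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.TextInputFormat;

public class SplitSketch {
    public static void main(String[] args) throws Exception {
        JobConf job = new JobConf();
        job.setInputFormat(TextInputFormat.class);
        FileInputFormat.addInputPath(job, new Path("/tmp/data")); // hypothetical path
        // Ask the input format for at least one split and print each.
        for (InputSplit split : job.getInputFormat().getSplits(job, 1)) {
            System.out.println(split);
        }
    }
}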

Aggregations

DrillRuntimeException (org.apache.drill.common.exceptions.DrillRuntimeException) 69
IOException (java.io.IOException) 32
VersionMismatchException (org.apache.drill.exec.exception.VersionMismatchException) 9
Stopwatch (com.google.common.base.Stopwatch) 7
ExecutionSetupException (org.apache.drill.common.exceptions.ExecutionSetupException) 7
UserException (org.apache.drill.common.exceptions.UserException) 6
KeeperException (org.apache.zookeeper.KeeperException) 6
NodeExistsException (org.apache.zookeeper.KeeperException.NodeExistsException) 6
NoSuchElementException (java.util.NoSuchElementException) 5
Path (org.apache.hadoop.fs.Path) 5
Bson (org.bson.conversions.Bson) 4
Registry (org.apache.drill.exec.proto.UserBitShared.Registry) 3
ValueVector (org.apache.drill.exec.vector.ValueVector) 3
Admin (org.apache.hadoop.hbase.client.Admin) 3
Document (org.bson.Document) 3
File (java.io.File) 2
UnsupportedEncodingException (java.io.UnsupportedEncodingException) 2
URI (java.net.URI) 2
RexNode (org.apache.calcite.rex.RexNode) 2
LogicalExpression (org.apache.drill.common.expression.LogicalExpression) 2