Example 61 with MetaException

Use of org.apache.hadoop.hive.metastore.api.MetaException in project metacat by Netflix.

Class HiveConnectorConfig, method createThriftClient.

/**
 * Creates the thrift hive client.
 *
 * @param connectorContext connector config
 * @return thrift hive client
 * @throws MetaException metastore exception
 */
@Bean
@ConditionalOnMissingBean(IMetacatHiveClient.class)
public IMetacatHiveClient createThriftClient(final ConnectorContext connectorContext) throws MetaException {
    final HiveMetastoreClientFactory factory = new HiveMetastoreClientFactory(
        null,
        (int) TimeUtil.toTime(
            connectorContext.getConfiguration()
                .getOrDefault(HiveConfigConstants.HIVE_METASTORE_TIMEOUT, "20s"),
            TimeUnit.SECONDS, TimeUnit.MILLISECONDS));
    final String metastoreUri = connectorContext.getConfiguration().get(HiveConfigConstants.THRIFT_URI);
    try {
        return new MetacatHiveClient(new URI(metastoreUri), factory);
    } catch (Exception e) {
        final String message = String.format("Invalid thrift uri %s", metastoreUri);
        log.error(message);
        throw new IllegalArgumentException(message, e);
    }
}
Also used: HiveMetastoreClientFactory (com.netflix.metacat.connector.hive.client.thrift.HiveMetastoreClientFactory), IMetacatHiveClient (com.netflix.metacat.connector.hive.IMetacatHiveClient), MetacatHiveClient (com.netflix.metacat.connector.hive.client.thrift.MetacatHiveClient), URI (java.net.URI), MetaException (org.apache.hadoop.hive.metastore.api.MetaException), ConditionalOnMissingBean (org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean), Bean (org.springframework.context.annotation.Bean)
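
A minimal sketch of the failure mode this bean guards against, using only JDK types: if the THRIFT_URI configuration entry is absent or malformed, the java.net.URI constructor throws, and the bean wraps that in an IllegalArgumentException exactly as above. The null value here is an assumed stand-in for a missing configuration entry.

import java.net.URI;

public final class ThriftUriCheck {
    public static void main(String[] args) {
        final String metastoreUri = null; // assumed stand-in for a missing THRIFT_URI entry
        try {
            new URI(metastoreUri); // NullPointerException for null, URISyntaxException for malformed values
        } catch (Exception e) {
            // The bean wraps the failure just like this before rethrowing:
            final IllegalArgumentException wrapped =
                    new IllegalArgumentException(String.format("Invalid thrift uri %s", metastoreUri), e);
            System.out.println(wrapped.getMessage() + " (caused by " + e.getClass().getSimpleName() + ")");
        }
    }
}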

Example 62 with MetaException

Use of org.apache.hadoop.hive.metastore.api.MetaException in project metacat by Netflix.

Class MetacatHMSHandler, method add_drop_partitions.

/**
 * Adds and drops partitions in one transaction.
 *
 * @param databaseName database name
 * @param tableName    table name
 * @param addParts     list of partitions to add
 * @param dropParts    list of partition values identifying the partitions to drop
 * @param deleteData   if true, deletes the data of dropped partitions
 * @return true if successful
 * @throws NoSuchObjectException exception if the table does not exist
 * @throws MetaException         exception if the metastore operation fails
 * @throws TException            any internal exception
 */
@SuppressWarnings({ "checkstyle:methodname" })
public boolean add_drop_partitions(final String databaseName, final String tableName, final List<Partition> addParts, final List<List<String>> dropParts, final boolean deleteData) throws NoSuchObjectException, MetaException, TException {
    startFunction("add_drop_partitions : db=" + databaseName + " tbl=" + tableName);
    if (addParts.isEmpty() && dropParts.isEmpty()) {
        return true;
    }
    for (List<String> partVals : dropParts) {
        LOG.info("Drop Partition values:" + partVals);
    }
    for (Partition part : addParts) {
        LOG.info("Add Partition values:" + part);
    }
    boolean ret = false;
    Exception ex = null;
    try {
        ret = addDropPartitionsCore(getMS(), databaseName, tableName, addParts, dropParts, false, null);
    } catch (Exception e) {
        ex = e;
        if (e instanceof MetaException) {
            throw (MetaException) e;
        } else if (e instanceof InvalidObjectException) {
            throw (InvalidObjectException) e;
        } else if (e instanceof AlreadyExistsException) {
            throw (AlreadyExistsException) e;
        } else if (e instanceof NoSuchObjectException) {
            throw (NoSuchObjectException) e;
        } else {
            throw newMetaException(e);
        }
    } finally {
        endFunction("add_drop_partitions", ret, ex, tableName);
    }
    return ret;
}
Also used: Partition (org.apache.hadoop.hive.metastore.api.Partition), AlreadyExistsException (org.apache.hadoop.hive.metastore.api.AlreadyExistsException), InvalidObjectException (org.apache.hadoop.hive.metastore.api.InvalidObjectException), NoSuchObjectException (org.apache.hadoop.hive.metastore.api.NoSuchObjectException), MetaException (org.apache.hadoop.hive.metastore.api.MetaException), InvalidInputException (org.apache.hadoop.hive.metastore.api.InvalidInputException), TException (org.apache.thrift.TException), IOException (java.io.IOException)
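
A hedged caller sketch: assuming an already-initialized MetacatHMSHandler (its construction requires metastore configuration and is omitted, and its import is project-specific), this swaps one partition for another in a single metastore transaction, which is the point of combining add and drop in one call.

import java.util.Collections;
import java.util.List;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.thrift.TException;

public final class PartitionSwap {
    // MetacatHMSHandler import omitted; the package is internal to metacat.
    static boolean swap(final MetacatHMSHandler handler, final String db, final String table,
                        final Partition newPart, final List<String> oldPartValues) throws TException {
        // Adds newPart and drops the partition identified by oldPartValues atomically;
        // deleteData=false leaves the dropped partition's files in place.
        return handler.add_drop_partitions(db, table,
                Collections.singletonList(newPart),
                Collections.singletonList(oldPartValues),
                false);
    }
}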

Example 63 with MetaException

Use of org.apache.hadoop.hive.metastore.api.MetaException in project metacat by Netflix.

Class CatalogThriftHiveMetastore, method get_schema_with_environment_context.

/**
 * {@inheritDoc}
 */
@Override
public List<FieldSchema> get_schema_with_environment_context(final String dbName, final String tableName, @Nullable final EnvironmentContext environmentContext) throws TException {
    return requestWrapper("get_schema_with_environment_context", new Object[] { dbName, tableName, environmentContext }, () -> {
        final Table table = get_table(dbName, tableName);
        List<FieldSchema> partitionKeys = Collections.emptyList();
        List<FieldSchema> columns = Collections.emptyList();
        if (table != null && table.getSd() != null && table.getSd().getCols() != null) {
            columns = table.getSd().getCols();
        }
        if (table != null && table.getPartitionKeys() != null) {
            partitionKeys = table.getPartitionKeys();
        }
        if (partitionKeys.isEmpty() && columns.isEmpty()) {
            throw new MetaException("Table does not have any partition keys or cols: " + dbName + "." + tableName);
        }
        final List<FieldSchema> result = Lists.newArrayListWithCapacity(partitionKeys.size() + columns.size());
        result.addAll(columns);
        result.addAll(partitionKeys);
        return result;
    });
}
Also used: Table (org.apache.hadoop.hive.metastore.api.Table), FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema), MetaException (org.apache.hadoop.hive.metastore.api.MetaException)
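
A small, self-contained illustration of the ordering contract above (the column and key names are made up): data columns come first and partition keys are appended last, so callers can rely on that order when reconstructing a table schema.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hive.metastore.api.FieldSchema;

public final class SchemaOrder {
    public static void main(String[] args) {
        final List<FieldSchema> columns = Arrays.asList(new FieldSchema("id", "bigint", null));
        final List<FieldSchema> partitionKeys = Arrays.asList(new FieldSchema("dateint", "int", null));
        final List<FieldSchema> result = new ArrayList<>(columns.size() + partitionKeys.size());
        result.addAll(columns);       // data columns first
        result.addAll(partitionKeys); // partition keys last
        result.forEach(f -> System.out.println(f.getName() + " " + f.getType()));
    }
}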

Example 64 with MetaException

Use of org.apache.hadoop.hive.metastore.api.MetaException in project metacat by Netflix.

Class HiveConnectorDatabaseService, method listNames.

/**
 * {@inheritDoc}
 */
@Override
public List<QualifiedName> listNames(final ConnectorRequestContext requestContext, final QualifiedName name, @Nullable final QualifiedName prefix, @Nullable final Sort sort, @Nullable final Pageable pageable) {
    try {
        final List<QualifiedName> qualifiedNames = Lists.newArrayList();
        final String databaseFilter = (prefix != null) ? prefix.getDatabaseName() : null;
        for (String databaseName : metacatHiveClient.getAllDatabases()) {
            final QualifiedName qualifiedName = QualifiedName.ofDatabase(name.getCatalogName(), databaseName);
            if (databaseFilter != null && !databaseName.startsWith(databaseFilter)) {
                continue;
            }
            qualifiedNames.add(qualifiedName);
        }
        // supporting sort by qualified name only
        if (sort != null) {
            ConnectorUtils.sort(qualifiedNames, sort, Comparator.comparing(QualifiedName::toString));
        }
        return ConnectorUtils.paginate(qualifiedNames, pageable);
    } catch (MetaException exception) {
        throw new InvalidMetaException(name, exception);
    } catch (TException exception) {
        throw new ConnectorException(String.format("Failed listing hive databases for %s", name), exception);
    }
}
Also used: TException (org.apache.thrift.TException), QualifiedName (com.netflix.metacat.common.QualifiedName), ConnectorException (com.netflix.metacat.common.server.connectors.exception.ConnectorException), InvalidMetaException (com.netflix.metacat.common.server.connectors.exception.InvalidMetaException), MetaException (org.apache.hadoop.hive.metastore.api.MetaException)
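
The prefix match above is a plain startsWith on the database name. A JDK-only sketch of the same filter-sort flow follows; ConnectorUtils.sort and ConnectorUtils.paginate are metacat helpers, so standard stream operations stand in for them, and the database names are invented.

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

public final class PrefixList {
    public static void main(String[] args) {
        final List<String> databases = Arrays.asList("prod_events", "test_scratch", "prod_ads");
        final String databaseFilter = "prod"; // would come from prefix.getDatabaseName()
        final List<String> names = databases.stream()
                .filter(db -> db.startsWith(databaseFilter))
                .sorted() // sorting by name only, mirroring the comparator above
                .collect(Collectors.toList());
        System.out.println(names); // [prod_ads, prod_events]
    }
}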

Example 65 with MetaException

Use of org.apache.hadoop.hive.metastore.api.MetaException in project hive by apache.

Class SemanticAnalyzer, method analyzeCreateTable.

/**
 * Analyzes the create table command. If it is a regular create-table or
 * create-table-like statement, we create a DDLWork task and return null. If it
 * is a create-table-as-select, we gather the necessary info, such as the SerDe
 * and storage format, put it in the QB, and return the select statement,
 * indicating that the rest of the semantic analyzer needs to deal with it with
 * respect to the SerDe and storage format.
 */
ASTNode analyzeCreateTable(ASTNode ast, QB qb, PlannerContext plannerCtx) throws SemanticException {
    String[] qualifiedTabName = getQualifiedTableName((ASTNode) ast.getChild(0));
    String dbDotTab = getDotName(qualifiedTabName);
    String likeTableName = null;
    List<FieldSchema> cols = new ArrayList<FieldSchema>();
    List<FieldSchema> partCols = new ArrayList<FieldSchema>();
    List<String> bucketCols = new ArrayList<String>();
    List<SQLPrimaryKey> primaryKeys = new ArrayList<SQLPrimaryKey>();
    List<SQLForeignKey> foreignKeys = new ArrayList<SQLForeignKey>();
    List<SQLUniqueConstraint> uniqueConstraints = new ArrayList<>();
    List<SQLNotNullConstraint> notNullConstraints = new ArrayList<>();
    List<SQLDefaultConstraint> defaultConstraints = new ArrayList<>();
    List<SQLCheckConstraint> checkConstraints = new ArrayList<>();
    List<Order> sortCols = new ArrayList<Order>();
    int numBuckets = -1;
    String comment = null;
    String location = null;
    Map<String, String> tblProps = null;
    boolean ifNotExists = false;
    boolean isExt = false;
    boolean isTemporary = false;
    boolean isMaterialization = false;
    ASTNode selectStmt = null;
    // regular CREATE TABLE
    final int CREATE_TABLE = 0;
    // CREATE TABLE LIKE ... (CTLT)
    final int CTLT = 1;
    // CREATE TABLE AS SELECT ... (CTAS)
    final int CTAS = 2;
    int command_type = CREATE_TABLE;
    List<String> skewedColNames = new ArrayList<String>();
    List<List<String>> skewedValues = new ArrayList<List<String>>();
    Map<List<String>, String> listBucketColValuesMapping = new HashMap<List<String>, String>();
    boolean storedAsDirs = false;
    boolean isUserStorageFormat = false;
    RowFormatParams rowFormatParams = new RowFormatParams();
    StorageFormat storageFormat = new StorageFormat(conf);
    LOG.info("Creating table " + dbDotTab + " position=" + ast.getCharPositionInLine());
    int numCh = ast.getChildCount();
    /*
     * Check the 1st-level children and do simple semantic checks: 1) CTLT and
     * CTAS should not coexists. 2) CTLT or CTAS should not coexists with column
     * list (target table schema). 3) CTAS does not support partitioning (for
     * now).
     */
    for (int num = 1; num < numCh; num++) {
        ASTNode child = (ASTNode) ast.getChild(num);
        if (storageFormat.fillStorageFormat(child)) {
            isUserStorageFormat = true;
            continue;
        }
        switch(child.getToken().getType()) {
            case HiveParser.TOK_IFNOTEXISTS:
                ifNotExists = true;
                break;
            case HiveParser.KW_EXTERNAL:
                isExt = true;
                break;
            case HiveParser.KW_TEMPORARY:
                isTemporary = true;
                isMaterialization = MATERIALIZATION_MARKER.equals(child.getText());
                break;
            case HiveParser.TOK_LIKETABLE:
                if (child.getChildCount() > 0) {
                    likeTableName = getUnescapedName((ASTNode) child.getChild(0));
                    if (likeTableName != null) {
                        if (command_type == CTAS) {
                            throw new SemanticException(ErrorMsg.CTAS_CTLT_COEXISTENCE.getMsg());
                        }
                        if (cols.size() != 0) {
                            throw new SemanticException(ErrorMsg.CTLT_COLLST_COEXISTENCE.getMsg());
                        }
                    }
                    command_type = CTLT;
                }
                break;
            case HiveParser.TOK_QUERY: // CTAS
                if (command_type == CTLT) {
                    throw new SemanticException(ErrorMsg.CTAS_CTLT_COEXISTENCE.getMsg());
                }
                if (cols.size() != 0) {
                    throw new SemanticException(ErrorMsg.CTAS_COLLST_COEXISTENCE.getMsg());
                }
                if (partCols.size() != 0 || bucketCols.size() != 0) {
                    boolean dynPart = HiveConf.getBoolVar(conf, HiveConf.ConfVars.DYNAMICPARTITIONING);
                    if (!dynPart) {
                        throw new SemanticException(ErrorMsg.CTAS_PARCOL_COEXISTENCE.getMsg());
                    } else {
                        // TODO: support dynamic partition for CTAS
                        throw new SemanticException(ErrorMsg.CTAS_PARCOL_COEXISTENCE.getMsg());
                    }
                }
                if (isExt) {
                    throw new SemanticException(ErrorMsg.CTAS_EXTTBL_COEXISTENCE.getMsg());
                }
                command_type = CTAS;
                if (plannerCtx != null) {
                    plannerCtx.setCTASToken(child);
                }
                selectStmt = child;
                break;
            case HiveParser.TOK_TABCOLLIST:
                cols = getColumns(child, true, ctx.getTokenRewriteStream(), primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints);
                break;
            case HiveParser.TOK_TABLECOMMENT:
                comment = unescapeSQLString(child.getChild(0).getText());
                break;
            case HiveParser.TOK_TABLEPARTCOLS:
                partCols = getColumns(child, false, ctx.getTokenRewriteStream(), primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints);
                if (hasConstraints(partCols, defaultConstraints, notNullConstraints, checkConstraints)) {
                    // TODO: these constraints should be supported for partition columns
                    throw new SemanticException(ErrorMsg.INVALID_CSTR_SYNTAX.getMsg("NOT NULL,DEFAULT and CHECK Constraints are not allowed with " + "partition columns. "));
                }
                break;
            case HiveParser.TOK_ALTERTABLE_BUCKETS:
                bucketCols = getColumnNames((ASTNode) child.getChild(0));
                if (child.getChildCount() == 2) {
                    numBuckets = Integer.parseInt(child.getChild(1).getText());
                } else {
                    sortCols = getColumnNamesOrder((ASTNode) child.getChild(1));
                    numBuckets = Integer.parseInt(child.getChild(2).getText());
                }
                break;
            case HiveParser.TOK_TABLEROWFORMAT:
                rowFormatParams.analyzeRowFormat(child);
                break;
            case HiveParser.TOK_TABLELOCATION:
                location = unescapeSQLString(child.getChild(0).getText());
                location = EximUtil.relativeToAbsolutePath(conf, location);
                inputs.add(toReadEntity(location));
                break;
            case HiveParser.TOK_TABLEPROPERTIES:
                tblProps = DDLSemanticAnalyzer.getProps((ASTNode) child.getChild(0));
                break;
            case HiveParser.TOK_TABLESERIALIZER:
                child = (ASTNode) child.getChild(0);
                storageFormat.setSerde(unescapeSQLString(child.getChild(0).getText()));
                if (child.getChildCount() == 2) {
                    readProps((ASTNode) (child.getChild(1).getChild(0)), storageFormat.getSerdeProps());
                }
                break;
            case HiveParser.TOK_TABLESKEWED:
                /*
                 * Throw an error if the user tries to use the DDL with
                 * hive.internal.ddl.list.bucketing.enable set to false.
                 */
                // skewed column names
                skewedColNames = analyzeSkewedTablDDLColNames(skewedColNames, child);
                // skewed value
                analyzeDDLSkewedValues(skewedValues, child);
                // stored as directories
                storedAsDirs = analyzeStoredAdDirs(child);
                break;
            default:
                throw new AssertionError("Unknown token: " + child.getToken());
        }
    }
    if (command_type == CREATE_TABLE || command_type == CTLT) {
        queryState.setCommandType(HiveOperation.CREATETABLE);
    } else if (command_type == CTAS) {
        queryState.setCommandType(HiveOperation.CREATETABLE_AS_SELECT);
    } else {
        throw new SemanticException("Unrecognized command.");
    }
    if (isExt && hasEnabledOrValidatedConstraints(notNullConstraints, defaultConstraints, checkConstraints)) {
        throw new SemanticException(ErrorMsg.INVALID_CSTR_SYNTAX.getMsg("Constraints are disallowed with External tables. " + "Only RELY is allowed."));
    }
    if (checkConstraints != null && !checkConstraints.isEmpty()) {
        validateCheckConstraint(cols, checkConstraints, ctx.getConf());
    }
    storageFormat.fillDefaultStorageFormat(isExt, false);
    // check for existence of table
    if (ifNotExists) {
        try {
            Table table = getTable(qualifiedTabName, false);
            if (table != null) {
                // table exists
                return null;
            }
        } catch (HiveException e) {
            // should not occur since the second parameter to getTable is false
            throw new IllegalStateException("Unexpected Exception thrown: " + e.getMessage(), e);
        }
    }
    if (isTemporary) {
        if (partCols.size() > 0) {
            throw new SemanticException("Partition columns are not supported on temporary tables");
        }
        if (location == null) {
            // it has the same life cycle as the tmp table
            try {
                // Generate a unique ID for temp table path.
                // This path will be fixed for the life of the temp table.
                Path path = new Path(SessionState.getTempTableSpace(conf), UUID.randomUUID().toString());
                path = Warehouse.getDnsPath(path, conf);
                location = path.toString();
            } catch (MetaException err) {
                throw new SemanticException("Error while generating temp table path:", err);
            }
        }
    }
    switch(command_type) {
        case CREATE_TABLE: // regular CREATE TABLE DDL
            tblProps = addDefaultProperties(tblProps, isExt, storageFormat, dbDotTab, sortCols, isMaterialization);
            addDbAndTabToOutputs(qualifiedTabName, TableType.MANAGED_TABLE, tblProps);
            CreateTableDesc crtTblDesc = new CreateTableDesc(dbDotTab, isExt, isTemporary, cols, partCols, bucketCols, sortCols, numBuckets, rowFormatParams.fieldDelim, rowFormatParams.fieldEscape, rowFormatParams.collItemDelim, rowFormatParams.mapKeyDelim, rowFormatParams.lineDelim, comment, storageFormat.getInputFormat(), storageFormat.getOutputFormat(), location, storageFormat.getSerde(), storageFormat.getStorageHandler(), storageFormat.getSerdeProps(), tblProps, ifNotExists, skewedColNames, skewedValues, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints);
            crtTblDesc.setStoredAsSubDirectories(storedAsDirs);
            crtTblDesc.setNullFormat(rowFormatParams.nullFormat);
            crtTblDesc.validate(conf);
            // outputs is empty, which means this create table happens in the current
            // database.
            rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), crtTblDesc)));
            break;
        case CTLT: // create table like <tbl_name>
            tblProps = addDefaultProperties(tblProps, isExt, storageFormat, dbDotTab, sortCols, isMaterialization);
            addDbAndTabToOutputs(qualifiedTabName, TableType.MANAGED_TABLE, tblProps);
            if (isTemporary) {
                Table likeTable = getTable(likeTableName, false);
                if (likeTable != null && likeTable.getPartCols().size() > 0) {
                    throw new SemanticException("Partition columns are not supported on temporary tables " + "and source table in CREATE TABLE LIKE is partitioned.");
                }
            }
            CreateTableLikeDesc crtTblLikeDesc = new CreateTableLikeDesc(dbDotTab, isExt, isTemporary, storageFormat.getInputFormat(), storageFormat.getOutputFormat(), location, storageFormat.getSerde(), storageFormat.getSerdeProps(), tblProps, ifNotExists, likeTableName, isUserStorageFormat);
            rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), crtTblLikeDesc)));
            break;
        case CTAS: // create table as select
            if (isTemporary) {
                if (!ctx.isExplainSkipExecution() && !isMaterialization) {
                    String dbName = qualifiedTabName[0];
                    String tblName = qualifiedTabName[1];
                    SessionState ss = SessionState.get();
                    if (ss == null) {
                        throw new SemanticException("No current SessionState, cannot create temporary table " + dbName + "." + tblName);
                    }
                    Map<String, Table> tables = SessionHiveMetaStoreClient.getTempTablesForDatabase(dbName);
                    if (tables != null && tables.containsKey(tblName)) {
                        throw new SemanticException("Temporary table " + dbName + "." + tblName + " already exists");
                    }
                }
            } else {
                // dumpTable is only used to check the conflict for non-temporary tables
                try {
                    Table dumpTable = db.newTable(dbDotTab);
                    if (null != db.getTable(dumpTable.getDbName(), dumpTable.getTableName(), false) && !ctx.isExplainSkipExecution()) {
                        throw new SemanticException(ErrorMsg.TABLE_ALREADY_EXISTS.getMsg(dbDotTab));
                    }
                } catch (HiveException e) {
                    throw new SemanticException(e);
                }
            }
            if (location != null && location.length() != 0) {
                Path locPath = new Path(location);
                FileSystem curFs = null;
                FileStatus locStats = null;
                try {
                    curFs = locPath.getFileSystem(conf);
                    if (curFs != null) {
                        locStats = curFs.getFileStatus(locPath);
                    }
                    if (locStats != null && locStats.isDir()) {
                        FileStatus[] lStats = curFs.listStatus(locPath);
                        if (lStats != null && lStats.length != 0) {
                            // Don't throw an exception if the target location only contains the staging-dirs
                            for (FileStatus lStat : lStats) {
                                if (!lStat.getPath().getName().startsWith(HiveConf.getVar(conf, HiveConf.ConfVars.STAGINGDIR))) {
                                    throw new SemanticException(ErrorMsg.CTAS_LOCATION_NONEMPTY.getMsg(location));
                                }
                            }
                        }
                    }
                } catch (FileNotFoundException nfe) {
                // we will create the folder if it does not exist.
                } catch (IOException ioE) {
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("Exception when validate folder ", ioE);
                    }
                }
            }
            tblProps = addDefaultProperties(tblProps, isExt, storageFormat, dbDotTab, sortCols, isMaterialization);
            addDbAndTabToOutputs(qualifiedTabName, TableType.MANAGED_TABLE, tblProps);
            tableDesc = new CreateTableDesc(qualifiedTabName[0], dbDotTab, isExt, isTemporary, cols, partCols, bucketCols, sortCols, numBuckets, rowFormatParams.fieldDelim, rowFormatParams.fieldEscape, rowFormatParams.collItemDelim, rowFormatParams.mapKeyDelim, rowFormatParams.lineDelim, comment, storageFormat.getInputFormat(), storageFormat.getOutputFormat(), location, storageFormat.getSerde(), storageFormat.getStorageHandler(), storageFormat.getSerdeProps(), tblProps, ifNotExists, skewedColNames, skewedValues, true, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints);
            tableDesc.setMaterialization(isMaterialization);
            tableDesc.setStoredAsSubDirectories(storedAsDirs);
            tableDesc.setNullFormat(rowFormatParams.nullFormat);
            qb.setTableDesc(tableDesc);
            return selectStmt;
        default:
            throw new SemanticException("Unrecognized command.");
    }
    return null;
}
Also used: SessionState (org.apache.hadoop.hive.ql.session.SessionState), HiveException (org.apache.hadoop.hive.ql.metadata.HiveException), FileStatus (org.apache.hadoop.fs.FileStatus), SQLForeignKey (org.apache.hadoop.hive.metastore.api.SQLForeignKey), LinkedHashMap (java.util.LinkedHashMap), HashMap (java.util.HashMap), FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema), ArrayList (java.util.ArrayList), FileNotFoundException (java.io.FileNotFoundException), SQLDefaultConstraint (org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint), SQLNotNullConstraint (org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint), CreateTableLikeDesc (org.apache.hadoop.hive.ql.plan.CreateTableLikeDesc), FileSystem (org.apache.hadoop.fs.FileSystem), LinkedList (java.util.LinkedList), List (java.util.List), HiveConf (org.apache.hadoop.hive.conf.HiveConf), CalciteSemanticException (org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException), MetaException (org.apache.hadoop.hive.metastore.api.MetaException), SQLUniqueConstraint (org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint), Order (org.apache.hadoop.hive.metastore.api.Order), Path (org.apache.hadoop.fs.Path), SQLPrimaryKey (org.apache.hadoop.hive.metastore.api.SQLPrimaryKey), SQLCheckConstraint (org.apache.hadoop.hive.metastore.api.SQLCheckConstraint), Table (org.apache.hadoop.hive.ql.metadata.Table), IOException (java.io.IOException), CheckConstraint (org.apache.hadoop.hive.ql.metadata.CheckConstraint), NotNullConstraint (org.apache.hadoop.hive.ql.metadata.NotNullConstraint), DefaultConstraint (org.apache.hadoop.hive.ql.metadata.DefaultConstraint), CreateTableDesc (org.apache.hadoop.hive.ql.plan.CreateTableDesc), DDLWork (org.apache.hadoop.hive.ql.plan.DDLWork)
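
A compact restatement of the coexistence rules the analyzer enforces, as a hypothetical standalone check; the Kind enum, validate method, and messages are illustrative and not part of SemanticAnalyzer.

public final class CreateTableRules {
    enum Kind { CREATE_TABLE, CTLT, CTAS } // regular, CREATE TABLE LIKE, CREATE TABLE AS SELECT

    static void validate(final Kind kind, final boolean hasColumnList,
                         final boolean hasPartitionCols, final boolean isExternal) {
        if (kind == Kind.CTLT && hasColumnList) {
            throw new IllegalArgumentException("CREATE TABLE LIKE cannot declare a column list");
        }
        if (kind == Kind.CTAS) {
            if (hasColumnList) {
                throw new IllegalArgumentException("CTAS cannot declare a column list");
            }
            if (hasPartitionCols) {
                throw new IllegalArgumentException("CTAS does not support partition columns (yet)");
            }
            if (isExternal) {
                throw new IllegalArgumentException("CTAS cannot create an EXTERNAL table");
            }
        }
    }

    public static void main(String[] args) {
        validate(Kind.CTAS, false, false, false); // plain CTAS: allowed
        try {
            validate(Kind.CTAS, false, true, false); // CTAS + PARTITIONED BY: rejected
        } catch (IllegalArgumentException expected) {
            System.out.println("Rejected: " + expected.getMessage());
        }
    }
}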

Aggregations

MetaException (org.apache.hadoop.hive.metastore.api.MetaException): 318
IOException (java.io.IOException): 123
ArrayList (java.util.ArrayList): 95
NoSuchObjectException (org.apache.hadoop.hive.metastore.api.NoSuchObjectException): 74
TException (org.apache.thrift.TException): 67
Table (org.apache.hadoop.hive.metastore.api.Table): 59
Partition (org.apache.hadoop.hive.metastore.api.Partition): 57
SQLException (java.sql.SQLException): 55
InvalidObjectException (org.apache.hadoop.hive.metastore.api.InvalidObjectException): 53
Path (org.apache.hadoop.fs.Path): 45
Connection (java.sql.Connection): 36
InvalidOperationException (org.apache.hadoop.hive.metastore.api.InvalidOperationException): 34
AlreadyExistsException (org.apache.hadoop.hive.metastore.api.AlreadyExistsException): 32
Statement (java.sql.Statement): 31
Test (org.junit.Test): 30
List (java.util.List): 25
Database (org.apache.hadoop.hive.metastore.api.Database): 25
FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema): 25
ResultSet (java.sql.ResultSet): 22
UnknownDBException (org.apache.hadoop.hive.metastore.api.UnknownDBException): 22