Example 6 with TableName

use of org.apache.hadoop.hive.common.TableName in project hive by apache.

the class DescTableAnalyzer method analyzeInternal.

@Override
public void analyzeInternal(ASTNode root) throws SemanticException {
    ctx.setResFile(ctx.getLocalTmpPath());
    ASTNode tableTypeExpr = (ASTNode) root.getChild(0);
    TableName tableName = getQualifiedTableName((ASTNode) tableTypeExpr.getChild(0));
    // if a database was explicitly specified, validate that it exists
    if (tableName.getDb() != null) {
        db.validateDatabaseExists(tableName.getDb());
    }
    Table table = getTable(tableName);
    // process the second child node, if it exists, to get the partition spec(s)
    Map<String, String> partitionSpec = getPartitionSpec(db, tableTypeExpr, tableName);
    if (partitionSpec != null) {
        // validate that partition exists
        PartitionUtils.getPartition(db, table, partitionSpec, true);
    }
    // process the third child node, if it exists, to get the column path
    String columnPath = getColumnPath(db, tableTypeExpr, tableName, partitionSpec);
    boolean showColStats = false;
    boolean isFormatted = false;
    boolean isExt = false;
    if (root.getChildCount() == 2) {
        int descOptions = root.getChild(1).getType();
        isFormatted = descOptions == HiveParser.KW_FORMATTED;
        isExt = descOptions == HiveParser.KW_EXTENDED;
        // This is how we can differentiate if we are describing a table or column.
        if (columnPath != null && isFormatted) {
            showColStats = true;
        }
    }
    inputs.add(new ReadEntity(table));
    DescTableDesc desc = new DescTableDesc(ctx.getResFile(), tableName, partitionSpec, columnPath, isExt, isFormatted);
    Task<?> task = TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc));
    rootTasks.add(task);
    task.setFetchSource(true);
    String schema = showColStats ? DescTableDesc.COLUMN_STATISTICS_SCHEMA : DescTableDesc.SCHEMA;
    setFetchTask(createFetchTask(schema));
}
Also used: ReadEntity(org.apache.hadoop.hive.ql.hooks.ReadEntity) TableName(org.apache.hadoop.hive.common.TableName) Table(org.apache.hadoop.hive.ql.metadata.Table) DDLWork(org.apache.hadoop.hive.ql.ddl.DDLWork) ASTNode(org.apache.hadoop.hive.ql.parse.ASTNode)
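
The null check on tableName.getDb() above fires only when the statement explicitly qualified the table with a database. Below is a minimal sketch of how a qualified versus a bare name resolves, assuming TableName.fromString splits on "." and falls back to the supplied defaults; the sales/orders names and the "default" database are illustrative only.

import org.apache.hadoop.hive.common.TableName;

public class TableNameResolutionDemo {
    public static void main(String[] args) {
        // An explicit "db.table" reference keeps its database.
        TableName qualified = TableName.fromString("sales.orders", null, "default");
        System.out.println(qualified.getDb());    // sales
        System.out.println(qualified.getTable()); // orders
        // A bare table name falls back to the supplied default database.
        TableName bare = TableName.fromString("orders", null, "default");
        System.out.println(bare.getDb());         // default
    }
}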

Example 7 with TableName

use of org.apache.hadoop.hive.common.TableName in project hive by apache.

the class DescTableOperation method getColumnDataColPathSpecified.

private void getColumnDataColPathSpecified(Table table, Partition part, List<FieldSchema> cols, List<ColumnStatisticsObj> colStats, Deserializer deserializer) throws SemanticException, HiveException, MetaException {
    // when a column name is specified in the DESCRIBE TABLE DDL, colPath has the form db_name.table_name.column_name
    String colName = desc.getColumnPath().split("\\.")[2];
    List<String> colNames = Lists.newArrayList(colName.toLowerCase());
    TableName tableName = HiveTableName.of(desc.getDbTableName());
    if (null == part) {
        if (table.isPartitioned()) {
            Map<String, String> tableProps = table.getParameters() == null ? new HashMap<String, String>() : table.getParameters();
            if (table.isPartitionKey(colNames.get(0))) {
                getColumnDataForPartitionKeyColumn(table, cols, colStats, colNames, tableProps);
            } else {
                getColumnsForNotPartitionKeyColumn(cols, colStats, deserializer, colNames, tableName, tableProps);
            }
            table.setParameters(tableProps);
        } else {
            cols.addAll(Hive.getFieldsFromDeserializer(desc.getColumnPath(), deserializer));
            colStats.addAll(context.getDb().getTableColumnStatistics(tableName.getDb().toLowerCase(), tableName.getTable().toLowerCase(), colNames, false));
        }
    } else {
        List<String> partitions = new ArrayList<String>();
        // The partition name is converted to lowercase before generating the stats. So we should use the same
        // lower case name to get the stats.
        String partName = HMSHandler.lowerCaseConvertPartName(part.getName());
        partitions.add(partName);
        cols.addAll(Hive.getFieldsFromDeserializer(desc.getColumnPath(), deserializer));
        Map<String, List<ColumnStatisticsObj>> partitionColumnStatistics = context.getDb().getPartitionColumnStatistics(tableName.getDb().toLowerCase(), tableName.getTable().toLowerCase(), partitions, colNames, false);
        List<ColumnStatisticsObj> partitionColStat = partitionColumnStatistics.get(partName);
        if (partitionColStat != null) {
            colStats.addAll(partitionColStat);
        }
    }
}
Also used: TableName(org.apache.hadoop.hive.common.TableName) HiveTableName(org.apache.hadoop.hive.ql.parse.HiveTableName) ColumnStatisticsObj(org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj) ArrayList(java.util.ArrayList) List(java.util.List)
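
As the first comment notes, the column path arrives in the form db_name.table_name.column_name, and statistics are stored under lower-cased names. The following self-contained sketch mirrors that parsing; the sales/orders/amount names are hypothetical stand-ins for real inputs.

import java.util.List;
import com.google.common.collect.Lists;
import org.apache.hadoop.hive.common.TableName;
import org.apache.hadoop.hive.ql.parse.HiveTableName;
import org.apache.hadoop.hive.ql.parse.SemanticException;

public class ColumnPathDemo {
    public static void main(String[] args) throws SemanticException {
        String dbTableName = "sales.orders";
        String columnPath = "sales.orders.amount"; // db_name.table_name.column_name
        TableName tableName = HiveTableName.of(dbTableName);
        // Stats lookups use lower-cased names, matching the operation above.
        String colName = columnPath.split("\\.")[2];
        List<String> colNames = Lists.newArrayList(colName.toLowerCase());
        System.out.println(tableName.getDb() + "." + tableName.getTable() + " -> " + colNames);
    }
}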

Example 8 with TableName

use of org.apache.hadoop.hive.common.TableName in project hive by apache.

the class DropTableOperation method execute.

@Override
public int execute() throws HiveException {
    Table table = getTable();
    if (table == null) {
        // dropping a non-existent table is handled by DropTableAnalyzer
        return 0;
    }
    if (desc.getValidationRequired()) {
        if (table.isView() || table.isMaterializedView()) {
            if (desc.isIfExists()) {
                return 0;
            } else if (table.isView()) {
                throw new HiveException("Cannot drop a view with DROP TABLE");
            } else {
                throw new HiveException("Cannot drop a materialized view with DROP TABLE");
            }
        }
    }
    ReplicationSpec replicationSpec = desc.getReplicationSpec();
    if (replicationSpec.isInReplicationScope()) {
        /**
         * DROP TABLE FOR REPLICATION behaves differently from DROP TABLE IF EXISTS - it more closely
         * matches a DROP TABLE IF OLDER THAN(x) semantic.
         *
         * Ideally, commands executed under the scope of replication need to be idempotent and resilient
         * to repeats. What can happen, sometimes, is that a drone processing a replication task can
         * have been abandoned for not returning in time, but still execute its task after a while,
         * which should not result in it mucking up data that has been impressed later on. So, for example,
         * if we create partition P1, followed by dropping it, followed by creating it yet again,
         * the replication of that drop should not drop the newer partition if it runs after the destination
         * object is already in the newer state.
         *
         * Thus, we check replicationSpec.allowEventReplacementInto to determine whether or not we can
         * drop the object in question (it will return false if the object is newer than the event, true if not).
         *
         * In addition, since DROP TABLE FOR REPLICATION can result in a table not being dropped, while DROP
         * TABLE will always drop the table, and the included partitions, DROP TABLE FOR REPLICATION must
         * do one more thing - if it does not drop the table because the table is in a newer state, it must
         * drop the partitions inside it that are older than this event. To wit, DROP TABLE FOR REPL
         * acts like a recursive DROP TABLE IF OLDER.
         */
        Map<String, String> dbParams = context.getDb().getDatabase(table.getDbName()).getParameters();
        if (!replicationSpec.allowEventReplacementInto(dbParams)) {
            // the table itself is newer than this event, so do not drop it; instead drop
            // any partitions inside it that are older than this event.
            if (table.isPartitioned()) {
                PartitionIterable partitions = new PartitionIterable(context.getDb(), table, null, MetastoreConf.getIntVar(context.getConf(), MetastoreConf.ConfVars.BATCH_RETRIEVE_MAX));
                for (Partition p : partitions) {
                    if (replicationSpec.allowEventReplacementInto(dbParams)) {
                        context.getDb().dropPartition(table.getDbName(), table.getTableName(), p.getValues(), true);
                    }
                }
            }
            LOG.debug("DDLTask: Drop Table is skipped as table {} is newer than update", desc.getTableName());
            // table is newer, leave it be.
            return 0;
        }
    }
    // TODO: API w/catalog name
    context.getDb().dropTable(table, desc.isPurge());
    DDLUtils.addIfAbsentByName(new WriteEntity(table, WriteEntity.WriteType.DDL_NO_LOCK), context);
    if (LlapHiveUtils.isLlapMode(context.getConf())) {
        TableName tableName = HiveTableName.of(table);
        ProactiveEviction.Request.Builder llapEvictRequestBuilder = ProactiveEviction.Request.Builder.create();
        llapEvictRequestBuilder.addTable(tableName.getDb(), tableName.getTable());
        ProactiveEviction.evict(context.getConf(), llapEvictRequestBuilder.build());
    }
    return 0;
}
Also used: Partition(org.apache.hadoop.hive.ql.metadata.Partition) HiveTableName(org.apache.hadoop.hive.ql.parse.HiveTableName) TableName(org.apache.hadoop.hive.common.TableName) ReplicationSpec(org.apache.hadoop.hive.ql.parse.ReplicationSpec) PartitionIterable(org.apache.hadoop.hive.ql.metadata.PartitionIterable) Table(org.apache.hadoop.hive.ql.metadata.Table) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) WriteEntity(org.apache.hadoop.hive.ql.hooks.WriteEntity)
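
The javadoc's "DROP TABLE IF OLDER THAN(x)" idea boils down to comparing the object's last replicated state against the incoming event. The sketch below is a simplified, hypothetical version of such a guard: the real check lives in ReplicationSpec.allowEventReplacementInto, and the "repl.last.id" parameter key and numeric comparison here are assumptions made for illustration.

import java.util.Map;

public class EventReplacementGuard {
    // Returns true only if the object's state predates the event, mirroring the
    // "will return false if the object is newer than the event" contract above.
    static boolean allowReplacementInto(Map<String, String> objectParams, long eventId) {
        String lastReplId = objectParams.get("repl.last.id"); // assumed parameter key
        if (lastReplId == null) {
            return true; // no replication state recorded: safe to replace
        }
        return Long.parseLong(lastReplId) < eventId;
    }

    public static void main(String[] args) {
        System.out.println(allowReplacementInto(Map.of("repl.last.id", "100"), 90));  // false: object is newer
        System.out.println(allowReplacementInto(Map.of("repl.last.id", "100"), 110)); // true: event is newer
    }
}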

Example 9 with TableName

use of org.apache.hadoop.hive.common.TableName in project hive by apache.

the class ShowTablePropertiesAnalyzer method analyzeInternal.

@Override
public void analyzeInternal(ASTNode root) throws SemanticException {
    ctx.setResFile(ctx.getLocalTmpPath());
    TableName tableName = getQualifiedTableName((ASTNode) root.getChild(0));
    String propertyName = (root.getChildCount() > 1) ? unescapeSQLString(root.getChild(1).getText()) : null;
    // validate that table exists
    getTable(tableName);
    ShowTablePropertiesDesc desc = new ShowTablePropertiesDesc(ctx.getResFile().toString(), tableName, propertyName);
    Task<DDLWork> task = TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc));
    rootTasks.add(task);
    task.setFetchSource(true);
    setFetchTask(createFetchTask(ShowTablePropertiesDesc.SCHEMA));
}
Also used: TableName(org.apache.hadoop.hive.common.TableName) DDLWork(org.apache.hadoop.hive.ql.ddl.DDLWork)
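
The childCount check above is what distinguishes SHOW TBLPROPERTIES tbl from SHOW TBLPROPERTIES tbl('key'). A toy sketch of that optional-argument handling follows; the quote stripping is a crude stand-in for unescapeSQLString, and the table and key names are made up.

public class ShowTblPropertiesShapes {
    public static void main(String[] args) {
        // SHOW TBLPROPERTIES sales.orders;            -> no property child, all properties
        System.out.println(propertyNameOf("sales.orders"));              // null
        // SHOW TBLPROPERTIES sales.orders('comment'); -> one quoted property child
        System.out.println(propertyNameOf("sales.orders", "'comment'")); // comment
    }

    static String propertyNameOf(String table, String... property) {
        return property.length > 0
                ? property[0].substring(1, property[0].length() - 1) // strip the quotes
                : null;
    }
}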

Example 10 with TableName

use of org.apache.hadoop.hive.common.TableName in project hive by apache.

the class AbstractAlterTableAnalyzer method analyzeInternal.

@Override
public void analyzeInternal(ASTNode root) throws SemanticException {
    TableName tableName = getQualifiedTableName((ASTNode) root.getChild(0), MetaStoreUtils.getDefaultCatalog(conf));
    ASTNode command = (ASTNode) root.getChild(1);
    Map<String, String> partitionSpec = null;
    ASTNode partitionSpecNode = (ASTNode) root.getChild(2);
    if (partitionSpecNode != null) {
        // ALTER TABLE ... PARTITION ... RENAME may be used to normalize legacy partition column
        // values, so we should not validate the old partition spec passed in this command.
        if (command.getType() == HiveParser.TOK_ALTERTABLE_RENAMEPART) {
            partitionSpec = getPartSpec(partitionSpecNode);
        } else {
            partitionSpec = getValidatedPartSpec(getTable(tableName), partitionSpecNode, conf, false);
        }
    }
    analyzeCommand(tableName, partitionSpec, command);
}
Also used: TableName(org.apache.hadoop.hive.common.TableName) ASTNode(org.apache.hadoop.hive.ql.parse.ASTNode)
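
Unlike the earlier analyzers, this one resolves the name against a default catalog taken from the configuration. The sketch below builds a fully qualified TableName directly with the (catalog, database, table) constructor; the "hive" catalog and the sales/orders names are assumptions for illustration.

import org.apache.hadoop.hive.common.TableName;

public class CatalogQualifiedNameDemo {
    public static void main(String[] args) {
        TableName name = new TableName("hive", "sales", "orders");
        System.out.println(name.getCat());   // hive
        System.out.println(name.getDb());    // sales
        System.out.println(name.getTable()); // orders
    }
}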

Aggregations (number of usages per class)

TableName (org.apache.hadoop.hive.common.TableName)47 DDLWork (org.apache.hadoop.hive.ql.ddl.DDLWork)22 ArrayList (java.util.ArrayList)16 SemanticException (org.apache.hadoop.hive.ql.parse.SemanticException)15 Table (org.apache.hadoop.hive.ql.metadata.Table)14 FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema)11 MetaException (org.apache.hadoop.hive.metastore.api.MetaException)11 HiveTableName (org.apache.hadoop.hive.ql.parse.HiveTableName)10 HashMap (java.util.HashMap)9 HiveException (org.apache.hadoop.hive.ql.metadata.HiveException)9 HashSet (java.util.HashSet)8 IOException (java.io.IOException)7 List (java.util.List)7 Path (org.apache.hadoop.fs.Path)7 SQLCheckConstraint (org.apache.hadoop.hive.metastore.api.SQLCheckConstraint)7 SQLDefaultConstraint (org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint)7 SQLNotNullConstraint (org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint)7 SQLUniqueConstraint (org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint)7 ASTNode (org.apache.hadoop.hive.ql.parse.ASTNode)7 SourceTable (org.apache.hadoop.hive.metastore.api.SourceTable)6