
Example 41 with TableName

Use of org.apache.hadoop.hive.common.TableName in project hive by apache.

From class ConstraintsUtils, method processForeignKeys:

public static void processForeignKeys(TableName tableName, ASTNode node, List<SQLForeignKey> foreignKeys) throws SemanticException {
    // The ANTLR grammar looks like :
    // 1.  KW_CONSTRAINT idfr=identifier KW_FOREIGN KW_KEY fkCols=columnParenthesesList
    // KW_REFERENCES tabName=tableName parCols=columnParenthesesList
    // enableSpec=enableSpecification validateSpec=validateSpecification relySpec=relySpecification
    // -> ^(TOK_FOREIGN_KEY $idfr $fkCols $tabName $parCols $relySpec $enableSpec $validateSpec)
    // when the user specifies the constraint name (i.e. child.getChildCount() == 7)
    // 2.  KW_FOREIGN KW_KEY fkCols=columnParenthesesList
    // KW_REFERENCES tabName=tableName parCols=columnParenthesesList
    // enableSpec=enableSpecification validateSpec=validateSpecification relySpec=relySpecification
    // -> ^(TOK_FOREIGN_KEY $fkCols  $tabName $parCols $relySpec $enableSpec $validateSpec)
    // when the user does not specify the constraint name (i.e. child.getChildCount() == 6)
    String constraintName = null;
    boolean enable = true;
    boolean validate = true;
    boolean rely = false;
    int fkIndex = -1;
    for (int i = 0; i < node.getChildCount(); i++) {
        ASTNode grandChild = (ASTNode) node.getChild(i);
        int type = grandChild.getToken().getType();
        if (type == HiveParser.TOK_CONSTRAINT_NAME) {
            constraintName = BaseSemanticAnalyzer.unescapeIdentifier(grandChild.getChild(0).getText().toLowerCase());
        } else if (type == HiveParser.TOK_ENABLE) {
            enable = true;
            // validate is true by default if we enable the constraint
            validate = true;
        } else if (type == HiveParser.TOK_DISABLE) {
            enable = false;
            // validate is false by default if we disable the constraint
            validate = false;
        } else if (type == HiveParser.TOK_VALIDATE) {
            validate = true;
        } else if (type == HiveParser.TOK_NOVALIDATE) {
            validate = false;
        } else if (type == HiveParser.TOK_RELY) {
            rely = true;
        } else if (type == HiveParser.TOK_TABCOLNAME && fkIndex == -1) {
            fkIndex = i;
        }
    }
    if (enable) {
        throw new SemanticException(ErrorMsg.INVALID_FK_SYNTAX.getMsg("ENABLE feature not supported yet. " + "Please use DISABLE instead."));
    }
    if (validate) {
        throw new SemanticException(ErrorMsg.INVALID_FK_SYNTAX.getMsg("VALIDATE feature not supported yet. " + "Please use NOVALIDATE instead."));
    }
    int ptIndex = fkIndex + 1;
    int pkIndex = ptIndex + 1;
    if (node.getChild(fkIndex).getChildCount() != node.getChild(pkIndex).getChildCount()) {
        throw new SemanticException(ErrorMsg.INVALID_FK_SYNTAX.getMsg(" The number of foreign key columns should be same as number of parent key columns "));
    }
    TableName parentTblName = BaseSemanticAnalyzer.getQualifiedTableName((ASTNode) node.getChild(ptIndex));
    for (int j = 0; j < node.getChild(fkIndex).getChildCount(); j++) {
        SQLForeignKey sqlForeignKey = new SQLForeignKey();
        sqlForeignKey.setFktable_db(tableName.getDb());
        sqlForeignKey.setFktable_name(tableName.getTable());
        Tree fkgrandChild = node.getChild(fkIndex).getChild(j);
        BaseSemanticAnalyzer.checkColumnName(fkgrandChild.getText());
        sqlForeignKey.setFkcolumn_name(BaseSemanticAnalyzer.unescapeIdentifier(fkgrandChild.getText().toLowerCase()));
        sqlForeignKey.setPktable_db(parentTblName.getDb());
        sqlForeignKey.setPktable_name(parentTblName.getTable());
        Tree pkgrandChild = node.getChild(pkIndex).getChild(j);
        sqlForeignKey.setPkcolumn_name(BaseSemanticAnalyzer.unescapeIdentifier(pkgrandChild.getText().toLowerCase()));
        sqlForeignKey.setKey_seq(j + 1);
        sqlForeignKey.setFk_name(constraintName);
        sqlForeignKey.setEnable_cstr(enable);
        sqlForeignKey.setValidate_cstr(validate);
        sqlForeignKey.setRely_cstr(rely);
        foreignKeys.add(sqlForeignKey);
    }
}
Also used: TableName (org.apache.hadoop.hive.common.TableName), SQLForeignKey (org.apache.hadoop.hive.metastore.api.SQLForeignKey), ASTNode (org.apache.hadoop.hive.ql.parse.ASTNode), Tree (org.antlr.runtime.tree.Tree), SQLCheckConstraint (org.apache.hadoop.hive.metastore.api.SQLCheckConstraint), SQLNotNullConstraint (org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint), SQLUniqueConstraint (org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint), SQLDefaultConstraint (org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint), SemanticException (org.apache.hadoop.hive.ql.parse.SemanticException)
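
To show how this might be wired up, here is a minimal, hypothetical call-site sketch (not from the Hive sources): fkNode is assumed to be a TOK_FOREIGN_KEY subtree already produced by HiveParser, the table names are invented, and the ConstraintsUtils import path is assumed from recent Hive releases.

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hive.common.TableName;
import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
import org.apache.hadoop.hive.ql.ddl.table.constraint.ConstraintsUtils;
import org.apache.hadoop.hive.ql.parse.ASTNode;
import org.apache.hadoop.hive.ql.parse.HiveTableName;
import org.apache.hadoop.hive.ql.parse.SemanticException;

public class ForeignKeySketch {
    // fkNode: assumed to be the TOK_FOREIGN_KEY subtree for a constraint such as
    // FOREIGN KEY (customer_id) REFERENCES customers(id) DISABLE NOVALIDATE RELY
    // (ENABLE/VALIDATE would make processForeignKeys throw, as shown above).
    static List<SQLForeignKey> collectForeignKeys(ASTNode fkNode) throws SemanticException {
        // The child (referencing) table; the names here are illustrative only.
        TableName childTable = HiveTableName.ofNullable("orders", "sales_db");
        List<SQLForeignKey> foreignKeys = new ArrayList<>();
        ConstraintsUtils.processForeignKeys(childTable, fkNode, foreignKeys);
        // One SQLForeignKey per column pair, carrying FK/PK table and column
        // names plus the RELY/ENABLE/VALIDATE flags parsed above.
        return foreignKeys;
    }
}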

Example 42 with TableName

Use of org.apache.hadoop.hive.common.TableName in project hive by apache.

From class SemanticAnalyzer, method setStatsForNonNativeTable:

private void setStatsForNonNativeTable(String dbName, String tableName) throws SemanticException {
    TableName qTableName = HiveTableName.ofNullable(tableName, dbName);
    Map<String, String> mapProp = new HashMap<>();
    mapProp.put(StatsSetupConst.COLUMN_STATS_ACCURATE, null);
    AlterTableUnsetPropertiesDesc alterTblDesc = new AlterTableUnsetPropertiesDesc(qTableName, null, null, false, mapProp, false, null);
    this.rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTblDesc)));
}
Also used: TableName (org.apache.hadoop.hive.common.TableName), DDLWork (org.apache.hadoop.hive.ql.ddl.DDLWork), LinkedHashMap (java.util.LinkedHashMap), HashMap (java.util.HashMap), AlterTableUnsetPropertiesDesc (org.apache.hadoop.hive.ql.ddl.table.misc.properties.AlterTableUnsetPropertiesDesc)
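
A minimal sketch of the same qualify-then-unset pattern, with hypothetical database and table names; note that mapping a property to null is what marks it for removal by the unset-properties DDL.

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.hive.common.StatsSetupConst;
import org.apache.hadoop.hive.common.TableName;
import org.apache.hadoop.hive.ql.parse.HiveTableName;
import org.apache.hadoop.hive.ql.parse.SemanticException;

public class UnsetStatsSketch {
    public static void main(String[] args) throws SemanticException {
        // Qualify a bare table name with its database, as the method above does
        // (note the (tableName, dbName) argument order of ofNullable).
        TableName qTableName = HiveTableName.ofNullable("page_views", "web_db");
        System.out.println(qTableName.getDb() + "." + qTableName.getTable()); // web_db.page_views

        // A null value flags COLUMN_STATS_ACCURATE for removal.
        Map<String, String> mapProp = new HashMap<>();
        mapProp.put(StatsSetupConst.COLUMN_STATS_ACCURATE, null);
        System.out.println(mapProp);
    }
}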

Example 43 with TableName

Use of org.apache.hadoop.hive.common.TableName in project hive by apache.

From class MsckOperation, method execute:

@Override
public int execute() throws HiveException, IOException, TException {
    try {
        Msck msck = new Msck(false, false);
        msck.init(Msck.getMsckConf(context.getDb().getConf()));
        msck.updateExpressionProxy(getProxyClass(context.getDb().getConf()));
        TableName tableName = HiveTableName.of(desc.getTableName());
        long partitionExpirySeconds = -1L;
        try (HiveMetaStoreClient msc = new HiveMetaStoreClient(context.getConf())) {
            boolean msckEnablePartitionRetention = MetastoreConf.getBoolVar(context.getConf(), MetastoreConf.ConfVars.MSCK_REPAIR_ENABLE_PARTITION_RETENTION);
            if (msckEnablePartitionRetention) {
                Table table = msc.getTable(SessionState.get().getCurrentCatalog(), tableName.getDb(), tableName.getTable());
                String qualifiedTableName = Warehouse.getCatalogQualifiedTableName(table);
                partitionExpirySeconds = PartitionManagementTask.getRetentionPeriodInSeconds(table);
                LOG.info("{} - Retention period ({}s) for partition is enabled for MSCK REPAIR..", qualifiedTableName, partitionExpirySeconds);
            }
        }
        MsckInfo msckInfo = new MsckInfo(SessionState.get().getCurrentCatalog(), tableName.getDb(), tableName.getTable(), desc.getFilterExp(), desc.getResFile(), desc.isRepairPartitions(), desc.isAddPartitions(), desc.isDropPartitions(), partitionExpirySeconds);
        return msck.repair(msckInfo);
    } catch (MetaException e) {
        LOG.error("Unable to create msck instance.", e);
        return 1;
    } catch (SemanticException e) {
        LOG.error("Msck failed.", e);
        return 1;
    }
}
Also used: HiveTableName (org.apache.hadoop.hive.ql.parse.HiveTableName), TableName (org.apache.hadoop.hive.common.TableName), MsckInfo (org.apache.hadoop.hive.metastore.MsckInfo), HiveMetaStoreClient (org.apache.hadoop.hive.metastore.HiveMetaStoreClient), Table (org.apache.hadoop.hive.metastore.api.Table), Msck (org.apache.hadoop.hive.metastore.Msck), MetaException (org.apache.hadoop.hive.metastore.api.MetaException), SemanticException (org.apache.hadoop.hive.ql.parse.SemanticException)
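
As a smaller illustration, HiveTableName.of(String) resolves a (possibly db-qualified) name string into the TableName whose getDb()/getTable() accessors feed the metastore calls above; the names below are hypothetical, and the usual db.table splitting is assumed.

import org.apache.hadoop.hive.common.TableName;
import org.apache.hadoop.hive.ql.parse.HiveTableName;
import org.apache.hadoop.hive.ql.parse.SemanticException;

public class MsckNameSketch {
    public static void main(String[] args) throws SemanticException {
        // "db.table" input; both parts come back through the TableName accessors.
        TableName t = HiveTableName.of("warehouse_db.inventory");
        System.out.println(t.getDb());    // warehouse_db
        System.out.println(t.getTable()); // inventory
    }
}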

Example 44 with TableName

Use of org.apache.hadoop.hive.common.TableName in project hive by apache.

From class TruncateTableAnalyzer, method analyzeInternal:

@Override
public void analyzeInternal(ASTNode root) throws SemanticException {
    // TOK_TABLE_PARTITION
    ASTNode tableNode = (ASTNode) root.getChild(0);
    String tableNameString = getUnescapedName((ASTNode) tableNode.getChild(0));
    Table table = getTable(tableNameString, true);
    TableName tableName = HiveTableName.of(table);
    checkTruncateEligibility(root, tableNode, tableNameString, table);
    Map<String, String> partitionSpec = getPartSpec((ASTNode) tableNode.getChild(1));
    addTruncateTableOutputs(tableNode, table, partitionSpec);
    Task<?> truncateTask = null;
    ASTNode colNamesNode = (ASTNode) root.getFirstChildWithType(HiveParser.TOK_TABCOLNAME);
    if (colNamesNode == null) {
        truncateTask = getTruncateTaskWithoutColumnNames(tableName, partitionSpec, table);
    } else {
        truncateTask = getTruncateTaskWithColumnNames(tableNode, tableName, table, partitionSpec, colNamesNode);
    }
    rootTasks.add(truncateTask);
}
Also used: TableName (org.apache.hadoop.hive.common.TableName), HiveTableName (org.apache.hadoop.hive.ql.parse.HiveTableName), Table (org.apache.hadoop.hive.ql.metadata.Table), ASTNode (org.apache.hadoop.hive.ql.parse.ASTNode)
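
For reference, a sketch of just the Table-to-TableName step above; constructing a ql.metadata.Table directly like this is hypothetical outside of tests, and the (dbName, tableName) constructor is assumed.

import org.apache.hadoop.hive.common.TableName;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.ql.parse.HiveTableName;
import org.apache.hadoop.hive.ql.parse.SemanticException;

public class TruncateNameSketch {
    public static void main(String[] args) throws SemanticException {
        // HiveTableName.of(Table) reads the database and table name
        // straight off the metadata object.
        Table table = new Table("reports_db", "daily_sales");
        TableName tableName = HiveTableName.of(table);
        System.out.println(tableName.getDb() + "." + tableName.getTable()); // reports_db.daily_sales
    }
}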

Example 45 with TableName

Use of org.apache.hadoop.hive.common.TableName in project hive by apache.

From class AlterTableDropPartitionOperation, method dropPartitions:

private void dropPartitions(boolean isRepl) throws HiveException {
    // ifExists is currently verified in AlterTableDropPartitionAnalyzer
    TableName tableName = HiveTableName.of(desc.getTableName());
    List<Pair<Integer, byte[]>> partitionExpressions = new ArrayList<>(desc.getPartSpecs().size());
    for (AlterTableDropPartitionDesc.PartitionDesc partSpec : desc.getPartSpecs()) {
        partitionExpressions.add(Pair.of(partSpec.getPrefixLength(), SerializationUtilities.serializeExpressionToKryo(partSpec.getPartSpec())));
    }
    PartitionDropOptions options = PartitionDropOptions.instance().deleteData(desc.getDeleteData()).ifExists(true).purgeData(desc.getIfPurge());
    List<Partition> droppedPartitions = context.getDb().dropPartitions(tableName.getDb(), tableName.getTable(), partitionExpressions, options);
    if (isRepl) {
        LOG.info("Dropped {} partitions for replication.", droppedPartitions.size());
        // If replaying an event, we need not bother with the further steps; we can return from here.
        return;
    }
    ProactiveEviction.Request.Builder llapEvictRequestBuilder = LlapHiveUtils.isLlapMode(context.getConf()) ? ProactiveEviction.Request.Builder.create() : null;
    for (Partition partition : droppedPartitions) {
        context.getConsole().printInfo("Dropped the partition " + partition.getName());
        // We have already locked the table, don't lock the partitions.
        DDLUtils.addIfAbsentByName(new WriteEntity(partition, WriteEntity.WriteType.DDL_NO_LOCK), context);
        if (llapEvictRequestBuilder != null) {
            llapEvictRequestBuilder.addPartitionOfATable(tableName.getDb(), tableName.getTable(), partition.getSpec());
        }
    }
    if (llapEvictRequestBuilder != null) {
        ProactiveEviction.evict(context.getConf(), llapEvictRequestBuilder.build());
    }
}
Also used: HiveTableName (org.apache.hadoop.hive.ql.parse.HiveTableName), TableName (org.apache.hadoop.hive.common.TableName), Partition (org.apache.hadoop.hive.ql.metadata.Partition), PartitionDropOptions (org.apache.hadoop.hive.metastore.PartitionDropOptions), ArrayList (java.util.ArrayList), WriteEntity (org.apache.hadoop.hive.ql.hooks.WriteEntity), Pair (org.apache.commons.lang3.tuple.Pair)
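
The builder-style PartitionDropOptions used above also reads well in isolation; a short sketch with illustrative flag values:

import org.apache.hadoop.hive.metastore.PartitionDropOptions;

public class DropOptionsSketch {
    public static void main(String[] args) {
        PartitionDropOptions options = PartitionDropOptions.instance()
                .deleteData(true)   // also remove the partition's data files
                .ifExists(true)     // do not fail when the partition is already gone
                .purgeData(false);  // false: deleted data can go to trash rather than being purged
        System.out.println("drop options built: " + options);
    }
}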

Aggregations

TableName (org.apache.hadoop.hive.common.TableName): 47
DDLWork (org.apache.hadoop.hive.ql.ddl.DDLWork): 22
ArrayList (java.util.ArrayList): 16
SemanticException (org.apache.hadoop.hive.ql.parse.SemanticException): 15
Table (org.apache.hadoop.hive.ql.metadata.Table): 14
FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema): 11
MetaException (org.apache.hadoop.hive.metastore.api.MetaException): 11
HiveTableName (org.apache.hadoop.hive.ql.parse.HiveTableName): 10
HashMap (java.util.HashMap): 9
HiveException (org.apache.hadoop.hive.ql.metadata.HiveException): 9
HashSet (java.util.HashSet): 8
IOException (java.io.IOException): 7
List (java.util.List): 7
Path (org.apache.hadoop.fs.Path): 7
SQLCheckConstraint (org.apache.hadoop.hive.metastore.api.SQLCheckConstraint): 7
SQLDefaultConstraint (org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint): 7
SQLNotNullConstraint (org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint): 7
SQLUniqueConstraint (org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint): 7
ASTNode (org.apache.hadoop.hive.ql.parse.ASTNode): 7
SourceTable (org.apache.hadoop.hive.metastore.api.SourceTable): 6
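
Finally, a minimal sketch of TableName itself, the type these examples revolve around; the catalog/database/table values are invented and the three-argument constructor is assumed here.

import org.apache.hadoop.hive.common.TableName;

public class TableNameSketch {
    public static void main(String[] args) {
        // A (catalog, database, table) triple; Hive's default catalog is "hive".
        TableName name = new TableName("hive", "sales_db", "orders");
        System.out.println(name.getCat());   // hive
        System.out.println(name.getDb());    // sales_db
        System.out.println(name.getTable()); // orders
    }
}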