
Example 86 with ReadEntity

Use of org.apache.hadoop.hive.ql.hooks.ReadEntity in project hive by apache.

In the class DDLSemanticAnalyzer, the method analyzeShowColumns:

private void analyzeShowColumns(ASTNode ast) throws SemanticException {
    // the table name has to be present, so there is a minimum of 1 and a maximum of 4 children
    if (ast.getChildCount() > 4 || ast.getChildCount() < 1) {
        throw new SemanticException(ErrorMsg.INVALID_AST_TREE.getMsg(ast.toStringTree()));
    }
    String tableName = getUnescapedName((ASTNode) ast.getChild(0));
    ShowColumnsDesc showColumnsDesc = null;
    String pattern = null;
    switch(ast.getChildCount()) {
        case 1: // only tablename, no pattern and no db
            showColumnsDesc = new ShowColumnsDesc(ctx.getResFile(), tableName);
            break;
        case 2: // tablename and pattern
            pattern = unescapeSQLString(ast.getChild(1).getText());
            showColumnsDesc = new ShowColumnsDesc(ctx.getResFile(), tableName, pattern);
            break;
        case 3: // specifies db
            if (tableName.contains(".")) {
                throw new SemanticException("Duplicates declaration for database name");
            }
            tableName = getUnescapedName((ASTNode) ast.getChild(2)) + "." + tableName;
            showColumnsDesc = new ShowColumnsDesc(ctx.getResFile(), tableName);
            break;
        case 4: // specifies db and pattern
            if (tableName.contains(".")) {
                throw new SemanticException("Duplicates declaration for database name");
            }
            tableName = getUnescapedName((ASTNode) ast.getChild(2)) + "." + tableName;
            pattern = unescapeSQLString(ast.getChild(3).getText());
            showColumnsDesc = new ShowColumnsDesc(ctx.getResFile(), tableName, pattern);
            break;
        default:
            break;
    }
    Table tab = getTable(tableName);
    inputs.add(new ReadEntity(tab));
    rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), showColumnsDesc)));
    setFetchTask(createFetchTask(showColumnsDesc.getSchema()));
}
Also used: ReadEntity (org.apache.hadoop.hive.ql.hooks.ReadEntity), Table (org.apache.hadoop.hive.ql.metadata.Table), DDLWork (org.apache.hadoop.hive.ql.plan.DDLWork), ShowColumnsDesc (org.apache.hadoop.hive.ql.plan.ShowColumnsDesc)
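
The ReadEntity added to inputs here is what Hive's hook and authorization machinery later sees. Below is a minimal sketch, assuming a hook registered via hive.exec.pre.hooks, that logs every table-typed ReadEntity of a query; the class name LogReadEntitiesHook is made up for illustration, while ExecuteWithHookContext, HookContext and Entity.Type are existing Hive hook APIs.

import java.util.Set;

import org.apache.hadoop.hive.ql.hooks.Entity;
import org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext;
import org.apache.hadoop.hive.ql.hooks.HookContext;
import org.apache.hadoop.hive.ql.hooks.ReadEntity;

public class LogReadEntitiesHook implements ExecuteWithHookContext {

    @Override
    public void run(HookContext hookContext) throws Exception {
        // For SHOW COLUMNS the analyzer above registers exactly one TABLE-typed ReadEntity.
        Set<ReadEntity> inputs = hookContext.getInputs();
        for (ReadEntity input : inputs) {
            if (input.getType() == Entity.Type.TABLE) {
                System.out.println("Query reads table: " + input.getName());
            }
        }
    }
}

Registering the class name under hive.exec.pre.hooks makes it run before each query executes, with the same input set the analyzer populated.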

Example 87 with ReadEntity

Use of org.apache.hadoop.hive.ql.hooks.ReadEntity in project hive by apache.

In the class DDLSemanticAnalyzer, the method analyzeShowCreateDatabase:

private void analyzeShowCreateDatabase(ASTNode ast) throws SemanticException {
    String dbName = getUnescapedName((ASTNode) ast.getChild(0));
    ShowCreateDatabaseDesc showCreateDbDesc = new ShowCreateDatabaseDesc(dbName, ctx.getResFile().toString());
    Database database = getDatabase(dbName);
    inputs.add(new ReadEntity(database));
    rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), showCreateDbDesc)));
    setFetchTask(createFetchTask(showCreateDbDesc.getSchema()));
}
Also used: ReadEntity (org.apache.hadoop.hive.ql.hooks.ReadEntity), DDLWork (org.apache.hadoop.hive.ql.plan.DDLWork), Database (org.apache.hadoop.hive.metastore.api.Database), ShowCreateDatabaseDesc (org.apache.hadoop.hive.ql.plan.ShowCreateDatabaseDesc)
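
Unlike the table case in the previous example, the ReadEntity here wraps a metastore Database object. A small sketch of what that entity looks like (the database name "sales" is purely illustrative):

import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.ql.hooks.ReadEntity;

public class ReadEntityDatabaseExample {

    public static void main(String[] args) {
        Database db = new Database();
        // hypothetical database name, used only for this sketch
        db.setName("sales");

        ReadEntity input = new ReadEntity(db);
        // Prints DATABASE: hooks and authorizers use this type to tell
        // SHOW CREATE DATABASE reads apart from table or partition reads.
        System.out.println(input.getType());
        System.out.println(input.getDatabase().getName());
    }
}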

Example 88 with ReadEntity

Use of org.apache.hadoop.hive.ql.hooks.ReadEntity in project hive by apache.

In the class DDLSemanticAnalyzer, the method analyzeExchangePartition:

private void analyzeExchangePartition(String[] qualified, ASTNode ast) throws SemanticException {
    Table destTable = getTable(qualified);
    Table sourceTable = getTable(getUnescapedName((ASTNode) ast.getChild(1)));
    // Get the partition specs
    Map<String, String> partSpecs = getValidatedPartSpec(sourceTable, (ASTNode) ast.getChild(0), conf, false);
    validatePartitionValues(partSpecs);
    boolean sameColumns = MetaStoreUtils.compareFieldColumns(destTable.getAllCols(), sourceTable.getAllCols());
    boolean samePartitions = MetaStoreUtils.compareFieldColumns(destTable.getPartitionKeys(), sourceTable.getPartitionKeys());
    if (!sameColumns || !samePartitions) {
        throw new SemanticException(ErrorMsg.TABLES_INCOMPATIBLE_SCHEMAS.getMsg());
    }
    // Exchange partition is not allowed on transactional tables: files with write IDs
    // may not be valid. It may affect snapshot isolation for on-going txns as well.
    if (AcidUtils.isTransactionalTable(sourceTable) || AcidUtils.isTransactionalTable(destTable)) {
        throw new SemanticException(ErrorMsg.EXCHANGE_PARTITION_NOT_ALLOWED_WITH_TRANSACTIONAL_TABLES.getMsg());
    }
    // check if source partition exists
    getPartitions(sourceTable, partSpecs, true);
    // Verify that the partitions specified are continuous
    // If a subpartition value is specified without specifying a partition's value
    // then we throw an exception
    int counter = isPartitionValueContinuous(sourceTable.getPartitionKeys(), partSpecs);
    if (counter < 0) {
        throw new SemanticException(ErrorMsg.PARTITION_VALUE_NOT_CONTINUOUS.getMsg(partSpecs.toString()));
    }
    List<Partition> destPartitions = null;
    try {
        destPartitions = getPartitions(destTable, partSpecs, true);
    } catch (SemanticException ex) {
        // We expect a SemanticException to be thrown here, since this destination
        // partition should not be present yet.
    }
    if (destPartitions != null) {
        // If any destination partition is present then throw a Semantic Exception.
        throw new SemanticException(ErrorMsg.PARTITION_EXISTS.getMsg(destPartitions.toString()));
    }
    AlterTableExchangePartition alterTableExchangePartition = new AlterTableExchangePartition(sourceTable, destTable, partSpecs);
    rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTableExchangePartition)));
    inputs.add(new ReadEntity(sourceTable));
    outputs.add(new WriteEntity(destTable, WriteType.DDL_SHARED));
}
Also used: Partition (org.apache.hadoop.hive.ql.metadata.Partition), AlterTableExchangePartition (org.apache.hadoop.hive.ql.plan.AlterTableExchangePartition), Table (org.apache.hadoop.hive.ql.metadata.Table), SQLUniqueConstraint (org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint), NotNullConstraint (org.apache.hadoop.hive.ql.metadata.NotNullConstraint), DefaultConstraint (org.apache.hadoop.hive.ql.metadata.DefaultConstraint), SQLCheckConstraint (org.apache.hadoop.hive.metastore.api.SQLCheckConstraint), SQLNotNullConstraint (org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint), SQLDefaultConstraint (org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint), ReadEntity (org.apache.hadoop.hive.ql.hooks.ReadEntity), DDLWork (org.apache.hadoop.hive.ql.plan.DDLWork), WriteEntity (org.apache.hadoop.hive.ql.hooks.WriteEntity)
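
The "continuous" check referenced above enforces that partition values are supplied for a leading prefix of the partition keys (for example ds alone, or ds plus hr, but never hr alone). The helper below is an illustrative reimplementation of that rule, not the private isPartitionValueContinuous method itself, which may differ in detail:

import java.util.List;
import java.util.Map;

import org.apache.hadoop.hive.metastore.api.FieldSchema;

public final class PartitionSpecPrefixCheck {

    /**
     * Returns the number of leading partition keys covered by partSpecs,
     * or -1 when a key is skipped, i.e. the spec is not a continuous prefix.
     */
    public static int continuousPrefixLength(List<FieldSchema> partitionKeys,
            Map<String, String> partSpecs) {
        int counter = 0;
        for (FieldSchema partitionKey : partitionKeys) {
            if (partSpecs.containsKey(partitionKey.getName())) {
                counter++;
                continue;
            }
            // No value for this key: valid only if no later key has a value either.
            return partSpecs.size() == counter ? counter : -1;
        }
        return counter;
    }
}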

Example 89 with ReadEntity

Use of org.apache.hadoop.hive.ql.hooks.ReadEntity in project hive by apache.

In the class DDLSemanticAnalyzer, the method analyzeAlterMaterializedViewRewrite:

private void analyzeAlterMaterializedViewRewrite(String mvName, ASTNode ast) throws SemanticException {
    // Value for the flag
    boolean enableFlag;
    switch(ast.getChild(0).getType()) {
        case HiveParser.TOK_REWRITE_ENABLED:
            enableFlag = true;
            break;
        case HiveParser.TOK_REWRITE_DISABLED:
            enableFlag = false;
            break;
        default:
            throw new SemanticException("Invalid alter materialized view expression");
    }
    AlterMaterializedViewDesc alterMVDesc = new AlterMaterializedViewDesc(AlterMaterializedViewTypes.UPDATE_REWRITE_FLAG);
    alterMVDesc.setMaterializedViewName(mvName);
    alterMVDesc.setRewriteEnableFlag(enableFlag);
    // It can be fully qualified name or use default database
    Table materializedViewTable = getTable(mvName, true);
    // only uses transactional (MM and ACID) tables
    if (enableFlag) {
        for (String tableName : materializedViewTable.getCreationMetadata().getTablesUsed()) {
            Table table = getTable(tableName, true);
            if (!AcidUtils.isTransactionalTable(table)) {
                throw new SemanticException("Automatic rewriting for materialized view cannot " + "be enabled if the materialized view uses non-transactional tables");
            }
        }
    }
    inputs.add(new ReadEntity(materializedViewTable));
    outputs.add(new WriteEntity(materializedViewTable, WriteEntity.WriteType.DDL_EXCLUSIVE));
    rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterMVDesc)));
}
Also used: ReadEntity (org.apache.hadoop.hive.ql.hooks.ReadEntity), Table (org.apache.hadoop.hive.ql.metadata.Table), DDLWork (org.apache.hadoop.hive.ql.plan.DDLWork), WriteEntity (org.apache.hadoop.hive.ql.hooks.WriteEntity), AlterMaterializedViewDesc (org.apache.hadoop.hive.ql.plan.AlterMaterializedViewDesc)
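
The loop above, which rejects enabling rewrite when any source table is not transactional, can be factored into a standalone helper. The sketch below assumes access to the current Hive metadata handle via Hive.get(); the class and method names are made up for illustration, while Hive, Table and AcidUtils.isTransactionalTable are existing APIs.

import java.util.Set;

import org.apache.hadoop.hive.ql.io.AcidUtils;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Table;

public final class MaterializedViewRewriteCheck {

    /**
     * Returns the name of the first source table that is not transactional,
     * or null when automatic rewriting can safely be enabled.
     */
    public static String firstNonTransactionalSource(Set<String> tablesUsed) throws HiveException {
        Hive db = Hive.get();
        for (String tableName : tablesUsed) {
            Table table = db.getTable(tableName);
            if (!AcidUtils.isTransactionalTable(table)) {
                return tableName;
            }
        }
        return null;
    }
}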

Example 90 with ReadEntity

Use of org.apache.hadoop.hive.ql.hooks.ReadEntity in project hive by apache.

In the class DDLSemanticAnalyzer, the method analyzeAltertableSkewedby:

/**
 * Analyze alter table's skewed table
 *
 * @param ast
 *          node
 * @throws SemanticException
 */
private void analyzeAltertableSkewedby(String[] qualified, ASTNode ast) throws SemanticException {
    /**
     * Throw an error if the user tries to use the DDL with
     * hive.internal.ddl.list.bucketing.enable set to false.
     */
    HiveConf hiveConf = SessionState.get().getConf();
    Table tab = getTable(qualified);
    inputs.add(new ReadEntity(tab));
    outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL_EXCLUSIVE));
    validateAlterTableType(tab, AlterTableTypes.ADDSKEWEDBY);
    String tableName = getDotName(qualified);
    if (ast.getChildCount() == 0) {
        /* Convert a skewed table to non-skewed table. */
        AlterTableDesc alterTblDesc = new AlterTableDesc(tableName, true, new ArrayList<String>(), new ArrayList<List<String>>());
        alterTblDesc.setStoredAsSubDirectories(false);
        rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTblDesc)));
    } else {
        switch(((ASTNode) ast.getChild(0)).getToken().getType()) {
            case HiveParser.TOK_TABLESKEWED:
                handleAlterTableSkewedBy(ast, tableName, tab);
                break;
            case HiveParser.TOK_STOREDASDIRS:
                handleAlterTableDisableStoredAsDirs(tableName, tab);
                break;
            default:
                assert false;
        }
    }
}
Also used: ReadEntity (org.apache.hadoop.hive.ql.hooks.ReadEntity), AlterTableDesc (org.apache.hadoop.hive.ql.plan.AlterTableDesc), Table (org.apache.hadoop.hive.ql.metadata.Table), DDLWork (org.apache.hadoop.hive.ql.plan.DDLWork), HiveConf (org.apache.hadoop.hive.conf.HiveConf), ArrayList (java.util.ArrayList), List (java.util.List), ImmutableList (com.google.common.collect.ImmutableList), LinkedList (java.util.LinkedList), WriteEntity (org.apache.hadoop.hive.ql.hooks.WriteEntity)
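
analyzeAltertableSkewedby registers the table both as a ReadEntity and as a WriteEntity with WriteType.DDL_EXCLUSIVE, so hooks can also reason about what a statement is about to modify. The sketch below, with a made-up class name and a purely illustrative protected-table rule, shows a pre-execution hook rejecting exclusive DDL against a particular table; ExecuteWithHookContext, HookContext, WriteEntity and Entity.Type are existing Hive hook APIs.

import java.util.Set;

import org.apache.hadoop.hive.ql.hooks.Entity;
import org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext;
import org.apache.hadoop.hive.ql.hooks.HookContext;
import org.apache.hadoop.hive.ql.hooks.WriteEntity;

public class BlockExclusiveDdlHook implements ExecuteWithHookContext {

    // Hypothetical table that should never be altered with an exclusive DDL lock.
    private static final String PROTECTED_TABLE = "audit_log";

    @Override
    public void run(HookContext hookContext) throws Exception {
        Set<WriteEntity> outputs = hookContext.getOutputs();
        for (WriteEntity output : outputs) {
            if (output.getType() == Entity.Type.TABLE
                    && output.getWriteType() == WriteEntity.WriteType.DDL_EXCLUSIVE
                    && PROTECTED_TABLE.equalsIgnoreCase(output.getTable().getTableName())) {
                throw new RuntimeException("Exclusive DDL on " + PROTECTED_TABLE + " is not allowed");
            }
        }
    }
}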

Aggregations

ReadEntity (org.apache.hadoop.hive.ql.hooks.ReadEntity): 139
WriteEntity (org.apache.hadoop.hive.ql.hooks.WriteEntity): 70
Table (org.apache.hadoop.hive.ql.metadata.Table): 69
DDLWork (org.apache.hadoop.hive.ql.ddl.DDLWork): 31
Partition (org.apache.hadoop.hive.ql.metadata.Partition): 29
ArrayList (java.util.ArrayList): 27
HiveException (org.apache.hadoop.hive.ql.metadata.HiveException): 27
DDLWork (org.apache.hadoop.hive.ql.plan.DDLWork): 24
SemanticException (org.apache.hadoop.hive.ql.parse.SemanticException): 22
HashMap (java.util.HashMap): 16
Test (org.testng.annotations.Test): 16
Map (java.util.Map): 13
LinkedHashMap (java.util.LinkedHashMap): 12
Path (org.apache.hadoop.fs.Path): 12
List (java.util.List): 11
Database (org.apache.hadoop.hive.metastore.api.Database): 11
AtlasEntity (org.apache.atlas.model.instance.AtlasEntity): 10
Referenceable (org.apache.atlas.typesystem.Referenceable): 10
HashSet (java.util.HashSet): 9
FileNotFoundException (java.io.FileNotFoundException): 7