
Example 36 with DDLWork

Use of org.apache.hadoop.hive.ql.ddl.DDLWork in project hive by apache, in the class AbstractDropPartitionAnalyzer, method analyzeCommand:

@Override
protected void analyzeCommand(TableName tableName, Map<String, String> partitionSpec, ASTNode command) throws SemanticException {
    boolean ifExists = (command.getFirstChildWithType(HiveParser.TOK_IFEXISTS) != null) || HiveConf.getBoolVar(conf, ConfVars.DROP_IGNORES_NON_EXISTENT);
    // If the drop has to fail on non-existent partitions, we cannot batch expressions.
    // That is because we actually have to check each separate expression for existence.
    // We could do a small optimization for the case where expr has all columns and all
    // operators are equality, if we assume those would always match one partition (which
    // may not be true with legacy, non-normalized column values). This is probably a
    // popular case but that's kinda hacky. Let's not do it for now.
    boolean canGroupExprs = ifExists;
    boolean mustPurge = (command.getFirstChildWithType(HiveParser.KW_PURGE) != null);
    ReplicationSpec replicationSpec = new ReplicationSpec(command);
    Table table = null;
    try {
        table = getTable(tableName);
    } catch (SemanticException se) {
        if (replicationSpec.isInReplicationScope() && ((se.getCause() instanceof InvalidTableException) || (se.getMessage().contains(ErrorMsg.INVALID_TABLE.getMsg())))) {
            // We just return in that case, no drop needed.
            return;
            // TODO : the contains-message check is fragile; we should refactor SemanticException to be
            // queryable for the error code, and not simply have a message.
            // NOTE : IF_EXISTS might also want to invoke this, but there's a good possibility
            // that IF_EXISTS is stricter about table existence, and applies only to the ptn.
            // Therefore, ignoring IF_EXISTS here.
        } else {
            throw se;
        }
    }
    validateAlterTableType(table, AlterTableType.DROPPARTITION, expectView());
    Map<Integer, List<ExprNodeGenericFuncDesc>> partitionSpecs = ParseUtils.getFullPartitionSpecs(command, table, conf, canGroupExprs);
    if (partitionSpecs.isEmpty()) {
        // nothing to do
        return;
    }
    ReadEntity re = new ReadEntity(table);
    re.noLockNeeded();
    inputs.add(re);
    boolean dropPartUseBase = HiveConf.getBoolVar(conf, ConfVars.HIVE_ACID_DROP_PARTITION_USE_BASE) || HiveConf.getBoolVar(conf, ConfVars.HIVE_ACID_LOCKLESS_READS_ENABLED) && AcidUtils.isTransactionalTable(table);
    addTableDropPartsOutputs(table, partitionSpecs.values(), !ifExists, dropPartUseBase);
    AlterTableDropPartitionDesc desc = new AlterTableDropPartitionDesc(tableName, partitionSpecs, mustPurge, replicationSpec, !dropPartUseBase, table);
    if (desc.mayNeedWriteId()) {
        setAcidDdlDesc(desc);
    }
    rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
}
Also used: ReadEntity (org.apache.hadoop.hive.ql.hooks.ReadEntity), ReplicationSpec (org.apache.hadoop.hive.ql.parse.ReplicationSpec), Table (org.apache.hadoop.hive.ql.metadata.Table), DDLWork (org.apache.hadoop.hive.ql.ddl.DDLWork), InvalidTableException (org.apache.hadoop.hive.ql.metadata.InvalidTableException), ArrayList (java.util.ArrayList), List (java.util.List), SemanticException (org.apache.hadoop.hive.ql.parse.SemanticException)
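
Every example on this page ends with the same wiring: a command-specific *Desc is wrapped in a DDLWork together with the analyzer's read and write entities, TaskFactory turns the work into an executable task, and the task is appended to rootTasks. The helper below is a minimal sketch of that shared step, not code from Hive; the class DdlTaskWiringSketch and its toTask method are hypothetical, while DDLWork, DDLDesc, TaskFactory, ReadEntity, and WriteEntity are the Hive types already used in the examples.

import java.util.Set;

import org.apache.hadoop.hive.ql.ddl.DDLDesc;
import org.apache.hadoop.hive.ql.ddl.DDLWork;
import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.exec.TaskFactory;
import org.apache.hadoop.hive.ql.hooks.ReadEntity;
import org.apache.hadoop.hive.ql.hooks.WriteEntity;

// Hypothetical helper, not part of Hive: isolates the task wiring shared by the analyzers above.
final class DdlTaskWiringSketch {

    // Wraps a command-specific descriptor in a DDLWork and turns it into an executable task.
    // The inputs/outputs are the read/write entities the analyzer collected for locking and
    // authorization; the analyzers above then add the returned task to rootTasks.
    static Task<DDLWork> toTask(DDLDesc desc, Set<ReadEntity> inputs, Set<WriteEntity> outputs) {
        DDLWork work = new DDLWork(inputs, outputs, desc);
        return TaskFactory.get(work);
    }
}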

Example 37 with DDLWork

Use of org.apache.hadoop.hive.ql.ddl.DDLWork in project hive by apache, in the class ShowPartitionAnalyzer, method analyzeInternal:

@Override
public void analyzeInternal(ASTNode ast) throws SemanticException {
    ctx.setResFile(ctx.getLocalTmpPath());
    String tableName = getUnescapedName((ASTNode) ast.getChild(0));
    List<Map<String, String>> partSpecs = getPartitionSpecs(getTable(tableName), ast);
    assert (partSpecs.size() <= 1);
    Map<String, String> partSpec = (partSpecs.size() > 0) ? partSpecs.get(0) : null;
    Table table = getTable(HiveTableName.of(tableName));
    inputs.add(new ReadEntity(table));
    setColumnAccessInfo(new ColumnAccessInfo());
    table.getPartColNames().forEach(col -> getColumnAccessInfo().add(table.getCompleteName(), col));
    ExprNodeDesc filter = getShowPartitionsFilter(table, ast);
    String orderBy = getShowPartitionsOrder(table, ast);
    short limit = getShowPartitionsLimit(ast);
    ShowPartitionsDesc desc = new ShowPartitionsDesc(tableName, ctx.getResFile(), partSpec, filter, orderBy, limit);
    Task<DDLWork> task = TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc));
    rootTasks.add(task);
    task.setFetchSource(true);
    setFetchTask(createFetchTask(ShowPartitionsDesc.SCHEMA));
}
Also used: ReadEntity (org.apache.hadoop.hive.ql.hooks.ReadEntity), Table (org.apache.hadoop.hive.ql.metadata.Table), DDLWork (org.apache.hadoop.hive.ql.ddl.DDLWork), ExprNodeDesc (org.apache.hadoop.hive.ql.plan.ExprNodeDesc), HashMap (java.util.HashMap), Map (java.util.Map), ColumnAccessInfo (org.apache.hadoop.hive.ql.parse.ColumnAccessInfo)

Example 38 with DDLWork

Use of org.apache.hadoop.hive.ql.ddl.DDLWork in project hive by apache, in the class AlterTableExchangePartitionAnalyzer, method analyzeCommand:

@Override
protected void analyzeCommand(TableName tableName, Map<String, String> partitionSpec, ASTNode command) throws SemanticException {
    Table destTable = getTable(tableName);
    Table sourceTable = getTable(getUnescapedName((ASTNode) command.getChild(1)));
    // Get the partition specs
    Map<String, String> partitionSpecs = getValidatedPartSpec(sourceTable, (ASTNode) command.getChild(0), conf, false);
    PartitionUtils.validatePartitions(conf, partitionSpecs);
    boolean sameColumns = MetaStoreUtils.compareFieldColumns(destTable.getAllCols(), sourceTable.getAllCols());
    boolean samePartitions = MetaStoreUtils.compareFieldColumns(destTable.getPartitionKeys(), sourceTable.getPartitionKeys());
    if (!sameColumns || !samePartitions) {
        throw new SemanticException(ErrorMsg.TABLES_INCOMPATIBLE_SCHEMAS.getMsg());
    }
    // Exchange partition is not allowed on transactional (ACID) tables: the exchanged files with write IDs may not be valid. It may affect snapshot isolation for on-going txns as well.
    if (AcidUtils.isTransactionalTable(sourceTable) || AcidUtils.isTransactionalTable(destTable)) {
        throw new SemanticException(ErrorMsg.EXCHANGE_PARTITION_NOT_ALLOWED_WITH_TRANSACTIONAL_TABLES.getMsg());
    }
    // check if source partition exists
    PartitionUtils.getPartitions(db, sourceTable, partitionSpecs, true);
    // Verify that the partitions specified are continuous
    // If a subpartition value is specified without specifying a partition's value then we throw an exception
    int counter = isPartitionValueContinuous(sourceTable.getPartitionKeys(), partitionSpecs);
    if (counter < 0) {
        throw new SemanticException(ErrorMsg.PARTITION_VALUE_NOT_CONTINUOUS.getMsg(partitionSpecs.toString()));
    }
    List<Partition> destPartitions = null;
    try {
        destPartitions = PartitionUtils.getPartitions(db, destTable, partitionSpecs, true);
    } catch (SemanticException ex) {
        // We should expect a semantic exception to be thrown, as this partition should not be present.
    }
    if (destPartitions != null) {
        // If any destination partition is present then throw a Semantic Exception.
        throw new SemanticException(ErrorMsg.PARTITION_EXISTS.getMsg(destPartitions.toString()));
    }
    AlterTableExchangePartitionsDesc desc = new AlterTableExchangePartitionsDesc(sourceTable, destTable, partitionSpecs);
    rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
    inputs.add(new ReadEntity(sourceTable));
    outputs.add(new WriteEntity(destTable, WriteType.DDL_SHARED));
}
Also used: Partition (org.apache.hadoop.hive.ql.metadata.Partition), Table (org.apache.hadoop.hive.ql.metadata.Table), ReadEntity (org.apache.hadoop.hive.ql.hooks.ReadEntity), DDLWork (org.apache.hadoop.hive.ql.ddl.DDLWork), ASTNode (org.apache.hadoop.hive.ql.parse.ASTNode), WriteEntity (org.apache.hadoop.hive.ql.hooks.WriteEntity), SemanticException (org.apache.hadoop.hive.ql.parse.SemanticException)
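
The isPartitionValueContinuous check above enforces that partition values are supplied only for a leading prefix of the table's partition keys; for a table partitioned by (ds, hr), specifying hr without ds is rejected. The following is a self-contained, hypothetical restatement of that rule for illustration, not the Hive method itself.

import java.util.List;
import java.util.Map;

// Hypothetical illustration of the "continuous partition values" rule checked above;
// a restatement for clarity, not the Hive implementation. Unknown spec keys are assumed
// to have been rejected earlier (PartitionUtils.validatePartitions in the example).
final class ContinuousPartitionSpecCheck {

    // Returns the number of leading partition keys covered by the spec,
    // or -1 if a later key is specified while an earlier one is missing.
    static int leadingKeysCovered(List<String> partitionKeyNames, Map<String, String> partitionSpec) {
        int covered = 0;
        boolean gapSeen = false;
        for (String key : partitionKeyNames) {
            if (partitionSpec.containsKey(key)) {
                if (gapSeen) {
                    return -1;          // e.g. hr='01' given while ds is missing
                }
                covered++;
            } else {
                gapSeen = true;         // once a key is skipped, no later key may be specified
            }
        }
        return covered;
    }
}

For a table partitioned by (ds, hr), a spec of {ds} yields 1, {ds, hr} yields 2, and {hr} yields -1, which corresponds to the counter < 0 check in the example.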

Example 39 with DDLWork

Use of org.apache.hadoop.hive.ql.ddl.DDLWork in project hive by apache, in the class AlterViewAsAnalyzer, method analyzeInternal. Here originalText and expandedText read the same token range; the second read comes after analyzeQuery has rewritten the shared token stream, so it yields the expanded, fully qualified query text.

@Override
public void analyzeInternal(ASTNode root) throws SemanticException {
    TableName viewName = getQualifiedTableName((ASTNode) root.getChild(0));
    String fqViewName = viewName.getNotEmptyDbTable();
    LOG.info("Altering the query of view " + fqViewName + " position=" + root.getCharPositionInLine());
    ASTNode select = (ASTNode) root.getChild(1).getChild(0);
    String originalText = ctx.getTokenRewriteStream().toString(select.getTokenStartIndex(), select.getTokenStopIndex());
    SemanticAnalyzer analyzer = analyzeQuery(select, fqViewName);
    schema = new ArrayList<FieldSchema>(analyzer.getResultSchema());
    ParseUtils.validateColumnNameUniqueness(analyzer.getOriginalResultSchema() == null ? schema : analyzer.getOriginalResultSchema());
    String expandedText = ctx.getTokenRewriteStream().toString(select.getTokenStartIndex(), select.getTokenStopIndex());
    AlterViewAsDesc desc = new AlterViewAsDesc(fqViewName, schema, originalText, expandedText);
    validateCreateView(desc, analyzer);
    rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
    DDLUtils.addDbAndTableToOutputs(getDatabase(viewName.getDb()), viewName, TableType.VIRTUAL_VIEW, false, null, outputs);
}
Also used: TableName (org.apache.hadoop.hive.common.TableName), DDLWork (org.apache.hadoop.hive.ql.ddl.DDLWork), FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema), ASTNode (org.apache.hadoop.hive.ql.parse.ASTNode), SemanticAnalyzer (org.apache.hadoop.hive.ql.parse.SemanticAnalyzer)

Example 40 with DDLWork

Use of org.apache.hadoop.hive.ql.ddl.DDLWork in project hive by apache, in the class CreateViewAnalyzer, method analyzeInternal:

@Override
public void analyzeInternal(ASTNode root) throws SemanticException {
    TableName viewName = getQualifiedTableName((ASTNode) root.getChild(0));
    String fqViewName = viewName.getNotEmptyDbTable();
    LOG.info("Creating view " + fqViewName + " position=" + root.getCharPositionInLine());
    Map<Integer, ASTNode> children = new HashMap<>();
    for (int i = 1; i < root.getChildCount(); i++) {
        ASTNode child = (ASTNode) root.getChild(i);
        children.put(child.getToken().getType(), child);
    }
    List<FieldSchema> imposedSchema = children.containsKey(HiveParser.TOK_TABCOLNAME) ? getColumns((ASTNode) children.remove(HiveParser.TOK_TABCOLNAME)) : null;
    boolean ifNotExists = children.remove(HiveParser.TOK_IFNOTEXISTS) != null;
    boolean orReplace = children.remove(HiveParser.TOK_ORREPLACE) != null;
    String comment = children.containsKey(HiveParser.TOK_TABLECOMMENT) ? unescapeSQLString(children.remove(HiveParser.TOK_TABLECOMMENT).getChild(0).getText()) : null;
    ASTNode select = children.remove(HiveParser.TOK_QUERY);
    Map<String, String> properties = children.containsKey(HiveParser.TOK_TABLEPROPERTIES) ? getProps((ASTNode) children.remove(HiveParser.TOK_TABLEPROPERTIES).getChild(0)) : null;
    List<String> partitionColumnNames = children.containsKey(HiveParser.TOK_VIEWPARTCOLS) ? getColumnNames((ASTNode) children.remove(HiveParser.TOK_VIEWPARTCOLS).getChild(0)) : null;
    assert children.isEmpty();
    if (ifNotExists && orReplace) {
        throw new SemanticException("Can't combine IF NOT EXISTS and OR REPLACE.");
    }
    String originalText = ctx.getTokenRewriteStream().toString(select.getTokenStartIndex(), select.getTokenStopIndex());
    SemanticAnalyzer analyzer = analyzeQuery(select, fqViewName);
    schema = new ArrayList<FieldSchema>(analyzer.getResultSchema());
    ParseUtils.validateColumnNameUniqueness(analyzer.getOriginalResultSchema() == null ? schema : analyzer.getOriginalResultSchema());
    String expandedText = getExpandedText(imposedSchema, select, viewName);
    List<FieldSchema> partitionColumns = getPartitionColumns(imposedSchema, select, viewName, partitionColumnNames);
    CreateViewDesc desc = new CreateViewDesc(fqViewName, schema, comment, properties, partitionColumnNames, ifNotExists, orReplace, originalText, expandedText, partitionColumns);
    validateCreateView(desc, analyzer);
    rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
    DDLUtils.addDbAndTableToOutputs(getDatabase(viewName.getDb()), viewName, TableType.VIRTUAL_VIEW, false, properties, outputs);
}
Also used: HashMap (java.util.HashMap), FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema), SemanticAnalyzer (org.apache.hadoop.hive.ql.parse.SemanticAnalyzer), TableName (org.apache.hadoop.hive.common.TableName), DDLWork (org.apache.hadoop.hive.ql.ddl.DDLWork), ASTNode (org.apache.hadoop.hive.ql.parse.ASTNode), SemanticException (org.apache.hadoop.hive.ql.parse.SemanticException)
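
CreateViewAnalyzer handles its optional clauses with a consume-and-verify pattern: the child AST nodes are indexed by token type, each handled clause is removed from the map, and the final assert confirms nothing was left unhandled. Below is a simplified, self-contained sketch of that pattern; the class, the stand-in token ids, and the String payloads are hypothetical, and an exception stands in for the assert.

import java.util.HashMap;
import java.util.Map;

// Hypothetical, simplified sketch of the consume-and-verify dispatch used in CreateViewAnalyzer.
// Token ids and the String payload are stand-ins; in Hive the map holds ASTNode children keyed
// by HiveParser token types.
final class OptionalClauseDispatchSketch {

    static final int TOK_IFNOTEXISTS = 1;     // stand-in for HiveParser.TOK_IFNOTEXISTS
    static final int TOK_TABLECOMMENT = 2;    // stand-in for HiveParser.TOK_TABLECOMMENT

    static void dispatch(Map<Integer, String> clausesByTokenType) {
        Map<Integer, String> children = new HashMap<>(clausesByTokenType);

        // Each handled clause is removed from the map as it is consumed.
        boolean ifNotExists = children.remove(TOK_IFNOTEXISTS) != null;
        String comment = children.containsKey(TOK_TABLECOMMENT)
                ? children.remove(TOK_TABLECOMMENT) : null;

        // Anything left over is a clause this dispatcher does not understand.
        if (!children.isEmpty()) {
            throw new IllegalStateException("Unhandled clauses: " + children.keySet());
        }
        System.out.println("ifNotExists=" + ifNotExists + ", comment=" + comment);
    }
}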

Aggregations

DDLWork (org.apache.hadoop.hive.ql.ddl.DDLWork): 153
SemanticException (org.apache.hadoop.hive.ql.parse.SemanticException): 61
Table (org.apache.hadoop.hive.ql.metadata.Table): 34
ReadEntity (org.apache.hadoop.hive.ql.hooks.ReadEntity): 31
ASTNode (org.apache.hadoop.hive.ql.parse.ASTNode): 24
TableName (org.apache.hadoop.hive.common.TableName): 23
Test (org.junit.Test): 23
WriteEntity (org.apache.hadoop.hive.ql.hooks.WriteEntity): 22
PrincipalDesc (org.apache.hadoop.hive.ql.ddl.privilege.PrincipalDesc): 21
ArrayList (java.util.ArrayList): 18
Path (org.apache.hadoop.fs.Path): 15
HashMap (java.util.HashMap): 14
Database (org.apache.hadoop.hive.metastore.api.Database): 12
Task (org.apache.hadoop.hive.ql.exec.Task): 12
FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema): 11
Tree (org.antlr.runtime.tree.Tree): 10
HashSet (java.util.HashSet): 9
Context (org.apache.hadoop.hive.ql.Context): 9
PrivilegeDesc (org.apache.hadoop.hive.ql.ddl.privilege.PrivilegeDesc): 9
ShowRoleGrantDesc (org.apache.hadoop.hive.ql.ddl.privilege.show.rolegrant.ShowRoleGrantDesc): 8