Example 16 with DDLWork

Use of org.apache.hadoop.hive.ql.ddl.DDLWork in project hive by apache.

From the class DropTableAnalyzer, method analyzeInternal. The analyzer resolves the target table, registers its read and write entities (taking the weaker DDL_EXCL_WRITE lock instead of DDL_EXCLUSIVE when a transactional table is flagged for soft delete), and wraps a DropTableDesc in a DDLWork root task.

@Override
public void analyzeInternal(ASTNode root) throws SemanticException {
    String tableName = getUnescapedName((ASTNode) root.getChild(0));
    boolean ifExists = (root.getFirstChildWithType(HiveParser.TOK_IFEXISTS) != null);
    boolean throwException = !ifExists && !HiveConf.getBoolVar(conf, ConfVars.DROP_IGNORES_NON_EXISTENT);
    Table table = getTable(tableName, throwException);
    if (table != null) {
        inputs.add(new ReadEntity(table));
        // a transactional table flagged for soft delete can be dropped under the weaker DDL_EXCL_WRITE lock
        boolean tableWithSuffix = (HiveConf.getBoolVar(conf, ConfVars.HIVE_ACID_CREATE_TABLE_USE_SUFFIX)
                || HiveConf.getBoolVar(conf, ConfVars.HIVE_ACID_LOCKLESS_READS_ENABLED))
            && AcidUtils.isTransactionalTable(table)
            && Boolean.parseBoolean(table.getProperty(SOFT_DELETE_TABLE));
        outputs.add(new WriteEntity(table, tableWithSuffix ? WriteType.DDL_EXCL_WRITE : WriteType.DDL_EXCLUSIVE));
    }
    boolean purge = (root.getFirstChildWithType(HiveParser.KW_PURGE) != null);
    ReplicationSpec replicationSpec = new ReplicationSpec(root);
    DropTableDesc desc = new DropTableDesc(tableName, ifExists, purge, replicationSpec);
    rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
}
Also used: ReadEntity (org.apache.hadoop.hive.ql.hooks.ReadEntity), ReplicationSpec (org.apache.hadoop.hive.ql.parse.ReplicationSpec), Table (org.apache.hadoop.hive.ql.metadata.Table), DDLWork (org.apache.hadoop.hive.ql.ddl.DDLWork), WriteEntity (org.apache.hadoop.hive.ql.hooks.WriteEntity)
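
For reference, the same descriptor-to-task assembly can be built outside the analyzer. The following is a minimal sketch, not Hive source: the table name and the empty entity sets are placeholders, and the DropTableDesc import path is assumed from the Hive 4 DDL package layout.

import java.util.HashSet;
import org.apache.hadoop.hive.ql.ddl.DDLWork;
import org.apache.hadoop.hive.ql.ddl.table.drop.DropTableDesc; // assumed package
import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.exec.TaskFactory;
import org.apache.hadoop.hive.ql.hooks.ReadEntity;
import org.apache.hadoop.hive.ql.hooks.WriteEntity;
import org.apache.hadoop.hive.ql.parse.ReplicationSpec;

public class DropTableWorkSketch {
    public static Task<DDLWork> buildDropTask() {
        // mirrors DROP TABLE IF EXISTS default.t (ifExists = true, purge = false)
        DropTableDesc desc = new DropTableDesc("default.t", true, false, new ReplicationSpec());
        // DDLWork pairs the operation descriptor with the entities the lock manager inspects
        DDLWork work = new DDLWork(new HashSet<ReadEntity>(), new HashSet<WriteEntity>(), desc);
        return TaskFactory.get(work);
    }
}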

Example 17 with DDLWork

Use of org.apache.hadoop.hive.ql.ddl.DDLWork in project hive by apache.

From the class AbstractAddPartitionAnalyzer, method analyzeCommand. The analyzer validates that the table accepts ADD PARTITION, chooses the write lock based on IF NOT EXISTS and transactionality, and emits a single DDLWork covering all new partitions.

@Override
protected void analyzeCommand(TableName tableName, Map<String, String> partitionSpec, ASTNode command) throws SemanticException {
    Table table = getTable(tableName);
    validateAlterTableType(table, AlterTableType.ADDPARTITION, expectView());
    boolean ifNotExists = command.getChild(0).getType() == HiveParser.TOK_IFNOTEXISTS;
    /* Use DDL_EXCLUSIVE to take an exclusive (X) lock and prevent races between concurrent
     * ADD PARTITION calls with IF NOT EXISTS. Without it, two concurrent calls adding the same
     * partition may both add data, since for transactional tables creating the partition
     * metadata and moving the data are two separate actions. */
    WriteEntity.WriteType writeType = ifNotExists && AcidUtils.isTransactionalTable(table)
            ? WriteType.DDL_EXCLUSIVE : WriteEntity.WriteType.DDL_SHARED;
    outputs.add(new WriteEntity(table, writeType));
    List<AlterTableAddPartitionDesc.PartitionDesc> partitions = createPartitions(command, table, ifNotExists);
    if (partitions.isEmpty()) {
        // nothing to do
        return;
    }
    AlterTableAddPartitionDesc desc = new AlterTableAddPartitionDesc(table.getDbName(), table.getTableName(), ifNotExists, partitions);
    Task<DDLWork> ddlTask = TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc));
    rootTasks.add(ddlTask);
    postProcess(tableName, table, desc, ddlTask);
}
Also used: Table (org.apache.hadoop.hive.ql.metadata.Table), DDLWork (org.apache.hadoop.hive.ql.ddl.DDLWork), WriteEntity (org.apache.hadoop.hive.ql.hooks.WriteEntity)
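
The only branching logic in the snippet above is the lock choice. A hypothetical helper isolating that decision (the class and method names here are ours, not Hive's):

import org.apache.hadoop.hive.ql.hooks.WriteEntity.WriteType;

final class AddPartitionLockChoice {
    // IF NOT EXISTS on a transactional table needs an exclusive lock: the existence
    // check and the data move are separate steps, so two concurrent adders could
    // otherwise both pass the check and both write data.
    static WriteType lockFor(boolean ifNotExists, boolean isTransactional) {
        return ifNotExists && isTransactional ? WriteType.DDL_EXCLUSIVE : WriteType.DDL_SHARED;
    }
}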

Example 18 with DDLWork

Use of org.apache.hadoop.hive.ql.ddl.DDLWork in project hive by apache.

From the class AlterTableTouchAnalyzer, method analyzeCommand. For ALTER TABLE ... TOUCH the analyzer emits one DDLWork for the whole table when no partition spec is given, otherwise one DDLWork per resolved partition spec, all registered with DDL_NO_LOCK.

@Override
protected void analyzeCommand(TableName tableName, Map<String, String> partitionSpecFromFramework, ASTNode command) throws SemanticException {
    Table table = getTable(tableName);
    validateAlterTableType(table, AlterTableType.TOUCH, false);
    inputs.add(new ReadEntity(table));
    List<Map<String, String>> partitionSpecs = getPartitionSpecs(table, command);
    if (partitionSpecs.isEmpty()) {
        AlterTableTouchDesc desc = new AlterTableTouchDesc(tableName.getNotEmptyDbTable(), null);
        outputs.add(new WriteEntity(table, WriteEntity.WriteType.DDL_NO_LOCK));
        rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
    } else {
        PartitionUtils.addTablePartsOutputs(db, outputs, table, partitionSpecs, false, WriteEntity.WriteType.DDL_NO_LOCK);
        for (Map<String, String> partitionSpec : partitionSpecs) {
            AlterTableTouchDesc desc = new AlterTableTouchDesc(tableName.getNotEmptyDbTable(), partitionSpec);
            rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
        }
    }
}
Also used: ReadEntity (org.apache.hadoop.hive.ql.hooks.ReadEntity), Table (org.apache.hadoop.hive.ql.metadata.Table), DDLWork (org.apache.hadoop.hive.ql.ddl.DDLWork), Map (java.util.Map), WriteEntity (org.apache.hadoop.hive.ql.hooks.WriteEntity)
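
The partitioned branch above fans out into one root task per spec. A condensed sketch of that pattern with a placeholder table name (it assumes the same analyzer context as the snippet, so all tasks share the getInputs()/getOutputs() sets):

for (Map<String, String> spec : partitionSpecs) {
    // one DDLWork per partition spec, so each TOUCH becomes its own root task
    AlterTableTouchDesc desc = new AlterTableTouchDesc("db.tbl", spec);
    rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
}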

Example 19 with DDLWork

Use of org.apache.hadoop.hive.ql.ddl.DDLWork in project hive by apache.

From the class ShowTablePropertiesAnalyzer, method analyzeInternal. SHOW TBLPROPERTIES is compiled by validating that the table exists, pointing the descriptor at a local temporary result file, and wiring up a fetch task so the client can read the rows back.

@Override
public void analyzeInternal(ASTNode root) throws SemanticException {
    ctx.setResFile(ctx.getLocalTmpPath());
    TableName tableName = getQualifiedTableName((ASTNode) root.getChild(0));
    String propertyName = (root.getChildCount() > 1) ? unescapeSQLString(root.getChild(1).getText()) : null;
    // validate that table exists
    getTable(tableName);
    ShowTablePropertiesDesc desc = new ShowTablePropertiesDesc(ctx.getResFile().toString(), tableName, propertyName);
    Task<DDLWork> task = TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc));
    rootTasks.add(task);
    task.setFetchSource(true);
    setFetchTask(createFetchTask(ShowTablePropertiesDesc.SCHEMA));
}
Also used: TableName (org.apache.hadoop.hive.common.TableName), DDLWork (org.apache.hadoop.hive.ql.ddl.DDLWork)
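
The result plumbing in the method above is the standard pattern for SHOW-style commands. The same steps annotated, assuming the analyzer's inherited members (ctx, rootTasks, setFetchTask, createFetchTask):

ctx.setResFile(ctx.getLocalTmpPath());             // the DDL task writes its result rows here
Task<DDLWork> task = TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc));
rootTasks.add(task);
task.setFetchSource(true);                         // mark this task as the producer of the fetched rows
setFetchTask(createFetchTask(ShowTablePropertiesDesc.SCHEMA)); // SCHEMA describes the result columns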

Example 20 with DDLWork

Use of org.apache.hadoop.hive.ql.ddl.DDLWork in project hive by apache.

From the class AlterTableSetOwnerAnalyzer, method analyzeCommand. The analyzer rejects an owner principal missing a type or a name before building the descriptor; note that this variant also passes the HiveConf to TaskFactory.get and records the descriptor via setAcidDdlDesc.

@Override
protected void analyzeCommand(TableName tableName, Map<String, String> partitionSpecFromFramework, ASTNode command) throws SemanticException {
    PrincipalDesc ownerPrincipal = AuthorizationParseUtils.getPrincipalDesc((ASTNode) command.getChild(0));
    if (ownerPrincipal.getType() == null) {
        throw new SemanticException("Owner type can't be null in alter table set owner command");
    }
    if (ownerPrincipal.getName() == null) {
        throw new SemanticException("Owner name can't be null in alter table set owner command");
    }
    AlterTableSetOwnerDesc desc = new AlterTableSetOwnerDesc(tableName, ownerPrincipal);
    addInputsOutputsAlterTable(tableName, null, desc, desc.getType(), false);
    rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc), conf));
    setAcidDdlDesc(getTable(tableName), desc);
}
Also used: PrincipalDesc (org.apache.hadoop.hive.ql.ddl.privilege.PrincipalDesc), DDLWork (org.apache.hadoop.hive.ql.ddl.DDLWork), SemanticException (org.apache.hadoop.hive.ql.parse.SemanticException)
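
The two null checks above amount to a reusable guard. A hypothetical extraction (OwnerPrincipalGuard and requireComplete are our names, not Hive's):

import org.apache.hadoop.hive.ql.ddl.privilege.PrincipalDesc;
import org.apache.hadoop.hive.ql.parse.SemanticException;

final class OwnerPrincipalGuard {
    // a usable owner principal needs both a type (e.g. USER, ROLE, GROUP) and a name
    static void requireComplete(PrincipalDesc principal) throws SemanticException {
        if (principal.getType() == null) {
            throw new SemanticException("Owner type can't be null in alter table set owner command");
        }
        if (principal.getName() == null) {
            throw new SemanticException("Owner name can't be null in alter table set owner command");
        }
    }
}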

Aggregations

Types co-occurring with DDLWork across the indexed examples, with usage counts:

DDLWork (org.apache.hadoop.hive.ql.ddl.DDLWork): 153
SemanticException (org.apache.hadoop.hive.ql.parse.SemanticException): 61
Table (org.apache.hadoop.hive.ql.metadata.Table): 34
ReadEntity (org.apache.hadoop.hive.ql.hooks.ReadEntity): 31
ASTNode (org.apache.hadoop.hive.ql.parse.ASTNode): 24
TableName (org.apache.hadoop.hive.common.TableName): 23
Test (org.junit.Test): 23
WriteEntity (org.apache.hadoop.hive.ql.hooks.WriteEntity): 22
PrincipalDesc (org.apache.hadoop.hive.ql.ddl.privilege.PrincipalDesc): 21
ArrayList (java.util.ArrayList): 18
Path (org.apache.hadoop.fs.Path): 15
HashMap (java.util.HashMap): 14
Database (org.apache.hadoop.hive.metastore.api.Database): 12
Task (org.apache.hadoop.hive.ql.exec.Task): 12
FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema): 11
Tree (org.antlr.runtime.tree.Tree): 10
HashSet (java.util.HashSet): 9
Context (org.apache.hadoop.hive.ql.Context): 9
PrivilegeDesc (org.apache.hadoop.hive.ql.ddl.privilege.PrivilegeDesc): 9
ShowRoleGrantDesc (org.apache.hadoop.hive.ql.ddl.privilege.show.rolegrant.ShowRoleGrantDesc): 8