Search in sources:

Example 1 with WriteType

Use of org.apache.hadoop.hive.ql.hooks.WriteEntity.WriteType in project hive by apache.

From the class DDLSemanticAnalyzer, method addInputsOutputsAlterTable.

private void addInputsOutputsAlterTable(String tableName, Map<String, String> partSpec, AlterTableDesc desc, AlterTableTypes op) throws SemanticException {
    boolean isCascade = desc != null && desc.getIsCascade();
    boolean alterPartitions = partSpec != null && !partSpec.isEmpty();
    // CASCADE is only allowed at the table level; it then cascades down to the partitions
    if (isCascade && alterPartitions) {
        throw new SemanticException(ErrorMsg.ALTER_TABLE_PARTITION_CASCADE_NOT_SUPPORTED, op.getName());
    }
    Table tab = getTable(tableName, true);
    // Determine the lock type to acquire
    WriteEntity.WriteType writeType = WriteEntity.determineAlterTableWriteType(op);
    if (!alterPartitions) {
        inputs.add(new ReadEntity(tab));
        outputs.add(new WriteEntity(tab, writeType));
        // no separate lock is needed for the partitions since they are covered by the table lock
        if (isCascade) {
            for (Partition part : getPartitions(tab, partSpec, false)) {
                outputs.add(new WriteEntity(part, WriteEntity.WriteType.DDL_NO_LOCK));
            }
        }
    } else {
        ReadEntity re = new ReadEntity(tab);
        // In the case of altering a table for its partitions we don't need to lock the table
        // itself, just the partitions.  But the table will have a ReadEntity.  So mark that
        // ReadEntity as no lock.
        re.noLockNeeded();
        inputs.add(re);
        if (isFullSpec(tab, partSpec)) {
            // Fully specified partition spec
            Partition part = getPartition(tab, partSpec, true);
            outputs.add(new WriteEntity(part, writeType));
        } else {
            // Partial partition spec supplied. Make sure this is allowed.
            if (!AlterTableDesc.doesAlterTableTypeSupportPartialPartitionSpec(op)) {
                throw new SemanticException(ErrorMsg.ALTER_TABLE_TYPE_PARTIAL_PARTITION_SPEC_NO_SUPPORTED, op.getName());
            } else if (!conf.getBoolVar(HiveConf.ConfVars.DYNAMICPARTITIONING)) {
                throw new SemanticException(ErrorMsg.DYNAMIC_PARTITION_DISABLED);
            }
            for (Partition part : getPartitions(tab, partSpec, true)) {
                outputs.add(new WriteEntity(part, writeType));
            }
        }
    }
    if (desc != null) {
        validateAlterTableType(tab, op, desc.getExpectView());
        // validate that properties being unset actually exist on the table
        if (op == AlterTableDesc.AlterTableTypes.DROPPROPS && !desc.getIsDropIfExists()) {
            Map<String, String> tableParams = tab.getTTable().getParameters();
            for (String currKey : desc.getProps().keySet()) {
                if (!tableParams.containsKey(currKey)) {
                    String errorMsg = "The following property " + currKey + " does not exist in " + tab.getTableName();
                    throw new SemanticException(ErrorMsg.ALTER_TBL_UNSET_NON_EXIST_PROPERTY.getMsg(errorMsg));
                }
            }
        }
    }
}
Also used: ReadEntity (org.apache.hadoop.hive.ql.hooks.ReadEntity), WriteType (org.apache.hadoop.hive.ql.hooks.WriteEntity.WriteType), Partition (org.apache.hadoop.hive.ql.metadata.Partition), AlterTableExchangePartition (org.apache.hadoop.hive.ql.plan.AlterTableExchangePartition), Table (org.apache.hadoop.hive.ql.metadata.Table), WriteEntity (org.apache.hadoop.hive.ql.hooks.WriteEntity)
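
To make the locking pattern above concrete, here is a minimal sketch of how a table-level alter with CASCADE registers one locked WriteEntity for the table and no-lock WriteEntity objects for its partitions, mirroring the first branch of the method. The helper class and method names are hypothetical, not part of Hive.

import java.util.List;
import java.util.Set;

import org.apache.hadoop.hive.ql.hooks.ReadEntity;
import org.apache.hadoop.hive.ql.hooks.WriteEntity;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;

// Hypothetical helper, shown only to isolate the entity-registration pattern from Example 1.
class CascadeEntityRegistration {

    static void register(Table tab, List<Partition> parts, WriteEntity.WriteType writeType,
                         Set<ReadEntity> inputs, Set<WriteEntity> outputs) {
        // the table itself is read, and written with the lock type chosen for the operation
        inputs.add(new ReadEntity(tab));
        outputs.add(new WriteEntity(tab, writeType));
        // partitions are already covered by the table lock, so they are added with DDL_NO_LOCK
        for (Partition part : parts) {
            outputs.add(new WriteEntity(part, WriteEntity.WriteType.DDL_NO_LOCK));
        }
    }
}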

Example 2 with WriteType

Use of org.apache.hadoop.hive.ql.hooks.WriteEntity.WriteType in project hive by apache.

From the class SemanticAnalyzer, method validate.

@Override
public void validate() throws SemanticException {
    LOG.debug("validation start");
    boolean wasAcidChecked = false;
    // Validate that inputs and outputs have the right protect mode to execute the query
    for (ReadEntity readEntity : getInputs()) {
        ReadEntity.Type type = readEntity.getType();
        if (type != ReadEntity.Type.TABLE && type != ReadEntity.Type.PARTITION) {
            // In the current implementation this should never happen, but it is handled
            // here to make the logic complete.
            continue;
        }
        Table tbl = readEntity.getTable();
        Partition p = readEntity.getPartition();
        if (p != null) {
            tbl = p.getTable();
        }
        if (tbl != null && AcidUtils.isTransactionalTable(tbl)) {
            transactionalInQuery = true;
            if (!wasAcidChecked) {
                checkAcidTxnManager(tbl);
            }
            wasAcidChecked = true;
        }
    }
    for (WriteEntity writeEntity : getOutputs()) {
        WriteEntity.Type type = writeEntity.getType();
        if (type == WriteEntity.Type.PARTITION || type == WriteEntity.Type.DUMMYPARTITION) {
            String conflictingArchive = null;
            try {
                Partition usedp = writeEntity.getPartition();
                Table tbl = usedp.getTable();
                if (AcidUtils.isTransactionalTable(tbl)) {
                    transactionalInQuery = true;
                    if (!wasAcidChecked) {
                        checkAcidTxnManager(tbl);
                    }
                    wasAcidChecked = true;
                }
                LOG.debug("validated " + usedp.getName());
                LOG.debug(usedp.getTable().getTableName());
                WriteEntity.WriteType writeType = writeEntity.getWriteType();
                if (writeType != WriteType.UPDATE && writeType != WriteType.DELETE) {
                    // Do not check for ACID; it does not create new parts and this is expensive as hell.
                    // TODO: add an API to get table name list for archived parts with a single call;
                    // nobody uses this so we could skip the whole thing.
                    conflictingArchive = ArchiveUtils.conflictingArchiveNameOrNull(db, tbl, usedp.getSpec());
                }
            } catch (HiveException e) {
                throw new SemanticException(e);
            }
            if (conflictingArchive != null) {
                String message = String.format("Insert conflict with existing archive: %s", conflictingArchive);
                throw new SemanticException(message);
            }
        } else if (type == WriteEntity.Type.TABLE) {
            Table tbl = writeEntity.getTable();
            if (AcidUtils.isTransactionalTable(tbl)) {
                transactionalInQuery = true;
                if (!wasAcidChecked) {
                    checkAcidTxnManager(tbl);
                }
                wasAcidChecked = true;
            }
        }
        if (type != WriteEntity.Type.TABLE && type != WriteEntity.Type.PARTITION) {
            LOG.debug("not validating writeEntity, because entity is neither table nor partition");
            continue;
        }
    }
    boolean reworkMapredWork = HiveConf.getBoolVar(this.conf, HiveConf.ConfVars.HIVE_REWORK_MAPREDWORK);
    // validate all tasks
    for (Task<? extends Serializable> rootTask : rootTasks) {
        validate(rootTask, reworkMapredWork);
    }
}
Also used: ReadEntity (org.apache.hadoop.hive.ql.hooks.ReadEntity), Partition (org.apache.hadoop.hive.ql.metadata.Partition), DummyPartition (org.apache.hadoop.hive.ql.metadata.DummyPartition), WriteType (org.apache.hadoop.hive.ql.hooks.WriteEntity.WriteType), Table (org.apache.hadoop.hive.ql.metadata.Table), HiveException (org.apache.hadoop.hive.ql.metadata.HiveException), WriteEntity (org.apache.hadoop.hive.ql.hooks.WriteEntity), CalciteSemanticException (org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException)
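
The UPDATE/DELETE guard around the archive-conflict check can be captured in a small predicate. The class and method names below are illustrative only; the sketch assumes nothing beyond the WriteEntity API already used in the example.

import org.apache.hadoop.hive.ql.hooks.WriteEntity;
import org.apache.hadoop.hive.ql.hooks.WriteEntity.WriteType;

// Illustrative predicate: ACID UPDATE and DELETE never create new partitions,
// so the expensive archive-conflict lookup can be skipped for those write types.
class ArchiveCheckPredicate {

    static boolean needsArchiveConflictCheck(WriteEntity writeEntity) {
        WriteType writeType = writeEntity.getWriteType();
        return writeType != WriteType.UPDATE && writeType != WriteType.DELETE;
    }
}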

Example 3 with WriteType

Use of org.apache.hadoop.hive.ql.hooks.WriteEntity.WriteType in project hive by apache.

From the class DDLSemanticAnalyzer, method addInputsOutputsAlterTable (overload with a doForceExclusive parameter).

private void addInputsOutputsAlterTable(String tableName, Map<String, String> partSpec, AlterTableDesc desc, AlterTableTypes op, boolean doForceExclusive) throws SemanticException {
    boolean isCascade = desc != null && desc.getIsCascade();
    boolean alterPartitions = partSpec != null && !partSpec.isEmpty();
    // CASCADE is only allowed at the table level; it then cascades down to the partitions
    if (isCascade && alterPartitions) {
        throw new SemanticException(ErrorMsg.ALTER_TABLE_PARTITION_CASCADE_NOT_SUPPORTED, op.getName());
    }
    Table tab = getTable(tableName, true);
    // CASCADE is only valid for partitioned tables
    if (isCascade && !tab.isPartitioned()) {
        throw new SemanticException(ErrorMsg.ALTER_TABLE_NON_PARTITIONED_TABLE_CASCADE_NOT_SUPPORTED);
    }
    // Determine the lock type to acquire
    WriteEntity.WriteType writeType = doForceExclusive ? WriteType.DDL_EXCLUSIVE : determineAlterTableWriteType(tab, desc, op);
    if (!alterPartitions) {
        inputs.add(new ReadEntity(tab));
        alterTableOutput = new WriteEntity(tab, writeType);
        outputs.add(alterTableOutput);
        // no separate lock is needed for the partitions since they are covered by the table lock
        if (isCascade) {
            for (Partition part : getPartitions(tab, partSpec, false)) {
                outputs.add(new WriteEntity(part, WriteEntity.WriteType.DDL_NO_LOCK));
            }
        }
    } else {
        ReadEntity re = new ReadEntity(tab);
        // In the case of altering a table for its partitions we don't need to lock the table
        // itself, just the partitions.  But the table will have a ReadEntity.  So mark that
        // ReadEntity as no lock.
        re.noLockNeeded();
        inputs.add(re);
        if (isFullSpec(tab, partSpec)) {
            // Fully specified partition spec
            Partition part = getPartition(tab, partSpec, true);
            outputs.add(new WriteEntity(part, writeType));
        } else {
            // Partial partition spec supplied. Make sure this is allowed.
            if (!AlterTableDesc.doesAlterTableTypeSupportPartialPartitionSpec(op)) {
                throw new SemanticException(ErrorMsg.ALTER_TABLE_TYPE_PARTIAL_PARTITION_SPEC_NO_SUPPORTED, op.getName());
            } else if (!conf.getBoolVar(HiveConf.ConfVars.DYNAMICPARTITIONING)) {
                throw new SemanticException(ErrorMsg.DYNAMIC_PARTITION_DISABLED);
            }
            for (Partition part : getPartitions(tab, partSpec, true)) {
                outputs.add(new WriteEntity(part, writeType));
            }
        }
    }
    if (desc != null) {
        validateAlterTableType(tab, op, desc.getExpectView());
        // validate that properties being unset actually exist on the table
        if (op == AlterTableDesc.AlterTableTypes.DROPPROPS && !desc.getIsDropIfExists()) {
            Map<String, String> tableParams = tab.getTTable().getParameters();
            for (String currKey : desc.getProps().keySet()) {
                if (!tableParams.containsKey(currKey)) {
                    String errorMsg = "The following property " + currKey + " does not exist in " + tab.getTableName();
                    throw new SemanticException(ErrorMsg.ALTER_TBL_UNSET_NON_EXIST_PROPERTY.getMsg(errorMsg));
                }
            }
        }
    }
}
Also used: ReadEntity (org.apache.hadoop.hive.ql.hooks.ReadEntity), WriteType (org.apache.hadoop.hive.ql.hooks.WriteEntity.WriteType), Partition (org.apache.hadoop.hive.ql.metadata.Partition), AlterTableExchangePartition (org.apache.hadoop.hive.ql.plan.AlterTableExchangePartition), Table (org.apache.hadoop.hive.ql.metadata.Table), WriteEntity (org.apache.hadoop.hive.ql.hooks.WriteEntity)
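
Compared with Example 1, the only change in how the write type is selected is the doForceExclusive flag. A sketch of that decision follows; the class name is hypothetical, and derivedType stands in for whatever the analyzer's own determineAlterTableWriteType returns.

import org.apache.hadoop.hive.ql.hooks.WriteEntity.WriteType;

// Hypothetical sketch of the lock-type decision in Example 3.
class AlterTableLockChoice {

    static WriteType choose(boolean doForceExclusive, WriteType derivedType) {
        // a forced exclusive DDL lock overrides whatever the analyzer derived for the operation
        return doForceExclusive ? WriteType.DDL_EXCLUSIVE : derivedType;
    }
}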

Aggregations

ReadEntity (org.apache.hadoop.hive.ql.hooks.ReadEntity): 3
WriteEntity (org.apache.hadoop.hive.ql.hooks.WriteEntity): 3
WriteType (org.apache.hadoop.hive.ql.hooks.WriteEntity.WriteType): 3
Partition (org.apache.hadoop.hive.ql.metadata.Partition): 3
Table (org.apache.hadoop.hive.ql.metadata.Table): 3
AlterTableExchangePartition (org.apache.hadoop.hive.ql.plan.AlterTableExchangePartition): 2
DummyPartition (org.apache.hadoop.hive.ql.metadata.DummyPartition): 1
HiveException (org.apache.hadoop.hive.ql.metadata.HiveException): 1
CalciteSemanticException (org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException): 1