Example 1 with TableName

Use of org.apache.hadoop.hive.common.TableName in project hive by apache.

From the class HiveIcebergMetaHook, method setupAlterOperationType:

private void setupAlterOperationType(org.apache.hadoop.hive.metastore.api.Table hmsTable, EnvironmentContext context) throws MetaException {
    TableName tableName = new TableName(hmsTable.getCatName(), hmsTable.getDbName(), hmsTable.getTableName());
    if (context == null || context.getProperties() == null) {
        throw new MetaException("ALTER TABLE operation type on Iceberg table " + tableName + " could not be determined.");
    }
    String stringOpType = context.getProperties().get(ALTER_TABLE_OPERATION_TYPE);
    if (stringOpType != null) {
        currentAlterTableOp = AlterTableType.valueOf(stringOpType);
        if (SUPPORTED_ALTER_OPS.stream().noneMatch(op -> op.equals(currentAlterTableOp))) {
            throw new MetaException("Unsupported ALTER TABLE operation type on Iceberg table " + tableName + ", must be one of: " + SUPPORTED_ALTER_OPS);
        }
    }
}
Also used: TableName (org.apache.hadoop.hive.common.TableName), MetaException (org.apache.hadoop.hive.metastore.api.MetaException)
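
The hook resolves the ALTER TABLE operation type from the EnvironmentContext properties, so the caller has to put the ALTER_TABLE_OPERATION_TYPE key into the context before the alter reaches the metastore hook. Below is a minimal caller-side sketch, not taken from the Hive sources: the literal property key is an assumption (the snippet above only refers to it through its ALTER_TABLE_OPERATION_TYPE constant), and the value must match an AlterTableType enum name because the hook maps it back with AlterTableType.valueOf.

import org.apache.hadoop.hive.metastore.api.EnvironmentContext;

public class AlterContextSketch {

    // Assumed literal for the ALTER_TABLE_OPERATION_TYPE key; the hook above reads it via a constant.
    private static final String ALTER_TABLE_OPERATION_TYPE = "alterTableOpType";

    public static EnvironmentContext contextFor(String opType) {
        EnvironmentContext context = new EnvironmentContext();
        // opType must be the name of an AlterTableType constant, e.g. "ADDPROPS".
        context.putToProperties(ALTER_TABLE_OPERATION_TYPE, opType);
        return context;
    }
}

If the property is missing, or the context itself is null, the hook fails fast with the MetaException shown above instead of guessing the operation type.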

Example 2 with TableName

Use of org.apache.hadoop.hive.common.TableName in project hive by apache.

From the class CreateViewOperation, method createViewObject:

private Table createViewObject() throws HiveException {
    TableName name = HiveTableName.of(desc.getViewName());
    Table view = new Table(name.getDb(), name.getTable());
    view.setViewOriginalText(desc.getOriginalText());
    view.setViewExpandedText(desc.getExpandedText());
    view.setTableType(TableType.VIRTUAL_VIEW);
    view.setSerializationLib(null);
    view.clearSerDeInfo();
    view.setFields(desc.getSchema());
    if (desc.getComment() != null) {
        view.setProperty("comment", desc.getComment());
    }
    if (desc.getProperties() != null) {
        view.getParameters().putAll(desc.getProperties());
    }
    if (!CollectionUtils.isEmpty(desc.getPartitionColumns())) {
        view.setPartCols(desc.getPartitionColumns());
    }
    StorageFormat storageFormat = new StorageFormat(context.getConf());
    storageFormat.fillDefaultStorageFormat(false, false);
    view.setInputFormatClass(storageFormat.getInputFormat());
    view.setOutputFormatClass(storageFormat.getOutputFormat());
    if (desc.getOwnerName() != null) {
        view.setOwner(desc.getOwnerName());
    }
    // Sets the column state for the create view statement (false since it is a creation).
    // Similar to logic in CreateTableDesc.
    StatsSetupConst.setStatsStateForCreateTable(view.getTTable().getParameters(), null, StatsSetupConst.FALSE);
    return view;
}
Also used: HiveTableName (org.apache.hadoop.hive.ql.parse.HiveTableName), TableName (org.apache.hadoop.hive.common.TableName), Table (org.apache.hadoop.hive.ql.metadata.Table), StorageFormat (org.apache.hadoop.hive.ql.parse.StorageFormat)
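
The view object is keyed by a TableName parsed from the (possibly db-qualified) view name via HiveTableName.of. A minimal sketch of that parsing step, assuming a "db.view" string splits into the parts returned by getDb() and getTable(); the view name below is made up, and the throws clause is an assumption based on how the DriverTxnHandler examples further down declare theirs.

import org.apache.hadoop.hive.common.TableName;
import org.apache.hadoop.hive.ql.parse.HiveTableName;
import org.apache.hadoop.hive.ql.parse.SemanticException;

public class ViewNameSketch {
    public static void main(String[] args) throws SemanticException {
        // Hypothetical qualified view name.
        TableName name = HiveTableName.of("reporting.daily_sales_v");
        System.out.println(name.getDb());    // expected: reporting
        System.out.println(name.getTable()); // expected: daily_sales_v
    }
}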

Example 3 with TableName

Use of org.apache.hadoop.hive.common.TableName in project hive by apache.

From the class AlterMaterializedViewRebuildAnalyzer, method analyzeInternal:

@Override
public void analyzeInternal(ASTNode root) throws SemanticException {
    if (mvRebuildMode != MaterializationRebuildMode.NONE) {
        super.analyzeInternal(root);
        return;
    }
    ASTNode tableTree = (ASTNode) root.getChild(0);
    TableName tableName = getQualifiedTableName(tableTree);
    // The query scheduler requires the fully qualified table name, so for scheduled queries
    // register a table name translation against the current database and return.
    if (ctx.isScheduledQuery()) {
        unparseTranslator.addTableNameTranslation(tableTree, SessionState.get().getCurrentDatabase());
        return;
    }
    try {
        Boolean outdated = db.isOutdatedMaterializedView(getTxnMgr(), tableName);
        if (outdated != null && !outdated) {
            String msg = String.format("Materialized view %s.%s is up to date. Skipping rebuild.", tableName.getDb(), tableName.getTable());
            LOG.info(msg);
            console.printInfo(msg, false);
            return;
        }
    } catch (HiveException e) {
        LOG.warn("Error while checking materialized view " + tableName.getDb() + "." + tableName.getTable(), e);
    }
    ASTNode rewrittenAST = getRewrittenAST(tableName);
    mvRebuildMode = MaterializationRebuildMode.INSERT_OVERWRITE_REBUILD;
    mvRebuildDbName = tableName.getDb();
    mvRebuildName = tableName.getTable();
    LOG.debug("Rebuilding materialized view " + tableName.getNotEmptyDbTable());
    super.analyzeInternal(rewrittenAST);
    queryState.setCommandType(HiveOperation.ALTER_MATERIALIZED_VIEW_REBUILD);
}
Also used: TableName (org.apache.hadoop.hive.common.TableName), HiveException (org.apache.hadoop.hive.ql.metadata.HiveException), ASTNode (org.apache.hadoop.hive.ql.parse.ASTNode)
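
The debug line above relies on getNotEmptyDbTable() to render the database-qualified name of the materialized view. A small sketch with made-up catalog, database and view names, assuming the method prints the db.table form that the log message suggests:

import org.apache.hadoop.hive.common.TableName;

public class RebuildLogSketch {
    public static void main(String[] args) {
        // Hypothetical materialized view; the catalog name "hive" is only a placeholder.
        TableName mv = new TableName("hive", "sales", "daily_totals_mv");
        // Expected (assumed) output: Rebuilding materialized view sales.daily_totals_mv
        System.out.println("Rebuilding materialized view " + mv.getNotEmptyDbTable());
    }
}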

Example 4 with TableName

Use of org.apache.hadoop.hive.common.TableName in project hive by apache.

From the class DriverTxnHandler, method setWriteIdForAcidDdl:

private boolean setWriteIdForAcidDdl() throws SemanticException, LockException {
    DDLDescWithWriteId acidDdlDesc = driverContext.getPlan().getAcidDdlDesc();
    boolean hasAcidDdl = acidDdlDesc != null && acidDdlDesc.mayNeedWriteId();
    if (hasAcidDdl) {
        String fqTableName = acidDdlDesc.getFullTableName();
        TableName tableName = HiveTableName.of(fqTableName);
        long writeId = driverContext.getTxnManager().getTableWriteId(tableName.getDb(), tableName.getTable());
        acidDdlDesc.setWriteId(writeId);
    }
    return hasAcidDdl;
}
Also used: TableName (org.apache.hadoop.hive.common.TableName), HiveTableName (org.apache.hadoop.hive.ql.parse.HiveTableName), DDLDescWithWriteId (org.apache.hadoop.hive.ql.ddl.DDLDesc.DDLDescWithWriteId)
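
Here the DDL desc exposes its table as a single fully qualified string, which is split back into TableName parts before the transaction manager is asked for a write ID. A self-contained sketch of that decomposition, using a stand-in lookup interface instead of HiveTxnManager so it runs without a metastore; only HiveTableName.of, getDb() and getTable() come from the snippet above, everything else is made up for illustration.

import org.apache.hadoop.hive.common.TableName;
import org.apache.hadoop.hive.ql.parse.HiveTableName;
import org.apache.hadoop.hive.ql.parse.SemanticException;

public class AcidDdlWriteIdSketch {

    // Stand-in for HiveTxnManager#getTableWriteId; not a Hive type.
    interface WriteIdLookup {
        long getTableWriteId(String db, String table);
    }

    static long resolveWriteId(String fqTableName, WriteIdLookup txnManager) throws SemanticException {
        TableName tableName = HiveTableName.of(fqTableName);
        return txnManager.getTableWriteId(tableName.getDb(), tableName.getTable());
    }

    public static void main(String[] args) throws SemanticException {
        // The table name and the write id 42 are made up for the example.
        System.out.println(resolveWriteId("warehouse.orders_acid", (db, table) -> 42L));
    }
}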

Example 5 with TableName

Use of org.apache.hadoop.hive.common.TableName in project hive by apache.

From the class DriverTxnHandler, method setWriteIdForAcidFileSinks:

void setWriteIdForAcidFileSinks() throws SemanticException, LockException {
    if (!driverContext.getPlan().getAcidSinks().isEmpty()) {
        List<FileSinkDesc> acidSinks = new ArrayList<>(driverContext.getPlan().getAcidSinks());
        // sorting makes tests easier to write since file names and ROW__IDs depend on statementId
        // so this makes (file name -> data) mapping stable
        acidSinks.sort((FileSinkDesc fsd1, FileSinkDesc fsd2) -> fsd1.getDirName().compareTo(fsd2.getDirName()));
        // If the direct insert is on, sort the FSOs by moveTaskId as well because the dir is the same for all except the union use cases.
        boolean isDirectInsertOn = false;
        for (FileSinkDesc acidSink : acidSinks) {
            if (acidSink.isDirectInsert()) {
                isDirectInsertOn = true;
                break;
            }
        }
        if (isDirectInsertOn) {
            acidSinks.sort((FileSinkDesc fsd1, FileSinkDesc fsd2) -> fsd1.getMoveTaskId().compareTo(fsd2.getMoveTaskId()));
        }
        int maxStmtId = -1;
        for (FileSinkDesc acidSink : acidSinks) {
            TableDesc tableInfo = acidSink.getTableInfo();
            TableName tableName = HiveTableName.of(tableInfo.getTableName());
            long writeId = driverContext.getTxnManager().getTableWriteId(tableName.getDb(), tableName.getTable());
            acidSink.setTableWriteId(writeId);
            /**
             * it's possible to have > 1 FileSink writing to the same table/partition
             * e.g. Merge stmt, multi-insert stmt when mixing DP and SP writes
             * Insert ... Select ... Union All Select ... using
             * {@link org.apache.hadoop.hive.ql.exec.AbstractFileMergeOperator#UNION_SUDBIR_PREFIX}
             */
            acidSink.setStatementId(driverContext.getTxnManager().getStmtIdAndIncrement());
            maxStmtId = Math.max(acidSink.getStatementId(), maxStmtId);
            String unionAllSubdir = "/" + AbstractFileMergeOperator.UNION_SUDBIR_PREFIX;
            if (acidSink.getInsertOverwrite() && acidSink.getDirName().toString().contains(unionAllSubdir) && acidSink.isFullAcidTable()) {
                throw new UnsupportedOperationException("QueryId=" + driverContext.getPlan().getQueryId() + " is not supported due to OVERWRITE and UNION ALL.  Please use truncate + insert");
            }
        }
        if (HiveConf.getBoolVar(driverContext.getConf(), ConfVars.HIVE_EXTEND_BUCKET_ID_RANGE)) {
            for (FileSinkDesc each : acidSinks) {
                each.setMaxStmtId(maxStmtId);
            }
        }
    }
}
Also used: TableName (org.apache.hadoop.hive.common.TableName), HiveTableName (org.apache.hadoop.hive.ql.parse.HiveTableName), FileSinkDesc (org.apache.hadoop.hive.ql.plan.FileSinkDesc), ArrayList (java.util.ArrayList), TableDesc (org.apache.hadoop.hive.ql.plan.TableDesc)
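
Before write IDs and statement IDs are assigned, the sinks are put into a deterministic order: by directory name first, then re-sorted by move task ID when direct insert is on, so file names and ROW__IDs stay stable across test runs. A stand-alone sketch of that ordering, using a plain placeholder class instead of FileSinkDesc so it compiles without the Hive planner classes; the directory names and task IDs are invented.

import java.util.ArrayList;
import java.util.List;

public class AcidSinkOrderingSketch {

    // Placeholder holding only the FileSinkDesc fields the ordering relies on.
    static class Sink {
        final String dirName;
        final String moveTaskId;
        final boolean directInsert;

        Sink(String dirName, String moveTaskId, boolean directInsert) {
            this.dirName = dirName;
            this.moveTaskId = moveTaskId;
            this.directInsert = directInsert;
        }
    }

    static void sortDeterministically(List<Sink> sinks) {
        // Mirrors the method above: sort by dir name, then re-sort by move task id if any sink is a direct insert.
        sinks.sort((a, b) -> a.dirName.compareTo(b.dirName));
        if (sinks.stream().anyMatch(s -> s.directInsert)) {
            sinks.sort((a, b) -> a.moveTaskId.compareTo(b.moveTaskId));
        }
    }

    public static void main(String[] args) {
        List<Sink> sinks = new ArrayList<>();
        sinks.add(new Sink("/warehouse/t/delta_b", "2", true));
        sinks.add(new Sink("/warehouse/t/delta_a", "1", false));
        sortDeterministically(sinks);
        sinks.forEach(s -> System.out.println(s.dirName)); // delta_a first, then delta_b
    }
}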

Aggregations

TableName (org.apache.hadoop.hive.common.TableName): 47
DDLWork (org.apache.hadoop.hive.ql.ddl.DDLWork): 22
ArrayList (java.util.ArrayList): 16
SemanticException (org.apache.hadoop.hive.ql.parse.SemanticException): 15
Table (org.apache.hadoop.hive.ql.metadata.Table): 14
FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema): 11
MetaException (org.apache.hadoop.hive.metastore.api.MetaException): 11
HiveTableName (org.apache.hadoop.hive.ql.parse.HiveTableName): 10
HashMap (java.util.HashMap): 9
HiveException (org.apache.hadoop.hive.ql.metadata.HiveException): 9
HashSet (java.util.HashSet): 8
IOException (java.io.IOException): 7
List (java.util.List): 7
Path (org.apache.hadoop.fs.Path): 7
SQLCheckConstraint (org.apache.hadoop.hive.metastore.api.SQLCheckConstraint): 7
SQLDefaultConstraint (org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint): 7
SQLNotNullConstraint (org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint): 7
SQLUniqueConstraint (org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint): 7
ASTNode (org.apache.hadoop.hive.ql.parse.ASTNode): 7
SourceTable (org.apache.hadoop.hive.metastore.api.SourceTable): 6