Example 26 with TableName

Use of org.apache.hadoop.hive.common.TableName in project hive by apache.

From the class TableBuilder, method build().

public Table build(Configuration conf) throws MetaException {
    if (tableName == null) {
        throw new MetaException("You must set the table name");
    }
    if (ownerType == null) {
        ownerType = PrincipalType.USER;
    }
    if (owner == null) {
        try {
            owner = SecurityUtils.getUser();
        } catch (IOException e) {
            throw MetaStoreUtils.newMetaException(e);
        }
    }
    if (catName == null)
        catName = MetaStoreUtils.getDefaultCatalog(conf);
    Table t = new Table(tableName, dbName, owner, createTime, lastAccessTime, retention,
            buildSd(), partCols, tableParams, viewOriginalText, viewExpandedText, type);
    if (rewriteEnabled)
        t.setRewriteEnabled(true);
    if (temporary)
        t.setTemporary(temporary);
    t.setCatName(catName);
    if (!mvReferencedTables.isEmpty()) {
        Set<String> tablesUsed = mvReferencedTables.stream()
                .map(sourceTable -> TableName.getDbTable(
                        sourceTable.getTable().getDbName(), sourceTable.getTable().getTableName()))
                .collect(Collectors.toSet());
        CreationMetadata cm = new CreationMetadata(catName, dbName, tableName, tablesUsed);
        cm.setSourceTables(mvReferencedTables);
        if (mvValidTxnList != null)
            cm.setValidTxnList(mvValidTxnList);
        t.setCreationMetadata(cm);
    }
    return t;
}
Also used : SecurityUtils(org.apache.hadoop.hive.metastore.utils.SecurityUtils), CreationMetadata(org.apache.hadoop.hive.metastore.api.CreationMetadata), MetaStoreUtils(org.apache.hadoop.hive.metastore.utils.MetaStoreUtils), MetaException(org.apache.hadoop.hive.metastore.api.MetaException), TException(org.apache.thrift.TException), Set(java.util.Set), PrincipalType(org.apache.hadoop.hive.metastore.api.PrincipalType), IOException(java.io.IOException), HashMap(java.util.HashMap), Collectors(java.util.stream.Collectors), Table(org.apache.hadoop.hive.metastore.api.Table), Warehouse(org.apache.hadoop.hive.metastore.Warehouse), ArrayList(java.util.ArrayList), HashSet(java.util.HashSet), FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema), List(java.util.List), ValidTxnList(org.apache.hadoop.hive.common.ValidTxnList), IMetaStoreClient(org.apache.hadoop.hive.metastore.IMetaStoreClient), Configuration(org.apache.hadoop.conf.Configuration), Map(java.util.Map), TableType(org.apache.hadoop.hive.metastore.TableType), SourceTable(org.apache.hadoop.hive.metastore.api.SourceTable), TableName(org.apache.hadoop.hive.common.TableName), Database(org.apache.hadoop.hive.metastore.api.Database)
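
For context, a minimal sketch of how this builder is typically driven, assuming the fluent setters that org.apache.hadoop.hive.metastore.client.builder.TableBuilder exposes (setDbName, setTableName, addCol); the database, table, and column names here are illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;

public class TableBuilderSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = MetastoreConf.newMetastoreConf();
        // build() fills in the defaults shown above: owner from SecurityUtils,
        // ownerType USER, and the default catalog taken from the Configuration.
        Table t = new TableBuilder()
                .setDbName("demo_db")
                .setTableName("demo_table")
                .addCol("id", "bigint")
                .addCol("payload", "string")
                .build(conf);
        System.out.println(t.getCatName() + "." + t.getDbName() + "." + t.getTableName());
    }
}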

Example 27 with TableName

Use of org.apache.hadoop.hive.common.TableName in project hive by apache.

From the class HiveMaterializedViewUtils, method isOutdatedMaterializedView().

/**
 * Utility method that returns whether a materialized view is outdated (true), not outdated
 * (false), or it cannot be determined (null). The latter case may happen, e.g., when the
 * materialized view definition uses external tables.
 */
public static Boolean isOutdatedMaterializedView(String validTxnsList, HiveTxnManager txnMgr, Set<TableName> tablesUsed, Table materializedViewTable) throws LockException {
    List<String> tablesUsedNames = tablesUsed.stream()
            .map(tableName -> TableName.getDbTable(tableName.getDb(), tableName.getTable()))
            .collect(Collectors.toList());
    ValidTxnWriteIdList currentTxnWriteIds = txnMgr.getValidWriteIds(tablesUsedNames, validTxnsList);
    if (currentTxnWriteIds == null) {
        LOG.debug("Materialized view " + materializedViewTable.getFullyQualifiedName() + " ignored for rewriting as we could not obtain current txn ids");
        return null;
    }
    MaterializedViewMetadata mvMetadata = materializedViewTable.getMVMetadata();
    Set<String> storedTablesUsed = mvMetadata.getSourceTableFullNames();
    if (mvMetadata.getValidTxnList() == null || mvMetadata.getValidTxnList().isEmpty()) {
        LOG.debug("Materialized view " + materializedViewTable.getFullyQualifiedName() + " ignored for rewriting as we could not obtain materialization txn ids");
        return null;
    }
    boolean ignore = false;
    ValidTxnWriteIdList mvTxnWriteIds = new ValidTxnWriteIdList(mvMetadata.getValidTxnList());
    for (String fullyQualifiedTableName : tablesUsedNames) {
        // If the view does not reference this table, skip the check: a rewriting could only
        // have joined such an extra table to the existing tables with an append-columns only
        // join, i.e., PK-FK + not null.
        if (!storedTablesUsed.contains(fullyQualifiedTableName)) {
            continue;
        }
        ValidWriteIdList tableCurrentWriteIds = currentTxnWriteIds.getTableValidWriteIdList(fullyQualifiedTableName);
        if (tableCurrentWriteIds == null) {
            // Uses non-transactional table, cannot be considered
            LOG.debug("Materialized view " + materializedViewTable.getFullyQualifiedName() + " ignored for rewriting as it is outdated and cannot be considered for " + " rewriting because it uses non-transactional table " + fullyQualifiedTableName);
            ignore = true;
            break;
        }
        ValidWriteIdList tableWriteIds = mvTxnWriteIds.getTableValidWriteIdList(fullyQualifiedTableName);
        if (tableWriteIds == null) {
            // This should not happen, but we ignore for safety
            LOG.warn("Materialized view " + materializedViewTable.getFullyQualifiedName() + " ignored for rewriting as details about txn ids for table " + fullyQualifiedTableName + " could not be found in " + mvTxnWriteIds);
            ignore = true;
            break;
        }
        if (!TxnIdUtils.checkEquivalentWriteIds(tableCurrentWriteIds, tableWriteIds)) {
            LOG.debug("Materialized view " + materializedViewTable.getFullyQualifiedName() + " contents are outdated");
            return true;
        }
    }
    if (ignore) {
        return null;
    }
    return false;
}
Also used : ValidWriteIdList(org.apache.hadoop.hive.common.ValidWriteIdList), HiveGroupingID(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveGroupingID), LoggerFactory(org.slf4j.LoggerFactory), HiveProject(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveProject), HiveTableScan(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableScan), BigDecimal(java.math.BigDecimal), RexNode(org.apache.calcite.rex.RexNode), RelBuilder(org.apache.calcite.tools.RelBuilder), HiveRelOptMaterialization(org.apache.hadoop.hive.ql.metadata.HiveRelOptMaterialization), Group(org.apache.calcite.rel.core.Aggregate.Group), RelOptCluster(org.apache.calcite.plan.RelOptCluster), ImmutableBitSet(org.apache.calcite.util.ImmutableBitSet), LockException(org.apache.hadoop.hive.ql.lockmgr.LockException), TxnIdUtils(org.apache.hive.common.util.TxnIdUtils), Set(java.util.Set), RelVisitor(org.apache.calcite.rel.RelVisitor), Collectors(java.util.stream.Collectors), SessionState(org.apache.hadoop.hive.ql.session.SessionState), RexInputRef(org.apache.calcite.rex.RexInputRef), List(java.util.List), SqlStdOperatorTable(org.apache.calcite.sql.fun.SqlStdOperatorTable), HivePrivilegeObject(org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject), HiveTxnManager(org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager), MaterializedViewMetadata(org.apache.hadoop.hive.ql.metadata.MaterializedViewMetadata), HiveException(org.apache.hadoop.hive.ql.metadata.HiveException), HiveFilter(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveFilter), HiveRelNode(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveRelNode), Project(org.apache.calcite.rel.core.Project), TableScan(org.apache.calcite.rel.core.TableScan), RelOptMaterialization(org.apache.calcite.plan.RelOptMaterialization), Filter(org.apache.calcite.rel.core.Filter), RelOptUtil(org.apache.calcite.plan.RelOptUtil), ArrayList(java.util.ArrayList), ImmutableList(com.google.common.collect.ImmutableList), HiveAccessControlException(org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAccessControlException), HiveRelFactories(org.apache.hadoop.hive.ql.optimizer.calcite.HiveRelFactories), DruidQuery(org.apache.calcite.adapter.druid.DruidQuery), TableName(org.apache.hadoop.hive.common.TableName), HiveAuthzContext(org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzContext), BindableConvention(org.apache.calcite.interpreter.BindableConvention), Logger(org.slf4j.Logger), RexBuilder(org.apache.calcite.rex.RexBuilder), RelOptHiveTable(org.apache.hadoop.hive.ql.optimizer.calcite.RelOptHiveTable), Table(org.apache.hadoop.hive.ql.metadata.Table), HepPlanner(org.apache.calcite.plan.hep.HepPlanner), HepProgramBuilder(org.apache.calcite.plan.hep.HepProgramBuilder), RelNode(org.apache.calcite.rel.RelNode), Aggregate(org.apache.calcite.rel.core.Aggregate), FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema), ValidTxnWriteIdList(org.apache.hadoop.hive.common.ValidTxnWriteIdList), DruidSqlOperatorConverter(org.apache.hadoop.hive.ql.parse.DruidSqlOperatorConverter), Preconditions(com.google.common.base.Preconditions), AggregateCall(org.apache.calcite.rel.core.AggregateCall), Collections(java.util.Collections), HiveOperationType(org.apache.hadoop.hive.ql.security.authorization.plugin.HiveOperationType)
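
Because isOutdatedMaterializedView returns a boxed Boolean with three meaningful states, callers have to distinguish null ("cannot be determined") from false ("not outdated"). A minimal sketch of that dispatch, assuming the utility class lives in org.apache.hadoop.hive.ql.metadata as in recent Hive versions and that the surrounding objects are obtained elsewhere:

import java.util.Set;
import org.apache.hadoop.hive.common.TableName;
import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager;
import org.apache.hadoop.hive.ql.lockmgr.LockException;
import org.apache.hadoop.hive.ql.metadata.HiveMaterializedViewUtils;
import org.apache.hadoop.hive.ql.metadata.Table;

public final class OutdatednessCheckSketch {
    // Treats both "outdated" (true) and "unknown" (null) as unusable for rewriting;
    // only a provably fresh view (false) passes.
    static boolean usableForRewriting(String validTxnsList, HiveTxnManager txnMgr,
            Set<TableName> tablesUsed, Table mvTable) throws LockException {
        Boolean outdated = HiveMaterializedViewUtils.isOutdatedMaterializedView(
                validTxnsList, txnMgr, tablesUsed, mvTable);
        if (outdated == null) {
            return false; // e.g. external or non-transactional source tables
        }
        return !outdated;
    }
}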

Example 28 with TableName

Use of org.apache.hadoop.hive.common.TableName in project hive by apache.

From the class TruncateTableHandler, method handle().

@Override
public List<Task<?>> handle(Context context) throws SemanticException {
    AlterTableMessage msg = deserializer.getAlterTableMessage(context.dmd.getPayload());
    final TableName tName = TableName.fromString(msg.getTable(), null, context.isDbNameEmpty() ? msg.getDB() : context.dbName);
    TruncateTableDesc truncateTableDesc = new TruncateTableDesc(tName, null, context.eventOnlyReplicationSpec());
    truncateTableDesc.setWriteId(msg.getWriteId());
    Task<DDLWork> truncateTableTask = TaskFactory.get(new DDLWork(readEntitySet, writeEntitySet, truncateTableDesc, true, context.getDumpDirectory(), context.getMetricCollector()), context.hiveConf);
    context.log.debug("Added truncate tbl task : {}:{}:{}", truncateTableTask.getId(), truncateTableDesc.getTableName(), truncateTableDesc.getWriteId());
    updatedMetadata.set(context.dmd.getEventTo().toString(), tName.getDb(), tName.getTable(), null);
    try {
        return ReplUtils.addChildTask(truncateTableTask);
    } catch (Exception e) {
        throw new SemanticException(e.getMessage());
    }
}
Also used : TableName(org.apache.hadoop.hive.common.TableName), DDLWork(org.apache.hadoop.hive.ql.ddl.DDLWork), TruncateTableDesc(org.apache.hadoop.hive.ql.ddl.table.misc.truncate.TruncateTableDesc), AlterTableMessage(org.apache.hadoop.hive.metastore.messaging.AlterTableMessage), SemanticException(org.apache.hadoop.hive.ql.parse.SemanticException)
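
The handler relies on TableName.fromString to fall back to a default database when the event carries an unqualified table name. A small illustration of that parsing, assuming fromString(name, defaultCatalog, defaultDatabase) keeps the behavior shown in this snippet; the names are hypothetical:

import org.apache.hadoop.hive.common.TableName;

public class TableNameParseSketch {
    public static void main(String[] args) {
        // A qualified name keeps its own database; the default is ignored.
        TableName qualified = TableName.fromString("sales.orders", null, "fallback_db");
        // An unqualified name picks up the default database.
        TableName bare = TableName.fromString("orders", null, "fallback_db");
        System.out.println(qualified.getDb() + "." + qualified.getTable()); // sales.orders
        System.out.println(bare.getDb() + "." + bare.getTable());           // fallback_db.orders
    }
}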

Example 29 with TableName

Use of org.apache.hadoop.hive.common.TableName in project hive by apache.

From the class TruncatePartitionHandler, method handle().

@Override
public List<Task<?>> handle(Context context) throws SemanticException {
    AlterPartitionMessage msg = deserializer.getAlterPartitionMessage(context.dmd.getPayload());
    final TableName tName = TableName.fromString(msg.getTable(), null, context.isDbNameEmpty() ? msg.getDB() : context.dbName);
    Map<String, String> partSpec = new LinkedHashMap<>();
    org.apache.hadoop.hive.metastore.api.Table tblObj;
    try {
        tblObj = msg.getTableObj();
        Iterator<String> afterIterator = msg.getPtnObjAfter().getValuesIterator();
        for (FieldSchema fs : tblObj.getPartitionKeys()) {
            partSpec.put(fs.getName(), afterIterator.next());
        }
    } catch (Exception e) {
        if (!(e instanceof SemanticException)) {
            throw new SemanticException("Error reading message members", e);
        } else {
            throw (SemanticException) e;
        }
    }
    TruncateTableDesc truncateTableDesc = new TruncateTableDesc(tName, partSpec, context.eventOnlyReplicationSpec());
    truncateTableDesc.setWriteId(msg.getWriteId());
    Task<DDLWork> truncatePtnTask = TaskFactory.get(new DDLWork(readEntitySet, writeEntitySet, truncateTableDesc, true, context.getDumpDirectory(), context.getMetricCollector()), context.hiveConf);
    context.log.debug("Added truncate ptn task : {}:{}:{}", truncatePtnTask.getId(), truncateTableDesc.getTableName(), truncateTableDesc.getWriteId());
    updatedMetadata.set(context.dmd.getEventTo().toString(), tName.getDb(), tName.getTable(), partSpec);
    try {
        return ReplUtils.addChildTask(truncatePtnTask);
    } catch (Exception e) {
        throw new SemanticException(e.getMessage());
    }
}
Also used : TruncateTableDesc(org.apache.hadoop.hive.ql.ddl.table.misc.truncate.TruncateTableDesc), FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema), SemanticException(org.apache.hadoop.hive.ql.parse.SemanticException), LinkedHashMap(java.util.LinkedHashMap), TableName(org.apache.hadoop.hive.common.TableName), DDLWork(org.apache.hadoop.hive.ql.ddl.DDLWork), AlterPartitionMessage(org.apache.hadoop.hive.metastore.messaging.AlterPartitionMessage)
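
The loop above pairs the table's partition keys with the post-event values in declaration order; a standalone sketch of that pairing, with hypothetical keys and values:

import java.util.Arrays;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class PartSpecSketch {
    public static void main(String[] args) {
        // Hypothetical partition schema and the "after" values carried by the event.
        List<String> partitionKeys = Arrays.asList("ds", "hr");
        Iterator<String> afterValues = Arrays.asList("2024-01-01", "12").iterator();
        // A LinkedHashMap preserves the partition keys' declaration order,
        // which a partition spec requires.
        Map<String, String> partSpec = new LinkedHashMap<>();
        for (String key : partitionKeys) {
            partSpec.put(key, afterValues.next());
        }
        System.out.println(partSpec); // {ds=2024-01-01, hr=12}
    }
}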

Example 30 with TableName

Use of org.apache.hadoop.hive.common.TableName in project hive by apache.

From the class AddPrimaryKeyHandler, method handle().

@Override
public List<Task<?>> handle(Context context) throws SemanticException {
    AddPrimaryKeyMessage msg = deserializer.getAddPrimaryKeyMessage(context.dmd.getPayload());
    List<SQLPrimaryKey> pks;
    try {
        pks = msg.getPrimaryKeys();
    } catch (Exception e) {
        if (!(e instanceof SemanticException)) {
            throw new SemanticException("Error reading message members", e);
        } else {
            throw (SemanticException) e;
        }
    }
    List<Task<?>> tasks = new ArrayList<>();
    if (pks.isEmpty()) {
        return tasks;
    }
    final String actualDbName = context.isDbNameEmpty() ? pks.get(0).getTable_db() : context.dbName;
    final String actualTblName = pks.get(0).getTable_name();
    final TableName tName = TableName.fromString(actualTblName, null, actualDbName);
    for (SQLPrimaryKey pk : pks) {
        pk.setTable_db(actualDbName);
        pk.setTable_name(actualTblName);
    }
    Constraints constraints = new Constraints(pks, null, null, null, null, null);
    AlterTableAddConstraintDesc addConstraintsDesc = new AlterTableAddConstraintDesc(tName, context.eventOnlyReplicationSpec(), constraints);
    Task<DDLWork> addConstraintsTask = TaskFactory.get(new DDLWork(readEntitySet, writeEntitySet, addConstraintsDesc, true, context.getDumpDirectory(), context.getMetricCollector()), context.hiveConf);
    tasks.add(addConstraintsTask);
    context.log.debug("Added add constrains task : {}:{}", addConstraintsTask.getId(), actualTblName);
    updatedMetadata.set(context.dmd.getEventTo().toString(), actualDbName, actualTblName, null);
    return Collections.singletonList(addConstraintsTask);
}
Also used : AddPrimaryKeyMessage(org.apache.hadoop.hive.metastore.messaging.AddPrimaryKeyMessage), SQLPrimaryKey(org.apache.hadoop.hive.metastore.api.SQLPrimaryKey), Task(org.apache.hadoop.hive.ql.exec.Task), ArrayList(java.util.ArrayList), SemanticException(org.apache.hadoop.hive.ql.parse.SemanticException), TableName(org.apache.hadoop.hive.common.TableName), Constraints(org.apache.hadoop.hive.ql.ddl.table.constraint.Constraints), DDLWork(org.apache.hadoop.hive.ql.ddl.DDLWork), AlterTableAddConstraintDesc(org.apache.hadoop.hive.ql.ddl.table.constraint.add.AlterTableAddConstraintDesc)
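
Examples 28 through 30 share one replication-handler shape: deserialize the event payload, resolve the target TableName, build a DDL descriptor, and wrap it in a task. A schematic sketch of that shape using placeholder types (EventMessage, Descriptor, and ReplTask are illustrative stand-ins, not Hive classes):

public class ReplHandlerShapeSketch {
    // Stand-ins for Hive's message, descriptor, and task types.
    interface EventMessage { String getDB(); String getTable(); }
    static class Descriptor {
        final String db, table;
        Descriptor(String db, String table) { this.db = db; this.table = table; }
    }
    static class ReplTask {
        final Descriptor desc;
        ReplTask(Descriptor desc) { this.desc = desc; }
    }

    // The shared shape: prefer the replication context's db name, else the event's.
    static ReplTask handle(EventMessage msg, String contextDbName) {
        String db = (contextDbName != null) ? contextDbName : msg.getDB();
        Descriptor desc = new Descriptor(db, msg.getTable());
        return new ReplTask(desc); // in Hive: TaskFactory.get(new DDLWork(..., desc, ...))
    }

    public static void main(String[] args) {
        EventMessage msg = new EventMessage() {
            public String getDB() { return "event_db"; }
            public String getTable() { return "t1"; }
        };
        ReplTask task = handle(msg, null);
        System.out.println(task.desc.db + "." + task.desc.table); // event_db.t1
    }
}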

Aggregations

TableName (org.apache.hadoop.hive.common.TableName): 47 uses
DDLWork (org.apache.hadoop.hive.ql.ddl.DDLWork): 22 uses
ArrayList (java.util.ArrayList): 16 uses
SemanticException (org.apache.hadoop.hive.ql.parse.SemanticException): 15 uses
Table (org.apache.hadoop.hive.ql.metadata.Table): 14 uses
FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema): 11 uses
MetaException (org.apache.hadoop.hive.metastore.api.MetaException): 11 uses
HiveTableName (org.apache.hadoop.hive.ql.parse.HiveTableName): 10 uses
HashMap (java.util.HashMap): 9 uses
HiveException (org.apache.hadoop.hive.ql.metadata.HiveException): 9 uses
HashSet (java.util.HashSet): 8 uses
IOException (java.io.IOException): 7 uses
List (java.util.List): 7 uses
Path (org.apache.hadoop.fs.Path): 7 uses
SQLCheckConstraint (org.apache.hadoop.hive.metastore.api.SQLCheckConstraint): 7 uses
SQLDefaultConstraint (org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint): 7 uses
SQLNotNullConstraint (org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint): 7 uses
SQLUniqueConstraint (org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint): 7 uses
ASTNode (org.apache.hadoop.hive.ql.parse.ASTNode): 7 uses
SourceTable (org.apache.hadoop.hive.metastore.api.SourceTable): 6 uses