Example 21 with Hive

Use of org.apache.hadoop.hive.ql.metadata.Hive in project hive by apache.

The class DDLTask, method execute.

@Override
public int execute(DriverContext driverContext) {
    if (driverContext.getCtx().getExplainAnalyze() == AnalyzeState.RUNNING) {
        return 0;
    }
    // Obtain the Hive metastore handle
    Hive db;
    try {
        db = Hive.get(conf);
        CreateDatabaseDesc createDatabaseDesc = work.getCreateDatabaseDesc();
        if (null != createDatabaseDesc) {
            return createDatabase(db, createDatabaseDesc);
        }
        DropDatabaseDesc dropDatabaseDesc = work.getDropDatabaseDesc();
        if (dropDatabaseDesc != null) {
            return dropDatabase(db, dropDatabaseDesc);
        }
        LockDatabaseDesc lockDatabaseDesc = work.getLockDatabaseDesc();
        if (lockDatabaseDesc != null) {
            return lockDatabase(db, lockDatabaseDesc);
        }
        UnlockDatabaseDesc unlockDatabaseDesc = work.getUnlockDatabaseDesc();
        if (unlockDatabaseDesc != null) {
            return unlockDatabase(db, unlockDatabaseDesc);
        }
        SwitchDatabaseDesc switchDatabaseDesc = work.getSwitchDatabaseDesc();
        if (switchDatabaseDesc != null) {
            return switchDatabase(db, switchDatabaseDesc);
        }
        DescDatabaseDesc descDatabaseDesc = work.getDescDatabaseDesc();
        if (descDatabaseDesc != null) {
            return descDatabase(db, descDatabaseDesc);
        }
        AlterDatabaseDesc alterDatabaseDesc = work.getAlterDatabaseDesc();
        if (alterDatabaseDesc != null) {
            return alterDatabase(db, alterDatabaseDesc);
        }
        CreateTableDesc crtTbl = work.getCreateTblDesc();
        if (crtTbl != null) {
            return createTable(db, crtTbl);
        }
        CreateTableLikeDesc crtTblLike = work.getCreateTblLikeDesc();
        if (crtTblLike != null) {
            return createTableLike(db, crtTblLike);
        }
        DropTableDesc dropTbl = work.getDropTblDesc();
        if (dropTbl != null) {
            dropTableOrPartitions(db, dropTbl);
            return 0;
        }
        AlterTableDesc alterTbl = work.getAlterTblDesc();
        if (alterTbl != null) {
            if (!allowOperationInReplicationScope(db, alterTbl.getOldName(), null, alterTbl.getReplicationSpec())) {
                // Skip this alter: the table is either missing (a drop/rename follows this
                // alter in the replication stream) or is newer than our update.
                LOG.debug("DDLTask: Alter Table is skipped as table {} is newer than update", alterTbl.getOldName());
                return 0;
            }
            if (alterTbl.getOp() == AlterTableTypes.DROPCONSTRAINT) {
                return dropConstraint(db, alterTbl);
            } else if (alterTbl.getOp() == AlterTableTypes.ADDCONSTRAINT) {
                return addConstraints(db, alterTbl);
            } else {
                return alterTable(db, alterTbl);
            }
        }
        CreateViewDesc crtView = work.getCreateViewDesc();
        if (crtView != null) {
            return createView(db, crtView);
        }
        AddPartitionDesc addPartitionDesc = work.getAddPartitionDesc();
        if (addPartitionDesc != null) {
            return addPartitions(db, addPartitionDesc);
        }
        RenamePartitionDesc renamePartitionDesc = work.getRenamePartitionDesc();
        if (renamePartitionDesc != null) {
            return renamePartition(db, renamePartitionDesc);
        }
        AlterTableSimpleDesc simpleDesc = work.getAlterTblSimpleDesc();
        if (simpleDesc != null) {
            if (simpleDesc.getType() == AlterTableTypes.TOUCH) {
                return touch(db, simpleDesc);
            } else if (simpleDesc.getType() == AlterTableTypes.ARCHIVE) {
                return archive(db, simpleDesc, driverContext);
            } else if (simpleDesc.getType() == AlterTableTypes.UNARCHIVE) {
                return unarchive(db, simpleDesc);
            } else if (simpleDesc.getType() == AlterTableTypes.COMPACT) {
                return compact(db, simpleDesc);
            }
        }
        MsckDesc msckDesc = work.getMsckDesc();
        if (msckDesc != null) {
            return msck(db, msckDesc);
        }
        DescTableDesc descTbl = work.getDescTblDesc();
        if (descTbl != null) {
            return describeTable(db, descTbl);
        }
        DescFunctionDesc descFunc = work.getDescFunctionDesc();
        if (descFunc != null) {
            return describeFunction(db, descFunc);
        }
        ShowDatabasesDesc showDatabases = work.getShowDatabasesDesc();
        if (showDatabases != null) {
            return showDatabases(db, showDatabases);
        }
        ShowTablesDesc showTbls = work.getShowTblsDesc();
        if (showTbls != null) {
            return showTablesOrViews(db, showTbls);
        }
        ShowColumnsDesc showCols = work.getShowColumnsDesc();
        if (showCols != null) {
            return showColumns(db, showCols);
        }
        ShowTableStatusDesc showTblStatus = work.getShowTblStatusDesc();
        if (showTblStatus != null) {
            return showTableStatus(db, showTblStatus);
        }
        ShowTblPropertiesDesc showTblProperties = work.getShowTblPropertiesDesc();
        if (showTblProperties != null) {
            return showTableProperties(db, showTblProperties);
        }
        ShowFunctionsDesc showFuncs = work.getShowFuncsDesc();
        if (showFuncs != null) {
            return showFunctions(db, showFuncs);
        }
        ShowLocksDesc showLocks = work.getShowLocksDesc();
        if (showLocks != null) {
            return showLocks(db, showLocks);
        }
        ShowCompactionsDesc compactionsDesc = work.getShowCompactionsDesc();
        if (compactionsDesc != null) {
            return showCompactions(db, compactionsDesc);
        }
        ShowTxnsDesc txnsDesc = work.getShowTxnsDesc();
        if (txnsDesc != null) {
            return showTxns(db, txnsDesc);
        }
        AbortTxnsDesc abortTxnsDesc = work.getAbortTxnsDesc();
        if (abortTxnsDesc != null) {
            return abortTxns(db, abortTxnsDesc);
        }
        LockTableDesc lockTbl = work.getLockTblDesc();
        if (lockTbl != null) {
            return lockTable(db, lockTbl);
        }
        UnlockTableDesc unlockTbl = work.getUnlockTblDesc();
        if (unlockTbl != null) {
            return unlockTable(db, unlockTbl);
        }
        ShowPartitionsDesc showParts = work.getShowPartsDesc();
        if (showParts != null) {
            return showPartitions(db, showParts);
        }
        ShowCreateDatabaseDesc showCreateDb = work.getShowCreateDbDesc();
        if (showCreateDb != null) {
            return showCreateDatabase(db, showCreateDb);
        }
        ShowCreateTableDesc showCreateTbl = work.getShowCreateTblDesc();
        if (showCreateTbl != null) {
            return showCreateTable(db, showCreateTbl);
        }
        ShowConfDesc showConf = work.getShowConfDesc();
        if (showConf != null) {
            return showConf(db, showConf);
        }
        RoleDDLDesc roleDDLDesc = work.getRoleDDLDesc();
        if (roleDDLDesc != null) {
            return roleDDL(db, roleDDLDesc);
        }
        GrantDesc grantDesc = work.getGrantDesc();
        if (grantDesc != null) {
            return grantOrRevokePrivileges(db, grantDesc.getPrincipals(), grantDesc.getPrivileges(), grantDesc.getPrivilegeSubjectDesc(), grantDesc.getGrantor(), grantDesc.getGrantorType(), grantDesc.isGrantOption(), true);
        }
        RevokeDesc revokeDesc = work.getRevokeDesc();
        if (revokeDesc != null) {
            return grantOrRevokePrivileges(db, revokeDesc.getPrincipals(), revokeDesc.getPrivileges(), revokeDesc.getPrivilegeSubjectDesc(), null, null, revokeDesc.isGrantOption(), false);
        }
        ShowGrantDesc showGrantDesc = work.getShowGrantDesc();
        if (showGrantDesc != null) {
            return showGrants(db, showGrantDesc);
        }
        GrantRevokeRoleDDL grantOrRevokeRoleDDL = work.getGrantRevokeRoleDDL();
        if (grantOrRevokeRoleDDL != null) {
            return grantOrRevokeRole(db, grantOrRevokeRoleDDL);
        }
        AlterTablePartMergeFilesDesc mergeFilesDesc = work.getMergeFilesDesc();
        if (mergeFilesDesc != null) {
            return mergeFiles(db, mergeFilesDesc, driverContext);
        }
        AlterTableAlterPartDesc alterPartDesc = work.getAlterTableAlterPartDesc();
        if (alterPartDesc != null) {
            return alterTableAlterPart(db, alterPartDesc);
        }
        TruncateTableDesc truncateTableDesc = work.getTruncateTblDesc();
        if (truncateTableDesc != null) {
            return truncateTable(db, truncateTableDesc);
        }
        AlterTableExchangePartition alterTableExchangePartition = work.getAlterTableExchangePartition();
        if (alterTableExchangePartition != null) {
            return exchangeTablePartition(db, alterTableExchangePartition);
        }
        CacheMetadataDesc cacheMetadataDesc = work.getCacheMetadataDesc();
        if (cacheMetadataDesc != null) {
            return cacheMetadata(db, cacheMetadataDesc);
        }
        InsertTableDesc insertTableDesc = work.getInsertTableDesc();
        if (insertTableDesc != null) {
            return insertCommitWork(db, insertTableDesc);
        }
        PreInsertTableDesc preInsertTableDesc = work.getPreInsertTableDesc();
        if (preInsertTableDesc != null) {
            return preInsertWork(db, preInsertTableDesc);
        }
        KillQueryDesc killQueryDesc = work.getKillQueryDesc();
        if (killQueryDesc != null) {
            return killQuery(db, killQueryDesc);
        }
        if (work.getCreateResourcePlanDesc() != null) {
            return createResourcePlan(db, work.getCreateResourcePlanDesc());
        }
        if (work.getShowResourcePlanDesc() != null) {
            return showResourcePlans(db, work.getShowResourcePlanDesc());
        }
        if (work.getAlterResourcePlanDesc() != null) {
            return alterResourcePlan(db, work.getAlterResourcePlanDesc());
        }
        if (work.getDropResourcePlanDesc() != null) {
            return dropResourcePlan(db, work.getDropResourcePlanDesc());
        }
        if (work.getCreateWMTriggerDesc() != null) {
            return createWMTrigger(db, work.getCreateWMTriggerDesc());
        }
        if (work.getAlterWMTriggerDesc() != null) {
            return alterWMTrigger(db, work.getAlterWMTriggerDesc());
        }
        if (work.getDropWMTriggerDesc() != null) {
            return dropWMTrigger(db, work.getDropWMTriggerDesc());
        }
        if (work.getWmPoolDesc() != null) {
            return createOrAlterWMPool(db, work.getWmPoolDesc());
        }
        if (work.getDropWMPoolDesc() != null) {
            return dropWMPool(db, work.getDropWMPoolDesc());
        }
        if (work.getWmMappingDesc() != null) {
            return createOrAlterWMMapping(db, work.getWmMappingDesc());
        }
        if (work.getDropWMMappingDesc() != null) {
            return dropWMMapping(db, work.getDropWMMappingDesc());
        }
        if (work.getTriggerToPoolMappingDesc() != null) {
            return createOrDropTriggerToPoolMapping(db, work.getTriggerToPoolMappingDesc());
        }
        if (work.getAlterMaterializedViewDesc() != null) {
            return alterMaterializedView(db, work.getAlterMaterializedViewDesc());
        }
    } catch (Throwable e) {
        failed(e);
        return 1;
    }
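    // Unreachable: every DDLWork field should have been handled above.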
    assert false;
    return 0;
}
Also used: DropDatabaseDesc(org.apache.hadoop.hive.ql.plan.DropDatabaseDesc) LockTableDesc(org.apache.hadoop.hive.ql.plan.LockTableDesc) UnlockTableDesc(org.apache.hadoop.hive.ql.plan.UnlockTableDesc) ShowTblPropertiesDesc(org.apache.hadoop.hive.ql.plan.ShowTblPropertiesDesc) RenamePartitionDesc(org.apache.hadoop.hive.ql.plan.RenamePartitionDesc) ShowDatabasesDesc(org.apache.hadoop.hive.ql.plan.ShowDatabasesDesc) DescFunctionDesc(org.apache.hadoop.hive.ql.plan.DescFunctionDesc) CreateViewDesc(org.apache.hadoop.hive.ql.plan.CreateViewDesc) ShowTxnsDesc(org.apache.hadoop.hive.ql.plan.ShowTxnsDesc) AlterDatabaseDesc(org.apache.hadoop.hive.ql.plan.AlterDatabaseDesc) ShowCompactionsDesc(org.apache.hadoop.hive.ql.plan.ShowCompactionsDesc) ShowGrantDesc(org.apache.hadoop.hive.ql.plan.ShowGrantDesc) GrantDesc(org.apache.hadoop.hive.ql.plan.GrantDesc) ShowCreateTableDesc(org.apache.hadoop.hive.ql.plan.ShowCreateTableDesc) AlterTableSimpleDesc(org.apache.hadoop.hive.ql.plan.AlterTableSimpleDesc) DescTableDesc(org.apache.hadoop.hive.ql.plan.DescTableDesc) GrantRevokeRoleDDL(org.apache.hadoop.hive.ql.plan.GrantRevokeRoleDDL) TruncateTableDesc(org.apache.hadoop.hive.ql.plan.TruncateTableDesc) DropTableDesc(org.apache.hadoop.hive.ql.plan.DropTableDesc) ShowTablesDesc(org.apache.hadoop.hive.ql.plan.ShowTablesDesc) Hive(org.apache.hadoop.hive.ql.metadata.Hive) ShowFunctionsDesc(org.apache.hadoop.hive.ql.plan.ShowFunctionsDesc) AbortTxnsDesc(org.apache.hadoop.hive.ql.plan.AbortTxnsDesc) ShowCreateDatabaseDesc(org.apache.hadoop.hive.ql.plan.ShowCreateDatabaseDesc) AlterTableExchangePartition(org.apache.hadoop.hive.ql.plan.AlterTableExchangePartition) CacheMetadataDesc(org.apache.hadoop.hive.ql.plan.CacheMetadataDesc) UnlockDatabaseDesc(org.apache.hadoop.hive.ql.plan.UnlockDatabaseDesc) RevokeDesc(org.apache.hadoop.hive.ql.plan.RevokeDesc) AlterTableDesc(org.apache.hadoop.hive.ql.plan.AlterTableDesc) MsckDesc(org.apache.hadoop.hive.ql.plan.MsckDesc) ShowLocksDesc(org.apache.hadoop.hive.ql.plan.ShowLocksDesc) SwitchDatabaseDesc(org.apache.hadoop.hive.ql.plan.SwitchDatabaseDesc) ShowConfDesc(org.apache.hadoop.hive.ql.plan.ShowConfDesc) ShowPartitionsDesc(org.apache.hadoop.hive.ql.plan.ShowPartitionsDesc) CreateTableLikeDesc(org.apache.hadoop.hive.ql.plan.CreateTableLikeDesc) ShowTableStatusDesc(org.apache.hadoop.hive.ql.plan.ShowTableStatusDesc) PreInsertTableDesc(org.apache.hadoop.hive.ql.parse.PreInsertTableDesc) AlterTableAlterPartDesc(org.apache.hadoop.hive.ql.plan.AlterTableAlterPartDesc) RoleDDLDesc(org.apache.hadoop.hive.ql.plan.RoleDDLDesc) AlterTablePartMergeFilesDesc(org.apache.hadoop.hive.ql.parse.AlterTablePartMergeFilesDesc) CreateTableDesc(org.apache.hadoop.hive.ql.plan.CreateTableDesc) LockDatabaseDesc(org.apache.hadoop.hive.ql.plan.LockDatabaseDesc) CreateDatabaseDesc(org.apache.hadoop.hive.ql.plan.CreateDatabaseDesc) InsertTableDesc(org.apache.hadoop.hive.ql.plan.InsertTableDesc) KillQueryDesc(org.apache.hadoop.hive.ql.plan.KillQueryDesc) AddPartitionDesc(org.apache.hadoop.hive.ql.plan.AddPartitionDesc) DescDatabaseDesc(org.apache.hadoop.hive.ql.plan.DescDatabaseDesc) ShowColumnsDesc(org.apache.hadoop.hive.ql.plan.ShowColumnsDesc)
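
Every branch above operates on the same Hive handle obtained once at the top of execute(). A minimal sketch of that lifecycle (the class name HiveHandleSketch is invented for illustration): Hive.get(conf) returns a per-thread Hive instance backed by a metastore client, and Hive.closeCurrent() releases it.

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;

public class HiveHandleSketch {
    public static void main(String[] args) throws HiveException {
        HiveConf conf = new HiveConf();
        // Hive.get returns the thread-local Hive instance, creating it on first use
        Hive db = Hive.get(conf);
        try {
            System.out.println(db.getAllDatabases());
        } finally {
            // release the thread-local metastore client
            Hive.closeCurrent();
        }
    }
}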

Example 22 with Hive

Use of org.apache.hadoop.hive.ql.metadata.Hive in project hive by apache.

The class ExportTask, method execute.

@Override
protected int execute(DriverContext driverContext) {
    try {
        // Also creates the root directory
        TableExport.Paths exportPaths = new TableExport.Paths(work.getAstRepresentationForErrorMsg(), work.getExportRootDir(), conf, false);
        Hive db = getHive();
        LOG.debug("Exporting data to: {}", exportPaths.getExportRootDir());
        TableExport tableExport = new TableExport(exportPaths, work.getTableSpec(), work.getReplicationSpec(), db, null, conf);
        if (!tableExport.write()) {
            throw new SemanticException(ErrorMsg.EXIM_FOR_NON_NATIVE.getMsg());
        }
    } catch (Exception e) {
        LOG.error("failed", e);
        setException(e);
        return 1;
    }
    return 0;
}
Also used: TableExport(org.apache.hadoop.hive.ql.parse.repl.dump.TableExport) Hive(org.apache.hadoop.hive.ql.metadata.Hive) SemanticException(org.apache.hadoop.hive.ql.parse.SemanticException)
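
The try/catch shape here is the Task.execute convention shared by these examples: failures are logged, recorded via setException, and translated into a non-zero return code rather than rethrown. A generic sketch of that convention (TaskSketch and doWork are hypothetical names, not Hive API):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

abstract class TaskSketch {
    private static final Logger LOG = LoggerFactory.getLogger(TaskSketch.class);
    private Exception exception;

    protected void setException(Exception e) { this.exception = e; }

    protected abstract void doWork() throws Exception;

    public int execute() {
        try {
            doWork();
            return 0;           // success
        } catch (Exception e) {
            LOG.error("failed", e);
            setException(e);    // surface the cause to the caller
            return 1;           // non-zero signals failure to the Driver
        }
    }
}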

Example 23 with Hive

Use of org.apache.hadoop.hive.ql.metadata.Hive in project hive by apache.

The class StatsTask, method execute.

@Override
public int execute(DriverContext driverContext) {
    if (driverContext.getCtx().getExplainAnalyze() == AnalyzeState.RUNNING) {
        return 0;
    }
    if (work.isAggregating() && work.isFooterScan()) {
        throw new RuntimeException("Can not have both basic stats work and stats no job work!");
    }
    int ret = 0;
    try {
        if (work.isFooterScan()) {
            work.getBasicStatsNoJobWork().setPartitions(work.getPartitions());
        }
        Hive db = getHive();
        Table tbl = getTable(db);
        for (IStatsProcessor task : processors) {
            task.setDpPartSpecs(dpPartSpecs);
            ret = task.process(db, tbl);
            if (ret != 0) {
                return ret;
            }
        }
    } catch (Exception e) {
        LOG.error("Failed to run stats task", e);
        return 1;
    }
    return 0;
}
Also used: Hive(org.apache.hadoop.hive.ql.metadata.Hive) Table(org.apache.hadoop.hive.ql.metadata.Table) IStatsProcessor(org.apache.hadoop.hive.ql.stats.IStatsProcessor) SemanticException(org.apache.hadoop.hive.ql.parse.SemanticException) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException)
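
The loop over processors is a simple short-circuiting chain: each IStatsProcessor runs in order and the first non-zero return code aborts the task. A generic sketch of that pattern (Processor and ProcessorChain are hypothetical names):

import java.util.List;

interface Processor<T> {
    int process(T input) throws Exception;
}

class ProcessorChain {
    // Run processors in order; stop at the first non-zero return code,
    // mirroring the loop in StatsTask.execute above.
    static <T> int runAll(List<? extends Processor<T>> processors, T input) throws Exception {
        for (Processor<T> p : processors) {
            int rc = p.process(input);
            if (rc != 0) {
                return rc;
            }
        }
        return 0;
    }
}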

Example 24 with Hive

Use of org.apache.hadoop.hive.ql.metadata.Hive in project hive by apache.

The class Driver, method doAuthorization.

/**
 * Do authorization using the post-semantic-analysis information in the semantic
 * analyzer. The original command is also passed so that the authorization
 * interface can provide more useful information in logs.
 * @param op HiveOperation being authorized
 * @param sem SemanticAnalyzer used to parse the input query
 * @param command the input query
 * @throws HiveException
 * @throws AuthorizationException
 */
public static void doAuthorization(HiveOperation op, BaseSemanticAnalyzer sem, String command) throws HiveException, AuthorizationException {
    SessionState ss = SessionState.get();
    Hive db = sem.getDb();
    Set<ReadEntity> additionalInputs = new HashSet<ReadEntity>();
    for (Entity e : sem.getInputs()) {
        if (e.getType() == Entity.Type.PARTITION) {
            additionalInputs.add(new ReadEntity(e.getTable()));
        }
    }
    Set<WriteEntity> additionalOutputs = new HashSet<WriteEntity>();
    for (WriteEntity e : sem.getOutputs()) {
        if (e.getType() == Entity.Type.PARTITION) {
            additionalOutputs.add(new WriteEntity(e.getTable(), e.getWriteType()));
        }
    }
    // The following union operation returns a view that traverses the first set
    // once and then, in order, each element of the second set not contained in
    // the first. It never replaces anything in the first set, so the WriteType
    // of each WriteEntity in the outputs set is preserved.
    Set<ReadEntity> inputs = Sets.union(sem.getInputs(), additionalInputs);
    Set<WriteEntity> outputs = Sets.union(sem.getOutputs(), additionalOutputs);
    if (ss.isAuthorizationModeV2()) {
        // get mapping of tables to columns used
        ColumnAccessInfo colAccessInfo = sem.getColumnAccessInfo();
        // colAccessInfo is set only in case of SemanticAnalyzer
        Map<String, List<String>> selectTab2Cols = colAccessInfo != null ? colAccessInfo.getTableToColumnAccessMap() : null;
        Map<String, List<String>> updateTab2Cols = sem.getUpdateColumnAccessInfo() != null ? sem.getUpdateColumnAccessInfo().getTableToColumnAccessMap() : null;
        doAuthorizationV2(ss, op, inputs, outputs, command, selectTab2Cols, updateTab2Cols);
        return;
    }
    if (op == null) {
        throw new HiveException("Operation should not be null");
    }
    HiveAuthorizationProvider authorizer = ss.getAuthorizer();
    if (op.equals(HiveOperation.CREATEDATABASE)) {
        authorizer.authorize(op.getInputRequiredPrivileges(), op.getOutputRequiredPrivileges());
    } else if (op.equals(HiveOperation.CREATETABLE_AS_SELECT) || op.equals(HiveOperation.CREATETABLE)) {
        authorizer.authorize(db.getDatabase(SessionState.get().getCurrentDatabase()), null, HiveOperation.CREATETABLE_AS_SELECT.getOutputRequiredPrivileges());
    } else {
        if (op.equals(HiveOperation.IMPORT)) {
            ImportSemanticAnalyzer isa = (ImportSemanticAnalyzer) sem;
            if (!isa.existsTable()) {
                authorizer.authorize(db.getDatabase(SessionState.get().getCurrentDatabase()), null, HiveOperation.CREATETABLE_AS_SELECT.getOutputRequiredPrivileges());
            }
        }
    }
    if (outputs != null && outputs.size() > 0) {
        for (WriteEntity write : outputs) {
            if (write.isDummy() || write.isPathType()) {
                continue;
            }
            if (write.getType() == Entity.Type.DATABASE) {
                if (!op.equals(HiveOperation.IMPORT)) {
                    // We skip DB check for import here because we already handle it above
                    // as a CTAS check.
                    authorizer.authorize(write.getDatabase(), null, op.getOutputRequiredPrivileges());
                }
                continue;
            }
            if (write.getType() == WriteEntity.Type.PARTITION) {
                Partition part = db.getPartition(write.getTable(), write.getPartition().getSpec(), false);
                if (part != null) {
                    authorizer.authorize(write.getPartition(), null, op.getOutputRequiredPrivileges());
                    continue;
                }
            }
            if (write.getTable() != null) {
                authorizer.authorize(write.getTable(), null, op.getOutputRequiredPrivileges());
            }
        }
    }
    if (inputs != null && inputs.size() > 0) {
        Map<Table, List<String>> tab2Cols = new HashMap<Table, List<String>>();
        Map<Partition, List<String>> part2Cols = new HashMap<Partition, List<String>>();
        // determine if partition level privileges should be checked for input tables
        Map<String, Boolean> tableUsePartLevelAuth = new HashMap<String, Boolean>();
        for (ReadEntity read : inputs) {
            if (read.isDummy() || read.isPathType() || read.getType() == Entity.Type.DATABASE) {
                continue;
            }
            Table tbl = read.getTable();
            if ((read.getPartition() != null) || (tbl != null && tbl.isPartitioned())) {
                String tblName = tbl.getTableName();
                if (tableUsePartLevelAuth.get(tblName) == null) {
                    boolean usePartLevelPriv = (tbl.getParameters().get("PARTITION_LEVEL_PRIVILEGE") != null && ("TRUE".equalsIgnoreCase(tbl.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))));
                    if (usePartLevelPriv) {
                        tableUsePartLevelAuth.put(tblName, Boolean.TRUE);
                    } else {
                        tableUsePartLevelAuth.put(tblName, Boolean.FALSE);
                    }
                }
            }
        }
        // column authorization is checked through table scan operators.
        getTablePartitionUsedColumns(op, sem, tab2Cols, part2Cols, tableUsePartLevelAuth);
        // cache the results for table authorization
        Set<String> tableAuthChecked = new HashSet<String>();
        for (ReadEntity read : inputs) {
            // If the read is not direct, we do not need to check its authorization.
            if (read.isDummy() || read.isPathType() || !read.isDirect()) {
                continue;
            }
            if (read.getType() == Entity.Type.DATABASE) {
                authorizer.authorize(read.getDatabase(), op.getInputRequiredPrivileges(), null);
                continue;
            }
            Table tbl = read.getTable();
            if (tbl.isView() && sem instanceof SemanticAnalyzer) {
                tab2Cols.put(tbl, sem.getColumnAccessInfo().getTableToColumnAccessMap().get(tbl.getCompleteName()));
            }
            if (read.getPartition() != null) {
                Partition partition = read.getPartition();
                tbl = partition.getTable();
                // use partition level authorization
                if (Boolean.TRUE.equals(tableUsePartLevelAuth.get(tbl.getTableName()))) {
                    List<String> cols = part2Cols.get(partition);
                    if (cols != null && cols.size() > 0) {
                        authorizer.authorize(partition.getTable(), partition, cols, op.getInputRequiredPrivileges(), null);
                    } else {
                        authorizer.authorize(partition, op.getInputRequiredPrivileges(), null);
                    }
                    continue;
                }
            }
            // If we reach here, a table-level check is needed; it may already have
            // happened while handling other partitions of the same table.
            if (tbl != null && !tableAuthChecked.contains(tbl.getTableName()) && !(Boolean.TRUE.equals(tableUsePartLevelAuth.get(tbl.getTableName())))) {
                List<String> cols = tab2Cols.get(tbl);
                if (cols != null && cols.size() > 0) {
                    authorizer.authorize(tbl, null, cols, op.getInputRequiredPrivileges(), null);
                } else {
                    authorizer.authorize(tbl, op.getInputRequiredPrivileges(), null);
                }
                tableAuthChecked.add(tbl.getTableName());
            }
        }
    }
}
Also used: HiveAuthorizationProvider(org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider) SessionState(org.apache.hadoop.hive.ql.session.SessionState) WriteEntity(org.apache.hadoop.hive.ql.hooks.WriteEntity) ReadEntity(org.apache.hadoop.hive.ql.hooks.ReadEntity) Entity(org.apache.hadoop.hive.ql.hooks.Entity) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) LinkedHashMap(java.util.LinkedHashMap) HashMap(java.util.HashMap) ImportSemanticAnalyzer(org.apache.hadoop.hive.ql.parse.ImportSemanticAnalyzer) PrunedPartitionList(org.apache.hadoop.hive.ql.parse.PrunedPartitionList) ArrayList(java.util.ArrayList) ValidTxnWriteIdList(org.apache.hadoop.hive.common.ValidTxnWriteIdList) ValidTxnList(org.apache.hadoop.hive.common.ValidTxnList) ValidWriteIdList(org.apache.hadoop.hive.common.ValidWriteIdList) List(java.util.List) LinkedList(java.util.LinkedList) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) LinkedHashSet(java.util.LinkedHashSet) HashSet(java.util.HashSet) Partition(org.apache.hadoop.hive.ql.metadata.Partition) Table(org.apache.hadoop.hive.ql.metadata.Table) SemanticAnalyzer(org.apache.hadoop.hive.ql.parse.SemanticAnalyzer) BaseSemanticAnalyzer(org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer) ColumnAccessInfo(org.apache.hadoop.hive.ql.parse.ColumnAccessInfo) Hive(org.apache.hadoop.hive.ql.metadata.Hive)
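
The Sets.union comment in this method is doing real work: Guava's view iterates the first set in full and then only those elements of the second set not already present, so the WriteEntity instances in the original outputs set, with their WriteType, are never replaced by the partition-derived copies. A minimal demonstration of that property:

import com.google.common.collect.Sets;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;

public class UnionSketch {
    public static void main(String[] args) {
        Set<String> first = new LinkedHashSet<>(List.of("a", "b"));
        Set<String> second = new LinkedHashSet<>(List.of("b", "c"));
        // Sets.union returns an unmodifiable live view; "b" is taken from
        // the first set and the duplicate in the second set is skipped.
        Set<String> union = Sets.union(first, second);
        System.out.println(union); // prints [a, b, c]
    }
}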

Example 25 with Hive

Use of org.apache.hadoop.hive.ql.metadata.Hive in project hive by apache.

The class TestQBCompact, method init.

@BeforeClass
public static void init() throws Exception {
    queryState = new QueryState.Builder().build();
    conf = queryState.getConf();
    conf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory");
    SessionState.start(conf);
    // Create a table so we can work against it
    Hive h = Hive.get(conf);
    List<String> cols = new ArrayList<String>();
    cols.add("a");
    List<String> partCols = new ArrayList<String>();
    partCols.add("ds");
    h.createTable("foo", cols, partCols, OrcInputFormat.class, OrcOutputFormat.class);
    Table t = h.getTable("foo");
    Map<String, String> partSpec = new HashMap<String, String>();
    partSpec.put("ds", "today");
    h.createPartition(t, partSpec);
}
Also used: Hive(org.apache.hadoop.hive.ql.metadata.Hive) Table(org.apache.hadoop.hive.ql.metadata.Table) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) BeforeClass(org.junit.BeforeClass)
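
A hypothetical follow-up to this setup (not part of the original test) might confirm the partition is visible through the same handle. The sketch assumes Hive.getPartition with forceCreate set to false is a read-only lookup that returns null when the partition does not exist; treat that return contract as an assumption.

import java.util.Map;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;

public class VerifyPartitionSketch {
    // Hypothetical helper, not in the original test: look up the partition
    // created in init(). With forceCreate = false the call is read-only
    // (assumption: it returns null rather than creating the partition).
    static void verify(Hive h, Table t, Map<String, String> partSpec) throws Exception {
        Partition p = h.getPartition(t, partSpec, false);
        if (p == null) {
            throw new IllegalStateException("expected partition ds=today to exist");
        }
    }
}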

Aggregations

Hive (org.apache.hadoop.hive.ql.metadata.Hive): 49
HiveException (org.apache.hadoop.hive.ql.metadata.HiveException): 23
Table (org.apache.hadoop.hive.ql.metadata.Table): 16
ArrayList (java.util.ArrayList): 10
Path (org.apache.hadoop.fs.Path): 9
AddPartitionDesc (org.apache.hadoop.hive.ql.plan.AddPartitionDesc): 9
PartitionResult (org.apache.hadoop.hive.ql.metadata.CheckResult.PartitionResult): 8
SemanticException (org.apache.hadoop.hive.ql.parse.SemanticException): 8
Test (org.junit.Test): 8
FileSystem (org.apache.hadoop.fs.FileSystem): 5
HiveConf (org.apache.hadoop.hive.conf.HiveConf): 5
Partition (org.apache.hadoop.hive.ql.metadata.Partition): 5
IOException (java.io.IOException): 4
PrincipalType (org.apache.hadoop.hive.metastore.api.PrincipalType): 4
HashMap (java.util.HashMap): 3
List (java.util.List): 3
FileStatus (org.apache.hadoop.fs.FileStatus): 3
Database (org.apache.hadoop.hive.metastore.api.Database): 3
ASTNode (org.apache.hadoop.hive.ql.parse.ASTNode): 3
SessionState (org.apache.hadoop.hive.ql.session.SessionState): 3