Example 1 with TruncateTableDesc

Use of org.apache.hadoop.hive.ql.plan.TruncateTableDesc in project hive by apache: class DDLTask, method execute.
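
DDLTask.execute is one long dispatch chain: the DDLWork carries at most one non-null descriptor, and the first getter that returns non-null decides which DDL operation runs. The TruncateTableDesc branch sits near the end of the chain.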

@Override
public int execute(DriverContext driverContext) {
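    // During EXPLAIN ANALYZE's stats-collection (RUNNING) phase, skip actual DDL execution.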
    if (driverContext.getCtx().getExplainAnalyze() == AnalyzeState.RUNNING) {
        return 0;
    }
    // Get the Hive (metastore) handle
    Hive db;
    try {
        db = Hive.get(conf);
        CreateDatabaseDesc createDatabaseDesc = work.getCreateDatabaseDesc();
        if (null != createDatabaseDesc) {
            return createDatabase(db, createDatabaseDesc);
        }
        DropDatabaseDesc dropDatabaseDesc = work.getDropDatabaseDesc();
        if (dropDatabaseDesc != null) {
            return dropDatabase(db, dropDatabaseDesc);
        }
        LockDatabaseDesc lockDatabaseDesc = work.getLockDatabaseDesc();
        if (lockDatabaseDesc != null) {
            return lockDatabase(db, lockDatabaseDesc);
        }
        UnlockDatabaseDesc unlockDatabaseDesc = work.getUnlockDatabaseDesc();
        if (unlockDatabaseDesc != null) {
            return unlockDatabase(db, unlockDatabaseDesc);
        }
        SwitchDatabaseDesc switchDatabaseDesc = work.getSwitchDatabaseDesc();
        if (switchDatabaseDesc != null) {
            return switchDatabase(db, switchDatabaseDesc);
        }
        DescDatabaseDesc descDatabaseDesc = work.getDescDatabaseDesc();
        if (descDatabaseDesc != null) {
            return descDatabase(db, descDatabaseDesc);
        }
        AlterDatabaseDesc alterDatabaseDesc = work.getAlterDatabaseDesc();
        if (alterDatabaseDesc != null) {
            return alterDatabase(db, alterDatabaseDesc);
        }
        CreateTableDesc crtTbl = work.getCreateTblDesc();
        if (crtTbl != null) {
            return createTable(db, crtTbl);
        }
        CreateTableLikeDesc crtTblLike = work.getCreateTblLikeDesc();
        if (crtTblLike != null) {
            return createTableLike(db, crtTblLike);
        }
        DropTableDesc dropTbl = work.getDropTblDesc();
        if (dropTbl != null) {
            dropTableOrPartitions(db, dropTbl);
            return 0;
        }
        AlterTableDesc alterTbl = work.getAlterTblDesc();
        if (alterTbl != null) {
            if (!allowOperationInReplicationScope(db, alterTbl.getOldName(), null, alterTbl.getReplicationSpec())) {
                // Skip the alter: either the table is missing (a drop/rename that follows
                // this alter in the replication stream removed it), or the existing table
                // is newer than this update.
                LOG.debug("DDLTask: Alter Table is skipped as table {} is newer than update", alterTbl.getOldName());
                return 0;
            }
            if (alterTbl.getOp() == AlterTableTypes.DROPCONSTRAINT) {
                return dropConstraint(db, alterTbl);
            } else if (alterTbl.getOp() == AlterTableTypes.ADDCONSTRAINT) {
                return addConstraints(db, alterTbl);
            } else {
                return alterTable(db, alterTbl);
            }
        }
        CreateViewDesc crtView = work.getCreateViewDesc();
        if (crtView != null) {
            return createView(db, crtView);
        }
        AddPartitionDesc addPartitionDesc = work.getAddPartitionDesc();
        if (addPartitionDesc != null) {
            return addPartitions(db, addPartitionDesc);
        }
        RenamePartitionDesc renamePartitionDesc = work.getRenamePartitionDesc();
        if (renamePartitionDesc != null) {
            return renamePartition(db, renamePartitionDesc);
        }
        AlterTableSimpleDesc simpleDesc = work.getAlterTblSimpleDesc();
        if (simpleDesc != null) {
            if (simpleDesc.getType() == AlterTableTypes.TOUCH) {
                return touch(db, simpleDesc);
            } else if (simpleDesc.getType() == AlterTableTypes.ARCHIVE) {
                return archive(db, simpleDesc, driverContext);
            } else if (simpleDesc.getType() == AlterTableTypes.UNARCHIVE) {
                return unarchive(db, simpleDesc);
            } else if (simpleDesc.getType() == AlterTableTypes.COMPACT) {
                return compact(db, simpleDesc);
            }
        }
        MsckDesc msckDesc = work.getMsckDesc();
        if (msckDesc != null) {
            return msck(db, msckDesc);
        }
        DescTableDesc descTbl = work.getDescTblDesc();
        if (descTbl != null) {
            return describeTable(db, descTbl);
        }
        DescFunctionDesc descFunc = work.getDescFunctionDesc();
        if (descFunc != null) {
            return describeFunction(db, descFunc);
        }
        ShowDatabasesDesc showDatabases = work.getShowDatabasesDesc();
        if (showDatabases != null) {
            return showDatabases(db, showDatabases);
        }
        ShowTablesDesc showTbls = work.getShowTblsDesc();
        if (showTbls != null) {
            return showTablesOrViews(db, showTbls);
        }
        ShowColumnsDesc showCols = work.getShowColumnsDesc();
        if (showCols != null) {
            return showColumns(db, showCols);
        }
        ShowTableStatusDesc showTblStatus = work.getShowTblStatusDesc();
        if (showTblStatus != null) {
            return showTableStatus(db, showTblStatus);
        }
        ShowTblPropertiesDesc showTblProperties = work.getShowTblPropertiesDesc();
        if (showTblProperties != null) {
            return showTableProperties(db, showTblProperties);
        }
        ShowFunctionsDesc showFuncs = work.getShowFuncsDesc();
        if (showFuncs != null) {
            return showFunctions(db, showFuncs);
        }
        ShowLocksDesc showLocks = work.getShowLocksDesc();
        if (showLocks != null) {
            return showLocks(db, showLocks);
        }
        ShowCompactionsDesc compactionsDesc = work.getShowCompactionsDesc();
        if (compactionsDesc != null) {
            return showCompactions(db, compactionsDesc);
        }
        ShowTxnsDesc txnsDesc = work.getShowTxnsDesc();
        if (txnsDesc != null) {
            return showTxns(db, txnsDesc);
        }
        AbortTxnsDesc abortTxnsDesc = work.getAbortTxnsDesc();
        if (abortTxnsDesc != null) {
            return abortTxns(db, abortTxnsDesc);
        }
        LockTableDesc lockTbl = work.getLockTblDesc();
        if (lockTbl != null) {
            return lockTable(db, lockTbl);
        }
        UnlockTableDesc unlockTbl = work.getUnlockTblDesc();
        if (unlockTbl != null) {
            return unlockTable(db, unlockTbl);
        }
        ShowPartitionsDesc showParts = work.getShowPartsDesc();
        if (showParts != null) {
            return showPartitions(db, showParts);
        }
        ShowCreateDatabaseDesc showCreateDb = work.getShowCreateDbDesc();
        if (showCreateDb != null) {
            return showCreateDatabase(db, showCreateDb);
        }
        ShowCreateTableDesc showCreateTbl = work.getShowCreateTblDesc();
        if (showCreateTbl != null) {
            return showCreateTable(db, showCreateTbl);
        }
        ShowConfDesc showConf = work.getShowConfDesc();
        if (showConf != null) {
            return showConf(db, showConf);
        }
        RoleDDLDesc roleDDLDesc = work.getRoleDDLDesc();
        if (roleDDLDesc != null) {
            return roleDDL(db, roleDDLDesc);
        }
        GrantDesc grantDesc = work.getGrantDesc();
        if (grantDesc != null) {
            return grantOrRevokePrivileges(db, grantDesc.getPrincipals(), grantDesc.getPrivileges(), grantDesc.getPrivilegeSubjectDesc(), grantDesc.getGrantor(), grantDesc.getGrantorType(), grantDesc.isGrantOption(), true);
        }
        RevokeDesc revokeDesc = work.getRevokeDesc();
        if (revokeDesc != null) {
            return grantOrRevokePrivileges(db, revokeDesc.getPrincipals(), revokeDesc.getPrivileges(), revokeDesc.getPrivilegeSubjectDesc(), null, null, revokeDesc.isGrantOption(), false);
        }
        ShowGrantDesc showGrantDesc = work.getShowGrantDesc();
        if (showGrantDesc != null) {
            return showGrants(db, showGrantDesc);
        }
        GrantRevokeRoleDDL grantOrRevokeRoleDDL = work.getGrantRevokeRoleDDL();
        if (grantOrRevokeRoleDDL != null) {
            return grantOrRevokeRole(db, grantOrRevokeRoleDDL);
        }
        AlterTablePartMergeFilesDesc mergeFilesDesc = work.getMergeFilesDesc();
        if (mergeFilesDesc != null) {
            return mergeFiles(db, mergeFilesDesc, driverContext);
        }
        AlterTableAlterPartDesc alterPartDesc = work.getAlterTableAlterPartDesc();
        if (alterPartDesc != null) {
            return alterTableAlterPart(db, alterPartDesc);
        }
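        // TRUNCATE TABLE: the descriptor built by DDLSemanticAnalyzer (see Example 2)
        // arrives here and is handed off to truncateTable().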
        TruncateTableDesc truncateTableDesc = work.getTruncateTblDesc();
        if (truncateTableDesc != null) {
            return truncateTable(db, truncateTableDesc);
        }
        AlterTableExchangePartition alterTableExchangePartition = work.getAlterTableExchangePartition();
        if (alterTableExchangePartition != null) {
            return exchangeTablePartition(db, alterTableExchangePartition);
        }
        CacheMetadataDesc cacheMetadataDesc = work.getCacheMetadataDesc();
        if (cacheMetadataDesc != null) {
            return cacheMetadata(db, cacheMetadataDesc);
        }
        InsertTableDesc insertTableDesc = work.getInsertTableDesc();
        if (insertTableDesc != null) {
            return insertCommitWork(db, insertTableDesc);
        }
        PreInsertTableDesc preInsertTableDesc = work.getPreInsertTableDesc();
        if (preInsertTableDesc != null) {
            return preInsertWork(db, preInsertTableDesc);
        }
        KillQueryDesc killQueryDesc = work.getKillQueryDesc();
        if (killQueryDesc != null) {
            return killQuery(db, killQueryDesc);
        }
        if (work.getCreateResourcePlanDesc() != null) {
            return createResourcePlan(db, work.getCreateResourcePlanDesc());
        }
        if (work.getShowResourcePlanDesc() != null) {
            return showResourcePlans(db, work.getShowResourcePlanDesc());
        }
        if (work.getAlterResourcePlanDesc() != null) {
            return alterResourcePlan(db, work.getAlterResourcePlanDesc());
        }
        if (work.getDropResourcePlanDesc() != null) {
            return dropResourcePlan(db, work.getDropResourcePlanDesc());
        }
        if (work.getCreateWMTriggerDesc() != null) {
            return createWMTrigger(db, work.getCreateWMTriggerDesc());
        }
        if (work.getAlterWMTriggerDesc() != null) {
            return alterWMTrigger(db, work.getAlterWMTriggerDesc());
        }
        if (work.getDropWMTriggerDesc() != null) {
            return dropWMTrigger(db, work.getDropWMTriggerDesc());
        }
        if (work.getWmPoolDesc() != null) {
            return createOrAlterWMPool(db, work.getWmPoolDesc());
        }
        if (work.getDropWMPoolDesc() != null) {
            return dropWMPool(db, work.getDropWMPoolDesc());
        }
        if (work.getWmMappingDesc() != null) {
            return createOrAlterWMMapping(db, work.getWmMappingDesc());
        }
        if (work.getDropWMMappingDesc() != null) {
            return dropWMMapping(db, work.getDropWMMappingDesc());
        }
        if (work.getTriggerToPoolMappingDesc() != null) {
            return createOrDropTriggerToPoolMapping(db, work.getTriggerToPoolMappingDesc());
        }
        if (work.getAlterMaterializedViewDesc() != null) {
            return alterMaterializedView(db, work.getAlterMaterializedViewDesc());
        }
    } catch (Throwable e) {
        failed(e);
        return 1;
    }
    assert false;
    return 0;
}
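
For orientation, here is a minimal, hypothetical sketch of the producer side of this dispatch: building a whole-table TruncateTableDesc, wrapping it in a DDLWork, and obtaining the task. The constructor and TaskFactory calls mirror Example 2 below; the table name and the empty input/output sets are invented placeholders.

import java.io.Serializable;
import java.util.HashSet;
import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.exec.TaskFactory;
import org.apache.hadoop.hive.ql.hooks.ReadEntity;
import org.apache.hadoop.hive.ql.hooks.WriteEntity;
import org.apache.hadoop.hive.ql.plan.DDLWork;
import org.apache.hadoop.hive.ql.plan.TruncateTableDesc;

Task<? extends Serializable> buildTruncateTask() {
    // null partSpec and null replicationSpec: truncate the whole table.
    TruncateTableDesc truncateDesc = new TruncateTableDesc("page_views", null, null);
    DDLWork ddlWork = new DDLWork(new HashSet<ReadEntity>(), new HashSet<WriteEntity>(), truncateDesc);
    // DDLTask.execute() above finds getTruncateTblDesc() non-null and
    // routes to truncateTable(db, truncateDesc).
    return TaskFactory.get(ddlWork);
}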
Also used: DropDatabaseDesc(org.apache.hadoop.hive.ql.plan.DropDatabaseDesc), LockTableDesc(org.apache.hadoop.hive.ql.plan.LockTableDesc), UnlockTableDesc(org.apache.hadoop.hive.ql.plan.UnlockTableDesc), ShowTblPropertiesDesc(org.apache.hadoop.hive.ql.plan.ShowTblPropertiesDesc), RenamePartitionDesc(org.apache.hadoop.hive.ql.plan.RenamePartitionDesc), ShowDatabasesDesc(org.apache.hadoop.hive.ql.plan.ShowDatabasesDesc), DescFunctionDesc(org.apache.hadoop.hive.ql.plan.DescFunctionDesc), CreateViewDesc(org.apache.hadoop.hive.ql.plan.CreateViewDesc), ShowTxnsDesc(org.apache.hadoop.hive.ql.plan.ShowTxnsDesc), AlterDatabaseDesc(org.apache.hadoop.hive.ql.plan.AlterDatabaseDesc), ShowCompactionsDesc(org.apache.hadoop.hive.ql.plan.ShowCompactionsDesc), ShowGrantDesc(org.apache.hadoop.hive.ql.plan.ShowGrantDesc), GrantDesc(org.apache.hadoop.hive.ql.plan.GrantDesc), ShowCreateTableDesc(org.apache.hadoop.hive.ql.plan.ShowCreateTableDesc), AlterTableSimpleDesc(org.apache.hadoop.hive.ql.plan.AlterTableSimpleDesc), DescTableDesc(org.apache.hadoop.hive.ql.plan.DescTableDesc), GrantRevokeRoleDDL(org.apache.hadoop.hive.ql.plan.GrantRevokeRoleDDL), TruncateTableDesc(org.apache.hadoop.hive.ql.plan.TruncateTableDesc), DropTableDesc(org.apache.hadoop.hive.ql.plan.DropTableDesc), ShowTablesDesc(org.apache.hadoop.hive.ql.plan.ShowTablesDesc), Hive(org.apache.hadoop.hive.ql.metadata.Hive), ShowFunctionsDesc(org.apache.hadoop.hive.ql.plan.ShowFunctionsDesc), AbortTxnsDesc(org.apache.hadoop.hive.ql.plan.AbortTxnsDesc), ShowCreateDatabaseDesc(org.apache.hadoop.hive.ql.plan.ShowCreateDatabaseDesc), AlterTableExchangePartition(org.apache.hadoop.hive.ql.plan.AlterTableExchangePartition), CacheMetadataDesc(org.apache.hadoop.hive.ql.plan.CacheMetadataDesc), UnlockDatabaseDesc(org.apache.hadoop.hive.ql.plan.UnlockDatabaseDesc), RevokeDesc(org.apache.hadoop.hive.ql.plan.RevokeDesc), AlterTableDesc(org.apache.hadoop.hive.ql.plan.AlterTableDesc), MsckDesc(org.apache.hadoop.hive.ql.plan.MsckDesc), ShowLocksDesc(org.apache.hadoop.hive.ql.plan.ShowLocksDesc), SwitchDatabaseDesc(org.apache.hadoop.hive.ql.plan.SwitchDatabaseDesc), ShowConfDesc(org.apache.hadoop.hive.ql.plan.ShowConfDesc), ShowPartitionsDesc(org.apache.hadoop.hive.ql.plan.ShowPartitionsDesc), CreateTableLikeDesc(org.apache.hadoop.hive.ql.plan.CreateTableLikeDesc), ShowTableStatusDesc(org.apache.hadoop.hive.ql.plan.ShowTableStatusDesc), PreInsertTableDesc(org.apache.hadoop.hive.ql.parse.PreInsertTableDesc), AlterTableAlterPartDesc(org.apache.hadoop.hive.ql.plan.AlterTableAlterPartDesc), RoleDDLDesc(org.apache.hadoop.hive.ql.plan.RoleDDLDesc), AlterTablePartMergeFilesDesc(org.apache.hadoop.hive.ql.parse.AlterTablePartMergeFilesDesc), CreateTableDesc(org.apache.hadoop.hive.ql.plan.CreateTableDesc), LockDatabaseDesc(org.apache.hadoop.hive.ql.plan.LockDatabaseDesc), CreateDatabaseDesc(org.apache.hadoop.hive.ql.plan.CreateDatabaseDesc), InsertTableDesc(org.apache.hadoop.hive.ql.plan.InsertTableDesc), KillQueryDesc(org.apache.hadoop.hive.ql.plan.KillQueryDesc), AddPartitionDesc(org.apache.hadoop.hive.ql.plan.AddPartitionDesc), DescDatabaseDesc(org.apache.hadoop.hive.ql.plan.DescDatabaseDesc), ShowColumnsDesc(org.apache.hadoop.hive.ql.plan.ShowColumnsDesc)

Example 2 with TruncateTableDesc

Use of org.apache.hadoop.hive.ql.plan.TruncateTableDesc in project hive by apache: class DDLSemanticAnalyzer, method analyzeTruncateTable.
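
analyzeTruncateTable compiles a TRUNCATE TABLE statement: it verifies that the target is a managed, native table, registers exclusive write entities for locking, and emits a TruncateTableDesc wrapped in a DDLWork task. When a COLUMNS clause is present (the two-child AST case), it additionally plans a rewrite of the affected files through a temporary directory, a move task to swap the results into place, and optionally a stats task.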

private void analyzeTruncateTable(ASTNode ast) throws SemanticException {
    // child 0 is TOK_TABLE_PARTITION: the table name plus an optional partition spec
    ASTNode root = (ASTNode) ast.getChild(0);
    String tableName = getUnescapedName((ASTNode) root.getChild(0));
    Table table = getTable(tableName, true);
    if (table.getTableType() != TableType.MANAGED_TABLE) {
        throw new SemanticException(ErrorMsg.TRUNCATE_FOR_NON_MANAGED_TABLE.format(tableName));
    }
    if (table.isNonNative()) {
        // TODO
        throw new SemanticException(ErrorMsg.TRUNCATE_FOR_NON_NATIVE_TABLE.format(tableName));
    }
    if (!table.isPartitioned() && root.getChildCount() > 1) {
        throw new SemanticException(ErrorMsg.PARTSPEC_FOR_NON_PARTITIONED_TABLE.format(tableName));
    }
    Map<String, String> partSpec = getPartSpec((ASTNode) root.getChild(1));
    if (partSpec == null) {
        if (!table.isPartitioned()) {
            outputs.add(new WriteEntity(table, WriteEntity.WriteType.DDL_EXCLUSIVE));
        } else {
            for (Partition partition : getPartitions(table, null, false)) {
                outputs.add(new WriteEntity(partition, WriteEntity.WriteType.DDL_EXCLUSIVE));
            }
        }
    } else {
        if (isFullSpec(table, partSpec)) {
            validatePartSpec(table, partSpec, (ASTNode) root.getChild(1), conf, true);
            Partition partition = getPartition(table, partSpec, true);
            outputs.add(new WriteEntity(partition, WriteEntity.WriteType.DDL_EXCLUSIVE));
        } else {
            validatePartSpec(table, partSpec, (ASTNode) root.getChild(1), conf, false);
            for (Partition partition : getPartitions(table, partSpec, false)) {
                outputs.add(new WriteEntity(partition, WriteEntity.WriteType.DDL_EXCLUSIVE));
            }
        }
    }
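    // Build the truncate descriptor (partSpec is null for a whole-table truncate)
    // and wrap it in a DDLWork task.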
    TruncateTableDesc truncateTblDesc = new TruncateTableDesc(tableName, partSpec, null);
    DDLWork ddlWork = new DDLWork(getInputs(), getOutputs(), truncateTblDesc);
    Task<? extends Serializable> truncateTask = TaskFactory.get(ddlWork);
    // Is this a truncate-columns command? (the AST's second child is the column list)
    List<String> columnNames = null;
    if (ast.getChildCount() == 2) {
        try {
            columnNames = getColumnNames((ASTNode) ast.getChild(1));
            // Truncating columns of an insert-only (MM) table: it would be possible
            // to support this, but the command has little practical use.
            if (AcidUtils.isInsertOnlyTable(table.getParameters())) {
                throw new SemanticException("Truncating MM table columns not presently supported");
            }
            List<String> bucketCols = null;
            Class<? extends InputFormat> inputFormatClass = null;
            boolean isArchived = false;
            Path newTblPartLoc = null;
            Path oldTblPartLoc = null;
            List<FieldSchema> cols = null;
            ListBucketingCtx lbCtx = null;
            boolean isListBucketed = false;
            List<String> listBucketColNames = null;
            if (table.isPartitioned()) {
                Partition part = db.getPartition(table, partSpec, false);
                Path tabPath = table.getPath();
                Path partPath = part.getDataLocation();
                // if the table is in a different dfs than the partition,
                // replace the partition's dfs with the table's dfs.
                newTblPartLoc = new Path(tabPath.toUri().getScheme(), tabPath.toUri().getAuthority(), partPath.toUri().getPath());
                oldTblPartLoc = partPath;
                cols = part.getCols();
                bucketCols = part.getBucketCols();
                inputFormatClass = part.getInputFormatClass();
                isArchived = ArchiveUtils.isArchived(part);
                lbCtx = constructListBucketingCtx(part.getSkewedColNames(), part.getSkewedColValues(), part.getSkewedColValueLocationMaps(), part.isStoredAsSubDirectories(), conf);
                isListBucketed = part.isStoredAsSubDirectories();
                listBucketColNames = part.getSkewedColNames();
            } else {
                // input and output are the same
                oldTblPartLoc = table.getPath();
                newTblPartLoc = table.getPath();
                cols = table.getCols();
                bucketCols = table.getBucketCols();
                inputFormatClass = table.getInputFormatClass();
                lbCtx = constructListBucketingCtx(table.getSkewedColNames(), table.getSkewedColValues(), table.getSkewedColValueLocationMaps(), table.isStoredAsSubDirectories(), conf);
                isListBucketed = table.isStoredAsSubDirectories();
                listBucketColNames = table.getSkewedColNames();
            }
            // Column truncation is only supported for RCFile-backed tables.
            if (!inputFormatClass.equals(RCFileInputFormat.class)) {
                throw new SemanticException(ErrorMsg.TRUNCATE_COLUMN_NOT_RC.getMsg());
            }
            // Archived tables/partitions cannot be column-truncated.
            if (isArchived) {
                throw new SemanticException(ErrorMsg.TRUNCATE_COLUMN_ARCHIVED.getMsg());
            }
            Set<Integer> columnIndexes = new HashSet<Integer>();
            for (String columnName : columnNames) {
                boolean found = false;
                for (int columnIndex = 0; columnIndex < cols.size(); columnIndex++) {
                    if (columnName.equalsIgnoreCase(cols.get(columnIndex).getName())) {
                        columnIndexes.add(columnIndex);
                        found = true;
                        break;
                    }
                }
                // Throw an exception if the user is trying to truncate a column which doesn't exist
                if (!found) {
                    throw new SemanticException(ErrorMsg.INVALID_COLUMN.getMsg(columnName));
                }
                // Throw an exception if the table/partition is bucketed on one of the columns
                for (String bucketCol : bucketCols) {
                    if (bucketCol.equalsIgnoreCase(columnName)) {
                        throw new SemanticException(ErrorMsg.TRUNCATE_BUCKETED_COLUMN.getMsg(columnName));
                    }
                }
                if (isListBucketed) {
                    for (String listBucketCol : listBucketColNames) {
                        if (listBucketCol.equalsIgnoreCase(columnName)) {
                            throw new SemanticException(ErrorMsg.TRUNCATE_LIST_BUCKETED_COLUMN.getMsg(columnName));
                        }
                    }
                }
            }
            truncateTblDesc.setColumnIndexes(new ArrayList<Integer>(columnIndexes));
            truncateTblDesc.setInputDir(oldTblPartLoc);
            truncateTblDesc.setLbCtx(lbCtx);
            addInputsOutputsAlterTable(tableName, partSpec, AlterTableTypes.TRUNCATE);
            ddlWork.setNeedLock(true);
            TableDesc tblDesc = Utilities.getTableDesc(table);
            // Write the output to temporary directory and move it to the final location at the end
            // so the operation is atomic.
            Path queryTmpdir = ctx.getExternalTmpPath(newTblPartLoc);
            truncateTblDesc.setOutputDir(queryTmpdir);
            LoadTableDesc ltd = new LoadTableDesc(queryTmpdir, tblDesc, partSpec == null ? new HashMap<>() : partSpec);
            ltd.setLbCtx(lbCtx);
            @SuppressWarnings("unchecked") Task<MoveWork> moveTsk = TaskFactory.get(new MoveWork(null, null, ltd, null, false));
            truncateTask.addDependentTask(moveTsk);
            // Recalculate the HDFS stats if auto gather stats is set
            if (conf.getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) {
                BasicStatsWork basicStatsWork;
                if (oldTblPartLoc.equals(newTblPartLoc)) {
                    // If we're merging to the same location, we can avoid some metastore calls
                    TableSpec tablepart = new TableSpec(this.db, conf, root);
                    basicStatsWork = new BasicStatsWork(tablepart);
                } else {
                    basicStatsWork = new BasicStatsWork(ltd);
                }
                basicStatsWork.setNoStatsAggregator(true);
                basicStatsWork.setClearAggregatorStats(true);
                StatsWork columnStatsWork = new StatsWork(table, basicStatsWork, conf);
                Task<? extends Serializable> statTask = TaskFactory.get(columnStatsWork);
                moveTsk.addDependentTask(statTask);
            }
        } catch (HiveException e) {
            throw new SemanticException(e);
        }
    }
    rootTasks.add(truncateTask);
}
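
As a rough, hypothetical illustration of what this analyzer produces for a column truncate, the descriptor ends up populated roughly as below. The constructor and setters mirror the calls above; the table name, paths, and partition values are invented.

import java.util.ArrayList;
import java.util.Collections;
import java.util.Map;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.plan.TruncateTableDesc;

// Hypothetical inputs; the analyzer derives these from the AST and the metastore.
Map<String, String> partSpec = Collections.singletonMap("ds", "2018-06-01");
Path oldLoc = new Path("hdfs://nn:8020/warehouse/page_views/ds=2018-06-01");
Path tmpLoc = new Path("hdfs://nn:8020/tmp/hive/_truncate_tmp");

TruncateTableDesc desc = new TruncateTableDesc("page_views", partSpec, null);
desc.setColumnIndexes(new ArrayList<>(Collections.singletonList(2))); // truncate the third column
desc.setInputDir(oldLoc);  // current data location
desc.setOutputDir(tmpLoc); // rewritten files land here; a MoveTask swaps them into place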
Also used: MoveWork(org.apache.hadoop.hive.ql.plan.MoveWork), HiveException(org.apache.hadoop.hive.ql.metadata.HiveException), LinkedHashMap(java.util.LinkedHashMap), HashMap(java.util.HashMap), FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema), StatsWork(org.apache.hadoop.hive.ql.plan.StatsWork), BasicStatsWork(org.apache.hadoop.hive.ql.plan.BasicStatsWork), ListBucketingCtx(org.apache.hadoop.hive.ql.plan.ListBucketingCtx), WriteEntity(org.apache.hadoop.hive.ql.hooks.WriteEntity), HashSet(java.util.HashSet), Path(org.apache.hadoop.fs.Path), Partition(org.apache.hadoop.hive.ql.metadata.Partition), AlterTableExchangePartition(org.apache.hadoop.hive.ql.plan.AlterTableExchangePartition), Table(org.apache.hadoop.hive.ql.metadata.Table), TruncateTableDesc(org.apache.hadoop.hive.ql.plan.TruncateTableDesc), SQLUniqueConstraint(org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint), NotNullConstraint(org.apache.hadoop.hive.ql.metadata.NotNullConstraint), DefaultConstraint(org.apache.hadoop.hive.ql.metadata.DefaultConstraint), SQLCheckConstraint(org.apache.hadoop.hive.metastore.api.SQLCheckConstraint), SQLNotNullConstraint(org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint), SQLDefaultConstraint(org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint), LoadTableDesc(org.apache.hadoop.hive.ql.plan.LoadTableDesc), DDLWork(org.apache.hadoop.hive.ql.plan.DDLWork), RCFileInputFormat(org.apache.hadoop.hive.ql.io.RCFileInputFormat), DescTableDesc(org.apache.hadoop.hive.ql.plan.DescTableDesc), AlterTableDesc(org.apache.hadoop.hive.ql.plan.AlterTableDesc), UnlockTableDesc(org.apache.hadoop.hive.ql.plan.UnlockTableDesc), DropTableDesc(org.apache.hadoop.hive.ql.plan.DropTableDesc), ShowCreateTableDesc(org.apache.hadoop.hive.ql.plan.ShowCreateTableDesc), TableDesc(org.apache.hadoop.hive.ql.plan.TableDesc), LockTableDesc(org.apache.hadoop.hive.ql.plan.LockTableDesc)

Aggregations

AlterTableDesc (org.apache.hadoop.hive.ql.plan.AlterTableDesc) 2
AlterTableExchangePartition (org.apache.hadoop.hive.ql.plan.AlterTableExchangePartition) 2
DescTableDesc (org.apache.hadoop.hive.ql.plan.DescTableDesc) 2
DropTableDesc (org.apache.hadoop.hive.ql.plan.DropTableDesc) 2
LockTableDesc (org.apache.hadoop.hive.ql.plan.LockTableDesc) 2
ShowCreateTableDesc (org.apache.hadoop.hive.ql.plan.ShowCreateTableDesc) 2
HashMap (java.util.HashMap) 1
HashSet (java.util.HashSet) 1
LinkedHashMap (java.util.LinkedHashMap) 1
Path (org.apache.hadoop.fs.Path) 1
FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema) 1
SQLCheckConstraint (org.apache.hadoop.hive.metastore.api.SQLCheckConstraint) 1
SQLDefaultConstraint (org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint) 1
SQLNotNullConstraint (org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint) 1
SQLUniqueConstraint (org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint) 1
WriteEntity (org.apache.hadoop.hive.ql.hooks.WriteEntity) 1
RCFileInputFormat (org.apache.hadoop.hive.ql.io.RCFileInputFormat) 1
DefaultConstraint (org.apache.hadoop.hive.ql.metadata.DefaultConstraint) 1
Hive (org.apache.hadoop.hive.ql.metadata.Hive) 1
HiveException (org.apache.hadoop.hive.ql.metadata.HiveException) 1