use of org.apache.hadoop.hive.ql.exec.TableScanOperator in project hive by apache.
the class GenMapRedUtils method createMRWorkForMergingFiles.
/**
* Create a Map-only merge job using CombineHiveInputFormat for all partitions, with the
* following operators:
*
*   MR job J0:
*     ...
*     |
*     v
*     FileSinkOperator_1 (fsInput)
*     |
*     v
*   Merge job J1:
*     |
*     v
*     TableScan (using CombineHiveInputFormat) (tsMerge)
*     |
*     v
*     FileSinkOperator (fsMerge)
*
* Here the pathToPartitionInfo & pathToAlias will remain the same, which means the paths do
* not contain the dynamic partitions (their parent). So after the dynamic partitions are
* created (after the first job finishes, before the moveTask or ConditionalTask starts),
* we need to change the pathToPartitionInfo & pathToAlias to include the dynamic partition
* directories.
*
* @param fsInput the FileSink operator whose output files are to be merged
* @param finalName the final destination path that the merge job should write its output to
* @param dependencyTask
* @param mvTasks
* @param conf
* @param currTask
* @throws SemanticException
*/
public static void createMRWorkForMergingFiles(FileSinkOperator fsInput, Path finalName,
    DependencyCollectionTask dependencyTask, List<Task<MoveWork>> mvTasks, HiveConf conf,
    Task<? extends Serializable> currTask) throws SemanticException {
//
// 1. create the operator tree
//
FileSinkDesc fsInputDesc = fsInput.getConf();
// Create a TableScan operator
RowSchema inputRS = fsInput.getSchema();
TableScanOperator tsMerge = GenMapRedUtils.createTemporaryTableScanOperator(fsInput.getCompilationOpContext(), inputRS);
// Create a FileSink operator
TableDesc ts = (TableDesc) fsInputDesc.getTableInfo().clone();
FileSinkDesc fsOutputDesc = new FileSinkDesc(finalName, ts, conf.getBoolVar(ConfVars.COMPRESSRESULT));
FileSinkOperator fsOutput = (FileSinkOperator) OperatorFactory.getAndMakeChild(fsOutputDesc, inputRS, tsMerge);
// If dynamic partitioning is enabled for the input FileSinkOperator, the tsMerge input schema
// needs to include the partition columns, and the fsOutput should have a DynamicPartitionCtx
// to indicate that it needs to be dynamically partitioned.
DynamicPartitionCtx dpCtx = fsInputDesc.getDynPartCtx();
if (dpCtx != null && dpCtx.getNumDPCols() > 0) {
// adding DP ColumnInfo to the RowSchema signature
ArrayList<ColumnInfo> signature = inputRS.getSignature();
String tblAlias = fsInputDesc.getTableInfo().getTableName();
for (String dpCol : dpCtx.getDPColNames()) {
// all partition column types should be string; the partition column is a virtual column
ColumnInfo colInfo = new ColumnInfo(dpCol, TypeInfoFactory.stringTypeInfo, tblAlias, true);
signature.add(colInfo);
}
inputRS.setSignature(signature);
// create another DynamicPartitionCtx, which has a different input-to-DP column mapping
DynamicPartitionCtx dpCtx2 = new DynamicPartitionCtx(dpCtx);
fsOutputDesc.setDynPartCtx(dpCtx2);
// update the FileSinkOperator to include partition columns
usePartitionColumns(fsInputDesc.getTableInfo().getProperties(), dpCtx.getDPColNames());
} else {
// non-partitioned table
fsInputDesc.getTableInfo().getProperties().remove(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_PARTITION_COLUMNS);
}
//
// 2. Constructing a conditional task consisting of a move task and a map reduce task
//
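// dummyMv is the move-only alternative for the conditional task built below: if the resolver
// decides at runtime that no merge is needed, the files are moved straight from the
// intermediate directory to finalName.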
MoveWork dummyMv = new MoveWork(null, null, null, new LoadFileDesc(fsInputDesc.getFinalDirName(), finalName, true, null, null), false);
MapWork cplan;
Serializable work;
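// RCFile and ORC can be merged at block/stripe level by a format-specific merge task;
// other formats fall back to the generic map-only merge plan (tsMerge -> fsMerge) built above.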
if ((conf.getBoolVar(ConfVars.HIVEMERGERCFILEBLOCKLEVEL)
    && fsInputDesc.getTableInfo().getInputFileFormatClass().equals(RCFileInputFormat.class))
    || (conf.getBoolVar(ConfVars.HIVEMERGEORCFILESTRIPELEVEL)
        && fsInputDesc.getTableInfo().getInputFileFormatClass().equals(OrcInputFormat.class))) {
cplan = GenMapRedUtils.createMergeTask(fsInputDesc, finalName, dpCtx != null && dpCtx.getNumDPCols() > 0, fsInput.getCompilationOpContext());
if (conf.getVar(ConfVars.HIVE_EXECUTION_ENGINE).equals("tez")) {
work = new TezWork(conf.getVar(HiveConf.ConfVars.HIVEQUERYID), conf);
cplan.setName("File Merge");
((TezWork) work).add(cplan);
} else if (conf.getVar(ConfVars.HIVE_EXECUTION_ENGINE).equals("spark")) {
work = new SparkWork(conf.getVar(HiveConf.ConfVars.HIVEQUERYID));
cplan.setName("Spark Merge File Work");
((SparkWork) work).add(cplan);
} else {
work = cplan;
}
} else {
cplan = createMRWorkForMergingFiles(conf, tsMerge, fsInputDesc);
if (conf.getVar(ConfVars.HIVE_EXECUTION_ENGINE).equals("tez")) {
work = new TezWork(conf.getVar(HiveConf.ConfVars.HIVEQUERYID), conf);
cplan.setName("File Merge");
((TezWork) work).add(cplan);
} else if (conf.getVar(ConfVars.HIVE_EXECUTION_ENGINE).equals("spark")) {
work = new SparkWork(conf.getVar(HiveConf.ConfVars.HIVEQUERYID));
cplan.setName("Spark Merge File Work");
((SparkWork) work).add(cplan);
} else {
work = new MapredWork();
((MapredWork) work).setMapWork(cplan);
}
}
// use CombineHiveInputFormat for map-only merging
cplan.setInputformat("org.apache.hadoop.hive.ql.io.CombineHiveInputFormat");
// NOTE: we should gather stats in MR1 rather than MR2 at merge job since we don't
// know if merge MR2 will be triggered at execution time
Task<MoveWork> mvTask = GenMapRedUtils.findMoveTask(mvTasks, fsOutput);
ConditionalTask cndTsk = GenMapRedUtils.createCondTask(conf, currTask, dummyMv, work, fsInputDesc.getFinalDirName(), finalName, mvTask, dependencyTask);
// keep the dynamic partition context in conditional task resolver context
ConditionalResolverMergeFilesCtx mrCtx = (ConditionalResolverMergeFilesCtx) cndTsk.getResolverCtx();
mrCtx.setDPCtx(fsInputDesc.getDynPartCtx());
mrCtx.setLbCtx(fsInputDesc.getLbCtx());
}
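The operator wiring in step 1 is the core of the merge plan sketched in the Javadoc. The following is a minimal sketch rather than Hive code: a hypothetical helper (the name buildMergeOperatorTree is invented) that isolates that step for the non-partitioned case, assuming the same imports as the snippet above and reusing only calls that appear in it.

// Hypothetical helper (not part of Hive): isolates step 1 of the method above by building the
// two-operator plan of merge job J1 (tsMerge -> fsMerge) and returning its root TableScanOperator.
private static TableScanOperator buildMergeOperatorTree(FileSinkOperator fsInput, Path finalName, HiveConf conf) {
  FileSinkDesc fsInputDesc = fsInput.getConf();
  RowSchema inputRS = fsInput.getSchema();
  // TableScan that re-reads the files produced by fsInput (later forced to CombineHiveInputFormat)
  TableScanOperator tsMerge = GenMapRedUtils.createTemporaryTableScanOperator(
      fsInput.getCompilationOpContext(), inputRS);
  // FileSink that rewrites the merged rows to the final destination path
  TableDesc ts = (TableDesc) fsInputDesc.getTableInfo().clone();
  FileSinkDesc fsOutputDesc = new FileSinkDesc(finalName, ts, conf.getBoolVar(ConfVars.COMPRESSRESULT));
  OperatorFactory.getAndMakeChild(fsOutputDesc, inputRS, tsMerge);
  return tsMerge;
}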
use of org.apache.hadoop.hive.ql.exec.TableScanOperator in project hive by apache.
the class RewriteGBUsingIndex method getIndexesForRewrite.
/**
* Get the indexes that can be used for rewrite.
* @return a map from each table scanned by the query to its indexes that support the rewrite
* @throws SemanticException
*/
private Map<Table, List<Index>> getIndexesForRewrite() throws SemanticException {
List<String> supportedIndexes = new ArrayList<String>();
supportedIndexes.add(AggregateIndexHandler.class.getName());
// query the metastore to know what columns we have indexed
Collection<TableScanOperator> topTables = parseContext.getTopOps().values();
Map<Table, List<Index>> indexes = new HashMap<Table, List<Index>>();
for (TableScanOperator tsOp : topTables) {
List<Index> tblIndexes = IndexUtils.getIndexes(tsOp.getConf().getTableMetadata(), supportedIndexes);
if (tblIndexes.size() > 0) {
indexes.put(tsOp.getConf().getTableMetadata(), tblIndexes);
}
}
return indexes;
}
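The traversal in getIndexesForRewrite is a common pattern: walk the query's top TableScanOperators via the parse context and read the metastore Table behind each scan. As a hedged sketch (a hypothetical helper, not Hive code, assuming the usual java.util imports), the same two calls can be used to collect the distinct base tables a query scans.

// Hypothetical helper (not Hive code): collects the distinct base tables scanned by the query,
// using the same parseContext.getTopOps() traversal and getTableMetadata() call as above.
private Set<Table> getScannedTables() {
  Set<Table> tables = new HashSet<Table>();
  for (TableScanOperator ts : parseContext.getTopOps().values()) {
    tables.add(ts.getConf().getTableMetadata());
  }
  return tables;
}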
use of org.apache.hadoop.hive.ql.exec.TableScanOperator in project hive by apache.
the class GenTezUtils method processDynamicMinMaxPushDownOperator.
public static void processDynamicMinMaxPushDownOperator(GenTezProcContext procCtx, RuntimeValuesInfo runtimeValuesInfo, ReduceSinkOperator rs) throws SemanticException {
TableScanOperator ts = procCtx.parseContext.getRsOpToTsOpMap().get(rs);
List<BaseWork> rsWorkList = procCtx.childToWorkMap.get(rs);
if (ts == null || rsWorkList == null) {
// The mapping for this ReduceSink has been removed (e.g. by cycle detection logic). Nothing to do here.
return;
}
LOG.debug("ResduceSink " + rs + " to TableScan " + ts);
if (rsWorkList.size() != 1) {
StringBuilder sb = new StringBuilder();
for (BaseWork curWork : rsWorkList) {
if (sb.length() > 0) {
sb.append(", ");
}
sb.append(curWork.getName());
}
throw new SemanticException(rs + " belongs to multiple BaseWorks: " + sb.toString());
}
BaseWork parentWork = rsWorkList.get(0);
BaseWork childWork = procCtx.rootToWorkMap.get(ts);
// Connect parent/child work with a broadcast edge.
LOG.debug("Connecting BaseWork - " + parentWork.getName() + " to " + childWork.getName());
TezEdgeProperty edgeProperty = new TezEdgeProperty(EdgeType.BROADCAST_EDGE);
TezWork tezWork = procCtx.currentTask.getWork();
tezWork.connect(parentWork, childWork, edgeProperty);
// Set output names in ReduceSink
rs.getConf().setOutputName(childWork.getName());
// Set up the dynamic values in the childWork.
RuntimeValuesInfo childRuntimeValuesInfo = new RuntimeValuesInfo();
childRuntimeValuesInfo.setTableDesc(runtimeValuesInfo.getTableDesc());
childRuntimeValuesInfo.setDynamicValueIDs(runtimeValuesInfo.getDynamicValueIDs());
childRuntimeValuesInfo.setColExprs(runtimeValuesInfo.getColExprs());
childWork.setInputSourceToRuntimeValuesInfo(parentWork.getName(), childRuntimeValuesInfo);
}
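The key step above is wiring the work that produces the runtime min/max values to the work that consumes them with a broadcast edge. A minimal sketch (a hypothetical helper, not Hive code) of just that wiring, using the same TezWork and TezEdgeProperty calls as the method above:

// Hypothetical helper (not Hive code): connects a producer BaseWork to a consumer BaseWork with a
// broadcast edge and points the ReduceSink's output at the consumer vertex, mirroring the
// connection step in processDynamicMinMaxPushDownOperator above.
private static void connectWithBroadcastEdge(TezWork tezWork, BaseWork producer, BaseWork consumer, ReduceSinkOperator rs) {
  TezEdgeProperty edgeProperty = new TezEdgeProperty(EdgeType.BROADCAST_EDGE);
  tezWork.connect(producer, consumer, edgeProperty);
  // the ReduceSink must emit to the vertex name of the consuming work
  rs.getConf().setOutputName(consumer.getName());
}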
use of org.apache.hadoop.hive.ql.exec.TableScanOperator in project hive by apache.
the class MapWork method checkVectorizerSupportedTypes.
private boolean checkVectorizerSupportedTypes(boolean hasLlap) {
for (Map.Entry<String, Operator<? extends OperatorDesc>> entry : aliasToWork.entrySet()) {
final String alias = entry.getKey();
Operator<? extends OperatorDesc> op = entry.getValue();
PartitionDesc partitionDesc = aliasToPartnInfo.get(alias);
if (op instanceof TableScanOperator && partitionDesc != null && partitionDesc.getTableDesc() != null) {
final TableScanOperator tsOp = (TableScanOperator) op;
final List<String> readColumnNames = tsOp.getNeededColumns();
final Properties props = partitionDesc.getTableDesc().getProperties();
final List<TypeInfo> typeInfos = TypeInfoUtils.getTypeInfosFromTypeString(props.getProperty(serdeConstants.LIST_COLUMN_TYPES));
final List<String> allColumnTypes = TypeInfoUtils.getTypeStringsFromTypeInfo(typeInfos);
final List<String> allColumnNames = Utilities.getColumnNames(props);
hasLlap = Utilities.checkVectorizerSupportedTypes(readColumnNames, allColumnNames, allColumnTypes);
}
}
return hasLlap;
}
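checkVectorizerSupportedTypes folds the per-alias check into a loop over aliasToWork; the per-scan logic can be read in isolation. A hedged sketch (a hypothetical helper, not Hive code) that extracts it, reusing only the calls that appear above:

// Hypothetical helper (not Hive code): for one TableScanOperator and its PartitionDesc, reports
// whether the columns the scan actually reads have vectorizer-supported types, using the same
// calls as checkVectorizerSupportedTypes above.
private static boolean scanUsesSupportedTypes(TableScanOperator tsOp, PartitionDesc partitionDesc) {
  List<String> readColumnNames = tsOp.getNeededColumns();
  Properties props = partitionDesc.getTableDesc().getProperties();
  List<TypeInfo> typeInfos = TypeInfoUtils.getTypeInfosFromTypeString(
      props.getProperty(serdeConstants.LIST_COLUMN_TYPES));
  List<String> allColumnTypes = TypeInfoUtils.getTypeStringsFromTypeInfo(typeInfos);
  List<String> allColumnNames = Utilities.getColumnNames(props);
  return Utilities.checkVectorizerSupportedTypes(readColumnNames, allColumnNames, allColumnTypes);
}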
use of org.apache.hadoop.hive.ql.exec.TableScanOperator in project hive by apache.
the class SemanticAnalyzer method enforceScanLimits.
private void enforceScanLimits(ParseContext pCtx, FetchTask fTask) throws SemanticException {
int scanLimit = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVELIMITTABLESCANPARTITION);
if (scanLimit > -1) {
// a scan limit on the number of partitions has been set by the user
if (fTask != null) {
// having a fetch task at this point means that we are not going to launch a job on the cluster
if (!fTask.getWork().isNotPartitioned() && fTask.getWork().getLimit() == -1 && scanLimit < fTask.getWork().getPartDir().size()) {
throw new SemanticException(ErrorMsg.PARTITION_SCAN_LIMIT_EXCEEDED, "" + fTask.getWork().getPartDir().size(), "" + fTask.getWork().getTblDesc().getTableName(), "" + scanLimit);
}
} else {
// the query will run as one or more jobs on the cluster; check whether any of the table scans breaks the limit
for (Operator<?> topOp : topOps.values()) {
if (topOp instanceof TableScanOperator) {
TableScanOperator tsOp = (TableScanOperator) topOp;
if (tsOp.getConf().getIsMetadataOnly()) {
continue;
}
PrunedPartitionList parts = pCtx.getPrunedPartitions(tsOp);
if (!parts.getSourceTable().isPartitioned()) {
continue;
}
if (parts.getPartitions().size() > scanLimit) {
throw new SemanticException(ErrorMsg.PARTITION_SCAN_LIMIT_EXCEEDED, "" + parts.getPartitions().size(), "" + parts.getSourceTable().getTableName(), "" + scanLimit);
}
}
}
}
}
}
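The per-scan part of enforceScanLimits can also be read on its own. A hedged sketch (a hypothetical helper, not Hive code) of the check for a single TableScanOperator, using the same calls and error message as the loop body above:

// Hypothetical helper (not Hive code): checks one table scan against the configured partition
// scan limit, mirroring the loop body of enforceScanLimits above.
private void checkScanLimit(ParseContext pCtx, TableScanOperator tsOp, int scanLimit) throws SemanticException {
  if (scanLimit < 0 || tsOp.getConf().getIsMetadataOnly()) {
    return;  // no limit configured, or the scan only touches metadata
  }
  PrunedPartitionList parts = pCtx.getPrunedPartitions(tsOp);
  if (parts.getSourceTable().isPartitioned() && parts.getPartitions().size() > scanLimit) {
    throw new SemanticException(ErrorMsg.PARTITION_SCAN_LIMIT_EXCEEDED,
        "" + parts.getPartitions().size(), "" + parts.getSourceTable().getTableName(), "" + scanLimit);
  }
}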