Use of org.apache.hadoop.hive.ql.exec.MaterializedViewDesc in project hive by apache.
The class SemanticAnalyzer, method genFileSinkPlan:
@SuppressWarnings("nls")
protected Operator genFileSinkPlan(String dest, QB qb, Operator input) throws SemanticException {
RowResolver inputRR = opParseCtx.get(input).getRowResolver();
QBMetaData qbm = qb.getMetaData();
Integer dest_type = qbm.getDestTypeForAlias(dest);
// destination table if any
Table dest_tab = null;
// true for full ACID table and MM table
boolean destTableIsTransactional;
// should the destination table be written to using ACID
boolean destTableIsFullAcid;
boolean destTableIsTemporary = false;
boolean destTableIsMaterialization = false;
// destination partition if any
Partition dest_part = null;
// the intermediate destination directory
Path queryTmpdir = null;
// the final destination directory
Path dest_path = null;
TableDesc table_desc = null;
int currentTableId = 0;
boolean isLocal = false;
SortBucketRSCtx rsCtx = new SortBucketRSCtx();
DynamicPartitionCtx dpCtx = null;
LoadTableDesc ltd = null;
ListBucketingCtx lbCtx = null;
Map<String, String> partSpec = null;
boolean isMmTable = false, isMmCtas = false;
Long writeId = null;
HiveTxnManager txnMgr = SessionState.get().getTxnMgr();
switch(dest_type.intValue()) {
case QBMetaData.DEST_TABLE:
{
dest_tab = qbm.getDestTableForAlias(dest);
destTableIsTransactional = AcidUtils.isTransactionalTable(dest_tab);
destTableIsFullAcid = AcidUtils.isFullAcidTable(dest_tab);
destTableIsTemporary = dest_tab.isTemporary();
// Is the user trying to insert into an external table?
checkExternalTable(dest_tab);
partSpec = qbm.getPartSpecForAlias(dest);
dest_path = dest_tab.getPath();
checkImmutableTable(qb, dest_tab, dest_path, false);
// check for partition
List<FieldSchema> parts = dest_tab.getPartitionKeys();
if (parts != null && parts.size() > 0) {
// table is partitioned
if (partSpec == null || partSpec.size() == 0) {
// user did NOT specify partition
throw new SemanticException(generateErrorMessage(qb.getParseInfo().getDestForClause(dest), ErrorMsg.NEED_PARTITION_ERROR.getMsg()));
}
dpCtx = qbm.getDPCtx(dest);
if (dpCtx == null) {
dest_tab.validatePartColumnNames(partSpec, false);
dpCtx = new DynamicPartitionCtx(dest_tab, partSpec, conf.getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME), conf.getIntVar(HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTSPERNODE));
qbm.setDPCtx(dest, dpCtx);
}
}
// Check for dynamic partitions.
dpCtx = checkDynPart(qb, qbm, dest_tab, partSpec, dest);
if (dpCtx != null && dpCtx.getSPPath() != null) {
dest_path = new Path(dest_tab.getPath(), dpCtx.getSPPath());
}
boolean isNonNativeTable = dest_tab.isNonNative();
isMmTable = AcidUtils.isInsertOnlyTable(dest_tab.getParameters());
if (isNonNativeTable || isMmTable) {
queryTmpdir = dest_path;
} else {
queryTmpdir = ctx.getTempDirForFinalJobPath(dest_path);
}
if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
Utilities.FILE_OP_LOGGER.trace("create filesink w/DEST_TABLE specifying " + queryTmpdir + " from " + dest_path);
}
if (dpCtx != null) {
// set the root of the temporary path where dynamic partition columns will be populated
dpCtx.setRootPath(queryTmpdir);
}
// this table_desc does not contain the partitioning columns
table_desc = Utilities.getTableDesc(dest_tab);
// Add NOT NULL constraint check
input = genConstraintsPlan(dest, qb, input);
// Add sorting/bucketing if needed
input = genBucketingSortingDest(dest, input, qb, table_desc, dest_tab, rsCtx);
idToTableNameMap.put(String.valueOf(destTableId), dest_tab.getTableName());
currentTableId = destTableId;
destTableId++;
lbCtx = constructListBucketingCtx(dest_tab.getSkewedColNames(), dest_tab.getSkewedColValues(), dest_tab.getSkewedColValueLocationMaps(), dest_tab.isStoredAsSubDirectories(), conf);
// NOTE: specify Dynamic partitions in dest_tab for WriteEntity
if (!isNonNativeTable) {
AcidUtils.Operation acidOp = AcidUtils.Operation.NOT_ACID;
if (destTableIsFullAcid) {
acidOp = getAcidType(table_desc.getOutputFileFormatClass(), dest);
// todo: should this be done for MM? is it ok to use CombineHiveInputFormat with MM?
checkAcidConstraints(qb, table_desc, dest_tab);
}
try {
if (ctx.getExplainConfig() != null) {
// For an explain plan, a txn won't be opened, so it doesn't make sense to allocate a write id
writeId = 0L;
} else {
if (isMmTable) {
writeId = txnMgr.getTableWriteId(dest_tab.getDbName(), dest_tab.getTableName());
} else {
writeId = acidOp == Operation.NOT_ACID ? null : txnMgr.getTableWriteId(dest_tab.getDbName(), dest_tab.getTableName());
}
}
} catch (LockException ex) {
throw new SemanticException("Failed to allocate write Id", ex);
}
boolean isReplace = !qb.getParseInfo().isInsertIntoTable(dest_tab.getDbName(), dest_tab.getTableName());
ltd = new LoadTableDesc(queryTmpdir, table_desc, dpCtx, acidOp, isReplace, writeId);
// For Acid table, Insert Overwrite shouldn't replace the table content. We keep the old
// deltas and base and leave them up to the cleaner to clean up
boolean isInsertInto = qb.getParseInfo().isInsertIntoTable(dest_tab.getDbName(), dest_tab.getTableName());
LoadFileType loadType = (!isInsertInto && !destTableIsTransactional) ? LoadFileType.REPLACE_ALL : LoadFileType.KEEP_EXISTING;
ltd.setLoadFileType(loadType);
ltd.setInsertOverwrite(!isInsertInto);
ltd.setLbCtx(lbCtx);
loadTableWork.add(ltd);
} else {
// This is a non-native table.
// We need to set stats as inaccurate.
setStatsForNonNativeTable(dest_tab);
// true if it is insert overwrite.
boolean overwrite = !qb.getParseInfo().isInsertIntoTable(String.format("%s.%s", dest_tab.getDbName(), dest_tab.getTableName()));
createInsertDesc(dest_tab, overwrite);
}
if (dest_tab.isMaterializedView()) {
materializedViewUpdateDesc = new MaterializedViewDesc(dest_tab.getFullyQualifiedName(), false, false, true);
}
WriteEntity output = generateTableWriteEntity(dest, dest_tab, partSpec, ltd, dpCtx, isNonNativeTable);
ctx.getLoadTableOutputMap().put(ltd, output);
break;
}
case QBMetaData.DEST_PARTITION:
{
dest_part = qbm.getDestPartitionForAlias(dest);
dest_tab = dest_part.getTable();
destTableIsTransactional = AcidUtils.isTransactionalTable(dest_tab);
destTableIsFullAcid = AcidUtils.isFullAcidTable(dest_tab);
checkExternalTable(dest_tab);
Path tabPath = dest_tab.getPath();
Path partPath = dest_part.getDataLocation();
checkImmutableTable(qb, dest_tab, partPath, true);
// if the table is in a different dfs than the partition,
// replace the partition's dfs with the table's dfs.
dest_path = new Path(tabPath.toUri().getScheme(), tabPath.toUri().getAuthority(), partPath.toUri().getPath());
isMmTable = AcidUtils.isInsertOnlyTable(dest_tab.getParameters());
queryTmpdir = isMmTable ? dest_path : ctx.getTempDirForFinalJobPath(dest_path);
if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
Utilities.FILE_OP_LOGGER.trace("create filesink w/DEST_PARTITION specifying " + queryTmpdir + " from " + dest_path);
}
table_desc = Utilities.getTableDesc(dest_tab);
// Add NOT NULL constraint check
input = genConstraintsPlan(dest, qb, input);
// Add sorting/bucketing if needed
input = genBucketingSortingDest(dest, input, qb, table_desc, dest_tab, rsCtx);
idToTableNameMap.put(String.valueOf(destTableId), dest_tab.getTableName());
currentTableId = destTableId;
destTableId++;
lbCtx = constructListBucketingCtx(dest_part.getSkewedColNames(), dest_part.getSkewedColValues(), dest_part.getSkewedColValueLocationMaps(), dest_part.isStoredAsSubDirectories(), conf);
AcidUtils.Operation acidOp = AcidUtils.Operation.NOT_ACID;
if (destTableIsFullAcid) {
acidOp = getAcidType(table_desc.getOutputFileFormatClass(), dest);
// todo: should this be done for MM? is it ok to use CombineHiveInputFormat with MM?
checkAcidConstraints(qb, table_desc, dest_tab);
}
try {
if (ctx.getExplainConfig() != null) {
// For an explain plan, a txn won't be opened, so it doesn't make sense to allocate a write id
writeId = 0L;
} else {
if (isMmTable) {
writeId = txnMgr.getTableWriteId(dest_tab.getDbName(), dest_tab.getTableName());
} else {
writeId = (acidOp == Operation.NOT_ACID) ? null : txnMgr.getTableWriteId(dest_tab.getDbName(), dest_tab.getTableName());
}
}
} catch (LockException ex) {
throw new SemanticException("Failed to allocate write Id", ex);
}
ltd = new LoadTableDesc(queryTmpdir, table_desc, dest_part.getSpec(), acidOp, writeId);
// For Acid table, Insert Overwrite shouldn't replace the table content. We keep the old
// deltas and base and leave them up to the cleaner to clean up
boolean isInsertInto = qb.getParseInfo().isInsertIntoTable(dest_tab.getDbName(), dest_tab.getTableName());
LoadFileType loadType = (!isInsertInto && !destTableIsTransactional) ? LoadFileType.REPLACE_ALL : LoadFileType.KEEP_EXISTING;
ltd.setLoadFileType(loadType);
ltd.setInsertOverwrite(!isInsertInto);
ltd.setLbCtx(lbCtx);
loadTableWork.add(ltd);
if (!outputs.add(new WriteEntity(dest_part, determineWriteType(ltd, dest_tab.isNonNative(), dest)))) {
throw new SemanticException(ErrorMsg.OUTPUT_SPECIFIED_MULTIPLE_TIMES.getMsg(dest_tab.getTableName() + "@" + dest_part.getName()));
}
break;
}
case QBMetaData.DEST_LOCAL_FILE:
isLocal = true;
// fall through
case QBMetaData.DEST_DFS_FILE:
{
dest_path = new Path(qbm.getDestFileForAlias(dest));
ArrayList<ColumnInfo> colInfos = inputRR.getColumnInfos();
// CTAS case: the file output format and serde are defined by the create
// table command rather than taking the default value
List<FieldSchema> field_schemas = null;
CreateTableDesc tblDesc = qb.getTableDesc();
CreateViewDesc viewDesc = qb.getViewDesc();
boolean isCtas = false;
if (tblDesc != null) {
field_schemas = new ArrayList<FieldSchema>();
destTableIsTemporary = tblDesc.isTemporary();
destTableIsMaterialization = tblDesc.isMaterialization();
if (AcidUtils.isInsertOnlyTable(tblDesc.getTblProps(), true)) {
isMmTable = isMmCtas = true;
try {
if (ctx.getExplainConfig() != null) {
// For an explain plan, a txn won't be opened, so it doesn't make sense to allocate a write id
writeId = 0L;
} else {
writeId = txnMgr.getTableWriteId(tblDesc.getDatabaseName(), tblDesc.getTableName());
}
} catch (LockException ex) {
throw new SemanticException("Failed to allocate write Id", ex);
}
tblDesc.setInitialMmWriteId(writeId);
}
} else if (viewDesc != null) {
field_schemas = new ArrayList<FieldSchema>();
destTableIsTemporary = false;
}
if (isLocal) {
assert !isMmTable;
// for local directory - we always write to map-red intermediate
// store and then copy to local fs
queryTmpdir = ctx.getMRTmpPath();
} else {
// no copy is required. we may want to revisit this policy in future
try {
Path qPath = FileUtils.makeQualified(dest_path, conf);
queryTmpdir = isMmTable ? qPath : ctx.getTempDirForFinalJobPath(qPath);
if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
Utilities.FILE_OP_LOGGER.trace("Setting query directory " + queryTmpdir + " from " + dest_path + " (" + isMmTable + ")");
}
} catch (Exception e) {
throw new SemanticException("Error creating temporary folder on: " + dest_path, e);
}
}
ColsAndTypes ct = deriveFileSinkColTypes(inputRR, field_schemas);
String cols = ct.cols, colTypes = ct.colTypes;
// update the create table descriptor with the resulting schema.
if (tblDesc != null) {
tblDesc.setCols(new ArrayList<FieldSchema>(field_schemas));
} else if (viewDesc != null) {
viewDesc.setSchema(new ArrayList<FieldSchema>(field_schemas));
}
destTableIsTransactional = tblDesc != null && AcidUtils.isTransactionalTable(tblDesc);
destTableIsFullAcid = tblDesc != null && AcidUtils.isFullAcidTable(tblDesc);
boolean isDestTempFile = true;
if (!ctx.isMRTmpFileURI(dest_path.toUri().toString())) {
idToTableNameMap.put(String.valueOf(destTableId), dest_path.toUri().toString());
currentTableId = destTableId;
destTableId++;
isDestTempFile = false;
}
boolean isDfsDir = (dest_type.intValue() == QBMetaData.DEST_DFS_FILE);
// Create LFD even for MM CTAS - it's a no-op move, but it still seems to be used for stats.
loadFileWork.add(new LoadFileDesc(tblDesc, viewDesc, queryTmpdir, dest_path, isDfsDir, cols, colTypes, // there is a change here - the previous version had 'transactional', the one before that 'acid'
destTableIsFullAcid ? Operation.INSERT : Operation.NOT_ACID, isMmCtas));
if (tblDesc == null) {
if (viewDesc != null) {
table_desc = PlanUtils.getTableDesc(viewDesc, cols, colTypes);
} else if (qb.getIsQuery()) {
String fileFormat;
if (SessionState.get().getIsUsingThriftJDBCBinarySerDe()) {
fileFormat = "SequenceFile";
HiveConf.setVar(conf, HiveConf.ConfVars.HIVEQUERYRESULTFILEFORMAT, fileFormat);
table_desc = PlanUtils.getDefaultQueryOutputTableDesc(cols, colTypes, fileFormat, ThriftJDBCBinarySerDe.class);
// Set the fetch formatter to be a no-op for the ListSinkOperator, since we'll
// write out formatted thrift objects to SequenceFile
conf.set(SerDeUtils.LIST_SINK_OUTPUT_FORMATTER, NoOpFetchFormatter.class.getName());
} else {
fileFormat = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEQUERYRESULTFILEFORMAT);
Class<? extends Deserializer> serdeClass = LazySimpleSerDe.class;
if (fileFormat.equals(PlanUtils.LLAP_OUTPUT_FORMAT_KEY)) {
serdeClass = LazyBinarySerDe2.class;
}
table_desc = PlanUtils.getDefaultQueryOutputTableDesc(cols, colTypes, fileFormat, serdeClass);
}
} else {
table_desc = PlanUtils.getDefaultTableDesc(qb.getDirectoryDesc(), cols, colTypes);
}
} else {
table_desc = PlanUtils.getTableDesc(tblDesc, cols, colTypes);
}
if (!outputs.add(new WriteEntity(dest_path, !isDfsDir, isDestTempFile))) {
throw new SemanticException(ErrorMsg.OUTPUT_SPECIFIED_MULTIPLE_TIMES.getMsg(dest_path.toUri().toString()));
}
break;
}
default:
throw new SemanticException("Unknown destination type: " + dest_type);
}
if (!(dest_type.intValue() == QBMetaData.DEST_DFS_FILE && qb.getIsQuery())) {
input = genConversionSelectOperator(dest, qb, input, table_desc, dpCtx);
}
inputRR = opParseCtx.get(input).getRowResolver();
ArrayList<ColumnInfo> vecCol = new ArrayList<ColumnInfo>();
if (updating(dest) || deleting(dest)) {
vecCol.add(new ColumnInfo(VirtualColumn.ROWID.getName(), VirtualColumn.ROWID.getTypeInfo(), "", true));
} else {
try {
StructObjectInspector rowObjectInspector = (StructObjectInspector) table_desc.getDeserializer(conf).getObjectInspector();
List<? extends StructField> fields = rowObjectInspector.getAllStructFieldRefs();
for (int i = 0; i < fields.size(); i++) {
vecCol.add(new ColumnInfo(fields.get(i).getFieldName(), TypeInfoUtils.getTypeInfoFromObjectInspector(fields.get(i).getFieldObjectInspector()), "", false));
}
} catch (Exception e) {
throw new SemanticException(e.getMessage(), e);
}
}
RowSchema fsRS = new RowSchema(vecCol);
// The output files of a FileSink can be merged if they are either not being written to a table
// or are being written to a table which is not bucketed
// and the table is not sorted
boolean canBeMerged = (dest_tab == null || !((dest_tab.getNumBuckets() > 0) || (dest_tab.getSortCols() != null && dest_tab.getSortCols().size() > 0)));
// If this table is working with ACID semantics, turn off merging
canBeMerged &= !destTableIsFullAcid;
// Generate the partition columns from the parent input
if (dest_type.intValue() == QBMetaData.DEST_TABLE || dest_type.intValue() == QBMetaData.DEST_PARTITION) {
genPartnCols(dest, input, qb, table_desc, dest_tab, rsCtx);
}
FileSinkDesc fileSinkDesc = createFileSinkDesc(dest, table_desc, dest_part, dest_path, currentTableId, destTableIsFullAcid, // this was 1/4 acid
destTableIsTemporary, destTableIsMaterialization, queryTmpdir, rsCtx, dpCtx, lbCtx, fsRS, canBeMerged, dest_tab, writeId, isMmCtas, dest_type, qb);
if (isMmCtas) {
// Add FSD so that the LoadTask compilation could fix up its path to avoid the move.
tableDesc.setWriter(fileSinkDesc);
}
if (fileSinkDesc.getInsertOverwrite()) {
if (ltd != null) {
ltd.setInsertOverwrite(true);
}
}
if (SessionState.get().isHiveServerQuery() && null != table_desc && table_desc.getSerdeClassName().equalsIgnoreCase(ThriftJDBCBinarySerDe.class.getName()) && HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_SERVER2_THRIFT_RESULTSET_SERIALIZE_IN_TASKS)) {
fileSinkDesc.setIsUsingThriftJDBCBinarySerDe(true);
} else {
fileSinkDesc.setIsUsingThriftJDBCBinarySerDe(false);
}
Operator output = putOpInsertMap(OperatorFactory.getAndMakeChild(fileSinkDesc, fsRS, input), inputRR);
handleLineage(ltd, output);
if (LOG.isDebugEnabled()) {
LOG.debug("Created FileSink Plan for clause: " + dest + "dest_path: " + dest_path + " row schema: " + inputRR.toString());
}
FileSinkOperator fso = (FileSinkOperator) output;
fso.getConf().setTable(dest_tab);
// Auto-gather column stats when stats auto-gathering is enabled and this is an insert overwrite or insert into table
if (dest_tab != null && !dest_tab.isNonNative() && conf.getBoolVar(ConfVars.HIVESTATSAUTOGATHER) && conf.getBoolVar(ConfVars.HIVESTATSCOLAUTOGATHER) && ColumnStatsAutoGatherContext.canRunAutogatherStats(fso)) {
if (dest_type.intValue() == QBMetaData.DEST_TABLE) {
genAutoColumnStatsGatheringPipeline(qb, table_desc, partSpec, input, qb.getParseInfo().isInsertIntoTable(dest_tab.getDbName(), dest_tab.getTableName()));
} else if (dest_type.intValue() == QBMetaData.DEST_PARTITION) {
genAutoColumnStatsGatheringPipeline(qb, table_desc, dest_part.getSpec(), input, qb.getParseInfo().isInsertIntoTable(dest_tab.getDbName(), dest_tab.getTableName()));
}
}
return output;
}
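For context, the only MaterializedViewDesc usage in this method is the dest_tab.isMaterializedView() branch: when the destination of the insert is a materialized view, a descriptor is created so the view's registry entry can be refreshed once the plan finishes (TaskCompiler wires the corresponding task in, as shown in the next snippet). Below is a minimal sketch of that construction; the interpretation of the three boolean arguments (retrieveAndInclude, disableRewrite, and what is assumed here to be a refresh/update flag) is inferred from the call sites in these snippets rather than from the class's documentation.

import org.apache.hadoop.hive.ql.exec.MaterializedViewDesc;

class MaterializedViewRefreshSketch {
    // Mirrors the genFileSinkPlan call site: the view already exists and is being rewritten,
    // so it is neither re-retrieved/included nor has rewriting disabled; the final flag is
    // assumed to request an update of the existing registry entry.
    static MaterializedViewDesc refreshDescriptorFor(String fullyQualifiedViewName) {
        return new MaterializedViewDesc(fullyQualifiedViewName, false, false, true);
    }
}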
Use of org.apache.hadoop.hive.ql.exec.MaterializedViewDesc in project hive by apache.
The class TaskCompiler, method compile:
@SuppressWarnings({ "nls", "unchecked" })
public void compile(final ParseContext pCtx, final List<Task<? extends Serializable>> rootTasks, final HashSet<ReadEntity> inputs, final HashSet<WriteEntity> outputs) throws SemanticException {
Context ctx = pCtx.getContext();
GlobalLimitCtx globalLimitCtx = pCtx.getGlobalLimitCtx();
List<Task<MoveWork>> mvTask = new ArrayList<>();
List<LoadTableDesc> loadTableWork = pCtx.getLoadTableWork();
List<LoadFileDesc> loadFileWork = pCtx.getLoadFileWork();
boolean isCStats = pCtx.getQueryProperties().isAnalyzeRewrite();
int outerQueryLimit = pCtx.getQueryProperties().getOuterQueryLimit();
if (pCtx.getFetchTask() != null) {
if (pCtx.getFetchTask().getTblDesc() == null) {
return;
}
pCtx.getFetchTask().getWork().setHiveServerQuery(SessionState.get().isHiveServerQuery());
TableDesc resultTab = pCtx.getFetchTask().getTblDesc();
// If the result serde is not ThriftJDBCBinarySerDe, then either the ThriftFormatter or the DefaultFetchFormatter should be used.
if (!resultTab.getSerdeClassName().equalsIgnoreCase(ThriftJDBCBinarySerDe.class.getName())) {
if (SessionState.get().isHiveServerQuery()) {
conf.set(SerDeUtils.LIST_SINK_OUTPUT_FORMATTER, ThriftFormatter.class.getName());
} else {
String formatterName = conf.get(SerDeUtils.LIST_SINK_OUTPUT_FORMATTER);
if (formatterName == null || formatterName.isEmpty()) {
conf.set(SerDeUtils.LIST_SINK_OUTPUT_FORMATTER, DefaultFetchFormatter.class.getName());
}
}
}
return;
}
optimizeOperatorPlan(pCtx, inputs, outputs);
/*
* In case of a select, use a fetch task instead of a move task.
* If the select is from analyze table column rewrite, don't create a fetch task. Instead create
* a column stats task later.
*/
if (pCtx.getQueryProperties().isQuery() && !isCStats) {
if ((!loadTableWork.isEmpty()) || (loadFileWork.size() != 1)) {
throw new SemanticException(ErrorMsg.INVALID_LOAD_TABLE_FILE_WORK.getMsg());
}
LoadFileDesc loadFileDesc = loadFileWork.get(0);
String cols = loadFileDesc.getColumns();
String colTypes = loadFileDesc.getColumnTypes();
String resFileFormat;
TableDesc resultTab = pCtx.getFetchTableDesc();
if (resultTab == null) {
resFileFormat = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEQUERYRESULTFILEFORMAT);
if (SessionState.get().getIsUsingThriftJDBCBinarySerDe() && (resFileFormat.equalsIgnoreCase("SequenceFile"))) {
resultTab = PlanUtils.getDefaultQueryOutputTableDesc(cols, colTypes, resFileFormat, ThriftJDBCBinarySerDe.class);
// Set the fetch formatter to be a no-op for the ListSinkOperator, since we'll
// read formatted thrift objects from the output SequenceFile written by Tasks.
conf.set(SerDeUtils.LIST_SINK_OUTPUT_FORMATTER, NoOpFetchFormatter.class.getName());
} else {
resultTab = PlanUtils.getDefaultQueryOutputTableDesc(cols, colTypes, resFileFormat, LazySimpleSerDe.class);
}
} else {
if (resultTab.getProperties().getProperty(serdeConstants.SERIALIZATION_LIB).equalsIgnoreCase(ThriftJDBCBinarySerDe.class.getName())) {
// Set the fetch formatter to be a no-op for the ListSinkOperator, since we'll
// read formatted thrift objects from the output SequenceFile written by Tasks.
conf.set(SerDeUtils.LIST_SINK_OUTPUT_FORMATTER, NoOpFetchFormatter.class.getName());
}
}
FetchWork fetch = new FetchWork(loadFileDesc.getSourcePath(), resultTab, outerQueryLimit);
boolean isHiveServerQuery = SessionState.get().isHiveServerQuery();
fetch.setHiveServerQuery(isHiveServerQuery);
fetch.setSource(pCtx.getFetchSource());
fetch.setSink(pCtx.getFetchSink());
if (isHiveServerQuery && null != resultTab && resultTab.getSerdeClassName().equalsIgnoreCase(ThriftJDBCBinarySerDe.class.getName()) && HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_SERVER2_THRIFT_RESULTSET_SERIALIZE_IN_TASKS)) {
fetch.setIsUsingThriftJDBCBinarySerDe(true);
} else {
fetch.setIsUsingThriftJDBCBinarySerDe(false);
}
pCtx.setFetchTask((FetchTask) TaskFactory.get(fetch));
// For the FetchTask, the limit optimization requires we fetch all the rows
// in memory and count how many rows we get. It's not practical if the
// limit factor is too big
int fetchLimit = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVELIMITOPTMAXFETCH);
if (globalLimitCtx.isEnable() && globalLimitCtx.getGlobalLimit() > fetchLimit) {
LOG.info("For FetchTask, LIMIT " + globalLimitCtx.getGlobalLimit() + " > " + fetchLimit + ". Doesn't qualify limit optimization.");
globalLimitCtx.disableOpt();
}
if (outerQueryLimit == 0) {
// Believe it or not, some tools do generate queries with limit 0 and then expect
// the query to run quickly. Let's meet their requirement.
LOG.info("Limit 0. No query execution needed.");
return;
}
} else if (!isCStats) {
for (LoadTableDesc ltd : loadTableWork) {
Task<MoveWork> tsk = TaskFactory.get(new MoveWork(null, null, ltd, null, false));
mvTask.add(tsk);
}
boolean oneLoadFileForCtas = true;
for (LoadFileDesc lfd : loadFileWork) {
if (pCtx.getQueryProperties().isCTAS() || pCtx.getQueryProperties().isMaterializedView()) {
if (!oneLoadFileForCtas) {
// should not have more than 1 load file for CTAS.
throw new SemanticException("One query is not expected to contain multiple CTAS loads statements");
}
setLoadFileLocation(pCtx, lfd);
oneLoadFileForCtas = false;
}
mvTask.add(TaskFactory.get(new MoveWork(null, null, null, lfd, false)));
}
}
generateTaskTree(rootTasks, pCtx, mvTask, inputs, outputs);
// For each task, set the key descriptor for the reducer
for (Task<? extends Serializable> rootTask : rootTasks) {
GenMapRedUtils.setKeyAndValueDescForTaskTree(rootTask);
}
// If a task contains an operator which instructs that a particular input format be used, set it here
for (Task<? extends Serializable> rootTask : rootTasks) {
setInputFormat(rootTask);
}
optimizeTaskPlan(rootTasks, pCtx, ctx);
/*
* If the query was the result of analyze table column compute statistics rewrite, create
* a column stats task instead of a fetch task to persist stats to the metastore.
* As per HIVE-15903, we will also collect table stats when user computes column stats.
* That means, if isCStats || !pCtx.getColumnStatsAutoGatherContexts().isEmpty(),
* we need to collect table stats.
* If isCStats, we need to include a basic stats task;
* else it is ColumnStatsAutoGather, which should already have a move task with a stats task.
*/
if (isCStats || !pCtx.getColumnStatsAutoGatherContexts().isEmpty()) {
// map from tablename to task (ColumnStatsTask which includes a BasicStatsTask)
Map<String, StatsTask> map = new LinkedHashMap<>();
if (isCStats) {
if (rootTasks == null || rootTasks.size() != 1 || pCtx.getTopOps() == null || pCtx.getTopOps().size() != 1) {
throw new SemanticException("Can not find correct root task!");
}
try {
Task<? extends Serializable> root = rootTasks.iterator().next();
StatsTask tsk = (StatsTask) genTableStats(pCtx, pCtx.getTopOps().values().iterator().next(), root, outputs);
root.addDependentTask(tsk);
map.put(extractTableFullName(tsk), tsk);
} catch (HiveException e) {
throw new SemanticException(e);
}
genColumnStatsTask(pCtx.getAnalyzeRewrite(), loadFileWork, map, outerQueryLimit, 0);
} else {
Set<Task<? extends Serializable>> leafTasks = new LinkedHashSet<Task<? extends Serializable>>();
getLeafTasks(rootTasks, leafTasks);
List<Task<? extends Serializable>> nonStatsLeafTasks = new ArrayList<>();
for (Task<? extends Serializable> tsk : leafTasks) {
// map table name to the correct ColumnStatsTask
if (tsk instanceof StatsTask) {
map.put(extractTableFullName((StatsTask) tsk), (StatsTask) tsk);
} else {
nonStatsLeafTasks.add(tsk);
}
}
// add cStatsTask as a dependent of all the nonStatsLeafTasks
for (Task<? extends Serializable> tsk : nonStatsLeafTasks) {
for (Task<? extends Serializable> cStatsTask : map.values()) {
tsk.addDependentTask(cStatsTask);
}
}
for (ColumnStatsAutoGatherContext columnStatsAutoGatherContext : pCtx.getColumnStatsAutoGatherContexts()) {
if (!columnStatsAutoGatherContext.isInsertInto()) {
genColumnStatsTask(columnStatsAutoGatherContext.getAnalyzeRewrite(), columnStatsAutoGatherContext.getLoadFileWork(), map, outerQueryLimit, 0);
} else {
int numBitVector;
try {
numBitVector = HiveStatsUtils.getNumBitVectorsForNDVEstimation(conf);
} catch (Exception e) {
throw new SemanticException(e.getMessage());
}
genColumnStatsTask(columnStatsAutoGatherContext.getAnalyzeRewrite(), columnStatsAutoGatherContext.getLoadFileWork(), map, outerQueryLimit, numBitVector);
}
}
}
}
decideExecMode(rootTasks, ctx, globalLimitCtx);
if (pCtx.getQueryProperties().isCTAS() && !pCtx.getCreateTable().isMaterialization()) {
// generate a DDL task and make it a dependent task of the leaf
CreateTableDesc crtTblDesc = pCtx.getCreateTable();
crtTblDesc.validate(conf);
Task<? extends Serializable> crtTblTask = TaskFactory.get(new DDLWork(inputs, outputs, crtTblDesc));
patchUpAfterCTASorMaterializedView(rootTasks, outputs, crtTblTask);
} else if (pCtx.getQueryProperties().isMaterializedView()) {
// generate a DDL task and make it a dependent task of the leaf
CreateViewDesc viewDesc = pCtx.getCreateViewDesc();
Task<? extends Serializable> crtViewTask = TaskFactory.get(new DDLWork(inputs, outputs, viewDesc));
patchUpAfterCTASorMaterializedView(rootTasks, outputs, crtViewTask);
} else if (pCtx.getMaterializedViewUpdateDesc() != null) {
// If there is a materialized view update desc, we introduce it at the end
// of the tree.
MaterializedViewDesc materializedViewDesc = pCtx.getMaterializedViewUpdateDesc();
Set<Task<? extends Serializable>> leafTasks = new LinkedHashSet<Task<? extends Serializable>>();
getLeafTasks(rootTasks, leafTasks);
Task<? extends Serializable> materializedViewTask = TaskFactory.get(materializedViewDesc, conf);
for (Task<? extends Serializable> task : leafTasks) {
task.addDependentTask(materializedViewTask);
}
}
if (globalLimitCtx.isEnable() && pCtx.getFetchTask() != null) {
LOG.info("set least row check for FetchTask: " + globalLimitCtx.getGlobalLimit());
pCtx.getFetchTask().getWork().setLeastNumRows(globalLimitCtx.getGlobalLimit());
}
if (globalLimitCtx.isEnable() && globalLimitCtx.getLastReduceLimitDesc() != null) {
LOG.info("set least row check for LimitDesc: " + globalLimitCtx.getGlobalLimit());
globalLimitCtx.getLastReduceLimitDesc().setLeastRows(globalLimitCtx.getGlobalLimit());
}
Interner<TableDesc> interner = Interners.newStrongInterner();
for (Task<? extends Serializable> rootTask : rootTasks) {
GenMapRedUtils.internTableDesc(rootTask, interner);
GenMapRedUtils.deriveFinalExplainAttributes(rootTask, pCtx.getConf());
}
}
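The tail of compile() shows the pattern used to hook the materialized view update into the plan: collect the leaf tasks of the task DAG and make the update task depend on all of them, so it only runs after every write has completed. Here is a small self-contained sketch of that pattern, using an illustrative Node class in place of Hive's Task hierarchy (getLeafTasks and addDependentTask are the method names from the snippet above; everything else is a stand-in).

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;

public class AttachTaskToLeaves {
    // Illustrative stand-in for Hive's Task: each node only knows its dependents.
    static final class Node {
        final String name;
        final List<Node> dependents = new ArrayList<>();
        Node(String name) { this.name = name; }
        void addDependentTask(Node child) { dependents.add(child); }
    }

    // Same idea as getLeafTasks(rootTasks, leafTasks): walk the DAG, keep nodes with no dependents.
    static Set<Node> getLeafTasks(List<Node> roots) {
        Set<Node> leaves = new LinkedHashSet<>();
        Set<Node> seen = new LinkedHashSet<>();
        Deque<Node> stack = new ArrayDeque<>(roots);
        while (!stack.isEmpty()) {
            Node n = stack.pop();
            if (!seen.add(n)) {
                continue;
            }
            if (n.dependents.isEmpty()) {
                leaves.add(n);
            } else {
                stack.addAll(n.dependents);
            }
        }
        return leaves;
    }

    public static void main(String[] args) {
        Node work = new Node("map-reduce work");
        Node move = new Node("move task");
        work.addDependentTask(move);
        Node mvUpdate = new Node("materialized view registry update");
        // Mirror of the compile() tail: the update task runs only after every leaf finishes.
        for (Node leaf : getLeafTasks(List.of(work))) {
            leaf.addDependentTask(mvUpdate);
        }
        System.out.println("attached " + mvUpdate.name + " to all leaves");
    }
}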
Use of org.apache.hadoop.hive.ql.exec.MaterializedViewDesc in project hive by apache.
The class TaskCompiler, method patchUpAfterCTASorMaterializedView:
private void patchUpAfterCTASorMaterializedView(final List<Task<? extends Serializable>> rootTasks, final HashSet<WriteEntity> outputs, Task<? extends Serializable> createTask) {
// clear the mapredWork output file from outputs for CTAS
// DDLWork at the tail of the chain will have the output
Iterator<WriteEntity> outIter = outputs.iterator();
while (outIter.hasNext()) {
switch(outIter.next().getType()) {
case DFS_DIR:
case LOCAL_DIR:
outIter.remove();
break;
default:
break;
}
}
// find all leaf tasks and make the DDLTask as a dependent task on all of them
HashSet<Task<? extends Serializable>> leaves = new LinkedHashSet<>();
getLeafTasks(rootTasks, leaves);
assert (leaves.size() > 0);
Task<? extends Serializable> targetTask = createTask;
for (Task<? extends Serializable> task : leaves) {
if (task instanceof StatsTask) {
// StatsTask requires the table to already exist
for (Task<? extends Serializable> parentOfStatsTask : task.getParentTasks()) {
parentOfStatsTask.addDependentTask(createTask);
}
for (Task<? extends Serializable> parentOfCrtTblTask : createTask.getParentTasks()) {
parentOfCrtTblTask.removeDependentTask(task);
}
createTask.addDependentTask(task);
targetTask = task;
} else {
task.addDependentTask(createTask);
}
}
// Add task to insert / delete materialized view from registry if needed
if (createTask instanceof DDLTask) {
DDLTask ddlTask = (DDLTask) createTask;
DDLWork work = ddlTask.getWork();
String tableName = null;
boolean retrieveAndInclude = false;
boolean disableRewrite = false;
if (work.getCreateViewDesc() != null && work.getCreateViewDesc().isMaterialized()) {
tableName = work.getCreateViewDesc().getViewName();
retrieveAndInclude = work.getCreateViewDesc().isRewriteEnabled();
} else if (work.getAlterMaterializedViewDesc() != null) {
tableName = work.getAlterMaterializedViewDesc().getMaterializedViewName();
if (work.getAlterMaterializedViewDesc().isRewriteEnable()) {
retrieveAndInclude = true;
} else {
disableRewrite = true;
}
} else {
return;
}
targetTask.addDependentTask(TaskFactory.get(new MaterializedViewDesc(tableName, retrieveAndInclude, disableRewrite, false), conf));
}
}
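The last block above derives the MaterializedViewDesc flags from the kind of DDL that was run: a CREATE MATERIALIZED VIEW registers the view, including it for rewriting only when rewriting is enabled, while an ALTER MATERIALIZED VIEW ... REWRITE either starts including it or disables rewriting. A hedged summary of that flag selection follows, using the same argument order as the constructor calls in these snippets (the helper names are illustrative, not Hive API).

import org.apache.hadoop.hive.ql.exec.MaterializedViewDesc;

class MaterializedViewRegistrySketch {
    // CREATE MATERIALIZED VIEW path: retrieveAndInclude follows isRewriteEnabled(),
    // disableRewrite stays false, and the final (assumed update/refresh) flag is false.
    static MaterializedViewDesc afterCreate(String viewName, boolean rewriteEnabled) {
        return new MaterializedViewDesc(viewName, rewriteEnabled, false, false);
    }

    // ALTER MATERIALIZED VIEW ... ENABLE/DISABLE REWRITE path: exactly one of the two
    // flags is set, depending on whether rewriting is being turned on or off.
    static MaterializedViewDesc afterAlterRewrite(String viewName, boolean enableRewrite) {
        return new MaterializedViewDesc(viewName, enableRewrite, !enableRewrite, false);
    }
}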