Use of org.apache.hadoop.hive.ql.plan.LoadTableDesc in project hive by apache.
The class MoveTask, method execute:
@Override
public int execute() {
if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
Utilities.FILE_OP_LOGGER.trace("Executing MoveWork " + System.identityHashCode(work) + " with " + work.getLoadFileWork() + "; " + work.getLoadTableWork() + "; " + work.getLoadMultiFilesWork());
}
if (context.getExplainAnalyze() == AnalyzeState.RUNNING) {
return 0;
}
try (LocalTableLock lock = acquireLockForFileMove(work.getLoadTableWork())) {
if (checkAndCommitNatively(work, conf)) {
return 0;
}
Hive db = getHive();
// Do any hive related operations like moving tables and files
// to appropriate locations
LoadFileDesc lfd = work.getLoadFileWork();
if (lfd != null) {
Path targetPath = lfd.getTargetDir();
Path sourcePath = lfd.getSourcePath();
if (targetPath.equals(sourcePath)) {
Utilities.FILE_OP_LOGGER.debug("MoveTask not moving " + sourcePath);
} else {
Utilities.FILE_OP_LOGGER.debug("MoveTask moving " + sourcePath + " to " + targetPath);
if (lfd.getWriteType() == AcidUtils.Operation.INSERT) {
// 'sourcePath' result of 'select ...' part of CTAS statement
assert lfd.getIsDfsDir();
FileSystem srcFs = sourcePath.getFileSystem(conf);
FileStatus[] srcs = srcFs.globStatus(sourcePath);
if (srcs != null) {
Hive.moveAcidFiles(srcFs, srcs, targetPath, null);
} else {
LOG.debug("No files found to move from " + sourcePath + " to " + targetPath);
}
} else {
moveFile(sourcePath, targetPath, lfd.getIsDfsDir());
}
}
}
// Multi-file load is for dynamic partitions when some partitions do not
// need to merge and they can simply be moved to the target directory.
// This is also used for MM table conversion.
LoadMultiFilesDesc lmfd = work.getLoadMultiFilesWork();
if (lmfd != null) {
boolean isDfsDir = lmfd.getIsDfsDir();
List<String> targetPrefixes = lmfd.getTargetPrefixes();
for (int i = 0; i < lmfd.getSourceDirs().size(); ++i) {
Path srcPath = lmfd.getSourceDirs().get(i);
Path destPath = lmfd.getTargetDirs().get(i);
if (destPath.equals(srcPath)) {
continue;
}
String filePrefix = targetPrefixes == null ? null : targetPrefixes.get(i);
FileSystem destFs = destPath.getFileSystem(conf);
if (filePrefix == null) {
if (!destFs.exists(destPath.getParent())) {
destFs.mkdirs(destPath.getParent());
}
Utilities.FILE_OP_LOGGER.debug("MoveTask moving (multi-file) " + srcPath + " to " + destPath);
moveFile(srcPath, destPath, isDfsDir);
} else {
if (!destFs.exists(destPath)) {
destFs.mkdirs(destPath);
}
FileSystem srcFs = srcPath.getFileSystem(conf);
FileStatus[] children = srcFs.listStatus(srcPath);
if (children != null) {
for (FileStatus child : children) {
Path childSrc = child.getPath();
Path childDest = new Path(destPath, filePrefix + childSrc.getName());
Utilities.FILE_OP_LOGGER.debug("MoveTask moving (multi-file) " + childSrc + " to " + childDest);
moveFile(childSrc, childDest, isDfsDir);
}
} else {
Utilities.FILE_OP_LOGGER.debug("MoveTask skipping empty directory (multi-file) " + srcPath);
}
if (!srcFs.delete(srcPath, false)) {
throw new IOException("Couldn't delete " + srcPath + " after moving all the files");
}
}
}
}
// Next we do this for tables and partitions
LoadTableDesc tbd = work.getLoadTableWork();
if (tbd != null) {
logMessage(tbd);
Table table = db.getTable(tbd.getTable().getTableName());
checkFileFormats(db, tbd, table);
boolean isFullAcidOp = work.getLoadTableWork().getWriteType() != AcidUtils.Operation.NOT_ACID && // it seems that LoadTableDesc has Operation.INSERT only for CTAS...
!tbd.isMmTable();
// Create a data container
DataContainer dc = null;
if (tbd.getPartitionSpec().size() == 0) {
dc = new DataContainer(table.getTTable());
if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
Utilities.FILE_OP_LOGGER.trace("loadTable called from " + tbd.getSourcePath() + " into " + tbd.getTable().getTableName());
}
int statementId = tbd.getStmtId();
if (tbd.isDirectInsert() || tbd.isMmTable()) {
statementId = queryPlan.getStatementIdForAcidWriteType(work.getLoadTableWork().getWriteId(), tbd.getMoveTaskId(), work.getLoadTableWork().getWriteType(), tbd.getSourcePath());
LOG.debug("The statementId used when loading the dynamic partitions is " + statementId);
}
db.loadTable(tbd.getSourcePath(), tbd.getTable().getTableName(), tbd.getLoadFileType(), work.isSrcLocal(), isSkewedStoredAsDirs(tbd), isFullAcidOp, resetStatisticsProps(table), tbd.getWriteId(), statementId, tbd.isInsertOverwrite(), tbd.isDirectInsert());
if (work.getOutputs() != null) {
DDLUtils.addIfAbsentByName(new WriteEntity(table, getWriteType(tbd, work.getLoadTableWork().getWriteType())), work.getOutputs());
}
} else {
LOG.info("Partition is: {}", tbd.getPartitionSpec());
// Check if the bucketing and/or sorting columns were inferred
TaskInformation ti = new TaskInformation(this, tbd.getSourcePath().toUri().toString());
inferTaskInformation(ti);
// deal with dynamic partitions
DynamicPartitionCtx dpCtx = tbd.getDPCtx();
if (dpCtx != null && dpCtx.getNumDPCols() > 0) {
// dynamic partitions
dc = handleDynParts(db, table, tbd, ti, dpCtx);
} else {
// static partitions
dc = handleStaticParts(db, table, tbd, ti);
}
}
if (dc != null) {
// If we are doing an update or a delete the number of columns in the table will not
// match the number of columns in the file sink. For update there will be one too many
// (because of the ROW__ID), and in the case of the delete there will be just the
// ROW__ID, which we don't need to worry about from a lineage perspective.
List<FieldSchema> tableCols = null;
switch(work.getLoadTableWork().getWriteType()) {
case DELETE:
case UPDATE:
// Pass an empty list as no columns will be written to the file.
// TODO I should be able to make this work for update
tableCols = new ArrayList<>();
break;
default:
tableCols = table.getCols();
break;
}
queryState.getLineageState().setLineage(tbd.getSourcePath(), dc, tableCols);
}
releaseLocks(tbd);
}
return 0;
} catch (HiveException he) {
int errorCode = 1;
if (he.getCanonicalErrorMsg() != ErrorMsg.GENERIC_ERROR) {
errorCode = he.getCanonicalErrorMsg().getErrorCode();
if (he.getCanonicalErrorMsg() == ErrorMsg.UNRESOLVED_RT_EXCEPTION) {
console.printError("Failed with exception " + he.getMessage(), "\n" + StringUtils.stringifyException(he));
} else {
console.printError("Failed with exception " + he.getMessage() + "\nRemote Exception: " + he.getRemoteErrorMsg());
console.printInfo("\n", StringUtils.stringifyException(he), false);
}
}
setException(he);
errorCode = ReplUtils.handleException(work.isReplication(), he, work.getDumpDirectory(), work.getMetricCollector(), getName(), conf);
return errorCode;
} catch (Exception e) {
console.printError("Failed with exception " + e.getMessage(), "\n" + StringUtils.stringifyException(e));
setException(e);
LOG.error("MoveTask failed", e);
return ReplUtils.handleException(work.isReplication(), e, work.getDumpDirectory(), work.getMetricCollector(), getName(), conf);
}
}
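For orientation, a minimal sketch of the producer side of this method: building a LoadTableDesc and wrapping it in the MoveWork that execute() above consumes. It only uses constructors and setters that appear in the snippets on this page; the staging path, the resolved targetTable, and the overwrite semantics are illustrative assumptions, and imports are omitted as in the surrounding snippets.

// Sketch only: a non-partitioned, non-ACID INSERT OVERWRITE load handed to a MoveTask.
Path stagingDir = new Path("/tmp/hive-staging/-ext-10000");        // hypothetical query scratch dir
TableDesc targetDesc = Utilities.getTableDesc(targetTable);        // targetTable: a resolved Table (assumed)
Long writeId = null;                                               // no transaction for a non-ACID table
LoadTableDesc ltd = new LoadTableDesc(stagingDir, targetDesc, Collections.emptyMap(),
    AcidUtils.Operation.NOT_ACID, writeId);
ltd.setLoadFileType(LoadFileType.REPLACE_ALL);                     // INSERT OVERWRITE semantics
ltd.setInsertOverwrite(true);
MoveWork moveWork = new MoveWork(null, null, ltd, null, false);    // only the load-table slot is populated
Task<MoveWork> moveTask = TaskFactory.get(moveWork);               // its execute() is the method shown above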
Use of org.apache.hadoop.hive.ql.plan.LoadTableDesc in project hive by apache.
The class GenMapRedUtils, method mergeMovePaths:
/**
* Merges the given Conditional input path and the linked MoveWork into a single MoveWork.
* This is an optimization for BlobStore systems to avoid doing two renames or copies that are not necessary.
*
* @param condInputPath A path that the ConditionalTask uses as input for its sub-tasks.
* @param linkedMoveWork A MoveWork that the ConditionalTask uses to link to its sub-tasks.
* @param lineageState A LineageState used to track what changes.
* @return A new MoveWork that has the Conditional input path as source and the linkedMoveWork as target.
*/
@VisibleForTesting
protected static MoveWork mergeMovePaths(Path condInputPath, MoveWork linkedMoveWork, LineageState lineageState) {
MoveWork newWork = new MoveWork(linkedMoveWork);
LoadFileDesc fileDesc = null;
LoadTableDesc tableDesc = null;
if (linkedMoveWork.getLoadFileWork() != null) {
fileDesc = new LoadFileDesc(linkedMoveWork.getLoadFileWork());
fileDesc.setSourcePath(condInputPath);
lineageState.updateDirToOpMap(condInputPath, linkedMoveWork.getLoadFileWork().getSourcePath());
} else if (linkedMoveWork.getLoadTableWork() != null) {
tableDesc = new LoadTableDesc(linkedMoveWork.getLoadTableWork());
tableDesc.setSourcePath(condInputPath);
lineageState.updateDirToOpMap(condInputPath, linkedMoveWork.getLoadTableWork().getSourcePath());
} else {
throw new IllegalArgumentException("Merging a path with a MoveWork with multi-files work is not allowed.");
}
newWork.setLoadFileWork(fileDesc);
newWork.setLoadTableWork(tableDesc);
return newWork;
}
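A hypothetical test-style invocation (the method is @VisibleForTesting); the conditional input path is a placeholder, linkedMoveWork and lineageState are assumed to exist already, and only getters shown elsewhere on this page are used.

// Merge the ConditionalTask's input directory into the linked MoveWork so that only one
// rename/copy is planned on the blob store.
Path condInputPath = new Path("/tmp/hive/-ext-10002");             // placeholder conditional input dir
MoveWork merged = GenMapRedUtils.mergeMovePaths(condInputPath, linkedMoveWork, lineageState);
LoadTableDesc mergedLtd = merged.getLoadTableWork();
// If the linked work carried a LoadTableDesc, its source now points at condInputPath.
assert mergedLtd == null || condInputPath.equals(mergedLtd.getSourcePath());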
Use of org.apache.hadoop.hive.ql.plan.LoadTableDesc in project hive by apache.
The class TaskCompiler, method compile:
@SuppressWarnings("nls")
public void compile(final ParseContext pCtx, final List<Task<?>> rootTasks, final Set<ReadEntity> inputs, final Set<WriteEntity> outputs) throws SemanticException {
Context ctx = pCtx.getContext();
GlobalLimitCtx globalLimitCtx = pCtx.getGlobalLimitCtx();
List<Task<MoveWork>> mvTask = new ArrayList<>();
List<LoadTableDesc> loadTableWork = pCtx.getLoadTableWork();
List<LoadFileDesc> loadFileWork = pCtx.getLoadFileWork();
boolean isCStats = pCtx.getQueryProperties().isAnalyzeRewrite();
int outerQueryLimit = pCtx.getQueryProperties().getOuterQueryLimit();
boolean directInsertCtas = false;
if (pCtx.getCreateTable() != null && pCtx.getCreateTable().getStorageHandler() != null) {
try {
directInsertCtas = HiveUtils.getStorageHandler(conf, pCtx.getCreateTable().getStorageHandler()).directInsertCTAS();
} catch (HiveException e) {
throw new SemanticException("Failed to load storage handler: " + e.getMessage());
}
}
if (pCtx.getFetchTask() != null) {
if (pCtx.getFetchTask().getTblDesc() == null) {
return;
}
pCtx.getFetchTask().getWork().setHiveServerQuery(SessionState.get().isHiveServerQuery());
TableDesc resultTab = pCtx.getFetchTask().getTblDesc();
// If the result serde is not ThriftJDBCBinarySerDe, then either the ThriftFormatter or the DefaultFetchFormatter should be used.
if (!resultTab.getSerdeClassName().equalsIgnoreCase(ThriftJDBCBinarySerDe.class.getName())) {
if (SessionState.get().isHiveServerQuery()) {
conf.set(SerDeUtils.LIST_SINK_OUTPUT_FORMATTER, ThriftFormatter.class.getName());
} else {
String formatterName = conf.get(SerDeUtils.LIST_SINK_OUTPUT_FORMATTER);
if (formatterName == null || formatterName.isEmpty()) {
conf.set(SerDeUtils.LIST_SINK_OUTPUT_FORMATTER, DefaultFetchFormatter.class.getName());
}
}
}
return;
}
if (pCtx.getQueryProperties().isAnalyzeCommand()) {
LOG.debug("Skipping optimize operator plan for analyze command.");
} else {
optimizeOperatorPlan(pCtx);
}
/*
* In case of a select, use a fetch task instead of a move task.
* If the select is from analyze table column rewrite, don't create a fetch task. Instead create
* a column stats task later.
*/
if (pCtx.getQueryProperties().isQuery() && !isCStats) {
if ((!loadTableWork.isEmpty()) || (loadFileWork.size() != 1)) {
throw new SemanticException(ErrorMsg.INVALID_LOAD_TABLE_FILE_WORK.getMsg());
}
LoadFileDesc loadFileDesc = loadFileWork.get(0);
String cols = loadFileDesc.getColumns();
String colTypes = loadFileDesc.getColumnTypes();
TableDesc resultTab = pCtx.getFetchTableDesc();
boolean shouldSetOutputFormatter = false;
if (resultTab == null) {
ResultFileFormat resFileFormat = conf.getResultFileFormat();
String fileFormat;
Class<? extends Deserializer> serdeClass;
if (SessionState.get().getIsUsingThriftJDBCBinarySerDe() && resFileFormat == ResultFileFormat.SEQUENCEFILE) {
fileFormat = resFileFormat.toString();
serdeClass = ThriftJDBCBinarySerDe.class;
shouldSetOutputFormatter = true;
} else if (resFileFormat == ResultFileFormat.SEQUENCEFILE) {
// file format is changed so that IF file sink provides list of files to fetch from (instead
// of whole directory) list status is done on files (which is what HiveSequenceFileInputFormat does)
fileFormat = "HiveSequenceFile";
serdeClass = LazySimpleSerDe.class;
} else {
// All other cases we use the defined file format and LazySimpleSerde
fileFormat = resFileFormat.toString();
serdeClass = LazySimpleSerDe.class;
}
resultTab = PlanUtils.getDefaultQueryOutputTableDesc(cols, colTypes, fileFormat, serdeClass);
} else {
shouldSetOutputFormatter = resultTab.getProperties().getProperty(serdeConstants.SERIALIZATION_LIB).equalsIgnoreCase(ThriftJDBCBinarySerDe.class.getName());
}
if (shouldSetOutputFormatter) {
// Set the fetch formatter to be a no-op for the ListSinkOperator, since we will
// read formatted thrift objects from the output SequenceFile written by Tasks.
conf.set(SerDeUtils.LIST_SINK_OUTPUT_FORMATTER, NoOpFetchFormatter.class.getName());
}
FetchWork fetch = new FetchWork(loadFileDesc.getSourcePath(), resultTab, outerQueryLimit);
boolean isHiveServerQuery = SessionState.get().isHiveServerQuery();
fetch.setHiveServerQuery(isHiveServerQuery);
fetch.setSource(pCtx.getFetchSource());
fetch.setSink(pCtx.getFetchSink());
if (isHiveServerQuery && null != resultTab && resultTab.getSerdeClassName().equalsIgnoreCase(ThriftJDBCBinarySerDe.class.getName()) && HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_SERVER2_THRIFT_RESULTSET_SERIALIZE_IN_TASKS)) {
fetch.setIsUsingThriftJDBCBinarySerDe(true);
} else {
fetch.setIsUsingThriftJDBCBinarySerDe(false);
}
// The idea here is to keep an object reference both in FileSink and in FetchTask for list of files
// to be fetched. During Job close file sink will populate the list and fetch task later will use it
// to fetch the results.
Collection<Operator<?>> tableScanOps = Lists.<Operator<?>>newArrayList(pCtx.getTopOps().values());
Set<FileSinkOperator> fsOps = OperatorUtils.findOperators(tableScanOps, FileSinkOperator.class);
if (fsOps != null && fsOps.size() == 1) {
FileSinkOperator op = fsOps.iterator().next();
Set<FileStatus> filesToFetch = new HashSet<>();
op.getConf().setFilesToFetch(filesToFetch);
fetch.setFilesToFetch(filesToFetch);
}
pCtx.setFetchTask((FetchTask) TaskFactory.get(fetch));
// For the FetchTask, the limit optimization requires we fetch all the rows
// in memory and count how many rows we get. It's not practical if the
// limit factor is too big
int fetchLimit = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVELIMITOPTMAXFETCH);
if (globalLimitCtx.isEnable() && globalLimitCtx.getGlobalLimit() > fetchLimit) {
LOG.info("For FetchTask, LIMIT " + globalLimitCtx.getGlobalLimit() + " > " + fetchLimit + ". Doesn't qualify limit optimization.");
globalLimitCtx.disableOpt();
}
if (outerQueryLimit == 0) {
// Believe it or not, some tools do generate queries with limit 0 and then expect
// the query to run quickly. Let's meet their requirement.
LOG.info("Limit 0. No query execution needed.");
return;
}
} else if (!isCStats) {
for (LoadTableDesc ltd : loadTableWork) {
Task<MoveWork> tsk = TaskFactory.get(new MoveWork(null, null, ltd, null, false));
mvTask.add(tsk);
}
boolean oneLoadFileForCtas = true;
for (LoadFileDesc lfd : loadFileWork) {
if (pCtx.getQueryProperties().isCTAS() || pCtx.getQueryProperties().isMaterializedView()) {
if (!oneLoadFileForCtas) {
// should not have more than 1 load file for CTAS.
throw new SemanticException("One query is not expected to contain multiple CTAS loads statements");
}
setLoadFileLocation(pCtx, lfd);
oneLoadFileForCtas = false;
}
mvTask.add(TaskFactory.get(new MoveWork(null, null, null, lfd, false)));
}
}
generateTaskTree(rootTasks, pCtx, mvTask, inputs, outputs);
// For each task, set the key descriptor for the reducer
for (Task<?> rootTask : rootTasks) {
GenMapRedUtils.setKeyAndValueDescForTaskTree(rootTask);
}
// If a task contains an operator which instructs bucketizedhiveinputformat to be used, please do so
for (Task<?> rootTask : rootTasks) {
setInputFormat(rootTask);
}
optimizeTaskPlan(rootTasks, pCtx, ctx);
/*
* If the query was the result of analyze table column compute statistics rewrite, create
* a column stats task instead of a fetch task to persist stats to the metastore.
* As per HIVE-15903, we will also collect table stats when user computes column stats.
* That means, if isCStats || !pCtx.getColumnStatsAutoGatherContexts().isEmpty()
* We need to collect table stats
* if isCStats, we need to include a basic stats task
* else it is ColumnStatsAutoGather, which should have a move task with a stats task already.
*/
if (isCStats || !pCtx.getColumnStatsAutoGatherContexts().isEmpty()) {
// map from tablename to task (ColumnStatsTask which includes a BasicStatsTask)
Map<String, StatsTask> map = new LinkedHashMap<>();
if (isCStats) {
if (rootTasks == null || rootTasks.size() != 1 || pCtx.getTopOps() == null || pCtx.getTopOps().size() != 1) {
throw new SemanticException("Can not find correct root task!");
}
try {
Task<?> root = rootTasks.iterator().next();
StatsTask tsk = (StatsTask) genTableStats(pCtx, pCtx.getTopOps().values().iterator().next(), root, outputs);
root.addDependentTask(tsk);
map.put(extractTableFullName(tsk), tsk);
} catch (HiveException e) {
throw new SemanticException(e);
}
genColumnStatsTask(pCtx.getAnalyzeRewrite(), loadFileWork, map, outerQueryLimit, 0);
} else {
Set<Task<?>> leafTasks = new LinkedHashSet<Task<?>>();
getLeafTasks(rootTasks, leafTasks);
List<Task<?>> nonStatsLeafTasks = new ArrayList<>();
for (Task<?> tsk : leafTasks) {
// map table name to the correct ColumnStatsTask
if (tsk instanceof StatsTask) {
map.put(extractTableFullName((StatsTask) tsk), (StatsTask) tsk);
} else {
nonStatsLeafTasks.add(tsk);
}
}
// add cStatsTask as a dependent of all the nonStatsLeafTasks
for (Task<?> tsk : nonStatsLeafTasks) {
for (Task<?> cStatsTask : map.values()) {
tsk.addDependentTask(cStatsTask);
}
}
for (ColumnStatsAutoGatherContext columnStatsAutoGatherContext : pCtx.getColumnStatsAutoGatherContexts()) {
if (!columnStatsAutoGatherContext.isInsertInto()) {
genColumnStatsTask(columnStatsAutoGatherContext.getAnalyzeRewrite(), columnStatsAutoGatherContext.getLoadFileWork(), map, outerQueryLimit, 0);
} else {
int numBitVector;
try {
numBitVector = HiveStatsUtils.getNumBitVectorsForNDVEstimation(conf);
} catch (Exception e) {
throw new SemanticException(e.getMessage());
}
genColumnStatsTask(columnStatsAutoGatherContext.getAnalyzeRewrite(), columnStatsAutoGatherContext.getLoadFileWork(), map, outerQueryLimit, numBitVector);
}
}
}
}
decideExecMode(rootTasks, ctx, globalLimitCtx);
// For CTAS, generate the create-table DDL task unless the target was already created ahead of time by the non-native table (direct insert CTAS)
if (pCtx.getQueryProperties().isCTAS() && !pCtx.getCreateTable().isMaterialization() && !directInsertCtas) {
// generate a DDL task and make it a dependent task of the leaf
CreateTableDesc crtTblDesc = pCtx.getCreateTable();
crtTblDesc.validate(conf);
Task<?> crtTblTask = TaskFactory.get(new DDLWork(inputs, outputs, crtTblDesc));
patchUpAfterCTASorMaterializedView(rootTasks, inputs, outputs, crtTblTask, CollectionUtils.isEmpty(crtTblDesc.getPartColNames()));
} else if (pCtx.getQueryProperties().isMaterializedView()) {
// generate a DDL task and make it a dependent task of the leaf
CreateMaterializedViewDesc viewDesc = pCtx.getCreateViewDesc();
Task<?> crtViewTask = TaskFactory.get(new DDLWork(inputs, outputs, viewDesc));
patchUpAfterCTASorMaterializedView(rootTasks, inputs, outputs, crtViewTask, CollectionUtils.isEmpty(viewDesc.getPartColNames()));
} else if (pCtx.getMaterializedViewUpdateDesc() != null) {
// If there is a materialized view update desc, we introduce it at the end
// of the tree.
MaterializedViewUpdateDesc materializedViewDesc = pCtx.getMaterializedViewUpdateDesc();
DDLWork ddlWork = new DDLWork(inputs, outputs, materializedViewDesc);
Set<Task<?>> leafTasks = new LinkedHashSet<Task<?>>();
getLeafTasks(rootTasks, leafTasks);
Task<?> materializedViewTask = TaskFactory.get(ddlWork, conf);
for (Task<?> task : leafTasks) {
task.addDependentTask(materializedViewTask);
}
}
if (globalLimitCtx.isEnable() && pCtx.getFetchTask() != null) {
LOG.info("set least row check for FetchTask: " + globalLimitCtx.getGlobalLimit());
pCtx.getFetchTask().getWork().setLeastNumRows(globalLimitCtx.getGlobalLimit());
}
if (globalLimitCtx.isEnable() && globalLimitCtx.getLastReduceLimitDesc() != null) {
LOG.info("set least row check for LimitDesc: " + globalLimitCtx.getGlobalLimit());
globalLimitCtx.getLastReduceLimitDesc().setLeastRows(globalLimitCtx.getGlobalLimit());
}
Interner<TableDesc> interner = Interners.newStrongInterner();
// Perform Final chores on generated Map works
// 1. Intern the table descriptors
// 2. Derive final explain attributes based on previous compilation.
GenMapRedUtils.finalMapWorkChores(rootTasks, pCtx.getConf(), interner);
}
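The positional nulls in the MoveWork constructor calls above are easy to misread, so here is a short annotated sketch of the two patterns compile() builds; ltd and lfd are placeholders, and the parameter-name comments are my reading of the constructor rather than something shown on this page.

// Table/partition load: only the LoadTableDesc slot is populated.
Task<MoveWork> tableMove = TaskFactory.get(new MoveWork(null, null, ltd, null, false));
// Directory/file load (e.g. the single CTAS load file): only the LoadFileDesc slot is populated.
Task<MoveWork> fileMove = TaskFactory.get(new MoveWork(null, null, null, lfd, false));
// Assumed slot order: inputs, outputs, loadTableWork, loadFileWork, checkFileFormat.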
Use of org.apache.hadoop.hive.ql.plan.LoadTableDesc in project hive by apache.
The class SemanticAnalyzer, method genFileSinkPlan:
@SuppressWarnings("nls")
protected Operator genFileSinkPlan(String dest, QB qb, Operator input) throws SemanticException {
RowResolver inputRR = opParseCtx.get(input).getRowResolver();
QBMetaData qbm = qb.getMetaData();
Integer destType = qbm.getDestTypeForAlias(dest);
// destination table if any
Table destinationTable = null;
// true for full ACID table and MM table
boolean destTableIsTransactional;
// should the destination table be written to using ACID
boolean destTableIsFullAcid;
// should we add files directly to the final path
boolean isDirectInsert = false;
AcidUtils.Operation acidOperation = null;
boolean destTableIsTemporary = false;
boolean destTableIsMaterialization = false;
// destination partition if any
Partition destinationPartition = null;
// the intermediate destination directory
Path queryTmpdir = null;
String moveTaskId = null;
// the final destination directory
Path destinationPath = null;
TableDesc tableDescriptor = null;
StructObjectInspector specificRowObjectInspector = null;
int currentTableId = 0;
boolean isLocal = false;
SortBucketRSCtx rsCtx = new SortBucketRSCtx();
DynamicPartitionCtx dpCtx = null;
LoadTableDesc ltd = null;
ListBucketingCtx lbCtx = null;
Map<String, String> partSpec = null;
boolean isMmTable = false, isMmCreate = false, isNonNativeTable = false;
Long writeId = null;
HiveTxnManager txnMgr = getTxnMgr();
switch(destType.intValue()) {
case QBMetaData.DEST_TABLE:
{
destinationTable = qbm.getDestTableForAlias(dest);
destTableIsTransactional = AcidUtils.isTransactionalTable(destinationTable);
destTableIsFullAcid = AcidUtils.isFullAcidTable(destinationTable);
destTableIsTemporary = destinationTable.isTemporary();
// Is the user trying to insert into an external table?
checkExternalTable(destinationTable);
partSpec = qbm.getPartSpecForAlias(dest);
destinationPath = destinationTable.getPath();
checkImmutableTable(qb, destinationTable, destinationPath, false);
// check for partition
List<FieldSchema> parts = destinationTable.getPartitionKeys();
if (parts != null && parts.size() > 0) {
// table is partitioned
if (partSpec == null || partSpec.size() == 0) {
// user did NOT specify partition
throw new SemanticException(generateErrorMessage(qb.getParseInfo().getDestForClause(dest), ErrorMsg.NEED_PARTITION_ERROR.getMsg()));
}
dpCtx = qbm.getDPCtx(dest);
if (dpCtx == null) {
destinationTable.validatePartColumnNames(partSpec, false);
dpCtx = new DynamicPartitionCtx(partSpec, conf.getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME), conf.getIntVar(HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTSPERNODE));
qbm.setDPCtx(dest, dpCtx);
}
}
// Check for dynamic partitions.
dpCtx = checkDynPart(qb, qbm, destinationTable, partSpec, dest);
if (dpCtx != null && dpCtx.getSPPath() != null) {
destinationPath = new Path(destinationTable.getPath(), dpCtx.getSPPath());
}
isNonNativeTable = destinationTable.isNonNative();
isMmTable = AcidUtils.isInsertOnlyTable(destinationTable.getParameters());
AcidUtils.Operation acidOp = AcidUtils.Operation.NOT_ACID;
// this table_desc does not contain the partitioning columns
tableDescriptor = Utilities.getTableDesc(destinationTable);
if (!isNonNativeTable) {
if (destTableIsTransactional) {
acidOp = getAcidType(tableDescriptor.getOutputFileFormatClass(), dest, isMmTable);
}
}
isDirectInsert = isDirectInsert(destTableIsFullAcid, acidOp);
acidOperation = acidOp;
queryTmpdir = getTmpDir(isNonNativeTable, isMmTable, isDirectInsert, destinationPath);
moveTaskId = getMoveTaskId();
if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
Utilities.FILE_OP_LOGGER.trace("create filesink w/DEST_TABLE specifying " + queryTmpdir + " from " + destinationPath);
}
if (dpCtx != null) {
// set the root of the temporary path where dynamic partition columns will populate
dpCtx.setRootPath(queryTmpdir);
}
// Add NOT NULL constraint check
input = genConstraintsPlan(dest, qb, input);
if (!qb.getIsQuery()) {
input = genConversionSelectOperator(dest, qb, input, destinationTable.getDeserializer(), dpCtx, parts);
}
if (destinationTable.isMaterializedView() && mvRebuildMode == MaterializationRebuildMode.INSERT_OVERWRITE_REBUILD) {
// Data organization (DISTRIBUTED, SORTED, CLUSTERED) for materialized view
// TODO: We only do this for a full rebuild
String sortColsStr = destinationTable.getProperty(Constants.MATERIALIZED_VIEW_SORT_COLUMNS);
String distributeColsStr = destinationTable.getProperty(Constants.MATERIALIZED_VIEW_DISTRIBUTE_COLUMNS);
if (sortColsStr != null || distributeColsStr != null) {
input = genMaterializedViewDataOrgPlan(destinationTable, sortColsStr, distributeColsStr, inputRR, input);
}
} else {
// Add sorting/bucketing if needed
input = genBucketingSortingDest(dest, input, qb, tableDescriptor, destinationTable, rsCtx);
}
idToTableNameMap.put(String.valueOf(destTableId), destinationTable.getTableName());
currentTableId = destTableId;
destTableId++;
// NOTE: specify Dynamic partitions in dest_tab for WriteEntity
if (!isNonNativeTable || destinationTable.getStorageHandler().commitInMoveTask()) {
if (destTableIsTransactional) {
acidOp = getAcidType(tableDescriptor.getOutputFileFormatClass(), dest, isMmTable);
checkAcidConstraints();
} else {
lbCtx = constructListBucketingCtx(destinationTable.getSkewedColNames(), destinationTable.getSkewedColValues(), destinationTable.getSkewedColValueLocationMaps(), destinationTable.isStoredAsSubDirectories());
}
try {
if (ctx.getExplainConfig() != null) {
// For explain plan, txn won't be opened and doesn't make sense to allocate write id
writeId = null;
} else {
if (isMmTable) {
writeId = txnMgr.getTableWriteId(destinationTable.getDbName(), destinationTable.getTableName());
} else {
writeId = acidOp == Operation.NOT_ACID ? null : txnMgr.getTableWriteId(destinationTable.getDbName(), destinationTable.getTableName());
}
}
} catch (LockException ex) {
throw new SemanticException("Failed to allocate write Id", ex);
}
boolean isReplace = !qb.getParseInfo().isInsertIntoTable(destinationTable.getDbName(), destinationTable.getTableName());
ltd = new LoadTableDesc(queryTmpdir, tableDescriptor, dpCtx, acidOp, isReplace, writeId);
if (writeId != null) {
ltd.setStmtId(txnMgr.getCurrentStmtId());
}
ltd.setMoveTaskId(moveTaskId);
// For Acid table, Insert Overwrite shouldn't replace the table content. We keep the old
// deltas and base and leave them up to the cleaner to clean up
boolean isInsertInto = qb.getParseInfo().isInsertIntoTable(destinationTable.getDbName(), destinationTable.getTableName());
LoadFileType loadType;
if (isDirectInsert) {
loadType = LoadFileType.IGNORE;
} else if (!isInsertInto && !destTableIsTransactional) {
loadType = LoadFileType.REPLACE_ALL;
} else {
loadType = LoadFileType.KEEP_EXISTING;
}
ltd.setLoadFileType(loadType);
ltd.setInsertOverwrite(!isInsertInto);
ltd.setIsDirectInsert(isDirectInsert);
ltd.setLbCtx(lbCtx);
loadTableWork.add(ltd);
} else {
// This is a non-native table.
// We need to set stats as inaccurate.
setStatsForNonNativeTable(destinationTable.getDbName(), destinationTable.getTableName());
// true if it is insert overwrite.
boolean overwrite = !qb.getParseInfo().isInsertIntoTable(String.format("%s.%s", destinationTable.getDbName(), destinationTable.getTableName()));
createPreInsertDesc(destinationTable, overwrite);
ltd = new LoadTableDesc(queryTmpdir, tableDescriptor, partSpec == null ? ImmutableMap.of() : partSpec);
ltd.setInsertOverwrite(overwrite);
ltd.setLoadFileType(overwrite ? LoadFileType.REPLACE_ALL : LoadFileType.KEEP_EXISTING);
}
if (destinationTable.isMaterializedView()) {
materializedViewUpdateDesc = new MaterializedViewUpdateDesc(destinationTable.getFullyQualifiedName(), false, false, true);
}
WriteEntity output = generateTableWriteEntity(dest, destinationTable, partSpec, ltd, dpCtx);
ctx.getLoadTableOutputMap().put(ltd, output);
break;
}
case QBMetaData.DEST_PARTITION:
{
destinationPartition = qbm.getDestPartitionForAlias(dest);
destinationTable = destinationPartition.getTable();
destTableIsTransactional = AcidUtils.isTransactionalTable(destinationTable);
destTableIsFullAcid = AcidUtils.isFullAcidTable(destinationTable);
checkExternalTable(destinationTable);
Path partPath = destinationPartition.getDataLocation();
checkImmutableTable(qb, destinationTable, partPath, true);
// Previous behavior (HIVE-1707) used to replace the partition's dfs with the table's dfs.
// The changes in HIVE-19891 appear to no longer support that behavior.
destinationPath = partPath;
if (MetaStoreUtils.isArchived(destinationPartition.getTPartition())) {
try {
String conflictingArchive = ArchiveUtils.conflictingArchiveNameOrNull(db, destinationTable, destinationPartition.getSpec());
String message = String.format("Insert conflict with existing archive: %s", conflictingArchive);
throw new SemanticException(message);
} catch (SemanticException err) {
throw err;
} catch (HiveException err) {
throw new SemanticException(err);
}
}
isNonNativeTable = destinationTable.isNonNative();
isMmTable = AcidUtils.isInsertOnlyTable(destinationTable.getParameters());
AcidUtils.Operation acidOp = AcidUtils.Operation.NOT_ACID;
// this table_desc does not contain the partitioning columns
tableDescriptor = Utilities.getTableDesc(destinationTable);
if (!isNonNativeTable) {
if (destTableIsTransactional) {
acidOp = getAcidType(tableDescriptor.getOutputFileFormatClass(), dest, isMmTable);
}
}
isDirectInsert = isDirectInsert(destTableIsFullAcid, acidOp);
acidOperation = acidOp;
queryTmpdir = getTmpDir(isNonNativeTable, isMmTable, isDirectInsert, destinationPath);
moveTaskId = getMoveTaskId();
if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
Utilities.FILE_OP_LOGGER.trace("create filesink w/DEST_PARTITION specifying " + queryTmpdir + " from " + destinationPath);
}
// Add NOT NULL constraint check
input = genConstraintsPlan(dest, qb, input);
if (!qb.getIsQuery()) {
input = genConversionSelectOperator(dest, qb, input, destinationTable.getDeserializer(), dpCtx, null);
}
if (destinationTable.isMaterializedView() && mvRebuildMode == MaterializationRebuildMode.INSERT_OVERWRITE_REBUILD) {
// Data organization (DISTRIBUTED, SORTED, CLUSTERED) for materialized view
// TODO: We only do this for a full rebuild
String sortColsStr = destinationTable.getProperty(Constants.MATERIALIZED_VIEW_SORT_COLUMNS);
String distributeColsStr = destinationTable.getProperty(Constants.MATERIALIZED_VIEW_DISTRIBUTE_COLUMNS);
if (sortColsStr != null || distributeColsStr != null) {
input = genMaterializedViewDataOrgPlan(destinationTable, sortColsStr, distributeColsStr, inputRR, input);
}
} else {
// Add sorting/bucketing if needed
input = genBucketingSortingDest(dest, input, qb, tableDescriptor, destinationTable, rsCtx);
}
idToTableNameMap.put(String.valueOf(destTableId), destinationTable.getTableName());
currentTableId = destTableId;
destTableId++;
if (destTableIsTransactional) {
acidOp = getAcidType(tableDescriptor.getOutputFileFormatClass(), dest, isMmTable);
checkAcidConstraints();
} else {
// Transactional tables can't be list bucketed or have skewed cols
lbCtx = constructListBucketingCtx(destinationPartition.getSkewedColNames(), destinationPartition.getSkewedColValues(), destinationPartition.getSkewedColValueLocationMaps(), destinationPartition.isStoredAsSubDirectories());
}
try {
if (ctx.getExplainConfig() != null) {
// For explain plan, txn won't be opened and doesn't make sense to allocate write id
writeId = null;
} else {
if (isMmTable) {
writeId = txnMgr.getTableWriteId(destinationTable.getDbName(), destinationTable.getTableName());
} else {
writeId = (acidOp == Operation.NOT_ACID) ? null : txnMgr.getTableWriteId(destinationTable.getDbName(), destinationTable.getTableName());
}
}
} catch (LockException ex) {
throw new SemanticException("Failed to allocate write Id", ex);
}
ltd = new LoadTableDesc(queryTmpdir, tableDescriptor, destinationPartition.getSpec(), acidOp, writeId);
if (writeId != null) {
ltd.setStmtId(txnMgr.getCurrentStmtId());
}
// For the current context for generating File Sink Operator, it is either INSERT INTO or INSERT OVERWRITE.
// So the next line works.
boolean isInsertInto = !qb.getParseInfo().isDestToOpTypeInsertOverwrite(dest);
// For Acid table, Insert Overwrite shouldn't replace the table content. We keep the old
// deltas and base and leave them up to the cleaner to clean up
LoadFileType loadType;
if (isDirectInsert) {
loadType = LoadFileType.IGNORE;
} else if (!isInsertInto && !destTableIsTransactional) {
loadType = LoadFileType.REPLACE_ALL;
} else {
loadType = LoadFileType.KEEP_EXISTING;
}
ltd.setLoadFileType(loadType);
ltd.setInsertOverwrite(!isInsertInto);
ltd.setIsDirectInsert(isDirectInsert);
ltd.setLbCtx(lbCtx);
ltd.setMoveTaskId(moveTaskId);
loadTableWork.add(ltd);
if (!outputs.add(new WriteEntity(destinationPartition, determineWriteType(ltd, dest)))) {
throw new SemanticException(ErrorMsg.OUTPUT_SPECIFIED_MULTIPLE_TIMES.getMsg(destinationTable.getTableName() + "@" + destinationPartition.getName()));
}
break;
}
case QBMetaData.DEST_LOCAL_FILE:
isLocal = true;
// fall through
case QBMetaData.DEST_DFS_FILE:
{
destinationPath = getDestinationFilePath(qbm.getDestFileForAlias(dest), isMmTable);
// CTAS case: the file output format and serde are defined by the create
// table command rather than taking the default value
List<FieldSchema> fieldSchemas = null;
List<FieldSchema> partitionColumns = null;
List<String> partitionColumnNames = null;
List<FieldSchema> sortColumns = null;
List<String> sortColumnNames = null;
List<FieldSchema> distributeColumns = null;
List<String> distributeColumnNames = null;
List<ColumnInfo> fileSinkColInfos = null;
List<ColumnInfo> sortColInfos = null;
List<ColumnInfo> distributeColInfos = null;
TableName tableName = null;
Map<String, String> tblProps = null;
CreateTableDesc tblDesc = qb.getTableDesc();
CreateMaterializedViewDesc viewDesc = qb.getViewDesc();
if (tblDesc != null) {
fieldSchemas = new ArrayList<>();
partitionColumns = new ArrayList<>();
partitionColumnNames = tblDesc.getPartColNames();
fileSinkColInfos = new ArrayList<>();
destTableIsTemporary = tblDesc.isTemporary();
destTableIsMaterialization = tblDesc.isMaterialization();
tableName = TableName.fromString(tblDesc.getDbTableName(), null, tblDesc.getDatabaseName());
tblProps = tblDesc.getTblProps();
} else if (viewDesc != null) {
fieldSchemas = new ArrayList<>();
partitionColumns = new ArrayList<>();
partitionColumnNames = viewDesc.getPartColNames();
sortColumns = new ArrayList<>();
sortColumnNames = viewDesc.getSortColNames();
distributeColumns = new ArrayList<>();
distributeColumnNames = viewDesc.getDistributeColNames();
fileSinkColInfos = new ArrayList<>();
sortColInfos = new ArrayList<>();
distributeColInfos = new ArrayList<>();
destTableIsTemporary = false;
destTableIsMaterialization = false;
tableName = HiveTableName.ofNullableWithNoDefault(viewDesc.getViewName());
tblProps = viewDesc.getTblProps();
}
destTableIsTransactional = tblProps != null && AcidUtils.isTablePropertyTransactional(tblProps);
if (destTableIsTransactional) {
try {
if (ctx.getExplainConfig() != null) {
// For explain plan, txn won't be opened and doesn't make sense to allocate write id
writeId = 0L;
} else {
writeId = txnMgr.getTableWriteId(tableName.getDb(), tableName.getTable());
}
} catch (LockException ex) {
throw new SemanticException("Failed to allocate write Id", ex);
}
if (AcidUtils.isInsertOnlyTable(tblProps, true)) {
isMmTable = isMmCreate = true;
if (tblDesc != null) {
tblDesc.setInitialMmWriteId(writeId);
} else {
viewDesc.setInitialMmWriteId(writeId);
}
}
}
if (isLocal) {
assert !isMmTable;
// for local directory - we always write to map-red intermediate
// store and then copy to local fs
queryTmpdir = ctx.getMRTmpPath();
} else {
// no copy is required. we may want to revisit this policy in future
try {
Path qPath = FileUtils.makeQualified(destinationPath, conf);
queryTmpdir = isMmTable ? qPath : ctx.getTempDirForFinalJobPath(qPath);
if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
Utilities.FILE_OP_LOGGER.trace("Setting query directory " + queryTmpdir + " from " + destinationPath + " (" + isMmTable + ")");
}
} catch (Exception e) {
throw new SemanticException("Error creating temporary folder on: " + destinationPath, e);
}
}
// Check for dynamic partitions.
final String cols, colTypes;
final boolean isPartitioned;
if (dpCtx != null) {
throw new SemanticException("Dynamic partition context has already been created, this should not happen");
}
if (!CollectionUtils.isEmpty(partitionColumnNames)) {
ColsAndTypes ct = deriveFileSinkColTypes(inputRR, partitionColumnNames, sortColumnNames, distributeColumnNames, fieldSchemas, partitionColumns, sortColumns, distributeColumns, fileSinkColInfos, sortColInfos, distributeColInfos);
cols = ct.cols;
colTypes = ct.colTypes;
dpCtx = new DynamicPartitionCtx(partitionColumnNames, conf.getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME), conf.getIntVar(HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTSPERNODE));
qbm.setDPCtx(dest, dpCtx);
// set the root of the temporary path where dynamic partition columns will populate
dpCtx.setRootPath(queryTmpdir);
isPartitioned = true;
} else {
ColsAndTypes ct = deriveFileSinkColTypes(inputRR, sortColumnNames, distributeColumnNames, fieldSchemas, sortColumns, distributeColumns, sortColInfos, distributeColInfos);
cols = ct.cols;
colTypes = ct.colTypes;
isPartitioned = false;
}
// update the create table descriptor with the resulting schema.
if (tblDesc != null) {
tblDesc.setCols(new ArrayList<>(fieldSchemas));
tblDesc.setPartCols(new ArrayList<>(partitionColumns));
} else if (viewDesc != null) {
viewDesc.setSchema(new ArrayList<>(fieldSchemas));
viewDesc.setPartCols(new ArrayList<>(partitionColumns));
if (viewDesc.isOrganized()) {
viewDesc.setSortCols(new ArrayList<>(sortColumns));
viewDesc.setDistributeCols(new ArrayList<>(distributeColumns));
}
}
boolean isDestTempFile = true;
if (!ctx.isMRTmpFileURI(destinationPath.toUri().toString()) && !ctx.isResultCacheDir(destinationPath)) {
// not a temp dir and not a result cache dir
idToTableNameMap.put(String.valueOf(destTableId), destinationPath.toUri().toString());
currentTableId = destTableId;
destTableId++;
isDestTempFile = false;
}
if (tblDesc == null) {
if (viewDesc != null) {
tableDescriptor = PlanUtils.getTableDesc(viewDesc, cols, colTypes);
} else if (qb.getIsQuery()) {
Class<? extends Deserializer> serdeClass = LazySimpleSerDe.class;
String fileFormat = conf.getResultFileFormat().toString();
if (SessionState.get().getIsUsingThriftJDBCBinarySerDe()) {
serdeClass = ThriftJDBCBinarySerDe.class;
fileFormat = ResultFileFormat.SEQUENCEFILE.toString();
// Set the fetch formatter to be a no-op for the ListSinkOperator, since we'll
// write out formatted thrift objects to SequenceFile
conf.set(SerDeUtils.LIST_SINK_OUTPUT_FORMATTER, NoOpFetchFormatter.class.getName());
} else if (fileFormat.equals(PlanUtils.LLAP_OUTPUT_FORMAT_KEY)) {
// If this output format is Llap, check to see if Arrow is requested
boolean useArrow = HiveConf.getBoolVar(conf, HiveConf.ConfVars.LLAP_OUTPUT_FORMAT_ARROW);
serdeClass = useArrow ? ArrowColumnarBatchSerDe.class : LazyBinarySerDe2.class;
}
tableDescriptor = PlanUtils.getDefaultQueryOutputTableDesc(cols, colTypes, fileFormat, serdeClass);
} else {
tableDescriptor = PlanUtils.getDefaultTableDesc(qb.getDirectoryDesc(), cols, colTypes);
}
} else {
tableDescriptor = PlanUtils.getTableDesc(tblDesc, cols, colTypes);
}
// if available, set location in table desc properties
if (tblDesc != null && tblDesc.getLocation() != null && tableDescriptor != null && !tableDescriptor.getProperties().containsKey(hive_metastoreConstants.META_TABLE_LOCATION)) {
tableDescriptor.getProperties().setProperty(hive_metastoreConstants.META_TABLE_LOCATION, tblDesc.getLocation());
}
// We need a specific rowObjectInspector in this case
try {
specificRowObjectInspector = (StructObjectInspector) tableDescriptor.getDeserializer(conf).getObjectInspector();
} catch (Exception e) {
throw new SemanticException(e.getMessage(), e);
}
boolean isDfsDir = (destType == QBMetaData.DEST_DFS_FILE);
try {
destinationTable = tblDesc != null ? tblDesc.toTable(conf) : viewDesc != null ? viewDesc.toTable(conf) : null;
} catch (HiveException e) {
throw new SemanticException(e);
}
destTableIsFullAcid = AcidUtils.isFullAcidTable(destinationTable);
// Data organization (DISTRIBUTED, SORTED, CLUSTERED) for materialized view
if (viewDesc != null && viewDesc.isOrganized()) {
input = genMaterializedViewDataOrgPlan(sortColInfos, distributeColInfos, inputRR, input);
}
moveTaskId = getMoveTaskId();
if (isPartitioned) {
// Create a SELECT that may reorder the columns if needed
RowResolver rowResolver = new RowResolver();
List<ExprNodeDesc> columnExprs = new ArrayList<>();
List<String> colNames = new ArrayList<>();
Map<String, ExprNodeDesc> colExprMap = new HashMap<>();
for (int i = 0; i < fileSinkColInfos.size(); i++) {
ColumnInfo ci = fileSinkColInfos.get(i);
ExprNodeDesc columnExpr = new ExprNodeColumnDesc(ci);
String name = getColumnInternalName(i);
rowResolver.put("", name, new ColumnInfo(name, columnExpr.getTypeInfo(), "", false));
columnExprs.add(columnExpr);
colNames.add(name);
colExprMap.put(name, columnExpr);
}
input = putOpInsertMap(OperatorFactory.getAndMakeChild(new SelectDesc(columnExprs, colNames), new RowSchema(rowResolver.getColumnInfos()), input), rowResolver);
input.setColumnExprMap(colExprMap);
// If this is a partitioned CTAS or MV statement, we are going to create a LoadTableDesc
// object. Although the table does not exist in metastore, we will swap the CreateTableTask
// and MoveTask resulting from this LoadTable so in this specific case, first we create
// the metastore table, then we move and commit the partitions. At least for the time being,
// this order needs to be enforced because metastore expects a table to exist before we can
// add any partitions to it.
isNonNativeTable = tableDescriptor.isNonNative();
if (!isNonNativeTable || destinationTable.getStorageHandler().commitInMoveTask()) {
AcidUtils.Operation acidOp = AcidUtils.Operation.NOT_ACID;
if (destTableIsTransactional) {
acidOp = getAcidType(tableDescriptor.getOutputFileFormatClass(), dest, isMmTable);
checkAcidConstraints();
}
// isReplace = false in case concurrent operation is executed
ltd = new LoadTableDesc(queryTmpdir, tableDescriptor, dpCtx, acidOp, false, writeId);
if (writeId != null) {
ltd.setStmtId(txnMgr.getCurrentStmtId());
}
ltd.setLoadFileType(LoadFileType.KEEP_EXISTING);
ltd.setInsertOverwrite(false);
loadTableWork.add(ltd);
} else {
// This is a non-native table.
// We need to set stats as inaccurate.
setStatsForNonNativeTable(tableDescriptor.getDbName(), tableDescriptor.getTableName());
ltd = new LoadTableDesc(queryTmpdir, tableDescriptor, dpCtx.getPartSpec());
ltd.setInsertOverwrite(false);
ltd.setLoadFileType(LoadFileType.KEEP_EXISTING);
}
ltd.setMoveTaskId(moveTaskId);
ltd.setMdTable(destinationTable);
WriteEntity output = generateTableWriteEntity(dest, destinationTable, dpCtx.getPartSpec(), ltd, dpCtx);
ctx.getLoadTableOutputMap().put(ltd, output);
} else {
// Create LFD even for MM CTAS - it's a no-op move, but it still seems to be used for stats.
LoadFileDesc loadFileDesc = new LoadFileDesc(tblDesc, viewDesc, queryTmpdir, destinationPath, isDfsDir, cols, colTypes, // there is a change here - prev version had 'transactional', one before 'acid'
destTableIsFullAcid ? Operation.INSERT : Operation.NOT_ACID, isMmCreate);
loadFileDesc.setMoveTaskId(moveTaskId);
loadFileWork.add(loadFileDesc);
try {
Path qualifiedPath = destinationPath.getFileSystem(conf).makeQualified(destinationPath);
if (!outputs.add(new WriteEntity(qualifiedPath, !isDfsDir, isDestTempFile))) {
throw new SemanticException(ErrorMsg.OUTPUT_SPECIFIED_MULTIPLE_TIMES.getMsg(destinationPath.toUri().toString()));
}
} catch (IOException ex) {
throw new SemanticException("Error while getting the full qualified path for the given directory: " + ex.getMessage());
}
}
break;
}
default:
throw new SemanticException("Unknown destination type: " + destType);
}
inputRR = opParseCtx.get(input).getRowResolver();
List<ColumnInfo> vecCol = new ArrayList<ColumnInfo>();
if (updating(dest) || deleting(dest)) {
vecCol.add(new ColumnInfo(VirtualColumn.ROWID.getName(), VirtualColumn.ROWID.getTypeInfo(), "", true));
} else {
try {
// If we already have a specific inspector (view or directory as a target) use that
// Otherwise use the table deserializer to get the inspector
StructObjectInspector rowObjectInspector = specificRowObjectInspector != null ? specificRowObjectInspector : (StructObjectInspector) destinationTable.getDeserializer().getObjectInspector();
List<? extends StructField> fields = rowObjectInspector.getAllStructFieldRefs();
for (StructField field : fields) {
vecCol.add(new ColumnInfo(field.getFieldName(), TypeInfoUtils.getTypeInfoFromObjectInspector(field.getFieldObjectInspector()), "", false));
}
} catch (Exception e) {
throw new SemanticException(e.getMessage(), e);
}
}
RowSchema fsRS = new RowSchema(vecCol);
// The output files of a FileSink can be merged if they are either not being written to a table
// or are being written to a table which is not bucketed
// and table the table is not sorted
boolean canBeMerged = (destinationTable == null || !((destinationTable.getNumBuckets() > 0) || (destinationTable.getSortCols() != null && destinationTable.getSortCols().size() > 0)));
// If this table is working with ACID semantics, turn off merging
canBeMerged &= !destTableIsFullAcid;
// Generate the partition columns from the parent input
if (destType == QBMetaData.DEST_TABLE || destType == QBMetaData.DEST_PARTITION) {
genPartnCols(dest, input, qb, tableDescriptor, destinationTable, rsCtx);
}
FileSinkDesc fileSinkDesc = createFileSinkDesc(dest, tableDescriptor, destinationPartition, // this was 1/4 acid
destinationPath, // this was 1/4 acid
currentTableId, // this was 1/4 acid
destTableIsFullAcid, // this was 1/4 acid
destTableIsTemporary, destTableIsMaterialization, queryTmpdir, rsCtx, dpCtx, lbCtx, fsRS, canBeMerged, destinationTable, writeId, isMmCreate, destType, qb, isDirectInsert, acidOperation, moveTaskId);
if (isMmCreate) {
// Add FSD so that the LoadTask compilation could fix up its path to avoid the move.
if (tableDesc != null) {
tableDesc.setWriter(fileSinkDesc);
} else {
createVwDesc.setWriter(fileSinkDesc);
}
}
if (fileSinkDesc.getInsertOverwrite()) {
if (ltd != null) {
ltd.setInsertOverwrite(true);
}
}
if (null != tableDescriptor && useBatchingSerializer(tableDescriptor.getSerdeClassName())) {
fileSinkDesc.setIsUsingBatchingSerDe(true);
} else {
fileSinkDesc.setIsUsingBatchingSerDe(false);
}
Operator output = putOpInsertMap(OperatorFactory.getAndMakeChild(fileSinkDesc, fsRS, input), inputRR);
// Handle lineage unless this is a direct insert; for direct inserts it is still needed for the INSERT branch, e.g. in case of a merge statement.
if (!isDirectInsert || acidOperation == AcidUtils.Operation.INSERT) {
handleLineage(ltd, output);
}
setWriteIdForSurrogateKeys(ltd, input);
LOG.debug("Created FileSink Plan for clause: {}dest_path: {} row schema: {}", dest, destinationPath, inputRR);
FileSinkOperator fso = (FileSinkOperator) output;
fso.getConf().setTable(destinationTable);
// Auto-gather column stats when both stats autogather flags are enabled, the destination is a native (or commit-in-move-task) non-temporary table, and it is an insert overwrite, insert into table, or CTAS/CMV
if (conf.getBoolVar(ConfVars.HIVESTATSAUTOGATHER) && conf.getBoolVar(ConfVars.HIVESTATSCOLAUTOGATHER) && destinationTable != null && (!destinationTable.isNonNative() || destinationTable.getStorageHandler().commitInMoveTask()) && !destTableIsTemporary && !destTableIsMaterialization && ColumnStatsAutoGatherContext.canRunAutogatherStats(fso)) {
if (destType == QBMetaData.DEST_TABLE) {
genAutoColumnStatsGatheringPipeline(destinationTable, partSpec, input, qb.getParseInfo().isInsertIntoTable(destinationTable.getDbName(), destinationTable.getTableName()), false);
} else if (destType == QBMetaData.DEST_PARTITION) {
genAutoColumnStatsGatheringPipeline(destinationTable, destinationPartition.getSpec(), input, qb.getParseInfo().isInsertIntoTable(destinationTable.getDbName(), destinationTable.getTableName()), false);
} else if (destType == QBMetaData.DEST_LOCAL_FILE || destType == QBMetaData.DEST_DFS_FILE) {
// CTAS or CMV statement
genAutoColumnStatsGatheringPipeline(destinationTable, null, input, false, true);
}
}
return output;
}
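Because the six-argument LoadTableDesc constructor used in the DEST_TABLE and partitioned CTAS/MV branches above is dense, here is an annotated sketch of that call; the per-argument notes are inferred from how the surrounding code populates the values, not from LoadTableDesc itself, and the variables are the ones in scope in genFileSinkPlan.

ltd = new LoadTableDesc(
    queryTmpdir,       // where the FileSinkOperator writes first (the final directory for direct insert / MM)
    tableDescriptor,   // TableDesc of the destination table, without the partition columns
    dpCtx,             // DynamicPartitionCtx rooted at queryTmpdir, or null for a static insert
    acidOp,            // AcidUtils.Operation: NOT_ACID, or the ACID write type for transactional tables
    isReplace,         // true when the statement is not an INSERT INTO, i.e. an overwrite
    writeId);          // table write id, or null when no transaction is open (e.g. EXPLAIN)
if (writeId != null) {
  ltd.setStmtId(txnMgr.getCurrentStmtId());   // statement id within the transaction
}
ltd.setMoveTaskId(moveTaskId);                // ties the FileSink output to its MoveTask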
Use of org.apache.hadoop.hive.ql.plan.LoadTableDesc in project hive by apache.
The class ImportSemanticAnalyzer, method addSinglePartition:
private static Task<?> addSinglePartition(ImportTableDesc tblDesc, Table table, Warehouse wh, AlterTableAddPartitionDesc addPartitionDesc, ReplicationSpec replicationSpec, EximUtil.SemanticAnalyzerWrapperContext x, Long writeId, int stmtId, boolean isReplication, String dumpRoot, ReplicationMetricCollector metricCollector) throws MetaException, IOException, HiveException {
AlterTableAddPartitionDesc.PartitionDesc partSpec = addPartitionDesc.getPartitions().get(0);
boolean isSkipTrash = false;
boolean needRecycle = false;
if (shouldSkipDataCopyInReplScope(tblDesc, replicationSpec) || (tblDesc.isExternal() && tblDesc.getLocation() == null)) {
x.getLOG().debug("Adding AddPart and skipped data copy for partition " + partSpecToString(partSpec.getPartSpec()));
// addPartitionDesc already has the right partition location
@SuppressWarnings("unchecked") Task<?> addPartTask = TaskFactory.get(new DDLWork(x.getInputs(), x.getOutputs(), addPartitionDesc, isReplication, dumpRoot, metricCollector), x.getConf());
return addPartTask;
} else {
String srcLocation = partSpec.getLocation();
if (replicationSpec.isInReplicationScope() && !ReplicationSpec.Type.IMPORT.equals(replicationSpec.getReplSpecType())) {
Path partLocation = new Path(partSpec.getLocation());
Path dataDirBase = partLocation.getParent();
String bucketDir = partLocation.getName();
for (int i = 1; i < partSpec.getPartSpec().size(); i++) {
bucketDir = dataDirBase.getName() + File.separator + bucketDir;
dataDirBase = dataDirBase.getParent();
}
String relativePartDataPath = EximUtil.DATA_PATH_NAME + File.separator + bucketDir;
srcLocation = new Path(dataDirBase, relativePartDataPath).toString();
}
fixLocationInPartSpec(tblDesc, table, wh, replicationSpec, partSpec, x);
x.getLOG().debug("adding dependent CopyWork/AddPart/MoveWork for partition " + partSpecToString(partSpec.getPartSpec()) + " with source location: " + srcLocation);
Path tgtLocation = new Path(partSpec.getLocation());
LoadFileType loadFileType;
Path destPath;
if (replicationSpec.isInReplicationScope()) {
loadFileType = LoadFileType.IGNORE;
destPath = tgtLocation;
isSkipTrash = MetaStoreUtils.isSkipTrash(table.getParameters());
if (table.isTemporary()) {
needRecycle = false;
} else {
org.apache.hadoop.hive.metastore.api.Database db = x.getHive().getDatabase(table.getDbName());
needRecycle = db != null && ReplChangeManager.shouldEnableCm(db, table.getTTable());
}
} else {
loadFileType = replicationSpec.isReplace() ? LoadFileType.REPLACE_ALL : LoadFileType.OVERWRITE_EXISTING;
// In replication scope the write id will be invalid
boolean useStagingDirectory = !AcidUtils.isTransactionalTable(table.getParameters()) || replicationSpec.isInReplicationScope();
destPath = useStagingDirectory ? x.getCtx().getExternalTmpPath(tgtLocation) : new Path(tgtLocation, AcidUtils.deltaSubdir(writeId, writeId, stmtId));
}
Path moveTaskSrc = !AcidUtils.isTransactionalTable(table.getParameters()) || replicationSpec.isInReplicationScope() ? destPath : tgtLocation;
if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
Utilities.FILE_OP_LOGGER.trace("adding import work for partition with source location: " + srcLocation + "; target: " + tgtLocation + "; copy dest " + destPath + "; mm " + writeId + " for " + partSpecToString(partSpec.getPartSpec()) + ": " + (AcidUtils.isFullAcidTable(table) ? "acid" : (AcidUtils.isInsertOnlyTable(table) ? "mm" : "flat")));
}
Task<?> copyTask = null;
if (replicationSpec.isInReplicationScope()) {
boolean copyAtLoad = x.getConf().getBoolVar(HiveConf.ConfVars.REPL_RUN_DATA_COPY_TASKS_ON_TARGET);
copyTask = ReplCopyTask.getLoadCopyTask(replicationSpec, new Path(srcLocation), destPath, x.getConf(), isSkipTrash, needRecycle, copyAtLoad, dumpRoot, metricCollector);
} else {
copyTask = TaskFactory.get(new CopyWork(new Path(srcLocation), destPath, false, dumpRoot, metricCollector, isReplication));
}
Task<?> addPartTask = null;
if (x.getEventType() != DumpType.EVENT_COMMIT_TXN) {
// During replication, by the time we are applying commit transaction event, we expect
// the partition/s to be already added or altered by previous events. So no need to
// create add partition event again.
addPartTask = TaskFactory.get(new DDLWork(x.getInputs(), x.getOutputs(), addPartitionDesc, isReplication, dumpRoot, metricCollector), x.getConf());
}
MoveWork moveWork = new MoveWork(x.getInputs(), x.getOutputs(), null, null, false, dumpRoot, metricCollector, isReplication);
// See setLoadFileType and setIsAcidIow calls elsewhere for an example.
if (replicationSpec.isInReplicationScope() && AcidUtils.isTransactionalTable(tblDesc.getTblProps())) {
LoadMultiFilesDesc loadFilesWork = new LoadMultiFilesDesc(Collections.singletonList(destPath), Collections.singletonList(tgtLocation), true, null, null);
moveWork.setMultiFilesDesc(loadFilesWork);
moveWork.setNeedCleanTarget(replicationSpec.isReplace());
} else {
LoadTableDesc loadTableWork = new LoadTableDesc(moveTaskSrc, Utilities.getTableDesc(table), partSpec.getPartSpec(), loadFileType, writeId);
loadTableWork.setStmtId(stmtId);
loadTableWork.setInheritTableSpecs(false);
moveWork.setLoadTableWork(loadTableWork);
}
if (loadFileType == LoadFileType.IGNORE) {
// update which is again done in load operations as part of move task.
if (x.getEventType() == DumpType.EVENT_INSERT) {
copyTask.addDependentTask(TaskFactory.get(moveWork, x.getConf()));
} else {
if (addPartTask != null) {
copyTask.addDependentTask(addPartTask);
}
}
return copyTask;
}
Task<?> loadPartTask = TaskFactory.get(moveWork, x.getConf());
copyTask.addDependentTask(loadPartTask);
if (addPartTask != null) {
addPartTask.addDependentTask(loadPartTask);
x.getTasks().add(copyTask);
return addPartTask;
}
return copyTask;
}
}
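As a condensed recap of the branch above that decides what the partition's MoveWork carries: replication into a transactional table moves raw files via a LoadMultiFilesDesc, while every other import path commits the partition through a LoadTableDesc. The boolean below is a placeholder for the isInReplicationScope()/isTransactionalTable() check; everything else reuses calls shown in the method.

MoveWork moveWork = new MoveWork(x.getInputs(), x.getOutputs(), null, null, false, dumpRoot, metricCollector, isReplication);
if (replicatingIntoAcidTable) {   // placeholder condition, see the real check above
  moveWork.setMultiFilesDesc(new LoadMultiFilesDesc(
      Collections.singletonList(destPath), Collections.singletonList(tgtLocation), true, null, null));
  moveWork.setNeedCleanTarget(replicationSpec.isReplace());
} else {
  LoadTableDesc loadTableWork = new LoadTableDesc(moveTaskSrc, Utilities.getTableDesc(table),
      partSpec.getPartSpec(), loadFileType, writeId);
  loadTableWork.setStmtId(stmtId);
  loadTableWork.setInheritTableSpecs(false);  // keep the specs set by the add-partition task
  moveWork.setLoadTableWork(loadTableWork);
}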