Use of org.apache.hadoop.hive.ql.metadata.HiveException in project hive by apache.
The class SemanticAnalyzer, method validateCreateView.
// validate the (materialized) view statement
// check semantic conditions
private void validateCreateView() throws SemanticException {
try {
Table oldView = getTable(createVwDesc.getViewName(), false);
// Do not allow a view to be defined on a temp table or on another materialized view
Set<String> tableAliases = qb.getTabAliases();
for (String alias : tableAliases) {
try {
Table table = this.getTableObjectByName(qb.getTabNameForAlias(alias));
if (table.isTemporary()) {
throw new SemanticException("View definition references temporary table " + alias);
}
if (table.isMaterializedView()) {
throw new SemanticException("View definition references materialized view " + alias);
}
if (createVwDesc.isMaterialized() && createVwDesc.isRewriteEnabled() && !AcidUtils.isTransactionalTable(table)) {
throw new SemanticException("Automatic rewriting for materialized view cannot " + "be enabled if the materialized view uses non-transactional tables");
}
} catch (HiveException ex) {
throw new SemanticException(ex);
}
}
// ALTER VIEW AS SELECT requires that the view already exists
if (createVwDesc.getIsAlterViewAs() && oldView == null) {
String viewNotExistErrorMsg = "The following view does not exist: " + createVwDesc.getViewName();
throw new SemanticException(ErrorMsg.ALTER_VIEW_AS_SELECT_NOT_EXIST.getMsg(viewNotExistErrorMsg));
}
// replace view
if (createVwDesc.isReplace() && oldView != null) {
// Don't allow swapping between virtual and materialized view in replace
if (oldView.getTableType().equals(TableType.VIRTUAL_VIEW) && createVwDesc.isMaterialized()) {
throw new SemanticException(ErrorMsg.REPLACE_VIEW_WITH_MATERIALIZED, oldView.getTableName());
}
if (oldView.getTableType().equals(TableType.MATERIALIZED_VIEW) && !createVwDesc.isMaterialized()) {
throw new SemanticException(ErrorMsg.REPLACE_MATERIALIZED_WITH_VIEW, oldView.getTableName());
}
// Existing table is not a view
if (!oldView.getTableType().equals(TableType.VIRTUAL_VIEW) && !oldView.getTableType().equals(TableType.MATERIALIZED_VIEW)) {
String tableNotViewErrorMsg = "The following is an existing table, not a view: " + createVwDesc.getViewName();
throw new SemanticException(ErrorMsg.EXISTING_TABLE_IS_NOT_VIEW.getMsg(tableNotViewErrorMsg));
}
if (!createVwDesc.isMaterialized()) {
// if the old view has partitions, it cannot be replaced
String partitionViewErrorMsg = "The following view has partition, it could not be replaced: " + createVwDesc.getViewName();
try {
if ((createVwDesc.getPartCols() == null || createVwDesc.getPartCols().isEmpty() || !createVwDesc.getPartCols().equals(oldView.getPartCols())) && !oldView.getPartCols().isEmpty() && !db.getPartitions(oldView).isEmpty()) {
throw new SemanticException(ErrorMsg.REPLACE_VIEW_WITH_PARTITION.getMsg(partitionViewErrorMsg));
}
} catch (HiveException e) {
throw new SemanticException(ErrorMsg.REPLACE_VIEW_WITH_PARTITION.getMsg(partitionViewErrorMsg));
}
}
}
} catch (HiveException e) {
throw new SemanticException(e.getMessage(), e);
}
}
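The recurring pattern in validateCreateView is that checked HiveExceptions thrown by metastore lookups are rewrapped as SemanticExceptions, so compilation fails with a user-facing semantic error instead of leaking a metadata-layer exception. Below is a minimal sketch of that pattern, not taken from the Hive source; lookupTable is a hypothetical stand-in for any metastore call that can throw HiveException:
// Sketch only: wrap-and-rethrow of HiveException as SemanticException.
private Table requireNonTemporaryTable(String alias) throws SemanticException {
  try {
    Table table = lookupTable(alias); // hypothetical metastore lookup; may throw HiveException
    if (table != null && table.isTemporary()) {
      throw new SemanticException("View definition references temporary table " + alias);
    }
    return table;
  } catch (HiveException ex) {
    // surface the metadata failure as a semantic error
    throw new SemanticException(ex);
  }
}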
Use of org.apache.hadoop.hive.ql.metadata.HiveException in project hive by apache.
The class SemanticAnalyzer, method analyzeCreateTable.
/**
* Analyze the create table command. If it is a regular create-table or
* create-table-like statement, we create a DDLWork and return null. If it is
* a create-table-as-select, we get the necessary info such as the SerDe and
* Storage Format, put it in the QB, and return the select statement's AST,
* indicating that the rest of the semantic analyzer needs to deal with the
* select statement with respect to the SerDe and Storage Format.
*/
ASTNode analyzeCreateTable(ASTNode ast, QB qb, PlannerContext plannerCtx) throws SemanticException {
String[] qualifiedTabName = getQualifiedTableName((ASTNode) ast.getChild(0));
String dbDotTab = getDotName(qualifiedTabName);
String likeTableName = null;
List<FieldSchema> cols = new ArrayList<FieldSchema>();
List<FieldSchema> partCols = new ArrayList<FieldSchema>();
List<String> bucketCols = new ArrayList<String>();
List<SQLPrimaryKey> primaryKeys = new ArrayList<SQLPrimaryKey>();
List<SQLForeignKey> foreignKeys = new ArrayList<SQLForeignKey>();
List<SQLUniqueConstraint> uniqueConstraints = new ArrayList<>();
List<SQLNotNullConstraint> notNullConstraints = new ArrayList<>();
List<SQLDefaultConstraint> defaultConstraints = new ArrayList<>();
List<SQLCheckConstraint> checkConstraints = new ArrayList<>();
List<Order> sortCols = new ArrayList<Order>();
int numBuckets = -1;
String comment = null;
String location = null;
Map<String, String> tblProps = null;
boolean ifNotExists = false;
boolean isExt = false;
boolean isTemporary = false;
boolean isMaterialization = false;
ASTNode selectStmt = null;
// regular CREATE TABLE
final int CREATE_TABLE = 0;
// CREATE TABLE LIKE ... (CTLT)
final int CTLT = 1;
// CREATE TABLE AS SELECT ... (CTAS)
final int CTAS = 2;
int command_type = CREATE_TABLE;
List<String> skewedColNames = new ArrayList<String>();
List<List<String>> skewedValues = new ArrayList<List<String>>();
Map<List<String>, String> listBucketColValuesMapping = new HashMap<List<String>, String>();
boolean storedAsDirs = false;
boolean isUserStorageFormat = false;
RowFormatParams rowFormatParams = new RowFormatParams();
StorageFormat storageFormat = new StorageFormat(conf);
LOG.info("Creating table " + dbDotTab + " position=" + ast.getCharPositionInLine());
int numCh = ast.getChildCount();
/*
* Check the 1st-level children and do simple semantic checks: 1) CTLT and
* CTAS should not coexist. 2) CTLT or CTAS should not coexist with a column
* list (target table schema). 3) CTAS does not support partitioning (for
* now).
*/
for (int num = 1; num < numCh; num++) {
ASTNode child = (ASTNode) ast.getChild(num);
if (storageFormat.fillStorageFormat(child)) {
isUserStorageFormat = true;
continue;
}
switch(child.getToken().getType()) {
case HiveParser.TOK_IFNOTEXISTS:
ifNotExists = true;
break;
case HiveParser.KW_EXTERNAL:
isExt = true;
break;
case HiveParser.KW_TEMPORARY:
isTemporary = true;
isMaterialization = MATERIALIZATION_MARKER.equals(child.getText());
break;
case HiveParser.TOK_LIKETABLE:
if (child.getChildCount() > 0) {
likeTableName = getUnescapedName((ASTNode) child.getChild(0));
if (likeTableName != null) {
if (command_type == CTAS) {
throw new SemanticException(ErrorMsg.CTAS_CTLT_COEXISTENCE.getMsg());
}
if (cols.size() != 0) {
throw new SemanticException(ErrorMsg.CTLT_COLLST_COEXISTENCE.getMsg());
}
}
command_type = CTLT;
}
break;
case HiveParser.TOK_QUERY: // CTAS
if (command_type == CTLT) {
throw new SemanticException(ErrorMsg.CTAS_CTLT_COEXISTENCE.getMsg());
}
if (cols.size() != 0) {
throw new SemanticException(ErrorMsg.CTAS_COLLST_COEXISTENCE.getMsg());
}
if (partCols.size() != 0 || bucketCols.size() != 0) {
boolean dynPart = HiveConf.getBoolVar(conf, HiveConf.ConfVars.DYNAMICPARTITIONING);
if (dynPart == false) {
throw new SemanticException(ErrorMsg.CTAS_PARCOL_COEXISTENCE.getMsg());
} else {
// TODO: support dynamic partition for CTAS
throw new SemanticException(ErrorMsg.CTAS_PARCOL_COEXISTENCE.getMsg());
}
}
if (isExt) {
throw new SemanticException(ErrorMsg.CTAS_EXTTBL_COEXISTENCE.getMsg());
}
command_type = CTAS;
if (plannerCtx != null) {
plannerCtx.setCTASToken(child);
}
selectStmt = child;
break;
case HiveParser.TOK_TABCOLLIST:
cols = getColumns(child, true, ctx.getTokenRewriteStream(), primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints);
break;
case HiveParser.TOK_TABLECOMMENT:
comment = unescapeSQLString(child.getChild(0).getText());
break;
case HiveParser.TOK_TABLEPARTCOLS:
partCols = getColumns(child, false, ctx.getTokenRewriteStream(), primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints);
if (hasConstraints(partCols, defaultConstraints, notNullConstraints, checkConstraints)) {
// TODO: these constraints should be supported for partition columns
throw new SemanticException(ErrorMsg.INVALID_CSTR_SYNTAX.getMsg("NOT NULL,DEFAULT and CHECK Constraints are not allowed with " + "partition columns. "));
}
break;
case HiveParser.TOK_ALTERTABLE_BUCKETS:
bucketCols = getColumnNames((ASTNode) child.getChild(0));
if (child.getChildCount() == 2) {
numBuckets = Integer.parseInt(child.getChild(1).getText());
} else {
sortCols = getColumnNamesOrder((ASTNode) child.getChild(1));
numBuckets = Integer.parseInt(child.getChild(2).getText());
}
break;
case HiveParser.TOK_TABLEROWFORMAT:
rowFormatParams.analyzeRowFormat(child);
break;
case HiveParser.TOK_TABLELOCATION:
location = unescapeSQLString(child.getChild(0).getText());
location = EximUtil.relativeToAbsolutePath(conf, location);
inputs.add(toReadEntity(location));
break;
case HiveParser.TOK_TABLEPROPERTIES:
tblProps = DDLSemanticAnalyzer.getProps((ASTNode) child.getChild(0));
break;
case HiveParser.TOK_TABLESERIALIZER:
child = (ASTNode) child.getChild(0);
storageFormat.setSerde(unescapeSQLString(child.getChild(0).getText()));
if (child.getChildCount() == 2) {
readProps((ASTNode) (child.getChild(1).getChild(0)), storageFormat.getSerdeProps());
}
break;
case HiveParser.TOK_TABLESKEWED:
/**
* Throw an error if the user tries to use the DDL with
* hive.internal.ddl.list.bucketing.enable set to false.
*/
HiveConf hiveConf = SessionState.get().getConf();
// skewed column names
skewedColNames = analyzeSkewedTablDDLColNames(skewedColNames, child);
// skewed value
analyzeDDLSkewedValues(skewedValues, child);
// stored as directories
storedAsDirs = analyzeStoredAdDirs(child);
break;
default:
throw new AssertionError("Unknown token: " + child.getToken());
}
}
if (command_type == CREATE_TABLE || command_type == CTLT) {
queryState.setCommandType(HiveOperation.CREATETABLE);
} else if (command_type == CTAS) {
queryState.setCommandType(HiveOperation.CREATETABLE_AS_SELECT);
} else {
throw new SemanticException("Unrecognized command.");
}
if (isExt && hasEnabledOrValidatedConstraints(notNullConstraints, defaultConstraints, checkConstraints)) {
throw new SemanticException(ErrorMsg.INVALID_CSTR_SYNTAX.getMsg("Constraints are disallowed with External tables. " + "Only RELY is allowed."));
}
if (checkConstraints != null && !checkConstraints.isEmpty()) {
validateCheckConstraint(cols, checkConstraints, ctx.getConf());
}
storageFormat.fillDefaultStorageFormat(isExt, false);
// check for existence of table
if (ifNotExists) {
try {
Table table = getTable(qualifiedTabName, false);
if (table != null) {
// table exists
return null;
}
} catch (HiveException e) {
// should not occur since the second parameter to getTable is false
throw new IllegalStateException("Unexpected Exception thrown: " + e.getMessage(), e);
}
}
if (isTemporary) {
if (partCols.size() > 0) {
throw new SemanticException("Partition columns are not supported on temporary tables");
}
if (location == null) {
// no location was given; place the table under the session's temp table space so it has the same life cycle as the tmp table
try {
// Generate a unique ID for temp table path.
// This path will be fixed for the life of the temp table.
Path path = new Path(SessionState.getTempTableSpace(conf), UUID.randomUUID().toString());
path = Warehouse.getDnsPath(path, conf);
location = path.toString();
} catch (MetaException err) {
throw new SemanticException("Error while generating temp table path:", err);
}
}
}
switch(command_type) {
case CREATE_TABLE: // REGULAR CREATE TABLE DDL
tblProps = addDefaultProperties(tblProps, isExt, storageFormat, dbDotTab, sortCols, isMaterialization);
addDbAndTabToOutputs(qualifiedTabName, TableType.MANAGED_TABLE, tblProps);
CreateTableDesc crtTblDesc = new CreateTableDesc(dbDotTab, isExt, isTemporary, cols, partCols, bucketCols, sortCols, numBuckets, rowFormatParams.fieldDelim, rowFormatParams.fieldEscape, rowFormatParams.collItemDelim, rowFormatParams.mapKeyDelim, rowFormatParams.lineDelim, comment, storageFormat.getInputFormat(), storageFormat.getOutputFormat(), location, storageFormat.getSerde(), storageFormat.getStorageHandler(), storageFormat.getSerdeProps(), tblProps, ifNotExists, skewedColNames, skewedValues, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints);
crtTblDesc.setStoredAsSubDirectories(storedAsDirs);
crtTblDesc.setNullFormat(rowFormatParams.nullFormat);
crtTblDesc.validate(conf);
// outputs is empty, which means this create table happens in the current
// database.
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), crtTblDesc)));
break;
case CTLT: // create table like <tbl_name>
tblProps = addDefaultProperties(tblProps, isExt, storageFormat, dbDotTab, sortCols, isMaterialization);
addDbAndTabToOutputs(qualifiedTabName, TableType.MANAGED_TABLE, tblProps);
if (isTemporary) {
Table likeTable = getTable(likeTableName, false);
if (likeTable != null && likeTable.getPartCols().size() > 0) {
throw new SemanticException("Partition columns are not supported on temporary tables " + "and source table in CREATE TABLE LIKE is partitioned.");
}
}
CreateTableLikeDesc crtTblLikeDesc = new CreateTableLikeDesc(dbDotTab, isExt, isTemporary, storageFormat.getInputFormat(), storageFormat.getOutputFormat(), location, storageFormat.getSerde(), storageFormat.getSerdeProps(), tblProps, ifNotExists, likeTableName, isUserStorageFormat);
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), crtTblLikeDesc)));
break;
case CTAS: // create table as select
if (isTemporary) {
if (!ctx.isExplainSkipExecution() && !isMaterialization) {
String dbName = qualifiedTabName[0];
String tblName = qualifiedTabName[1];
SessionState ss = SessionState.get();
if (ss == null) {
throw new SemanticException("No current SessionState, cannot create temporary table " + dbName + "." + tblName);
}
Map<String, Table> tables = SessionHiveMetaStoreClient.getTempTablesForDatabase(dbName);
if (tables != null && tables.containsKey(tblName)) {
throw new SemanticException("Temporary table " + dbName + "." + tblName + " already exists");
}
}
} else {
// dumpTable is only used to check the conflict for non-temporary tables
try {
Table dumpTable = db.newTable(dbDotTab);
if (null != db.getTable(dumpTable.getDbName(), dumpTable.getTableName(), false) && !ctx.isExplainSkipExecution()) {
throw new SemanticException(ErrorMsg.TABLE_ALREADY_EXISTS.getMsg(dbDotTab));
}
} catch (HiveException e) {
throw new SemanticException(e);
}
}
if (location != null && location.length() != 0) {
Path locPath = new Path(location);
FileSystem curFs = null;
FileStatus locStats = null;
try {
curFs = locPath.getFileSystem(conf);
if (curFs != null) {
locStats = curFs.getFileStatus(locPath);
}
if (locStats != null && locStats.isDir()) {
FileStatus[] lStats = curFs.listStatus(locPath);
if (lStats != null && lStats.length != 0) {
// Don't throw an exception if the target location only contains the staging-dirs
for (FileStatus lStat : lStats) {
if (!lStat.getPath().getName().startsWith(HiveConf.getVar(conf, HiveConf.ConfVars.STAGINGDIR))) {
throw new SemanticException(ErrorMsg.CTAS_LOCATION_NONEMPTY.getMsg(location));
}
}
}
}
} catch (FileNotFoundException nfe) {
// we will create the folder if it does not exist.
} catch (IOException ioE) {
if (LOG.isDebugEnabled()) {
LOG.debug("Exception when validate folder ", ioE);
}
}
}
tblProps = addDefaultProperties(tblProps, isExt, storageFormat, dbDotTab, sortCols, isMaterialization);
addDbAndTabToOutputs(qualifiedTabName, TableType.MANAGED_TABLE, tblProps);
tableDesc = new CreateTableDesc(qualifiedTabName[0], dbDotTab, isExt, isTemporary, cols, partCols, bucketCols, sortCols, numBuckets, rowFormatParams.fieldDelim, rowFormatParams.fieldEscape, rowFormatParams.collItemDelim, rowFormatParams.mapKeyDelim, rowFormatParams.lineDelim, comment, storageFormat.getInputFormat(), storageFormat.getOutputFormat(), location, storageFormat.getSerde(), storageFormat.getStorageHandler(), storageFormat.getSerdeProps(), tblProps, ifNotExists, skewedColNames, skewedValues, true, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints);
tableDesc.setMaterialization(isMaterialization);
tableDesc.setStoredAsSubDirectories(storedAsDirs);
tableDesc.setNullFormat(rowFormatParams.nullFormat);
qb.setTableDesc(tableDesc);
return selectStmt;
default:
throw new SemanticException("Unrecognized command.");
}
return null;
}
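analyzeCreateTable handles HiveException in two different ways: the IF NOT EXISTS probe calls getTable with the throw-if-missing flag set to false, so any HiveException there is treated as an internal error, while the CTAS conflict check wraps the exception as a SemanticException. The following is a condensed, hedged sketch of those two branches; the helper method itself is hypothetical, but getTable, db, ErrorMsg, and the message texts are reused from the snippet above:
// Sketch only: the two HiveException handling styles used above.
private void checkTargetTable(String[] qualifiedTabName, String dbDotTab, boolean ifNotExists)
    throws SemanticException {
  if (ifNotExists) {
    try {
      // second argument false: return null instead of throwing when the table is missing
      if (getTable(qualifiedTabName, false) != null) {
        return; // CREATE TABLE IF NOT EXISTS on an existing table is a no-op
      }
    } catch (HiveException e) {
      // not expected when the throw-if-missing flag is false; treat it as an internal error
      throw new IllegalStateException("Unexpected Exception thrown: " + e.getMessage(), e);
    }
  }
  try {
    Table dumpTable = db.newTable(dbDotTab);
    if (db.getTable(dumpTable.getDbName(), dumpTable.getTableName(), false) != null) {
      throw new SemanticException(ErrorMsg.TABLE_ALREADY_EXISTS.getMsg(dbDotTab));
    }
  } catch (HiveException e) {
    // a metastore failure surfaces as a semantic error
    throw new SemanticException(e);
  }
}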
Use of org.apache.hadoop.hive.ql.metadata.HiveException in project hive by apache.
The class SemanticAnalyzer, method getCheckConstraintExpr.
private ExprNodeDesc getCheckConstraintExpr(Table tbl, Operator input, RowResolver inputRR, String dest) throws SemanticException {
CheckConstraint cc = null;
try {
cc = Hive.get().getEnabledCheckConstraints(tbl.getDbName(), tbl.getTableName());
} catch (HiveException e) {
throw new SemanticException(e);
}
if (cc == null || cc.getCheckConstraints().isEmpty()) {
return null;
}
// build a map from each table column name to the corresponding column in the input's signature;
// this will be used to replace column references in the CHECK expression AST with the
// corresponding columns in the input
Map<String, String> col2Cols = new HashMap<>();
List<ColumnInfo> colInfos = input.getSchema().getSignature();
int colIdx = 0;
if (updating(dest)) {
// if this is an update we need to skip the first col since it is row id
colIdx = 1;
}
for (FieldSchema fs : tbl.getCols()) {
// since SQL is case insensitive, convert the key to lower case so that the comparison between
// column names and the check expression's column references works
col2Cols.put(fs.getName().toLowerCase(), colInfos.get(colIdx).getInternalName());
colIdx++;
}
List<String> checkExprStrs = cc.getCheckExpressionList();
TypeCheckCtx typeCheckCtx = new TypeCheckCtx(inputRR);
ExprNodeDesc checkAndExprs = null;
for (String checkExprStr : checkExprStrs) {
try {
ParseDriver parseDriver = new ParseDriver();
ASTNode checkExprAST = parseDriver.parseExpression(checkExprStr);
// replace column references in checkExprAST with corresponding columns in input
replaceColumnReference(checkExprAST, col2Cols, inputRR);
Map<ASTNode, ExprNodeDesc> genExprs = TypeCheckProcFactory.genExprNode(checkExprAST, typeCheckCtx);
ExprNodeDesc checkExpr = genExprs.get(checkExprAST);
// A check constraint fails only if it evaluates to false; NULL/UNKNOWN should evaluate to TRUE
ExprNodeDesc notFalseCheckExpr = TypeCheckProcFactory.DefaultExprProcessor.getFuncExprNodeDesc("isnotfalse", checkExpr);
if (checkAndExprs == null) {
checkAndExprs = notFalseCheckExpr;
} else {
checkAndExprs = TypeCheckProcFactory.DefaultExprProcessor.getFuncExprNodeDesc("and", checkAndExprs, notFalseCheckExpr);
}
} catch (Exception e) {
throw new SemanticException(e);
}
}
return checkAndExprs;
}
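The loop in getCheckConstraintExpr folds each CHECK predicate into a single conjunct, wrapping every predicate in isnotfalse so that NULL/UNKNOWN results do not fail the constraint. A hedged, standalone sketch of that fold follows; checkExprs is a hypothetical list of already-built ExprNodeDesc predicates, and the TypeCheckProcFactory helper is the one used in the snippet:
// Sketch only: AND together per-constraint predicates, treating NULL/UNKNOWN as passing.
private ExprNodeDesc andAllNotFalse(List<ExprNodeDesc> checkExprs) throws SemanticException {
  ExprNodeDesc combined = null;
  try {
    for (ExprNodeDesc checkExpr : checkExprs) {
      ExprNodeDesc notFalse =
          TypeCheckProcFactory.DefaultExprProcessor.getFuncExprNodeDesc("isnotfalse", checkExpr);
      combined = (combined == null) ? notFalse
          : TypeCheckProcFactory.DefaultExprProcessor.getFuncExprNodeDesc("and", combined, notFalse);
    }
  } catch (Exception e) {
    // mirror the snippet: any failure while building the expression becomes a SemanticException
    throw new SemanticException(e);
  }
  return combined;
}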
Use of org.apache.hadoop.hive.ql.metadata.HiveException in project hive by apache.
The class SemanticAnalyzer, method analyzeInternal.
void analyzeInternal(ASTNode ast, PlannerContextFactory pcf) throws SemanticException {
// 1. Generate Resolved Parse tree from syntax tree
LOG.info("Starting Semantic Analysis");
// change the location of position alias process here
processPositionAlias(ast);
PlannerContext plannerCtx = pcf.create();
if (!genResolvedParseTree(ast, plannerCtx)) {
return;
}
if (HiveConf.getBoolVar(conf, ConfVars.HIVE_REMOVE_ORDERBY_IN_SUBQUERY)) {
for (String alias : qb.getSubqAliases()) {
removeOBInSubQuery(qb.getSubqForAlias(alias));
}
}
// Check query results cache.
// If no masking/filtering required, then we can check the cache now, before
// generating the operator tree and going through CBO.
// Otherwise we have to wait until after the masking/filtering step.
boolean isCacheEnabled = conf.getBoolVar(HiveConf.ConfVars.HIVE_QUERY_RESULTS_CACHE_ENABLED);
QueryResultsCache.LookupInfo lookupInfo = null;
boolean needsTransform = needsTransform();
if (isCacheEnabled && !needsTransform && queryTypeCanUseCache()) {
lookupInfo = createLookupInfoForQuery(ast);
if (checkResultsCache(lookupInfo)) {
return;
}
}
// 2. Gen OP Tree from resolved Parse Tree
Operator sinkOp = genOPTree(ast, plannerCtx);
if (!unparseTranslator.isEnabled() && (tableMask.isEnabled() && analyzeRewrite == null)) {
// Here we rewrite the * and also the masking table
ASTNode tree = rewriteASTWithMaskAndFilter(tableMask, ast, ctx.getTokenRewriteStream(), ctx, db, tabNameToTabObject, ignoredTokens);
if (tree != ast) {
plannerCtx = pcf.create();
ctx.setSkipTableMasking(true);
init(true);
// change the location of position alias process here
processPositionAlias(tree);
genResolvedParseTree(tree, plannerCtx);
if (this instanceof CalcitePlanner) {
((CalcitePlanner) this).resetCalciteConfiguration();
}
sinkOp = genOPTree(tree, plannerCtx);
}
}
// If masking/filtering was required, check the query results cache here, after applying the masking/filtering rewrite rules to the AST.
if (isCacheEnabled && needsTransform && queryTypeCanUseCache()) {
lookupInfo = createLookupInfoForQuery(ast);
if (checkResultsCache(lookupInfo)) {
return;
}
}
// 3. Deduce Resultset Schema
if (createVwDesc != null && !this.ctx.isCboSucceeded()) {
resultSchema = convertRowSchemaToViewSchema(opParseCtx.get(sinkOp).getRowResolver());
} else {
// only compute the result schema here if CBO has not already produced it
if (resultSchema == null) {
resultSchema = convertRowSchemaToResultSetSchema(opParseCtx.get(sinkOp).getRowResolver(), HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_RESULTSET_USE_UNIQUE_COLUMN_NAMES));
}
}
// 4. Generate Parse Context for Optimizer & Physical compiler
copyInfoToQueryProperties(queryProperties);
ParseContext pCtx = new ParseContext(queryState, opToPartPruner, opToPartList, topOps, new HashSet<JoinOperator>(joinContext.keySet()), new HashSet<SMBMapJoinOperator>(smbMapJoinContext.keySet()), loadTableWork, loadFileWork, columnStatsAutoGatherContexts, ctx, idToTableNameMap, destTableId, uCtx, listMapJoinOpsNoReducer, prunedPartitions, tabNameToTabObject, opToSamplePruner, globalLimitCtx, nameToSplitSample, inputs, rootTasks, opToPartToSkewedPruner, viewAliasToInput, reduceSinkOperatorsAddedByEnforceBucketingSorting, analyzeRewrite, tableDesc, createVwDesc, materializedViewUpdateDesc, queryProperties, viewProjectToTableSchema, acidFileSinks);
// Set the semijoin hints in parse context
pCtx.setSemiJoinHints(parseSemiJoinHint(getQB().getParseInfo().getHintList()));
// Set the mapjoin hint if it needs to be disabled.
pCtx.setDisableMapJoin(disableMapJoinWithHint(getQB().getParseInfo().getHintList()));
// 5. Take care of view creation
if (createVwDesc != null) {
if (ctx.getExplainAnalyze() == AnalyzeState.RUNNING) {
return;
}
if (!ctx.isCboSucceeded()) {
saveViewDefinition();
}
// validate the create view statement; at this point, the createVwDesc has
// all the information needed for the semantic check
validateCreateView();
if (createVwDesc.isMaterialized()) {
createVwDesc.setTablesUsed(getTablesUsed(pCtx));
} else {
// Since we're only creating a view (not executing it), we don't need to
// optimize or translate the plan (and in fact, those procedures can
// interfere with the view creation). So skip the rest of this method.
ctx.setResDir(null);
ctx.setResFile(null);
try {
PlanUtils.addInputsForView(pCtx);
} catch (HiveException e) {
throw new SemanticException(e);
}
// Generate lineage info for create view statements
// if LineageLogger hook is configured.
// Add the transformation that computes the lineage information.
Set<String> postExecHooks = Sets.newHashSet(Splitter.on(",").trimResults().omitEmptyStrings().split(Strings.nullToEmpty(HiveConf.getVar(conf, HiveConf.ConfVars.POSTEXECHOOKS))));
if (postExecHooks.contains("org.apache.hadoop.hive.ql.hooks.PostExecutePrinter") || postExecHooks.contains("org.apache.hadoop.hive.ql.hooks.LineageLogger") || postExecHooks.contains("org.apache.atlas.hive.hook.HiveHook")) {
ArrayList<Transform> transformations = new ArrayList<Transform>();
transformations.add(new HiveOpConverterPostProc());
transformations.add(new Generator(postExecHooks));
for (Transform t : transformations) {
pCtx = t.transform(pCtx);
}
// we just use view name as location.
queryState.getLineageState().mapDirToOp(new Path(createVwDesc.getViewName()), sinkOp);
}
return;
}
}
// 6. Generate table access stats if required
if (HiveConf.getBoolVar(this.conf, HiveConf.ConfVars.HIVE_STATS_COLLECT_TABLEKEYS)) {
TableAccessAnalyzer tableAccessAnalyzer = new TableAccessAnalyzer(pCtx);
setTableAccessInfo(tableAccessAnalyzer.analyzeTableAccess());
}
// 7. Perform Logical optimization
if (LOG.isDebugEnabled()) {
LOG.debug("Before logical optimization\n" + Operator.toString(pCtx.getTopOps().values()));
}
Optimizer optm = new Optimizer();
optm.setPctx(pCtx);
optm.initialize(conf);
pCtx = optm.optimize();
if (pCtx.getColumnAccessInfo() != null) {
// set ColumnAccessInfo for view column authorization
setColumnAccessInfo(pCtx.getColumnAccessInfo());
}
if (LOG.isDebugEnabled()) {
LOG.debug("After logical optimization\n" + Operator.toString(pCtx.getTopOps().values()));
}
// 8. Generate column access stats if required - wait until column pruning
// takes place during optimization
boolean isColumnInfoNeedForAuth = SessionState.get().isAuthorizationModeV2() && HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED);
if (isColumnInfoNeedForAuth || HiveConf.getBoolVar(this.conf, HiveConf.ConfVars.HIVE_STATS_COLLECT_SCANCOLS)) {
ColumnAccessAnalyzer columnAccessAnalyzer = new ColumnAccessAnalyzer(pCtx);
// view column access info is carried by this.getColumnAccessInfo().
setColumnAccessInfo(columnAccessAnalyzer.analyzeColumnAccess(this.getColumnAccessInfo()));
}
// 9. Optimize the physical op tree and translate to the target execution engine (MR, TEZ, ...)
if (!ctx.getExplainLogical()) {
TaskCompiler compiler = TaskCompilerFactory.getCompiler(conf, pCtx);
compiler.init(queryState, console, db);
compiler.compile(pCtx, rootTasks, inputs, outputs);
fetchTask = pCtx.getFetchTask();
}
// find all ACID FileSinkOperators
QueryPlanPostProcessor qp = new QueryPlanPostProcessor(rootTasks, acidFileSinks, ctx.getExecutionId());
LOG.info("Completed plan generation");
// 10. put accessed columns to readEntity
if (HiveConf.getBoolVar(this.conf, HiveConf.ConfVars.HIVE_STATS_COLLECT_SCANCOLS)) {
putAccessedColumnsToReadEntity(inputs, columnAccessInfo);
}
if (isCacheEnabled && lookupInfo != null) {
if (queryCanBeCached()) {
QueryResultsCache.QueryInfo queryInfo = createCacheQueryInfoForQuery(lookupInfo);
// Specify that the results of this query can be cached.
setCacheUsage(new CacheUsage(CacheUsage.CacheStatus.CAN_CACHE_QUERY_RESULTS, queryInfo));
}
}
}
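One small, reusable piece of the view-creation branch above is the way it decides whether lineage should be computed: it splits the configured post-execution hooks and looks for known lineage-related hook classes. The sketch below is condensed from the snippet; conf and the hook class names are the ones used above:
// Sketch only: check whether a lineage-related post-execution hook is configured.
Set<String> postExecHooks = Sets.newHashSet(Splitter.on(",").trimResults().omitEmptyStrings()
    .split(Strings.nullToEmpty(HiveConf.getVar(conf, HiveConf.ConfVars.POSTEXECHOOKS))));
boolean lineageHookConfigured =
    postExecHooks.contains("org.apache.hadoop.hive.ql.hooks.LineageLogger")
        || postExecHooks.contains("org.apache.hadoop.hive.ql.hooks.PostExecutePrinter")
        || postExecHooks.contains("org.apache.atlas.hive.hook.HiveHook");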
Use of org.apache.hadoop.hive.ql.metadata.HiveException in project hive by apache.
The class SemanticAnalyzer, method validate.
@Override
public void validate() throws SemanticException {
LOG.debug("validation start");
boolean wasAcidChecked = false;
// Validate that inputs and outputs have the right protect mode for executing the query
for (ReadEntity readEntity : getInputs()) {
ReadEntity.Type type = readEntity.getType();
if (type != ReadEntity.Type.TABLE && type != ReadEntity.Type.PARTITION) {
// in the current implementation this should never happen, but the check is kept here to make the logic complete
continue;
}
Table tbl = readEntity.getTable();
Partition p = readEntity.getPartition();
if (p != null) {
tbl = p.getTable();
}
if (tbl != null && AcidUtils.isTransactionalTable(tbl)) {
transactionalInQuery = true;
if (!wasAcidChecked) {
checkAcidTxnManager(tbl);
}
wasAcidChecked = true;
}
}
for (WriteEntity writeEntity : getOutputs()) {
WriteEntity.Type type = writeEntity.getType();
if (type == WriteEntity.Type.PARTITION || type == WriteEntity.Type.DUMMYPARTITION) {
String conflictingArchive = null;
try {
Partition usedp = writeEntity.getPartition();
Table tbl = usedp.getTable();
if (AcidUtils.isTransactionalTable(tbl)) {
transactionalInQuery = true;
if (!wasAcidChecked) {
checkAcidTxnManager(tbl);
}
wasAcidChecked = true;
}
LOG.debug("validated " + usedp.getName());
LOG.debug(usedp.getTable().getTableName());
WriteEntity.WriteType writeType = writeEntity.getWriteType();
if (writeType != WriteType.UPDATE && writeType != WriteType.DELETE) {
// Do not check for ACID; it does not create new parts and this is expensive as hell.
// TODO: add an API to get table name list for archived parts with a single call;
// nobody uses this so we could skip the whole thing.
conflictingArchive = ArchiveUtils.conflictingArchiveNameOrNull(db, tbl, usedp.getSpec());
}
} catch (HiveException e) {
throw new SemanticException(e);
}
if (conflictingArchive != null) {
String message = String.format("Insert conflict with existing archive: %s", conflictingArchive);
throw new SemanticException(message);
}
} else if (type == WriteEntity.Type.TABLE) {
Table tbl = writeEntity.getTable();
if (AcidUtils.isTransactionalTable(tbl)) {
transactionalInQuery = true;
if (!wasAcidChecked) {
checkAcidTxnManager(tbl);
}
wasAcidChecked = true;
}
}
if (type != WriteEntity.Type.TABLE && type != WriteEntity.Type.PARTITION) {
LOG.debug("not validating writeEntity, because entity is neither table nor partition");
continue;
}
}
boolean reworkMapredWork = HiveConf.getBoolVar(this.conf, HiveConf.ConfVars.HIVE_REWORK_MAPREDWORK);
// validate all tasks
for (Task<? extends Serializable> rootTask : rootTasks) {
validate(rootTask, reworkMapredWork);
}
}
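Both loops in validate() apply the same ACID pattern: mark the query as transactional whenever a transactional table is involved, but validate the transaction manager at most once. A minimal sketch of that pattern; checkTransactionalTables and referencedTables are hypothetical, while AcidUtils.isTransactionalTable, checkAcidTxnManager, and transactionalInQuery come from the snippet above:
// Sketch only: flag transactional tables but check the transaction manager only once.
private void checkTransactionalTables(Iterable<Table> referencedTables) throws SemanticException {
  boolean wasAcidChecked = false;
  for (Table tbl : referencedTables) {
    if (tbl != null && AcidUtils.isTransactionalTable(tbl)) {
      transactionalInQuery = true;
      if (!wasAcidChecked) {
        // fails analysis if the configured transaction manager cannot handle ACID tables
        checkAcidTxnManager(tbl);
        wasAcidChecked = true;
      }
    }
  }
}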