Use of org.apache.hadoop.hive.ql.plan.CreateTableLikeDesc in project hive by apache.
From the class DDLTask, method execute:
@Override
public int execute(DriverContext driverContext) {
  if (driverContext.getCtx().getExplainAnalyze() == AnalyzeState.RUNNING) {
    return 0;
  }
  // Create the db
  Hive db;
  try {
    db = Hive.get(conf);
    CreateDatabaseDesc createDatabaseDesc = work.getCreateDatabaseDesc();
    if (null != createDatabaseDesc) {
      return createDatabase(db, createDatabaseDesc);
    }
    DropDatabaseDesc dropDatabaseDesc = work.getDropDatabaseDesc();
    if (dropDatabaseDesc != null) {
      return dropDatabase(db, dropDatabaseDesc);
    }
    LockDatabaseDesc lockDatabaseDesc = work.getLockDatabaseDesc();
    if (lockDatabaseDesc != null) {
      return lockDatabase(db, lockDatabaseDesc);
    }
    UnlockDatabaseDesc unlockDatabaseDesc = work.getUnlockDatabaseDesc();
    if (unlockDatabaseDesc != null) {
      return unlockDatabase(db, unlockDatabaseDesc);
    }
    SwitchDatabaseDesc switchDatabaseDesc = work.getSwitchDatabaseDesc();
    if (switchDatabaseDesc != null) {
      return switchDatabase(db, switchDatabaseDesc);
    }
    DescDatabaseDesc descDatabaseDesc = work.getDescDatabaseDesc();
    if (descDatabaseDesc != null) {
      return descDatabase(db, descDatabaseDesc);
    }
    AlterDatabaseDesc alterDatabaseDesc = work.getAlterDatabaseDesc();
    if (alterDatabaseDesc != null) {
      return alterDatabase(db, alterDatabaseDesc);
    }
    CreateTableDesc crtTbl = work.getCreateTblDesc();
    if (crtTbl != null) {
      return createTable(db, crtTbl);
    }
    CreateIndexDesc crtIndex = work.getCreateIndexDesc();
    if (crtIndex != null) {
      return createIndex(db, crtIndex);
    }
    AlterIndexDesc alterIndex = work.getAlterIndexDesc();
    if (alterIndex != null) {
      return alterIndex(db, alterIndex);
    }
    DropIndexDesc dropIdx = work.getDropIdxDesc();
    if (dropIdx != null) {
      return dropIndex(db, dropIdx);
    }
    CreateTableLikeDesc crtTblLike = work.getCreateTblLikeDesc();
    if (crtTblLike != null) {
      return createTableLike(db, crtTblLike);
    }
    DropTableDesc dropTbl = work.getDropTblDesc();
    if (dropTbl != null) {
      dropTableOrPartitions(db, dropTbl);
      return 0;
    }
    AlterTableDesc alterTbl = work.getAlterTblDesc();
    if (alterTbl != null) {
      if (alterTbl.getOp() == AlterTableTypes.DROPCONSTRAINT) {
        return dropConstraint(db, alterTbl);
      } else if (alterTbl.getOp() == AlterTableTypes.ADDCONSTRAINT) {
        return addConstraint(db, alterTbl);
      } else {
        return alterTable(db, alterTbl);
      }
    }
    CreateViewDesc crtView = work.getCreateViewDesc();
    if (crtView != null) {
      return createView(db, crtView);
    }
    AddPartitionDesc addPartitionDesc = work.getAddPartitionDesc();
    if (addPartitionDesc != null) {
      return addPartitions(db, addPartitionDesc);
    }
    RenamePartitionDesc renamePartitionDesc = work.getRenamePartitionDesc();
    if (renamePartitionDesc != null) {
      return renamePartition(db, renamePartitionDesc);
    }
    AlterTableSimpleDesc simpleDesc = work.getAlterTblSimpleDesc();
    if (simpleDesc != null) {
      if (simpleDesc.getType() == AlterTableTypes.TOUCH) {
        return touch(db, simpleDesc);
      } else if (simpleDesc.getType() == AlterTableTypes.ARCHIVE) {
        return archive(db, simpleDesc, driverContext);
      } else if (simpleDesc.getType() == AlterTableTypes.UNARCHIVE) {
        return unarchive(db, simpleDesc);
      } else if (simpleDesc.getType() == AlterTableTypes.COMPACT) {
        return compact(db, simpleDesc);
      }
    }
    MsckDesc msckDesc = work.getMsckDesc();
    if (msckDesc != null) {
      return msck(db, msckDesc);
    }
    DescTableDesc descTbl = work.getDescTblDesc();
    if (descTbl != null) {
      return describeTable(db, descTbl);
    }
    DescFunctionDesc descFunc = work.getDescFunctionDesc();
    if (descFunc != null) {
      return describeFunction(db, descFunc);
    }
    ShowDatabasesDesc showDatabases = work.getShowDatabasesDesc();
    if (showDatabases != null) {
      return showDatabases(db, showDatabases);
    }
    ShowTablesDesc showTbls = work.getShowTblsDesc();
    if (showTbls != null) {
      return showTablesOrViews(db, showTbls);
    }
    ShowColumnsDesc showCols = work.getShowColumnsDesc();
    if (showCols != null) {
      return showColumns(db, showCols);
    }
    ShowTableStatusDesc showTblStatus = work.getShowTblStatusDesc();
    if (showTblStatus != null) {
      return showTableStatus(db, showTblStatus);
    }
    ShowTblPropertiesDesc showTblProperties = work.getShowTblPropertiesDesc();
    if (showTblProperties != null) {
      return showTableProperties(db, showTblProperties);
    }
    ShowFunctionsDesc showFuncs = work.getShowFuncsDesc();
    if (showFuncs != null) {
      return showFunctions(db, showFuncs);
    }
    ShowLocksDesc showLocks = work.getShowLocksDesc();
    if (showLocks != null) {
      return showLocks(db, showLocks);
    }
    ShowCompactionsDesc compactionsDesc = work.getShowCompactionsDesc();
    if (compactionsDesc != null) {
      return showCompactions(db, compactionsDesc);
    }
    ShowTxnsDesc txnsDesc = work.getShowTxnsDesc();
    if (txnsDesc != null) {
      return showTxns(db, txnsDesc);
    }
    AbortTxnsDesc abortTxnsDesc = work.getAbortTxnsDesc();
    if (abortTxnsDesc != null) {
      return abortTxns(db, abortTxnsDesc);
    }
    LockTableDesc lockTbl = work.getLockTblDesc();
    if (lockTbl != null) {
      return lockTable(db, lockTbl);
    }
    UnlockTableDesc unlockTbl = work.getUnlockTblDesc();
    if (unlockTbl != null) {
      return unlockTable(db, unlockTbl);
    }
    ShowPartitionsDesc showParts = work.getShowPartsDesc();
    if (showParts != null) {
      return showPartitions(db, showParts);
    }
    ShowCreateDatabaseDesc showCreateDb = work.getShowCreateDbDesc();
    if (showCreateDb != null) {
      return showCreateDatabase(db, showCreateDb);
    }
    ShowCreateTableDesc showCreateTbl = work.getShowCreateTblDesc();
    if (showCreateTbl != null) {
      return showCreateTable(db, showCreateTbl);
    }
    ShowConfDesc showConf = work.getShowConfDesc();
    if (showConf != null) {
      return showConf(db, showConf);
    }
    RoleDDLDesc roleDDLDesc = work.getRoleDDLDesc();
    if (roleDDLDesc != null) {
      return roleDDL(db, roleDDLDesc);
    }
    GrantDesc grantDesc = work.getGrantDesc();
    if (grantDesc != null) {
      return grantOrRevokePrivileges(db, grantDesc.getPrincipals(), grantDesc.getPrivileges(),
          grantDesc.getPrivilegeSubjectDesc(), grantDesc.getGrantor(), grantDesc.getGrantorType(),
          grantDesc.isGrantOption(), true);
    }
    RevokeDesc revokeDesc = work.getRevokeDesc();
    if (revokeDesc != null) {
      return grantOrRevokePrivileges(db, revokeDesc.getPrincipals(), revokeDesc.getPrivileges(),
          revokeDesc.getPrivilegeSubjectDesc(), null, null, revokeDesc.isGrantOption(), false);
    }
    ShowGrantDesc showGrantDesc = work.getShowGrantDesc();
    if (showGrantDesc != null) {
      return showGrants(db, showGrantDesc);
    }
    GrantRevokeRoleDDL grantOrRevokeRoleDDL = work.getGrantRevokeRoleDDL();
    if (grantOrRevokeRoleDDL != null) {
      return grantOrRevokeRole(db, grantOrRevokeRoleDDL);
    }
    ShowIndexesDesc showIndexes = work.getShowIndexesDesc();
    if (showIndexes != null) {
      return showIndexes(db, showIndexes);
    }
    AlterTablePartMergeFilesDesc mergeFilesDesc = work.getMergeFilesDesc();
    if (mergeFilesDesc != null) {
      return mergeFiles(db, mergeFilesDesc, driverContext);
    }
    AlterTableAlterPartDesc alterPartDesc = work.getAlterTableAlterPartDesc();
    if (alterPartDesc != null) {
      return alterTableAlterPart(db, alterPartDesc);
    }
    TruncateTableDesc truncateTableDesc = work.getTruncateTblDesc();
    if (truncateTableDesc != null) {
      return truncateTable(db, truncateTableDesc);
    }
    AlterTableExchangePartition alterTableExchangePartition = work.getAlterTableExchangePartition();
    if (alterTableExchangePartition != null) {
      return exchangeTablePartition(db, alterTableExchangePartition);
    }
    CacheMetadataDesc cacheMetadataDesc = work.getCacheMetadataDesc();
    if (cacheMetadataDesc != null) {
      return cacheMetadata(db, cacheMetadataDesc);
    }
    InsertTableDesc insertTableDesc = work.getInsertTableDesc();
    if (insertTableDesc != null) {
      return insertCommitWork(db, insertTableDesc);
    }
    PreInsertTableDesc preInsertTableDesc = work.getPreInsertTableDesc();
    if (preInsertTableDesc != null) {
      return preInsertWork(db, preInsertTableDesc);
    }
  } catch (Throwable e) {
    failed(e);
    return 1;
  }
  assert false;
  return 0;
}
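
For orientation, here is a hedged sketch of how a CreateTableLikeDesc reaches this dispatch. The constructor call mirrors the twelve-argument signature that SemanticAnalyzer.analyzeCreateTable uses in the second example below; the table names and null defaults are illustrative assumptions, not taken from the source.

// Illustrative only: build a CTLT descriptor and enqueue it as a DDL task.
// Argument order follows the CreateTableLikeDesc constructor call in
// SemanticAnalyzer.analyzeCreateTable() below; the table names are made up.
CreateTableLikeDesc desc = new CreateTableLikeDesc(
    "default.target_tbl", // tableName
    false,                // isExternal
    false,                // isTemporary
    null,                 // default input format (null = take from source table)
    null,                 // default output format
    null,                 // location (null = warehouse default)
    null,                 // default SerDe name
    null,                 // default SerDe properties
    null,                 // table properties
    true,                 // ifNotExists
    "default.source_tbl", // likeTableName
    false);               // isUserStorageFormat
// DDLTask.execute() above finds the descriptor via work.getCreateTblLikeDesc()
// and routes it to createTableLike(db, crtTblLike).
DDLWork ddlWork = new DDLWork(getInputs(), getOutputs(), desc);
rootTasks.add(TaskFactory.get(ddlWork, conf));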
Use of org.apache.hadoop.hive.ql.plan.CreateTableLikeDesc in project hive by apache.
From the class SemanticAnalyzer, method analyzeCreateTable:
/**
 * Analyze the create table command. If it is a regular create-table or
 * create-table-like statement, we create a DDLWork and return true. If it is
 * a create-table-as-select, we get the necessary info such as the SerDe and
 * storage format, put it in the QB, and return false, indicating that the
 * rest of the semantic analyzer needs to deal with the select statement with
 * respect to the SerDe and storage format.
 */
ASTNode analyzeCreateTable(ASTNode ast, QB qb, PlannerContext plannerCtx) throws SemanticException {
  String[] qualifiedTabName = getQualifiedTableName((ASTNode) ast.getChild(0));
  String dbDotTab = getDotName(qualifiedTabName);
  String likeTableName = null;
  List<FieldSchema> cols = new ArrayList<FieldSchema>();
  List<FieldSchema> partCols = new ArrayList<FieldSchema>();
  List<String> bucketCols = new ArrayList<String>();
  List<SQLPrimaryKey> primaryKeys = new ArrayList<SQLPrimaryKey>();
  List<SQLForeignKey> foreignKeys = new ArrayList<SQLForeignKey>();
  List<Order> sortCols = new ArrayList<Order>();
  int numBuckets = -1;
  String comment = null;
  String location = null;
  Map<String, String> tblProps = null;
  boolean ifNotExists = false;
  boolean isExt = false;
  boolean isTemporary = false;
  boolean isMaterialization = false;
  ASTNode selectStmt = null;
  // regular CREATE TABLE
  final int CREATE_TABLE = 0;
  // CREATE TABLE LIKE ... (CTLT)
  final int CTLT = 1;
  // CREATE TABLE AS SELECT ... (CTAS)
  final int CTAS = 2;
  int command_type = CREATE_TABLE;
  List<String> skewedColNames = new ArrayList<String>();
  List<List<String>> skewedValues = new ArrayList<List<String>>();
  Map<List<String>, String> listBucketColValuesMapping = new HashMap<List<String>, String>();
  boolean storedAsDirs = false;
  boolean isUserStorageFormat = false;
  RowFormatParams rowFormatParams = new RowFormatParams();
  StorageFormat storageFormat = new StorageFormat(conf);
  LOG.info("Creating table " + dbDotTab + " position=" + ast.getCharPositionInLine());
  int numCh = ast.getChildCount();
  /*
   * Check the 1st-level children and do simple semantic checks:
   * 1) CTLT and CTAS should not coexist.
   * 2) CTLT or CTAS should not coexist with a column list (target table schema).
   * 3) CTAS does not support partitioning (for now).
   */
  for (int num = 1; num < numCh; num++) {
    ASTNode child = (ASTNode) ast.getChild(num);
    if (storageFormat.fillStorageFormat(child)) {
      isUserStorageFormat = true;
      continue;
    }
    switch (child.getToken().getType()) {
      case HiveParser.TOK_IFNOTEXISTS:
        ifNotExists = true;
        break;
      case HiveParser.KW_EXTERNAL:
        isExt = true;
        break;
      case HiveParser.KW_TEMPORARY:
        isTemporary = true;
        isMaterialization = MATERIALIZATION_MARKER.equals(child.getText());
        break;
      case HiveParser.TOK_LIKETABLE:
        if (child.getChildCount() > 0) {
          likeTableName = getUnescapedName((ASTNode) child.getChild(0));
          if (likeTableName != null) {
            if (command_type == CTAS) {
              throw new SemanticException(ErrorMsg.CTAS_CTLT_COEXISTENCE.getMsg());
            }
            if (cols.size() != 0) {
              throw new SemanticException(ErrorMsg.CTLT_COLLST_COEXISTENCE.getMsg());
            }
          }
          command_type = CTLT;
        }
        break;
      case HiveParser.TOK_QUERY: // CTAS
        if (command_type == CTLT) {
          throw new SemanticException(ErrorMsg.CTAS_CTLT_COEXISTENCE.getMsg());
        }
        if (cols.size() != 0) {
          throw new SemanticException(ErrorMsg.CTAS_COLLST_COEXISTENCE.getMsg());
        }
        if (partCols.size() != 0 || bucketCols.size() != 0) {
          boolean dynPart = HiveConf.getBoolVar(conf, HiveConf.ConfVars.DYNAMICPARTITIONING);
          if (dynPart == false) {
            throw new SemanticException(ErrorMsg.CTAS_PARCOL_COEXISTENCE.getMsg());
          } else {
            // TODO: support dynamic partition for CTAS
            throw new SemanticException(ErrorMsg.CTAS_PARCOL_COEXISTENCE.getMsg());
          }
        }
        if (isExt) {
          throw new SemanticException(ErrorMsg.CTAS_EXTTBL_COEXISTENCE.getMsg());
        }
        command_type = CTAS;
        if (plannerCtx != null) {
          plannerCtx.setCTASToken(child);
        }
        selectStmt = child;
        break;
      case HiveParser.TOK_TABCOLLIST:
        cols = getColumns(child, true, primaryKeys, foreignKeys);
        break;
      case HiveParser.TOK_TABLECOMMENT:
        comment = unescapeSQLString(child.getChild(0).getText());
        break;
      case HiveParser.TOK_TABLEPARTCOLS:
        partCols = getColumns((ASTNode) child.getChild(0), false);
        break;
      case HiveParser.TOK_ALTERTABLE_BUCKETS:
        bucketCols = getColumnNames((ASTNode) child.getChild(0));
        if (child.getChildCount() == 2) {
          numBuckets = Integer.parseInt(child.getChild(1).getText());
        } else {
          sortCols = getColumnNamesOrder((ASTNode) child.getChild(1));
          numBuckets = Integer.parseInt(child.getChild(2).getText());
        }
        break;
      case HiveParser.TOK_TABLEROWFORMAT:
        rowFormatParams.analyzeRowFormat(child);
        break;
      case HiveParser.TOK_TABLELOCATION:
        location = unescapeSQLString(child.getChild(0).getText());
        location = EximUtil.relativeToAbsolutePath(conf, location);
        inputs.add(toReadEntity(location));
        break;
      case HiveParser.TOK_TABLEPROPERTIES:
        tblProps = DDLSemanticAnalyzer.getProps((ASTNode) child.getChild(0));
        break;
      case HiveParser.TOK_TABLESERIALIZER:
        child = (ASTNode) child.getChild(0);
        storageFormat.setSerde(unescapeSQLString(child.getChild(0).getText()));
        if (child.getChildCount() == 2) {
          readProps((ASTNode) (child.getChild(1).getChild(0)), storageFormat.getSerdeProps());
        }
        break;
      case HiveParser.TOK_TABLESKEWED:
        /**
         * Throw an error if the user tries to use the DDL with
         * hive.internal.ddl.list.bucketing.enable set to false.
         */
        HiveConf hiveConf = SessionState.get().getConf();
        // skewed column names
        skewedColNames = analyzeSkewedTablDDLColNames(skewedColNames, child);
        // skewed values
        analyzeDDLSkewedValues(skewedValues, child);
        // stored as directories
        storedAsDirs = analyzeStoredAdDirs(child);
        break;
      default:
        throw new AssertionError("Unknown token: " + child.getToken());
    }
  }
  if (command_type == CREATE_TABLE || command_type == CTLT) {
    queryState.setCommandType(HiveOperation.CREATETABLE);
  } else if (command_type == CTAS) {
    queryState.setCommandType(HiveOperation.CREATETABLE_AS_SELECT);
  } else {
    throw new SemanticException("Unrecognized command.");
  }
  storageFormat.fillDefaultStorageFormat(isExt, false);
  // check for existence of table
  if (ifNotExists) {
    try {
      Table table = getTable(qualifiedTabName, false);
      if (table != null) {
        // table exists
        return null;
      }
    } catch (HiveException e) {
      // should not occur since second parameter to getTableWithQN is false
      throw new IllegalStateException("Unexpected Exception thrown: " + e.getMessage(), e);
    }
  }
  addDbAndTabToOutputs(qualifiedTabName, TableType.MANAGED_TABLE);
  if (isTemporary) {
    if (partCols.size() > 0) {
      throw new SemanticException("Partition columns are not supported on temporary tables");
    }
    if (location == null) {
      // it has the same life cycle as the tmp table
      try {
        // Generate a unique ID for temp table path.
        // This path will be fixed for the life of the temp table.
        Path path = new Path(SessionState.getTempTableSpace(conf), UUID.randomUUID().toString());
        path = Warehouse.getDnsPath(path, conf);
        location = path.toString();
      } catch (MetaException err) {
        throw new SemanticException("Error while generating temp table path:", err);
      }
    }
  }
  // Handle different types of CREATE TABLE command
  switch (command_type) {
    case CREATE_TABLE: // REGULAR CREATE TABLE DDL
      tblProps = addDefaultProperties(tblProps);
      CreateTableDesc crtTblDesc = new CreateTableDesc(dbDotTab, isExt, isTemporary, cols,
          partCols, bucketCols, sortCols, numBuckets, rowFormatParams.fieldDelim,
          rowFormatParams.fieldEscape, rowFormatParams.collItemDelim, rowFormatParams.mapKeyDelim,
          rowFormatParams.lineDelim, comment, storageFormat.getInputFormat(),
          storageFormat.getOutputFormat(), location, storageFormat.getSerde(),
          storageFormat.getStorageHandler(), storageFormat.getSerdeProps(), tblProps, ifNotExists,
          skewedColNames, skewedValues, primaryKeys, foreignKeys);
      crtTblDesc.setStoredAsSubDirectories(storedAsDirs);
      crtTblDesc.setNullFormat(rowFormatParams.nullFormat);
      crtTblDesc.validate(conf);
      // outputs is empty, which means this create table happens in the current
      // database.
      rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), crtTblDesc), conf));
      break;
    case CTLT: // create table like <tbl_name>
      tblProps = addDefaultProperties(tblProps);
      if (isTemporary) {
        Table likeTable = getTable(likeTableName, false);
        if (likeTable != null && likeTable.getPartCols().size() > 0) {
          throw new SemanticException("Partition columns are not supported on temporary tables "
              + "and source table in CREATE TABLE LIKE is partitioned.");
        }
      }
      CreateTableLikeDesc crtTblLikeDesc = new CreateTableLikeDesc(dbDotTab, isExt, isTemporary,
          storageFormat.getInputFormat(), storageFormat.getOutputFormat(), location,
          storageFormat.getSerde(), storageFormat.getSerdeProps(), tblProps, ifNotExists,
          likeTableName, isUserStorageFormat);
      rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), crtTblLikeDesc), conf));
      break;
    case CTAS: // create table as select
      if (isTemporary) {
        if (!ctx.isExplainSkipExecution() && !isMaterialization) {
          String dbName = qualifiedTabName[0];
          String tblName = qualifiedTabName[1];
          SessionState ss = SessionState.get();
          if (ss == null) {
            throw new SemanticException("No current SessionState, cannot create temporary table "
                + dbName + "." + tblName);
          }
          Map<String, Table> tables = SessionHiveMetaStoreClient.getTempTablesForDatabase(dbName);
          if (tables != null && tables.containsKey(tblName)) {
            throw new SemanticException("Temporary table " + dbName + "." + tblName
                + " already exists");
          }
        }
      } else {
        // dumpTable is only used to check the conflict for non-temporary tables
        try {
          Table dumpTable = db.newTable(dbDotTab);
          if (null != db.getTable(dumpTable.getDbName(), dumpTable.getTableName(), false)
              && !ctx.isExplainSkipExecution()) {
            throw new SemanticException(ErrorMsg.TABLE_ALREADY_EXISTS.getMsg(dbDotTab));
          }
        } catch (HiveException e) {
          throw new SemanticException(e);
        }
      }
      if (location != null && location.length() != 0) {
        Path locPath = new Path(location);
        FileSystem curFs = null;
        FileStatus locStats = null;
        try {
          curFs = locPath.getFileSystem(conf);
          if (curFs != null) {
            locStats = curFs.getFileStatus(locPath);
          }
          if (locStats != null && locStats.isDir()) {
            FileStatus[] lStats = curFs.listStatus(locPath);
            if (lStats != null && lStats.length != 0) {
              // Don't throw an exception if the target location only contains the staging-dirs
              for (FileStatus lStat : lStats) {
                if (!lStat.getPath().getName().startsWith(
                    HiveConf.getVar(conf, HiveConf.ConfVars.STAGINGDIR))) {
                  throw new SemanticException(ErrorMsg.CTAS_LOCATION_NONEMPTY.getMsg(location));
                }
              }
            }
          }
        } catch (FileNotFoundException nfe) {
          // we will create the folder if it does not exist.
        } catch (IOException ioE) {
          if (LOG.isDebugEnabled()) {
            LOG.debug("Exception when validate folder ", ioE);
          }
        }
      }
      tblProps = addDefaultProperties(tblProps);
      tableDesc = new CreateTableDesc(qualifiedTabName[0], dbDotTab, isExt, isTemporary, cols,
          partCols, bucketCols, sortCols, numBuckets, rowFormatParams.fieldDelim,
          rowFormatParams.fieldEscape, rowFormatParams.collItemDelim, rowFormatParams.mapKeyDelim,
          rowFormatParams.lineDelim, comment, storageFormat.getInputFormat(),
          storageFormat.getOutputFormat(), location, storageFormat.getSerde(),
          storageFormat.getStorageHandler(), storageFormat.getSerdeProps(), tblProps, ifNotExists,
          skewedColNames, skewedValues, true, primaryKeys, foreignKeys);
      tableDesc.setMaterialization(isMaterialization);
      tableDesc.setStoredAsSubDirectories(storedAsDirs);
      tableDesc.setNullFormat(rowFormatParams.nullFormat);
      qb.setTableDesc(tableDesc);
      return selectStmt;
    default:
      throw new SemanticException("Unrecognized command.");
  }
  return null;
}
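
To tie the two methods together, here is a minimal end-to-end sketch, assuming a HiveConf wired to a working metastore session: the Driver compiles the statement, analyzeCreateTable takes the CTLT branch and enqueues a DDLWork carrying the CreateTableLikeDesc, and DDLTask.execute dispatches it to createTableLike. The table names are illustrative, not from the source.

// Minimal sketch; assumes an initialized HiveConf and a reachable metastore.
HiveConf conf = new HiveConf();
SessionState.start(conf);                        // session required by the analyzer
Driver driver = new Driver(conf);
driver.run("CREATE TABLE source_tbl (id INT)");  // hypothetical source table
// CTLT path: SemanticAnalyzer.analyzeCreateTable() builds a CreateTableLikeDesc,
// and DDLTask.execute() routes it to createTableLike(db, crtTblLike).
driver.run("CREATE TABLE target_tbl LIKE source_tbl");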