use of org.apache.hadoop.hive.ql.plan.DDLWork in project hive by apache.
In class DDLSemanticAnalyzer, method analyzeShowCreateTable.
private void analyzeShowCreateTable(ASTNode ast) throws SemanticException {
  ShowCreateTableDesc showCreateTblDesc;
  String tableName = getUnescapedName((ASTNode) ast.getChild(0));
  showCreateTblDesc = new ShowCreateTableDesc(tableName, ctx.getResFile().toString());
  Table tab = getTable(tableName);
  if (tab.getTableType() == org.apache.hadoop.hive.metastore.TableType.INDEX_TABLE) {
    throw new SemanticException(ErrorMsg.SHOW_CREATETABLE_INDEX.getMsg(tableName + " has table type INDEX_TABLE"));
  }
  inputs.add(new ReadEntity(tab));
  rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), showCreateTblDesc), conf));
  setFetchTask(createFetchTask(showCreateTblDesc.getSchema()));
}
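SHOW CREATE TABLE is a query-like DDL: the analyzer registers a DDLWork task and then a fetch task so the generated DDL text can be read back by the client. Below is a minimal sketch of exercising this path through an embedded Driver; it is not taken from the Hive sources on this page, and the class name, table name, and result handling are illustrative assumptions.

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.Driver;
import org.apache.hadoop.hive.ql.session.SessionState;

public class ShowCreateTableExample {
  public static void main(String[] args) throws Exception {
    HiveConf conf = new HiveConf();   // assumes hive-site.xml is available on the classpath
    SessionState.start(conf);         // a session is required before constructing a Driver
    Driver driver = new Driver(conf);
    // Compiling this command is where analyzeShowCreateTable runs.
    driver.run("SHOW CREATE TABLE sample_table");
    List<String> ddl = new ArrayList<String>();
    driver.getResults(ddl);           // the fetch task installed by setFetchTask returns the DDL text
    for (String line : ddl) {
      System.out.println(line);
    }
  }
}

The same embedding applies to the other analyzer methods shown on this page; only the command string changes.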
use of org.apache.hadoop.hive.ql.plan.DDLWork in project hive by apache.
In class DDLSemanticAnalyzer, method analyzeLockDatabase.
private void analyzeLockDatabase(ASTNode ast) throws SemanticException {
  String dbName = unescapeIdentifier(ast.getChild(0).getText());
  String mode = unescapeIdentifier(ast.getChild(1).getText().toUpperCase());
  inputs.add(new ReadEntity(getDatabase(dbName)));
  // LOCK DATABASE acquires the lock explicitly; the operation itself does not
  // need to be locked. Add the WriteEntity with WriteType DDL_NO_LOCK so it
  // does not conflict with Hive's transaction handling.
  outputs.add(new WriteEntity(getDatabase(dbName), WriteType.DDL_NO_LOCK));
  LockDatabaseDesc lockDatabaseDesc = new LockDatabaseDesc(dbName, mode, HiveConf.getVar(conf, ConfVars.HIVEQUERYID));
  lockDatabaseDesc.setQueryStr(ctx.getCmd());
  DDLWork work = new DDLWork(getInputs(), getOutputs(), lockDatabaseDesc);
  rootTasks.add(TaskFactory.get(work, conf));
  ctx.setNeedLockMgr(true);
}
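For context, this path is reached by LOCK DATABASE statements; the second AST child is the lock mode (in Hive's grammar, SHARED or EXCLUSIVE), uppercased here before being passed to LockDatabaseDesc. The WriteEntity uses DDL_NO_LOCK because the explicit lock request itself must not also try to acquire a lock through the lock manager, while ctx.setNeedLockMgr(true) ensures a lock manager is available at execution time to perform the requested lock.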
use of org.apache.hadoop.hive.ql.plan.DDLWork in project hive by apache.
In class DDLSemanticAnalyzer, method analyzeCreateDatabase.
private void analyzeCreateDatabase(ASTNode ast) throws SemanticException {
  String dbName = unescapeIdentifier(ast.getChild(0).getText());
  boolean ifNotExists = false;
  String dbComment = null;
  String dbLocation = null;
  Map<String, String> dbProps = null;
  for (int i = 1; i < ast.getChildCount(); i++) {
    ASTNode childNode = (ASTNode) ast.getChild(i);
    switch (childNode.getToken().getType()) {
      case HiveParser.TOK_IFNOTEXISTS:
        ifNotExists = true;
        break;
      case HiveParser.TOK_DATABASECOMMENT:
        dbComment = unescapeSQLString(childNode.getChild(0).getText());
        break;
      case HiveParser.TOK_DATABASEPROPERTIES:
        dbProps = DDLSemanticAnalyzer.getProps((ASTNode) childNode.getChild(0));
        break;
      case HiveParser.TOK_DATABASELOCATION:
        dbLocation = unescapeSQLString(childNode.getChild(0).getText());
        addLocationToOutputs(dbLocation);
        break;
      default:
        throw new SemanticException("Unrecognized token in CREATE DATABASE statement");
    }
  }
  CreateDatabaseDesc createDatabaseDesc = new CreateDatabaseDesc(dbName, dbComment, dbLocation, ifNotExists);
  if (dbProps != null) {
    createDatabaseDesc.setDatabaseProperties(dbProps);
  }
  Database database = new Database(dbName, dbComment, dbLocation, dbProps);
  outputs.add(new WriteEntity(database, WriteEntity.WriteType.DDL_NO_LOCK));
  rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), createDatabaseDesc), conf));
}
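The optional children map one-to-one onto CREATE DATABASE clauses: TOK_IFNOTEXISTS to IF NOT EXISTS, TOK_DATABASECOMMENT to COMMENT, TOK_DATABASELOCATION to LOCATION, and TOK_DATABASEPROPERTIES to WITH DBPROPERTIES; any other child token is rejected. A statement along the lines of CREATE DATABASE IF NOT EXISTS sales COMMENT 'sales data' LOCATION '/warehouse/sales' WITH DBPROPERTIES ('owner'='etl') (names and path illustrative) would populate all four locals before the CreateDatabaseDesc is built and wrapped in a DDLWork.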
use of org.apache.hadoop.hive.ql.plan.DDLWork in project hive by apache.
In class DDLSemanticAnalyzer, method analyzeAlterTableAddConstraint.
private void analyzeAlterTableAddConstraint(ASTNode ast, String tableName) throws SemanticException {
  ASTNode parent = (ASTNode) ast.getParent();
  ASTNode child = (ASTNode) ast.getChild(0);
  List<SQLPrimaryKey> primaryKeys = new ArrayList<SQLPrimaryKey>();
  List<SQLForeignKey> foreignKeys = new ArrayList<SQLForeignKey>();
  if (child.getToken().getType() == HiveParser.TOK_PRIMARY_KEY) {
    BaseSemanticAnalyzer.processPrimaryKeys(parent, child, primaryKeys);
  } else if (child.getToken().getType() == HiveParser.TOK_FOREIGN_KEY) {
    BaseSemanticAnalyzer.processForeignKeys(parent, child, foreignKeys);
  }
  AlterTableDesc alterTblDesc = new AlterTableDesc(tableName, primaryKeys, foreignKeys);
  rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTblDesc), conf));
}
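Only a PRIMARY KEY or FOREIGN KEY child is processed here; whichever list does not match stays empty, and both lists are passed to the AlterTableDesc constructor. In Hive's grammar such constraints are declared with statements roughly of the form ALTER TABLE t ADD CONSTRAINT pk_t PRIMARY KEY (id) DISABLE NOVALIDATE (identifiers illustrative), since Hive records these constraints as metadata but does not enforce them.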
use of org.apache.hadoop.hive.ql.plan.DDLWork in project hive by apache.
In class DDLSemanticAnalyzer, method analyzeAlterTableProps.
private void analyzeAlterTableProps(String[] qualified, HashMap<String, String> partSpec, ASTNode ast, boolean expectView, boolean isUnset) throws SemanticException {
  String tableName = getDotName(qualified);
  HashMap<String, String> mapProp = getProps((ASTNode) (ast.getChild(0)).getChild(0));
  EnvironmentContext environmentContext = null;
  // We need to check whether the properties are valid, especially for stats:
  // they might be changed via "alter table .. update statistics" or
  // "alter table .. set tblproperties". If the property is not row_count
  // or raw_data_size, it cannot be changed through update statistics.
  boolean changeStatsSucceeded = false;
  for (Entry<String, String> entry : mapProp.entrySet()) {
    // Make sure nothing is changed if anything is wrong.
    if (entry.getKey().equals(StatsSetupConst.ROW_COUNT) || entry.getKey().equals(StatsSetupConst.RAW_DATA_SIZE)) {
      try {
        Long.parseLong(entry.getValue());
        changeStatsSucceeded = true;
      } catch (Exception e) {
        throw new SemanticException("AlterTable " + entry.getKey() + " failed with value " + entry.getValue());
      }
    } else {
      if (queryState.getCommandType().equals(HiveOperation.ALTERTABLE_UPDATETABLESTATS.getOperationName()) || queryState.getCommandType().equals(HiveOperation.ALTERTABLE_UPDATEPARTSTATS.getOperationName())) {
        throw new SemanticException("AlterTable UpdateStats " + entry.getKey() + " failed because the only valid keys are " + StatsSetupConst.ROW_COUNT + " and " + StatsSetupConst.RAW_DATA_SIZE);
      }
    }
    if (changeStatsSucceeded) {
      environmentContext = new EnvironmentContext();
      environmentContext.putToProperties(StatsSetupConst.STATS_GENERATED, StatsSetupConst.USER);
    }
  }
  AlterTableDesc alterTblDesc = null;
  if (isUnset) {
    alterTblDesc = new AlterTableDesc(AlterTableTypes.DROPPROPS, partSpec, expectView);
    if (ast.getChild(1) != null) {
      alterTblDesc.setDropIfExists(true);
    }
  } else {
    alterTblDesc = new AlterTableDesc(AlterTableTypes.ADDPROPS, partSpec, expectView);
  }
  alterTblDesc.setProps(mapProp);
  alterTblDesc.setEnvironmentContext(environmentContext);
  alterTblDesc.setOldName(tableName);
  addInputsOutputsAlterTable(tableName, partSpec, alterTblDesc);
  rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTblDesc), conf));
}
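The net effect is that only the two statistics keys the code checks for, StatsSetupConst.ROW_COUNT and StatsSetupConst.RAW_DATA_SIZE (which correspond to the numRows and rawDataSize table properties), may be set through ALTER TABLE ... UPDATE STATISTICS, and when they are, the EnvironmentContext records STATS_GENERATED as USER so the metastore treats them as user-supplied statistics. Plain SET TBLPROPERTIES and UNSET TBLPROPERTIES requests take the other branches: arbitrary keys are allowed there, and an UNSET with IF EXISTS (a non-null second AST child) sets dropIfExists on the descriptor before it is wrapped in a DDLWork.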