Use of org.apache.hadoop.hive.ql.hooks.ReadEntity in project hive by apache.
The class DDLSemanticAnalyzer, method analyzeShowColumns.
private void analyzeShowColumns(ASTNode ast) throws SemanticException {
// table name has to be present so min child 1 and max child 4
if (ast.getChildCount() > 4 || ast.getChildCount() < 1) {
throw new SemanticException(ErrorMsg.INVALID_AST_TREE.getMsg(ast.toStringTree()));
}
String tableName = getUnescapedName((ASTNode) ast.getChild(0));
ShowColumnsDesc showColumnsDesc = null;
String pattern = null;
switch(ast.getChildCount()) {
case 1: // only tablename, no pattern and db
showColumnsDesc = new ShowColumnsDesc(ctx.getResFile(), tableName);
break;
case 2: // tablename and pattern
pattern = unescapeSQLString(ast.getChild(1).getText());
showColumnsDesc = new ShowColumnsDesc(ctx.getResFile(), tableName, pattern);
break;
case 3: // specifies db
if (tableName.contains(".")) {
throw new SemanticException("Duplicates declaration for database name");
}
tableName = getUnescapedName((ASTNode) ast.getChild(2)) + "." + tableName;
showColumnsDesc = new ShowColumnsDesc(ctx.getResFile(), tableName);
break;
case 4: // specifies db and pattern
if (tableName.contains(".")) {
throw new SemanticException("Duplicates declaration for database name");
}
tableName = getUnescapedName((ASTNode) ast.getChild(2)) + "." + tableName;
pattern = unescapeSQLString(ast.getChild(3).getText());
showColumnsDesc = new ShowColumnsDesc(ctx.getResFile(), tableName, pattern);
break;
default:
break;
}
Table tab = getTable(tableName);
inputs.add(new ReadEntity(tab));
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), showColumnsDesc)));
setFetchTask(createFetchTask(showColumnsDesc.getSchema()));
}
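The four child-count cases above map one-to-one to the SHOW COLUMNS variants below. This is a hedged usage sketch over Hive JDBC: the connection URL, database, table, and pattern are placeholders, and the LIKE-pattern form assumes a Hive version that supports it.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class ShowColumnsVariants {
  public static void main(String[] args) throws Exception {
    try (Connection conn = DriverManager.getConnection("jdbc:hive2://localhost:10000/default");
         Statement stmt = conn.createStatement()) {
      String[] statements = {
          // 1 child: table name only
          "SHOW COLUMNS IN sample_table",
          // 2 children: table name plus LIKE pattern
          "SHOW COLUMNS IN sample_table LIKE 'col*'",
          // 3 children: table name plus explicit database
          "SHOW COLUMNS IN sample_table IN sample_db",
          // 4 children: table name, database and pattern
          "SHOW COLUMNS IN sample_table IN sample_db LIKE 'col*'"
      };
      for (String sql : statements) {
        try (ResultSet rs = stmt.executeQuery(sql)) {
          while (rs.next()) {
            System.out.println(sql + " -> " + rs.getString(1));
          }
        }
      }
    }
  }
}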
Use of org.apache.hadoop.hive.ql.hooks.ReadEntity in project hive by apache.
The class DDLSemanticAnalyzer, method analyzeShowCreateDatabase.
private void analyzeShowCreateDatabase(ASTNode ast) throws SemanticException {
String dbName = getUnescapedName((ASTNode) ast.getChild(0));
ShowCreateDatabaseDesc showCreateDbDesc = new ShowCreateDatabaseDesc(dbName, ctx.getResFile().toString());
Database database = getDatabase(dbName);
inputs.add(new ReadEntity(database));
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), showCreateDbDesc)));
setFetchTask(createFetchTask(showCreateDbDesc.getSchema()));
}
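The reason these analyzers call inputs.add(new ReadEntity(...)) is that the inputs set is later handed to authorization, lineage, and pre/post-execution hooks. Below is a minimal pre-execution hook sketch that consumes those entities, assuming the standard ExecuteWithHookContext interface; the class name and log output are placeholders, and such a hook would be registered via hive.exec.pre.hooks.

import org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext;
import org.apache.hadoop.hive.ql.hooks.HookContext;
import org.apache.hadoop.hive.ql.hooks.ReadEntity;

public class LogReadEntitiesHook implements ExecuteWithHookContext {
  @Override
  public void run(HookContext hookContext) throws Exception {
    // For SHOW CREATE DATABASE this set contains the Database entity added above;
    // for the table-level analyzers it contains the Table (or Partition) being read.
    for (ReadEntity input : hookContext.getInputs()) {
      System.out.println("read entity: " + input.getType() + " -> " + input.getName());
    }
  }
}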
Use of org.apache.hadoop.hive.ql.hooks.ReadEntity in project hive by apache.
The class DDLSemanticAnalyzer, method analyzeExchangePartition.
private void analyzeExchangePartition(String[] qualified, ASTNode ast) throws SemanticException {
Table destTable = getTable(qualified);
Table sourceTable = getTable(getUnescapedName((ASTNode) ast.getChild(1)));
// Get the partition specs
Map<String, String> partSpecs = getValidatedPartSpec(sourceTable, (ASTNode) ast.getChild(0), conf, false);
validatePartitionValues(partSpecs);
boolean sameColumns = MetaStoreUtils.compareFieldColumns(destTable.getAllCols(), sourceTable.getAllCols());
boolean samePartitions = MetaStoreUtils.compareFieldColumns(destTable.getPartitionKeys(), sourceTable.getPartitionKeys());
if (!sameColumns || !samePartitions) {
throw new SemanticException(ErrorMsg.TABLES_INCOMPATIBLE_SCHEMAS.getMsg());
}
// Exchange moves partition files between tables; for transactional tables the moved files' write IDs may not be valid in the destination, and it may affect snapshot isolation for on-going txns as well.
if (AcidUtils.isTransactionalTable(sourceTable) || AcidUtils.isTransactionalTable(destTable)) {
throw new SemanticException(ErrorMsg.EXCHANGE_PARTITION_NOT_ALLOWED_WITH_TRANSACTIONAL_TABLES.getMsg());
}
// check if source partition exists
getPartitions(sourceTable, partSpecs, true);
// Verify that the specified partition values are continuous:
// if a subpartition value is specified without its parent partition's value,
// then we throw an exception
int counter = isPartitionValueContinuous(sourceTable.getPartitionKeys(), partSpecs);
if (counter < 0) {
throw new SemanticException(ErrorMsg.PARTITION_VALUE_NOT_CONTINUOUS.getMsg(partSpecs.toString()));
}
List<Partition> destPartitions = null;
try {
destPartitions = getPartitions(destTable, partSpecs, true);
} catch (SemanticException ex) {
// We should expect a semantic exception being thrown as this partition
// should not be present.
}
if (destPartitions != null) {
// If any destination partition is present then throw a Semantic Exception.
throw new SemanticException(ErrorMsg.PARTITION_EXISTS.getMsg(destPartitions.toString()));
}
AlterTableExchangePartition alterTableExchangePartition = new AlterTableExchangePartition(sourceTable, destTable, partSpecs);
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTableExchangePartition)));
inputs.add(new ReadEntity(sourceTable));
outputs.add(new WriteEntity(destTable, WriteType.DDL_SHARED));
}
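The "continuous" requirement above means the supplied partition columns must form an unbroken prefix of the table's partition keys. The following is a hypothetical re-implementation of that check, mirroring what isPartitionValueContinuous is expected to return; the class and method names are made up for illustration.

import java.util.List;
import java.util.Map;
import org.apache.hadoop.hive.metastore.api.FieldSchema;

final class PartitionSpecChecks {
  private PartitionSpecChecks() {
  }

  /** Returns how many leading partition keys are covered, or -1 if a gap is found. */
  static int prefixLength(List<FieldSchema> partitionKeys, Map<String, String> partSpec) {
    int covered = 0;
    boolean gapSeen = false;
    for (FieldSchema key : partitionKeys) {
      boolean present = partSpec.containsKey(key.getName());
      if (present && gapSeen) {
        // a subpartition value was given without its parent partition's value
        return -1;
      }
      if (present) {
        covered++;
      } else {
        gapSeen = true;
      }
    }
    return covered;
  }
}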
Use of org.apache.hadoop.hive.ql.hooks.ReadEntity in project hive by apache.
The class DDLSemanticAnalyzer, method analyzeAlterMaterializedViewRewrite.
private void analyzeAlterMaterializedViewRewrite(String mvName, ASTNode ast) throws SemanticException {
// Value for the flag
boolean enableFlag;
switch(ast.getChild(0).getType()) {
case HiveParser.TOK_REWRITE_ENABLED:
enableFlag = true;
break;
case HiveParser.TOK_REWRITE_DISABLED:
enableFlag = false;
break;
default:
throw new SemanticException("Invalid alter materialized view expression");
}
AlterMaterializedViewDesc alterMVDesc = new AlterMaterializedViewDesc(AlterMaterializedViewTypes.UPDATE_REWRITE_FLAG);
alterMVDesc.setMaterializedViewName(mvName);
alterMVDesc.setRewriteEnableFlag(enableFlag);
// It can be fully qualified name or use default database
Table materializedViewTable = getTable(mvName, true);
// Rewriting can only be enabled if the materialized view only uses transactional (MM and ACID) tables
if (enableFlag) {
for (String tableName : materializedViewTable.getCreationMetadata().getTablesUsed()) {
Table table = getTable(tableName, true);
if (!AcidUtils.isTransactionalTable(table)) {
throw new SemanticException("Automatic rewriting for materialized view cannot " + "be enabled if the materialized view uses non-transactional tables");
}
}
}
inputs.add(new ReadEntity(materializedViewTable));
outputs.add(new WriteEntity(materializedViewTable, WriteEntity.WriteType.DDL_EXCLUSIVE));
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterMVDesc)));
}
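The transactional check relied on here boils down to the table's "transactional" property. Below is a minimal sketch of that property check, under the assumption that this is the main signal AcidUtils.isTransactionalTable inspects; the helper class itself is hypothetical.

import java.util.Map;

final class TransactionalCheck {
  private TransactionalCheck() {
  }

  /** True if the table parameters mark the table as transactional (MM or full ACID). */
  static boolean isTransactional(Map<String, String> tableParameters) {
    return tableParameters != null
        && "true".equalsIgnoreCase(tableParameters.get("transactional"));
  }
}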
Use of org.apache.hadoop.hive.ql.hooks.ReadEntity in project hive by apache.
The class DDLSemanticAnalyzer, method analyzeAltertableSkewedby.
/**
 * Analyze an ALTER TABLE ... SKEWED BY statement.
 *
 * @param ast node
 * @throws SemanticException
 */
private void analyzeAltertableSkewedby(String[] qualified, ASTNode ast) throws SemanticException {
/**
* Throw an error if the user tries to use the DDL with
* hive.internal.ddl.list.bucketing.enable set to false.
*/
HiveConf hiveConf = SessionState.get().getConf();
Table tab = getTable(qualified);
inputs.add(new ReadEntity(tab));
outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL_EXCLUSIVE));
validateAlterTableType(tab, AlterTableTypes.ADDSKEWEDBY);
String tableName = getDotName(qualified);
if (ast.getChildCount() == 0) {
/* Convert a skewed table to non-skewed table. */
AlterTableDesc alterTblDesc = new AlterTableDesc(tableName, true, new ArrayList<String>(), new ArrayList<List<String>>());
alterTblDesc.setStoredAsSubDirectories(false);
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTblDesc)));
} else {
switch(((ASTNode) ast.getChild(0)).getToken().getType()) {
case HiveParser.TOK_TABLESKEWED:
handleAlterTableSkewedBy(ast, tableName, tab);
break;
case HiveParser.TOK_STOREDASDIRS:
handleAlterTableDisableStoredAsDirs(tableName, tab);
break;
default:
assert false;
}
}
}
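A hedged example of the statements that reach this analyzer, issued over Hive JDBC; the connection URL, table name, skewed column, and skewed values are placeholders.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class AlterTableSkewedExamples {
  public static void main(String[] args) throws Exception {
    try (Connection conn = DriverManager.getConnection("jdbc:hive2://localhost:10000/default");
         Statement stmt = conn.createStatement()) {
      // TOK_TABLESKEWED child: declare skewed columns and values (list bucketing when STORED AS DIRECTORIES)
      stmt.execute("ALTER TABLE sample_table SKEWED BY (key) ON ('1', '5') STORED AS DIRECTORIES");
      // TOK_STOREDASDIRS child: keep the skew info but stop storing skewed values as directories
      stmt.execute("ALTER TABLE sample_table NOT STORED AS DIRECTORIES");
      // No children: convert back to a non-skewed table
      stmt.execute("ALTER TABLE sample_table NOT SKEWED");
    }
  }
}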