Use of org.apache.hadoop.hive.ql.hooks.ReadEntity in project hive by apache.
Class DDLSemanticAnalyzer, method analyzeDescDatabase.
/**
 * Describe database.
 *
 * @param ast
 * @throws SemanticException
 */
private void analyzeDescDatabase(ASTNode ast) throws SemanticException {
  boolean isExtended;
  String dbName;
  if (ast.getChildCount() == 1) {
    dbName = stripQuotes(ast.getChild(0).getText());
    isExtended = false;
  } else if (ast.getChildCount() == 2) {
    dbName = stripQuotes(ast.getChild(0).getText());
    isExtended = true;
  } else {
    throw new SemanticException("Unexpected Tokens at DESCRIBE DATABASE");
  }
  DescDatabaseDesc descDbDesc = new DescDatabaseDesc(ctx.getResFile(), dbName, isExtended);
  inputs.add(new ReadEntity(getDatabase(dbName)));
  rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), descDbDesc)));
  setFetchTask(createFetchTask(descDbDesc.getSchema()));
}
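The recurring pattern in these snippets is the same: resolve the metastore object, wrap it in a ReadEntity, and add it to the analyzer's inputs set so that authorization checks and pre/post-execution hooks can see what the query reads. A minimal standalone sketch of that registration step (the helper class and method names here are hypothetical, not part of Hive):

import java.util.HashSet;
import java.util.Set;

import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.ql.hooks.ReadEntity;

// Hypothetical helper illustrating the registration pattern used above.
class ReadEntityRegistration {
  private final Set<ReadEntity> inputs = new HashSet<>();

  // Record that the compiled plan reads the given database; entities in
  // this set are what hooks and the authorizer later inspect.
  void recordDatabaseRead(Database db) {
    inputs.add(new ReadEntity(db));
  }

  Set<ReadEntity> getInputs() {
    return inputs;
  }
}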
Use of org.apache.hadoop.hive.ql.hooks.ReadEntity in project hive by apache.
Class DDLSemanticAnalyzer, method analyzeShowTables.
private void analyzeShowTables(ASTNode ast) throws SemanticException {
  ShowTablesDesc showTblsDesc;
  String dbName = SessionState.get().getCurrentDatabase();
  String tableNames = null;
  if (ast.getChildCount() > 3) {
    throw new SemanticException(ErrorMsg.INVALID_AST_TREE.getMsg(ast.toStringTree()));
  }
  switch (ast.getChildCount()) {
  case 1: // Uses a pattern
    tableNames = unescapeSQLString(ast.getChild(0).getText());
    showTblsDesc = new ShowTablesDesc(ctx.getResFile(), dbName, tableNames);
    break;
  case 2: // Specifies a DB
    assert (ast.getChild(0).getType() == HiveParser.TOK_FROM);
    dbName = unescapeIdentifier(ast.getChild(1).getText());
    validateDatabase(dbName);
    showTblsDesc = new ShowTablesDesc(ctx.getResFile(), dbName);
    break;
  case 3: // Uses a pattern and specifies a DB
    assert (ast.getChild(0).getType() == HiveParser.TOK_FROM);
    dbName = unescapeIdentifier(ast.getChild(1).getText());
    tableNames = unescapeSQLString(ast.getChild(2).getText());
    validateDatabase(dbName);
    showTblsDesc = new ShowTablesDesc(ctx.getResFile(), dbName, tableNames);
    break;
  default: // No pattern or DB
    showTblsDesc = new ShowTablesDesc(ctx.getResFile(), dbName);
    break;
  }
  inputs.add(new ReadEntity(getDatabase(dbName)));
  rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), showTblsDesc)));
  setFetchTask(createFetchTask(showTblsDesc.getSchema()));
}
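The child counts correspond to the surface forms of the statement: no children for a bare SHOW TABLES (the default branch), one child for SHOW TABLES 'pattern', two for SHOW TABLES FROM db (TOK_FROM plus the database name), and three for SHOW TABLES FROM db 'pattern'. In every branch it is the resolved database, not the individual tables, that gets registered as the ReadEntity.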
Use of org.apache.hadoop.hive.ql.hooks.ReadEntity in project hive by apache.
Class DDLSemanticAnalyzer, method analyzeShowColumns.
private void analyzeShowColumns(ASTNode ast) throws SemanticException {
  // table name has to be present, so min child 1 and max child 4
  if (ast.getChildCount() > 4 || ast.getChildCount() < 1) {
    throw new SemanticException(ErrorMsg.INVALID_AST_TREE.getMsg(ast.toStringTree()));
  }
  String tableName = getUnescapedName((ASTNode) ast.getChild(0));
  ShowColumnsDesc showColumnsDesc = null;
  String pattern = null;
  switch (ast.getChildCount()) {
  case 1: // only table name, no pattern and no DB
    showColumnsDesc = new ShowColumnsDesc(ctx.getResFile(), tableName);
    break;
  case 2: // table name and pattern
    pattern = unescapeSQLString(ast.getChild(1).getText());
    showColumnsDesc = new ShowColumnsDesc(ctx.getResFile(), tableName, pattern);
    break;
  case 3: // specifies DB
    if (tableName.contains(".")) {
      throw new SemanticException("Duplicates declaration for database name");
    }
    tableName = getUnescapedName((ASTNode) ast.getChild(2)) + "." + tableName;
    showColumnsDesc = new ShowColumnsDesc(ctx.getResFile(), tableName);
    break;
  case 4: // specifies DB and pattern
    if (tableName.contains(".")) {
      throw new SemanticException("Duplicates declaration for database name");
    }
    tableName = getUnescapedName((ASTNode) ast.getChild(2)) + "." + tableName;
    pattern = unescapeSQLString(ast.getChild(3).getText());
    showColumnsDesc = new ShowColumnsDesc(ctx.getResFile(), tableName, pattern);
    break;
  default:
    break;
  }
  Table tab = getTable(tableName);
  inputs.add(new ReadEntity(tab));
  rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), showColumnsDesc)));
  setFetchTask(createFetchTask(showColumnsDesc.getSchema()));
}
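Unlike the database-level commands above, SHOW COLUMNS resolves the (possibly db-qualified) table name and registers the Table itself as the ReadEntity. The tableName.contains(".") guard rejects statements that name the database twice, once inside the qualified table name and once in the FROM/IN clause.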
Use of org.apache.hadoop.hive.ql.hooks.ReadEntity in project hive by apache.
Class DDLSemanticAnalyzer, method analyzeShowCreateDatabase.
private void analyzeShowCreateDatabase(ASTNode ast) throws SemanticException {
  String dbName = getUnescapedName((ASTNode) ast.getChild(0));
  ShowCreateDatabaseDesc showCreateDbDesc = new ShowCreateDatabaseDesc(dbName, ctx.getResFile().toString());
  Database database = getDatabase(dbName);
  inputs.add(new ReadEntity(database));
  rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), showCreateDbDesc)));
  setFetchTask(createFetchTask(showCreateDbDesc.getSchema()));
}
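As in the other metadata commands here, the output travels through the result file: the DDL task writes its results to ctx.getResFile(), and the fetch task created from the descriptor's schema streams that file back to the client.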
Use of org.apache.hadoop.hive.ql.hooks.ReadEntity in project hive by apache.
Class DDLSemanticAnalyzer, method analyzeExchangePartition.
private void analyzeExchangePartition(String[] qualified, ASTNode ast) throws SemanticException {
  Table destTable = getTable(qualified);
  Table sourceTable = getTable(getUnescapedName((ASTNode) ast.getChild(1)));
  // Get the partition specs
  Map<String, String> partSpecs = getValidatedPartSpec(sourceTable, (ASTNode) ast.getChild(0), conf, false);
  validatePartitionValues(partSpecs);
  boolean sameColumns = MetaStoreUtils.compareFieldColumns(destTable.getAllCols(), sourceTable.getAllCols());
  boolean samePartitions = MetaStoreUtils.compareFieldColumns(destTable.getPartitionKeys(), sourceTable.getPartitionKeys());
  if (!sameColumns || !samePartitions) {
    throw new SemanticException(ErrorMsg.TABLES_INCOMPATIBLE_SCHEMAS.getMsg());
  }
  // Exchange partition is not allowed with transactional tables: moved
  // files with write IDs may not be valid, and it may affect snapshot
  // isolation for on-going txns as well.
  if (AcidUtils.isTransactionalTable(sourceTable) || AcidUtils.isTransactionalTable(destTable)) {
    throw new SemanticException(ErrorMsg.EXCHANGE_PARTITION_NOT_ALLOWED_WITH_TRANSACTIONAL_TABLES.getMsg());
  }
  // check if source partition exists
  getPartitions(sourceTable, partSpecs, true);
  // Verify that the partitions specified are continuous.
  // If a subpartition value is specified without specifying a partition's value,
  // then we throw an exception.
  int counter = isPartitionValueContinuous(sourceTable.getPartitionKeys(), partSpecs);
  if (counter < 0) {
    throw new SemanticException(ErrorMsg.PARTITION_VALUE_NOT_CONTINUOUS.getMsg(partSpecs.toString()));
  }
  List<Partition> destPartitions = null;
  try {
    destPartitions = getPartitions(destTable, partSpecs, true);
  } catch (SemanticException ex) {
    // We expect a SemanticException to be thrown here, as this partition
    // should not be present in the destination table.
  }
  if (destPartitions != null) {
    // If any destination partition is present, throw a SemanticException.
    throw new SemanticException(ErrorMsg.PARTITION_EXISTS.getMsg(destPartitions.toString()));
  }
  AlterTableExchangePartition alterTableExchangePartition = new AlterTableExchangePartition(sourceTable, destTable, partSpecs);
  rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTableExchangePartition)));
  inputs.add(new ReadEntity(sourceTable));
  outputs.add(new WriteEntity(destTable, WriteType.DDL_SHARED));
}
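The continuity check referenced above walks the table's partition keys in order and requires that any key given a value be preceded only by keys that also have values. A minimal sketch consistent with how the method is used here (the real implementation lives in DDLSemanticAnalyzer; treat this as illustrative):

import java.util.List;
import java.util.Map;

import org.apache.hadoop.hive.metastore.api.FieldSchema;

class PartitionSpecCheck {
  // Returns the number of leading partition keys covered by the spec,
  // or -1 if a value appears after a gap (e.g. specifying hour without
  // day for a table partitioned by (day, hour)).
  static int isPartitionValueContinuous(List<FieldSchema> partitionKeys,
      Map<String, String> partSpec) {
    int counter = 0;
    for (FieldSchema key : partitionKeys) {
      if (partSpec.containsKey(key.getName())) {
        counter++;
        continue;
      }
      // First key without a value: the spec is continuous only if it
      // names no keys beyond this point.
      return partSpec.size() == counter ? counter : -1;
    }
    return counter;
  }
}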