Use of org.apache.hadoop.hive.ql.ddl.DDLWork in project hive by apache.
In the class DescDatabaseAnalyzer, the method analyzeInternal:
@Override
public void analyzeInternal(ASTNode root) throws SemanticException {
  if (root.getChildCount() == 0 || root.getChildCount() > 2) {
    throw new SemanticException("Unexpected Tokens at DESCRIBE DATABASE");
  }
  ctx.setResFile(ctx.getLocalTmpPath());
  String databaseName = stripQuotes(root.getChild(0).getText());
  boolean isExtended = root.getChildCount() == 2;
  inputs.add(new ReadEntity(getDatabase(databaseName)));
  DescDatabaseDesc desc = new DescDatabaseDesc(ctx.getResFile(), databaseName, isExtended);
  Task<DDLWork> task = TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc));
  rootTasks.add(task);
  task.setFetchSource(true);
  setFetchTask(createFetchTask(desc.getSchema()));
}
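Every fetch-producing analyzer in this listing repeats the same four-step wiring: wrap the desc in a DDLWork, register the task, mark it as the fetch source, and create the fetch task from the desc's schema. A minimal sketch of that pattern extracted into a helper; the method name addFetchableDdlTask is hypothetical, while DDLDesc, DDLWork, TaskFactory, and the inherited BaseSemanticAnalyzer members are the real Hive APIs used above:

  // Hypothetical helper; a sketch assuming it lives in a subclass of
  // BaseSemanticAnalyzer, where rootTasks, getInputs(), getOutputs() and
  // createFetchTask(schema) are inherited members.
  private void addFetchableDdlTask(DDLDesc desc, String schema) {
    Task<DDLWork> task = TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc));
    rootTasks.add(task);
    // The task writes its rows to ctx.getResFile(); marking it as the fetch
    // source lets the client read those rows back through the fetch task.
    task.setFetchSource(true);
    setFetchTask(createFetchTask(schema));
  }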
Use of org.apache.hadoop.hive.ql.ddl.DDLWork in project hive by apache.
In the class AlterTableAlterPartitionAnalyzer, the method analyzeCommand:
@Override
protected void analyzeCommand(TableName tableName, Map<String, String> partitionSpec, ASTNode command) throws SemanticException {
  Table table = getTable(tableName);
  validateAlterTableType(table, AlterTableType.ALTERPARTITION, false);
  inputs.add(new ReadEntity(table));
  // ALTER TABLE ... PARTITION COLUMN (column newtype) only takes one column at a time.
  ASTNode colAst = (ASTNode) command.getChild(0);
  String name = colAst.getChild(0).getText().toLowerCase();
  String type = getTypeStringFromAST((ASTNode) colAst.getChild(1));
  String comment = (colAst.getChildCount() == 3) ? unescapeSQLString(colAst.getChild(2).getText()) : null;
  FieldSchema newCol = new FieldSchema(unescapeIdentifier(name), type, comment);
  boolean isDefined = false;
  for (FieldSchema col : table.getTTable().getPartitionKeys()) {
    if (col.getName().compareTo(newCol.getName()) == 0) {
      isDefined = true;
    }
  }
  if (!isDefined) {
    throw new SemanticException(ErrorMsg.INVALID_COLUMN.getMsg(newCol.getName()));
  }
  AlterTableAlterPartitionDesc desc = new AlterTableAlterPartitionDesc(tableName.getNotEmptyDbTable(), newCol);
  if (AcidUtils.isTransactionalTable(table)) {
    setAcidDdlDesc(desc);
  }
  rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
}
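The linear scan above keeps iterating even after a match. Since the names are plain Strings, the same membership test can be written with an early exit; a behavior-preserving sketch, assuming the Table and FieldSchema types from the snippet:

  // Equivalent check with an early exit; String.equals matches the
  // compareTo(...) == 0 test used above.
  boolean isDefined = table.getTTable().getPartitionKeys().stream()
      .anyMatch(col -> col.getName().equals(newCol.getName()));
  if (!isDefined) {
    throw new SemanticException(ErrorMsg.INVALID_COLUMN.getMsg(newCol.getName()));
  }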
Use of org.apache.hadoop.hive.ql.ddl.DDLWork in project hive by apache.
In the class DescTableAnalyzer, the method analyzeInternal:
@Override
public void analyzeInternal(ASTNode root) throws SemanticException {
  ctx.setResFile(ctx.getLocalTmpPath());
  ASTNode tableTypeExpr = (ASTNode) root.getChild(0);
  TableName tableName = getQualifiedTableName((ASTNode) tableTypeExpr.getChild(0));
  // if the database is not the current one, validate that it exists
  if (tableName.getDb() != null) {
    db.validateDatabaseExists(tableName.getDb());
  }
  Table table = getTable(tableName);
  // process the second child node, if it exists, to get the partition spec(s)
  Map<String, String> partitionSpec = getPartitionSpec(db, tableTypeExpr, tableName);
  if (partitionSpec != null) {
    // validate that the partition exists
    PartitionUtils.getPartition(db, table, partitionSpec, true);
  }
  // process the third child node, if it exists, to get the column path
  String columnPath = getColumnPath(db, tableTypeExpr, tableName, partitionSpec);
  boolean showColStats = false;
  boolean isFormatted = false;
  boolean isExt = false;
  if (root.getChildCount() == 2) {
    int descOptions = root.getChild(1).getType();
    isFormatted = descOptions == HiveParser.KW_FORMATTED;
    isExt = descOptions == HiveParser.KW_EXTENDED;
    // This is how we differentiate between describing a table and describing a column.
    if (columnPath != null && isFormatted) {
      showColStats = true;
    }
  }
  inputs.add(new ReadEntity(table));
  DescTableDesc desc = new DescTableDesc(ctx.getResFile(), tableName, partitionSpec, columnPath, isExt, isFormatted);
  Task<?> task = TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc));
  rootTasks.add(task);
  task.setFetchSource(true);
  String schema = showColStats ? DescTableDesc.COLUMN_STATISTICS_SCHEMA : DescTableDesc.SCHEMA;
  setFetchTask(createFetchTask(schema));
}
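Because the results travel through a fetch task, a client sees the DESCRIBE output as ordinary result rows. A sketch of exercising this analyzer end to end from an embedded client; Driver, HiveConf, and SessionState are real Hive classes, but their constructors and result APIs have shifted across Hive versions, so treat this as an assumption-laden outline rather than copy-paste code (the table name src and column key are placeholders):

  import java.util.ArrayList;
  import java.util.List;
  import org.apache.hadoop.hive.conf.HiveConf;
  import org.apache.hadoop.hive.ql.Driver;
  import org.apache.hadoop.hive.ql.session.SessionState;

  public class DescribeExample {
    public static void main(String[] args) throws Exception {
      HiveConf conf = new HiveConf();
      SessionState.start(conf);          // analyzers read the current session
      Driver driver = new Driver(conf);  // older-style constructor
      driver.run("DESCRIBE FORMATTED src key");  // columnPath set, so showColStats = true
      List<String> rows = new ArrayList<>();
      driver.getResults(rows);           // rows come from the fetch task wired above
      rows.forEach(System.out::println);
    }
  }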
Use of org.apache.hadoop.hive.ql.ddl.DDLWork in project hive by apache.
In the class ShowTableStatusAnalyzer, the method analyzeInternal:
@Override
public void analyzeInternal(ASTNode root) throws SemanticException {
  if (root.getChildCount() > 3 || root.getChildCount() < 1) {
    throw new SemanticException(ErrorMsg.INVALID_AST_TREE.getMsg());
  }
  ctx.setResFile(ctx.getLocalTmpPath());
  String tableNames = getUnescapedName((ASTNode) root.getChild(0));
  String dbName = SessionState.get().getCurrentDatabase();
  Map<String, String> partitionSpec = null;
  if (root.getChildCount() > 1) {
    for (int i = 1; i < root.getChildCount(); i++) {
      ASTNode child = (ASTNode) root.getChild(i);
      if (child.getToken().getType() == HiveParser.Identifier) {
        dbName = unescapeIdentifier(child.getText());
      } else if (child.getToken().getType() == HiveParser.TOK_PARTSPEC) {
        partitionSpec = getValidatedPartSpec(getTable(tableNames), child, conf, false);
      } else {
        throw new SemanticException(ErrorMsg.INVALID_AST_TREE.getMsg(child.toStringTree() + " , Invalid token " + child.getToken().getType()));
      }
    }
  }
  if (partitionSpec != null) {
    // validate that the partition exists
    PartitionUtils.getPartition(db, getTable(HiveTableName.of(tableNames)), partitionSpec, true);
  }
  ShowTableStatusDesc desc = new ShowTableStatusDesc(ctx.getResFile(), dbName, tableNames, partitionSpec);
  Task<DDLWork> task = TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc));
  rootTasks.add(task);
  task.setFetchSource(true);
  setFetchTask(createFetchTask(ShowTableStatusDesc.SCHEMA));
}
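The if/else chain over the optional children (a database Identifier and a TOK_PARTSPEC, in either order) reads more directly as a switch on the token type, the same style the constraint analyzer below uses. A behavior-preserving sketch using the same HiveParser constants; the surrounding variables (root, tableNames, dbName, partitionSpec, conf) are those of the method above:

  for (int i = 1; i < root.getChildCount(); i++) {
    ASTNode child = (ASTNode) root.getChild(i);
    switch (child.getToken().getType()) {
      case HiveParser.Identifier:
        dbName = unescapeIdentifier(child.getText());
        break;
      case HiveParser.TOK_PARTSPEC:
        partitionSpec = getValidatedPartSpec(getTable(tableNames), child, conf, false);
        break;
      default:
        throw new SemanticException(ErrorMsg.INVALID_AST_TREE.getMsg(
            child.toStringTree() + " , Invalid token " + child.getToken().getType()));
    }
  }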
Use of org.apache.hadoop.hive.ql.ddl.DDLWork in project hive by apache.
In the class AlterTableAddConstraintAnalyzer, the method analyzeCommand:
@Override
protected void analyzeCommand(TableName tableName, Map<String, String> partitionSpec, ASTNode command) throws SemanticException {
  // TODO CAT - for now always use the default catalog. Eventually will want to see if
  // the user specified a catalog.
  List<SQLPrimaryKey> primaryKeys = new ArrayList<>();
  List<SQLForeignKey> foreignKeys = new ArrayList<>();
  List<SQLUniqueConstraint> uniqueConstraints = new ArrayList<>();
  List<SQLCheckConstraint> checkConstraints = new ArrayList<>();
  ASTNode constraintNode = (ASTNode) command.getChild(0);
  switch (constraintNode.getToken().getType()) {
    case HiveParser.TOK_UNIQUE:
      ConstraintsUtils.processUniqueConstraints(tableName, constraintNode, uniqueConstraints);
      break;
    case HiveParser.TOK_PRIMARY_KEY:
      ConstraintsUtils.processPrimaryKeys(tableName, constraintNode, primaryKeys);
      break;
    case HiveParser.TOK_FOREIGN_KEY:
      ConstraintsUtils.processForeignKeys(tableName, constraintNode, foreignKeys);
      break;
    case HiveParser.TOK_CHECK_CONSTRAINT:
      ConstraintsUtils.processCheckConstraints(tableName, constraintNode, null, checkConstraints, command, ctx.getTokenRewriteStream());
      break;
    default:
      throw new SemanticException(ErrorMsg.NOT_RECOGNIZED_CONSTRAINT.getMsg(constraintNode.getToken().getText()));
  }
  Constraints constraints = new Constraints(primaryKeys, foreignKeys, null, uniqueConstraints, null, checkConstraints);
  AlterTableAddConstraintDesc desc = new AlterTableAddConstraintDesc(tableName, null, constraints);
  Table table = getTable(tableName);
  if (AcidUtils.isTransactionalTable(table)) {
    setAcidDdlDesc(desc);
  }
  addInputsOutputsAlterTable(tableName, partitionSpec, desc, desc.getType(), false);
  rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
}
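Only the list matching the parsed constraint token is populated; the remaining positions of the six-argument Constraints constructor are passed as null or left as empty lists. An annotated restatement of the construction above, with the argument roles spelled out; the ordering shown is the one implied by this call, so check the Constraints class in your Hive version before relying on it:

  // Argument roles as implied by the call above; null marks constraint kinds
  // that ALTER TABLE ... ADD CONSTRAINT does not populate in this analyzer.
  Constraints constraints = new Constraints(
      primaryKeys,       // PRIMARY KEY definitions
      foreignKeys,       // FOREIGN KEY definitions
      null,              // NOT NULL constraints: not produced here
      uniqueConstraints, // UNIQUE constraints
      null,              // DEFAULT constraints: not produced here
      checkConstraints); // CHECK constraints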