Use of org.apache.hadoop.hive.ql.ddl.DDLWork in project hive by apache.
The class AlterTableChangeColumnAnalyzer, method analyzeCommand.
@Override
protected void analyzeCommand(TableName tableName, Map<String, String> partitionSpec, ASTNode command)
    throws SemanticException {
  // col_old_name col_new_name column_type [COMMENT col_comment] [FIRST|AFTER column_name] [CASCADE|RESTRICT]
  String oldColumnName = command.getChild(0).getText().toLowerCase();
  String newColumnName = command.getChild(1).getText().toLowerCase();
  String newType = getTypeStringFromAST((ASTNode) command.getChild(2));

  Table table = getTable(tableName);
  SkewedInfo skewInfo = table.getTTable().getSd().getSkewedInfo();
  if ((null != skewInfo) && (null != skewInfo.getSkewedColNames())
      && skewInfo.getSkewedColNames().contains(oldColumnName)) {
    throw new SemanticException(oldColumnName + ErrorMsg.ALTER_TABLE_NOT_ALLOWED_RENAME_SKEWED_COLUMN.getMsg());
  }

  String newComment = null;
  boolean first = false;
  String flagCol = null;
  boolean isCascade = false;
  ASTNode constraintChild = null;
  for (int i = 3; i < command.getChildCount(); i++) {
    ASTNode child = (ASTNode) command.getChild(i);
    switch (child.getToken().getType()) {
      case HiveParser.StringLiteral:
        newComment = unescapeSQLString(child.getText());
        break;
      case HiveParser.TOK_ALTERTABLE_CHANGECOL_AFTER_POSITION:
        flagCol = unescapeIdentifier(child.getChild(0).getText());
        break;
      case HiveParser.KW_FIRST:
        first = true;
        break;
      case HiveParser.TOK_CASCADE:
        isCascade = true;
        break;
      case HiveParser.TOK_RESTRICT:
        break;
      default:
        constraintChild = child;
    }
  }

  Constraints constraints = getConstraints(tableName, command, newColumnName, table, constraintChild);
  AlterTableChangeColumnDesc desc = new AlterTableChangeColumnDesc(tableName, partitionSpec, isCascade,
      constraints, unescapeIdentifier(oldColumnName), unescapeIdentifier(newColumnName), newType, newComment,
      first, flagCol);
  if (AcidUtils.isTransactionalTable(table)) {
    // Note: we might actually need it only when certain changes (e.g. name or type?) are made.
    setAcidDdlDesc(desc);
  }

  addInputsOutputsAlterTable(tableName, partitionSpec, desc, desc.getType(), false);
  rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
}
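All of the ALTER TABLE column analyzers on this page reduce to the same four-step shape. The sketch below distills it; buildDescFromAst and AnyAlterTableDesc are hypothetical placeholders for the per-command parsing logic and for concrete descriptors such as AlterTableChangeColumnDesc, while the surrounding fields and helpers (rootTasks, getInputs(), getOutputs(), setAcidDdlDesc) come from the analyzer base classes used in the real snippets.

// Minimal sketch of the shared ALTER TABLE analyzer pattern. AnyAlterTableDesc and
// buildDescFromAst are hypothetical; everything else mirrors the snippets on this page.
protected void analyzeCommand(TableName tableName, Map<String, String> partitionSpec, ASTNode command)
    throws SemanticException {
  // 1. Translate the command's AST into a typed, serializable descriptor.
  AnyAlterTableDesc desc = buildDescFromAst(tableName, partitionSpec, command);
  // 2. For transactional (ACID) tables, register the descriptor so the schema
  //    change is tracked by the transaction machinery.
  if (AcidUtils.isTransactionalTable(getTable(tableName, true))) {
    setAcidDdlDesc(desc);
  }
  // 3. Record what the statement reads and writes, for authorization and locking.
  addInputsOutputsAlterTable(tableName, partitionSpec, desc, desc.getType(), false);
  // 4. Wrap the descriptor in a DDLWork and let TaskFactory turn it into an executable task.
  rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
}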
Use of org.apache.hadoop.hive.ql.ddl.DDLWork in project hive by apache.
The class AlterTableReplaceColumnsAnalyzer, method analyzeCommand.
@Override
protected void analyzeCommand(TableName tableName, Map<String, String> partitionSpec, ASTNode command)
    throws SemanticException {
  List<FieldSchema> newCols = getColumns((ASTNode) command.getChild(0));
  boolean isCascade = false;
  if (null != command.getFirstChildWithType(HiveParser.TOK_CASCADE)) {
    isCascade = true;
  }

  AlterTableReplaceColumnsDesc desc = new AlterTableReplaceColumnsDesc(tableName, partitionSpec, isCascade, newCols);
  Table table = getTable(tableName, true);
  if (AcidUtils.isTransactionalTable(table)) {
    setAcidDdlDesc(desc);
  }

  addInputsOutputsAlterTable(tableName, partitionSpec, desc, desc.getType(), false);
  rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
}
Use of org.apache.hadoop.hive.ql.ddl.DDLWork in project hive by apache.
The class AlterTableAddColumnsAnalyzer, method analyzeCommand.
@Override
protected void analyzeCommand(TableName tableName, Map<String, String> partitionSpec, ASTNode command)
    throws SemanticException {
  List<FieldSchema> newCols = getColumns((ASTNode) command.getChild(0));
  boolean isCascade = false;
  if (null != command.getFirstChildWithType(HiveParser.TOK_CASCADE)) {
    isCascade = true;
  }

  AlterTableAddColumnsDesc desc = new AlterTableAddColumnsDesc(tableName, partitionSpec, isCascade, newCols);
  Table table = getTable(tableName, true);
  if (AcidUtils.isTransactionalTable(table)) {
    setAcidDdlDesc(desc);
  }

  addInputsOutputsAlterTable(tableName, partitionSpec, desc, desc.getType(), false);
  rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
}
Use of org.apache.hadoop.hive.ql.ddl.DDLWork in project hive by apache.
The class ShowTransactionsAnalyzer, method analyzeInternal.
@Override
public void analyzeInternal(ASTNode root) throws SemanticException {
  ctx.setResFile(ctx.getLocalTmpPath());
  ShowTransactionsDesc desc = new ShowTransactionsDesc(ctx.getResFile());

  Task<DDLWork> task = TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc));
  rootTasks.add(task);

  task.setFetchSource(true);
  setFetchTask(createFetchTask(ShowTransactionsDesc.SCHEMA));
}
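Note the extra wiring here compared to the ALTER TABLE examples: because SHOW TRANSACTIONS returns rows to the client, the analyzer points the descriptor at a local scratch file (ctx.getLocalTmpPath()), marks the DDL task as the fetch source, and attaches a fetch task built from ShowTransactionsDesc.SCHEMA so the results can be read back in the declared column layout.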
Use of org.apache.hadoop.hive.ql.ddl.DDLWork in project hive by apache.
The class CacheMetadataAnalyzer, method analyzeInternal.
@Override
public void analyzeInternal(ASTNode root) throws SemanticException {
  Table table = AnalyzeCommandUtils.getTable(root, this);
  CacheMetadataDesc desc;
  // In 2 cases out of 3, we could pass the path and type directly to metastore...
  if (AnalyzeCommandUtils.isPartitionLevelStats(root)) {
    Map<String, String> partSpec = AnalyzeCommandUtils.getPartKeyValuePairsFromAST(table, root, conf);
    Partition part = PartitionUtils.getPartition(db, table, partSpec, true);
    desc = new CacheMetadataDesc(table.getDbName(), table.getTableName(), part.getName());
    inputs.add(new ReadEntity(part));
  } else {
    // Should we get all partitions for a partitioned table?
    desc = new CacheMetadataDesc(table.getDbName(), table.getTableName(), table.isPartitioned());
    inputs.add(new ReadEntity(table));
  }
  rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
}
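Taken together, the five examples show DDLWork as the single serializable envelope for DDL: each analyzer builds a command-specific DDLDesc, registers the entities it touches, and hands the descriptor plus the accumulated input/output sets to TaskFactory via new DDLWork(getInputs(), getOutputs(), desc). This last example adds the one variation: the descriptor and the ReadEntity are chosen per branch, partition-level when the command names a partition, table-level otherwise.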