Use of org.apache.hadoop.hive.ql.ddl.DDLWork in project hive by apache.
The class DropTableAnalyzer, method analyzeInternal:
@Override
public void analyzeInternal(ASTNode root) throws SemanticException {
  String tableName = getUnescapedName((ASTNode) root.getChild(0));
  boolean ifExists = (root.getFirstChildWithType(HiveParser.TOK_IFEXISTS) != null);
  boolean throwException = !ifExists && !HiveConf.getBoolVar(conf, ConfVars.DROP_IGNORES_NON_EXISTENT);
  Table table = getTable(tableName, throwException);
  if (table != null) {
    inputs.add(new ReadEntity(table));
    boolean tableWithSuffix = (HiveConf.getBoolVar(conf, ConfVars.HIVE_ACID_CREATE_TABLE_USE_SUFFIX)
        || HiveConf.getBoolVar(conf, ConfVars.HIVE_ACID_LOCKLESS_READS_ENABLED))
        && AcidUtils.isTransactionalTable(table)
        && Boolean.parseBoolean(table.getProperty(SOFT_DELETE_TABLE));
    outputs.add(new WriteEntity(table, tableWithSuffix ? WriteType.DDL_EXCL_WRITE : WriteType.DDL_EXCLUSIVE));
  }
  boolean purge = (root.getFirstChildWithType(HiveParser.KW_PURGE) != null);
  ReplicationSpec replicationSpec = new ReplicationSpec(root);
  DropTableDesc desc = new DropTableDesc(tableName, ifExists, purge, replicationSpec);
  rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
}
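All five examples on this page share the same skeleton: the analyzer parses the AST into an operation-specific desc, wraps the desc in a DDLWork together with the accumulated read/write entities, and registers the resulting task under rootTasks. A minimal sketch of that skeleton (the surrounding analyzer class and MyOperationDesc are hypothetical stand-ins, not Hive classes):

// Hypothetical sketch of the recurring pattern; MyOperationDesc stands in for a
// concrete desc such as DropTableDesc or AlterTableTouchDesc.
@Override
public void analyzeInternal(ASTNode root) throws SemanticException {
  MyOperationDesc desc = new MyOperationDesc(/* values parsed from root */);
  // inputs/outputs are the ReadEntity/WriteEntity sets filled in while resolving
  // the referenced objects; they drive lock acquisition and authorization.
  rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
}

Note also the lock choice above: for transactional tables created with a soft-delete suffix, the drop downgrades from DDL_EXCLUSIVE to DDL_EXCL_WRITE, presumably so lockless readers are not blocked while the table is dropped.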
Use of org.apache.hadoop.hive.ql.ddl.DDLWork in project hive by apache.
The class AbstractAddPartitionAnalyzer, method analyzeCommand:
@Override
protected void analyzeCommand(TableName tableName, Map<String, String> partitionSpec,
    ASTNode command) throws SemanticException {
  Table table = getTable(tableName);
  validateAlterTableType(table, AlterTableType.ADDPARTITION, expectView());
  boolean ifNotExists = command.getChild(0).getType() == HiveParser.TOK_IFNOTEXISTS;
  // Use DDL_EXCLUSIVE to take an X lock and prevent races between concurrent ADD PARTITION
  // calls with IF NOT EXISTS. Without it, two concurrent calls adding the same partition may
  // both add data, since for transactional tables creating the partition metadata and moving
  // the data are two separate actions.
  outputs.add(new WriteEntity(table,
      ifNotExists && AcidUtils.isTransactionalTable(table)
          ? WriteType.DDL_EXCLUSIVE : WriteEntity.WriteType.DDL_SHARED));
  List<AlterTableAddPartitionDesc.PartitionDesc> partitions = createPartitions(command, table, ifNotExists);
  if (partitions.isEmpty()) {
    // nothing to do
    return;
  }
  AlterTableAddPartitionDesc desc =
      new AlterTableAddPartitionDesc(table.getDbName(), table.getTableName(), ifNotExists, partitions);
  Task<DDLWork> ddlTask = TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc));
  rootTasks.add(ddlTask);
  postProcess(tableName, table, desc, ddlTask);
}
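The lock-type ternary is the interesting part here. A hypothetical helper (not part of Hive) that names the decision and restates the reasoning from the inline comment:

// Hypothetical helper, not part of Hive; restates the lock choice made above.
private WriteEntity.WriteType addPartitionLockType(Table table, boolean ifNotExists) {
  // For transactional tables, ADD PARTITION IF NOT EXISTS takes an exclusive (X) lock:
  // creating the partition metadata and moving the data are two separate steps, so
  // without the X lock two concurrent callers could both pass the existence check
  // and both add data. In every other case a shared DDL lock suffices.
  return ifNotExists && AcidUtils.isTransactionalTable(table)
      ? WriteEntity.WriteType.DDL_EXCLUSIVE
      : WriteEntity.WriteType.DDL_SHARED;
}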
Use of org.apache.hadoop.hive.ql.ddl.DDLWork in project hive by apache.
The class AlterTableTouchAnalyzer, method analyzeCommand:
@Override
protected void analyzeCommand(TableName tableName, Map<String, String> partitionSpecFromFramework,
    ASTNode command) throws SemanticException {
  Table table = getTable(tableName);
  validateAlterTableType(table, AlterTableType.TOUCH, false);
  inputs.add(new ReadEntity(table));
  List<Map<String, String>> partitionSpecs = getPartitionSpecs(table, command);
  if (partitionSpecs.isEmpty()) {
    AlterTableTouchDesc desc = new AlterTableTouchDesc(tableName.getNotEmptyDbTable(), null);
    outputs.add(new WriteEntity(table, WriteEntity.WriteType.DDL_NO_LOCK));
    rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
  } else {
    PartitionUtils.addTablePartsOutputs(db, outputs, table, partitionSpecs, false, WriteEntity.WriteType.DDL_NO_LOCK);
    for (Map<String, String> partitionSpec : partitionSpecs) {
      AlterTableTouchDesc desc = new AlterTableTouchDesc(tableName.getNotEmptyDbTable(), partitionSpec);
      rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
    }
  }
}
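Note the fan-out in the else branch: an unpartitioned TOUCH produces a single DDLWork for the table, while an explicit partition list yields one DDLWork task per partition, each sharing the same inputs/outputs sets. Both branches use DDL_NO_LOCK, consistent with TOUCH being a metadata-only operation that exists to fire pre/post hooks rather than to change data.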
Use of org.apache.hadoop.hive.ql.ddl.DDLWork in project hive by apache.
The class ShowTablePropertiesAnalyzer, method analyzeInternal:
@Override
public void analyzeInternal(ASTNode root) throws SemanticException {
  ctx.setResFile(ctx.getLocalTmpPath());
  TableName tableName = getQualifiedTableName((ASTNode) root.getChild(0));
  String propertyName = (root.getChildCount() > 1) ? unescapeSQLString(root.getChild(1).getText()) : null;
  // validate that the table exists
  getTable(tableName);
  ShowTablePropertiesDesc desc = new ShowTablePropertiesDesc(ctx.getResFile().toString(), tableName, propertyName);
  Task<DDLWork> task = TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc));
  rootTasks.add(task);
  task.setFetchSource(true);
  setFetchTask(createFetchTask(ShowTablePropertiesDesc.SCHEMA));
}
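This example shows the result-surfacing side of DDLWork: the analyzer points the context at a local temp file (setResFile), marks the DDL task as the fetch source, and installs a fetch task built from ShowTablePropertiesDesc.SCHEMA, so the task's output lands in the file and is read back to the client with the declared schema.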
Use of org.apache.hadoop.hive.ql.ddl.DDLWork in project hive by apache.
The class AlterTableSetOwnerAnalyzer, method analyzeCommand:
@Override
protected void analyzeCommand(TableName tableName, Map<String, String> partitionSpecFromFramework,
    ASTNode command) throws SemanticException {
  PrincipalDesc ownerPrincipal = AuthorizationParseUtils.getPrincipalDesc((ASTNode) command.getChild(0));
  if (ownerPrincipal.getType() == null) {
    throw new SemanticException("Owner type can't be null in alter table set owner command");
  }
  if (ownerPrincipal.getName() == null) {
    throw new SemanticException("Owner name can't be null in alter table set owner command");
  }
  AlterTableSetOwnerDesc desc = new AlterTableSetOwnerDesc(tableName, ownerPrincipal);
  addInputsOutputsAlterTable(tableName, null, desc, desc.getType(), false);
  rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc), conf));
  setAcidDdlDesc(getTable(tableName), desc);
}
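Two details differ from the earlier examples: TaskFactory.get is passed the HiveConf explicitly, and setAcidDdlDesc registers the desc against the resolved table, which appears to route the operation through ACID-aware DDL handling when the table is transactional.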