Use of org.apache.hadoop.hive.ql.hooks.ReadEntity in project hive by apache: class DropDataConnectorAnalyzer, method analyzeInternal.
@Override
public void analyzeInternal(ASTNode root) throws SemanticException {
  String connectorName = unescapeIdentifier(root.getChild(0).getText());
  boolean ifExists = root.getFirstChildWithType(HiveParser.TOK_IFEXISTS) != null;
  // !ifExists: throw if the connector is missing and no IF EXISTS clause was given.
  DataConnector connector = getDataConnector(connectorName, !ifExists);
  if (connector == null) {
    return;
  }
  // Register the connector as a read input and as an exclusively locked DDL output.
  inputs.add(new ReadEntity(connector));
  outputs.add(new WriteEntity(connector, WriteEntity.WriteType.DDL_EXCLUSIVE));
  DropDataConnectorDesc desc = new DropDataConnectorDesc(connectorName, ifExists);
  rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
}
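The inputs and outputs sets populated above are what Hive's authorization layer and execution hooks later consume. As a rough illustration (not part of the Hive source), a pre-execution hook registered through hive.exec.pre.hooks could inspect those ReadEntity and WriteEntity objects as sketched below; the class name EntityLoggingHook and the logging are assumptions made for this example.

import org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext;
import org.apache.hadoop.hive.ql.hooks.HookContext;
import org.apache.hadoop.hive.ql.hooks.ReadEntity;
import org.apache.hadoop.hive.ql.hooks.WriteEntity;

// Hypothetical hook: logs every entity the semantic analyzers registered for the query.
public class EntityLoggingHook implements ExecuteWithHookContext {
  @Override
  public void run(HookContext hookContext) throws Exception {
    for (ReadEntity input : hookContext.getInputs()) {
      System.out.println("read:  " + input.getType() + " " + input.getName());
    }
    for (WriteEntity output : hookContext.getOutputs()) {
      System.out.println("write: " + output.getWriteType() + " " + output.getType() + " " + output.getName());
    }
  }
}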
Use of org.apache.hadoop.hive.ql.hooks.ReadEntity in project hive by apache: class DropTableAnalyzer, method analyzeInternal.
@Override
public void analyzeInternal(ASTNode root) throws SemanticException {
  String tableName = getUnescapedName((ASTNode) root.getChild(0));
  boolean ifExists = (root.getFirstChildWithType(HiveParser.TOK_IFEXISTS) != null);
  boolean throwException = !ifExists && !HiveConf.getBoolVar(conf, ConfVars.DROP_IGNORES_NON_EXISTENT);
  Table table = getTable(tableName, throwException);
  if (table != null) {
    inputs.add(new ReadEntity(table));
    boolean tableWithSuffix = (HiveConf.getBoolVar(conf, ConfVars.HIVE_ACID_CREATE_TABLE_USE_SUFFIX)
            || HiveConf.getBoolVar(conf, ConfVars.HIVE_ACID_LOCKLESS_READS_ENABLED))
        && AcidUtils.isTransactionalTable(table)
        && Boolean.parseBoolean(table.getProperty(SOFT_DELETE_TABLE));
    outputs.add(new WriteEntity(table, tableWithSuffix ? WriteType.DDL_EXCL_WRITE : WriteType.DDL_EXCLUSIVE));
  }
  boolean purge = (root.getFirstChildWithType(HiveParser.KW_PURGE) != null);
  ReplicationSpec replicationSpec = new ReplicationSpec(root);
  DropTableDesc desc = new DropTableDesc(tableName, ifExists, purge, replicationSpec);
  rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
}
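The compound condition above decides how strong a lock the drop takes. As an illustrative rewrite (not code from the project), the same logic with named booleans: when suffixed table creation or lockless reads are enabled, and the table is transactional and carries the soft-delete property, the drop registers a DDL_EXCL_WRITE output instead of the fully exclusive DDL_EXCLUSIVE one.

// Illustrative restatement of the lock-type decision in DropTableAnalyzer above.
boolean suffixedDropEnabled = HiveConf.getBoolVar(conf, ConfVars.HIVE_ACID_CREATE_TABLE_USE_SUFFIX)
    || HiveConf.getBoolVar(conf, ConfVars.HIVE_ACID_LOCKLESS_READS_ENABLED);
boolean softDeleteDrop = suffixedDropEnabled
    && AcidUtils.isTransactionalTable(table)
    && Boolean.parseBoolean(table.getProperty(SOFT_DELETE_TABLE));
// Soft-delete drops take the weaker exclusive-write lock; everything else takes a full DDL exclusive lock.
WriteEntity.WriteType lockType = softDeleteDrop ? WriteEntity.WriteType.DDL_EXCL_WRITE
                                                : WriteEntity.WriteType.DDL_EXCLUSIVE;
outputs.add(new WriteEntity(table, lockType));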
Use of org.apache.hadoop.hive.ql.hooks.ReadEntity in project hive by apache: class DescTableAnalyzer, method analyzeInternal.
@Override
public void analyzeInternal(ASTNode root) throws SemanticException {
  ctx.setResFile(ctx.getLocalTmpPath());
  ASTNode tableTypeExpr = (ASTNode) root.getChild(0);
  TableName tableName = getQualifiedTableName((ASTNode) tableTypeExpr.getChild(0));
  // if the database is not the one currently in use, validate that it exists
  if (tableName.getDb() != null) {
    db.validateDatabaseExists(tableName.getDb());
  }
  Table table = getTable(tableName);
  // process the second child node, if it exists, to get the partition spec(s)
  Map<String, String> partitionSpec = getPartitionSpec(db, tableTypeExpr, tableName);
  if (partitionSpec != null) {
    // validate that the partition exists
    PartitionUtils.getPartition(db, table, partitionSpec, true);
  }
  // process the third child node, if it exists, to get the column path
  String columnPath = getColumnPath(db, tableTypeExpr, tableName, partitionSpec);
  boolean showColStats = false;
  boolean isFormatted = false;
  boolean isExt = false;
  if (root.getChildCount() == 2) {
    int descOptions = root.getChild(1).getType();
    isFormatted = descOptions == HiveParser.KW_FORMATTED;
    isExt = descOptions == HiveParser.KW_EXTENDED;
    // This is how we differentiate between describing a table and describing a column.
    if (columnPath != null && isFormatted) {
      showColStats = true;
    }
  }
  inputs.add(new ReadEntity(table));
  DescTableDesc desc = new DescTableDesc(ctx.getResFile(), tableName, partitionSpec, columnPath, isExt, isFormatted);
  Task<?> task = TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc));
  rootTasks.add(task);
  task.setFetchSource(true);
  String schema = showColStats ? DescTableDesc.COLUMN_STATISTICS_SCHEMA : DescTableDesc.SCHEMA;
  setFetchTask(createFetchTask(schema));
}
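The option handling above decides which result schema the fetch task uses. An illustrative restatement (not the project's code) of how the DESCRIBE variants map to those flags; column statistics are only produced when a column path is given together with FORMATTED:

// DESCRIBE tbl                 -> isExt = false, isFormatted = false
// DESCRIBE EXTENDED tbl        -> isExt = true
// DESCRIBE FORMATTED tbl       -> isFormatted = true
// DESCRIBE FORMATTED tbl col   -> isFormatted = true, columnPath != null, so showColStats = true
boolean showColStats = columnPath != null && isFormatted;
String schema = showColStats ? DescTableDesc.COLUMN_STATISTICS_SCHEMA : DescTableDesc.SCHEMA;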
Use of org.apache.hadoop.hive.ql.hooks.ReadEntity in project hive by apache: class AlterTableTouchAnalyzer, method analyzeCommand.
@Override
protected void analyzeCommand(TableName tableName, Map<String, String> partitionSpecFromFramework, ASTNode command) throws SemanticException {
  Table table = getTable(tableName);
  validateAlterTableType(table, AlterTableType.TOUCH, false);
  inputs.add(new ReadEntity(table));
  List<Map<String, String>> partitionSpecs = getPartitionSpecs(table, command);
  if (partitionSpecs.isEmpty()) {
    AlterTableTouchDesc desc = new AlterTableTouchDesc(tableName.getNotEmptyDbTable(), null);
    outputs.add(new WriteEntity(table, WriteEntity.WriteType.DDL_NO_LOCK));
    rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
  } else {
    PartitionUtils.addTablePartsOutputs(db, outputs, table, partitionSpecs, false, WriteEntity.WriteType.DDL_NO_LOCK);
    for (Map<String, String> partitionSpec : partitionSpecs) {
      AlterTableTouchDesc desc = new AlterTableTouchDesc(tableName.getNotEmptyDbTable(), partitionSpec);
      rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
    }
  }
}
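For context, the statements this analyzer handles look like the illustrative examples below (the table and partition values are made up). A TOUCH without a PARTITION clause produces a single DDLWork, while each supplied partition spec produces its own DDLWork, all sharing the same inputs and outputs sets.

// Illustrative HiveQL, embedded as Java strings; not taken from the project.
String touchTable = "ALTER TABLE page_view TOUCH";
String touchPartition = "ALTER TABLE page_view TOUCH PARTITION (ds = '2024-01-01')";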
Use of org.apache.hadoop.hive.ql.hooks.ReadEntity in project hive by apache: class AlterTableTouchOperation, method execute.
@Override
public int execute() throws HiveException {
  // TODO: catalog
  Table table = context.getDb().getTable(desc.getTableName());
  EnvironmentContext environmentContext = new EnvironmentContext();
  environmentContext.putToProperties(StatsSetupConst.DO_NOT_UPDATE_STATS, StatsSetupConst.TRUE);
  if (desc.getPartitionSpec() == null) {
    context.getDb().alterTable(table, false, environmentContext, true);
    context.getWork().getInputs().add(new ReadEntity(table));
    DDLUtils.addIfAbsentByName(new WriteEntity(table, WriteEntity.WriteType.DDL_NO_LOCK), context);
  } else {
    Partition part = context.getDb().getPartition(table, desc.getPartitionSpec(), false);
    if (part == null) {
      throw new HiveException("Specified partition does not exist");
    }
    try {
      context.getDb().alterPartition(table.getCatalogName(), table.getDbName(), table.getTableName(), part, environmentContext, true);
    } catch (InvalidOperationException e) {
      throw new HiveException(e);
    }
    context.getWork().getInputs().add(new ReadEntity(part));
    DDLUtils.addIfAbsentByName(new WriteEntity(part, WriteEntity.WriteType.DDL_NO_LOCK), context);
  }
  return 0;
}
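The EnvironmentContext built at the top of execute() is what keeps TOUCH from recomputing statistics: the alterTable/alterPartition calls rewrite the existing metadata (which the Hive documentation describes mainly as a way to fire pre/post execute hooks), while DO_NOT_UPDATE_STATS tells the metastore to leave the stored statistics alone. A minimal sketch of that pattern, assuming the context is passed to the same alter calls as above:

// Sketch only: ask the metastore not to touch statistics during the alter call.
EnvironmentContext environmentContext = new EnvironmentContext();
environmentContext.putToProperties(StatsSetupConst.DO_NOT_UPDATE_STATS, StatsSetupConst.TRUE);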