Use of org.apache.hadoop.hive.common.TableName in project hive by apache.
In the class DescTableAnalyzer, the method analyzeInternal:
@Override
public void analyzeInternal(ASTNode root) throws SemanticException {
  ctx.setResFile(ctx.getLocalTmpPath());

  ASTNode tableTypeExpr = (ASTNode) root.getChild(0);
  TableName tableName = getQualifiedTableName((ASTNode) tableTypeExpr.getChild(0));
  // if the database is not the current one, validate that it exists
  if (tableName.getDb() != null) {
    db.validateDatabaseExists(tableName.getDb());
  }
  Table table = getTable(tableName);

  // process the second child node, if it exists, to get the partition spec(s)
  Map<String, String> partitionSpec = getPartitionSpec(db, tableTypeExpr, tableName);
  if (partitionSpec != null) {
    // validate that the partition exists
    PartitionUtils.getPartition(db, table, partitionSpec, true);
  }

  // process the third child node, if it exists, to get the column path
  String columnPath = getColumnPath(db, tableTypeExpr, tableName, partitionSpec);

  boolean showColStats = false;
  boolean isFormatted = false;
  boolean isExt = false;
  if (root.getChildCount() == 2) {
    int descOptions = root.getChild(1).getType();
    isFormatted = descOptions == HiveParser.KW_FORMATTED;
    isExt = descOptions == HiveParser.KW_EXTENDED;
    // This is how we differentiate between describing a table and describing a column.
    if (columnPath != null && isFormatted) {
      showColStats = true;
    }
  }

  inputs.add(new ReadEntity(table));

  DescTableDesc desc = new DescTableDesc(ctx.getResFile(), tableName, partitionSpec, columnPath, isExt, isFormatted);
  Task<?> task = TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc));
  rootTasks.add(task);
  task.setFetchSource(true);

  String schema = showColStats ? DescTableDesc.COLUMN_STATISTICS_SCHEMA : DescTableDesc.SCHEMA;
  setFetchTask(createFetchTask(schema));
}
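
The heavy lifting above is in getQualifiedTableName, which resolves the AST child into a TableName. As a minimal standalone sketch of that resolution, assuming the metastore's fromString(name, defaultCatalog, defaultDatabase) factory on TableName, a dotted name carries its own database while a bare name falls back to the supplied default:

import org.apache.hadoop.hive.common.TableName;

public class TableNameResolutionSketch {
  public static void main(String[] args) {
    // "sales.orders" carries its own database qualifier.
    TableName qualified = TableName.fromString("sales.orders", "hive", "default");
    System.out.println(qualified.getDb());    // expected: sales
    System.out.println(qualified.getTable()); // expected: orders

    // A bare "orders" falls back to the supplied default database.
    TableName bare = TableName.fromString("orders", "hive", "default");
    System.out.println(bare.getDb());         // expected: default
  }
}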
Use of org.apache.hadoop.hive.common.TableName in project hive by apache.
In the class DescTableOperation, the method getColumnDataColPathSpecified:
private void getColumnDataColPathSpecified(Table table, Partition part, List<FieldSchema> cols,
    List<ColumnStatisticsObj> colStats, Deserializer deserializer)
    throws SemanticException, HiveException, MetaException {
  // when a column name is specified in the DESCRIBE TABLE DDL, colPath will be db_name.table_name.column_name
  String colName = desc.getColumnPath().split("\\.")[2];
  List<String> colNames = Lists.newArrayList(colName.toLowerCase());

  TableName tableName = HiveTableName.of(desc.getDbTableName());
  if (null == part) {
    if (table.isPartitioned()) {
      Map<String, String> tableProps = table.getParameters() == null ?
          new HashMap<String, String>() : table.getParameters();
      if (table.isPartitionKey(colNames.get(0))) {
        getColumnDataForPartitionKeyColumn(table, cols, colStats, colNames, tableProps);
      } else {
        getColumnsForNotPartitionKeyColumn(cols, colStats, deserializer, colNames, tableName, tableProps);
      }
      table.setParameters(tableProps);
    } else {
      cols.addAll(Hive.getFieldsFromDeserializer(desc.getColumnPath(), deserializer));
      colStats.addAll(context.getDb().getTableColumnStatistics(
          tableName.getDb().toLowerCase(), tableName.getTable().toLowerCase(), colNames, false));
    }
  } else {
    List<String> partitions = new ArrayList<String>();
    // The partition name is converted to lowercase before generating the stats,
    // so we should use the same lowercase name to get the stats.
    String partName = HMSHandler.lowerCaseConvertPartName(part.getName());
    partitions.add(partName);
    cols.addAll(Hive.getFieldsFromDeserializer(desc.getColumnPath(), deserializer));
    Map<String, List<ColumnStatisticsObj>> partitionColumnStatistics = context.getDb().getPartitionColumnStatistics(
        tableName.getDb().toLowerCase(), tableName.getTable().toLowerCase(), partitions, colNames, false);
    List<ColumnStatisticsObj> partitionColStat = partitionColumnStatistics.get(partName);
    if (partitionColStat != null) {
      colStats.addAll(partitionColStat);
    }
  }
}
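
Since colPath arrives in the fully qualified db_name.table_name.column_name form, index 2 of the split is the bare column name; note the escaped dot, since String.split takes a regex. A quick standalone check (the path value is a hypothetical example):

public class ColPathSplitCheck {
  public static void main(String[] args) {
    // colPath as it would look for: DESCRIBE sales.orders order_id
    String colPath = "sales.orders.order_id";
    // "\\." escapes the dot; an unescaped "." is a regex wildcard that
    // would match every character and leave nothing to return
    String colName = colPath.split("\\.")[2];
    System.out.println(colName); // prints: order_id
  }
}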
Use of org.apache.hadoop.hive.common.TableName in project hive by apache.
In the class DropTableOperation, the method execute:
@Override
public int execute() throws HiveException {
  Table table = getTable();
  if (table == null) {
    // dropping a non-existent table is handled by DropTableAnalyzer
    return 0;
  }

  if (desc.getValidationRequired()) {
    if (table.isView() || table.isMaterializedView()) {
      if (desc.isIfExists()) {
        return 0;
      } else if (table.isView()) {
        throw new HiveException("Cannot drop a view with DROP TABLE");
      } else {
        throw new HiveException("Cannot drop a materialized view with DROP TABLE");
      }
    }
  }
  ReplicationSpec replicationSpec = desc.getReplicationSpec();
  if (replicationSpec.isInReplicationScope()) {
    /**
     * DROP TABLE FOR REPLICATION behaves differently from DROP TABLE IF EXISTS - it more closely
     * matches a DROP TABLE IF OLDER THAN(x) semantic.
     *
     * Ideally, commands executed under the scope of replication need to be idempotent and resilient
     * to repeats. What can happen, sometimes, is that a drone processing a replication task can
     * have been abandoned for not returning in time, but still execute its task after a while,
     * which should not result in it mucking up data that has been impressed later on. So, for example,
     * if we create partition P1, then drop it, then create it yet again, the replication of that
     * drop should not drop the newer partition if it runs after the destination object is already
     * in the newer state.
     *
     * Thus, we check replicationSpec.allowEventReplacementInto to determine whether or not we can
     * drop the object in question (it returns false if the object is newer than the event, true if not).
     *
     * In addition, since DROP TABLE FOR REPLICATION can result in a table not being dropped, while DROP
     * TABLE will always drop the table and its partitions, DROP TABLE FOR REPLICATION must
     * do one more thing - if it does not drop the table because the table is in a newer state, it must
     * drop the partitions inside it that are older than this event. To wit, DROP TABLE FOR REPL
     * acts like a recursive DROP TABLE IF OLDER.
     */
    Map<String, String> dbParams = context.getDb().getDatabase(table.getDbName()).getParameters();
    if (!replicationSpec.allowEventReplacementInto(dbParams)) {
      // the table is newer than the event, so do not drop it, but drop
      // any partitions inside it that are older
      if (table.isPartitioned()) {
        PartitionIterable partitions = new PartitionIterable(context.getDb(), table, null,
            MetastoreConf.getIntVar(context.getConf(), MetastoreConf.ConfVars.BATCH_RETRIEVE_MAX));
        for (Partition p : partitions) {
          // check each partition's own parameters, not the database's, so that
          // only partitions older than the event are dropped
          if (replicationSpec.allowEventReplacementInto(p.getParameters())) {
            context.getDb().dropPartition(table.getDbName(), table.getTableName(), p.getValues(), true);
          }
        }
      }
      LOG.debug("DDLTask: Drop Table is skipped as table {} is newer than update", desc.getTableName());
      // table is newer, leave it be.
      return 0;
    }
  }
  // TODO: API w/catalog name
  context.getDb().dropTable(table, desc.isPurge());
  DDLUtils.addIfAbsentByName(new WriteEntity(table, WriteEntity.WriteType.DDL_NO_LOCK), context);

  if (LlapHiveUtils.isLlapMode(context.getConf())) {
    TableName tableName = HiveTableName.of(table);
    ProactiveEviction.Request.Builder llapEvictRequestBuilder = ProactiveEviction.Request.Builder.create();
    llapEvictRequestBuilder.addTable(tableName.getDb(), tableName.getTable());
    ProactiveEviction.evict(context.getConf(), llapEvictRequestBuilder.build());
  }

  return 0;
}
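
The rule spelled out in the comment block reduces to an ordering check between the destination object's replication state and the event being replayed. A hypothetical sketch of that check - the long IDs and the helper name are illustrative stand-ins, not the ReplicationSpec API:

public class DropIfOlderSketch {
  // Stand-in for the allowEventReplacementInto decision: the drop may proceed
  // only if the destination object's state predates the replicated event.
  static boolean allowDrop(long destinationStateId, long replicationEventId) {
    return destinationStateId <= replicationEventId;
  }

  public static void main(String[] args) {
    System.out.println(allowDrop(100L, 250L)); // true: destination is older, drop applies
    System.out.println(allowDrop(300L, 250L)); // false: destination is newer, drop is skipped
  }
}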
Use of org.apache.hadoop.hive.common.TableName in project hive by apache.
In the class ShowTablePropertiesAnalyzer, the method analyzeInternal:
@Override
public void analyzeInternal(ASTNode root) throws SemanticException {
  ctx.setResFile(ctx.getLocalTmpPath());

  TableName tableName = getQualifiedTableName((ASTNode) root.getChild(0));
  String propertyName = (root.getChildCount() > 1) ? unescapeSQLString(root.getChild(1).getText()) : null;

  // validate that the table exists
  getTable(tableName);

  ShowTablePropertiesDesc desc = new ShowTablePropertiesDesc(ctx.getResFile().toString(), tableName, propertyName);
  Task<DDLWork> task = TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc));
  rootTasks.add(task);
  task.setFetchSource(true);
  setFetchTask(createFetchTask(ShowTablePropertiesDesc.SCHEMA));
}
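
The optional property name arrives from the parser as a quoted SQL literal, hence the unescapeSQLString call on the second child. A simplified stand-in for what that does (the real helper also processes escape sequences, not just the outer quotes):

public class PropertyNameUnescapeSketch {
  public static void main(String[] args) {
    // For: SHOW TBLPROPERTIES orders ('transactional')
    // the second AST child's text still carries its quotes.
    String astText = "'transactional'";
    // Simplified stand-in: strip only the outer quotes.
    String propertyName = astText.substring(1, astText.length() - 1);
    System.out.println(propertyName); // prints: transactional
  }
}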
Use of org.apache.hadoop.hive.common.TableName in project hive by apache.
In the class AbstractAlterTableAnalyzer, the method analyzeInternal:
@Override
public void analyzeInternal(ASTNode root) throws SemanticException {
  TableName tableName = getQualifiedTableName((ASTNode) root.getChild(0), MetaStoreUtils.getDefaultCatalog(conf));
  ASTNode command = (ASTNode) root.getChild(1);

  Map<String, String> partitionSpec = null;
  ASTNode partitionSpecNode = (ASTNode) root.getChild(2);
  if (partitionSpecNode != null) {
    // ALTER TABLE ... PARTITION ... RENAME can be used to convert/normalize legacy partition
    // column values, so we must not validate the old partition spec passed in this command.
    if (command.getType() == HiveParser.TOK_ALTERTABLE_RENAMEPART) {
      partitionSpec = getPartSpec(partitionSpecNode);
    } else {
      partitionSpec = getValidatedPartSpec(getTable(tableName), partitionSpecNode, conf, false);
    }
  }

  analyzeCommand(tableName, partitionSpec, command);
}
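
Unlike the DESCRIBE analyzer above, this one qualifies the name against an explicit default catalog. A minimal sketch of what that qualification amounts to, assuming the public TableName(catalog, database, table) constructor, with "hive" standing in for MetaStoreUtils.getDefaultCatalog(conf):

import org.apache.hadoop.hive.common.TableName;

public class CatalogQualifiedNameSketch {
  public static void main(String[] args) {
    // "hive" is a stand-in for the configured default catalog.
    TableName name = new TableName("hive", "sales", "orders");
    System.out.println(name.getCat());   // expected: hive
    System.out.println(name.getDb());    // expected: sales
    System.out.println(name.getTable()); // expected: orders
  }
}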