use of org.apache.hadoop.hive.common.TableName in project hive by apache.
the class ConstraintsUtils method processForeignKeys.
public static void processForeignKeys(TableName tableName, ASTNode node, List<SQLForeignKey> foreignKeys)
    throws SemanticException {
  // The ANTLR grammar looks like :
  // 1. KW_CONSTRAINT idfr=identifier KW_FOREIGN KW_KEY fkCols=columnParenthesesList
  //    KW_REFERENCES tabName=tableName parCols=columnParenthesesList
  //    enableSpec=enableSpecification validateSpec=validateSpecification relySpec=relySpecification
  //    -> ^(TOK_FOREIGN_KEY $idfr $fkCols $tabName $parCols $relySpec $enableSpec $validateSpec)
  //    when the user specifies the constraint name (i.e. child.getChildCount() == 7)
  // 2. KW_FOREIGN KW_KEY fkCols=columnParenthesesList
  //    KW_REFERENCES tabName=tableName parCols=columnParenthesesList
  //    enableSpec=enableSpecification validateSpec=validateSpecification relySpec=relySpecification
  //    -> ^(TOK_FOREIGN_KEY $fkCols $tabName $parCols $relySpec $enableSpec $validateSpec)
  //    when the user does not specify the constraint name (i.e. child.getChildCount() == 6)
  String constraintName = null;
  boolean enable = true;
  boolean validate = true;
  boolean rely = false;
  int fkIndex = -1;
  for (int i = 0; i < node.getChildCount(); i++) {
    ASTNode grandChild = (ASTNode) node.getChild(i);
    int type = grandChild.getToken().getType();
    if (type == HiveParser.TOK_CONSTRAINT_NAME) {
      constraintName = BaseSemanticAnalyzer.unescapeIdentifier(grandChild.getChild(0).getText().toLowerCase());
    } else if (type == HiveParser.TOK_ENABLE) {
      enable = true;
      // validate is true by default if we enable the constraint
      validate = true;
    } else if (type == HiveParser.TOK_DISABLE) {
      enable = false;
      // validate is false by default if we disable the constraint
      validate = false;
    } else if (type == HiveParser.TOK_VALIDATE) {
      validate = true;
    } else if (type == HiveParser.TOK_NOVALIDATE) {
      validate = false;
    } else if (type == HiveParser.TOK_RELY) {
      rely = true;
    } else if (type == HiveParser.TOK_TABCOLNAME && fkIndex == -1) {
      fkIndex = i;
    }
  }
  if (enable) {
    throw new SemanticException(ErrorMsg.INVALID_FK_SYNTAX.getMsg(
        "ENABLE feature not supported yet. Please use DISABLE instead."));
  }
  if (validate) {
    throw new SemanticException(ErrorMsg.INVALID_FK_SYNTAX.getMsg(
        "VALIDATE feature not supported yet. Please use NOVALIDATE instead."));
  }
  int ptIndex = fkIndex + 1;
  int pkIndex = ptIndex + 1;
  if (node.getChild(fkIndex).getChildCount() != node.getChild(pkIndex).getChildCount()) {
    throw new SemanticException(ErrorMsg.INVALID_FK_SYNTAX.getMsg(
        " The number of foreign key columns should be same as number of parent key columns "));
  }
  TableName parentTblName = BaseSemanticAnalyzer.getQualifiedTableName((ASTNode) node.getChild(ptIndex));
  for (int j = 0; j < node.getChild(fkIndex).getChildCount(); j++) {
    SQLForeignKey sqlForeignKey = new SQLForeignKey();
    sqlForeignKey.setFktable_db(tableName.getDb());
    sqlForeignKey.setFktable_name(tableName.getTable());
    Tree fkgrandChild = node.getChild(fkIndex).getChild(j);
    BaseSemanticAnalyzer.checkColumnName(fkgrandChild.getText());
    sqlForeignKey.setFkcolumn_name(BaseSemanticAnalyzer.unescapeIdentifier(fkgrandChild.getText().toLowerCase()));
    sqlForeignKey.setPktable_db(parentTblName.getDb());
    sqlForeignKey.setPktable_name(parentTblName.getTable());
    Tree pkgrandChild = node.getChild(pkIndex).getChild(j);
    sqlForeignKey.setPkcolumn_name(BaseSemanticAnalyzer.unescapeIdentifier(pkgrandChild.getText().toLowerCase()));
    sqlForeignKey.setKey_seq(j + 1);
    sqlForeignKey.setFk_name(constraintName);
    sqlForeignKey.setEnable_cstr(enable);
    sqlForeignKey.setValidate_cstr(validate);
    sqlForeignKey.setRely_cstr(rely);
    foreignKeys.add(sqlForeignKey);
  }
}
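For reference, here is a minimal sketch of the kind of entry the loop above produces, using only calls that already appear in these snippets (HiveTableName.ofNullable, the TableName getters, and the SQLForeignKey setters). The table and column names are made up, and the import locations for HiveTableName and SQLForeignKey are assumed to be the usual ql.parse and metastore.api packages. Since ENABLE and VALIDATE are rejected above, the constraint ends up recorded as DISABLE NOVALIDATE, here with RELY.

import org.apache.hadoop.hive.common.TableName;
import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
import org.apache.hadoop.hive.ql.parse.HiveTableName;

public class ForeignKeySketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical child and parent tables; the names are illustrative only.
    TableName child = HiveTableName.ofNullable("orders", "default");
    TableName parent = HiveTableName.ofNullable("customers", "default");

    SQLForeignKey fk = new SQLForeignKey();
    fk.setFktable_db(child.getDb());
    fk.setFktable_name(child.getTable());
    fk.setFkcolumn_name("customer_id");      // hypothetical foreign key column
    fk.setPktable_db(parent.getDb());
    fk.setPktable_name(parent.getTable());
    fk.setPkcolumn_name("id");               // hypothetical parent key column
    fk.setKey_seq(1);                        // 1-based position within the key
    fk.setEnable_cstr(false);                // ENABLE/VALIDATE are rejected above,
    fk.setValidate_cstr(false);              // so the constraint is DISABLE NOVALIDATE
    fk.setRely_cstr(true);                   // RELY was specified in this hypothetical DDL
    System.out.println(fk);
  }
}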
use of org.apache.hadoop.hive.common.TableName in project hive by apache.
the class SemanticAnalyzer method setStatsForNonNativeTable.
private void setStatsForNonNativeTable(String dbName, String tableName) throws SemanticException {
  TableName qTableName = HiveTableName.ofNullable(tableName, dbName);
  Map<String, String> mapProp = new HashMap<>();
  mapProp.put(StatsSetupConst.COLUMN_STATS_ACCURATE, null);
  AlterTableUnsetPropertiesDesc alterTblDesc =
      new AlterTableUnsetPropertiesDesc(qTableName, null, null, false, mapProp, false, null);
  this.rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTblDesc)));
}
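A small sketch of the name-resolution step in isolation, assuming HiveTableName.ofNullable resolves an unqualified table name against the supplied database (as its use above suggests). The database and table names are hypothetical.

import org.apache.hadoop.hive.common.TableName;
import org.apache.hadoop.hive.ql.parse.HiveTableName;

public class NonNativeStatsSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical non-native (storage-handler backed) table.
    TableName qTableName = HiveTableName.ofNullable("kafka_events", "streaming");
    // These are the coordinates the ALTER TABLE ... UNSET TBLPROPERTIES descriptor above
    // is built for; COLUMN_STATS_ACCURATE is cleared because Hive cannot keep accurate
    // column statistics for a table it does not manage natively.
    System.out.println(qTableName.getDb() + "." + qTableName.getTable());
  }
}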
use of org.apache.hadoop.hive.common.TableName in project hive by apache.
the class MsckOperation method execute.
@Override
public int execute() throws HiveException, IOException, TException {
  try {
    Msck msck = new Msck(false, false);
    msck.init(Msck.getMsckConf(context.getDb().getConf()));
    msck.updateExpressionProxy(getProxyClass(context.getDb().getConf()));
    TableName tableName = HiveTableName.of(desc.getTableName());
    long partitionExpirySeconds = -1L;
    try (HiveMetaStoreClient msc = new HiveMetaStoreClient(context.getConf())) {
      boolean msckEnablePartitionRetention = MetastoreConf.getBoolVar(context.getConf(),
          MetastoreConf.ConfVars.MSCK_REPAIR_ENABLE_PARTITION_RETENTION);
      if (msckEnablePartitionRetention) {
        Table table = msc.getTable(SessionState.get().getCurrentCatalog(), tableName.getDb(), tableName.getTable());
        String qualifiedTableName = Warehouse.getCatalogQualifiedTableName(table);
        partitionExpirySeconds = PartitionManagementTask.getRetentionPeriodInSeconds(table);
        LOG.info("{} - Retention period ({}s) for partition is enabled for MSCK REPAIR..",
            qualifiedTableName, partitionExpirySeconds);
      }
    }
    MsckInfo msckInfo = new MsckInfo(SessionState.get().getCurrentCatalog(), tableName.getDb(), tableName.getTable(),
        desc.getFilterExp(), desc.getResFile(), desc.isRepairPartitions(), desc.isAddPartitions(),
        desc.isDropPartitions(), partitionExpirySeconds);
    return msck.repair(msckInfo);
  } catch (MetaException e) {
    LOG.error("Unable to create msck instance.", e);
    return 1;
  } catch (SemanticException e) {
    LOG.error("Msck failed.", e);
    return 1;
  }
}
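A minimal sketch of how the MSCK target string is split into the database and table parts that feed MsckInfo, assuming HiveTableName.of accepts a dotted db.table string as the snippets above suggest. The table name is hypothetical.

import org.apache.hadoop.hive.common.TableName;
import org.apache.hadoop.hive.ql.parse.HiveTableName;

public class MsckTargetSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical target, e.g. "MSCK REPAIR TABLE sales.web_logs".
    TableName tableName = HiveTableName.of("sales.web_logs");
    // The db and table parts below are what MsckInfo is constructed with above;
    // the catalog comes from the current session state.
    System.out.println("db=" + tableName.getDb() + ", table=" + tableName.getTable());
  }
}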
use of org.apache.hadoop.hive.common.TableName in project hive by apache.
the class TruncateTableAnalyzer method analyzeInternal.
@Override
public void analyzeInternal(ASTNode root) throws SemanticException {
  // TOK_TABLE_PARTITION
  ASTNode tableNode = (ASTNode) root.getChild(0);
  String tableNameString = getUnescapedName((ASTNode) tableNode.getChild(0));
  Table table = getTable(tableNameString, true);
  TableName tableName = HiveTableName.of(table);
  checkTruncateEligibility(root, tableNode, tableNameString, table);
  Map<String, String> partitionSpec = getPartSpec((ASTNode) tableNode.getChild(1));
  addTruncateTableOutputs(tableNode, table, partitionSpec);
  Task<?> truncateTask = null;
  ASTNode colNamesNode = (ASTNode) root.getFirstChildWithType(HiveParser.TOK_TABCOLNAME);
  if (colNamesNode == null) {
    truncateTask = getTruncateTaskWithoutColumnNames(tableName, partitionSpec, table);
  } else {
    truncateTask = getTruncateTaskWithColumnNames(tableNode, tableName, table, partitionSpec, colNamesNode);
  }
  rootTasks.add(truncateTask);
}
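Building a real ASTNode and Table requires a full analyzer session, so the sketch below only resolves a hypothetical truncate target and notes, in comments, which of the two task-building paths each statement form would take; statement text and names are illustrative.

import org.apache.hadoop.hive.common.TableName;
import org.apache.hadoop.hive.ql.parse.HiveTableName;

public class TruncateTargetSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical statements:
    //   TRUNCATE TABLE sales.web_logs PARTITION (dt='2024-01-01')
    //     -> no TOK_TABCOLNAME child, so getTruncateTaskWithoutColumnNames is used.
    //   TRUNCATE TABLE sales.web_logs COLUMNS (referrer, user_agent)
    //     -> TOK_TABCOLNAME present, so getTruncateTaskWithColumnNames is used.
    TableName target = HiveTableName.ofNullable("web_logs", "sales");
    System.out.println("Truncate target: " + target.getDb() + "." + target.getTable());
  }
}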
use of org.apache.hadoop.hive.common.TableName in project hive by apache.
the class AlterTableDropPartitionOperation method dropPartitions.
private void dropPartitions(boolean isRepl) throws HiveException {
  // ifExists is currently verified in AlterTableDropPartitionAnalyzer
  TableName tableName = HiveTableName.of(desc.getTableName());
  List<Pair<Integer, byte[]>> partitionExpressions = new ArrayList<>(desc.getPartSpecs().size());
  for (AlterTableDropPartitionDesc.PartitionDesc partSpec : desc.getPartSpecs()) {
    partitionExpressions.add(Pair.of(partSpec.getPrefixLength(),
        SerializationUtilities.serializeExpressionToKryo(partSpec.getPartSpec())));
  }
  PartitionDropOptions options =
      PartitionDropOptions.instance().deleteData(desc.getDeleteData()).ifExists(true).purgeData(desc.getIfPurge());
  List<Partition> droppedPartitions = context.getDb().dropPartitions(tableName.getDb(), tableName.getTable(),
      partitionExpressions, options);
  if (isRepl) {
    LOG.info("Dropped {} partitions for replication.", droppedPartitions.size());
    // If replaying an event, we need not bother about the further steps; we can return from here itself.
    return;
  }
  ProactiveEviction.Request.Builder llapEvictRequestBuilder =
      LlapHiveUtils.isLlapMode(context.getConf()) ? ProactiveEviction.Request.Builder.create() : null;
  for (Partition partition : droppedPartitions) {
    context.getConsole().printInfo("Dropped the partition " + partition.getName());
    // We have already locked the table, don't lock the partitions.
    DDLUtils.addIfAbsentByName(new WriteEntity(partition, WriteEntity.WriteType.DDL_NO_LOCK), context);
    if (llapEvictRequestBuilder != null) {
      llapEvictRequestBuilder.addPartitionOfATable(tableName.getDb(), tableName.getTable(), partition.getSpec());
    }
  }
  if (llapEvictRequestBuilder != null) {
    ProactiveEviction.evict(context.getConf(), llapEvictRequestBuilder.build());
  }
}
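A minimal sketch of preparing the same drop options outside the operation, using only the builder calls visible above; the boolean values and the table name are illustrative, and the PartitionDropOptions import location is assumed to be the metastore package.

import org.apache.hadoop.hive.common.TableName;
import org.apache.hadoop.hive.metastore.PartitionDropOptions;
import org.apache.hadoop.hive.ql.parse.HiveTableName;

public class DropPartitionOptionsSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical target table for ALTER TABLE ... DROP PARTITION.
    TableName tableName = HiveTableName.of("sales.web_logs");
    // Same builder chain as above; the true/false values here are illustrative.
    PartitionDropOptions options = PartitionDropOptions.instance()
        .deleteData(true)   // remove the partition data from the filesystem
        .ifExists(true)     // do not fail if the partition is already gone
        .purgeData(false);  // keep the default trash behavior instead of purging immediately
    System.out.println("Drop options built for " + tableName.getDb() + "." + tableName.getTable());
  }
}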