Use of org.apache.hadoop.hive.common.TableName in project hive by apache.
The class HiveIcebergMetaHook, method setupAlterOperationType.
private void setupAlterOperationType(org.apache.hadoop.hive.metastore.api.Table hmsTable, EnvironmentContext context)
    throws MetaException {
  TableName tableName = new TableName(hmsTable.getCatName(), hmsTable.getDbName(), hmsTable.getTableName());
  if (context == null || context.getProperties() == null) {
    throw new MetaException("ALTER TABLE operation type on Iceberg table " + tableName + " could not be determined.");
  }
  String stringOpType = context.getProperties().get(ALTER_TABLE_OPERATION_TYPE);
  if (stringOpType != null) {
    currentAlterTableOp = AlterTableType.valueOf(stringOpType);
    if (SUPPORTED_ALTER_OPS.stream().noneMatch(op -> op.equals(currentAlterTableOp))) {
      throw new MetaException("Unsupported ALTER TABLE operation type on Iceberg table " + tableName +
          ", must be one of: " + SUPPORTED_ALTER_OPS);
    }
  }
}
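For context, a minimal sketch of how a caller might supply the operation type that the hook reads above. The key's actual value lives in HiveIcebergMetaHook's ALTER_TABLE_OPERATION_TYPE constant, so the string used here is a hypothetical stand-in, and "ADDCOLS" is only an example operation name.

import org.apache.hadoop.hive.metastore.api.EnvironmentContext;

public class AlterOpContextSketch {
  // Hypothetical stand-in for HiveIcebergMetaHook.ALTER_TABLE_OPERATION_TYPE.
  private static final String ALTER_TABLE_OPERATION_TYPE = "alterTableOperationType";

  // Builds an EnvironmentContext carrying the ALTER TABLE operation type,
  // which setupAlterOperationType later resolves via AlterTableType.valueOf.
  public static EnvironmentContext contextFor(String opType) {
    EnvironmentContext context = new EnvironmentContext();
    context.putToProperties(ALTER_TABLE_OPERATION_TYPE, opType);
    return context;
  }

  public static void main(String[] args) {
    System.out.println(contextFor("ADDCOLS").getProperties());
  }
}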
Use of org.apache.hadoop.hive.common.TableName in project hive by apache.
The class CreateViewOperation, method createViewObject.
private Table createViewObject() throws HiveException {
  TableName name = HiveTableName.of(desc.getViewName());
  Table view = new Table(name.getDb(), name.getTable());
  view.setViewOriginalText(desc.getOriginalText());
  view.setViewExpandedText(desc.getExpandedText());
  view.setTableType(TableType.VIRTUAL_VIEW);
  view.setSerializationLib(null);
  view.clearSerDeInfo();
  view.setFields(desc.getSchema());
  if (desc.getComment() != null) {
    view.setProperty("comment", desc.getComment());
  }
  if (desc.getProperties() != null) {
    view.getParameters().putAll(desc.getProperties());
  }
  if (!CollectionUtils.isEmpty(desc.getPartitionColumns())) {
    view.setPartCols(desc.getPartitionColumns());
  }
  StorageFormat storageFormat = new StorageFormat(context.getConf());
  storageFormat.fillDefaultStorageFormat(false, false);
  view.setInputFormatClass(storageFormat.getInputFormat());
  view.setOutputFormatClass(storageFormat.getOutputFormat());
  if (desc.getOwnerName() != null) {
    view.setOwner(desc.getOwnerName());
  }
  // Sets the column state for the create view statement (false since it is a creation).
  // Similar to logic in CreateTableDesc.
  StatsSetupConst.setStatsStateForCreateTable(view.getTTable().getParameters(), null, StatsSetupConst.FALSE);
  return view;
}
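The name handling above comes down to TableName accessors; a minimal, runnable sketch of just that part (the catalog, database, and view names here are made up for illustration):

import org.apache.hadoop.hive.common.TableName;

public class ViewNameSketch {
  public static void main(String[] args) {
    // HiveTableName.of(...) resolves a "db.view" string against the session;
    // here the parts are passed explicitly to show what the accessors return.
    TableName name = new TableName("hive", "reporting", "daily_sales_v");
    System.out.println(name.getDb());              // reporting
    System.out.println(name.getTable());           // daily_sales_v
    System.out.println(name.getNotEmptyDbTable()); // reporting.daily_sales_v
  }
}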
Use of org.apache.hadoop.hive.common.TableName in project hive by apache.
The class AlterMaterializedViewRebuildAnalyzer, method analyzeInternal.
@Override
public void analyzeInternal(ASTNode root) throws SemanticException {
  if (mvRebuildMode != MaterializationRebuildMode.NONE) {
    super.analyzeInternal(root);
    return;
  }
  ASTNode tableTree = (ASTNode) root.getChild(0);
  TableName tableName = getQualifiedTableName(tableTree);
  // The query scheduler requires the fully qualified table name.
  if (ctx.isScheduledQuery()) {
    unparseTranslator.addTableNameTranslation(tableTree, SessionState.get().getCurrentDatabase());
    return;
  }
  try {
    Boolean outdated = db.isOutdatedMaterializedView(getTxnMgr(), tableName);
    if (outdated != null && !outdated) {
      String msg = String.format("Materialized view %s.%s is up to date. Skipping rebuild.",
          tableName.getDb(), tableName.getTable());
      LOG.info(msg);
      console.printInfo(msg, false);
      return;
    }
  } catch (HiveException e) {
    LOG.warn("Error while checking materialized view " + tableName.getDb() + "." + tableName.getTable(), e);
  }
  ASTNode rewrittenAST = getRewrittenAST(tableName);
  mvRebuildMode = MaterializationRebuildMode.INSERT_OVERWRITE_REBUILD;
  mvRebuildDbName = tableName.getDb();
  mvRebuildName = tableName.getTable();
  LOG.debug("Rebuilding materialized view " + tableName.getNotEmptyDbTable());
  super.analyzeInternal(rewrittenAST);
  queryState.setCommandType(HiveOperation.ALTER_MATERIALIZED_VIEW_REBUILD);
}
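The outdated flag is deliberately tri-state: null means the view's freshness could not be determined, and only an explicit false skips the rebuild. A small sketch of that decision in isolation (shouldRebuild is a hypothetical helper name, not part of the analyzer):

public class RebuildCheckSketch {
  // Mirrors the check above: rebuild unless the view is known to be up to date.
  static boolean shouldRebuild(Boolean outdated) {
    return outdated == null || outdated;
  }

  public static void main(String[] args) {
    System.out.println(shouldRebuild(null));          // true: freshness unknown, rebuild
    System.out.println(shouldRebuild(Boolean.TRUE));  // true: stale, rebuild
    System.out.println(shouldRebuild(Boolean.FALSE)); // false: up to date, skip
  }
}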
Use of org.apache.hadoop.hive.common.TableName in project hive by apache.
The class DriverTxnHandler, method setWriteIdForAcidDdl.
private boolean setWriteIdForAcidDdl() throws SemanticException, LockException {
  DDLDescWithWriteId acidDdlDesc = driverContext.getPlan().getAcidDdlDesc();
  boolean hasAcidDdl = acidDdlDesc != null && acidDdlDesc.mayNeedWriteId();
  if (hasAcidDdl) {
    String fqTableName = acidDdlDesc.getFullTableName();
    TableName tableName = HiveTableName.of(fqTableName);
    long writeId = driverContext.getTxnManager().getTableWriteId(tableName.getDb(), tableName.getTable());
    acidDdlDesc.setWriteId(writeId);
  }
  return hasAcidDdl;
}
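HiveTableName.of does the splitting of the fully qualified name here; below is a simplified stand-in covering only the "db.table" case (the real helper also handles catalog-qualified and unqualified names, so this is purely illustrative):

public class FqTableNameSketch {
  // Simplified stand-in for HiveTableName.of(...): only handles "db.table".
  static String[] splitDbTable(String fqTableName) {
    int dot = fqTableName.indexOf('.');
    if (dot < 0) {
      throw new IllegalArgumentException("Expected db.table, got: " + fqTableName);
    }
    return new String[] { fqTableName.substring(0, dot), fqTableName.substring(dot + 1) };
  }

  public static void main(String[] args) {
    String[] parts = splitDbTable("sales.orders");
    System.out.println(parts[0] + " / " + parts[1]); // sales / orders
  }
}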
Use of org.apache.hadoop.hive.common.TableName in project hive by apache.
The class DriverTxnHandler, method setWriteIdForAcidFileSinks.
void setWriteIdForAcidFileSinks() throws SemanticException, LockException {
  if (!driverContext.getPlan().getAcidSinks().isEmpty()) {
    List<FileSinkDesc> acidSinks = new ArrayList<>(driverContext.getPlan().getAcidSinks());
    // Sorting makes tests easier to write, since file names and ROW__IDs depend on statementId;
    // this keeps the (file name -> data) mapping stable.
    acidSinks.sort((FileSinkDesc fsd1, FileSinkDesc fsd2) -> fsd1.getDirName().compareTo(fsd2.getDirName()));
    // If direct insert is on, sort the FSOs by moveTaskId as well, because the directory is the
    // same for all of them except in the union use cases.
    boolean isDirectInsertOn = false;
    for (FileSinkDesc acidSink : acidSinks) {
      if (acidSink.isDirectInsert()) {
        isDirectInsertOn = true;
        break;
      }
    }
    if (isDirectInsertOn) {
      acidSinks.sort((FileSinkDesc fsd1, FileSinkDesc fsd2) -> fsd1.getMoveTaskId().compareTo(fsd2.getMoveTaskId()));
    }
    int maxStmtId = -1;
    for (FileSinkDesc acidSink : acidSinks) {
      TableDesc tableInfo = acidSink.getTableInfo();
      TableName tableName = HiveTableName.of(tableInfo.getTableName());
      long writeId = driverContext.getTxnManager().getTableWriteId(tableName.getDb(), tableName.getTable());
      acidSink.setTableWriteId(writeId);
      /**
       * It's possible to have > 1 FileSink writing to the same table/partition,
       * e.g. a Merge stmt, or a multi-insert stmt mixing DP and SP writes:
       * Insert ... Select ... Union All Select ... using
       * {@link org.apache.hadoop.hive.ql.exec.AbstractFileMergeOperator#UNION_SUDBIR_PREFIX}
       */
      acidSink.setStatementId(driverContext.getTxnManager().getStmtIdAndIncrement());
      maxStmtId = Math.max(acidSink.getStatementId(), maxStmtId);
      String unionAllSubdir = "/" + AbstractFileMergeOperator.UNION_SUDBIR_PREFIX;
      if (acidSink.getInsertOverwrite() && acidSink.getDirName().toString().contains(unionAllSubdir) &&
          acidSink.isFullAcidTable()) {
        throw new UnsupportedOperationException("QueryId=" + driverContext.getPlan().getQueryId() +
            " is not supported due to OVERWRITE and UNION ALL. Please use truncate + insert");
      }
    }
    if (HiveConf.getBoolVar(driverContext.getConf(), ConfVars.HIVE_EXTEND_BUCKET_ID_RANGE)) {
      for (FileSinkDesc each : acidSinks) {
        each.setMaxStmtId(maxStmtId);
      }
    }
  }
}
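The two-pass sort above exists purely for determinism: file names and ROW__IDs depend on statementId, so a stable sink ordering keeps test output stable. A self-contained sketch of the same ordering logic, with a plain record standing in for FileSinkDesc (Sink and its fields are hypothetical):

import java.util.Comparator;
import java.util.List;

public class SinkOrderingSketch {
  // Hypothetical stand-in for FileSinkDesc, with just the fields the sort uses.
  record Sink(String dirName, String moveTaskId, boolean directInsert) {}

  static void sortForDeterminism(List<Sink> sinks) {
    // First pass: order by target directory, as in the ACID sink handling above.
    sinks.sort(Comparator.comparing(Sink::dirName));
    // Second pass: with direct insert, directories coincide, so fall back to move-task id.
    if (sinks.stream().anyMatch(Sink::directInsert)) {
      sinks.sort(Comparator.comparing(Sink::moveTaskId));
    }
  }

  public static void main(String[] args) {
    List<Sink> sinks = new java.util.ArrayList<>(List.of(
        new Sink("/warehouse/t/x", "2", true),
        new Sink("/warehouse/t/x", "1", true)));
    sortForDeterminism(sinks);
    sinks.forEach(s -> System.out.println(s.moveTaskId())); // prints 1, then 2
  }
}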