Use of org.apache.hadoop.hive.common.TableName in project hive by apache.
Class AbstractAlterTableRenameAnalyzer, method analyzeCommand.
@Override
protected void analyzeCommand(TableName tableName, Map<String, String> partitionSpec, ASTNode command) throws SemanticException {
TableName target = getQualifiedTableName((ASTNode) command.getChild(0));
AlterTableRenameDesc desc = new AlterTableRenameDesc(tableName, null, isView(), target.getNotEmptyDbTable());
Table table = getTable(tableName.getNotEmptyDbTable(), true);
if (AcidUtils.isTransactionalTable(table)) {
setAcidDdlDesc(desc);
}
addInputsOutputsAlterTable(tableName, null, desc, desc.getType(), false);
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
}
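For context, getNotEmptyDbTable() renders the rename target as a database-qualified name, which is what the AlterTableRenameDesc above stores. A minimal sketch of that usage, with made-up identifiers ("hive", "sales", "orders_v2" are placeholders, not from the Hive source):
// Illustrative only: construct a TableName and read it back as a db.table string.
TableName target = new TableName("hive", "sales", "orders_v2");
String dbTable = target.getNotEmptyDbTable(); // expected to be "sales.orders_v2"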
Use of org.apache.hadoop.hive.common.TableName in project hive by apache.
Class HiveAlterHandler, method alterTable.
@Override
public void alterTable(RawStore msdb, Warehouse wh, String catName, String dbname, String name, Table newt, EnvironmentContext environmentContext, IHMSHandler handler, String writeIdList) throws InvalidOperationException, MetaException {
catName = normalizeIdentifier(catName);
name = name.toLowerCase();
dbname = dbname.toLowerCase();
final boolean cascade;
final boolean replDataLocationChanged;
final boolean isReplicated;
if ((environmentContext != null) && environmentContext.isSetProperties()) {
cascade = StatsSetupConst.TRUE.equals(environmentContext.getProperties().get(StatsSetupConst.CASCADE));
replDataLocationChanged = ReplConst.TRUE.equals(environmentContext.getProperties().get(ReplConst.REPL_DATA_LOCATION_CHANGED));
} else {
cascade = false;
replDataLocationChanged = false;
}
if (newt == null) {
throw new InvalidOperationException("New table is null");
}
String newTblName = newt.getTableName().toLowerCase();
String newDbName = newt.getDbName().toLowerCase();
if (!MetaStoreUtils.validateName(newTblName, handler.getConf())) {
throw new InvalidOperationException(newTblName + " is not a valid object name");
}
String validate = MetaStoreServerUtils.validateTblColumns(newt.getSd().getCols());
if (validate != null) {
throw new InvalidOperationException("Invalid column " + validate);
}
// Validate bucketedColumns in new table
List<String> bucketColumns = MetaStoreServerUtils.validateBucketColumns(newt.getSd());
if (CollectionUtils.isNotEmpty(bucketColumns)) {
String errMsg = "Bucket columns - " + bucketColumns.toString() + " doesn't match with any table columns";
LOG.error(errMsg);
throw new InvalidOperationException(errMsg);
}
Path srcPath = null;
FileSystem srcFs;
Path destPath = null;
FileSystem destFs = null;
boolean success = false;
boolean dataWasMoved = false;
boolean isPartitionedTable = false;
Database olddb = null;
Table oldt = null;
List<TransactionalMetaStoreEventListener> transactionalListeners = handler.getTransactionalListeners();
List<MetaStoreEventListener> listeners = handler.getListeners();
Map<String, String> txnAlterTableEventResponses = Collections.emptyMap();
try {
boolean rename = false;
List<Partition> parts;
// Switching tables between catalogs is not allowed.
if (!catName.equalsIgnoreCase(newt.getCatName())) {
throw new InvalidOperationException("Tables cannot be moved between catalogs, old catalog" + catName + ", new catalog " + newt.getCatName());
}
// check if table with the new name already exists
if (!newTblName.equals(name) || !newDbName.equals(dbname)) {
if (msdb.getTable(catName, newDbName, newTblName, null) != null) {
throw new InvalidOperationException("new table " + newDbName + "." + newTblName + " already exists");
}
rename = true;
}
msdb.openTransaction();
// get old table
// Note: we don't verify stats here; it's done below in alterTableUpdateTableColumnStats.
olddb = msdb.getDatabase(catName, dbname);
oldt = msdb.getTable(catName, dbname, name, null);
if (oldt == null) {
throw new InvalidOperationException("table " + TableName.getQualified(catName, dbname, name) + " doesn't exist");
}
validateTableChangesOnReplSource(olddb, oldt, newt, environmentContext);
// On a replica this alter table will be executed only if old and new both the databases are
// available and being replicated into. Otherwise, it will be either create or drop of table.
isReplicated = HMSHandler.isDbReplicationTarget(olddb);
if (oldt.getPartitionKeysSize() != 0) {
isPartitionedTable = true;
}
// Throws InvalidOperationException if the new column types are not
// compatible with the current column types.
DefaultIncompatibleTableChangeHandler.get().allowChange(handler.getConf(), oldt, newt);
// check that partition keys have not changed, except for virtual views
// however, allow the partition comments to change
boolean partKeysPartiallyEqual = checkPartialPartKeysEqual(oldt.getPartitionKeys(), newt.getPartitionKeys());
if (!oldt.getTableType().equals(TableType.VIRTUAL_VIEW.toString())) {
Map<String, String> properties = (environmentContext != null) ? environmentContext.getProperties() : null;
if (properties == null || !Boolean.parseBoolean(properties.getOrDefault(HiveMetaHook.ALLOW_PARTITION_KEY_CHANGE, "false"))) {
if (!partKeysPartiallyEqual) {
throw new InvalidOperationException("partition keys can not be changed.");
}
}
}
// The data location changes (and data may need to move) in two cases: either the replication
// flow has already changed it (replDataLocationChanged), or this is a rename where:
// 1) the table is not a virtual view, and
// 2) the table is not an external table, and
// 3) the user did not change the table location (or the new location is empty), and
// 4) the table was not initially created with a specified location (checked below)
if (replDataLocationChanged || (rename && !oldt.getTableType().equals(TableType.VIRTUAL_VIEW.toString()) && (oldt.getSd().getLocation().compareTo(newt.getSd().getLocation()) == 0 || StringUtils.isEmpty(newt.getSd().getLocation())) && !MetaStoreUtils.isExternalTable(oldt))) {
srcPath = new Path(oldt.getSd().getLocation());
if (replDataLocationChanged) {
// If data location is changed in replication flow, then new path was already set in
// the newt. Also, it is as good as the data is moved and set dataWasMoved=true so that
// location in partitions are also updated accordingly.
// No need to validate if the destPath exists as in replication flow, data gets replicated
// separately.
destPath = new Path(newt.getSd().getLocation());
dataWasMoved = true;
} else {
// Rename flow.
// If a table was created in a user specified location using the DDL like
// create table tbl ... location ...., it should be treated like an external table
// in the table rename, its data location should not be changed. We can check
// if the table directory was created directly under its database directory to tell
// if it is such a table
String oldtRelativePath = wh.getDatabaseManagedPath(olddb).toUri().relativize(srcPath.toUri()).toString();
boolean tableInSpecifiedLoc = !oldtRelativePath.equalsIgnoreCase(name) && !oldtRelativePath.equalsIgnoreCase(name + Path.SEPARATOR);
if (!tableInSpecifiedLoc) {
srcFs = wh.getFs(srcPath);
// get new location
Database db = msdb.getDatabase(catName, newDbName);
assert (isReplicated == HMSHandler.isDbReplicationTarget(db));
Path databasePath = constructRenamedPath(wh.getDatabaseManagedPath(db), srcPath);
destPath = new Path(databasePath, newTblName);
destFs = wh.getFs(destPath);
newt.getSd().setLocation(destPath.toString());
// check that src and dest are on the same file system
if (!FileUtils.equalsFileSystem(srcFs, destFs)) {
throw new InvalidOperationException("table new location " + destPath + " is on a different file system than the old location " + srcPath + ". This operation is not supported");
}
try {
if (destFs.exists(destPath)) {
throw new InvalidOperationException("New location for this table " + TableName.getQualified(catName, newDbName, newTblName) + " already exists : " + destPath);
}
// check that src exists and also checks permissions necessary, rename src to dest
if (srcFs.exists(srcPath) && wh.renameDir(srcPath, destPath, ReplChangeManager.shouldEnableCm(olddb, oldt))) {
dataWasMoved = true;
}
} catch (IOException | MetaException e) {
LOG.error("Alter Table operation for " + dbname + "." + name + " failed.", e);
throw new InvalidOperationException("Alter Table operation for " + dbname + "." + name + " failed to move data due to: '" + getSimpleMessage(e) + "' See hive log file for details.");
}
if (!HiveMetaStore.isRenameAllowed(olddb, db)) {
LOG.error("Alter Table operation for " + TableName.getQualified(catName, dbname, name) + "to new table = " + TableName.getQualified(catName, newDbName, newTblName) + " failed ");
throw new MetaException("Alter table not allowed for table " + TableName.getQualified(catName, dbname, name) + "to new table = " + TableName.getQualified(catName, newDbName, newTblName));
}
}
}
if (isPartitionedTable) {
String oldTblLocPath = srcPath.toUri().getPath();
String newTblLocPath = dataWasMoved ? destPath.toUri().getPath() : null;
// also the location field in partition
parts = msdb.getPartitions(catName, dbname, name, -1);
Multimap<Partition, ColumnStatistics> columnStatsNeedUpdated = ArrayListMultimap.create();
for (Partition part : parts) {
String oldPartLoc = part.getSd().getLocation();
if (dataWasMoved && oldPartLoc.contains(oldTblLocPath)) {
URI oldUri = new Path(oldPartLoc).toUri();
String newPath = oldUri.getPath().replace(oldTblLocPath, newTblLocPath);
Path newPartLocPath = new Path(oldUri.getScheme(), oldUri.getAuthority(), newPath);
part.getSd().setLocation(newPartLocPath.toString());
}
part.setDbName(newDbName);
part.setTableName(newTblName);
List<ColumnStatistics> multiColStats = updateOrGetPartitionColumnStats(msdb, catName, dbname, name, part.getValues(), part.getSd().getCols(), oldt, part, null, null);
for (ColumnStatistics colStats : multiColStats) {
columnStatsNeedUpdated.put(part, colStats);
}
}
// Do not verify stats parameters on a partitioned table.
msdb.alterTable(catName, dbname, name, newt, null);
// alterPartition is only for changing the partition location in the table rename
if (dataWasMoved) {
int partsToProcess = parts.size();
int partitionBatchSize = MetastoreConf.getIntVar(handler.getConf(), MetastoreConf.ConfVars.BATCH_RETRIEVE_MAX);
int batchStart = 0;
while (partsToProcess > 0) {
int batchEnd = Math.min(batchStart + partitionBatchSize, parts.size());
List<Partition> partBatch = parts.subList(batchStart, batchEnd);
int partBatchSize = partBatch.size();
partsToProcess -= partBatchSize;
batchStart += partBatchSize;
List<List<String>> partValues = new ArrayList<>(partBatchSize);
for (Partition part : partBatch) {
partValues.add(part.getValues());
}
msdb.alterPartitions(catName, newDbName, newTblName, partValues, partBatch, newt.getWriteId(), writeIdList);
}
}
Deadline.checkTimeout();
for (Entry<Partition, ColumnStatistics> partColStats : columnStatsNeedUpdated.entries()) {
ColumnStatistics newPartColStats = partColStats.getValue();
newPartColStats.getStatsDesc().setDbName(newDbName);
newPartColStats.getStatsDesc().setTableName(newTblName);
msdb.updatePartitionColumnStatistics(newPartColStats, partColStats.getKey().getValues(), writeIdList, newt.getWriteId());
}
} else {
alterTableUpdateTableColumnStats(msdb, oldt, newt, environmentContext, writeIdList, conf, null);
}
} else {
// operations other than table rename
if (MetaStoreServerUtils.requireCalStats(null, null, newt, environmentContext) && !isPartitionedTable) {
Database db = msdb.getDatabase(catName, newDbName);
assert (isReplicated == HMSHandler.isDbReplicationTarget(db));
// Update table stats. For partitioned table, we update stats in alterPartition()
MetaStoreServerUtils.updateTableStatsSlow(db, newt, wh, false, true, environmentContext);
}
if (isPartitionedTable) {
// Currently only column related changes can be cascaded in alter table
boolean runPartitionMetadataUpdate = (cascade && !MetaStoreServerUtils.areSameColumns(oldt.getSd().getCols(), newt.getSd().getCols()));
// we may skip the update entirely if there are only new columns added
runPartitionMetadataUpdate |= !cascade && !MetaStoreServerUtils.arePrefixColumns(oldt.getSd().getCols(), newt.getSd().getCols());
boolean retainOnColRemoval = MetastoreConf.getBoolVar(handler.getConf(), MetastoreConf.ConfVars.COLSTATS_RETAIN_ON_COLUMN_REMOVAL);
if (runPartitionMetadataUpdate) {
if (cascade || retainOnColRemoval) {
parts = msdb.getPartitions(catName, dbname, name, -1);
for (Partition part : parts) {
Partition oldPart = new Partition(part);
List<FieldSchema> oldCols = part.getSd().getCols();
part.getSd().setCols(newt.getSd().getCols());
List<ColumnStatistics> colStats = updateOrGetPartitionColumnStats(msdb, catName, dbname, name, part.getValues(), oldCols, oldt, part, null, null);
assert (colStats.isEmpty());
Deadline.checkTimeout();
if (cascade) {
msdb.alterPartition(catName, dbname, name, part.getValues(), part, writeIdList);
} else {
// update changed properties (stats)
oldPart.setParameters(part.getParameters());
msdb.alterPartition(catName, dbname, name, part.getValues(), oldPart, writeIdList);
}
}
} else {
// clear all column stats to prevent incorrect behaviour in case the same column is reintroduced
TableName tableName = new TableName(catName, dbname, name);
msdb.deleteAllPartitionColumnStatistics(tableName, writeIdList);
}
// Don't validate table-level stats for a partitioned table.
msdb.alterTable(catName, dbname, name, newt, null);
} else {
LOG.warn("Alter table not cascaded to partitions.");
alterTableUpdateTableColumnStats(msdb, oldt, newt, environmentContext, writeIdList, conf, null);
}
} else {
alterTableUpdateTableColumnStats(msdb, oldt, newt, environmentContext, writeIdList, conf, null);
}
}
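// Notify transactional listeners before the commit below so that the ALTER_TABLE event can be persisted as part of the same metastore transaction.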
if (transactionalListeners != null && !transactionalListeners.isEmpty()) {
txnAlterTableEventResponses = MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventMessage.EventType.ALTER_TABLE, new AlterTableEvent(oldt, newt, false, true, newt.getWriteId(), handler, isReplicated), environmentContext);
}
// commit the changes
success = msdb.commitTransaction();
} catch (InvalidObjectException e) {
LOG.debug("Failed to get object from Metastore ", e);
throw new InvalidOperationException("Unable to change partition or table." + " Check metastore logs for detailed stack." + e.getMessage());
} catch (InvalidInputException e) {
LOG.debug("Accessing Metastore failed due to invalid input ", e);
throw new InvalidOperationException("Unable to change partition or table." + " Check metastore logs for detailed stack." + e.getMessage());
} catch (NoSuchObjectException e) {
LOG.debug("Object not found in metastore ", e);
throw new InvalidOperationException("Unable to change partition or table. Object " + e.getMessage() + " does not exist." + " Check metastore logs for detailed stack.");
} finally {
if (success) {
// If data location is changed in replication flow, then need to delete the old path.
if (replDataLocationChanged) {
assert (olddb != null);
assert (oldt != null);
Path deleteOldDataLoc = new Path(oldt.getSd().getLocation());
boolean isSkipTrash = MetaStoreUtils.isSkipTrash(oldt.getParameters());
try {
wh.deleteDir(deleteOldDataLoc, true, isSkipTrash, ReplChangeManager.shouldEnableCm(olddb, oldt));
LOG.info("Deleted the old data location: {} for the table: {}", deleteOldDataLoc, dbname + "." + name);
} catch (MetaException ex) {
// Eat the exception as it doesn't affect the state of existing tables.
// The user is expected to drop this path manually in that case, so only a warning is logged.
LOG.warn("Unable to delete the old data location: {} for the table: {}", deleteOldDataLoc, dbname + "." + name);
}
}
} else {
LOG.error("Failed to alter table " + TableName.getQualified(catName, dbname, name));
msdb.rollbackTransaction();
if (!replDataLocationChanged && dataWasMoved) {
try {
if (destFs.exists(destPath)) {
if (!destFs.rename(destPath, srcPath)) {
LOG.error("Failed to restore data from " + destPath + " to " + srcPath + " in alter table failure. Manual restore is needed.");
}
}
} catch (IOException e) {
LOG.error("Failed to restore data from " + destPath + " to " + srcPath + " in alter table failure. Manual restore is needed.");
}
}
}
}
if (!listeners.isEmpty()) {
// I don't think event notifications in case of failures are necessary, but other HMS operations
// make this call whether the event failed or succeeded. To make this behavior consistent,
// this call is made for failed events also.
MetaStoreListenerNotifier.notifyEvent(listeners, EventMessage.EventType.ALTER_TABLE, new AlterTableEvent(oldt, newt, false, success, newt.getWriteId(), handler, isReplicated), environmentContext, txnAlterTableEventResponses, msdb);
}
}
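As a footnote on the TableName usages above: TableName.getQualified(catName, dbName, tableName) builds the qualified-name string used in the log and error messages, while the three-argument constructor (as passed to deleteAllPartitionColumnStatistics) creates a value object identifying the table. A minimal sketch with placeholder identifiers, not taken from the Hive source:
// Illustrative only: "hive", "sales" and "orders" are made-up names.
String qualified = TableName.getQualified("hive", "sales", "orders"); // qualified name string for messages and logs
TableName ref = new TableName("hive", "sales", "orders"); // value object passed to metastore APIs such as deleteAllPartitionColumnStatistics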