Use of org.apache.hadoop.hive.ql.ddl.DDLOperationContext in project hive by apache.
The example below is the finalizeAlterTableWithWriteIdOp method of the class AbstractAlterTableOperation.
private void finalizeAlterTableWithWriteIdOp(Table table, Table oldTable, List<Partition> partitions,
    DDLOperationContext context, EnvironmentContext environmentContext) throws HiveException {
  if (partitions == null) {
    updateModifiedParameters(table.getTTable().getParameters(), context.getConf());
    table.checkValidity(context.getConf());
  } else {
    for (Partition partition : partitions) {
      updateModifiedParameters(partition.getParameters(), context.getConf());
    }
  }

  try {
    environmentContext.putToProperties(HiveMetaHook.ALTER_TABLE_OPERATION_TYPE, desc.getType().name());
    if (desc.getType() == AlterTableType.ADDPROPS) {
      Map<String, String> oldTableParameters = oldTable.getParameters();
      environmentContext.putToProperties(HiveMetaHook.SET_PROPERTIES,
          table.getParameters().entrySet().stream()
              .filter(e -> !oldTableParameters.containsKey(e.getKey()) ||
                  !oldTableParameters.get(e.getKey()).equals(e.getValue()))
              .map(Map.Entry::getKey)
              .collect(Collectors.joining(HiveMetaHook.PROPERTIES_SEPARATOR)));
    } else if (desc.getType() == AlterTableType.DROPPROPS) {
      Map<String, String> newTableParameters = table.getParameters();
      environmentContext.putToProperties(HiveMetaHook.UNSET_PROPERTIES,
          oldTable.getParameters().entrySet().stream()
              .filter(e -> !newTableParameters.containsKey(e.getKey()))
              .map(Map.Entry::getKey)
              .collect(Collectors.joining(HiveMetaHook.PROPERTIES_SEPARATOR)));
    }

    if (partitions == null) {
      long writeId = desc.getWriteId() != null ? desc.getWriteId() : 0;
      try {
        context.getDb().alterTable(desc.getDbTableName(), table, desc.isCascade(), environmentContext, true, writeId);
      } catch (HiveException ex) {
        if (Boolean.valueOf(environmentContext.getProperties()
            .getOrDefault(HiveMetaHook.INITIALIZE_ROLLBACK_MIGRATION, "false"))) {
          // in case of rollback of alter table do the following:
          // 1. restore serde info and input/output format
          // 2. remove table columns which are used to be partition columns
          // 3. add partition columns
          table.getSd().setInputFormat(oldTable.getSd().getInputFormat());
          table.getSd().setOutputFormat(oldTable.getSd().getOutputFormat());
          table.getSd().setSerdeInfo(oldTable.getSd().getSerdeInfo());
          table.getSd().getCols().removeAll(oldTable.getPartitionKeys());
          table.setPartCols(oldTable.getPartitionKeys());

          table.getParameters().clear();
          table.getParameters().putAll(oldTable.getParameters());

          context.getDb().alterTable(desc.getDbTableName(), table, desc.isCascade(), environmentContext, true, writeId);

          throw new HiveException("Error occurred during hive table migration to iceberg. Table properties " +
              "and serde info was reverted to its original value. Partition info was lost during the migration " +
              "process, but it can be reverted by running MSCK REPAIR on table/partition level.\n" +
              "Retrying the migration without issuing MSCK REPAIR on a partitioned table will result in an empty " +
              "iceberg table.");
        } else {
          throw ex;
        }
      }
    } else {
      // Note: this is necessary for UPDATE_STATISTICS command, that operates via ADDPROPS (why?).
      // For any other updates, we don't want to do txn check on partitions when altering table.
      boolean isTxn = false;
      if (desc.getPartitionSpec() != null && desc.getType() == AlterTableType.ADDPROPS) {
        // ADDPROPS is used to add replication properties like repl.last.id, which isn't
        // transactional change. In case of replication check for transactional properties
        // explicitly.
        Map<String, String> props = desc.getProps();
        if (desc.getReplicationSpec() != null && desc.getReplicationSpec().isInReplicationScope()) {
          isTxn = (props.get(StatsSetupConst.COLUMN_STATS_ACCURATE) != null);
        } else {
          isTxn = true;
        }
      }
      String qualifiedName = TableName.getDbTable(table.getTTable().getDbName(), table.getTTable().getTableName());
      context.getDb().alterPartitions(qualifiedName, partitions, environmentContext, isTxn);
    }

    // Add constraints if necessary
    if (desc instanceof AbstractAlterTableWithConstraintsDesc) {
      AlterTableAddConstraintOperation.addConstraints((AbstractAlterTableWithConstraintsDesc) desc, context.getDb());
    }
  } catch (InvalidOperationException e) {
    LOG.error("alter table: ", e);
    throw new HiveException(e, ErrorMsg.GENERIC_ERROR);
  }

  // Don't acquire locks for any of these, we have already asked for them in AbstractBaseAlterTableAnalyzer.
  if (partitions != null) {
    for (Partition partition : partitions) {
      context.getWork().getInputs().add(new ReadEntity(partition));
      DDLUtils.addIfAbsentByName(new WriteEntity(partition, WriteEntity.WriteType.DDL_NO_LOCK), context);
    }
  } else {
    context.getWork().getInputs().add(new ReadEntity(oldTable));
    DDLUtils.addIfAbsentByName(new WriteEntity(table, WriteEntity.WriteType.DDL_NO_LOCK), context);
  }
}
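The updateModifiedParameters helper called at the top of the method is not shown on this page; it stamps "last modified" bookkeeping parameters on the altered table or partition. The following is only a rough sketch under the assumption that it sets last_modified_by and last_modified_time and resolves the user through the session authenticator (org.apache.hadoop.hive.ql.session.SessionState and org.apache.hadoop.hive.conf.HiveConf are assumed here); the real helper in the Hive source may differ.

// Sketch only: assumes the helper just stamps audit parameters on the table/partition parameters map.
// Assumed imports: java.util.Map, org.apache.hadoop.hive.conf.HiveConf,
// org.apache.hadoop.hive.ql.metadata.HiveException, org.apache.hadoop.hive.ql.session.SessionState.
private static void updateModifiedParameters(Map<String, String> params, HiveConf conf) throws HiveException {
  String user = SessionState.getUserFromAuthenticator(); // assumed way of resolving the current user
  params.put("last_modified_by", user);
  params.put("last_modified_time", Long.toString(System.currentTimeMillis() / 1000));
}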
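For context, the SET_PROPERTIES and UNSET_PROPERTIES values placed into the EnvironmentContext above are just separator-joined lists of the property keys that differ between the old and new table, so that metastore hooks can see which properties were set or unset. Below is a minimal, self-contained sketch of that diff logic using plain java.util collections; the separator string, class name, and sample maps are made-up stand-ins (the real code uses HiveMetaHook.PROPERTIES_SEPARATOR and the actual table parameters), not the Hive implementation itself.

import java.util.Map;
import java.util.stream.Collectors;

public class AlterTablePropsDiffSketch {
  // Stand-in for HiveMetaHook.PROPERTIES_SEPARATOR; the real constant may differ.
  private static final String SEPARATOR = ",";

  // Keys that were added or whose value changed (the ADDPROPS / SET_PROPERTIES case).
  static String setProperties(Map<String, String> oldParams, Map<String, String> newParams) {
    return newParams.entrySet().stream()
        .filter(e -> !oldParams.containsKey(e.getKey()) || !oldParams.get(e.getKey()).equals(e.getValue()))
        .map(Map.Entry::getKey)
        .collect(Collectors.joining(SEPARATOR));
  }

  // Keys that were removed (the DROPPROPS / UNSET_PROPERTIES case).
  static String unsetProperties(Map<String, String> oldParams, Map<String, String> newParams) {
    return oldParams.keySet().stream()
        .filter(k -> !newParams.containsKey(k))
        .collect(Collectors.joining(SEPARATOR));
  }

  public static void main(String[] args) {
    Map<String, String> before = Map.of("owner", "hive", "comment", "old");
    Map<String, String> after = Map.of("owner", "hive", "comment", "new", "external", "true");
    System.out.println(setProperties(before, after));   // comment,external (key order may vary)
    System.out.println(unsetProperties(before, after)); // empty: nothing was dropped
  }
}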