Use of org.apache.hadoop.hive.metastore.api.InvalidOperationException in project flink by apache.
The class HiveCatalog, method alterPartition:
@Override
public void alterPartition(ObjectPath tablePath, CatalogPartitionSpec partitionSpec,
        CatalogPartition newPartition, boolean ignoreIfNotExists)
        throws PartitionNotExistException, CatalogException {
    checkNotNull(tablePath, "Table path cannot be null");
    checkNotNull(partitionSpec, "CatalogPartitionSpec cannot be null");
    checkNotNull(newPartition, "New partition cannot be null");
    // the target doesn't exist
    try {
        Table hiveTable = getHiveTable(tablePath);
        boolean isHiveTable = isHiveTable(hiveTable.getParameters());
        if (!isHiveTable) {
            throw new CatalogException("Currently only supports partition for hive tables");
        }
        Partition hivePartition = getHivePartition(hiveTable, partitionSpec);
        if (hivePartition == null) {
            if (ignoreIfNotExists) {
                return;
            }
            throw new PartitionNotExistException(getName(), tablePath, partitionSpec);
        }
        AlterTableOp op = HiveTableUtil.extractAlterTableOp(newPartition.getProperties());
        if (op == null) {
            throw new CatalogException(ALTER_TABLE_OP + " is missing for alter table operation");
        }
        alterTableViaProperties(op, null, null, hivePartition.getParameters(),
                newPartition.getProperties(), hivePartition.getSd());
        client.alter_partition(tablePath.getDatabaseName(), tablePath.getObjectName(), hivePartition);
    } catch (NoSuchObjectException e) {
        if (!ignoreIfNotExists) {
            throw new PartitionNotExistException(getName(), tablePath, partitionSpec, e);
        }
    } catch (InvalidOperationException | MetaException | TableNotExistException | PartitionSpecInvalidException e) {
        throw new PartitionNotExistException(getName(), tablePath, partitionSpec, e);
    } catch (TException e) {
        throw new CatalogException(
                String.format("Failed to alter existing partition with new partition %s of table %s",
                        partitionSpec, tablePath),
                e);
    }
}
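The method has three failure modes: a CatalogException for non-Hive tables or a missing alter-table op marker, a PartitionNotExistException when the partition or its table is absent and ignoreIfNotExists is false, and a CatalogException wrapping any other Thrift-level TException. Below is a minimal caller-side sketch with hypothetical database, table, and partition names; it assumes the catalog has been created and opened, and that "alter.table.op" is the marker key read by HiveTableUtil.extractAlterTableOp (verify both against your Flink version).

import java.util.HashMap;
import java.util.Map;
import org.apache.flink.table.catalog.CatalogPartition;
import org.apache.flink.table.catalog.CatalogPartitionImpl;
import org.apache.flink.table.catalog.CatalogPartitionSpec;
import org.apache.flink.table.catalog.ObjectPath;
import org.apache.flink.table.catalog.hive.HiveCatalog;

public class AlterPartitionExample {
    public static void main(String[] args) throws Exception {
        // Hypothetical catalog name, default database, and Hive conf dir.
        HiveCatalog catalog = new HiveCatalog("myhive", "default", "/etc/hive/conf");
        catalog.open();
        try {
            ObjectPath tablePath = new ObjectPath("default", "sales");
            Map<String, String> specMap = new HashMap<>();
            specMap.put("dt", "2021-01-01");
            CatalogPartitionSpec spec = new CatalogPartitionSpec(specMap);

            Map<String, String> props = new HashMap<>();
            // Assumption: "alter.table.op" is the key extractAlterTableOp looks for;
            // without it, alterPartition throws a CatalogException.
            props.put("alter.table.op", "CHANGE_TBL_PROPS");
            props.put("note", "reprocessed");
            CatalogPartition newPartition = new CatalogPartitionImpl(props, "updated partition");

            // ignoreIfNotExists = true: return silently if the partition is gone.
            catalog.alterPartition(tablePath, spec, newPartition, true);
        } finally {
            catalog.close();
        }
    }
}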
Use of org.apache.hadoop.hive.metastore.api.InvalidOperationException in project hive by apache.
The class AuthorizationPreEventListener, method invalidOperationException:
private InvalidOperationException invalidOperationException(Exception e) {
    InvalidOperationException ex = new InvalidOperationException(e.getMessage());
    ex.initCause(e.getCause());
    return ex;
}
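The helper copies the message of the passed-in exception but chains its cause, so the resulting InvalidOperationException points at e.getCause(), not at e itself; e's own frame is dropped from the chain. A standalone sketch of what a caller gets back, using hypothetical exceptions:

import org.apache.hadoop.hive.metastore.api.InvalidOperationException;

public class WrapExample {
    public static void main(String[] args) {
        Exception root = new IllegalStateException("permission denied");
        Exception wrapper = new RuntimeException("authorization failed", root);

        // Mirrors the helper above.
        InvalidOperationException ex = new InvalidOperationException(wrapper.getMessage());
        ex.initCause(wrapper.getCause());

        System.out.println(ex.getMessage()); // authorization failed
        System.out.println(ex.getCause());   // java.lang.IllegalStateException: permission denied
    }
}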
Use of org.apache.hadoop.hive.metastore.api.InvalidOperationException in project hive by apache.
The class HiveMetaStoreAuthorizer, method onEvent:
@Override
public final void onEvent(PreEventContext preEventContext)
        throws MetaException, NoSuchObjectException, InvalidOperationException {
    LOG.debug("==> HiveMetaStoreAuthorizer.onEvent(): EventType=" + preEventContext.getEventType());
    try {
        HiveMetaStoreAuthzInfo authzContext = buildAuthzContext(preEventContext);
        if (!skipAuthorization(authzContext)) {
            HiveAuthorizer hiveAuthorizer = createHiveMetaStoreAuthorizer();
            checkPrivileges(authzContext, hiveAuthorizer);
        }
    } catch (Exception e) {
        LOG.error("HiveMetaStoreAuthorizer.onEvent(): failed", e);
        throw new MetaException(e.getMessage());
    }
    LOG.debug("<== HiveMetaStoreAuthorizer.onEvent(): EventType=" + preEventContext.getEventType());
}
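Every failure inside the try block is converted to a MetaException carrying only the original message; the full stack trace survives in the server log via LOG.error, but not in the Thrift exception returned to the client. A minimal pre-event listener sketch following the same convert-and-rethrow pattern; the class name and audit body are hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.MetaStorePreEventListener;
import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.events.PreEventContext;

public class AuditingPreEventListener extends MetaStorePreEventListener {

    public AuditingPreEventListener(Configuration config) {
        super(config);
    }

    @Override
    public void onEvent(PreEventContext context)
            throws MetaException, NoSuchObjectException, InvalidOperationException {
        try {
            audit(context);
        } catch (Exception e) {
            // As in onEvent above: only the message crosses the Thrift boundary,
            // because MetaException is a generated Thrift type.
            throw new MetaException(e.getMessage());
        }
    }

    private void audit(PreEventContext context) {
        // Hypothetical hook body.
        System.out.println("metastore pre-event: " + context.getEventType());
    }
}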
Use of org.apache.hadoop.hive.metastore.api.InvalidOperationException in project hive by apache.
The class AbstractAlterTableOperation, method finalizeAlterTableWithWriteIdOp:
private void finalizeAlterTableWithWriteIdOp(Table table, Table oldTable, List<Partition> partitions,
        DDLOperationContext context, EnvironmentContext environmentContext) throws HiveException {
    if (partitions == null) {
        updateModifiedParameters(table.getTTable().getParameters(), context.getConf());
        table.checkValidity(context.getConf());
    } else {
        for (Partition partition : partitions) {
            updateModifiedParameters(partition.getParameters(), context.getConf());
        }
    }
    try {
        environmentContext.putToProperties(HiveMetaHook.ALTER_TABLE_OPERATION_TYPE, desc.getType().name());
        if (desc.getType() == AlterTableType.ADDPROPS) {
            Map<String, String> oldTableParameters = oldTable.getParameters();
            environmentContext.putToProperties(HiveMetaHook.SET_PROPERTIES,
                    table.getParameters().entrySet().stream()
                            .filter(e -> !oldTableParameters.containsKey(e.getKey())
                                    || !oldTableParameters.get(e.getKey()).equals(e.getValue()))
                            .map(Map.Entry::getKey)
                            .collect(Collectors.joining(HiveMetaHook.PROPERTIES_SEPARATOR)));
        } else if (desc.getType() == AlterTableType.DROPPROPS) {
            Map<String, String> newTableParameters = table.getParameters();
            environmentContext.putToProperties(HiveMetaHook.UNSET_PROPERTIES,
                    oldTable.getParameters().entrySet().stream()
                            .filter(e -> !newTableParameters.containsKey(e.getKey()))
                            .map(Map.Entry::getKey)
                            .collect(Collectors.joining(HiveMetaHook.PROPERTIES_SEPARATOR)));
        }
        if (partitions == null) {
            long writeId = desc.getWriteId() != null ? desc.getWriteId() : 0;
            try {
                context.getDb().alterTable(desc.getDbTableName(), table, desc.isCascade(), environmentContext, true, writeId);
            } catch (HiveException ex) {
                if (Boolean.valueOf(environmentContext.getProperties()
                        .getOrDefault(HiveMetaHook.INITIALIZE_ROLLBACK_MIGRATION, "false"))) {
                    // In case of a rollback of alter table, do the following:
                    // 1. restore serde info and input/output format
                    // 2. remove table columns that used to be partition columns
                    // 3. add partition columns
                    table.getSd().setInputFormat(oldTable.getSd().getInputFormat());
                    table.getSd().setOutputFormat(oldTable.getSd().getOutputFormat());
                    table.getSd().setSerdeInfo(oldTable.getSd().getSerdeInfo());
                    table.getSd().getCols().removeAll(oldTable.getPartitionKeys());
                    table.setPartCols(oldTable.getPartitionKeys());
                    table.getParameters().clear();
                    table.getParameters().putAll(oldTable.getParameters());
                    context.getDb().alterTable(desc.getDbTableName(), table, desc.isCascade(), environmentContext, true, writeId);
                    throw new HiveException("Error occurred during hive table migration to iceberg. Table properties "
                            + "and serde info was reverted to its original value. Partition info was lost during the migration "
                            + "process, but it can be reverted by running MSCK REPAIR on table/partition level.\n"
                            + "Retrying the migration without issuing MSCK REPAIR on a partitioned table will result in an empty "
                            + "iceberg table.");
                } else {
                    throw ex;
                }
            }
        } else {
            // Note: this is necessary for the UPDATE_STATISTICS command, which operates via ADDPROPS (why?).
            // For any other updates, we don't want to do a txn check on partitions when altering the table.
            boolean isTxn = false;
            if (desc.getPartitionSpec() != null && desc.getType() == AlterTableType.ADDPROPS) {
                // ADDPROPS is used to add replication properties like repl.last.id, which isn't a
                // transactional change. In the replication case, check for transactional properties explicitly.
                Map<String, String> props = desc.getProps();
                if (desc.getReplicationSpec() != null && desc.getReplicationSpec().isInReplicationScope()) {
                    isTxn = (props.get(StatsSetupConst.COLUMN_STATS_ACCURATE) != null);
                } else {
                    isTxn = true;
                }
            }
            String qualifiedName = TableName.getDbTable(table.getTTable().getDbName(), table.getTTable().getTableName());
            context.getDb().alterPartitions(qualifiedName, partitions, environmentContext, isTxn);
        }
        // Add constraints if necessary
        if (desc instanceof AbstractAlterTableWithConstraintsDesc) {
            AlterTableAddConstraintOperation.addConstraints((AbstractAlterTableWithConstraintsDesc) desc, context.getDb());
        }
    } catch (InvalidOperationException e) {
        LOG.error("alter table: ", e);
        throw new HiveException(e, ErrorMsg.GENERIC_ERROR);
    }
    // Don't acquire locks for any of these; we have already asked for them in AbstractBaseAlterTableAnalyzer.
    if (partitions != null) {
        for (Partition partition : partitions) {
            context.getWork().getInputs().add(new ReadEntity(partition));
            DDLUtils.addIfAbsentByName(new WriteEntity(partition, WriteEntity.WriteType.DDL_NO_LOCK), context);
        }
    } else {
        context.getWork().getInputs().add(new ReadEntity(oldTable));
        DDLUtils.addIfAbsentByName(new WriteEntity(table, WriteEntity.WriteType.DDL_NO_LOCK), context);
    }
}
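For ADDPROPS the method records under HiveMetaHook.SET_PROPERTIES every key whose value is new or changed relative to the old table, and for DROPPROPS it records under HiveMetaHook.UNSET_PROPERTIES every key that existed before but is now gone. The sketch below reproduces that diff logic in isolation with plain maps; the separator and map contents are hypothetical stand-ins for HiveMetaHook.PROPERTIES_SEPARATOR and the real table parameters:

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.stream.Collectors;

public class PropertyDiffExample {
    // Assumption: stand-in for HiveMetaHook.PROPERTIES_SEPARATOR.
    private static final String SEPARATOR = ",";

    public static void main(String[] args) {
        Map<String, String> oldParams = new LinkedHashMap<>();
        oldParams.put("owner", "etl");
        oldParams.put("retention", "30");

        Map<String, String> newParams = new LinkedHashMap<>(oldParams);
        newParams.put("retention", "90");    // changed value
        newParams.put("comment", "nightly"); // new key

        // ADDPROPS: keys that are new or whose values changed.
        String setProps = newParams.entrySet().stream()
                .filter(e -> !oldParams.containsKey(e.getKey())
                        || !oldParams.get(e.getKey()).equals(e.getValue()))
                .map(Map.Entry::getKey)
                .collect(Collectors.joining(SEPARATOR));
        System.out.println("SET_PROPERTIES:   " + setProps); // retention,comment

        // DROPPROPS: keys present before but absent now.
        Map<String, String> afterDrop = new LinkedHashMap<>(newParams);
        afterDrop.remove("owner");
        String unsetProps = oldParams.entrySet().stream()
                .filter(e -> !afterDrop.containsKey(e.getKey()))
                .map(Map.Entry::getKey)
                .collect(Collectors.joining(SEPARATOR));
        System.out.println("UNSET_PROPERTIES: " + unsetProps); // owner
    }
}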
Use of org.apache.hadoop.hive.metastore.api.InvalidOperationException in project hive by apache.
The class HCatClientHMSImpl, method renameTable:
@Override
public void renameTable(String dbName, String oldName, String newName) throws HCatException {
    Table tbl;
    try {
        Table oldtbl = hmsClient.getTable(checkDB(dbName), oldName);
        if (oldtbl != null) {
            // TODO : Should be moved out.
            if (oldtbl.getParameters()
                    .get(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE) != null) {
                throw new HCatException("Cannot use rename command on a non-native table");
            }
            tbl = new Table(oldtbl);
            tbl.setTableName(newName);
            hmsClient.alter_table(checkDB(dbName), oldName, tbl);
        }
    } catch (MetaException e) {
        throw new HCatException("MetaException while renaming table", e);
    } catch (NoSuchObjectException e) {
        throw new ObjectNotFoundException("NoSuchObjectException while renaming table", e);
    } catch (InvalidOperationException e) {
        throw new HCatException("InvalidOperationException while renaming table", e);
    } catch (TException e) {
        throw new ConnectionFailureException("TException while renaming table", e);
    }
}
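A minimal caller sketch with hypothetical host and table names; HCatClient.create(Configuration) is the public factory, and each metastore failure surfaces as the HCatException subtype mapped in the catch blocks above (a missing source table arrives as the ObjectNotFoundException subtype):

import org.apache.hadoop.conf.Configuration;
import org.apache.hive.hcatalog.api.HCatClient;
import org.apache.hive.hcatalog.common.HCatException;

public class RenameTableExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("hive.metastore.uris", "thrift://metastore-host:9083"); // hypothetical URI
        HCatClient client = HCatClient.create(conf);
        try {
            client.renameTable("default", "sales_staging", "sales");
        } catch (HCatException e) {
            System.err.println("rename failed: " + e.getMessage());
        } finally {
            client.close();
        }
    }
}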