Use of org.apache.iceberg.exceptions.NoSuchTableException in project metacat by Netflix.
The class IcebergTableHandler, method getIcebergTable.
/**
 * get iceberg table.
 *
 * @param tableName             table name
 * @param tableMetadataLocation table metadata location
 * @param includeInfoDetails    if true, will include more details like the manifest file content
 * @return iceberg table
 */
public IcebergTableWrapper getIcebergTable(final QualifiedName tableName,
                                           final String tableMetadataLocation,
                                           final boolean includeInfoDetails) {
    final long start = this.registry.clock().wallTime();
    try {
        this.icebergTableCriteria.checkCriteria(tableName, tableMetadataLocation);
        log.debug("Loading icebergTable {} from {}", tableName, tableMetadataLocation);
        final IcebergMetastoreTables icebergMetastoreTables = new IcebergMetastoreTables(
            new IcebergTableOps(conf, tableMetadataLocation, connectorContext.getConfig(), icebergTableOpsProxy));
        final Table table = icebergMetastoreTables.loadTable(
            HiveTableUtil.qualifiedNameToTableIdentifier(tableName));
        final Map<String, String> extraProperties = Maps.newHashMap();
        if (includeInfoDetails) {
            extraProperties.put(DirectSqlTable.PARAM_METADATA_CONTENT,
                TableMetadataParser.toJson(icebergMetastoreTables.getTableOps().current()));
        }
        return new IcebergTableWrapper(table, extraProperties);
    } catch (NotFoundException | NoSuchTableException e) {
        throw new InvalidMetaException(tableName, e);
    } finally {
        final long duration = registry.clock().wallTime() - start;
        log.info("Time taken to getIcebergTable {} is {} ms", tableName, duration);
        this.recordTimer(IcebergRequestMetrics.TagLoadTable.getMetricName(), duration);
        this.increaseCounter(IcebergRequestMetrics.TagLoadTable.getMetricName(), tableName);
    }
}
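The method above folds both NotFoundException and NoSuchTableException into metacat's InvalidMetaException, so callers see a single connector-level failure type. A minimal sketch of the same translation pattern against the generic Catalog API (TableLoader and the IllegalArgumentException mapping are illustrative, not part of metacat):

import org.apache.iceberg.Table;
import org.apache.iceberg.catalog.Catalog;
import org.apache.iceberg.catalog.TableIdentifier;
import org.apache.iceberg.exceptions.NoSuchTableException;

public final class TableLoader {
    private final Catalog catalog; // any Catalog implementation, e.g. HiveCatalog

    public TableLoader(final Catalog catalog) {
        this.catalog = catalog;
    }

    public Table load(final TableIdentifier identifier) {
        try {
            // Catalog.loadTable throws NoSuchTableException when the table is missing
            return catalog.loadTable(identifier);
        } catch (NoSuchTableException e) {
            // translate Iceberg's runtime exception into a caller-facing type,
            // as getIcebergTable does with InvalidMetaException
            throw new IllegalArgumentException("Not an existing Iceberg table: " + identifier, e);
        }
    }
}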
Use of org.apache.iceberg.exceptions.NoSuchTableException in project hive by apache.
The class HiveCatalog, method dropTable.
@Override
public boolean dropTable(TableIdentifier identifier, boolean purge) {
    if (!isValidIdentifier(identifier)) {
        return false;
    }
    String database = identifier.namespace().level(0);
    TableOperations ops = newTableOps(identifier);
    TableMetadata lastMetadata;
    if (purge && ops.current() != null) {
        lastMetadata = ops.current();
    } else {
        lastMetadata = null;
    }
    try {
        clients.run(client -> {
            client.dropTable(database, identifier.name(),
                false /* do not delete data */,
                false);
            return null;
        });
        if (purge && lastMetadata != null) {
            CatalogUtil.dropTableData(ops.io(), lastMetadata);
        }
        LOG.info("Dropped table: {}", identifier);
        return true;
    } catch (NoSuchTableException | NoSuchObjectException e) {
        LOG.info("Skipping drop, table does not exist: {}", identifier, e);
        return false;
    } catch (TException e) {
        throw new RuntimeException("Failed to drop " + identifier, e);
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new RuntimeException("Interrupted in call to dropTable", e);
    }
}
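Note that lastMetadata is captured before the metastore drop: once the HMS entry is gone the metadata can no longer be refreshed, so the snapshot taken up front is what allows dropTableData to purge files afterwards. NoSuchTableException is swallowed and turned into a false return, so a caller can drop idempotently. A hedged caller sketch, assuming any Catalog implementation with the same contract (DropTableExample is hypothetical):

import org.apache.iceberg.catalog.Catalog;
import org.apache.iceberg.catalog.TableIdentifier;

public final class DropTableExample {
    public static boolean drop(final Catalog catalog, final String db, final String table) {
        final TableIdentifier id = TableIdentifier.of(db, table);
        // returns false instead of throwing when the table is absent, because
        // HiveCatalog.dropTable catches NoSuchTableException/NoSuchObjectException
        return catalog.dropTable(id, true /* purge data and metadata */);
    }
}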
Use of org.apache.iceberg.exceptions.NoSuchTableException in project hive by apache.
The class HiveTableOperations, method doRefresh.
@Override
protected void doRefresh() {
    String metadataLocation = null;
    try {
        Table table = metaClients.run(client -> client.getTable(database, tableName));
        validateTableIsIceberg(table, fullName);
        metadataLocation = table.getParameters().get(METADATA_LOCATION_PROP);
    } catch (NoSuchObjectException e) {
        if (currentMetadataLocation() != null) {
            throw new NoSuchTableException("No such table: %s.%s", database, tableName);
        }
    } catch (TException e) {
        String errMsg = String.format("Failed to get table info from metastore %s.%s", database, tableName);
        throw new RuntimeException(errMsg, e);
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new RuntimeException("Interrupted during refresh", e);
    }
    refreshFromMetadataLocation(metadataLocation,
        HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVE_ICEBERG_METADATA_REFRESH_MAX_RETRIES));
}
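The catch block distinguishes two cases: on a first load (currentMetadataLocation() is null) a missing HMS entry simply leaves metadataLocation null, while a table that had previously been loaded escalates to NoSuchTableException. A rough sketch of that contract in isolation, collapsing the NoSuchObjectException catch into a null check (RefreshContract and resolveMetadataLocation are illustrative names):

import org.apache.iceberg.exceptions.NoSuchTableException;

public final class RefreshContract {
    static String resolveMetadataLocation(final String fromMetastore, final String currentLocation,
                                          final String database, final String tableName) {
        if (fromMetastore == null && currentLocation != null) {
            // NoSuchTableException takes a format string plus arguments, String.format-style
            throw new NoSuchTableException("No such table: %s.%s", database, tableName);
        }
        // may be null on the very first load of a brand-new table, which is tolerated
        return fromMetastore;
    }
}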
Use of org.apache.iceberg.exceptions.NoSuchTableException in project hive by apache.
The class HiveIcebergMetaHook, method preAlterTable.
@Override
public void preAlterTable(org.apache.hadoop.hive.metastore.api.Table hmsTable, EnvironmentContext context)
        throws MetaException {
    setupAlterOperationType(hmsTable, context);
    catalogProperties = getCatalogProperties(hmsTable);
    try {
        icebergTable = IcebergTableUtil.getTable(conf, catalogProperties);
    } catch (NoSuchTableException nte) {
        context.getProperties().put(MIGRATE_HIVE_TO_ICEBERG, "true");
        // If the iceberg table does not exist, then this is an ALTER command aimed at migrating the table
        // to iceberg. First we must check whether it's eligible for migration to iceberg; if so, we will
        // create the iceberg table in commitAlterTable and go ahead with the migration.
        assertTableCanBeMigrated(hmsTable);
        isTableMigration = true;
        StorageDescriptor sd = hmsTable.getSd();
        preAlterTableProperties = new PreAlterTableProperties();
        preAlterTableProperties.tableLocation = sd.getLocation();
        preAlterTableProperties.format = sd.getInputFormat();
        preAlterTableProperties.schema = schema(catalogProperties, hmsTable);
        preAlterTableProperties.partitionKeys = hmsTable.getPartitionKeys();
        context.getProperties().put(HiveMetaHook.ALLOW_PARTITION_KEY_CHANGE, "true");
        // If there are partition keys specified, remove them from the HMS table and add them to the column list
        if (hmsTable.isSetPartitionKeys() && !hmsTable.getPartitionKeys().isEmpty()) {
            List<PartitionTransformSpec> spec =
                PartitionTransform.getPartitionTransformSpec(hmsTable.getPartitionKeys());
            if (!SessionStateUtil.addResource(conf, hive_metastoreConstants.PARTITION_TRANSFORM_SPEC, spec)) {
                throw new MetaException("Query state attached to Session state must be not null. "
                    + "Partition transform metadata cannot be saved.");
            }
            hmsTable.getSd().getCols().addAll(hmsTable.getPartitionKeys());
            hmsTable.setPartitionKeysIsSet(false);
        }
        preAlterTableProperties.spec = spec(conf, preAlterTableProperties.schema, hmsTable);
        sd.setInputFormat(HiveIcebergInputFormat.class.getCanonicalName());
        sd.setOutputFormat(HiveIcebergOutputFormat.class.getCanonicalName());
        sd.setSerdeInfo(new SerDeInfo("icebergSerde", HiveIcebergSerDe.class.getCanonicalName(),
            Collections.emptyMap()));
        setCommonHmsTablePropertiesForIceberg(hmsTable);
        // Set an additional table prop to designate that this table has been migrated to Iceberg, i.e.
        // all or some of its data files have not been written out using the Iceberg writer, and therefore
        // those data files do not contain Iceberg field IDs. This makes certain schema evolution operations
        // problematic, so we want to disable these ops for now using this new table prop.
        hmsTable.getParameters().put(MIGRATED_TO_ICEBERG, "true");
        NameMapping nameMapping = MappingUtil.create(preAlterTableProperties.schema);
        hmsTable.getParameters().put(TableProperties.DEFAULT_NAME_MAPPING, NameMappingParser.toJson(nameMapping));
    }
    if (AlterTableType.ADDCOLS.equals(currentAlterTableOp)) {
        handleAddColumns(hmsTable);
    } else if (AlterTableType.REPLACE_COLUMNS.equals(currentAlterTableOp)) {
        assertNotMigratedTable(hmsTable.getParameters(), currentAlterTableOp.getName().toUpperCase());
        handleReplaceColumns(hmsTable);
    } else if (AlterTableType.RENAME_COLUMN.equals(currentAlterTableOp)) {
        // Passing in the "CHANGE COLUMN" string instead, since RENAME COLUMN is not part of the SQL syntax
        // (not to mention that users can also change data types or reorder columns with this alter op type,
        // so its name is misleading).
        assertNotMigratedTable(hmsTable.getParameters(), "CHANGE COLUMN");
        handleChangeColumn(hmsTable);
    }
}
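Here NoSuchTableException is not an error at all: it is the signal that the ALTER statement targets a plain Hive table and should be handled as a Hive-to-Iceberg migration. A stripped-down sketch of that branch decision against the generic Catalog API (AlterRouting and isMigration are hypothetical helpers, standing in for the hook's internal state changes):

import org.apache.iceberg.catalog.Catalog;
import org.apache.iceberg.catalog.TableIdentifier;
import org.apache.iceberg.exceptions.NoSuchTableException;

public final class AlterRouting {
    static boolean isMigration(final Catalog catalog, final TableIdentifier id) {
        try {
            catalog.loadTable(id);
            return false; // Iceberg metadata exists: a regular ALTER on an Iceberg table
        } catch (NoSuchTableException nte) {
            return true;  // no Iceberg table yet: treat the ALTER as a migration
        }
    }
}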
Use of org.apache.iceberg.exceptions.NoSuchTableException in project hive by apache.
The class HiveIcebergMetaHook, method preCreateTable.
@Override
public void preCreateTable(org.apache.hadoop.hive.metastore.api.Table hmsTable) {
    this.catalogProperties = getCatalogProperties(hmsTable);
    // Set the table type even for non HiveCatalog based tables
    hmsTable.getParameters().put(BaseMetastoreTableOperations.TABLE_TYPE_PROP,
        BaseMetastoreTableOperations.ICEBERG_TABLE_TYPE_VALUE.toUpperCase());
    if (!Catalogs.hiveCatalog(conf, catalogProperties)) {
        // For non-HiveCatalog tables too, we should set the input and output format
        // so that the table can be read by other engines like Impala
        hmsTable.getSd().setInputFormat(HiveIcebergInputFormat.class.getCanonicalName());
        hmsTable.getSd().setOutputFormat(HiveIcebergOutputFormat.class.getCanonicalName());
        // If not using HiveCatalog check for existing table
        try {
            this.icebergTable = IcebergTableUtil.getTable(conf, catalogProperties);
            Preconditions.checkArgument(catalogProperties.getProperty(InputFormatConfig.TABLE_SCHEMA) == null,
                "Iceberg table already created - can not use provided schema");
            Preconditions.checkArgument(catalogProperties.getProperty(InputFormatConfig.PARTITION_SPEC) == null,
                "Iceberg table already created - can not use provided partition specification");
            LOG.info("Iceberg table already exists {}", icebergTable);
            return;
        } catch (NoSuchTableException nte) {
            // If the table does not exist we will create it below
        }
    }
    // If the table does not exist collect data for table creation
    // - InputFormatConfig.TABLE_SCHEMA, InputFormatConfig.PARTITION_SPEC takes precedence so the user can
    //   override the Iceberg schema and specification generated by the code
    Schema schema = schema(catalogProperties, hmsTable);
    PartitionSpec spec = spec(conf, schema, hmsTable);
    // If there are partition keys specified remove them from the HMS table and add them to the column list
    if (hmsTable.isSetPartitionKeys()) {
        hmsTable.getSd().getCols().addAll(hmsTable.getPartitionKeys());
        hmsTable.setPartitionKeysIsSet(false);
    }
    catalogProperties.put(InputFormatConfig.TABLE_SCHEMA, SchemaParser.toJson(schema));
    catalogProperties.put(InputFormatConfig.PARTITION_SPEC, PartitionSpecParser.toJson(spec));
    setCommonHmsTablePropertiesForIceberg(hmsTable);
}
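For non-HiveCatalog tables the hook probes for an existing table and treats NoSuchTableException as permission to proceed with creation, an ask-forgiveness existence check. A hedged sketch of the same get-or-create shape against the generic Catalog API (GetOrCreateExample is illustrative; createTable is called with only the required arguments here):

import org.apache.iceberg.Schema;
import org.apache.iceberg.Table;
import org.apache.iceberg.catalog.Catalog;
import org.apache.iceberg.catalog.TableIdentifier;
import org.apache.iceberg.exceptions.NoSuchTableException;

public final class GetOrCreateExample {
    static Table getOrCreate(final Catalog catalog, final TableIdentifier id, final Schema schema) {
        try {
            return catalog.loadTable(id); // table already exists, reuse it
        } catch (NoSuchTableException nte) {
            // fall through to creation, mirroring the empty catch in preCreateTable
            return catalog.createTable(id, schema);
        }
    }
}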