Use of org.apache.flink.table.catalog.Catalog in project flink by apache.
In the class SqlToOperationConverter, the method convertAlterTableCompact:
/**
* Convert `ALTER TABLE ... COMPACT` operation to {@link ModifyOperation} for Flink's managed
* table to trigger a compaction batch job.
*/
private ModifyOperation convertAlterTableCompact(
        ObjectIdentifier tableIdentifier,
        ContextResolvedTable contextResolvedTable,
        SqlAlterTableCompact alterTableCompact) {
    Catalog catalog = catalogManager.getCatalog(tableIdentifier.getCatalogName()).orElse(null);
    ResolvedCatalogTable resolvedCatalogTable = contextResolvedTable.getResolvedTable();
    if (ManagedTableListener.isManagedTable(catalog, resolvedCatalogTable)) {
        Map<String, String> partitionKVs = alterTableCompact.getPartitionKVs();
        CatalogPartitionSpec partitionSpec = new CatalogPartitionSpec(Collections.emptyMap());
        if (partitionKVs != null) {
            List<String> partitionKeys = resolvedCatalogTable.getPartitionKeys();
            Set<String> validPartitionKeySet = new HashSet<>(partitionKeys);
            String exMsg =
                    partitionKeys.isEmpty()
                            ? String.format("Table %s is not partitioned.", tableIdentifier)
                            : String.format(
                                    "Available ordered partition columns: [%s]",
                                    partitionKeys.stream()
                                            .collect(Collectors.joining("', '", "'", "'")));
            partitionKVs.forEach(
                    (partitionKey, partitionValue) -> {
                        if (!validPartitionKeySet.contains(partitionKey)) {
                            throw new ValidationException(
                                    String.format(
                                            "Partition column '%s' not defined in the table schema. %s",
                                            partitionKey, exMsg));
                        }
                    });
            partitionSpec = new CatalogPartitionSpec(partitionKVs);
        }
        Map<String, String> compactOptions =
                catalogManager.resolveCompactManagedTableOptions(
                        resolvedCatalogTable, tableIdentifier, partitionSpec);
        QueryOperation child = new SourceQueryOperation(contextResolvedTable, compactOptions);
        return new SinkModifyOperation(
                contextResolvedTable,
                child,
                partitionSpec.getPartitionSpec(),
                false,
                compactOptions);
    }
    throw new ValidationException(
            String.format(
                    "ALTER TABLE COMPACT operation is not supported for non-managed table %s",
                    tableIdentifier));
}
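For context, a minimal sketch (not from the Flink sources) of how this converter is reached from user code: assuming a managed table named managed_rates is already registered in the current catalog, executing the statements below ends up in convertAlterTableCompact. The table name, partition column, and values are illustrative.

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;

public class CompactManagedTableExample {
    public static void main(String[] args) {
        // Compaction is planned as a batch job, so use a batch environment.
        TableEnvironment tEnv = TableEnvironment.create(EnvironmentSettings.inBatchMode());

        // Compact the whole managed table (empty CatalogPartitionSpec in the converter).
        tEnv.executeSql("ALTER TABLE managed_rates COMPACT");

        // Compact a single partition; the key/value pairs become the partitionKVs that
        // convertAlterTableCompact validates against the table's partition keys.
        tEnv.executeSql("ALTER TABLE managed_rates PARTITION (dt = '2022-01-01') COMPACT");
    }
}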
Use of org.apache.flink.table.catalog.Catalog in project flink by apache.
In the class DatabaseCalciteSchema, the method extractTableStats:
private TableStats extractTableStats(
        ContextResolvedTable lookupResult, ObjectIdentifier identifier) {
    if (lookupResult.isTemporary()) {
        return TableStats.UNKNOWN;
    }
    final Catalog catalog = lookupResult.getCatalog().orElseThrow(IllegalStateException::new);
    final ObjectPath tablePath = identifier.toObjectPath();
    try {
        final CatalogTableStatistics tableStatistics = catalog.getTableStatistics(tablePath);
        final CatalogColumnStatistics columnStatistics =
                catalog.getTableColumnStatistics(tablePath);
        return convertToTableStats(tableStatistics, columnStatistics);
    } catch (TableNotExistException e) {
        throw new ValidationException(
                format(
                        "Could not get statistic for table: [%s, %s, %s]",
                        identifier.getCatalogName(),
                        tablePath.getDatabaseName(),
                        tablePath.getObjectName()),
                e);
    }
}
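The two Catalog calls used above can be exercised directly against the in-memory catalog that ships with Flink. Below is a minimal sketch (not part of DatabaseCalciteSchema); the catalog name, table name, and statistics values are illustrative.

import java.util.Collections;

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.Schema;
import org.apache.flink.table.catalog.Catalog;
import org.apache.flink.table.catalog.CatalogTable;
import org.apache.flink.table.catalog.GenericInMemoryCatalog;
import org.apache.flink.table.catalog.ObjectPath;
import org.apache.flink.table.catalog.stats.CatalogColumnStatistics;
import org.apache.flink.table.catalog.stats.CatalogTableStatistics;

public class TableStatisticsExample {
    public static void main(String[] args) throws Exception {
        Catalog catalog = new GenericInMemoryCatalog("my_catalog", "default");
        ObjectPath path = new ObjectPath("default", "orders");

        // Register a simple, non-partitioned table.
        catalog.createTable(
                path,
                CatalogTable.of(
                        Schema.newBuilder().column("id", DataTypes.BIGINT()).build(),
                        null,
                        Collections.emptyList(),
                        Collections.emptyMap()),
                false);

        // Attach table-level statistics: rowCount, fileCount, totalSize, rawDataSize.
        catalog.alterTableStatistics(
                path, new CatalogTableStatistics(1_000L, 4, 4_096L, 8_192L), false);

        // The same two lookups the planner performs in extractTableStats; in the planner
        // they are then merged by convertToTableStats.
        CatalogTableStatistics tableStats = catalog.getTableStatistics(path);
        CatalogColumnStatistics columnStats = catalog.getTableColumnStatistics(path);
        System.out.println("row count: " + tableStats.getRowCount());
    }
}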
Use of org.apache.flink.table.catalog.Catalog in project flink by apache.
In the class FactoryUtilTest, the method testCreateCatalog:
@Test
public void testCreateCatalog() {
    final Map<String, String> options = new HashMap<>();
    options.put(CommonCatalogOptions.CATALOG_TYPE.key(), TestCatalogFactory.IDENTIFIER);
    options.put(TestCatalogFactory.DEFAULT_DATABASE.key(), "my-database");

    final Catalog catalog =
            FactoryUtil.createCatalog(
                    "my-catalog", options, null, Thread.currentThread().getContextClassLoader());
    assertThat(catalog).isInstanceOf(TestCatalogFactory.TestCatalog.class);

    final TestCatalogFactory.TestCatalog testCatalog = (TestCatalogFactory.TestCatalog) catalog;
    assertThat(testCatalog.getName()).isEqualTo("my-catalog");
    assertThat(testCatalog.getOptions().get(TestCatalogFactory.DEFAULT_DATABASE.key()))
            .isEqualTo("my-database");
}
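The TestCatalogFactory used in this test is a test-only helper, but a production factory follows the same shape. Below is a minimal sketch of a comparable CatalogFactory; the identifier, option, and class names are made up, and for FactoryUtil.createCatalog to discover such a factory it must also be listed in META-INF/services/org.apache.flink.table.factories.Factory.

import java.util.Collections;
import java.util.HashSet;
import java.util.Set;

import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.configuration.ConfigOptions;
import org.apache.flink.table.catalog.Catalog;
import org.apache.flink.table.catalog.GenericInMemoryCatalog;
import org.apache.flink.table.factories.CatalogFactory;

public class InMemoryCatalogFactory implements CatalogFactory {

    // Hypothetical option, analogous to TestCatalogFactory.DEFAULT_DATABASE.
    public static final ConfigOption<String> DEFAULT_DATABASE =
            ConfigOptions.key("default-database").stringType().defaultValue("default");

    @Override
    public String factoryIdentifier() {
        // Matched against the "type" option (CommonCatalogOptions.CATALOG_TYPE).
        return "in-memory-example";
    }

    @Override
    public Set<ConfigOption<?>> requiredOptions() {
        return Collections.emptySet();
    }

    @Override
    public Set<ConfigOption<?>> optionalOptions() {
        Set<ConfigOption<?>> options = new HashSet<>();
        options.add(DEFAULT_DATABASE);
        return options;
    }

    @Override
    public Catalog createCatalog(Context context) {
        String defaultDatabase =
                context.getOptions()
                        .getOrDefault(DEFAULT_DATABASE.key(), DEFAULT_DATABASE.defaultValue());
        return new GenericInMemoryCatalog(context.getName(), defaultDatabase);
    }
}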