Use of org.apache.flink.table.catalog.ResolvedCatalogTable in project flink by apache.
From the class FlinkCalciteCatalogReaderTest, method testGetFlinkPreparingTableBase:
@Test
public void testGetFlinkPreparingTableBase() {
    // Mock CatalogSchemaTable.
    final ObjectIdentifier objectIdentifier = ObjectIdentifier.of("a", "b", "c");
    final ResolvedSchema schema =
            new ResolvedSchema(Collections.emptyList(), Collections.emptyList(), null);
    final CatalogTable catalogTable =
            ConnectorCatalogTable.source(
                    new TestTableSource(true, TableSchema.fromResolvedSchema(schema)), true);
    final ResolvedCatalogTable resolvedCatalogTable =
            new ResolvedCatalogTable(catalogTable, schema);
    CatalogSchemaTable mockTable =
            new CatalogSchemaTable(
                    ContextResolvedTable.permanent(
                            objectIdentifier,
                            CatalogManagerMocks.createEmptyCatalog(),
                            resolvedCatalogTable),
                    FlinkStatistic.UNKNOWN(),
                    true);

    rootSchemaPlus.add(tableMockName, mockTable);
    Prepare.PreparingTable preparingTable =
            catalogReader.getTable(Collections.singletonList(tableMockName));
    assertTrue(preparingTable instanceof FlinkPreparingTableBase);
}
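For context, here is a minimal, self-contained sketch (not taken from the Flink sources) of what the ResolvedCatalogTable in the test above pairs together: the declarative CatalogTable and the ResolvedSchema produced by schema resolution. The class name, columns, and the datagen connector option are illustrative assumptions.

import java.util.Collections;

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.Schema;
import org.apache.flink.table.catalog.CatalogTable;
import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.catalog.ResolvedCatalogTable;
import org.apache.flink.table.catalog.ResolvedSchema;

public final class ResolvedCatalogTableSketch {

    public static ResolvedCatalogTable createExampleTable() {
        // The resolved side: columns with concrete, validated data types.
        ResolvedSchema resolvedSchema =
                ResolvedSchema.of(
                        Column.physical("id", DataTypes.BIGINT()),
                        Column.physical("name", DataTypes.STRING()));

        // The unresolved side: the declarative definition as a user would write it.
        CatalogTable catalogTable =
                CatalogTable.of(
                        Schema.newBuilder().fromResolvedSchema(resolvedSchema).build(),
                        "example table", // comment
                        Collections.emptyList(), // no partition keys
                        Collections.singletonMap("connector", "datagen")); // options (assumed)

        // A ResolvedCatalogTable simply pairs the original definition with its resolved schema.
        return new ResolvedCatalogTable(catalogTable, resolvedSchema);
    }
}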
Use of org.apache.flink.table.catalog.ResolvedCatalogTable in project flink by apache.
From the class SqlToOperationConverter, method convertAlterTableCompact:
/**
 * Converts an `ALTER TABLE ... COMPACT` operation to a {@link ModifyOperation} for Flink's
 * managed table, triggering a compaction batch job.
 */
private ModifyOperation convertAlterTableCompact(
        ObjectIdentifier tableIdentifier,
        ContextResolvedTable contextResolvedTable,
        SqlAlterTableCompact alterTableCompact) {
    Catalog catalog = catalogManager.getCatalog(tableIdentifier.getCatalogName()).orElse(null);
    ResolvedCatalogTable resolvedCatalogTable = contextResolvedTable.getResolvedTable();
    if (ManagedTableListener.isManagedTable(catalog, resolvedCatalogTable)) {
        Map<String, String> partitionKVs = alterTableCompact.getPartitionKVs();
        CatalogPartitionSpec partitionSpec = new CatalogPartitionSpec(Collections.emptyMap());
        if (partitionKVs != null) {
            List<String> partitionKeys = resolvedCatalogTable.getPartitionKeys();
            Set<String> validPartitionKeySet = new HashSet<>(partitionKeys);
            String exMsg =
                    partitionKeys.isEmpty()
                            ? String.format("Table %s is not partitioned.", tableIdentifier)
                            : String.format(
                                    "Available ordered partition columns: [%s]",
                                    partitionKeys.stream()
                                            .collect(Collectors.joining("', '", "'", "'")));
            partitionKVs.forEach(
                    (partitionKey, partitionValue) -> {
                        if (!validPartitionKeySet.contains(partitionKey)) {
                            throw new ValidationException(
                                    String.format(
                                            "Partition column '%s' not defined in the table schema. %s",
                                            partitionKey, exMsg));
                        }
                    });
            partitionSpec = new CatalogPartitionSpec(partitionKVs);
        }
        Map<String, String> compactOptions =
                catalogManager.resolveCompactManagedTableOptions(
                        resolvedCatalogTable, tableIdentifier, partitionSpec);
        QueryOperation child = new SourceQueryOperation(contextResolvedTable, compactOptions);
        return new SinkModifyOperation(
                contextResolvedTable,
                child,
                partitionSpec.getPartitionSpec(),
                false,
                compactOptions);
    }
    throw new ValidationException(
            String.format(
                    "ALTER TABLE COMPACT operation is not supported for non-managed table %s",
                    tableIdentifier));
}
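The central step above is validating the user-supplied partition key-value pairs against the table's declared partition columns. Here is the same check extracted as a standalone sketch using plain JDK collections; the class and method names are illustrative, not Flink API.

import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

public final class PartitionSpecValidationSketch {

    // Rejects any supplied partition key that is not a declared partition column.
    static void validatePartitionKVs(List<String> partitionKeys, Map<String, String> partitionKVs) {
        Set<String> validPartitionKeySet = new HashSet<>(partitionKeys);
        String hint =
                partitionKeys.isEmpty()
                        ? "Table is not partitioned."
                        : String.format(
                                "Available ordered partition columns: [%s]",
                                partitionKeys.stream()
                                        .collect(Collectors.joining("', '", "'", "'")));
        partitionKVs.forEach(
                (key, value) -> {
                    if (!validPartitionKeySet.contains(key)) {
                        throw new IllegalArgumentException(
                                String.format(
                                        "Partition column '%s' not defined in the table schema. %s",
                                        key, hint));
                    }
                });
    }

    public static void main(String[] args) {
        // 'dt' is declared; 'hour' is not, so this throws with the hint above.
        validatePartitionKVs(Arrays.asList("dt"), Collections.singletonMap("hour", "12"));
    }
}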
Use of org.apache.flink.table.catalog.ResolvedCatalogTable in project flink by apache.
From the class DynamicSourceUtils, method convertSourceToRel:
/**
* Converts a given {@link DynamicTableSource} to a {@link RelNode}. It adds helper projections
* if necessary.
*/
public static RelNode convertSourceToRel(
        boolean isBatchMode,
        ReadableConfig config,
        FlinkRelBuilder relBuilder,
        ContextResolvedTable contextResolvedTable,
        FlinkStatistic statistic,
        List<RelHint> hints,
        DynamicTableSource tableSource) {
    final String tableDebugName = contextResolvedTable.getIdentifier().asSummaryString();
    final ResolvedCatalogTable resolvedCatalogTable = contextResolvedTable.getResolvedTable();

    // 1. prepare table source
    prepareDynamicSource(tableDebugName, resolvedCatalogTable, tableSource, isBatchMode, config);

    // 2. push table scan
    pushTableScan(isBatchMode, relBuilder, contextResolvedTable, statistic, hints, tableSource);

    // 3. push project for non-physical columns
    final ResolvedSchema schema = contextResolvedTable.getResolvedSchema();
    if (!schema.getColumns().stream().allMatch(Column::isPhysical)) {
        pushMetadataProjection(relBuilder, schema);
        pushGeneratedProjection(relBuilder, schema);
    }

    // 4. push watermark assigner
    if (!isBatchMode && !schema.getWatermarkSpecs().isEmpty()) {
        pushWatermarkAssigner(relBuilder, schema);
    }

    return relBuilder.build();
}
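Step 3 above only inserts the helper projections when the resolved schema contains non-physical (metadata or computed) columns. A minimal sketch of that check, using only flink-table-common classes; the class name is illustrative.

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.catalog.ResolvedSchema;

public final class PhysicalColumnCheckSketch {

    // A helper projection is needed only if some column is not physical.
    static boolean needsHelperProjection(ResolvedSchema schema) {
        return !schema.getColumns().stream().allMatch(Column::isPhysical);
    }

    public static void main(String[] args) {
        ResolvedSchema physicalOnly =
                ResolvedSchema.of(Column.physical("id", DataTypes.BIGINT()));
        ResolvedSchema withMetadata =
                ResolvedSchema.of(
                        Column.physical("id", DataTypes.BIGINT()),
                        Column.metadata("ts", DataTypes.TIMESTAMP_LTZ(3), "timestamp", false));

        System.out.println(needsHelperProjection(physicalOnly)); // false
        System.out.println(needsHelperProjection(withMetadata)); // true
    }
}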