
Example 21 with ResolvedCatalogTable

Use of org.apache.flink.table.catalog.ResolvedCatalogTable in project flink by apache.

From the class FlinkCalciteCatalogReaderTest, method testGetFlinkPreparingTableBase.

@Test
public void testGetFlinkPreparingTableBase() {
    // Mock CatalogSchemaTable.
    final ObjectIdentifier objectIdentifier = ObjectIdentifier.of("a", "b", "c");
    final ResolvedSchema schema = new ResolvedSchema(Collections.emptyList(), Collections.emptyList(), null);
    final CatalogTable catalogTable = ConnectorCatalogTable.source(new TestTableSource(true, TableSchema.fromResolvedSchema(schema)), true);
    final ResolvedCatalogTable resolvedCatalogTable = new ResolvedCatalogTable(catalogTable, schema);
    CatalogSchemaTable mockTable = new CatalogSchemaTable(ContextResolvedTable.permanent(objectIdentifier, CatalogManagerMocks.createEmptyCatalog(), resolvedCatalogTable), FlinkStatistic.UNKNOWN(), true);
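    // Register the mock table under the test name and resolve it through the Calcite catalog reader.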
    rootSchemaPlus.add(tableMockName, mockTable);
    Prepare.PreparingTable preparingTable = catalogReader.getTable(Collections.singletonList(tableMockName));
    assertTrue(preparingTable instanceof FlinkPreparingTableBase);
}
Also used: TestTableSource (org.apache.flink.table.planner.utils.TestTableSource), ResolvedCatalogTable (org.apache.flink.table.catalog.ResolvedCatalogTable), CatalogSchemaTable (org.apache.flink.table.planner.catalog.CatalogSchemaTable), Prepare (org.apache.calcite.prepare.Prepare), FlinkPreparingTableBase (org.apache.flink.table.planner.plan.schema.FlinkPreparingTableBase), ConnectorCatalogTable (org.apache.flink.table.catalog.ConnectorCatalogTable), CatalogTable (org.apache.flink.table.catalog.CatalogTable), ResolvedSchema (org.apache.flink.table.catalog.ResolvedSchema), ObjectIdentifier (org.apache.flink.table.catalog.ObjectIdentifier), Test (org.junit.Test)

Example 22 with ResolvedCatalogTable

Use of org.apache.flink.table.catalog.ResolvedCatalogTable in project flink by apache.

From the class SqlToOperationConverter, method convertAlterTableCompact.

/**
 * Convert `ALTER TABLE ... COMPACT` operation to {@link ModifyOperation} for Flink's managed
 * table to trigger a compaction batch job.
 */
private ModifyOperation convertAlterTableCompact(ObjectIdentifier tableIdentifier, ContextResolvedTable contextResolvedTable, SqlAlterTableCompact alterTableCompact) {
    Catalog catalog = catalogManager.getCatalog(tableIdentifier.getCatalogName()).orElse(null);
    ResolvedCatalogTable resolvedCatalogTable = contextResolvedTable.getResolvedTable();
    if (ManagedTableListener.isManagedTable(catalog, resolvedCatalogTable)) {
        Map<String, String> partitionKVs = alterTableCompact.getPartitionKVs();
        CatalogPartitionSpec partitionSpec = new CatalogPartitionSpec(Collections.emptyMap());
        if (partitionKVs != null) {
            List<String> partitionKeys = resolvedCatalogTable.getPartitionKeys();
            Set<String> validPartitionKeySet = new HashSet<>(partitionKeys);
            String exMsg =
                    partitionKeys.isEmpty()
                            ? String.format("Table %s is not partitioned.", tableIdentifier)
                            : String.format(
                                    "Available ordered partition columns: [%s]",
                                    partitionKeys.stream()
                                            .collect(Collectors.joining("', '", "'", "'")));
            partitionKVs.forEach((partitionKey, partitionValue) -> {
                if (!validPartitionKeySet.contains(partitionKey)) {
                    throw new ValidationException(String.format("Partition column '%s' not defined in the table schema. %s", partitionKey, exMsg));
                }
            });
            partitionSpec = new CatalogPartitionSpec(partitionKVs);
        }
        Map<String, String> compactOptions = catalogManager.resolveCompactManagedTableOptions(resolvedCatalogTable, tableIdentifier, partitionSpec);
        QueryOperation child = new SourceQueryOperation(contextResolvedTable, compactOptions);
        return new SinkModifyOperation(contextResolvedTable, child, partitionSpec.getPartitionSpec(), false, compactOptions);
    }
    throw new ValidationException(String.format("ALTER TABLE COMPACT operation is not supported for non-managed table %s", tableIdentifier));
}
Also used: ValidationException (org.apache.flink.table.api.ValidationException), ResolvedCatalogTable (org.apache.flink.table.catalog.ResolvedCatalogTable), SinkModifyOperation (org.apache.flink.table.operations.SinkModifyOperation), SourceQueryOperation (org.apache.flink.table.operations.SourceQueryOperation), SqlShowCurrentCatalog (org.apache.flink.sql.parser.dql.SqlShowCurrentCatalog), Catalog (org.apache.flink.table.catalog.Catalog), SqlUseCatalog (org.apache.flink.sql.parser.ddl.SqlUseCatalog), SqlDropCatalog (org.apache.flink.sql.parser.ddl.SqlDropCatalog), SqlCreateCatalog (org.apache.flink.sql.parser.ddl.SqlCreateCatalog), CatalogPartitionSpec (org.apache.flink.table.catalog.CatalogPartitionSpec), HashSet (java.util.HashSet), QueryOperation (org.apache.flink.table.operations.QueryOperation)
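For context, this conversion path is exercised when a user submits an ALTER TABLE ... COMPACT statement against a Flink managed table. The following is a minimal, hypothetical sketch of how such statements could be issued through a TableEnvironment; the table name, partition column, and values are assumptions. The CREATE TABLE statement declares a managed table (no 'connector' option), which requires a managed table factory such as the Flink table store on the classpath; against a non-managed table the compaction statement would hit the ValidationException shown above.

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;

public class AlterTableCompactExample {

    public static void main(String[] args) {
        // Batch mode, since the compaction is triggered as a batch job.
        TableEnvironment tEnv =
                TableEnvironment.create(EnvironmentSettings.newInstance().inBatchMode().build());

        // Hypothetical managed table: no 'connector' option, partitioned by 'dt'.
        tEnv.executeSql(
                "CREATE TABLE orders (id BIGINT, amount DOUBLE, dt STRING) PARTITIONED BY (dt)");

        // Compacts the whole table; convertAlterTableCompact turns this into a
        // SinkModifyOperation with an empty partition spec.
        tEnv.executeSql("ALTER TABLE orders COMPACT");

        // Compacts a single partition; the keys are validated against the table's partition keys.
        tEnv.executeSql("ALTER TABLE orders PARTITION (dt = '2022-01-01') COMPACT");
    }
}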

Example 23 with ResolvedCatalogTable

Use of org.apache.flink.table.catalog.ResolvedCatalogTable in project flink by apache.

From the class DynamicSourceUtils, method convertSourceToRel.

/**
 * Converts a given {@link DynamicTableSource} to a {@link RelNode}. It adds helper projections
 * if necessary.
 */
public static RelNode convertSourceToRel(boolean isBatchMode, ReadableConfig config, FlinkRelBuilder relBuilder, ContextResolvedTable contextResolvedTable, FlinkStatistic statistic, List<RelHint> hints, DynamicTableSource tableSource) {
    final String tableDebugName = contextResolvedTable.getIdentifier().asSummaryString();
    final ResolvedCatalogTable resolvedCatalogTable = contextResolvedTable.getResolvedTable();
    // 1. prepare table source
    prepareDynamicSource(tableDebugName, resolvedCatalogTable, tableSource, isBatchMode, config);
    // 2. push table scan
    pushTableScan(isBatchMode, relBuilder, contextResolvedTable, statistic, hints, tableSource);
    // 3. push project for non-physical columns
    final ResolvedSchema schema = contextResolvedTable.getResolvedSchema();
    if (!schema.getColumns().stream().allMatch(Column::isPhysical)) {
        pushMetadataProjection(relBuilder, schema);
        pushGeneratedProjection(relBuilder, schema);
    }
    // 4. push watermark assigner
    if (!isBatchMode && !schema.getWatermarkSpecs().isEmpty()) {
        pushWatermarkAssigner(relBuilder, schema);
    }
    return relBuilder.build();
}
Also used: ResolvedCatalogTable (org.apache.flink.table.catalog.ResolvedCatalogTable), ResolvedSchema (org.apache.flink.table.catalog.ResolvedSchema)
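As a rough illustration of which steps apply, the hypothetical table below mixes physical columns, a computed column, and a watermark: the computed (non-physical) column causes step 3 to push a generated-column projection, and the watermark causes step 4 to push a watermark assigner in streaming mode; a metadata column, where the connector supports one, would be handled by the same projection step. This is only a sketch, and the 'datagen' connector and column names are assumptions chosen to keep it self-contained.

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;

public class ConvertSourceToRelExample {

    public static void main(String[] args) {
        TableEnvironment tEnv =
                TableEnvironment.create(
                        EnvironmentSettings.newInstance().inStreamingMode().build());

        // Hypothetical source: 'amount_eur' is a computed (non-physical) column and the
        // WATERMARK clause adds a watermark spec, so steps 3 and 4 above both apply.
        tEnv.executeSql(
                "CREATE TABLE events ("
                        + "  id BIGINT,"
                        + "  amount DOUBLE,"
                        + "  ts TIMESTAMP(3),"
                        + "  amount_eur AS amount * 0.9,"
                        + "  WATERMARK FOR ts AS ts - INTERVAL '5' SECOND"
                        + ") WITH ('connector' = 'datagen', 'number-of-rows' = '10')");

        // Scanning the table is what drives the planner through convertSourceToRel.
        tEnv.executeSql("SELECT id, amount_eur, ts FROM events").print();
    }
}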

Aggregations

ResolvedCatalogTable (org.apache.flink.table.catalog.ResolvedCatalogTable): 23
ResolvedSchema (org.apache.flink.table.catalog.ResolvedSchema): 11
ObjectIdentifier (org.apache.flink.table.catalog.ObjectIdentifier): 8
HashMap (java.util.HashMap): 7
ValidationException (org.apache.flink.table.api.ValidationException): 5
CatalogTable (org.apache.flink.table.catalog.CatalogTable): 5
List (java.util.List): 4
CatalogManager (org.apache.flink.table.catalog.CatalogManager): 4
ContextResolvedTable (org.apache.flink.table.catalog.ContextResolvedTable): 4
ExternalCatalogTable (org.apache.flink.table.catalog.ExternalCatalogTable): 4
QueryOperation (org.apache.flink.table.operations.QueryOperation): 4
Test (org.junit.Test): 4
Optional (java.util.Optional): 3
RelDataType (org.apache.calcite.rel.type.RelDataType): 3
SchemaTranslator (org.apache.flink.table.catalog.SchemaTranslator): 3
JsonSerdeTestUtil.configuredSerdeContext (org.apache.flink.table.planner.plan.nodes.exec.serde.JsonSerdeTestUtil.configuredSerdeContext): 3
Test (org.junit.jupiter.api.Test): 3
ParameterizedTest (org.junit.jupiter.params.ParameterizedTest): 3
Map (java.util.Map): 2
Collectors (java.util.stream.Collectors): 2