
Example 21 with ResolvedSchema

Use of org.apache.flink.table.catalog.ResolvedSchema in project flink by apache.

Class DataTypeUtilsTest, method testExpandStructuredType.

@Test
public void testExpandStructuredType() {
    StructuredType logicalType =
            StructuredType.newBuilder(ObjectIdentifier.of("catalog", "database", "type"))
                    .attributes(
                            Arrays.asList(
                                    new StructuredType.StructuredAttribute(
                                            "f0", DataTypes.INT().getLogicalType()),
                                    new StructuredType.StructuredAttribute(
                                            "f1", DataTypes.STRING().getLogicalType()),
                                    new StructuredType.StructuredAttribute(
                                            "f2", DataTypes.TIMESTAMP(5).getLogicalType()),
                                    new StructuredType.StructuredAttribute(
                                            "f3", DataTypes.TIMESTAMP(3).getLogicalType())))
                    .build();
    List<DataType> dataTypes =
            Arrays.asList(
                    DataTypes.INT(),
                    DataTypes.STRING(),
                    DataTypes.TIMESTAMP(5).bridgedTo(Timestamp.class),
                    DataTypes.TIMESTAMP(3));
    FieldsDataType dataType = new FieldsDataType(logicalType, dataTypes);
    ResolvedSchema schema = DataTypeUtils.expandCompositeTypeToSchema(dataType);
    assertThat(schema)
            .isEqualTo(
                    ResolvedSchema.of(
                            Column.physical("f0", INT()),
                            Column.physical("f1", STRING()),
                            Column.physical("f2", TIMESTAMP(5).bridgedTo(Timestamp.class)),
                            Column.physical("f3", TIMESTAMP(3).bridgedTo(LocalDateTime.class))));
}
Also used: LocalDateTime (java.time.LocalDateTime), FieldsDataType (org.apache.flink.table.types.FieldsDataType), DataType (org.apache.flink.table.types.DataType), ResolvedSchema (org.apache.flink.table.catalog.ResolvedSchema), Timestamp (java.sql.Timestamp), StructuredType (org.apache.flink.table.types.logical.StructuredType), Test (org.junit.Test)
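In short, DataTypeUtils.expandCompositeTypeToSchema flattens a composite data type into one physical column per field, which is what the assertion above verifies for a structured type. A minimal sketch of the same call on a plain ROW type, assuming DataTypeUtils from org.apache.flink.table.types.utils as used in the test (class and field names here are illustrative):

    import org.apache.flink.table.api.DataTypes;
    import org.apache.flink.table.catalog.ResolvedSchema;
    import org.apache.flink.table.types.DataType;
    import org.apache.flink.table.types.utils.DataTypeUtils;

    public class ExpandRowSketch {
        public static void main(String[] args) {
            // A two-field ROW type; each field becomes one physical column.
            DataType row =
                    DataTypes.ROW(
                            DataTypes.FIELD("id", DataTypes.BIGINT()),
                            DataTypes.FIELD("name", DataTypes.STRING()));
            ResolvedSchema schema = DataTypeUtils.expandCompositeTypeToSchema(row);
            System.out.println(schema); // expected columns: id BIGINT, name STRING
        }
    }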

Example 22 with ResolvedSchema

Use of org.apache.flink.table.catalog.ResolvedSchema in project flink by apache.

Class TypeMappingUtilsTest, method testCheckPhysicalLogicalTypeCompatible.

@Test
public void testCheckPhysicalLogicalTypeCompatible() {
    TableSchema tableSchema =
            TableSchema.builder()
                    .field("a", DataTypes.VARCHAR(2))
                    .field("b", DataTypes.DECIMAL(20, 2))
                    .build();
    TableSink tableSink = new TestTableSink(tableSchema);
    LegacyTypeInformationType legacyDataType =
            (LegacyTypeInformationType) tableSink.getConsumedDataType().getLogicalType();
    TypeInformation legacyTypeInfo =
            ((TupleTypeInfo) legacyDataType.getTypeInformation()).getTypeAt(1);
    DataType physicalType = TypeConversions.fromLegacyInfoToDataType(legacyTypeInfo);
    ResolvedSchema physicSchema = DataTypeUtils.expandCompositeTypeToSchema(physicalType);
    DataType[] logicalDataTypes = tableSchema.getFieldDataTypes();
    List<DataType> physicalDataTypes = physicSchema.getColumnDataTypes();
    for (int i = 0; i < logicalDataTypes.length; i++) {
        TypeMappingUtils.checkPhysicalLogicalTypeCompatible(
                physicalDataTypes.get(i).getLogicalType(),
                logicalDataTypes[i].getLogicalType(),
                "physicalField",
                "logicalField",
                false);
    }
}
Also used: TableSchema (org.apache.flink.table.api.TableSchema), DataType (org.apache.flink.table.types.DataType), TableSink (org.apache.flink.table.sinks.TableSink), ResolvedSchema (org.apache.flink.table.catalog.ResolvedSchema), LegacyTypeInformationType (org.apache.flink.table.types.logical.LegacyTypeInformationType), TypeInformation (org.apache.flink.api.common.typeinfo.TypeInformation), TupleTypeInfo (org.apache.flink.api.java.typeutils.TupleTypeInfo), Test (org.junit.Test)
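For context, checkPhysicalLogicalTypeCompatible validates that each physical field type produced by the sink's consumed data type can carry the declared logical type, raising a ValidationException on a mismatch. A minimal direct sketch, assuming TypeMappingUtils lives in org.apache.flink.table.utils and keeps the static signature used in the test (field names are only for error messages):

    import org.apache.flink.table.api.DataTypes;
    import org.apache.flink.table.utils.TypeMappingUtils;

    public class CompatibilityCheckSketch {
        public static void main(String[] args) {
            // Compatible pair: the check returns silently; an incompatible pair
            // (e.g. physical BIGINT against logical VARCHAR) would throw.
            TypeMappingUtils.checkPhysicalLogicalTypeCompatible(
                    DataTypes.VARCHAR(2).getLogicalType(), // physical field type
                    DataTypes.VARCHAR(2).getLogicalType(), // declared logical field type
                    "physicalField",
                    "logicalField",
                    false); // same flag value the test above passes
        }
    }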

Example 23 with ResolvedSchema

Use of org.apache.flink.table.catalog.ResolvedSchema in project flink by apache.

Class TemporalTableSourceSpecSerdeTest, method testTemporalTableSourceSpecSerde.

public static Stream<TemporalTableSourceSpec> testTemporalTableSourceSpecSerde() {
    Map<String, String> options1 = new HashMap<>();
    options1.put("connector", "filesystem");
    options1.put("format", "testcsv");
    options1.put("path", "/tmp");
    final ResolvedSchema resolvedSchema1 =
            new ResolvedSchema(
                    Collections.singletonList(Column.physical("a", DataTypes.BIGINT())),
                    Collections.emptyList(),
                    null);
    final CatalogTable catalogTable1 =
            CatalogTable.of(
                    Schema.newBuilder().fromResolvedSchema(resolvedSchema1).build(),
                    null,
                    Collections.emptyList(),
                    options1);
    ResolvedCatalogTable resolvedCatalogTable = new ResolvedCatalogTable(catalogTable1, resolvedSchema1);
    RelDataType relDataType1 = FACTORY.createSqlType(SqlTypeName.BIGINT);
    LookupTableSource lookupTableSource = new TestValuesTableFactory.MockedLookupTableSource();
    TableSourceTable tableSourceTable1 =
            new TableSourceTable(
                    null,
                    relDataType1,
                    FlinkStatistic.UNKNOWN(),
                    lookupTableSource,
                    true,
                    ContextResolvedTable.temporary(
                            ObjectIdentifier.of("default_catalog", "default_db", "MyTable"),
                            resolvedCatalogTable),
                    FLINK_CONTEXT,
                    new SourceAbilitySpec[] {new LimitPushDownSpec(100)});
    TemporalTableSourceSpec temporalTableSourceSpec1 =
            new TemporalTableSourceSpec(tableSourceTable1);
    return Stream.of(temporalTableSourceSpec1);
}
Also used: LimitPushDownSpec (org.apache.flink.table.planner.plan.abilities.source.LimitPushDownSpec), ResolvedCatalogTable (org.apache.flink.table.catalog.ResolvedCatalogTable), HashMap (java.util.HashMap), TemporalTableSourceSpec (org.apache.flink.table.planner.plan.nodes.exec.spec.TemporalTableSourceSpec), LookupTableSource (org.apache.flink.table.connector.source.LookupTableSource), RelDataType (org.apache.calcite.rel.type.RelDataType), CatalogTable (org.apache.flink.table.catalog.CatalogTable), TableSourceTable (org.apache.flink.table.planner.plan.schema.TableSourceTable), ResolvedSchema (org.apache.flink.table.catalog.ResolvedSchema)
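The three-argument ResolvedSchema constructor used above takes the column list, the watermark specifications, and the primary key constraint (null when there is none). For schemas that are only physical columns, the ResolvedSchema.of factory from Example 21 builds the same thing; a minimal sketch:

    import java.util.Collections;

    import org.apache.flink.table.api.DataTypes;
    import org.apache.flink.table.catalog.Column;
    import org.apache.flink.table.catalog.ResolvedSchema;

    public class ResolvedSchemaSketch {
        public static void main(String[] args) {
            // Explicit constructor: columns, watermark specs, primary key (null = none).
            ResolvedSchema viaConstructor =
                    new ResolvedSchema(
                            Collections.singletonList(Column.physical("a", DataTypes.BIGINT())),
                            Collections.emptyList(),
                            null);
            // Convenience factory for the physical-columns-only case.
            ResolvedSchema viaFactory = ResolvedSchema.of(Column.physical("a", DataTypes.BIGINT()));
            System.out.println(viaConstructor.equals(viaFactory)); // true
        }
    }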

Example 24 with ResolvedSchema

Use of org.apache.flink.table.catalog.ResolvedSchema in project flink by apache.

Class DataGenTableSourceFactoryTest, method testDataTypeCoverage.

@Test
public void testDataTypeCoverage() throws Exception {
    ResolvedSchema schema =
            ResolvedSchema.of(
                    Column.physical("f0", DataTypes.CHAR(1)),
                    Column.physical("f1", DataTypes.VARCHAR(10)),
                    Column.physical("f2", DataTypes.STRING()),
                    Column.physical("f3", DataTypes.BOOLEAN()),
                    Column.physical("f4", DataTypes.DECIMAL(32, 2)),
                    Column.physical("f5", DataTypes.TINYINT()),
                    Column.physical("f6", DataTypes.SMALLINT()),
                    Column.physical("f7", DataTypes.INT()),
                    Column.physical("f8", DataTypes.BIGINT()),
                    Column.physical("f9", DataTypes.FLOAT()),
                    Column.physical("f10", DataTypes.DOUBLE()),
                    Column.physical("f11", DataTypes.DATE()),
                    Column.physical("f12", DataTypes.TIME()),
                    Column.physical("f13", DataTypes.TIMESTAMP()),
                    Column.physical("f14", DataTypes.TIMESTAMP_WITH_TIME_ZONE()),
                    Column.physical("f15", DataTypes.TIMESTAMP_WITH_LOCAL_TIME_ZONE()),
                    Column.physical("f16", DataTypes.INTERVAL(DataTypes.DAY())),
                    Column.physical("f17", DataTypes.ARRAY(DataTypes.INT())),
                    Column.physical("f18", DataTypes.MAP(DataTypes.STRING(), DataTypes.DATE())),
                    Column.physical("f19", DataTypes.MULTISET(DataTypes.DECIMAL(32, 2))),
                    Column.physical("f20", DataTypes.ROW(
                            DataTypes.FIELD("a", DataTypes.BIGINT()),
                            DataTypes.FIELD("b", DataTypes.TIME()),
                            DataTypes.FIELD("c", DataTypes.ROW(
                                    DataTypes.FIELD("d", DataTypes.TIMESTAMP()))))));
    DescriptorProperties descriptor = new DescriptorProperties();
    descriptor.putString(FactoryUtil.CONNECTOR.key(), "datagen");
    descriptor.putString(DataGenConnectorOptions.NUMBER_OF_ROWS.key(), "10");
    // add min max option for numeric types
    descriptor.putString("fields.f4.min", "1.0");
    descriptor.putString("fields.f4.max", "1000.0");
    descriptor.putString("fields.f5.min", "0");
    descriptor.putString("fields.f5.max", "127");
    descriptor.putString("fields.f6.min", "0");
    descriptor.putString("fields.f6.max", "32767");
    descriptor.putString("fields.f7.min", "0");
    descriptor.putString("fields.f7.max", "65535");
    descriptor.putString("fields.f8.min", "0");
    descriptor.putString("fields.f8.max", String.valueOf(Long.MAX_VALUE));
    descriptor.putString("fields.f9.min", "0");
    descriptor.putString("fields.f9.max", String.valueOf(Float.MAX_VALUE));
    descriptor.putString("fields.f10.min", "0");
    descriptor.putString("fields.f10.max", String.valueOf(Double.MAX_VALUE));
    List<RowData> results = runGenerator(schema, descriptor);
    Assert.assertEquals("Failed to generate all rows", 10, results.size());
    for (RowData row : results) {
        for (int i = 0; i < row.getArity(); i++) {
            Assert.assertFalse("Column " + schema.getColumnNames().get(i) + " should not be null", row.isNullAt(i));
        }
    }
}
Also used: GenericRowData (org.apache.flink.table.data.GenericRowData), RowData (org.apache.flink.table.data.RowData), DescriptorProperties (org.apache.flink.table.descriptors.DescriptorProperties), ResolvedSchema (org.apache.flink.table.catalog.ResolvedSchema), DataGeneratorSourceTest (org.apache.flink.streaming.api.functions.source.datagen.DataGeneratorSourceTest), Test (org.junit.Test)
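The fields.<name>.min/max options bound the random generators for the numeric columns, and number-of-rows makes the source bounded so the test can collect exactly ten rows. A hedged sketch of the same setup declared through SQL DDL instead of DescriptorProperties (same documented datagen option keys; the table and field names are illustrative):

    import org.apache.flink.table.api.EnvironmentSettings;
    import org.apache.flink.table.api.TableEnvironment;

    public class DataGenDdlSketch {
        public static void main(String[] args) {
            TableEnvironment env = TableEnvironment.create(EnvironmentSettings.inStreamingMode());
            // Bounded random source: 10 rows, f7 drawn from [0, 65535].
            env.executeSql(
                    "CREATE TABLE gen (f7 INT) WITH ("
                            + " 'connector' = 'datagen',"
                            + " 'number-of-rows' = '10',"
                            + " 'fields.f7.min' = '0',"
                            + " 'fields.f7.max' = '65535'"
                            + ")");
            env.executeSql("SELECT * FROM gen").print();
        }
    }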

Example 25 with ResolvedSchema

Use of org.apache.flink.table.catalog.ResolvedSchema in project flink by apache.

Class PushProjectIntoTableSourceScanRule, method onMatch.

@Override
public void onMatch(RelOptRuleCall call) {
    final LogicalProject project = call.rel(0);
    final LogicalTableScan scan = call.rel(1);
    final TableSourceTable sourceTable = scan.getTable().unwrap(TableSourceTable.class);
    final boolean supportsNestedProjection = supportsNestedProjection(sourceTable.tableSource());
    final int[] refFields = RexNodeExtractor.extractRefInputFields(project.getProjects());
    if (!supportsNestedProjection && refFields.length == scan.getRowType().getFieldCount()) {
        // There is no top-level projection and nested projections aren't supported.
        return;
    }
    final FlinkTypeFactory typeFactory = unwrapTypeFactory(scan);
    final ResolvedSchema schema = sourceTable.contextResolvedTable().getResolvedSchema();
    final RowType producedType = createProducedType(schema, sourceTable.tableSource());
    final NestedSchema projectedSchema =
            NestedProjectionUtil.build(
                    getProjections(project, scan), typeFactory.buildRelNodeRowType(producedType));
    if (!supportsNestedProjection) {
        for (NestedColumn column : projectedSchema.columns().values()) {
            column.markLeaf();
        }
    }
    final List<SourceAbilitySpec> abilitySpecs = new ArrayList<>();
    final RowType newProducedType = performPushDown(sourceTable, projectedSchema, producedType, abilitySpecs);
    final DynamicTableSource newTableSource = sourceTable.tableSource().copy();
    final SourceAbilityContext context = SourceAbilityContext.from(scan);
    abilitySpecs.forEach(spec -> spec.apply(newTableSource, context));
    final RelDataType newRowType = typeFactory.buildRelNodeRowType(newProducedType);
    final TableSourceTable newSource =
            sourceTable.copy(
                    newTableSource, newRowType, abilitySpecs.toArray(new SourceAbilitySpec[0]));
    final LogicalTableScan newScan =
            new LogicalTableScan(scan.getCluster(), scan.getTraitSet(), scan.getHints(), newSource);
    final LogicalProject newProject =
            project.copy(
                    project.getTraitSet(),
                    newScan,
                    rewriteProjections(call, newSource, projectedSchema),
                    project.getRowType());
    if (ProjectRemoveRule.isTrivial(newProject)) {
        call.transformTo(newScan);
    } else {
        call.transformTo(newProject);
    }
}
Also used: SourceAbilitySpec (org.apache.flink.table.planner.plan.abilities.source.SourceAbilitySpec), ArrayList (java.util.ArrayList), RowType (org.apache.flink.table.types.logical.RowType), NestedColumn (org.apache.flink.table.planner.plan.utils.NestedColumn), RelDataType (org.apache.calcite.rel.type.RelDataType), LogicalTableScan (org.apache.calcite.rel.logical.LogicalTableScan), FlinkTypeFactory (org.apache.flink.table.planner.calcite.FlinkTypeFactory), SourceAbilityContext (org.apache.flink.table.planner.plan.abilities.source.SourceAbilityContext), LogicalProject (org.apache.calcite.rel.logical.LogicalProject), TableSourceTable (org.apache.flink.table.planner.plan.schema.TableSourceTable), ResolvedSchema (org.apache.flink.table.catalog.ResolvedSchema), DynamicTableSource (org.apache.flink.table.connector.source.DynamicTableSource), NestedSchema (org.apache.flink.table.planner.plan.utils.NestedSchema)
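On the connector side, the projections collected by this rule land in the SupportsProjectionPushDown ability. A hedged sketch of a source that accepts the push-down (interface and method names as in recent Flink releases, so verify against your Flink version; the class is hypothetical and the runtime provider is stubbed out):

    import org.apache.flink.table.connector.ChangelogMode;
    import org.apache.flink.table.connector.source.DynamicTableSource;
    import org.apache.flink.table.connector.source.ScanTableSource;
    import org.apache.flink.table.connector.source.abilities.SupportsProjectionPushDown;
    import org.apache.flink.table.types.DataType;

    public class ProjectableSource implements ScanTableSource, SupportsProjectionPushDown {

        private int[][] projectedFields;

        @Override
        public boolean supportsNestedProjection() {
            // Returning false makes the rule above mark every NestedColumn as a leaf,
            // so only top-level fields are projected.
            return false;
        }

        @Override
        public void applyProjection(int[][] projectedFields, DataType producedDataType) {
            // Index paths into the produced row type, e.g. {{1}, {3}} selects fields 1 and 3.
            this.projectedFields = projectedFields;
        }

        @Override
        public ChangelogMode getChangelogMode() {
            return ChangelogMode.insertOnly();
        }

        @Override
        public ScanRuntimeProvider getScanRuntimeProvider(ScanContext runtimeProviderContext) {
            throw new UnsupportedOperationException("omitted in this sketch");
        }

        @Override
        public DynamicTableSource copy() {
            return new ProjectableSource();
        }

        @Override
        public String asSummaryString() {
            return "ProjectableSource";
        }
    }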

Aggregations

ResolvedSchema (org.apache.flink.table.catalog.ResolvedSchema): 84
Test (org.junit.Test): 50
DynamicTableSink (org.apache.flink.table.connector.sink.DynamicTableSink): 20
DataType (org.apache.flink.table.types.DataType): 20
RowData (org.apache.flink.table.data.RowData): 17
ValidationException (org.apache.flink.table.api.ValidationException): 14
ResolvedCatalogTable (org.apache.flink.table.catalog.ResolvedCatalogTable): 14
List (java.util.List): 11
SinkRuntimeProviderContext (org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext): 11
DynamicTableSource (org.apache.flink.table.connector.source.DynamicTableSource): 10
Column (org.apache.flink.table.catalog.Column): 9
LogicalType (org.apache.flink.table.types.logical.LogicalType): 9
RowType (org.apache.flink.table.types.logical.RowType): 9
HashMap (java.util.HashMap): 8
Collectors (java.util.stream.Collectors): 8
RelDataType (org.apache.calcite.rel.type.RelDataType): 8
Internal (org.apache.flink.annotation.Internal): 8
HBaseWriteOptions (org.apache.flink.connector.hbase.options.HBaseWriteOptions): 6
FlinkTypeFactory (org.apache.flink.table.planner.calcite.FlinkTypeFactory): 6
Row (org.apache.flink.types.Row): 6