Use of org.apache.flink.table.catalog.ResolvedSchema in project flink by apache.
In class DataTypeUtilsTest, the method testExpandStructuredType:
@Test
public void testExpandStructuredType() {
    StructuredType logicalType =
            StructuredType.newBuilder(ObjectIdentifier.of("catalog", "database", "type"))
                    .attributes(
                            Arrays.asList(
                                    new StructuredType.StructuredAttribute(
                                            "f0", DataTypes.INT().getLogicalType()),
                                    new StructuredType.StructuredAttribute(
                                            "f1", DataTypes.STRING().getLogicalType()),
                                    new StructuredType.StructuredAttribute(
                                            "f2", DataTypes.TIMESTAMP(5).getLogicalType()),
                                    new StructuredType.StructuredAttribute(
                                            "f3", DataTypes.TIMESTAMP(3).getLogicalType())))
                    .build();

    List<DataType> dataTypes =
            Arrays.asList(
                    DataTypes.INT(),
                    DataTypes.STRING(),
                    DataTypes.TIMESTAMP(5).bridgedTo(Timestamp.class),
                    DataTypes.TIMESTAMP(3));

    FieldsDataType dataType = new FieldsDataType(logicalType, dataTypes);
    ResolvedSchema schema = DataTypeUtils.expandCompositeTypeToSchema(dataType);

    assertThat(schema)
            .isEqualTo(
                    ResolvedSchema.of(
                            Column.physical("f0", INT()),
                            Column.physical("f1", STRING()),
                            Column.physical("f2", TIMESTAMP(5).bridgedTo(Timestamp.class)),
                            Column.physical("f3", TIMESTAMP(3).bridgedTo(LocalDateTime.class))));
}
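DataTypeUtils.expandCompositeTypeToSchema flattens any composite DataType, not only structured types. A minimal sketch of the same call on a plain ROW type, assuming the flink-table dependencies are on the classpath; the class name ExpandRowExample is illustrative:

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.catalog.ResolvedSchema;
import org.apache.flink.table.types.DataType;
import org.apache.flink.table.types.utils.DataTypeUtils;

public class ExpandRowExample {
    public static void main(String[] args) {
        // A composite ROW type; each top-level field becomes one schema column.
        DataType row =
                DataTypes.ROW(
                        DataTypes.FIELD("id", DataTypes.BIGINT()),
                        DataTypes.FIELD("name", DataTypes.STRING()));
        // Expand the top level of the composite type into a ResolvedSchema.
        ResolvedSchema schema = DataTypeUtils.expandCompositeTypeToSchema(row);
        // Print each column, e.g. "`id` BIGINT".
        schema.getColumns().forEach(c -> System.out.println(c.asSummaryString()));
    }
}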
Use of org.apache.flink.table.catalog.ResolvedSchema in project flink by apache.
In class TypeMappingUtilsTest, the method testCheckPhysicalLogicalTypeCompatible:
@Test
public void testCheckPhysicalLogicalTypeCompatible() {
    TableSchema tableSchema =
            TableSchema.builder()
                    .field("a", DataTypes.VARCHAR(2))
                    .field("b", DataTypes.DECIMAL(20, 2))
                    .build();
    TableSink tableSink = new TestTableSink(tableSchema);
    LegacyTypeInformationType legacyDataType =
            (LegacyTypeInformationType) tableSink.getConsumedDataType().getLogicalType();
    TypeInformation legacyTypeInfo =
            ((TupleTypeInfo) legacyDataType.getTypeInformation()).getTypeAt(1);
    DataType physicalType = TypeConversions.fromLegacyInfoToDataType(legacyTypeInfo);
    ResolvedSchema physicSchema = DataTypeUtils.expandCompositeTypeToSchema(physicalType);
    DataType[] logicalDataTypes = tableSchema.getFieldDataTypes();
    List<DataType> physicalDataTypes = physicSchema.getColumnDataTypes();
    for (int i = 0; i < logicalDataTypes.length; i++) {
        TypeMappingUtils.checkPhysicalLogicalTypeCompatible(
                physicalDataTypes.get(i).getLogicalType(),
                logicalDataTypes[i].getLogicalType(),
                "physicalField",
                "logicalField",
                false);
    }
}
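The loop above relies on ResolvedSchema returning column names and data types in declaration order. A minimal standalone sketch of those accessors; the class name is illustrative:

import java.util.List;

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.catalog.ResolvedSchema;
import org.apache.flink.table.types.DataType;

public class SchemaAccessorsExample {
    public static void main(String[] args) {
        ResolvedSchema schema =
                ResolvedSchema.of(
                        Column.physical("a", DataTypes.VARCHAR(2)),
                        Column.physical("b", DataTypes.DECIMAL(20, 2)));
        // Both lists follow the declaration order of the columns.
        List<String> names = schema.getColumnNames();
        List<DataType> types = schema.getColumnDataTypes();
        for (int i = 0; i < names.size(); i++) {
            // getLogicalType() drops the conversion class and keeps the SQL type.
            System.out.println(names.get(i) + ": " + types.get(i).getLogicalType());
        }
    }
}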
Use of org.apache.flink.table.catalog.ResolvedSchema in project flink by apache.
In class TemporalTableSourceSpecSerdeTest, the method testTemporalTableSourceSpecSerde:
public static Stream<TemporalTableSourceSpec> testTemporalTableSourceSpecSerde() {
    Map<String, String> options1 = new HashMap<>();
    options1.put("connector", "filesystem");
    options1.put("format", "testcsv");
    options1.put("path", "/tmp");

    final ResolvedSchema resolvedSchema1 =
            new ResolvedSchema(
                    Collections.singletonList(Column.physical("a", DataTypes.BIGINT())),
                    Collections.emptyList(),
                    null);
    final CatalogTable catalogTable1 =
            CatalogTable.of(
                    Schema.newBuilder().fromResolvedSchema(resolvedSchema1).build(),
                    null,
                    Collections.emptyList(),
                    options1);
    ResolvedCatalogTable resolvedCatalogTable =
            new ResolvedCatalogTable(catalogTable1, resolvedSchema1);

    RelDataType relDataType1 = FACTORY.createSqlType(SqlTypeName.BIGINT);
    LookupTableSource lookupTableSource = new TestValuesTableFactory.MockedLookupTableSource();
    TableSourceTable tableSourceTable1 =
            new TableSourceTable(
                    null,
                    relDataType1,
                    FlinkStatistic.UNKNOWN(),
                    lookupTableSource,
                    true,
                    ContextResolvedTable.temporary(
                            ObjectIdentifier.of("default_catalog", "default_db", "MyTable"),
                            resolvedCatalogTable),
                    FLINK_CONTEXT,
                    new SourceAbilitySpec[] {new LimitPushDownSpec(100)});
    TemporalTableSourceSpec temporalTableSourceSpec1 =
            new TemporalTableSourceSpec(tableSourceTable1);
    return Stream.of(temporalTableSourceSpec1);
}
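The three-argument ResolvedSchema constructor used above takes the column list, the watermark specs, and an optional primary key; ResolvedSchema.of is shorthand when the schema has neither watermarks nor a primary key. A minimal sketch comparing the two forms; the class name is illustrative:

import java.util.Collections;

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.catalog.ResolvedSchema;

public class ResolvedSchemaFormsExample {
    public static void main(String[] args) {
        // Explicit form: columns, watermark specs (none), primary key (none).
        ResolvedSchema explicit =
                new ResolvedSchema(
                        Collections.singletonList(Column.physical("a", DataTypes.BIGINT())),
                        Collections.emptyList(),
                        null);
        // Shorthand for the same physical-columns-only schema.
        ResolvedSchema shorthand = ResolvedSchema.of(Column.physical("a", DataTypes.BIGINT()));
        // ResolvedSchema has value semantics, so the two compare equal.
        System.out.println(explicit.equals(shorthand));
    }
}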
Use of org.apache.flink.table.catalog.ResolvedSchema in project flink by apache.
In class DataGenTableSourceFactoryTest, the method testDataTypeCoverage:
@Test
public void testDataTypeCoverage() throws Exception {
    ResolvedSchema schema =
            ResolvedSchema.of(
                    Column.physical("f0", DataTypes.CHAR(1)),
                    Column.physical("f1", DataTypes.VARCHAR(10)),
                    Column.physical("f2", DataTypes.STRING()),
                    Column.physical("f3", DataTypes.BOOLEAN()),
                    Column.physical("f4", DataTypes.DECIMAL(32, 2)),
                    Column.physical("f5", DataTypes.TINYINT()),
                    Column.physical("f6", DataTypes.SMALLINT()),
                    Column.physical("f7", DataTypes.INT()),
                    Column.physical("f8", DataTypes.BIGINT()),
                    Column.physical("f9", DataTypes.FLOAT()),
                    Column.physical("f10", DataTypes.DOUBLE()),
                    Column.physical("f11", DataTypes.DATE()),
                    Column.physical("f12", DataTypes.TIME()),
                    Column.physical("f13", DataTypes.TIMESTAMP()),
                    Column.physical("f14", DataTypes.TIMESTAMP_WITH_TIME_ZONE()),
                    Column.physical("f15", DataTypes.TIMESTAMP_WITH_LOCAL_TIME_ZONE()),
                    Column.physical("f16", DataTypes.INTERVAL(DataTypes.DAY())),
                    Column.physical("f17", DataTypes.ARRAY(DataTypes.INT())),
                    Column.physical("f18", DataTypes.MAP(DataTypes.STRING(), DataTypes.DATE())),
                    Column.physical("f19", DataTypes.MULTISET(DataTypes.DECIMAL(32, 2))),
                    Column.physical(
                            "f20",
                            DataTypes.ROW(
                                    DataTypes.FIELD("a", DataTypes.BIGINT()),
                                    DataTypes.FIELD("b", DataTypes.TIME()),
                                    DataTypes.FIELD(
                                            "c",
                                            DataTypes.ROW(
                                                    DataTypes.FIELD(
                                                            "d", DataTypes.TIMESTAMP()))))));

    DescriptorProperties descriptor = new DescriptorProperties();
    descriptor.putString(FactoryUtil.CONNECTOR.key(), "datagen");
    descriptor.putString(DataGenConnectorOptions.NUMBER_OF_ROWS.key(), "10");

    // add min max option for numeric types
    descriptor.putString("fields.f4.min", "1.0");
    descriptor.putString("fields.f4.max", "1000.0");
    descriptor.putString("fields.f5.min", "0");
    descriptor.putString("fields.f5.max", "127");
    descriptor.putString("fields.f6.min", "0");
    descriptor.putString("fields.f6.max", "32767");
    descriptor.putString("fields.f7.min", "0");
    descriptor.putString("fields.f7.max", "65535");
    descriptor.putString("fields.f8.min", "0");
    descriptor.putString("fields.f8.max", String.valueOf(Long.MAX_VALUE));
    descriptor.putString("fields.f9.min", "0");
    descriptor.putString("fields.f9.max", String.valueOf(Float.MAX_VALUE));
    descriptor.putString("fields.f10.min", "0");
    descriptor.putString("fields.f10.max", String.valueOf(Double.MAX_VALUE));

    List<RowData> results = runGenerator(schema, descriptor);
    Assert.assertEquals("Failed to generate all rows", 10, results.size());
    for (RowData row : results) {
        for (int i = 0; i < row.getArity(); i++) {
            Assert.assertFalse(
                    "Column " + schema.getColumnNames().get(i) + " should not be null",
                    row.isNullAt(i));
        }
    }
}
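The descriptor keys above map one-to-one onto the DataGen connector's SQL options, so an equivalent table can be declared in DDL. A minimal sketch under that assumption; the table name gen and the reduced column set are illustrative, and the option names should be checked against the connector documentation for your Flink version:

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;

public class DataGenDdlExample {
    public static void main(String[] args) {
        TableEnvironment env = TableEnvironment.create(EnvironmentSettings.inStreamingMode());
        // Same connector options as the descriptor above, expressed as DDL.
        env.executeSql(
                "CREATE TABLE gen (f4 DECIMAL(32, 2), f8 BIGINT) WITH ("
                        + " 'connector' = 'datagen',"
                        + " 'number-of-rows' = '10',"
                        + " 'fields.f4.min' = '1.0',"
                        + " 'fields.f4.max' = '1000.0',"
                        + " 'fields.f8.min' = '0',"
                        + " 'fields.f8.max' = '" + Long.MAX_VALUE + "')");
        // Print the ten generated rows.
        env.executeSql("SELECT * FROM gen").print();
    }
}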
Use of org.apache.flink.table.catalog.ResolvedSchema in project flink by apache.
In class PushProjectIntoTableSourceScanRule, the method onMatch:
@Override
public void onMatch(RelOptRuleCall call) {
    final LogicalProject project = call.rel(0);
    final LogicalTableScan scan = call.rel(1);
    final TableSourceTable sourceTable = scan.getTable().unwrap(TableSourceTable.class);
    final boolean supportsNestedProjection =
            supportsNestedProjection(sourceTable.tableSource());

    final int[] refFields = RexNodeExtractor.extractRefInputFields(project.getProjects());
    if (!supportsNestedProjection && refFields.length == scan.getRowType().getFieldCount()) {
        // There is no top-level projection and nested projections aren't supported.
        return;
    }

    final FlinkTypeFactory typeFactory = unwrapTypeFactory(scan);
    final ResolvedSchema schema = sourceTable.contextResolvedTable().getResolvedSchema();
    final RowType producedType = createProducedType(schema, sourceTable.tableSource());
    final NestedSchema projectedSchema =
            NestedProjectionUtil.build(
                    getProjections(project, scan),
                    typeFactory.buildRelNodeRowType(producedType));
    if (!supportsNestedProjection) {
        for (NestedColumn column : projectedSchema.columns().values()) {
            column.markLeaf();
        }
    }

    final List<SourceAbilitySpec> abilitySpecs = new ArrayList<>();
    final RowType newProducedType =
            performPushDown(sourceTable, projectedSchema, producedType, abilitySpecs);

    final DynamicTableSource newTableSource = sourceTable.tableSource().copy();
    final SourceAbilityContext context = SourceAbilityContext.from(scan);
    abilitySpecs.forEach(spec -> spec.apply(newTableSource, context));

    final RelDataType newRowType = typeFactory.buildRelNodeRowType(newProducedType);
    final TableSourceTable newSource =
            sourceTable.copy(
                    newTableSource, newRowType, abilitySpecs.toArray(new SourceAbilitySpec[0]));
    final LogicalTableScan newScan =
            new LogicalTableScan(
                    scan.getCluster(), scan.getTraitSet(), scan.getHints(), newSource);
    final LogicalProject newProject =
            project.copy(
                    project.getTraitSet(),
                    newScan,
                    rewriteProjections(call, newSource, projectedSchema),
                    project.getRowType());
    if (ProjectRemoveRule.isTrivial(newProject)) {
        call.transformTo(newScan);
    } else {
        call.transformTo(newProject);
    }
}
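The rule only rewrites scans whose DynamicTableSource implements SupportsProjectionPushDown, and its supportsNestedProjection() answer decides whether every NestedColumn above is marked as a leaf. A minimal sketch of that connector-side contract, assuming the two-argument applyProjection signature of recent Flink releases; the class name and stored field are illustrative:

import org.apache.flink.table.connector.source.abilities.SupportsProjectionPushDown;
import org.apache.flink.table.types.DataType;

public abstract class ProjectableSourceSketch implements SupportsProjectionPushDown {

    // Illustrative field: the projected output type pushed into the source.
    protected DataType producedDataType;

    @Override
    public boolean supportsNestedProjection() {
        // Returning false makes the rule collapse nested accesses to top-level fields.
        return false;
    }

    @Override
    public void applyProjection(int[][] projectedFields, DataType producedDataType) {
        // Each int[] is a field path, e.g. {2, 0} = first nested field of the third column.
        this.producedDataType = producedDataType;
    }
}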