Example 51 with ResolvedSchema

Use of org.apache.flink.table.catalog.ResolvedSchema in project flink by apache.

In class MaterializedCollectStreamResultTest, method testSnapshot.

@Test
public void testSnapshot() throws Exception {
    final ResolvedSchema schema =
            ResolvedSchema.physical(
                    new String[] {"f0", "f1"},
                    new DataType[] {DataTypes.STRING(), DataTypes.INT()});
    @SuppressWarnings({"unchecked", "rawtypes"})
    final DataStructureConverter<RowData, Row> rowConverter =
            (DataStructureConverter)
                    DataStructureConverters.getConverter(schema.toPhysicalRowDataType());
    try (TestMaterializedCollectStreamResult result =
            new TestMaterializedCollectStreamResult(
                    new TestTableResult(ResultKind.SUCCESS_WITH_CONTENT, schema),
                    Integer.MAX_VALUE,
                    createInternalBinaryRowDataConverter(schema.toPhysicalRowDataType()))) {
        result.isRetrieving = true;
        result.processRecord(Row.ofKind(RowKind.INSERT, "A", 1));
        result.processRecord(Row.ofKind(RowKind.INSERT, "B", 1));
        result.processRecord(Row.ofKind(RowKind.INSERT, "A", 1));
        result.processRecord(Row.ofKind(RowKind.INSERT, "C", 2));
        assertEquals(TypedResult.payload(4), result.snapshot(1));
        assertRowEquals(Collections.singletonList(Row.of("A", 1)), result.retrievePage(1), rowConverter);
        assertRowEquals(Collections.singletonList(Row.of("B", 1)), result.retrievePage(2), rowConverter);
        assertRowEquals(Collections.singletonList(Row.of("A", 1)), result.retrievePage(3), rowConverter);
        assertRowEquals(Collections.singletonList(Row.of("C", 2)), result.retrievePage(4), rowConverter);
        result.processRecord(Row.ofKind(RowKind.UPDATE_BEFORE, "A", 1));
        assertEquals(TypedResult.payload(3), result.snapshot(1));
        assertRowEquals(Collections.singletonList(Row.of("A", 1)), result.retrievePage(1), rowConverter);
        assertRowEquals(Collections.singletonList(Row.of("B", 1)), result.retrievePage(2), rowConverter);
        assertRowEquals(Collections.singletonList(Row.of("C", 2)), result.retrievePage(3), rowConverter);
        result.processRecord(Row.ofKind(RowKind.UPDATE_BEFORE, "C", 2));
        result.processRecord(Row.ofKind(RowKind.UPDATE_BEFORE, "A", 1));
        result.processRecord(Row.ofKind(RowKind.UPDATE_AFTER, "D", 1));
        assertEquals(TypedResult.payload(2), result.snapshot(1));
        assertRowEquals(Collections.singletonList(Row.of("B", 1)), result.retrievePage(1), rowConverter);
        assertRowEquals(Collections.singletonList(Row.of("D", 1)), result.retrievePage(2), rowConverter);
    }
}
Also used : RowData(org.apache.flink.table.data.RowData) BinaryRowData(org.apache.flink.table.data.binary.BinaryRowData) DataStructureConverter(org.apache.flink.table.data.conversion.DataStructureConverter) Row(org.apache.flink.types.Row) TestTableResult(org.apache.flink.table.client.cli.utils.TestTableResult) ResolvedSchema(org.apache.flink.table.catalog.ResolvedSchema) Test(org.junit.Test)
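
A quick way to read the assertions above: additions (INSERT, UPDATE_AFTER) append a row to the materialized view, while retractions (UPDATE_BEFORE, DELETE) remove one matching occurrence, which is why the snapshot size moves from 4 to 3 to 2. The following toy sketch of that rule uses a plain list rather than Flink's actual materialization; the class and method names here are illustrative only.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.flink.types.RowKind;

// Toy model of changelog materialization; not Flink's implementation.
class ChangelogView {

    private final List<List<Object>> rows = new ArrayList<>();

    void apply(RowKind kind, Object... fields) {
        final List<Object> row = Arrays.asList(fields);
        if (kind == RowKind.INSERT || kind == RowKind.UPDATE_AFTER) {
            // additions append a new occurrence
            rows.add(row);
        } else {
            // UPDATE_BEFORE and DELETE remove the first equal occurrence
            rows.remove(row);
        }
    }

    int size() {
        return rows.size(); // 4 after the four inserts, 3 after one UPDATE_BEFORE
    }
}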

Example 52 with ResolvedSchema

Use of org.apache.flink.table.catalog.ResolvedSchema in project flink by apache.

In class MaterializedCollectStreamResultTest, method testLimitedSnapshot.

@Test
public void testLimitedSnapshot() throws Exception {
    final ResolvedSchema schema =
            ResolvedSchema.physical(
                    new String[] {"f0", "f1"},
                    new DataType[] {DataTypes.STRING(), DataTypes.INT()});
    @SuppressWarnings({"unchecked", "rawtypes"})
    final DataStructureConverter<RowData, Row> rowConverter =
            (DataStructureConverter)
                    DataStructureConverters.getConverter(schema.toPhysicalRowDataType());
    // row limit of 2, with an over-commitment of 3 rows
    try (TestMaterializedCollectStreamResult result =
            new TestMaterializedCollectStreamResult(
                    new TestTableResult(ResultKind.SUCCESS_WITH_CONTENT, schema),
                    2, // maximum row count
                    3, // over-commitment threshold
                    createInternalBinaryRowDataConverter(schema.toPhysicalRowDataType()))) {
        result.isRetrieving = true;
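        // four inserts exceed the row limit of 2; evicted rows are nulled until clean-up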
        result.processRecord(Row.ofKind(RowKind.INSERT, "D", 1));
        result.processRecord(Row.ofKind(RowKind.INSERT, "A", 1));
        result.processRecord(Row.ofKind(RowKind.INSERT, "B", 1));
        result.processRecord(Row.ofKind(RowKind.INSERT, "A", 1));
        // two over-committed rows
        assertRowEquals(
                Arrays.asList(
                        null,
                        null,
                        Row.ofKind(RowKind.INSERT, "B", 1),
                        Row.ofKind(RowKind.INSERT, "A", 1)),
                result.getMaterializedTable(),
                rowConverter);
        assertEquals(TypedResult.payload(2), result.snapshot(1));
        assertRowEquals(Collections.singletonList(Row.ofKind(RowKind.INSERT, "B", 1)), result.retrievePage(1), rowConverter);
        assertRowEquals(Collections.singletonList(Row.ofKind(RowKind.INSERT, "A", 1)), result.retrievePage(2), rowConverter);
        result.processRecord(Row.ofKind(RowKind.INSERT, "C", 1));
        // limit clean-up has taken place
        assertRowEquals(
                Arrays.asList(
                        Row.ofKind(RowKind.INSERT, "A", 1), Row.ofKind(RowKind.INSERT, "C", 1)),
                result.getMaterializedTable(),
                rowConverter);
        result.processRecord(Row.ofKind(RowKind.DELETE, "A", 1));
        // regular clean-up has taken place
        assertRowEquals(
                Collections.singletonList(Row.ofKind(RowKind.INSERT, "C", 1)),
                result.getMaterializedTable(),
                rowConverter);
    }
}
Also used : RowData(org.apache.flink.table.data.RowData) BinaryRowData(org.apache.flink.table.data.binary.BinaryRowData) DataStructureConverter(org.apache.flink.table.data.conversion.DataStructureConverter) Row(org.apache.flink.types.Row) TestTableResult(org.apache.flink.table.client.cli.utils.TestTableResult) ResolvedSchema(org.apache.flink.table.catalog.ResolvedSchema) Test(org.junit.Test)

Example 53 with ResolvedSchema

Use of org.apache.flink.table.catalog.ResolvedSchema in project flink by apache.

In class TimestampExtractorUtils, method getAccessedFields.

/**
 * Retrieves all field accesses needed for the given {@link TimestampExtractor}.
 *
 * @param timestampExtractor Extractor for which to construct array of field accesses.
 * @param physicalInputType Physical input type that the timestamp extractor accesses.
 * @param nameRemapping Additional remapping of a logical to a physical field name.
 *     The TimestampExtractor works with logical names but accesses physical fields.
 * @return Array of physical field references.
 */
public static ResolvedFieldReference[] getAccessedFields(
        TimestampExtractor timestampExtractor,
        DataType physicalInputType,
        Function<String, String> nameRemapping) {
    final Function<String, ResolvedFieldReference> fieldMapping;
    if (LogicalTypeChecks.isCompositeType(physicalInputType.getLogicalType())) {
        // composite types (e.g. ROW) are expanded so that fields can be resolved by name
        ResolvedSchema schema = DataTypeUtils.expandCompositeTypeToSchema(physicalInputType);
        fieldMapping = (arg) -> mapToResolvedField(nameRemapping, schema, arg);
    } else {
        // atomic types map every access to the single physical field at index 0
        fieldMapping =
                (arg) ->
                        new ResolvedFieldReference(
                                arg,
                                TypeConversions.fromDataTypeToLegacyInfo(physicalInputType),
                                0);
    }
    return getAccessedFields(timestampExtractor, fieldMapping);
}
Also used : ResolvedFieldReference(org.apache.flink.table.expressions.ResolvedFieldReference) ResolvedSchema(org.apache.flink.table.catalog.ResolvedSchema)
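
The composite branch above relies on DataTypeUtils.expandCompositeTypeToSchema to turn a ROW type into a schema whose columns can be looked up by name. A minimal sketch of that expansion follows; the row type is illustrative, and the import path shown is the usual location of DataTypeUtils.

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.catalog.ResolvedSchema;
import org.apache.flink.table.types.DataType;
import org.apache.flink.table.types.utils.DataTypeUtils;

// An illustrative composite (ROW) input type.
DataType composite =
        DataTypes.ROW(
                DataTypes.FIELD("ts", DataTypes.TIMESTAMP(3)),
                DataTypes.FIELD("v", DataTypes.INT()));

// Each ROW field becomes a column, so logical names from the
// TimestampExtractor can be resolved against the expanded schema.
ResolvedSchema expanded = DataTypeUtils.expandCompositeTypeToSchema(composite);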

Example 54 with ResolvedSchema

Use of org.apache.flink.table.catalog.ResolvedSchema in project flink by apache.

In class TableSchema, method fromResolvedSchema.

/**
 * Helps to migrate from the new {@link ResolvedSchema} to old API methods.
 */
public static TableSchema fromResolvedSchema(ResolvedSchema resolvedSchema) {
    final TableSchema.Builder builder = TableSchema.builder();
    resolvedSchema.getColumns().stream().map(column -> {
        if (column instanceof Column.PhysicalColumn) {
            final Column.PhysicalColumn c = (Column.PhysicalColumn) column;
            return TableColumn.physical(c.getName(), c.getDataType());
        } else if (column instanceof Column.MetadataColumn) {
            final Column.MetadataColumn c = (Column.MetadataColumn) column;
            return TableColumn.metadata(c.getName(), c.getDataType(), c.getMetadataKey().orElse(null), c.isVirtual());
        } else if (column instanceof Column.ComputedColumn) {
            final Column.ComputedColumn c = (Column.ComputedColumn) column;
            return TableColumn.computed(c.getName(), c.getDataType(), c.getExpression().asSerializableString());
        }
        throw new IllegalArgumentException("Unsupported column type: " + column);
    }).forEach(builder::add);
    resolvedSchema
            .getWatermarkSpecs()
            .forEach(
                    spec ->
                            builder.watermark(
                                    spec.getRowtimeAttribute(),
                                    spec.getWatermarkExpression().asSerializableString(),
                                    spec.getWatermarkExpression().getOutputDataType()));
    resolvedSchema
            .getPrimaryKey()
            .ifPresent(
                    pk -> builder.primaryKey(pk.getName(), pk.getColumns().toArray(new String[0])));
    return builder.build();
}
Also used : IntStream(java.util.stream.IntStream) DataType(org.apache.flink.table.types.DataType) Arrays(java.util.Arrays) ROW(org.apache.flink.table.api.DataTypes.ROW) Column(org.apache.flink.table.catalog.Column) ResolvedSchema(org.apache.flink.table.catalog.ResolvedSchema) MetadataColumn(org.apache.flink.table.api.TableColumn.MetadataColumn) PublicEvolving(org.apache.flink.annotation.PublicEvolving) HashMap(java.util.HashMap) PhysicalColumn(org.apache.flink.table.api.TableColumn.PhysicalColumn) Function(java.util.function.Function) TypeConversions.fromDataTypeToLegacyInfo(org.apache.flink.table.types.utils.TypeConversions.fromDataTypeToLegacyInfo) ArrayList(java.util.ArrayList) Map(java.util.Map) TypeConversions.fromLegacyInfoToDataType(org.apache.flink.table.types.utils.TypeConversions.fromLegacyInfoToDataType) TypeInformation(org.apache.flink.api.common.typeinfo.TypeInformation) FIELD(org.apache.flink.table.api.DataTypes.FIELD) Nullable(javax.annotation.Nullable) ComputedColumn(org.apache.flink.table.api.TableColumn.ComputedColumn) CompositeType(org.apache.flink.api.common.typeutils.CompositeType) LogicalTypeChecks.isCompositeType(org.apache.flink.table.types.logical.utils.LogicalTypeChecks.isCompositeType) UUID(java.util.UUID) Preconditions(org.apache.flink.util.Preconditions) StringUtils(org.apache.flink.util.StringUtils) Collectors(java.util.stream.Collectors) Field(org.apache.flink.table.api.DataTypes.Field) Objects(java.util.Objects) LogicalTypeChecks.canBeTimeAttributeType(org.apache.flink.table.types.logical.utils.LogicalTypeChecks.canBeTimeAttributeType) LegacyTypeInformationType(org.apache.flink.table.types.logical.LegacyTypeInformationType) List(java.util.List) LogicalType(org.apache.flink.table.types.logical.LogicalType) Optional(java.util.Optional) TypeConversions(org.apache.flink.table.types.utils.TypeConversions) Row(org.apache.flink.types.Row) UniqueConstraint(org.apache.flink.table.api.constraints.UniqueConstraint) LogicalTypeRoot(org.apache.flink.table.types.logical.LogicalTypeRoot) Collections(java.util.Collections) LogicalTypeChecks(org.apache.flink.table.types.logical.utils.LogicalTypeChecks)
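
A brief usage sketch of the conversion above; the column names and types are illustrative. Physical columns in the ResolvedSchema map 1:1 onto TableColumn.physical entries in the resulting TableSchema.

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.catalog.ResolvedSchema;

ResolvedSchema resolvedSchema =
        ResolvedSchema.of(
                Column.physical("id", DataTypes.BIGINT().notNull()),
                Column.physical("name", DataTypes.STRING()));

// Physical columns become TableColumn.physical entries in the old API.
TableSchema legacySchema = TableSchema.fromResolvedSchema(resolvedSchema);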

Example 55 with ResolvedSchema

Use of org.apache.flink.table.catalog.ResolvedSchema in project flink by apache.

In class QueryOperationTest, method testSummaryString.

@Test
public void testSummaryString() {
    ResolvedSchema schema =
            ResolvedSchema.physical(
                    Collections.singletonList("a"), Collections.singletonList(DataTypes.INT()));
    ProjectQueryOperation tableOperation =
            new ProjectQueryOperation(
                    Collections.singletonList(
                            new FieldReferenceExpression("a", DataTypes.INT(), 0, 0)),
                    new SourceQueryOperation(
                            ContextResolvedTable.temporary(
                                    ObjectIdentifier.of("cat1", "db1", "tab1"),
                                    new ResolvedCatalogTable(
                                            CatalogTable.of(
                                                    Schema.newBuilder().build(),
                                                    null,
                                                    Collections.emptyList(),
                                                    Collections.emptyMap()),
                                            schema))),
                    schema);
    // union of the projection with itself; the summary string is an indented operation tree
    SetQueryOperation unionQueryOperation =
            new SetQueryOperation(
                    tableOperation,
                    tableOperation,
                    SetQueryOperation.SetQueryOperationType.UNION,
                    true,
                    schema);
    assertEquals(
            "Union: (all: [true])\n"
                    + "    Project: (projections: [a])\n"
                    + "        CatalogTable: (identifier: [cat1.db1.tab1], fields: [a])\n"
                    + "    Project: (projections: [a])\n"
                    + "        CatalogTable: (identifier: [cat1.db1.tab1], fields: [a])",
            unionQueryOperation.asSummaryString());
}
Also used : ResolvedCatalogTable(org.apache.flink.table.catalog.ResolvedCatalogTable) FieldReferenceExpression(org.apache.flink.table.expressions.FieldReferenceExpression) ResolvedSchema(org.apache.flink.table.catalog.ResolvedSchema) Test(org.junit.Test)

Aggregations

ResolvedSchema (org.apache.flink.table.catalog.ResolvedSchema)84 Test (org.junit.Test)50 DynamicTableSink (org.apache.flink.table.connector.sink.DynamicTableSink)20 DataType (org.apache.flink.table.types.DataType)20 RowData (org.apache.flink.table.data.RowData)17 ValidationException (org.apache.flink.table.api.ValidationException)14 ResolvedCatalogTable (org.apache.flink.table.catalog.ResolvedCatalogTable)14 List (java.util.List)11 SinkRuntimeProviderContext (org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext)11 DynamicTableSource (org.apache.flink.table.connector.source.DynamicTableSource)10 Column (org.apache.flink.table.catalog.Column)9 LogicalType (org.apache.flink.table.types.logical.LogicalType)9 RowType (org.apache.flink.table.types.logical.RowType)9 HashMap (java.util.HashMap)8 Collectors (java.util.stream.Collectors)8 RelDataType (org.apache.calcite.rel.type.RelDataType)8 Internal (org.apache.flink.annotation.Internal)8 HBaseWriteOptions (org.apache.flink.connector.hbase.options.HBaseWriteOptions)6 FlinkTypeFactory (org.apache.flink.table.planner.calcite.FlinkTypeFactory)6 Row (org.apache.flink.types.Row)6