Example 71 with ResolvedSchema

Use of org.apache.flink.table.catalog.ResolvedSchema in the Apache Flink project, in the class HBaseDynamicTableFactoryTest, method testUnknownOption.

@Test
public void testUnknownOption() {
    Map<String, String> options = getAllOptions();
    options.put("sink.unknown.key", "unknown-value");
    ResolvedSchema schema =
            ResolvedSchema.of(
                    Column.physical(ROWKEY, STRING()),
                    Column.physical(
                            FAMILY1, ROW(FIELD(COL1, DOUBLE()), FIELD(COL2, INT()))));
    try {
        createTableSource(schema, options);
        fail("Should fail");
    } catch (Exception e) {
        assertTrue(ExceptionUtils.findThrowableWithMessage(e, "Unsupported options:\n\nsink.unknown.key").isPresent());
    }
    try {
        createTableSink(schema, options);
        fail("Should fail");
    } catch (Exception e) {
        assertTrue(ExceptionUtils.findThrowableWithMessage(e, "Unsupported options:\n\nsink.unknown.key").isPresent());
    }
}
Also used: ResolvedSchema (org.apache.flink.table.catalog.ResolvedSchema), ExpectedException (org.junit.rules.ExpectedException), Test (org.junit.Test)
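
For context, the "Unsupported options" message asserted above comes from Flink's factory option validation. Below is a minimal sketch, assuming a hypothetical factory class (SketchTableFactory and its empty option sets are illustrative, not the HBase connector's actual code), of how a connector factory typically triggers that error:

import java.util.Collections;
import java.util.Set;
import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.table.connector.source.DynamicTableSource;
import org.apache.flink.table.factories.DynamicTableSourceFactory;
import org.apache.flink.table.factories.FactoryUtil;

public class SketchTableFactory implements DynamicTableSourceFactory {

    @Override
    public DynamicTableSource createDynamicTableSource(Context context) {
        FactoryUtil.TableFactoryHelper helper =
                FactoryUtil.createTableFactoryHelper(this, context);
        // validate() compares the supplied options against requiredOptions()
        // and optionalOptions(); an unknown key such as "sink.unknown.key"
        // raises a ValidationException whose message lists "Unsupported options:".
        helper.validate();
        return null; // a real factory would build and return the source here
    }

    @Override
    public String factoryIdentifier() {
        return "sketch";
    }

    @Override
    public Set<ConfigOption<?>> requiredOptions() {
        return Collections.emptySet();
    }

    @Override
    public Set<ConfigOption<?>> optionalOptions() {
        return Collections.emptySet();
    }
}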

Example 72 with ResolvedSchema

Use of org.apache.flink.table.catalog.ResolvedSchema in the Apache Flink project, in the class HBaseDynamicTableFactoryTest, method testTypeWithUnsupportedPrecision.

@Test
public void testTypeWithUnsupportedPrecision() {
    Map<String, String> options = getAllOptions();
    // test unsupported timestamp precision
    ResolvedSchema schema =
            ResolvedSchema.of(
                    Column.physical(ROWKEY, STRING()),
                    Column.physical(
                            FAMILY1, ROW(FIELD(COL1, TIMESTAMP(6)), FIELD(COL2, INT()))));
    try {
        createTableSource(schema, options);
        fail("Should fail");
    } catch (Exception e) {
        assertTrue(ExceptionUtils.findThrowableWithMessage(e, "The precision 6 of TIMESTAMP type is out of the range [0, 3]" + " supported by HBase connector").isPresent());
    }
    try {
        createTableSink(schema, options);
        fail("Should fail");
    } catch (Exception e) {
        assertTrue(ExceptionUtils.findThrowableWithMessage(e, "The precision 6 of TIMESTAMP type is out of the range [0, 3]" + " supported by HBase connector").isPresent());
    }
    // test unsupported time precision
    schema =
            ResolvedSchema.of(
                    Column.physical(ROWKEY, STRING()),
                    Column.physical(
                            FAMILY1, ROW(FIELD(COL1, TIME(6)), FIELD(COL2, INT()))));
    try {
        createTableSource(schema, options);
        fail("Should fail");
    } catch (Exception e) {
        assertTrue(ExceptionUtils.findThrowableWithMessage(e, "The precision 6 of TIME type is out of the range [0, 3]" + " supported by HBase connector").isPresent());
    }
    try {
        createTableSink(schema, options);
        fail("Should fail");
    } catch (Exception e) {
        assertTrue(ExceptionUtils.findThrowableWithMessage(e, "The precision 6 of TIME type is out of the range [0, 3]" + " supported by HBase connector").isPresent());
    }
}
Also used: ResolvedSchema (org.apache.flink.table.catalog.ResolvedSchema), ExpectedException (org.junit.rules.ExpectedException), Test (org.junit.Test)
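
For reference, the bounds check behind these error messages can be sketched as follows. This is an illustrative helper (PrecisionChecks and validatePrecision are assumed names, not the connector's actual code) that recursively rejects TIMESTAMP and TIME precisions outside [0, 3]:

import org.apache.flink.table.api.ValidationException;
import org.apache.flink.table.types.logical.LogicalType;
import org.apache.flink.table.types.logical.LogicalTypeRoot;
import org.apache.flink.table.types.logical.utils.LogicalTypeChecks;

public final class PrecisionChecks {

    // Walks a logical type tree and throws for precisions the HBase
    // connector cannot store.
    static void validatePrecision(LogicalType type) {
        LogicalTypeRoot root = type.getTypeRoot();
        if (root == LogicalTypeRoot.TIMESTAMP_WITHOUT_TIME_ZONE
                || root == LogicalTypeRoot.TIME_WITHOUT_TIME_ZONE) {
            int precision = LogicalTypeChecks.getPrecision(type);
            if (precision < 0 || precision > 3) {
                String name =
                        root == LogicalTypeRoot.TIME_WITHOUT_TIME_ZONE ? "TIME" : "TIMESTAMP";
                throw new ValidationException(
                        String.format(
                                "The precision %d of %s type is out of the range [0, 3]"
                                        + " supported by HBase connector",
                                precision, name));
            }
        }
        // Recurse into nested types such as the ROW column families above.
        type.getChildren().forEach(child -> validatePrecision(child));
    }
}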

Example 73 with ResolvedSchema

Use of org.apache.flink.table.catalog.ResolvedSchema in the Apache Flink project, in the class CommonExecSink, method createSinkTransformation.

@SuppressWarnings("unchecked")
protected Transformation<Object> createSinkTransformation(
        StreamExecutionEnvironment streamExecEnv,
        ReadableConfig config,
        Transformation<RowData> inputTransform,
        DynamicTableSink tableSink,
        int rowtimeFieldIndex,
        boolean upsertMaterialize) {
    final ResolvedSchema schema = tableSinkSpec.getContextResolvedTable().getResolvedSchema();
    final SinkRuntimeProvider runtimeProvider = tableSink.getSinkRuntimeProvider(new SinkRuntimeProviderContext(isBounded));
    final RowType physicalRowType = getPhysicalRowType(schema);
    final int[] primaryKeys = getPrimaryKeyIndices(physicalRowType, schema);
    final int sinkParallelism = deriveSinkParallelism(inputTransform, runtimeProvider);
    final int inputParallelism = inputTransform.getParallelism();
    final boolean inputInsertOnly = inputChangelogMode.containsOnly(RowKind.INSERT);
    final boolean hasPk = primaryKeys.length > 0;
    if (!inputInsertOnly && sinkParallelism != inputParallelism && !hasPk) {
        throw new TableException(
                String.format(
                        "The sink for table '%s' has a configured parallelism of %s, while the input parallelism is %s. "
                                + "Since the configured parallelism is different from the input's parallelism and "
                                + "the changelog mode is not insert-only, a primary key is required but could not "
                                + "be found.",
                        tableSinkSpec.getContextResolvedTable().getIdentifier().asSummaryString(),
                        sinkParallelism,
                        inputParallelism));
    }
    // only add materialization if the input is not insert-only, i.e. it produces updates
    final boolean needMaterialization = !inputInsertOnly && upsertMaterialize;
    Transformation<RowData> sinkTransform = applyConstraintValidations(inputTransform, config, physicalRowType);
    if (hasPk) {
        sinkTransform = applyKeyBy(config, sinkTransform, primaryKeys, sinkParallelism, inputParallelism, inputInsertOnly, needMaterialization);
    }
    if (needMaterialization) {
        sinkTransform = applyUpsertMaterialize(sinkTransform, primaryKeys, sinkParallelism, config, physicalRowType);
    }
    return (Transformation<Object>) applySinkProvider(sinkTransform, streamExecEnv, runtimeProvider, rowtimeFieldIndex, sinkParallelism, config);
}
Also used: SinkRuntimeProviderContext (org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext), TableException (org.apache.flink.table.api.TableException), RowData (org.apache.flink.table.data.RowData), PartitionTransformation (org.apache.flink.streaming.api.transformations.PartitionTransformation), LegacySinkTransformation (org.apache.flink.streaming.api.transformations.LegacySinkTransformation), OneInputTransformation (org.apache.flink.streaming.api.transformations.OneInputTransformation), Transformation (org.apache.flink.api.dag.Transformation), RowType (org.apache.flink.table.types.logical.RowType), ResolvedSchema (org.apache.flink.table.catalog.ResolvedSchema), SinkRuntimeProvider (org.apache.flink.table.connector.sink.DynamicTableSink.SinkRuntimeProvider)
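
As a companion to the snippet above, here is a minimal sketch, assuming primary-key indices are derived by name lookup, of what a getPrimaryKeyIndices-style helper needs to do (PrimaryKeyUtil and primaryKeyIndices are illustrative names, not the planner's exact code):

import org.apache.flink.table.catalog.ResolvedSchema;
import org.apache.flink.table.types.logical.RowType;

public final class PrimaryKeyUtil {

    // Maps the primary-key column names declared on the ResolvedSchema to
    // their positions in the physical RowType; returns an empty array when
    // no primary key is defined (the hasPk == false case above).
    static int[] primaryKeyIndices(RowType physicalRowType, ResolvedSchema schema) {
        return schema.getPrimaryKey()
                .map(pk -> pk.getColumns().stream()
                        .mapToInt(physicalRowType.getFieldNames()::indexOf)
                        .toArray())
                .orElseGet(() -> new int[0]);
    }
}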

Example 74 with ResolvedSchema

Use of org.apache.flink.table.catalog.ResolvedSchema in the Apache Flink project, in the class ResolvedSchemaJsonDeserializer, method deserialize.

@Override
public ResolvedSchema deserialize(JsonParser jsonParser, DeserializationContext ctx)
        throws IOException {
    ObjectNode jsonNode = jsonParser.readValueAsTree();
    List<Column> columns =
            ctx.readValue(
                    traverse(jsonNode.required(COLUMNS), jsonParser.getCodec()),
                    ctx.getTypeFactory().constructCollectionType(List.class, Column.class));
    List<WatermarkSpec> watermarkSpecs =
            ctx.readValue(
                    traverse(jsonNode.required(WATERMARK_SPECS), jsonParser.getCodec()),
                    ctx.getTypeFactory()
                            .constructCollectionType(List.class, WatermarkSpec.class));
    UniqueConstraint primaryKey =
            deserializeOptionalField(
                            jsonNode, PRIMARY_KEY, UniqueConstraint.class, jsonParser.getCodec(), ctx)
                    .orElse(null);
    return new ResolvedSchema(columns, watermarkSpecs, primaryKey);
}
Also used: ObjectNode (org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.node.ObjectNode), WatermarkSpec (org.apache.flink.table.catalog.WatermarkSpec), Column (org.apache.flink.table.catalog.Column), UniqueConstraint (org.apache.flink.table.catalog.UniqueConstraint), List (java.util.List), ResolvedSchema (org.apache.flink.table.catalog.ResolvedSchema)
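
The three pieces the deserializer reassembles (columns, watermark specs, optional primary key) map directly onto ResolvedSchema's public constructor. Here is a minimal sketch of building the same structure by hand; the names "id" and "PK_id" are illustrative:

import java.util.Collections;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.catalog.ResolvedSchema;
import org.apache.flink.table.catalog.UniqueConstraint;

public final class SchemaSketch {

    // One physical column, no watermark specs, a single-column primary key:
    // the same (columns, watermarkSpecs, primaryKey) triple as the JSON path.
    static ResolvedSchema exampleSchema() {
        return new ResolvedSchema(
                Collections.singletonList(
                        Column.physical("id", DataTypes.BIGINT().notNull())),
                Collections.emptyList(),
                UniqueConstraint.primaryKey("PK_id", Collections.singletonList("id")));
    }
}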

Example 75 with ResolvedSchema

Use of org.apache.flink.table.catalog.ResolvedSchema in the Apache Flink project, in the class ResolvedCatalogTableJsonDeserializer, method deserialize.

@Override
public ResolvedCatalogTable deserialize(JsonParser jsonParser, DeserializationContext ctx)
        throws IOException {
    ObjectNode jsonNode = jsonParser.readValueAsTree();
    ResolvedSchema resolvedSchema =
            ctx.readValue(
                    traverse(jsonNode.required(RESOLVED_SCHEMA), jsonParser.getCodec()),
                    ResolvedSchema.class);
    List<String> partitionKeys =
            ctx.readValue(
                    traverse(jsonNode.required(PARTITION_KEYS), jsonParser.getCodec()),
                    ctx.getTypeFactory().constructCollectionType(List.class, String.class));
    String comment =
            deserializeOptionalField(jsonNode, COMMENT, String.class, jsonParser.getCodec(), ctx)
                    .orElse(null);
    @SuppressWarnings("unchecked")
    Map<String, String> options =
            (Map<String, String>)
                    deserializeOptionalField(
                                    jsonNode,
                                    OPTIONS,
                                    ctx.getTypeFactory()
                                            .constructMapType(Map.class, String.class, String.class),
                                    jsonParser.getCodec(),
                                    ctx)
                            .orElse(Collections.emptyMap());
    return new ResolvedCatalogTable(
            CatalogTable.of(
                    // Create the unresolved schema from the resolved one, for safety
                    // reason, in case one tries to access the unresolved schema.
                    Schema.newBuilder().fromResolvedSchema(resolvedSchema).build(),
                    comment,
                    partitionKeys,
                    options),
            resolvedSchema);
}
Also used: ObjectNode (org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.node.ObjectNode), ResolvedCatalogTable (org.apache.flink.table.catalog.ResolvedCatalogTable), List (java.util.List), ResolvedSchema (org.apache.flink.table.catalog.ResolvedSchema), Map (java.util.Map)
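
To round this out, a minimal sketch, assuming a resolvedSchema produced elsewhere, of pairing an unresolved CatalogTable with its ResolvedSchema the same way the return statement above does (CatalogTableSketch, toResolvedTable, and the placeholder arguments are illustrative):

import java.util.Collections;
import org.apache.flink.table.api.Schema;
import org.apache.flink.table.catalog.CatalogTable;
import org.apache.flink.table.catalog.ResolvedCatalogTable;
import org.apache.flink.table.catalog.ResolvedSchema;

public final class CatalogTableSketch {

    // Derives the unresolved Schema from the ResolvedSchema, then wraps both
    // into a ResolvedCatalogTable.
    static ResolvedCatalogTable toResolvedTable(ResolvedSchema resolvedSchema) {
        CatalogTable unresolved =
                CatalogTable.of(
                        Schema.newBuilder().fromResolvedSchema(resolvedSchema).build(),
                        null, // no comment
                        Collections.emptyList(), // no partition keys
                        Collections.emptyMap()); // no options
        return new ResolvedCatalogTable(unresolved, resolvedSchema);
    }
}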

Aggregations

ResolvedSchema (org.apache.flink.table.catalog.ResolvedSchema): 84
Test (org.junit.Test): 50
DynamicTableSink (org.apache.flink.table.connector.sink.DynamicTableSink): 20
DataType (org.apache.flink.table.types.DataType): 20
RowData (org.apache.flink.table.data.RowData): 17
ValidationException (org.apache.flink.table.api.ValidationException): 14
ResolvedCatalogTable (org.apache.flink.table.catalog.ResolvedCatalogTable): 14
List (java.util.List): 11
SinkRuntimeProviderContext (org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext): 11
DynamicTableSource (org.apache.flink.table.connector.source.DynamicTableSource): 10
Column (org.apache.flink.table.catalog.Column): 9
LogicalType (org.apache.flink.table.types.logical.LogicalType): 9
RowType (org.apache.flink.table.types.logical.RowType): 9
HashMap (java.util.HashMap): 8
Collectors (java.util.stream.Collectors): 8
RelDataType (org.apache.calcite.rel.type.RelDataType): 8
Internal (org.apache.flink.annotation.Internal): 8
HBaseWriteOptions (org.apache.flink.connector.hbase.options.HBaseWriteOptions): 6
FlinkTypeFactory (org.apache.flink.table.planner.calcite.FlinkTypeFactory): 6
Row (org.apache.flink.types.Row): 6