Example 16 with ReadableConfig

Use of org.apache.flink.configuration.ReadableConfig in project flink by apache.

In class PrintTableSinkFactory, method createDynamicTableSink:

@Override
public DynamicTableSink createDynamicTableSink(Context context) {
    FactoryUtil.TableFactoryHelper helper = FactoryUtil.createTableFactoryHelper(this, context);
    helper.validate();
    ReadableConfig options = helper.getOptions();
    return new PrintSink(
            context.getCatalogTable().getResolvedSchema().toPhysicalRowDataType(),
            context.getCatalogTable().getPartitionKeys(),
            options.get(PRINT_IDENTIFIER),
            options.get(STANDARD_ERROR),
            options.getOptional(FactoryUtil.SINK_PARALLELISM).orElse(null));
}
Also used: ReadableConfig(org.apache.flink.configuration.ReadableConfig) FactoryUtil(org.apache.flink.table.factories.FactoryUtil)
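
For context, PRINT_IDENTIFIER and STANDARD_ERROR are typed ConfigOption constants that the helper validates before the sink reads them from the ReadableConfig. A minimal sketch of how such options are declared; the key names, defaults, and holder class below are illustrative, not the exact Flink definitions:

import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.configuration.ConfigOptions;

public final class PrintConnectorOptions {
    // Illustrative declarations; the real constants live in Flink's print connector code.
    public static final ConfigOption<String> PRINT_IDENTIFIER =
            ConfigOptions.key("print-identifier")
                    .stringType()
                    .noDefaultValue()
                    .withDescription("Message prefix attached to every printed row.");

    public static final ConfigOption<Boolean> STANDARD_ERROR =
            ConfigOptions.key("standard-error")
                    .booleanType()
                    .defaultValue(false)
                    .withDescription("Print to System.err instead of System.out.");

    private PrintConnectorOptions() {}
}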

Example 17 with ReadableConfig

Use of org.apache.flink.configuration.ReadableConfig in project flink by apache.

In class CommonExecSink, method applyConstraintValidations:

/**
 * Applies an operator that enforces NOT NULL and CHAR/BINARY length constraints on the
 * sink input, either by dropping/adjusting offending rows or by raising an error,
 * depending on the configured enforcement strategy.
 */
private Transformation<RowData> applyConstraintValidations(
        Transformation<RowData> inputTransform, ReadableConfig config, RowType physicalRowType) {
    final ConstraintEnforcer.Builder validatorBuilder = ConstraintEnforcer.newBuilder();
    final String[] fieldNames = physicalRowType.getFieldNames().toArray(new String[0]);
    // Build NOT NULL enforcer
    final int[] notNullFieldIndices = getNotNullFieldIndices(physicalRowType);
    if (notNullFieldIndices.length > 0) {
        final ExecutionConfigOptions.NotNullEnforcer notNullEnforcer =
                config.get(ExecutionConfigOptions.TABLE_EXEC_SINK_NOT_NULL_ENFORCER);
        final List<String> notNullFieldNames =
                Arrays.stream(notNullFieldIndices)
                        .mapToObj(idx -> fieldNames[idx])
                        .collect(Collectors.toList());
        validatorBuilder.addNotNullConstraint(notNullEnforcer, notNullFieldIndices, notNullFieldNames, fieldNames);
    }
    final ExecutionConfigOptions.TypeLengthEnforcer typeLengthEnforcer =
            config.get(ExecutionConfigOptions.TABLE_EXEC_SINK_TYPE_LENGTH_ENFORCER);
    // Build CHAR/VARCHAR length enforcer
    final List<ConstraintEnforcer.FieldInfo> charFieldInfo = getFieldInfoForLengthEnforcer(physicalRowType, LengthEnforcerType.CHAR);
    if (!charFieldInfo.isEmpty()) {
        final List<String> charFieldNames =
                charFieldInfo.stream()
                        .map(cfi -> fieldNames[cfi.fieldIdx()])
                        .collect(Collectors.toList());
        validatorBuilder.addCharLengthConstraint(typeLengthEnforcer, charFieldInfo, charFieldNames, fieldNames);
    }
    // Build BINARY/VARBINARY length enforcer
    final List<ConstraintEnforcer.FieldInfo> binaryFieldInfo = getFieldInfoForLengthEnforcer(physicalRowType, LengthEnforcerType.BINARY);
    if (!binaryFieldInfo.isEmpty()) {
        final List<String> binaryFieldNames =
                binaryFieldInfo.stream()
                        .map(cfi -> fieldNames[cfi.fieldIdx()])
                        .collect(Collectors.toList());
        validatorBuilder.addBinaryLengthConstraint(typeLengthEnforcer, binaryFieldInfo, binaryFieldNames, fieldNames);
    }
    ConstraintEnforcer constraintEnforcer = validatorBuilder.build();
    if (constraintEnforcer != null) {
        return ExecNodeUtil.createOneInputTransformation(
                inputTransform,
                createTransformationMeta(
                        CONSTRAINT_VALIDATOR_TRANSFORMATION,
                        constraintEnforcer.getOperatorName(),
                        "ConstraintEnforcer",
                        config),
                constraintEnforcer,
                getInputTypeInfo(),
                inputTransform.getParallelism());
    } else {
        // no constraints to enforce, so skip adding the enforcer operator
        return inputTransform;
    }
}
Also used: TransformationMetadata(org.apache.flink.table.planner.plan.nodes.exec.utils.TransformationMetadata) Arrays(java.util.Arrays) InputProperty(org.apache.flink.table.planner.plan.nodes.exec.InputProperty) ResolvedSchema(org.apache.flink.table.catalog.ResolvedSchema) ExecNode(org.apache.flink.table.planner.plan.nodes.exec.ExecNode) CharType(org.apache.flink.table.types.logical.CharType) KeySelectorUtil(org.apache.flink.table.planner.plan.utils.KeySelectorUtil) InternalSerializers(org.apache.flink.table.runtime.typeutils.InternalSerializers) ConstraintEnforcer(org.apache.flink.table.runtime.operators.sink.ConstraintEnforcer) OutputFormat(org.apache.flink.api.common.io.OutputFormat) SinkUpsertMaterializer(org.apache.flink.table.runtime.operators.sink.SinkUpsertMaterializer) PartitionTransformation(org.apache.flink.streaming.api.transformations.PartitionTransformation) SinkFunction(org.apache.flink.streaming.api.functions.sink.SinkFunction) DynamicTableSink(org.apache.flink.table.connector.sink.DynamicTableSink) StateConfigUtil(org.apache.flink.table.runtime.util.StateConfigUtil) RowDataKeySelector(org.apache.flink.table.runtime.keyselector.RowDataKeySelector) Collectors(java.util.stream.Collectors) JsonProperty(org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonProperty) SimpleOperatorFactory(org.apache.flink.streaming.api.operators.SimpleOperatorFactory) List(java.util.List) InternalTypeInfo(org.apache.flink.table.runtime.typeutils.InternalTypeInfo) LogicalType(org.apache.flink.table.types.logical.LogicalType) DataStreamSinkProvider(org.apache.flink.table.connector.sink.DataStreamSinkProvider) LegacySinkTransformation(org.apache.flink.streaming.api.transformations.LegacySinkTransformation) Optional(java.util.Optional) ExecNodeBase(org.apache.flink.table.planner.plan.nodes.exec.ExecNodeBase) KeyGroupRangeAssignment(org.apache.flink.runtime.state.KeyGroupRangeAssignment) StreamExecutionEnvironment(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment) SinkRuntimeProvider(org.apache.flink.table.connector.sink.DynamicTableSink.SinkRuntimeProvider) IntStream(java.util.stream.IntStream) BinaryType(org.apache.flink.table.types.logical.BinaryType) ParallelismProvider(org.apache.flink.table.connector.ParallelismProvider) ChangelogMode(org.apache.flink.table.connector.ChangelogMode) MultipleTransformationTranslator(org.apache.flink.table.planner.plan.nodes.exec.MultipleTransformationTranslator) StreamRecordTimestampInserter(org.apache.flink.table.runtime.operators.sink.StreamRecordTimestampInserter) TransformationSinkProvider(org.apache.flink.table.planner.connectors.TransformationSinkProvider) RowType(org.apache.flink.table.types.logical.RowType) ArrayList(java.util.ArrayList) SinkV2Provider(org.apache.flink.table.connector.sink.SinkV2Provider) OutputFormatSinkFunction(org.apache.flink.streaming.api.functions.sink.OutputFormatSinkFunction) DynamicTableSinkSpec(org.apache.flink.table.planner.plan.nodes.exec.spec.DynamicTableSinkSpec) ExecNodeUtil(org.apache.flink.table.planner.plan.nodes.exec.utils.ExecNodeUtil) ReadableConfig(org.apache.flink.configuration.ReadableConfig) SinkFunctionProvider(org.apache.flink.table.connector.sink.SinkFunctionProvider) DataStreamSink(org.apache.flink.streaming.api.datastream.DataStreamSink) ExecNodeContext(org.apache.flink.table.planner.plan.nodes.exec.ExecNodeContext) RowData(org.apache.flink.table.data.RowData) ProviderContext(org.apache.flink.table.connector.ProviderContext) SinkOperator(org.apache.flink.table.runtime.operators.sink.SinkOperator) TableException(org.apache.flink.table.api.TableException) SinkProvider(org.apache.flink.table.connector.sink.SinkProvider) OneInputTransformation(org.apache.flink.streaming.api.transformations.OneInputTransformation) DataStream(org.apache.flink.streaming.api.datastream.DataStream) OutputFormatProvider(org.apache.flink.table.connector.sink.OutputFormatProvider) KeyGroupStreamPartitioner(org.apache.flink.streaming.runtime.partitioner.KeyGroupStreamPartitioner) EqualiserCodeGenerator(org.apache.flink.table.planner.codegen.EqualiserCodeGenerator) SinkRuntimeProviderContext(org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext) RowKind(org.apache.flink.types.RowKind) StreamExecNode(org.apache.flink.table.planner.plan.nodes.exec.stream.StreamExecNode) GeneratedRecordEqualiser(org.apache.flink.table.runtime.generated.GeneratedRecordEqualiser) Transformation(org.apache.flink.api.dag.Transformation) InputTypeConfigurable(org.apache.flink.api.java.typeutils.InputTypeConfigurable) ExecutionConfigOptions(org.apache.flink.table.api.config.ExecutionConfigOptions) LogicalTypeRoot(org.apache.flink.table.types.logical.LogicalTypeRoot) LogicalTypeChecks(org.apache.flink.table.types.logical.utils.LogicalTypeChecks)
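
The helper getNotNullFieldIndices is not shown above. A plausible sketch, under the assumption that it simply scans the row type's nullability flags (the actual CommonExecSink implementation may differ):

private int[] getNotNullFieldIndices(RowType physicalType) {
    // Collect the indices of all fields whose logical type is declared NOT NULL.
    return IntStream.range(0, physicalType.getFieldCount())
            .filter(idx -> !physicalType.getTypeAt(idx).isNullable())
            .toArray();
}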

Example 18 with ReadableConfig

Use of org.apache.flink.configuration.ReadableConfig in project flink by apache.

In class RexNodeJsonSerializer, method serialize:

@Override
public void serialize(
        RexNode rexNode, JsonGenerator jsonGenerator, SerializerProvider serializerProvider)
        throws IOException {
    final ReadableConfig config = SerdeContext.get(serializerProvider).getConfiguration();
    final CatalogPlanCompilation compilationStrategy = config.get(TableConfigOptions.PLAN_COMPILE_CATALOG_OBJECTS);
    switch(rexNode.getKind()) {
        case INPUT_REF:
        case TABLE_INPUT_REF:
            serializeInputRef((RexInputRef) rexNode, jsonGenerator, serializerProvider);
            break;
        case LITERAL:
            serializeLiteral((RexLiteral) rexNode, jsonGenerator, serializerProvider);
            break;
        case FIELD_ACCESS:
            serializeFieldAccess((RexFieldAccess) rexNode, jsonGenerator, serializerProvider);
            break;
        case CORREL_VARIABLE:
            serializeCorrelVariable((RexCorrelVariable) rexNode, jsonGenerator, serializerProvider);
            break;
        case PATTERN_INPUT_REF:
            serializePatternFieldRef((RexPatternFieldRef) rexNode, jsonGenerator, serializerProvider);
            break;
        default:
            if (rexNode instanceof RexCall) {
                serializeCall((RexCall) rexNode, jsonGenerator, serializerProvider, compilationStrategy);
            } else {
                throw new TableException("Unknown RexNode: " + rexNode);
            }
    }
}
Also used: RexCall(org.apache.calcite.rex.RexCall) ReadableConfig(org.apache.flink.configuration.ReadableConfig) TableException(org.apache.flink.table.api.TableException) CatalogPlanCompilation(org.apache.flink.table.api.config.TableConfigOptions.CatalogPlanCompilation)
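
Serializers like this are registered with Jackson through a module before plan serialization. A minimal sketch of that wiring, assuming the serializer is accessible via a no-arg constructor; Flink performs the actual registration internally against its shaded Jackson packages:

import org.apache.calcite.rex.RexNode;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.module.SimpleModule;

// Sketch only: build an ObjectMapper that serializes RexNode with the custom serializer.
static ObjectMapper createRexNodeMapper() {
    SimpleModule module = new SimpleModule();
    module.addSerializer(RexNode.class, new RexNodeJsonSerializer());
    ObjectMapper mapper = new ObjectMapper();
    mapper.registerModule(module);
    return mapper;
}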

Example 19 with ReadableConfig

Use of org.apache.flink.configuration.ReadableConfig in project flink by apache.

In class LogicalTypeJsonSerializer, method serialize:

@Override
public void serialize(
        LogicalType logicalType, JsonGenerator jsonGenerator, SerializerProvider serializerProvider)
        throws IOException {
    final ReadableConfig config = SerdeContext.get(serializerProvider).getConfiguration();
    final boolean serializeCatalogObjects =
            !config.get(TableConfigOptions.PLAN_COMPILE_CATALOG_OBJECTS)
                    .equals(CatalogPlanCompilation.IDENTIFIER);
    serializeInternal(logicalType, jsonGenerator, serializerProvider, serializeCatalogObjects);
}
Also used: ReadableConfig(org.apache.flink.configuration.ReadableConfig)
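
The option read here is set by the user on the table configuration. A short sketch of how that might look, with the TableEnvironment wiring shown only for illustration; SCHEMA is one of the three CatalogPlanCompilation strategies (ALL, SCHEMA, IDENTIFIER):

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.api.config.TableConfigOptions;

// Sketch only: choose how much catalog metadata is compiled into the serialized plan.
TableEnvironment tEnv = TableEnvironment.create(EnvironmentSettings.inStreamingMode());
tEnv.getConfig().set(
        TableConfigOptions.PLAN_COMPILE_CATALOG_OBJECTS,
        TableConfigOptions.CatalogPlanCompilation.SCHEMA);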

Example 20 with ReadableConfig

Use of org.apache.flink.configuration.ReadableConfig in project flink by apache.

In class AggregateCallJsonSerializer, method serialize:

@Override
public void serialize(
        AggregateCall aggCall, JsonGenerator jsonGenerator, SerializerProvider serializerProvider)
        throws IOException {
    final ReadableConfig config = SerdeContext.get(serializerProvider).getConfiguration();
    final CatalogPlanCompilation compilationStrategy = config.get(TableConfigOptions.PLAN_COMPILE_CATALOG_OBJECTS);
    jsonGenerator.writeStartObject();
    jsonGenerator.writeStringField(FIELD_NAME_NAME, aggCall.getName());
    RexNodeJsonSerializer.serializeSqlOperator(
            aggCall.getAggregation(),
            jsonGenerator,
            serializerProvider,
            compilationStrategy == CatalogPlanCompilation.ALL);
    jsonGenerator.writeFieldName(FIELD_NAME_ARG_LIST);
    jsonGenerator.writeStartArray();
    for (int arg : aggCall.getArgList()) {
        jsonGenerator.writeNumber(arg);
    }
    jsonGenerator.writeEndArray();
    jsonGenerator.writeNumberField(FIELD_NAME_FILTER_ARG, aggCall.filterArg);
    jsonGenerator.writeBooleanField(FIELD_NAME_DISTINCT, aggCall.isDistinct());
    jsonGenerator.writeBooleanField(FIELD_NAME_APPROXIMATE, aggCall.isApproximate());
    jsonGenerator.writeBooleanField(FIELD_NAME_IGNORE_NULLS, aggCall.ignoreNulls());
    serializerProvider.defaultSerializeField(FIELD_NAME_TYPE, aggCall.getType(), jsonGenerator);
    jsonGenerator.writeEndObject();
}
Also used: ReadableConfig(org.apache.flink.configuration.ReadableConfig) CatalogPlanCompilation(org.apache.flink.table.api.config.TableConfigOptions.CatalogPlanCompilation)
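
The same JsonGenerator pattern (writeStartObject, scalar fields, an explicit array) applies to any Jackson StdSerializer. A self-contained sketch on a hypothetical POJO; Call, its fields, and CallSerializer are invented for illustration:

import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.databind.SerializerProvider;
import com.fasterxml.jackson.databind.ser.std.StdSerializer;
import java.io.IOException;
import java.util.List;

class Call {
    String name;
    List<Integer> argList;
    boolean distinct;
}

class CallSerializer extends StdSerializer<Call> {
    CallSerializer() {
        super(Call.class);
    }

    @Override
    public void serialize(Call call, JsonGenerator gen, SerializerProvider provider)
            throws IOException {
        gen.writeStartObject();
        gen.writeStringField("name", call.name);
        // Arrays are written element by element between start/end markers.
        gen.writeFieldName("argList");
        gen.writeStartArray();
        for (int arg : call.argList) {
            gen.writeNumber(arg);
        }
        gen.writeEndArray();
        gen.writeBooleanField("distinct", call.distinct);
        gen.writeEndObject();
    }
}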

Aggregations (usage counts)

ReadableConfig (org.apache.flink.configuration.ReadableConfig): 28
FactoryUtil (org.apache.flink.table.factories.FactoryUtil): 9
RowData (org.apache.flink.table.data.RowData): 8
DataType (org.apache.flink.table.types.DataType): 7
Properties (java.util.Properties): 6
TableException (org.apache.flink.table.api.TableException): 6
TableFactoryHelper (org.apache.flink.table.factories.FactoryUtil.TableFactoryHelper): 5
Arrays (java.util.Arrays): 4
List (java.util.List): 4
Optional (java.util.Optional): 4
IntStream (java.util.stream.IntStream): 4
Internal (org.apache.flink.annotation.Internal): 4
DeserializationSchema (org.apache.flink.api.common.serialization.DeserializationSchema): 4
ConfigOption (org.apache.flink.configuration.ConfigOption): 4
HBaseConnectorOptionsUtil.getHBaseConfiguration (org.apache.flink.connector.hbase.table.HBaseConnectorOptionsUtil.getHBaseConfiguration): 4
HBaseTableSchema (org.apache.flink.connector.hbase.util.HBaseTableSchema): 4
DeliveryGuarantee (org.apache.flink.connector.base.DeliveryGuarantee): 3
ChangelogMode (org.apache.flink.table.connector.ChangelogMode): 3
EncodingFormat (org.apache.flink.table.connector.format.EncodingFormat): 3
DynamicTableSink (org.apache.flink.table.connector.sink.DynamicTableSink): 3