Example 11 with ReadableConfig

Use of org.apache.flink.configuration.ReadableConfig in the apache/flink project.

From class KafkaDynamicTableFactory, method createDynamicTableSource:

@Override
public DynamicTableSource createDynamicTableSource(Context context) {
    final TableFactoryHelper helper = FactoryUtil.createTableFactoryHelper(this, context);
    final Optional<DecodingFormat<DeserializationSchema<RowData>>> keyDecodingFormat = getKeyDecodingFormat(helper);
    final DecodingFormat<DeserializationSchema<RowData>> valueDecodingFormat = getValueDecodingFormat(helper);
    helper.validateExcept(PROPERTIES_PREFIX);
    final ReadableConfig tableOptions = helper.getOptions();
    validateTableSourceOptions(tableOptions);
    validatePKConstraints(context.getObjectIdentifier(), context.getPrimaryKeyIndexes(), context.getCatalogTable().getOptions(), valueDecodingFormat);
    final StartupOptions startupOptions = getStartupOptions(tableOptions);
    final Properties properties = getKafkaProperties(context.getCatalogTable().getOptions());
    // add topic-partition discovery
    final Optional<Long> partitionDiscoveryInterval = tableOptions.getOptional(SCAN_TOPIC_PARTITION_DISCOVERY).map(Duration::toMillis);
    properties.setProperty(KafkaSourceOptions.PARTITION_DISCOVERY_INTERVAL_MS.key(), partitionDiscoveryInterval.orElse(-1L).toString());
    final DataType physicalDataType = context.getPhysicalRowDataType();
    final int[] keyProjection = createKeyFormatProjection(tableOptions, physicalDataType);
    final int[] valueProjection = createValueFormatProjection(tableOptions, physicalDataType);
    final String keyPrefix = tableOptions.getOptional(KEY_FIELDS_PREFIX).orElse(null);
    return createKafkaTableSource(
            physicalDataType,
            keyDecodingFormat.orElse(null),
            valueDecodingFormat,
            keyProjection,
            valueProjection,
            keyPrefix,
            getSourceTopics(tableOptions),
            getSourceTopicPattern(tableOptions),
            properties,
            startupOptions.startupMode,
            startupOptions.specificOffsets,
            startupOptions.startupTimestampMillis,
            context.getObjectIdentifier().asSummaryString());
}
Also used : DecodingFormat(org.apache.flink.table.connector.format.DecodingFormat) TableFactoryHelper(org.apache.flink.table.factories.FactoryUtil.TableFactoryHelper) Duration(java.time.Duration) KafkaConnectorOptionsUtil.getKafkaProperties(org.apache.flink.streaming.connectors.kafka.table.KafkaConnectorOptionsUtil.getKafkaProperties) Properties(java.util.Properties) DeserializationSchema(org.apache.flink.api.common.serialization.DeserializationSchema) RowData(org.apache.flink.table.data.RowData) ReadableConfig(org.apache.flink.configuration.ReadableConfig) DataType(org.apache.flink.table.types.DataType) KafkaConnectorOptionsUtil.getStartupOptions(org.apache.flink.streaming.connectors.kafka.table.KafkaConnectorOptionsUtil.getStartupOptions) StartupOptions(org.apache.flink.streaming.connectors.kafka.table.KafkaConnectorOptionsUtil.StartupOptions)
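
The factory above reads SCAN_TOPIC_PARTITION_DISCOVERY through the read-only ReadableConfig view and falls back to -1 (discovery disabled) when the option is absent. Below is a minimal, self-contained sketch of that pattern; the option constant is redefined locally for illustration and only mirrors the real SCAN_TOPIC_PARTITION_DISCOVERY from KafkaConnectorOptions.

import java.time.Duration;
import java.util.Optional;
import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.configuration.ConfigOptions;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.ReadableConfig;

public class PartitionDiscoveryOptionDemo {

    // Illustrative stand-in for KafkaConnectorOptions.SCAN_TOPIC_PARTITION_DISCOVERY.
    private static final ConfigOption<Duration> DISCOVERY_INTERVAL =
            ConfigOptions.key("scan.topic-partition-discovery.interval")
                    .durationType()
                    .noDefaultValue();

    public static void main(String[] args) {
        Configuration writable = new Configuration();
        writable.set(DISCOVERY_INTERVAL, Duration.ofSeconds(30));

        // Configuration implements ReadableConfig, which is the view factories receive.
        ReadableConfig tableOptions = writable;
        Optional<Long> intervalMs =
                tableOptions.getOptional(DISCOVERY_INTERVAL).map(Duration::toMillis);
        // Same fallback as in the factory: absent means -1, i.e. discovery disabled.
        System.out.println(intervalMs.orElse(-1L)); // prints 30000
    }
}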

Example 12 with ReadableConfig

Use of org.apache.flink.configuration.ReadableConfig in the apache/flink project.

From class KafkaConnectorOptionsUtil, method createValueFormatProjection:

/**
 * Creates an array of indices that determine which physical fields of the table schema to
 * include in the value format.
 *
 * <p>See {@link KafkaConnectorOptions#VALUE_FORMAT}, {@link
 * KafkaConnectorOptions#VALUE_FIELDS_INCLUDE}, and {@link
 * KafkaConnectorOptions#KEY_FIELDS_PREFIX} for more information.
 */
public static int[] createValueFormatProjection(ReadableConfig options, DataType physicalDataType) {
    final LogicalType physicalType = physicalDataType.getLogicalType();
    Preconditions.checkArgument(physicalType.is(LogicalTypeRoot.ROW), "Row data type expected.");
    final int physicalFieldCount = LogicalTypeChecks.getFieldCount(physicalType);
    final IntStream physicalFields = IntStream.range(0, physicalFieldCount);
    final String keyPrefix = options.getOptional(KEY_FIELDS_PREFIX).orElse("");
    final ValueFieldsStrategy strategy = options.get(VALUE_FIELDS_INCLUDE);
    if (strategy == ValueFieldsStrategy.ALL) {
        if (keyPrefix.length() > 0) {
            throw new ValidationException(String.format("A key prefix is not allowed when option '%s' is set to '%s'. " + "Set it to '%s' instead to avoid field overlaps.", VALUE_FIELDS_INCLUDE.key(), ValueFieldsStrategy.ALL, ValueFieldsStrategy.EXCEPT_KEY));
        }
        return physicalFields.toArray();
    } else if (strategy == ValueFieldsStrategy.EXCEPT_KEY) {
        final int[] keyProjection = createKeyFormatProjection(options, physicalDataType);
        return physicalFields.filter(pos -> IntStream.of(keyProjection).noneMatch(k -> k == pos)).toArray();
    }
    throw new TableException("Unknown value fields strategy:" + strategy);
}
Also used : DynamicTableFactory(org.apache.flink.table.factories.DynamicTableFactory) IntStream(java.util.stream.IntStream) DeliveryGuarantee(org.apache.flink.connector.base.DeliveryGuarantee) DataType(org.apache.flink.table.types.DataType) FlinkException(org.apache.flink.util.FlinkException) ConfigOptions(org.apache.flink.configuration.ConfigOptions) Arrays(java.util.Arrays) SCAN_STARTUP_TIMESTAMP_MILLIS(org.apache.flink.streaming.connectors.kafka.table.KafkaConnectorOptions.SCAN_STARTUP_TIMESTAMP_MILLIS) KEY_FORMAT(org.apache.flink.streaming.connectors.kafka.table.KafkaConnectorOptions.KEY_FORMAT) TOPIC(org.apache.flink.streaming.connectors.kafka.table.KafkaConnectorOptions.TOPIC) TRANSACTIONAL_ID_PREFIX(org.apache.flink.streaming.connectors.kafka.table.KafkaConnectorOptions.TRANSACTIONAL_ID_PREFIX) DELIVERY_GUARANTEE(org.apache.flink.streaming.connectors.kafka.table.KafkaConnectorOptions.DELIVERY_GUARANTEE) TOPIC_PATTERN(org.apache.flink.streaming.connectors.kafka.table.KafkaConnectorOptions.TOPIC_PATTERN) HashMap(java.util.HashMap) VALUE_FIELDS_INCLUDE(org.apache.flink.streaming.connectors.kafka.table.KafkaConnectorOptions.VALUE_FIELDS_INCLUDE) InstantiationUtil(org.apache.flink.util.InstantiationUtil) ReadableConfig(org.apache.flink.configuration.ReadableConfig) Map(java.util.Map) ConfigOption(org.apache.flink.configuration.ConfigOption) FORMAT(org.apache.flink.table.factories.FactoryUtil.FORMAT) FlinkFixedPartitioner(org.apache.flink.streaming.connectors.kafka.partitioner.FlinkFixedPartitioner) SCAN_STARTUP_MODE(org.apache.flink.streaming.connectors.kafka.table.KafkaConnectorOptions.SCAN_STARTUP_MODE) RowData(org.apache.flink.table.data.RowData) Properties(java.util.Properties) KafkaTopicPartition(org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition) ValueFieldsStrategy(org.apache.flink.streaming.connectors.kafka.table.KafkaConnectorOptions.ValueFieldsStrategy) Configuration(org.apache.flink.configuration.Configuration) TableException(org.apache.flink.table.api.TableException) VALUE_FORMAT(org.apache.flink.streaming.connectors.kafka.table.KafkaConnectorOptions.VALUE_FORMAT) ScanStartupMode(org.apache.flink.streaming.connectors.kafka.table.KafkaConnectorOptions.ScanStartupMode) Preconditions(org.apache.flink.util.Preconditions) FlinkKafkaPartitioner(org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner) StartupMode(org.apache.flink.streaming.connectors.kafka.config.StartupMode) List(java.util.List) SINK_PARTITIONER(org.apache.flink.streaming.connectors.kafka.table.KafkaConnectorOptions.SINK_PARTITIONER) FactoryUtil(org.apache.flink.table.factories.FactoryUtil) LogicalType(org.apache.flink.table.types.logical.LogicalType) ValidationException(org.apache.flink.table.api.ValidationException) Optional(java.util.Optional) SCAN_STARTUP_SPECIFIC_OFFSETS(org.apache.flink.streaming.connectors.kafka.table.KafkaConnectorOptions.SCAN_STARTUP_SPECIFIC_OFFSETS) Internal(org.apache.flink.annotation.Internal) Pattern(java.util.regex.Pattern) KEY_FIELDS_PREFIX(org.apache.flink.streaming.connectors.kafka.table.KafkaConnectorOptions.KEY_FIELDS_PREFIX) LogicalTypeRoot(org.apache.flink.table.types.logical.LogicalTypeRoot) LogicalTypeChecks(org.apache.flink.table.types.logical.utils.LogicalTypeChecks) KEY_FIELDS(org.apache.flink.streaming.connectors.kafka.table.KafkaConnectorOptions.KEY_FIELDS)
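
To see the projection concretely, the hedged driver below runs createValueFormatProjection against an invented three-field schema with VALUE_FIELDS_INCLUDE set to ALL, where every physical index lands in the value format. KafkaConnectorOptionsUtil is annotated @Internal, so treat this as illustration, not supported API.

import java.util.Arrays;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.connectors.kafka.table.KafkaConnectorOptions.ValueFieldsStrategy;
import org.apache.flink.streaming.connectors.kafka.table.KafkaConnectorOptionsUtil;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.types.DataType;

import static org.apache.flink.streaming.connectors.kafka.table.KafkaConnectorOptions.VALUE_FIELDS_INCLUDE;

public class ValueProjectionDemo {
    public static void main(String[] args) {
        // Invented schema purely for illustration.
        DataType physicalDataType = DataTypes.ROW(
                DataTypes.FIELD("id", DataTypes.BIGINT()),
                DataTypes.FIELD("name", DataTypes.STRING()),
                DataTypes.FIELD("ts", DataTypes.TIMESTAMP(3)));

        Configuration options = new Configuration();
        options.set(VALUE_FIELDS_INCLUDE, ValueFieldsStrategy.ALL);

        int[] projection =
                KafkaConnectorOptionsUtil.createValueFormatProjection(options, physicalDataType);
        // ALL with no key prefix keeps every physical field: [0, 1, 2]
        System.out.println(Arrays.toString(projection));
    }
}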

Example 13 with ReadableConfig

Use of org.apache.flink.configuration.ReadableConfig in the apache/flink project.

From class StreamPhysicalPythonGroupWindowAggregateRule, method convert:

@Override
public RelNode convert(RelNode rel) {
    FlinkLogicalWindowAggregate agg = (FlinkLogicalWindowAggregate) rel;
    LogicalWindow window = agg.getWindow();
    List<AggregateCall> aggCalls = agg.getAggCallList();
    boolean isPandasPythonUDAF = aggCalls.stream().anyMatch(x -> PythonUtil.isPythonAggregate(x, PythonFunctionKind.PANDAS));
    if (isPandasPythonUDAF && window instanceof SessionGroupWindow) {
        throw new TableException("Session Group Window is currently not supported for Pandas UDAF.");
    }
    RelNode input = agg.getInput();
    RelOptCluster cluster = rel.getCluster();
    FlinkRelDistribution requiredDistribution;
    if (agg.getGroupCount() != 0) {
        requiredDistribution = FlinkRelDistribution.hash(agg.getGroupSet().asList(), true);
    } else {
        requiredDistribution = FlinkRelDistribution.SINGLETON();
    }
    RelTraitSet requiredTraitSet = input.getTraitSet().replace(FlinkConventions.STREAM_PHYSICAL()).replace(requiredDistribution);
    RelTraitSet providedTraitSet = rel.getTraitSet().replace(FlinkConventions.STREAM_PHYSICAL());
    RelNode newInput = RelOptRule.convert(input, requiredTraitSet);
    ReadableConfig config = ShortcutUtils.unwrapTableConfig(rel);
    WindowEmitStrategy emitStrategy = WindowEmitStrategy.apply(config, agg.getWindow());
    if (emitStrategy.produceUpdates()) {
        throw new TableException("Python Group Window Aggregate Function is currently not supported for early fired or lately fired.");
    }
    return new StreamPhysicalPythonGroupWindowAggregate(
            cluster,
            providedTraitSet,
            newInput,
            rel.getRowType(),
            agg.getGroupSet().toArray(),
            JavaScalaConversionUtil.toScala(aggCalls),
            agg.getWindow(),
            agg.getNamedProperties(),
            emitStrategy);
}
Also used : RelOptCluster(org.apache.calcite.plan.RelOptCluster) StreamPhysicalPythonGroupWindowAggregate(org.apache.flink.table.planner.plan.nodes.physical.stream.StreamPhysicalPythonGroupWindowAggregate) TableException(org.apache.flink.table.api.TableException) RelTraitSet(org.apache.calcite.plan.RelTraitSet) AggregateCall(org.apache.calcite.rel.core.AggregateCall) FlinkRelDistribution(org.apache.flink.table.planner.plan.trait.FlinkRelDistribution) ReadableConfig(org.apache.flink.configuration.ReadableConfig) LogicalWindow(org.apache.flink.table.planner.plan.logical.LogicalWindow) RelNode(org.apache.calcite.rel.RelNode) WindowEmitStrategy(org.apache.flink.table.planner.plan.utils.WindowEmitStrategy) FlinkLogicalWindowAggregate(org.apache.flink.table.planner.plan.nodes.logical.FlinkLogicalWindowAggregate) SessionGroupWindow(org.apache.flink.table.planner.plan.logical.SessionGroupWindow)
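
The produceUpdates() gate above trips when early or late firing is enabled, which WindowEmitStrategy derives from experimental emit options read through the same ReadableConfig. The sketch below redefines the early-fire key locally (the key string is an assumption based on WindowEmitStrategy's option definitions) just to show the config plumbing.

import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.configuration.ConfigOptions;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.ReadableConfig;

public class EmitStrategyConfigDemo {

    // Assumed key string; the real option is defined inside WindowEmitStrategy.
    private static final ConfigOption<Boolean> EARLY_FIRE_ENABLED =
            ConfigOptions.key("table.exec.emit.early-fire.enabled")
                    .booleanType()
                    .defaultValue(false);

    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set(EARLY_FIRE_ENABLED, true);

        ReadableConfig config = conf;
        // With this flag on, the rule above would reject the Python group window aggregate.
        System.out.println(config.get(EARLY_FIRE_ENABLED)); // prints true
    }
}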

Example 14 with ReadableConfig

Use of org.apache.flink.configuration.ReadableConfig in the apache/flink project.

From class CatalogSourceTable, method computeContextResolvedTable:

private ContextResolvedTable computeContextResolvedTable(FlinkContext context, Map<String, String> hintedOptions) {
    ContextResolvedTable contextResolvedTable = schemaTable.getContextResolvedTable();
    if (hintedOptions.isEmpty()) {
        return contextResolvedTable;
    }
    final ReadableConfig config = context.getTableConfig().getConfiguration();
    if (!config.get(TableConfigOptions.TABLE_DYNAMIC_TABLE_OPTIONS_ENABLED)) {
        throw new ValidationException(String.format("The '%s' hint is allowed only when the config option '%s' is set to true.", FlinkHints.HINT_NAME_OPTIONS, TableConfigOptions.TABLE_DYNAMIC_TABLE_OPTIONS_ENABLED.key()));
    }
    if (contextResolvedTable.getResolvedTable().getTableKind() == TableKind.VIEW) {
        throw new ValidationException(String.format("View '%s' cannot be enriched with new options. " + "Hints can only be applied to tables.", contextResolvedTable.getIdentifier()));
    }
    return contextResolvedTable.copy(FlinkHints.mergeTableOptions(hintedOptions, contextResolvedTable.getResolvedTable().getOptions()));
}
Also used : ReadableConfig(org.apache.flink.configuration.ReadableConfig) ValidationException(org.apache.flink.table.api.ValidationException) ContextResolvedTable(org.apache.flink.table.catalog.ContextResolvedTable)
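
The guard above boils down to one boolean read through ReadableConfig. A minimal sketch of flipping that switch programmatically follows; in the SQL client the same key is set with SET 'table.dynamic-table-options.enabled' = 'true'.

import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.ReadableConfig;
import org.apache.flink.table.api.config.TableConfigOptions;

public class DynamicOptionsGateDemo {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set(TableConfigOptions.TABLE_DYNAMIC_TABLE_OPTIONS_ENABLED, true);

        // The planner sees this as a ReadableConfig, exactly as in computeContextResolvedTable.
        ReadableConfig config = conf;
        System.out.println(config.get(TableConfigOptions.TABLE_DYNAMIC_TABLE_OPTIONS_ENABLED));
    }
}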

Example 15 with ReadableConfig

Use of org.apache.flink.configuration.ReadableConfig in the apache/flink project.

From class LocalExecutor, method executeQuery:

@Override
public ResultDescriptor executeQuery(String sessionId, QueryOperation query) throws SqlExecutionException {
    final TableResultInternal tableResult = executeOperation(sessionId, query);
    final SessionContext context = getSessionContext(sessionId);
    final ReadableConfig config = context.getReadableConfig();
    final DynamicResult result = resultStore.createResult(config, tableResult);
    checkArgument(tableResult.getJobClient().isPresent());
    String jobId = tableResult.getJobClient().get().getJobID().toString();
    // store the result under the JobID
    resultStore.storeResult(jobId, result);
    return new ResultDescriptor(jobId, tableResult.getResolvedSchema(), result.isMaterialized(), config, tableResult.getRowDataToStringConverter());
}
Also used : ReadableConfig(org.apache.flink.configuration.ReadableConfig) SessionContext(org.apache.flink.table.client.gateway.context.SessionContext) ResultDescriptor(org.apache.flink.table.client.gateway.ResultDescriptor) DynamicResult(org.apache.flink.table.client.gateway.local.result.DynamicResult) TableResultInternal(org.apache.flink.table.api.internal.TableResultInternal)

Aggregations

ReadableConfig (org.apache.flink.configuration.ReadableConfig): 28
FactoryUtil (org.apache.flink.table.factories.FactoryUtil): 9
RowData (org.apache.flink.table.data.RowData): 8
DataType (org.apache.flink.table.types.DataType): 7
Properties (java.util.Properties): 6
TableException (org.apache.flink.table.api.TableException): 6
TableFactoryHelper (org.apache.flink.table.factories.FactoryUtil.TableFactoryHelper): 5
Arrays (java.util.Arrays): 4
List (java.util.List): 4
Optional (java.util.Optional): 4
IntStream (java.util.stream.IntStream): 4
Internal (org.apache.flink.annotation.Internal): 4
DeserializationSchema (org.apache.flink.api.common.serialization.DeserializationSchema): 4
ConfigOption (org.apache.flink.configuration.ConfigOption): 4
HBaseConnectorOptionsUtil.getHBaseConfiguration (org.apache.flink.connector.hbase.table.HBaseConnectorOptionsUtil.getHBaseConfiguration): 4
HBaseTableSchema (org.apache.flink.connector.hbase.util.HBaseTableSchema): 4
DeliveryGuarantee (org.apache.flink.connector.base.DeliveryGuarantee): 3
ChangelogMode (org.apache.flink.table.connector.ChangelogMode): 3
EncodingFormat (org.apache.flink.table.connector.format.EncodingFormat): 3
DynamicTableSink (org.apache.flink.table.connector.sink.DynamicTableSink): 3