Use of org.apache.flink.configuration.ReadableConfig in project flink by apache.
In class KinesisDynamicTableFactory, method createDynamicTableSource:
@Override
public DynamicTableSource createDynamicTableSource(Context context) {
    FactoryUtil.TableFactoryHelper helper = FactoryUtil.createTableFactoryHelper(this, context);
    ReadableConfig tableOptions = helper.getOptions();
    ResolvedCatalogTable catalogTable = context.getCatalogTable();
    DataType physicalDataType = catalogTable.getResolvedSchema().toPhysicalRowDataType();
    KinesisConnectorOptionsUtil optionsUtils =
            new KinesisConnectorOptionsUtil(catalogTable.getOptions(), tableOptions);
    // Initialize the table format early in order to register its consumedOptionKeys
    // in the TableFactoryHelper, as those are needed for correct option validation.
    DecodingFormat<DeserializationSchema<RowData>> decodingFormat =
            helper.discoverDecodingFormat(DeserializationFormatFactory.class, FORMAT);
    // Validate the data types of the table options.
    helper.validateExcept(optionsUtils.getNonValidatedPrefixes().toArray(new String[0]));
    Properties properties = optionsUtils.getValidatedSourceConfigurations();
    return new KinesisDynamicSource(
            physicalDataType, tableOptions.get(STREAM), properties, decodingFormat);
}
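
For reference, ReadableConfig is the read-only view that helper.getOptions() returns in each factory above. The following is a minimal, self-contained sketch of declaring and reading typed options; the option keys and the ReadableConfigSketch class are made up for illustration and are not part of Flink:

import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.configuration.ConfigOptions;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.ReadableConfig;

public class ReadableConfigSketch {
    // Hypothetical options, declared the same way connector options such as STREAM are.
    private static final ConfigOption<String> STREAM =
            ConfigOptions.key("stream").stringType().noDefaultValue();
    private static final ConfigOption<Integer> RETRIES =
            ConfigOptions.key("scan.retries").intType().defaultValue(3);

    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set(STREAM, "orders");
        // Configuration implements ReadableConfig, which exposes only typed reads.
        ReadableConfig options = conf;
        System.out.println(options.get(STREAM));                     // orders
        System.out.println(options.get(RETRIES));                    // 3 (default applies)
        System.out.println(options.getOptional(STREAM).isPresent()); // true
    }
}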
Use of org.apache.flink.configuration.ReadableConfig in project flink by apache.
In class HBase1DynamicTableFactory, method createDynamicTableSink:
@Override
public DynamicTableSink createDynamicTableSink(Context context) {
    TableFactoryHelper helper = createTableFactoryHelper(this, context);
    helper.validateExcept(PROPERTIES_PREFIX);
    final ReadableConfig tableOptions = helper.getOptions();
    validatePrimaryKey(context.getPhysicalRowDataType(), context.getPrimaryKeyIndexes());
    String tableName = tableOptions.get(TABLE_NAME);
    Configuration hbaseConf = getHBaseConfiguration(tableOptions);
    HBaseWriteOptions hBaseWriteOptions = getHBaseWriteOptions(tableOptions);
    String nullStringLiteral = tableOptions.get(NULL_STRING_LITERAL);
    HBaseTableSchema hbaseSchema = HBaseTableSchema.fromDataType(context.getPhysicalRowDataType());
    return new HBaseDynamicTableSink(
            tableName, hbaseSchema, hbaseConf, hBaseWriteOptions, nullStringLiteral);
}
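
As a usage sketch, a DDL along the following lines would be routed to this factory; the schema, table name, and quorum address are placeholders for illustration, not taken from Flink:

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;

public class HBaseSinkDdlSketch {
    public static void main(String[] args) {
        TableEnvironment tEnv =
                TableEnvironment.create(EnvironmentSettings.newInstance().inStreamingMode().build());
        // Illustrative DDL: 'table-name', 'zookeeper.quorum', and 'null-string-literal'
        // map onto TABLE_NAME, getHBaseConfiguration(...), and NULL_STRING_LITERAL
        // read in createDynamicTableSink above.
        tEnv.executeSql(
                "CREATE TABLE hbase_sink (\n"
                        + "  rowkey STRING,\n"
                        + "  cf ROW<qty INT>,\n"
                        + "  PRIMARY KEY (rowkey) NOT ENFORCED\n"
                        + ") WITH (\n"
                        + "  'connector' = 'hbase-1.4',\n"
                        + "  'table-name' = 'orders',\n"
                        + "  'zookeeper.quorum' = 'localhost:2181',\n"
                        + "  'null-string-literal' = 'NULL'\n"
                        + ")");
    }
}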
Use of org.apache.flink.configuration.ReadableConfig in project flink by apache.
In class HBase1DynamicTableFactory, method createDynamicTableSource:
@Override
public DynamicTableSource createDynamicTableSource(Context context) {
    TableFactoryHelper helper = createTableFactoryHelper(this, context);
    helper.validateExcept(PROPERTIES_PREFIX);
    final ReadableConfig tableOptions = helper.getOptions();
    validatePrimaryKey(context.getPhysicalRowDataType(), context.getPrimaryKeyIndexes());
    String tableName = tableOptions.get(TABLE_NAME);
    Configuration hbaseClientConf = getHBaseConfiguration(tableOptions);
    String nullStringLiteral = tableOptions.get(NULL_STRING_LITERAL);
    HBaseTableSchema hbaseSchema = HBaseTableSchema.fromDataType(context.getPhysicalRowDataType());
    return new HBaseDynamicTableSource(
            hbaseClientConf, tableName, hbaseSchema, nullStringLiteral,
            getHBaseLookupOptions(tableOptions));
}
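
Similarly, a hedged sketch of a source table whose lookup options end up in getHBaseLookupOptions(tableOptions); the lookup cache settings shown are illustrative and may differ by connector version:

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;

public class HBaseLookupDdlSketch {
    public static void main(String[] args) {
        TableEnvironment tEnv =
                TableEnvironment.create(EnvironmentSettings.newInstance().inStreamingMode().build());
        // Illustrative DDL: the 'lookup.*' keys below are what
        // getHBaseLookupOptions(tableOptions) reads in the factory above.
        tEnv.executeSql(
                "CREATE TABLE dim (\n"
                        + "  rowkey STRING,\n"
                        + "  cf ROW<name STRING>\n"
                        + ") WITH (\n"
                        + "  'connector' = 'hbase-1.4',\n"
                        + "  'table-name' = 'dim',\n"
                        + "  'zookeeper.quorum' = 'localhost:2181',\n"
                        + "  'lookup.cache.max-rows' = '1000',\n"
                        + "  'lookup.cache.ttl' = '10min'\n"
                        + ")");
    }
}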
Use of org.apache.flink.configuration.ReadableConfig in project flink by apache.
In class JdbcDynamicTableFactory, method createDynamicTableSource:
@Override
public DynamicTableSource createDynamicTableSource(Context context) {
    final FactoryUtil.TableFactoryHelper helper = FactoryUtil.createTableFactoryHelper(this, context);
    final ReadableConfig config = helper.getOptions();
    helper.validate();
    validateConfigOptions(config);
    validateDataTypeWithJdbcDialect(context.getPhysicalRowDataType(), config.get(URL));
    return new JdbcDynamicTableSource(
            getJdbcOptions(helper.getOptions()),
            getJdbcReadOptions(helper.getOptions()),
            getJdbcLookupOptions(helper.getOptions()),
            context.getPhysicalRowDataType());
}
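
For illustration, a JDBC source DDL of this shape would exercise the path above; the URL, table name, and partitioned-scan bounds are placeholders. The 'url' value is what validateDataTypeWithJdbcDialect(...) uses to pick the dialect, and the 'scan.partition.*' keys feed getJdbcReadOptions(...):

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;

public class JdbcSourceDdlSketch {
    public static void main(String[] args) {
        TableEnvironment tEnv =
                TableEnvironment.create(EnvironmentSettings.newInstance().inStreamingMode().build());
        // Illustrative DDL; the four 'scan.partition.*' options must be set together.
        tEnv.executeSql(
                "CREATE TABLE jdbc_src (\n"
                        + "  id BIGINT,\n"
                        + "  name STRING\n"
                        + ") WITH (\n"
                        + "  'connector' = 'jdbc',\n"
                        + "  'url' = 'jdbc:mysql://localhost:3306/mydb',\n"
                        + "  'table-name' = 'users',\n"
                        + "  'scan.partition.column' = 'id',\n"
                        + "  'scan.partition.num' = '4',\n"
                        + "  'scan.partition.lower-bound' = '0',\n"
                        + "  'scan.partition.upper-bound' = '10000'\n"
                        + ")");
    }
}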
Use of org.apache.flink.configuration.ReadableConfig in project flink by apache.
In class KafkaDynamicTableFactory, method createDynamicTableSink:
@Override
public DynamicTableSink createDynamicTableSink(Context context) {
    final TableFactoryHelper helper =
            FactoryUtil.createTableFactoryHelper(this, autoCompleteSchemaRegistrySubject(context));
    final Optional<EncodingFormat<SerializationSchema<RowData>>> keyEncodingFormat =
            getKeyEncodingFormat(helper);
    final EncodingFormat<SerializationSchema<RowData>> valueEncodingFormat =
            getValueEncodingFormat(helper);
    helper.validateExcept(PROPERTIES_PREFIX);
    final ReadableConfig tableOptions = helper.getOptions();
    final DeliveryGuarantee deliveryGuarantee = validateDeprecatedSemantic(tableOptions);
    validateTableSinkOptions(tableOptions);
    KafkaConnectorOptionsUtil.validateDeliveryGuarantee(tableOptions);
    validatePKConstraints(
            context.getObjectIdentifier(),
            context.getPrimaryKeyIndexes(),
            context.getCatalogTable().getOptions(),
            valueEncodingFormat);
    final DataType physicalDataType = context.getPhysicalRowDataType();
    final int[] keyProjection = createKeyFormatProjection(tableOptions, physicalDataType);
    final int[] valueProjection = createValueFormatProjection(tableOptions, physicalDataType);
    final String keyPrefix = tableOptions.getOptional(KEY_FIELDS_PREFIX).orElse(null);
    final Integer parallelism = tableOptions.getOptional(SINK_PARALLELISM).orElse(null);
    return createKafkaTableSink(
            physicalDataType,
            keyEncodingFormat.orElse(null),
            valueEncodingFormat,
            keyProjection,
            valueProjection,
            keyPrefix,
            tableOptions.get(TOPIC).get(0),
            getKafkaProperties(context.getCatalogTable().getOptions()),
            getFlinkKafkaPartitioner(tableOptions, context.getClassLoader()).orElse(null),
            deliveryGuarantee,
            parallelism,
            tableOptions.get(TRANSACTIONAL_ID_PREFIX));
}
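
As a closing sketch, a Kafka sink DDL like the following drives this code path; the broker address, topic, and schema are illustrative placeholders:

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;

public class KafkaSinkDdlSketch {
    public static void main(String[] args) {
        TableEnvironment tEnv =
                TableEnvironment.create(EnvironmentSettings.newInstance().inStreamingMode().build());
        // Illustrative DDL: 'key.format'/'value.format' are what
        // getKeyEncodingFormat/getValueEncodingFormat discover,
        // 'sink.delivery-guarantee' feeds validateDeliveryGuarantee, and
        // 'sink.parallelism' is read via getOptional(SINK_PARALLELISM) above.
        tEnv.executeSql(
                "CREATE TABLE kafka_sink (\n"
                        + "  user_id STRING,\n"
                        + "  amount DOUBLE\n"
                        + ") WITH (\n"
                        + "  'connector' = 'kafka',\n"
                        + "  'topic' = 'orders',\n"
                        + "  'properties.bootstrap.servers' = 'localhost:9092',\n"
                        + "  'key.format' = 'json',\n"
                        + "  'key.fields' = 'user_id',\n"
                        + "  'value.format' = 'json',\n"
                        + "  'sink.delivery-guarantee' = 'at-least-once',\n"
                        + "  'sink.parallelism' = '2'\n"
                        + ")");
    }
}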