Use of org.apache.flink.api.common.serialization.SerializationSchema in project flink by apache.
Class KafkaDynamicTableFactory, method createDynamicTableSink.
@Override
public DynamicTableSink createDynamicTableSink(Context context) {
    final TableFactoryHelper helper =
            FactoryUtil.createTableFactoryHelper(this, autoCompleteSchemaRegistrySubject(context));
    final Optional<EncodingFormat<SerializationSchema<RowData>>> keyEncodingFormat =
            getKeyEncodingFormat(helper);
    final EncodingFormat<SerializationSchema<RowData>> valueEncodingFormat =
            getValueEncodingFormat(helper);
    helper.validateExcept(PROPERTIES_PREFIX);

    final ReadableConfig tableOptions = helper.getOptions();
    final DeliveryGuarantee deliveryGuarantee = validateDeprecatedSemantic(tableOptions);
    validateTableSinkOptions(tableOptions);
    KafkaConnectorOptionsUtil.validateDeliveryGuarantee(tableOptions);
    validatePKConstraints(
            context.getObjectIdentifier(), context.getPrimaryKeyIndexes(),
            context.getCatalogTable().getOptions(), valueEncodingFormat);

    final DataType physicalDataType = context.getPhysicalRowDataType();
    final int[] keyProjection = createKeyFormatProjection(tableOptions, physicalDataType);
    final int[] valueProjection = createValueFormatProjection(tableOptions, physicalDataType);
    final String keyPrefix = tableOptions.getOptional(KEY_FIELDS_PREFIX).orElse(null);
    final Integer parallelism = tableOptions.getOptional(SINK_PARALLELISM).orElse(null);

    return createKafkaTableSink(
            physicalDataType, keyEncodingFormat.orElse(null), valueEncodingFormat,
            keyProjection, valueProjection, keyPrefix, tableOptions.get(TOPIC).get(0),
            getKafkaProperties(context.getCatalogTable().getOptions()),
            getFlinkKafkaPartitioner(tableOptions, context.getClassLoader()).orElse(null),
            deliveryGuarantee, parallelism, tableOptions.get(TRANSACTIONAL_ID_PREFIX));
}
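To see the factory in context, the sketch below shows a hypothetical CREATE TABLE statement that would route through this createDynamicTableSink call. The class name, table name, schema, and broker address are invented for illustration; the option keys follow the documented 'kafka' connector options: 'key.format' and 'value.format' drive the key/value EncodingFormat<SerializationSchema<RowData>> discovery, 'sink.delivery-guarantee' and 'sink.transactional-id-prefix' feed the delivery-guarantee validation, and 'sink.parallelism' ends up in the parallelism argument above.

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;

public class KafkaSinkDdlSketch {
    public static void main(String[] args) {
        // Hypothetical example; table name, fields, and broker address are made up.
        TableEnvironment tEnv = TableEnvironment.create(EnvironmentSettings.inStreamingMode());
        tEnv.executeSql(
                "CREATE TABLE orders_sink (\n"
                        + "  order_id STRING,\n"
                        + "  amount DOUBLE,\n"
                        + "  ts TIMESTAMP(3)\n"
                        + ") WITH (\n"
                        + "  'connector' = 'kafka',\n"
                        + "  'topic' = 'orders',\n"
                        + "  'properties.bootstrap.servers' = 'localhost:9092',\n"
                        + "  'key.format' = 'json',\n"
                        + "  'key.fields' = 'order_id',\n"
                        + "  'value.format' = 'json',\n"
                        + "  'sink.delivery-guarantee' = 'exactly-once',\n"
                        + "  'sink.transactional-id-prefix' = 'orders-sink',\n"
                        + "  'sink.parallelism' = '4'\n"
                        + ")");
    }
}

Any INSERT INTO targeting such a table then materializes the KafkaDynamicSink returned by the factory method above.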
Use of org.apache.flink.api.common.serialization.SerializationSchema in project flink by apache.
Class KafkaDynamicTableFactoryTest, method testTableSinkWithParallelism.
@Test
public void testTableSinkWithParallelism() {
    final Map<String, String> modifiedOptions =
            getModifiedOptions(
                    getBasicSinkOptions(), options -> options.put("sink.parallelism", "100"));
    KafkaDynamicSink actualSink = (KafkaDynamicSink) createTableSink(SCHEMA, modifiedOptions);

    final EncodingFormat<SerializationSchema<RowData>> valueEncodingFormat =
            new EncodingFormatMock(",");
    final DynamicTableSink expectedSink =
            createExpectedSink(
                    SCHEMA_DATA_TYPE, null, valueEncodingFormat, new int[0], new int[] {0, 1, 2},
                    null, TOPIC, KAFKA_SINK_PROPERTIES, new FlinkFixedPartitioner<>(),
                    DeliveryGuarantee.EXACTLY_ONCE, 100, "kafka-sink");
    assertThat(actualSink).isEqualTo(expectedSink);

    final DynamicTableSink.SinkRuntimeProvider provider =
            actualSink.getSinkRuntimeProvider(new SinkRuntimeProviderContext(false));
    assertThat(provider).isInstanceOf(SinkV2Provider.class);
    final SinkV2Provider sinkProvider = (SinkV2Provider) provider;
    assertThat(sinkProvider.getParallelism().isPresent()).isTrue();
    assertThat((long) sinkProvider.getParallelism().get()).isEqualTo(100);
}
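The EncodingFormatMock(",") used in these tests stands in for a real format; at runtime an EncodingFormat<SerializationSchema<RowData>> produces a SerializationSchema that turns each element into the bytes written to Kafka. Below is a minimal sketch of that contract, using String instead of RowData to keep it self-contained; the class name is invented.

import java.nio.charset.StandardCharsets;
import org.apache.flink.api.common.serialization.SerializationSchema;

// Minimal sketch of the SerializationSchema contract: one element in, one byte array out.
public class Utf8StringSerializationSchema implements SerializationSchema<String> {
    @Override
    public byte[] serialize(String element) {
        return element.getBytes(StandardCharsets.UTF_8);
    }
}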
Use of org.apache.flink.api.common.serialization.SerializationSchema in project flink by apache.
Class KafkaDynamicTableFactoryTest, method testTableSinkSemanticTranslation.
@Test
public void testTableSinkSemanticTranslation() {
    final List<String> semantics = ImmutableList.of("exactly-once", "at-least-once", "none");
    final EncodingFormat<SerializationSchema<RowData>> valueEncodingFormat =
            new EncodingFormatMock(",");
    for (final String semantic : semantics) {
        final Map<String, String> modifiedOptions =
                getModifiedOptions(
                        getBasicSinkOptions(),
                        options -> {
                            options.put("sink.semantic", semantic);
                            options.put("sink.transactional-id-prefix", "kafka-sink");
                        });
        final DynamicTableSink actualSink = createTableSink(SCHEMA, modifiedOptions);
        final DynamicTableSink expectedSink =
                createExpectedSink(
                        SCHEMA_DATA_TYPE, null, valueEncodingFormat, new int[0],
                        new int[] {0, 1, 2}, null, TOPIC, KAFKA_SINK_PROPERTIES,
                        new FlinkFixedPartitioner<>(),
                        DeliveryGuarantee.valueOf(semantic.toUpperCase().replace("-", "_")),
                        null, "kafka-sink");
        assertThat(actualSink).isEqualTo(expectedSink);
    }
}
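The expected sink mirrors how the legacy 'sink.semantic' values translate into DeliveryGuarantee constants: the hyphenated string is upper-cased and hyphens become underscores. Written as a hypothetical standalone helper (the class and method names are invented):

import org.apache.flink.connector.base.DeliveryGuarantee;

public final class SemanticTranslation {
    // "exactly-once" -> EXACTLY_ONCE, "at-least-once" -> AT_LEAST_ONCE, "none" -> NONE
    static DeliveryGuarantee toDeliveryGuarantee(String semantic) {
        return DeliveryGuarantee.valueOf(semantic.toUpperCase().replace("-", "_"));
    }
}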
Use of org.apache.flink.api.common.serialization.SerializationSchema in project flink by apache.
Class KafkaDynamicTableFactoryTest, method testTableSink.
@Test
public void testTableSink() {
    final Map<String, String> modifiedOptions =
            getModifiedOptions(
                    getBasicSinkOptions(),
                    options -> {
                        options.put("sink.delivery-guarantee", "exactly-once");
                        options.put("sink.transactional-id-prefix", "kafka-sink");
                    });
    final DynamicTableSink actualSink = createTableSink(SCHEMA, modifiedOptions);

    final EncodingFormat<SerializationSchema<RowData>> valueEncodingFormat =
            new EncodingFormatMock(",");
    final DynamicTableSink expectedSink =
            createExpectedSink(
                    SCHEMA_DATA_TYPE, null, valueEncodingFormat, new int[0], new int[] {0, 1, 2},
                    null, TOPIC, KAFKA_SINK_PROPERTIES, new FlinkFixedPartitioner<>(),
                    DeliveryGuarantee.EXACTLY_ONCE, null, "kafka-sink");
    assertThat(actualSink).isEqualTo(expectedSink);

    // Test the Kafka producer.
    final KafkaDynamicSink actualKafkaSink = (KafkaDynamicSink) actualSink;
    DynamicTableSink.SinkRuntimeProvider provider =
            actualKafkaSink.getSinkRuntimeProvider(new SinkRuntimeProviderContext(false));
    assertThat(provider).isInstanceOf(SinkV2Provider.class);
    final SinkV2Provider sinkProvider = (SinkV2Provider) provider;
    final Sink<RowData> sinkFunction = sinkProvider.createSink();
    assertThat(sinkFunction).isInstanceOf(KafkaSink.class);
}
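The test confirms that the table sink ultimately resolves to a DataStream KafkaSink. For comparison, the sketch below builds such a sink directly with the public builder; the topic, broker address, and transactional-id prefix are placeholders, and SimpleStringSchema plays the role that the discovered SerializationSchema<RowData> plays in the table connector.

import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.connector.base.DeliveryGuarantee;
import org.apache.flink.connector.kafka.sink.KafkaRecordSerializationSchema;
import org.apache.flink.connector.kafka.sink.KafkaSink;

public class KafkaSinkBuilderSketch {
    public static KafkaSink<String> build() {
        // Placeholder topic, servers, and transactional-id prefix.
        return KafkaSink.<String>builder()
                .setBootstrapServers("localhost:9092")
                .setRecordSerializer(
                        KafkaRecordSerializationSchema.builder()
                                .setTopic("orders")
                                .setValueSerializationSchema(new SimpleStringSchema())
                                .build())
                .setDeliveryGuarantee(DeliveryGuarantee.EXACTLY_ONCE)
                .setTransactionalIdPrefix("orders-sink")
                .build();
    }
}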
Use of org.apache.flink.api.common.serialization.SerializationSchema in project flink by apache.
Class UpsertKafkaDynamicTableFactory, method createDynamicTableSink.
@Override
public DynamicTableSink createDynamicTableSink(Context context) {
    FactoryUtil.TableFactoryHelper helper =
            FactoryUtil.createTableFactoryHelper(this, autoCompleteSchemaRegistrySubject(context));
    final ReadableConfig tableOptions = helper.getOptions();

    EncodingFormat<SerializationSchema<RowData>> keyEncodingFormat =
            helper.discoverEncodingFormat(SerializationFormatFactory.class, KEY_FORMAT);
    EncodingFormat<SerializationSchema<RowData>> valueEncodingFormat =
            helper.discoverEncodingFormat(SerializationFormatFactory.class, VALUE_FORMAT);

    // Validate the option data type.
    helper.validateExcept(PROPERTIES_PREFIX);
    validateSink(tableOptions, keyEncodingFormat, valueEncodingFormat, context.getPrimaryKeyIndexes());

    Tuple2<int[], int[]> keyValueProjections = createKeyValueProjections(context.getCatalogTable());
    final String keyPrefix = tableOptions.getOptional(KEY_FIELDS_PREFIX).orElse(null);
    final Properties properties = getKafkaProperties(context.getCatalogTable().getOptions());
    Integer parallelism = tableOptions.get(SINK_PARALLELISM);
    int batchSize = tableOptions.get(SINK_BUFFER_FLUSH_MAX_ROWS);
    Duration batchInterval = tableOptions.get(SINK_BUFFER_FLUSH_INTERVAL);
    SinkBufferFlushMode flushMode = new SinkBufferFlushMode(batchSize, batchInterval.toMillis());

    // With no custom partitioner, records are hash-partitioned when a key is set,
    // otherwise distributed round-robin.
    return new KafkaDynamicSink(
            context.getPhysicalRowDataType(), context.getPhysicalRowDataType(),
            keyEncodingFormat, new EncodingFormatWrapper(valueEncodingFormat),
            keyValueProjections.f0, keyValueProjections.f1, keyPrefix,
            tableOptions.get(TOPIC).get(0), properties, null, DeliveryGuarantee.AT_LEAST_ONCE,
            true, flushMode, parallelism, tableOptions.get(TRANSACTIONAL_ID_PREFIX));
}
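For context, here is a hypothetical DDL that would exercise this upsert factory; the class name, table name, schema, and broker address are invented, while the option keys follow the documented 'upsert-kafka' connector options. The declared PRIMARY KEY drives the key projection, 'key.format' and 'value.format' are the two encoding formats discovered above, and the buffer-flush options populate the SinkBufferFlushMode.

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;

public class UpsertKafkaSinkDdlSketch {
    public static void main(String[] args) {
        // Hypothetical example; table name, fields, and broker address are made up.
        TableEnvironment tEnv = TableEnvironment.create(EnvironmentSettings.inStreamingMode());
        tEnv.executeSql(
                "CREATE TABLE order_totals (\n"
                        + "  order_id STRING,\n"
                        + "  total DOUBLE,\n"
                        + "  PRIMARY KEY (order_id) NOT ENFORCED\n"
                        + ") WITH (\n"
                        + "  'connector' = 'upsert-kafka',\n"
                        + "  'topic' = 'order-totals',\n"
                        + "  'properties.bootstrap.servers' = 'localhost:9092',\n"
                        + "  'key.format' = 'json',\n"
                        + "  'value.format' = 'json',\n"
                        + "  'sink.buffer-flush.max-rows' = '1000',\n"
                        + "  'sink.buffer-flush.interval' = '1s'\n"
                        + ")");
    }
}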