Use of org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext in project flink by apache.

The class HBaseDynamicTableFactoryTest, method testParallelismOptions:

@Test
public void testParallelismOptions() {
    Map<String, String> options = getAllOptions();
    options.put("sink.parallelism", "2");

    ResolvedSchema schema = ResolvedSchema.of(Column.physical(ROWKEY, STRING()));
    DynamicTableSink sink = createTableSink(schema, options);

    assertTrue(sink instanceof HBaseDynamicTableSink);
    HBaseDynamicTableSink hbaseSink = (HBaseDynamicTableSink) sink;

    SinkFunctionProvider provider =
            (SinkFunctionProvider)
                    hbaseSink.getSinkRuntimeProvider(new SinkRuntimeProviderContext(false));
    assertEquals(2, (long) provider.getParallelism().get());
}
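The test asserts that the parallelism configured via "sink.parallelism" is surfaced through the provider's getParallelism(). As a rough illustration of the contract being tested, a minimal custom DynamicTableSink could expose a parallelism the same way; the class name ParallelismAwareSink and the PrintSinkFunction body below are assumptions for this sketch, not code from the HBase connector:

import org.apache.flink.streaming.api.functions.sink.PrintSinkFunction;
import org.apache.flink.streaming.api.functions.sink.SinkFunction;
import org.apache.flink.table.connector.ChangelogMode;
import org.apache.flink.table.connector.sink.DynamicTableSink;
import org.apache.flink.table.connector.sink.SinkFunctionProvider;
import org.apache.flink.table.data.RowData;

// Hypothetical sink used only to illustrate the provider/parallelism contract.
public class ParallelismAwareSink implements DynamicTableSink {

    private final int parallelism; // e.g. parsed from the "sink.parallelism" option

    public ParallelismAwareSink(int parallelism) {
        this.parallelism = parallelism;
    }

    @Override
    public ChangelogMode getChangelogMode(ChangelogMode requestedMode) {
        return ChangelogMode.insertOnly();
    }

    @Override
    public SinkRuntimeProvider getSinkRuntimeProvider(Context context) {
        // context.isBounded() reports the boolean passed to SinkRuntimeProviderContext
        // in the test above (false = unbounded/streaming execution).
        SinkFunction<RowData> fn = new PrintSinkFunction<>();
        // The second argument is what provider.getParallelism() returns in the test.
        return SinkFunctionProvider.of(fn, parallelism);
    }

    @Override
    public DynamicTableSink copy() {
        return new ParallelismAwareSink(parallelism);
    }

    @Override
    public String asSummaryString() {
        return "ParallelismAwareSink";
    }
}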
Use of org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext in project flink by apache.

The class DebeziumAvroFormatFactoryTest, method createSerializationSchema:

private static SerializationSchema<RowData> createSerializationSchema(Map<String, String> options) {
    final DynamicTableSink actualSink = createTableSink(SCHEMA, options);
    assertThat(actualSink, instanceOf(TestDynamicTableFactory.DynamicTableSinkMock.class));
    TestDynamicTableFactory.DynamicTableSinkMock sinkMock =
            (TestDynamicTableFactory.DynamicTableSinkMock) actualSink;
    return sinkMock.valueFormat.createRuntimeEncoder(
            new SinkRuntimeProviderContext(false), SCHEMA.toPhysicalRowDataType());
}
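For context, the SerializationSchema<RowData> returned here turns table rows into Debezium-Avro bytes. A hedged usage sketch follows, assuming a two-field schema of BIGINT and STRING; the helper name and row values are illustrative, and a real Avro-Confluent encoder must first be open()-ed with an InitializationContext (e.g. to reach the schema registry) before serialize() is called:

import org.apache.flink.api.common.serialization.SerializationSchema;
import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.data.StringData;

// Hypothetical helper, not part of the Flink test class above.
private static byte[] encodeSampleRow(SerializationSchema<RowData> encoder) {
    // Assumes encoder.open(...) has already been called where the format requires it.
    return encoder.serialize(GenericRowData.of(1L, StringData.fromString("debezium")));
}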
Use of org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext in project flink by apache.

The class JdbcDynamicTableSinkITCase, method testFlushBufferWhenCheckpoint:

@Test
public void testFlushBufferWhenCheckpoint() throws Exception {
    Map<String, String> options = new HashMap<>();
    options.put("connector", "jdbc");
    options.put("url", DB_URL);
    options.put("table-name", OUTPUT_TABLE5);
    options.put("sink.buffer-flush.interval", "0");

    ResolvedSchema schema = ResolvedSchema.of(Column.physical("id", DataTypes.BIGINT().notNull()));
    DynamicTableSink tableSink = createTableSink(schema, options);

    SinkRuntimeProviderContext context = new SinkRuntimeProviderContext(false);
    SinkFunctionProvider sinkProvider =
            (SinkFunctionProvider) tableSink.getSinkRuntimeProvider(context);
    GenericJdbcSinkFunction<RowData> sinkFunction =
            (GenericJdbcSinkFunction<RowData>) sinkProvider.createSinkFunction();

    sinkFunction.setRuntimeContext(new MockStreamingRuntimeContext(true, 1, 0));
    sinkFunction.open(new Configuration());
    sinkFunction.invoke(GenericRowData.of(1L), SinkContextUtil.forTimestamp(1));
    sinkFunction.invoke(GenericRowData.of(2L), SinkContextUtil.forTimestamp(1));

    // With a flush interval of 0, nothing is written before the checkpoint...
    check(new Row[] {}, DB_URL, OUTPUT_TABLE5, new String[] {"id"});
    sinkFunction.snapshotState(new StateSnapshotContextSynchronousImpl(1, 1));
    // ...and the buffered rows are flushed by snapshotState().
    check(new Row[] {Row.of(1L), Row.of(2L)}, DB_URL, OUTPUT_TABLE5, new String[] {"id"});

    sinkFunction.close();
}
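The behavior under test (setting "sink.buffer-flush.interval" to 0 disables timed flushes, so buffered rows only reach the database on a checkpoint) follows Flink's standard CheckpointedFunction pattern. Below is a minimal sketch of that pattern, assuming a hypothetical BufferingSink class with an empty flush() body standing in for the real JDBC batching logic:

import java.util.ArrayList;
import java.util.List;

import org.apache.flink.runtime.state.FunctionInitializationContext;
import org.apache.flink.runtime.state.FunctionSnapshotContext;
import org.apache.flink.streaming.api.checkpoint.CheckpointedFunction;
import org.apache.flink.streaming.api.functions.sink.RichSinkFunction;
import org.apache.flink.table.data.RowData;

// Hypothetical sink that defers all writes until a checkpoint, mirroring what
// the test observes: no rows in the table before snapshotState(), all rows after.
public class BufferingSink extends RichSinkFunction<RowData> implements CheckpointedFunction {

    private final List<RowData> buffer = new ArrayList<>();

    @Override
    public void invoke(RowData value, Context context) {
        buffer.add(value); // nothing reaches the database yet
    }

    @Override
    public void snapshotState(FunctionSnapshotContext context) {
        flush(); // the checkpoint forces the buffered rows out
    }

    @Override
    public void initializeState(FunctionInitializationContext context) {
        // No managed state in this sketch; a real sink would restore pending rows here.
    }

    private void flush() {
        // Write the buffered rows via JDBC (omitted here), then clear the buffer.
        buffer.clear();
    }
}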
Use of org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext in project flink by apache.

The class UpsertKafkaDynamicTableFactoryTest, method verifyEncoderSubject:

private void verifyEncoderSubject(
        Consumer<Map<String, String>> optionModifier,
        String expectedValueSubject,
        String expectedKeySubject) {
    Map<String, String> options = new HashMap<>();
    // Kafka-specific options.
    options.put("connector", UpsertKafkaDynamicTableFactory.IDENTIFIER);
    options.put("topic", SINK_TOPIC);
    options.put("properties.group.id", "dummy");
    options.put("properties.bootstrap.servers", "dummy");
    optionModifier.accept(options);

    final RowType rowType = (RowType) SINK_SCHEMA.toSinkRowDataType().getLogicalType();
    final String valueFormat =
            options.getOrDefault(
                    FactoryUtil.FORMAT.key(), options.get(KafkaConnectorOptions.VALUE_FORMAT.key()));
    final String keyFormat = options.get(KafkaConnectorOptions.KEY_FORMAT.key());

    KafkaDynamicSink sink = (KafkaDynamicSink) createTableSink(SINK_SCHEMA, options);

    if (AVRO_CONFLUENT.equals(valueFormat)) {
        SerializationSchema<RowData> actualValueEncoder =
                sink.valueEncodingFormat.createRuntimeEncoder(
                        new SinkRuntimeProviderContext(false), SINK_SCHEMA.toSinkRowDataType());
        assertEquals(createConfluentAvroSerSchema(rowType, expectedValueSubject), actualValueEncoder);
    }
    if (AVRO_CONFLUENT.equals(keyFormat)) {
        assert sink.keyEncodingFormat != null;
        SerializationSchema<RowData> actualKeyEncoder =
                sink.keyEncodingFormat.createRuntimeEncoder(
                        new SinkRuntimeProviderContext(false), SINK_SCHEMA.toSinkRowDataType());
        assertEquals(createConfluentAvroSerSchema(rowType, expectedKeySubject), actualKeyEncoder);
    }
}
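As a usage sketch, a caller of verifyEncoderSubject might supply an optionModifier like the one below. The subject names are illustrative, and the exact avro-confluent option keys have changed across Flink versions, so treat the keys here as assumptions rather than the test's actual values:

// Hypothetical caller; verify option key names against your Flink version's docs.
Consumer<Map<String, String>> avroConfluentOptions = opts -> {
    opts.put("value.format", "avro-confluent");
    opts.put("value.avro-confluent.url", "http://localhost:8081");
    opts.put("value.avro-confluent.subject", "topic-value");
    opts.put("key.format", "avro-confluent");
    opts.put("key.avro-confluent.url", "http://localhost:8081");
    opts.put("key.avro-confluent.subject", "topic-key");
};
verifyEncoderSubject(avroConfluentOptions, "topic-value", "topic-key");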