Use of org.apache.flink.table.connector.sink.DynamicTableSink in project flink by apache.
The class HBaseDynamicTableFactoryTest, method testBufferFlushOptions.
@Test
public void testBufferFlushOptions() {
    Map<String, String> options = getAllOptions();
    options.put("sink.buffer-flush.max-size", "10mb");
    options.put("sink.buffer-flush.max-rows", "100");
    options.put("sink.buffer-flush.interval", "10s");
    ResolvedSchema schema = ResolvedSchema.of(Column.physical(ROWKEY, STRING()));
    DynamicTableSink sink = createTableSink(schema, options);
    HBaseWriteOptions expected =
            HBaseWriteOptions.builder()
                    .setBufferFlushMaxRows(100)
                    .setBufferFlushIntervalMillis(10 * 1000)
                    .setBufferFlushMaxSizeInBytes(10 * 1024 * 1024)
                    .build();
    HBaseWriteOptions actual = ((HBaseDynamicTableSink) sink).getWriteOptions();
    assertEquals(expected, actual);
}
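For reference, the "10mb" and "10s" option strings are parsed into bytes and a duration before they reach HBaseWriteOptions, which is why the expected values are 10 * 1024 * 1024 bytes and 10 * 1000 ms. A minimal sketch of those conversions, assuming only the MemorySize and TimeUtils utilities from flink-core (the connector may wire the parsing up differently):

import java.time.Duration;

import org.apache.flink.configuration.MemorySize;
import org.apache.flink.util.TimeUtils;

public class BufferFlushOptionDemo {
    public static void main(String[] args) {
        // "10mb" is a binary unit, so it resolves to 10 * 1024 * 1024 bytes.
        long maxSizeInBytes = MemorySize.parse("10mb").getBytes();
        // "10s" resolves to a 10-second Duration, i.e. 10 * 1000 ms.
        Duration flushInterval = TimeUtils.parseDuration("10s");
        System.out.println(maxSizeInBytes);           // 10485760
        System.out.println(flushInterval.toMillis()); // 10000
    }
}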
Use of org.apache.flink.table.connector.sink.DynamicTableSink in project flink by apache.
The class DebeziumAvroFormatFactoryTest, method createSerializationSchema.
private static SerializationSchema<RowData> createSerializationSchema(Map<String, String> options) {
    final DynamicTableSink actualSink = createTableSink(SCHEMA, options);
    assertThat(actualSink, instanceOf(TestDynamicTableFactory.DynamicTableSinkMock.class));
    TestDynamicTableFactory.DynamicTableSinkMock sinkMock =
            (TestDynamicTableFactory.DynamicTableSinkMock) actualSink;
    return sinkMock.valueFormat.createRuntimeEncoder(
            new SinkRuntimeProviderContext(false), SCHEMA.toPhysicalRowDataType());
}
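The encoder returned here is an ordinary SerializationSchema<RowData>. A hedged sketch of how a caller would drive it, assuming a physical row with a single STRING column (not the test's actual SCHEMA); the InitializationContext is supplied by the Flink runtime and is left as a parameter:

import org.apache.flink.api.common.serialization.SerializationSchema;
import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.data.StringData;
import org.apache.flink.types.RowKind;

static byte[] encode(
        SerializationSchema<RowData> serializer,
        SerializationSchema.InitializationContext context) throws Exception {
    // open() must run once before serialize(); in production the runtime calls it.
    serializer.open(context);
    RowData row = GenericRowData.ofKind(RowKind.INSERT, StringData.fromString("value"));
    return serializer.serialize(row);
}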
Use of org.apache.flink.table.connector.sink.DynamicTableSink in project flink by apache.
The class ElasticsearchDynamicSinkFactoryBaseTest, method testSinkParallelism.
@Test
public void testSinkParallelism() {
    ElasticsearchDynamicSinkFactoryBase sinkFactory = createSinkFactory();
    DynamicTableSink sink =
            sinkFactory.createDynamicTableSink(
                    createPrefilledTestContext().withOption(SINK_PARALLELISM.key(), "2").build());
    assertThat(sink).isInstanceOf(ElasticsearchDynamicSink.class);
    ElasticsearchDynamicSink esSink = (ElasticsearchDynamicSink) sink;
    SinkV2Provider provider =
            (SinkV2Provider) esSink.getSinkRuntimeProvider(new ElasticsearchUtil.MockContext());
    assertThat(provider.getParallelism()).hasValue(2);
}
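The parallelism read back by this test is whatever the connector passed to SinkV2Provider.of when building its runtime provider. A minimal sketch of that hand-off inside a DynamicTableSink implementation; mySinkV2 and parallelism are assumed fields, not part of the Elasticsearch connector:

@Override
public SinkRuntimeProvider getSinkRuntimeProvider(Context context) {
    // SinkV2Provider.of(sink, parallelism) records the configured parallelism,
    // which the planner later reads back via provider.getParallelism().
    return SinkV2Provider.of(mySinkV2, parallelism);
}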
Use of org.apache.flink.table.connector.sink.DynamicTableSink in project flink by apache.
The class HiveTableFactoryTest, method testHiveTable.
@Test
public void testHiveTable() throws Exception {
    final ResolvedSchema schema =
            ResolvedSchema.of(
                    Column.physical("name", DataTypes.STRING()),
                    Column.physical("age", DataTypes.INT()));
    catalog.createDatabase("mydb", new CatalogDatabaseImpl(new HashMap<>(), ""), true);
    final Map<String, String> options =
            Collections.singletonMap(FactoryUtil.CONNECTOR.key(), SqlCreateHiveTable.IDENTIFIER);
    final CatalogTable table =
            new CatalogTableImpl(TableSchema.fromResolvedSchema(schema), options, "hive table");
    catalog.createTable(new ObjectPath("mydb", "mytable"), table, true);
    final DynamicTableSource tableSource =
            FactoryUtil.createDynamicTableSource(
                    (DynamicTableSourceFactory)
                            catalog.getFactory().orElseThrow(IllegalStateException::new),
                    ObjectIdentifier.of("mycatalog", "mydb", "mytable"),
                    new ResolvedCatalogTable(table, schema),
                    new Configuration(),
                    Thread.currentThread().getContextClassLoader(),
                    false);
    assertTrue(tableSource instanceof HiveTableSource);
    final DynamicTableSink tableSink =
            FactoryUtil.createDynamicTableSink(
                    (DynamicTableSinkFactory)
                            catalog.getFactory().orElseThrow(IllegalStateException::new),
                    ObjectIdentifier.of("mycatalog", "mydb", "mytable"),
                    new ResolvedCatalogTable(table, schema),
                    new Configuration(),
                    Thread.currentThread().getContextClassLoader(),
                    false);
    assertTrue(tableSink instanceof HiveTableSink);
}
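Both FactoryUtil calls above dispatch to a table factory, here the one supplied by the Hive catalog's getFactory(). A minimal sketch of the DynamicTableSinkFactory contract such a factory fulfills; the identifier "my-connector" and the MySink class are hypothetical:

import java.util.Collections;
import java.util.Set;

import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.table.connector.sink.DynamicTableSink;
import org.apache.flink.table.factories.DynamicTableSinkFactory;
import org.apache.flink.table.factories.FactoryUtil;

public class MySinkFactory implements DynamicTableSinkFactory {

    @Override
    public String factoryIdentifier() {
        // Matched against the 'connector' option of the table.
        return "my-connector";
    }

    @Override
    public Set<ConfigOption<?>> requiredOptions() {
        return Collections.emptySet();
    }

    @Override
    public Set<ConfigOption<?>> optionalOptions() {
        return Collections.emptySet();
    }

    @Override
    public DynamicTableSink createDynamicTableSink(Context context) {
        // Fails fast on unknown or malformed options before the sink is built.
        FactoryUtil.createTableFactoryHelper(this, context).validate();
        // MySink is a hypothetical DynamicTableSink implementation.
        return new MySink(context.getCatalogTable().getResolvedSchema());
    }
}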
Use of org.apache.flink.table.connector.sink.DynamicTableSink in project flink by apache.
The class JdbcDynamicTableFactoryTest, method testJdbcCommonProperties.
@Test
public void testJdbcCommonProperties() {
    Map<String, String> properties = getAllOptions();
    properties.put("driver", "org.apache.derby.jdbc.EmbeddedDriver");
    properties.put("username", "user");
    properties.put("password", "pass");
    properties.put("connection.max-retry-timeout", "120s");

    // validation for source
    DynamicTableSource actualSource = createTableSource(SCHEMA, properties);
    JdbcConnectorOptions options =
            JdbcConnectorOptions.builder()
                    .setDBUrl("jdbc:derby:memory:mydb")
                    .setTableName("mytable")
                    .setDriverName("org.apache.derby.jdbc.EmbeddedDriver")
                    .setUsername("user")
                    .setPassword("pass")
                    .setConnectionCheckTimeoutSeconds(120)
                    .build();
    JdbcLookupOptions lookupOptions =
            JdbcLookupOptions.builder()
                    .setCacheMaxSize(-1)
                    .setCacheExpireMs(10_000)
                    .setMaxRetryTimes(3)
                    .build();
    JdbcDynamicTableSource expectedSource =
            new JdbcDynamicTableSource(
                    options,
                    JdbcReadOptions.builder().build(),
                    lookupOptions,
                    SCHEMA.toPhysicalRowDataType());
    assertEquals(expectedSource, actualSource);

    // validation for sink
    DynamicTableSink actualSink = createTableSink(SCHEMA, properties);
    // default flush configurations
    JdbcExecutionOptions executionOptions =
            JdbcExecutionOptions.builder()
                    .withBatchSize(100)
                    .withBatchIntervalMs(1000)
                    .withMaxRetries(3)
                    .build();
    JdbcDmlOptions dmlOptions =
            JdbcDmlOptions.builder()
                    .withTableName(options.getTableName())
                    .withDialect(options.getDialect())
                    .withFieldNames(SCHEMA.getColumnNames().toArray(new String[0]))
                    .withKeyFields("bbb", "aaa")
                    .build();
    JdbcDynamicTableSink expectedSink =
            new JdbcDynamicTableSink(
                    options, executionOptions, dmlOptions, SCHEMA.toPhysicalRowDataType());
    assertEquals(expectedSink, actualSink);
}
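All of the sinks asserted on in these examples implement the same four-method DynamicTableSink contract. A minimal insert-only sketch of that interface, wrapping a hypothetical Sink<RowData> delegate:

import org.apache.flink.api.connector.sink2.Sink;
import org.apache.flink.table.connector.ChangelogMode;
import org.apache.flink.table.connector.sink.DynamicTableSink;
import org.apache.flink.table.connector.sink.SinkV2Provider;
import org.apache.flink.table.data.RowData;

public class MyDynamicTableSink implements DynamicTableSink {

    private final Sink<RowData> delegate; // hypothetical runtime sink

    public MyDynamicTableSink(Sink<RowData> delegate) {
        this.delegate = delegate;
    }

    @Override
    public ChangelogMode getChangelogMode(ChangelogMode requestedMode) {
        // Accept only INSERT rows; an upsert sink would also declare
        // UPDATE_AFTER and DELETE in its changelog mode.
        return ChangelogMode.insertOnly();
    }

    @Override
    public SinkRuntimeProvider getSinkRuntimeProvider(Context context) {
        return SinkV2Provider.of(delegate);
    }

    @Override
    public DynamicTableSink copy() {
        // The planner may mutate copies during optimization, so return a fresh instance.
        return new MyDynamicTableSink(delegate);
    }

    @Override
    public String asSummaryString() {
        return "MyDynamicTableSink";
    }
}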