Use of org.apache.flink.table.catalog.ResolvedSchema in project flink by apache.
The class HiveTableFactoryTest, method testHiveTable.
@Test
public void testHiveTable() throws Exception {
    final ResolvedSchema schema = ResolvedSchema.of(
            Column.physical("name", DataTypes.STRING()),
            Column.physical("age", DataTypes.INT()));
    catalog.createDatabase("mydb", new CatalogDatabaseImpl(new HashMap<>(), ""), true);
    final Map<String, String> options =
            Collections.singletonMap(FactoryUtil.CONNECTOR.key(), SqlCreateHiveTable.IDENTIFIER);
    final CatalogTable table =
            new CatalogTableImpl(TableSchema.fromResolvedSchema(schema), options, "hive table");
    catalog.createTable(new ObjectPath("mydb", "mytable"), table, true);
    final DynamicTableSource tableSource = FactoryUtil.createDynamicTableSource(
            (DynamicTableSourceFactory) catalog.getFactory().orElseThrow(IllegalStateException::new),
            ObjectIdentifier.of("mycatalog", "mydb", "mytable"),
            new ResolvedCatalogTable(table, schema),
            new Configuration(), Thread.currentThread().getContextClassLoader(), false);
    assertTrue(tableSource instanceof HiveTableSource);
    final DynamicTableSink tableSink = FactoryUtil.createDynamicTableSink(
            (DynamicTableSinkFactory) catalog.getFactory().orElseThrow(IllegalStateException::new),
            ObjectIdentifier.of("mycatalog", "mydb", "mytable"),
            new ResolvedCatalogTable(table, schema),
            new Configuration(), Thread.currentThread().getContextClassLoader(), false);
    assertTrue(tableSink instanceof HiveTableSink);
}
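For context, a minimal sketch (our own, not part of the test above) of assembling the ResolvedSchema / ResolvedCatalogTable pair that FactoryUtil consumes, using the non-deprecated CatalogTable.of API instead of CatalogTableImpl. The class name, comment, and options map are illustrative assumptions.

import java.util.Collections;

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.Schema;
import org.apache.flink.table.catalog.CatalogTable;
import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.catalog.ResolvedCatalogTable;
import org.apache.flink.table.catalog.ResolvedSchema;

class ResolvedCatalogTableSketch {
    static ResolvedCatalogTable hiveTable() {
        // Build the resolved schema first, then derive the unresolved Schema from it.
        ResolvedSchema schema = ResolvedSchema.of(
                Column.physical("name", DataTypes.STRING()),
                Column.physical("age", DataTypes.INT()));
        CatalogTable table = CatalogTable.of(
                Schema.newBuilder().fromResolvedSchema(schema).build(),
                "hive table",                                   // comment (assumption)
                Collections.emptyList(),                        // no partition keys
                Collections.singletonMap("connector", "hive")); // assumed option
        // The pair that FactoryUtil.createDynamicTableSource/-Sink expects:
        return new ResolvedCatalogTable(table, schema);
    }
}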
Use of org.apache.flink.table.catalog.ResolvedSchema in project flink by apache.
The class KinesisDynamicTableFactoryTest, method testGoodTableSource.
// --------------------------------------------------------------------------------------------
// Positive tests
// --------------------------------------------------------------------------------------------
@Test
public void testGoodTableSource() {
    ResolvedSchema sourceSchema = defaultSourceSchema();
    Map<String, String> sourceOptions = defaultTableOptions().build();
    // Construct the actual DynamicTableSource using FactoryUtil
    KinesisDynamicSource actualSource =
            (KinesisDynamicSource) createTableSource(sourceSchema, sourceOptions);
    // Construct the expected DynamicTableSource using the factory under test
    KinesisDynamicSource expectedSource = new KinesisDynamicSource(
            sourceSchema.toPhysicalRowDataType(),
            STREAM_NAME,
            defaultConsumerProperties(),
            new TestFormatFactory.DecodingFormatMock(",", true));
    // verify that the constructed DynamicTableSource is as expected
    assertEquals(expectedSource, actualSource);
    // verify that a copy of the constructed DynamicTableSource is as expected
    assertEquals(expectedSource, actualSource.copy());
    // verify the produced source function
    ScanTableSource.ScanRuntimeProvider functionProvider =
            actualSource.getScanRuntimeProvider(ScanRuntimeProviderContext.INSTANCE);
    SourceFunction<RowData> sourceFunction =
            as(functionProvider, SourceFunctionProvider.class).createSourceFunction();
    assertThat(sourceFunction, instanceOf(FlinkKinesisConsumer.class));
}
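A side sketch (ours) of why the expected source above is built from toPhysicalRowDataType(): that method keeps only physical columns, dropping computed and metadata columns. The column names and metadata key below are illustrative assumptions.

import java.util.Arrays;
import java.util.Collections;

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.catalog.ResolvedSchema;
import org.apache.flink.table.types.DataType;

class PhysicalRowDataTypeSketch {
    static DataType physicalType() {
        ResolvedSchema schema = new ResolvedSchema(
                Arrays.asList(
                        Column.physical("user_id", DataTypes.BIGINT()),
                        // virtual metadata column: excluded from the physical row type
                        Column.metadata("ts", DataTypes.TIMESTAMP(3), "timestamp", true)),
                Collections.emptyList(), // no watermark specs
                null);                   // no primary key
        return schema.toPhysicalRowDataType(); // ROW<user_id BIGINT>
    }
}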
Use of org.apache.flink.table.catalog.ResolvedSchema in project flink by apache.
The class KinesisDynamicTableFactoryTest, method testGoodTableSinkCopyForPartitionedTable.
@Test
public void testGoodTableSinkCopyForPartitionedTable() {
    ResolvedSchema sinkSchema = defaultSinkSchema();
    DataType physicalDataType = sinkSchema.toPhysicalRowDataType();
    Map<String, String> sinkOptions = defaultSinkTableOptions().build();
    List<String> sinkPartitionKeys = Arrays.asList("name", "curr_id");
    // Construct the actual DynamicTableSink using FactoryUtil
    KinesisDynamicSink actualSink =
            (KinesisDynamicSink) createTableSink(sinkSchema, sinkPartitionKeys, sinkOptions);
    // Construct the expected DynamicTableSink using the factory under test
    KinesisDynamicSink expectedSink = (KinesisDynamicSink)
            new KinesisDynamicSink.KinesisDynamicTableSinkBuilder()
                    .setConsumedDataType(physicalDataType)
                    .setStream(STREAM_NAME)
                    .setKinesisClientProperties(defaultProducerProperties())
                    .setEncodingFormat(new TestFormatFactory.EncodingFormatMock(","))
                    .setPartitioner(new RowDataFieldsKinesisPartitionKeyGenerator(
                            (RowType) physicalDataType.getLogicalType(), sinkPartitionKeys))
                    .build();
    // A copy must be value-equal to the original but not the same instance.
    Assertions.assertThat(actualSink).isEqualTo(expectedSink.copy());
    Assertions.assertThat(expectedSink).isNotSameAs(expectedSink.copy());
}
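The two assertions above check the copy() contract for DynamicTableSink. A small reusable helper (our own naming, a sketch rather than anything from the test class) that captures the same contract:

import org.apache.flink.table.connector.sink.DynamicTableSink;
import org.assertj.core.api.Assertions;

final class CopyContract {
    static void assertDeepCopy(DynamicTableSink original) {
        DynamicTableSink copy = original.copy();
        Assertions.assertThat(copy).isEqualTo(original);   // same value
        Assertions.assertThat(copy).isNotSameAs(original); // different object
    }
}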
Use of org.apache.flink.table.catalog.ResolvedSchema in project flink by apache.
The class JdbcDynamicTableSinkITCase, method testFlushBufferWhenCheckpoint.
@Test
public void testFlushBufferWhenCheckpoint() throws Exception {
    Map<String, String> options = new HashMap<>();
    options.put("connector", "jdbc");
    options.put("url", DB_URL);
    options.put("table-name", OUTPUT_TABLE5);
    // Disable interval-based flushing so records are flushed only on checkpoint.
    options.put("sink.buffer-flush.interval", "0");
    ResolvedSchema schema = ResolvedSchema.of(Column.physical("id", DataTypes.BIGINT().notNull()));
    DynamicTableSink tableSink = createTableSink(schema, options);
    SinkRuntimeProviderContext context = new SinkRuntimeProviderContext(false);
    SinkFunctionProvider sinkProvider =
            (SinkFunctionProvider) tableSink.getSinkRuntimeProvider(context);
    GenericJdbcSinkFunction<RowData> sinkFunction =
            (GenericJdbcSinkFunction<RowData>) sinkProvider.createSinkFunction();
    sinkFunction.setRuntimeContext(new MockStreamingRuntimeContext(true, 1, 0));
    sinkFunction.open(new Configuration());
    sinkFunction.invoke(GenericRowData.of(1L), SinkContextUtil.forTimestamp(1));
    sinkFunction.invoke(GenericRowData.of(2L), SinkContextUtil.forTimestamp(1));
    // Nothing is visible before the checkpoint ...
    check(new Row[] {}, DB_URL, OUTPUT_TABLE5, new String[] { "id" });
    sinkFunction.snapshotState(new StateSnapshotContextSynchronousImpl(1, 1));
    // ... and both buffered rows are flushed by snapshotState().
    check(new Row[] { Row.of(1L), Row.of(2L) }, DB_URL, OUTPUT_TABLE5, new String[] { "id" });
    sinkFunction.close();
}
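A sketch (ours) of the option combination the test relies on. The max-rows entry is an assumption we add for illustration of the connector's companion flush bound; with the interval set to "0", buffered rows are written out only when the row bound or a checkpoint triggers a flush.

import java.util.HashMap;
import java.util.Map;

class JdbcSinkOptionsSketch {
    static Map<String, String> checkpointOnlyFlush(String url, String tableName) {
        Map<String, String> options = new HashMap<>();
        options.put("connector", "jdbc");
        options.put("url", url);
        options.put("table-name", tableName);
        options.put("sink.buffer-flush.interval", "0");   // no timed flushing
        options.put("sink.buffer-flush.max-rows", "100"); // assumed bound
        return options;
    }
}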
Use of org.apache.flink.table.catalog.ResolvedSchema in project flink by apache.
The class DataTypeUtilsTest, method testExpandDistinctType.
@Test
public void testExpandDistinctType() {
    FieldsDataType dataType = (FieldsDataType) ROW(
            FIELD("f0", INT()),
            FIELD("f1", STRING()),
            FIELD("f2", TIMESTAMP(5).bridgedTo(Timestamp.class)),
            FIELD("f3", TIMESTAMP(3)));
    LogicalType originalLogicalType = dataType.getLogicalType();
    DistinctType distinctLogicalType = DistinctType.newBuilder(
            ObjectIdentifier.of("catalog", "database", "type"), originalLogicalType).build();
    DataType distinctDataType = new FieldsDataType(distinctLogicalType, dataType.getChildren());
    ResolvedSchema schema = DataTypeUtils.expandCompositeTypeToSchema(distinctDataType);
    assertThat(schema).isEqualTo(ResolvedSchema.of(
            Column.physical("f0", INT()),
            Column.physical("f1", STRING()),
            Column.physical("f2", TIMESTAMP(5).bridgedTo(Timestamp.class)),
            Column.physical("f3", TIMESTAMP(3).bridgedTo(LocalDateTime.class))));
}
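A simpler companion sketch (ours): expanding a plain ROW, without the DistinctType wrapper, yields one physical column per field. The class and method names are our own.

import static org.apache.flink.table.api.DataTypes.FIELD;
import static org.apache.flink.table.api.DataTypes.INT;
import static org.apache.flink.table.api.DataTypes.ROW;
import static org.apache.flink.table.api.DataTypes.STRING;

import org.apache.flink.table.catalog.ResolvedSchema;
import org.apache.flink.table.types.utils.DataTypeUtils;

class ExpandRowSketch {
    static ResolvedSchema expand() {
        // Equivalent to ResolvedSchema.of(Column.physical("a", INT()),
        //                                 Column.physical("b", STRING()))
        return DataTypeUtils.expandCompositeTypeToSchema(
                ROW(FIELD("a", INT()), FIELD("b", STRING())));
    }
}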