Use of org.apache.flink.table.connector.sink.DynamicTableSink in project flink by apache.
The class HBaseDynamicTableFactoryTest, method testTableSinkFactory.
@Test
public void testTableSinkFactory() {
    ResolvedSchema schema =
            ResolvedSchema.of(
                    Column.physical(ROWKEY, STRING()),
                    Column.physical(FAMILY1, ROW(FIELD(COL1, DOUBLE()), FIELD(COL2, INT()))),
                    Column.physical(FAMILY2, ROW(FIELD(COL1, INT()), FIELD(COL3, BIGINT()))),
                    Column.physical(FAMILY3, ROW(FIELD(COL2, BOOLEAN()), FIELD(COL3, STRING()))),
                    Column.physical(
                            FAMILY4,
                            ROW(
                                    FIELD(COL1, DECIMAL(10, 3)),
                                    FIELD(COL2, TIMESTAMP(3)),
                                    FIELD(COL3, DATE()),
                                    FIELD(COL4, TIME()))));
    DynamicTableSink sink = createTableSink(schema, getAllOptions());
    assertTrue(sink instanceof HBaseDynamicTableSink);
    HBaseDynamicTableSink hbaseSink = (HBaseDynamicTableSink) sink;

    // verify the derived HBase table schema: row key plus four column families
    HBaseTableSchema hbaseSchema = hbaseSink.getHBaseTableSchema();
    assertEquals(0, hbaseSchema.getRowKeyIndex());
    assertEquals(Optional.of(STRING()), hbaseSchema.getRowKeyDataType());
    assertArrayEquals(new String[] {"f1", "f2", "f3", "f4"}, hbaseSchema.getFamilyNames());
    assertArrayEquals(new String[] {"c1", "c2"}, hbaseSchema.getQualifierNames("f1"));
    assertArrayEquals(new String[] {"c1", "c3"}, hbaseSchema.getQualifierNames("f2"));
    assertArrayEquals(new String[] {"c2", "c3"}, hbaseSchema.getQualifierNames("f3"));
    assertArrayEquals(new String[] {"c1", "c2", "c3", "c4"}, hbaseSchema.getQualifierNames("f4"));
    assertArrayEquals(new DataType[] {DOUBLE(), INT()}, hbaseSchema.getQualifierDataTypes("f1"));
    assertArrayEquals(new DataType[] {INT(), BIGINT()}, hbaseSchema.getQualifierDataTypes("f2"));
    assertArrayEquals(new DataType[] {BOOLEAN(), STRING()}, hbaseSchema.getQualifierDataTypes("f3"));
    assertArrayEquals(
            new DataType[] {DECIMAL(10, 3), TIMESTAMP(3), DATE(), TIME()},
            hbaseSchema.getQualifierDataTypes("f4"));

    // verify hadoop Configuration
    org.apache.hadoop.conf.Configuration expectedConfiguration =
            HBaseConfigurationUtil.getHBaseConfiguration();
    expectedConfiguration.set(HConstants.ZOOKEEPER_QUORUM, "localhost:2181");
    expectedConfiguration.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/flink");
    expectedConfiguration.set("hbase.security.authentication", "kerberos");
    org.apache.hadoop.conf.Configuration actualConfiguration = hbaseSink.getConfiguration();
    assertEquals(
            IteratorUtils.toList(expectedConfiguration.iterator()),
            IteratorUtils.toList(actualConfiguration.iterator()));

    // verify tableName
    assertEquals("testHBastTable", hbaseSink.getTableName());

    // verify write options
    HBaseWriteOptions expectedWriteOptions =
            HBaseWriteOptions.builder()
                    .setBufferFlushMaxRows(1000)
                    .setBufferFlushIntervalMillis(1000)
                    .setBufferFlushMaxSizeInBytes(2 * 1024 * 1024)
                    .build();
    HBaseWriteOptions actualWriteOptions = hbaseSink.getWriteOptions();
    assertEquals(expectedWriteOptions, actualWriteOptions);
}
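For context, the getAllOptions() helper above supplies the connector options that the assertions check. It is not shown on this page; the following is a plausible sketch using the HBase connector's documented option keys, and the real helper may differ (note that the asserted buffer-flush values also match the connector defaults):

private static Map<String, String> getAllOptions() {
    Map<String, String> options = new HashMap<>();
    options.put("connector", "hbase-2.2");
    options.put("table-name", "testHBastTable");
    options.put("zookeeper.quorum", "localhost:2181");
    options.put("zookeeper.znode.parent", "/flink");
    // 'properties.*' keys are forwarded into the hadoop Configuration
    options.put("properties.hbase.security.authentication", "kerberos");
    // these equal the connector defaults and may be omitted in the real test
    options.put("sink.buffer-flush.max-rows", "1000");
    options.put("sink.buffer-flush.interval", "1s");
    options.put("sink.buffer-flush.max-size", "2mb");
    return options;
}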
Use of org.apache.flink.table.connector.sink.DynamicTableSink in project flink by apache.
The class BatchExecSink, method translateToPlanInternal.
@SuppressWarnings("unchecked")
@Override
protected Transformation<Object> translateToPlanInternal(PlannerBase planner, ExecNodeConfig config) {
    final Transformation<RowData> inputTransform =
            (Transformation<RowData>) getInputEdges().get(0).translateToPlan(planner);
    final DynamicTableSink tableSink = tableSinkSpec.getTableSink(planner.getFlinkContext());
    // Unlike the streaming variant below, batch passes no rowtime field index (-1)
    // and never requires upsert materialization (false).
    return createSinkTransformation(
            planner.getExecEnv(), config, inputTransform, tableSink, -1, false);
}
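The tableSink resolved from tableSinkSpec is whatever DynamicTableSink the connector factory produced. As a point of reference, a minimal insert-only sink that this node could translate might look as follows; the class name and printing behavior are illustrative, not Flink internals:

import org.apache.flink.streaming.api.functions.sink.SinkFunction;
import org.apache.flink.table.connector.ChangelogMode;
import org.apache.flink.table.connector.sink.DynamicTableSink;
import org.apache.flink.table.connector.sink.SinkFunctionProvider;
import org.apache.flink.table.data.RowData;

/** A minimal insert-only DynamicTableSink that prints rows instead of writing anywhere real. */
public class PrintingTableSink implements DynamicTableSink {

    @Override
    public ChangelogMode getChangelogMode(ChangelogMode requestedMode) {
        // Accept only INSERT rows; a batch pipeline produces exactly this.
        return ChangelogMode.insertOnly();
    }

    @Override
    public SinkRuntimeProvider getSinkRuntimeProvider(Context context) {
        return SinkFunctionProvider.of(
                new SinkFunction<RowData>() {
                    @Override
                    public void invoke(RowData row, SinkFunction.Context ctx) {
                        System.out.println(row);
                    }
                });
    }

    @Override
    public DynamicTableSink copy() {
        return new PrintingTableSink();
    }

    @Override
    public String asSummaryString() {
        return "printing sink";
    }
}

SinkFunctionProvider is only one possible runtime provider; createSinkTransformation also accepts, for example, OutputFormatProvider-based sinks.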
Use of org.apache.flink.table.connector.sink.DynamicTableSink in project flink by apache.
The class FileSystemTableFactoryTest, method testSourceSink.
@Test
public void testSourceSink() {
    DescriptorProperties descriptor = new DescriptorProperties();
    descriptor.putString(FactoryUtil.CONNECTOR.key(), "filesystem");
    descriptor.putString("path", "/tmp");
    descriptor.putString("format", "testcsv");
    // options prefixed with the format name are forwarded to the format
    // factory and must be ignored by connector validation
    descriptor.putString("testcsv.my_option", "my_value");

    DynamicTableSource source = createTableSource(SCHEMA, descriptor.asMap());
    assertTrue(source instanceof FileSystemTableSource);

    DynamicTableSink sink = createTableSink(SCHEMA, descriptor.asMap());
    assertTrue(sink instanceof FileSystemTableSink);
}
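The descriptor map above corresponds one-to-one to DDL options. A rough SQL equivalent, assuming a recent Flink version; the two-column schema is illustrative, and testcsv is a format that only exists in Flink's test scope:

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;

public class FileSystemDdlExample {
    public static void main(String[] args) {
        TableEnvironment tEnv = TableEnvironment.create(EnvironmentSettings.inStreamingMode());
        // Same options as the descriptor above, expressed as DDL; the
        // 'testcsv.'-prefixed key is forwarded to the format.
        tEnv.executeSql(
                "CREATE TABLE fs_table (f0 STRING, f1 INT) WITH ("
                        + " 'connector' = 'filesystem',"
                        + " 'path' = '/tmp',"
                        + " 'format' = 'testcsv',"
                        + " 'testcsv.my_option' = 'my_value'"
                        + ")");
    }
}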
Use of org.apache.flink.table.connector.sink.DynamicTableSink in project flink by apache.
The class StreamExecSink, method translateToPlanInternal.
@SuppressWarnings("unchecked")
@Override
protected Transformation<Object> translateToPlanInternal(PlannerBase planner, ExecNodeConfig config) {
    final ExecEdge inputEdge = getInputEdges().get(0);
    final Transformation<RowData> inputTransform =
            (Transformation<RowData>) inputEdge.translateToPlan(planner);
    final RowType inputRowType = (RowType) inputEdge.getOutputType();
    final DynamicTableSink tableSink = tableSinkSpec.getTableSink(planner.getFlinkContext());
    final boolean isCollectSink = tableSink instanceof CollectDynamicSink;

    // Collect all rowtime (event-time) attribute columns of the input row type.
    final List<Integer> rowtimeFieldIndices = new ArrayList<>();
    for (int i = 0; i < inputRowType.getFieldCount(); ++i) {
        if (TypeCheckUtils.isRowTime(inputRowType.getTypeAt(i))) {
            rowtimeFieldIndices.add(i);
        }
    }

    // A regular sink accepts at most one rowtime column; the collect sink is exempt.
    final int rowtimeFieldIndex;
    if (rowtimeFieldIndices.size() > 1 && !isCollectSink) {
        throw new TableException(
                String.format(
                        "The query contains more than one rowtime attribute column [%s] for writing into table '%s'.\n"
                                + "Please select the column that should be used as the event-time timestamp "
                                + "for the table sink by casting all other columns to regular TIMESTAMP or TIMESTAMP_LTZ.",
                        rowtimeFieldIndices.stream()
                                .map(i -> inputRowType.getFieldNames().get(i))
                                .collect(Collectors.joining(", ")),
                        tableSinkSpec.getContextResolvedTable().getIdentifier().asSummaryString()));
    } else if (rowtimeFieldIndices.size() == 1) {
        rowtimeFieldIndex = rowtimeFieldIndices.get(0);
    } else {
        rowtimeFieldIndex = -1;
    }

    return createSinkTransformation(
            planner.getExecEnv(), config, inputTransform, tableSink, rowtimeFieldIndex, upsertMaterialize);
}
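The TableException above is user-facing. A sketch of the workaround its message recommends, with illustrative table and column names, given some TableEnvironment tEnv:

// Joining two tables that each carry a rowtime attribute would trip the check
// above when writing to a sink. Casting all but one rowtime column to a plain
// TIMESTAMP(3) leaves a single event-time column for the sink:
tEnv.executeSql(
        "INSERT INTO sink_table "
                + "SELECT a.id, a.rt, CAST(b.rt AS TIMESTAMP(3)) AS b_ts "
                + "FROM a JOIN b ON a.id = b.id");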
Use of org.apache.flink.table.connector.sink.DynamicTableSink in project flink by apache.
The class DynamicSinkUtils, method pushMetadataProjection.
/**
 * Creates a projection that reorders physical and metadata columns according to the consumed
 * data type of the sink. It casts metadata columns into the expected data type.
 *
 * @see SupportsWritingMetadata
 */
private static void pushMetadataProjection(
        FlinkRelBuilder relBuilder, FlinkTypeFactory typeFactory, ResolvedSchema schema, DynamicTableSink sink) {
    final RexBuilder rexBuilder = relBuilder.getRexBuilder();
    final List<Column> columns = schema.getColumns();
    final List<Integer> physicalColumns = extractPhysicalColumns(schema);

    // Map each persisted metadata key to the position of its column in the schema.
    final Map<String, Integer> keyToMetadataColumn =
            extractPersistedMetadataColumns(schema).stream()
                    .collect(Collectors.toMap(pos -> {
                        final MetadataColumn metadataColumn = (MetadataColumn) columns.get(pos);
                        return metadataColumn.getMetadataKey().orElse(metadataColumn.getName());
                    }, Function.identity()));

    // Order the metadata columns by the keys the sink actually consumes.
    final List<Integer> metadataColumns =
            createRequiredMetadataKeys(schema, sink).stream()
                    .map(keyToMetadataColumn::get)
                    .collect(Collectors.toList());

    // Physical columns keep their names; metadata columns are named by their metadata key.
    final List<String> fieldNames =
            Stream.concat(
                            physicalColumns.stream().map(columns::get).map(Column::getName),
                            metadataColumns.stream()
                                    .map(columns::get)
                                    .map(MetadataColumn.class::cast)
                                    .map(c -> c.getMetadataKey().orElse(c.getName())))
                    .collect(Collectors.toList());

    final Map<String, DataType> metadataMap = extractMetadataMap(sink);

    // Project physical columns as-is; cast each metadata column to the data type
    // the sink declares for its metadata key.
    final List<RexNode> fieldNodes =
            Stream.concat(
                            physicalColumns.stream()
                                    .map(pos -> relBuilder.field(adjustByVirtualColumns(columns, pos))),
                            metadataColumns.stream().map(pos -> {
                                final MetadataColumn metadataColumn = (MetadataColumn) columns.get(pos);
                                final String metadataKey =
                                        metadataColumn.getMetadataKey().orElse(metadataColumn.getName());
                                final LogicalType expectedType = metadataMap.get(metadataKey).getLogicalType();
                                final RelDataType expectedRelDataType =
                                        typeFactory.createFieldTypeFromLogicalType(expectedType);
                                return rexBuilder.makeAbstractCast(
                                        expectedRelDataType,
                                        relBuilder.field(adjustByVirtualColumns(columns, pos)));
                            }))
                    .collect(Collectors.toList());

    relBuilder.projectNamed(fieldNodes, fieldNames, true);
}
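For reference, pushMetadataProjection only has an effect when the sink implements SupportsWritingMetadata. A minimal sketch of such a sink, assuming a single writable 'timestamp' metadata key; the class name and key choice are illustrative, and the runtime provider is deliberately left out:

import java.util.Collections;
import java.util.List;
import java.util.Map;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.connector.ChangelogMode;
import org.apache.flink.table.connector.sink.DynamicTableSink;
import org.apache.flink.table.connector.sink.abilities.SupportsWritingMetadata;
import org.apache.flink.table.types.DataType;

/** Sink accepting a writable 'timestamp' metadata column. */
public class MetadataAwareSink implements DynamicTableSink, SupportsWritingMetadata {

    @Override
    public Map<String, DataType> listWritableMetadata() {
        // The keys and types declared here drive the reordering/casting projection above.
        return Collections.singletonMap("timestamp", DataTypes.TIMESTAMP_WITH_LOCAL_TIME_ZONE(3));
    }

    @Override
    public void applyWritableMetadata(List<String> metadataKeys, DataType consumedDataType) {
        // The planner reports which metadata keys the query persists and the final
        // consumed row type: physical columns first, metadata columns appended.
    }

    @Override
    public ChangelogMode getChangelogMode(ChangelogMode requestedMode) {
        return ChangelogMode.insertOnly();
    }

    @Override
    public SinkRuntimeProvider getSinkRuntimeProvider(Context context) {
        throw new UnsupportedOperationException("runtime provider omitted in this sketch");
    }

    @Override
    public DynamicTableSink copy() {
        return new MetadataAwareSink();
    }

    @Override
    public String asSummaryString() {
        return "metadata-aware sink";
    }
}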