Use of org.apache.flink.table.types.DataType in project flink by apache.
From the class TableSchemaTest, method testPhysicalRowDataType:
@Test
public void testPhysicalRowDataType() {
final TableSchema schema =
        TableSchema.builder()
                .add(TableColumn.physical("f0", DataTypes.BIGINT()))
                .add(TableColumn.metadata("f1", DataTypes.BIGINT(), true))
                .add(TableColumn.metadata("f2", DataTypes.BIGINT(), false))
                .add(TableColumn.physical("f3", DataTypes.STRING()))
                .add(TableColumn.computed("f4", DataTypes.BIGINT(), "f0 + 1"))
                .add(TableColumn.metadata("f5", DataTypes.BIGINT(), false))
                .build();
final DataType expectedDataType =
        DataTypes.ROW(
                        DataTypes.FIELD("f0", DataTypes.BIGINT()),
                        DataTypes.FIELD("f3", DataTypes.STRING()))
                .notNull();
assertThat(schema.toPhysicalRowDataType(), equalTo(expectedDataType));
}
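Only physical columns survive this projection: the metadata columns (f1, f2, f5) and the computed column (f4) are dropped, leaving f0 and f3. A minimal sketch of how the resulting field names could be inspected through the logical type; the helper class and method here are hypothetical, not part of the Flink test:

import java.util.List;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.types.logical.RowType;

public final class PhysicalFields {
    // Hypothetical helper: extracts the field names of the physical row type.
    public static List<String> physicalFieldNames(TableSchema schema) {
        // toPhysicalRowDataType() yields a ROW whose logical type is a RowType;
        // for the schema above this returns ["f0", "f3"].
        return ((RowType) schema.toPhysicalRowDataType().getLogicalType()).getFieldNames();
    }
}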
Use of org.apache.flink.table.types.DataType in project flink by apache.
From the class TableSchemaTest, method testPersistedRowDataType:
@Test
public void testPersistedRowDataType() {
final TableSchema schema =
        TableSchema.builder()
                .add(TableColumn.physical("f0", DataTypes.BIGINT()))
                .add(TableColumn.metadata("f1", DataTypes.BIGINT(), true))
                .add(TableColumn.metadata("f2", DataTypes.BIGINT(), false))
                .add(TableColumn.physical("f3", DataTypes.STRING()))
                .add(TableColumn.computed("f4", DataTypes.BIGINT(), "f0 + 1"))
                .add(TableColumn.metadata("f5", DataTypes.BIGINT(), false))
                .build();
final DataType expectedDataType =
        DataTypes.ROW(
                        DataTypes.FIELD("f0", DataTypes.BIGINT()),
                        DataTypes.FIELD("f2", DataTypes.BIGINT()),
                        DataTypes.FIELD("f3", DataTypes.STRING()),
                        DataTypes.FIELD("f5", DataTypes.BIGINT()))
                .notNull();
assertThat(schema.toPersistedRowDataType(), equalTo(expectedDataType));
}
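The persisted row type keeps the physical columns plus the metadata columns declared with isVirtual = false (f2, f5), because those are written back to the external system; virtual metadata (f1) and computed columns (f4) are excluded. A small sketch contrasting the three projections on a reduced schema, assuming the same deprecated TableSchema API used by this test:

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.TableColumn;
import org.apache.flink.table.api.TableSchema;

public final class SchemaProjections {
    public static void main(String[] args) {
        final TableSchema schema =
                TableSchema.builder()
                        .add(TableColumn.physical("f0", DataTypes.BIGINT()))
                        .add(TableColumn.metadata("f1", DataTypes.BIGINT(), true)) // virtual
                        .add(TableColumn.computed("f2", DataTypes.BIGINT(), "f0 + 1"))
                        .build();
        // Physical and persisted types contain only f0 here; the full row type
        // additionally contains the virtual metadata column f1 and computed f2.
        System.out.println(schema.toPhysicalRowDataType());
        System.out.println(schema.toPersistedRowDataType());
        System.out.println(schema.toRowDataType());
    }
}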
Use of org.apache.flink.table.types.DataType in project flink by apache.
From the class CommonExecLegacySink, method translateToTransformation:
/**
* Translates {@link TableSink} into a {@link Transformation}.
*
* @param withChangeFlag Set to true to emit records with change flags.
* @return The {@link Transformation} that corresponds to the translated {@link TableSink}.
*/
@SuppressWarnings("unchecked")
private Transformation<T> translateToTransformation(
        PlannerBase planner, ExecNodeConfig config, boolean withChangeFlag) {
// if no change flags are requested, verify table is an insert-only (append-only) table.
if (!withChangeFlag && needRetraction) {
    throw new TableException(
            "Table is not an append-only table. "
                    + "Use the toRetractStream() in order to handle add and retract messages.");
}
final ExecEdge inputEdge = getInputEdges().get(0);
final Transformation<RowData> inputTransform = (Transformation<RowData>) inputEdge.translateToPlan(planner);
final RowType inputRowType = (RowType) inputEdge.getOutputType();
final RowType convertedInputRowType = checkAndConvertInputTypeIfNeeded(inputRowType);
final DataType resultDataType = tableSink.getConsumedDataType();
if (CodeGenUtils.isInternalClass(resultDataType)) {
return (Transformation<T>) inputTransform;
} else {
final int rowtimeIndex = getRowtimeIndex(inputRowType);
final DataType physicalOutputType =
        TableSinkUtils.inferSinkPhysicalDataType(
                resultDataType, convertedInputRowType, withChangeFlag);
final TypeInformation<T> outputTypeInfo =
        SinkCodeGenerator.deriveSinkOutputTypeInfo(
                tableSink, physicalOutputType, withChangeFlag);
final CodeGenOperatorFactory<T> converterOperator =
        SinkCodeGenerator.generateRowConverterOperator(
                new CodeGeneratorContext(config.getTableConfig()),
                convertedInputRowType,
                tableSink,
                physicalOutputType,
                withChangeFlag,
                "SinkConversion",
                rowtimeIndex);
final String description =
        "SinkConversion To " + resultDataType.getConversionClass().getSimpleName();
return ExecNodeUtil.createOneInputTransformation(
        inputTransform,
        createFormattedTransformationName(description, "SinkConversion", config),
        createFormattedTransformationDescription(description, config),
        converterOperator,
        outputTypeInfo,
        inputTransform.getParallelism());
}
}
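The branch on CodeGenUtils.isInternalClass decides whether any conversion operator is needed: a sink that already consumes Flink's internal RowData can take the input transformation as-is, while external conversion classes such as Row require a code-generated converter. A simplified stand-in for that check, offered as an illustration rather than the actual CodeGenUtils implementation:

import org.apache.flink.table.data.RowData;
import org.apache.flink.table.types.DataType;

public final class InternalClassCheck {
    // Hypothetical simplification: a DataType counts as "internal" for sink
    // purposes if its conversion class is Flink's internal RowData.
    public static boolean consumesInternalRows(DataType consumedDataType) {
        return RowData.class.isAssignableFrom(consumedDataType.getConversionClass());
    }
}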
Use of org.apache.flink.table.types.DataType in project flink by apache.
From the class WritingMetadataSpec, method apply:
@Override
public void apply(DynamicTableSink tableSink) {
if (tableSink instanceof SupportsWritingMetadata) {
DataType consumedDataType = TypeConversions.fromLogicalToDataType(consumedType);
((SupportsWritingMetadata) tableSink).applyWritableMetadata(metadataKeys, consumedDataType);
} else {
throw new TableException(
        String.format(
                "%s does not support SupportsWritingMetadata.",
                tableSink.getClass().getName()));
}
}
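WritingMetadataSpec simply forwards the accepted metadata keys and the consumed row type to the sink. For context, a skeletal sink honoring the SupportsWritingMetadata contract might look like the sketch below; the metadata key "timestamp" is illustrative, and the class is left abstract because a real connector would also implement the remaining DynamicTableSink methods:

import java.util.Collections;
import java.util.List;
import java.util.Map;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.connector.sink.DynamicTableSink;
import org.apache.flink.table.connector.sink.abilities.SupportsWritingMetadata;
import org.apache.flink.table.types.DataType;

abstract class MetadataAwareSink implements DynamicTableSink, SupportsWritingMetadata {

    private List<String> metadataKeys = Collections.emptyList();
    private DataType consumedDataType;

    @Override
    public Map<String, DataType> listWritableMetadata() {
        // Advertise one writable metadata column; the key is illustrative.
        return Collections.singletonMap("timestamp", DataTypes.TIMESTAMP(3).nullable());
    }

    @Override
    public void applyWritableMetadata(List<String> metadataKeys, DataType consumedDataType) {
        // The planner calls this with the accepted keys and the physical row
        // type extended by the corresponding metadata columns.
        this.metadataKeys = metadataKeys;
        this.consumedDataType = consumedDataType;
    }
}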
Use of org.apache.flink.table.types.DataType in project flink by apache.
From the class BatchExecLegacySink, method checkAndConvertInputTypeIfNeeded:
@Override
protected RowType checkAndConvertInputTypeIfNeeded(RowType inputRowType) {
final DataType resultDataType = tableSink.getConsumedDataType();
validateType(resultDataType);
return inputRowType;
}
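Here validateType guards against consumed types the batch legacy sink cannot handle, while the input row type itself passes through unchanged. Purely as an illustration of the kind of check such a guard could perform (the actual validateType logic is not shown in this snippet), a hypothetical validation might reject RAW fields in the consumed row:

import org.apache.flink.table.api.TableException;
import org.apache.flink.table.types.DataType;
import org.apache.flink.table.types.logical.LogicalType;
import org.apache.flink.table.types.logical.LogicalTypeRoot;

public final class SinkTypeChecks {
    // Hypothetical validation: reject RAW fields, which have no stable
    // external representation for a legacy sink to consume.
    public static void requireNoRawFields(DataType consumedDataType) {
        for (LogicalType child : consumedDataType.getLogicalType().getChildren()) {
            if (child.getTypeRoot() == LogicalTypeRoot.RAW) {
                throw new TableException("Unsupported RAW field in consumed type: " + child);
            }
        }
    }
}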