Use of org.apache.flink.table.catalog.Column in project flink by apache.
The class ElasticsearchDynamicSinkFactoryBase, method getPrimaryKeyLogicalTypesWithIndex.
List<LogicalTypeWithIndex> getPrimaryKeyLogicalTypesWithIndex(Context context) {
    DataType physicalRowDataType = context.getPhysicalRowDataType();
    int[] primaryKeyIndexes = context.getPrimaryKeyIndexes();
    if (primaryKeyIndexes.length != 0) {
        DataType pkDataType = Projection.of(primaryKeyIndexes).project(physicalRowDataType);
        ElasticsearchValidationUtils.validatePrimaryKey(pkDataType);
    }
    ResolvedSchema resolvedSchema = context.getCatalogTable().getResolvedSchema();
    return Arrays.stream(primaryKeyIndexes)
            .mapToObj(
                    index -> {
                        Optional<Column> column = resolvedSchema.getColumn(index);
                        if (!column.isPresent()) {
                            throw new IllegalStateException(
                                    String.format(
                                            "No primary key column found with index '%s'.",
                                            index));
                        }
                        LogicalType logicalType = column.get().getDataType().getLogicalType();
                        return new LogicalTypeWithIndex(index, logicalType);
                    })
            .collect(Collectors.toList());
}
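For context, here is a minimal, self-contained sketch of the ResolvedSchema lookup used above. The schema, column names, and primary-key indexes are hypothetical; in the factory, the real values come from Context#getCatalogTable() and Context#getPrimaryKeyIndexes().

import java.util.Arrays;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.catalog.ResolvedSchema;

public final class PrimaryKeyLookupSketch {
    public static void main(String[] args) {
        // Hypothetical schema standing in for the one the planner provides.
        ResolvedSchema resolvedSchema =
                ResolvedSchema.of(
                        Column.physical("id", DataTypes.BIGINT().notNull()),
                        Column.physical("region", DataTypes.STRING().notNull()),
                        Column.physical("payload", DataTypes.STRING()));
        int[] primaryKeyIndexes = {0, 1}; // (id, region), as the planner would report
        Arrays.stream(primaryKeyIndexes)
                .mapToObj(
                        index ->
                                resolvedSchema
                                        .getColumn(index)
                                        .orElseThrow(
                                                () ->
                                                        new IllegalStateException(
                                                                "No column at index " + index)))
                .map(column -> column.getDataType().getLogicalType())
                .forEach(System.out::println); // BIGINT NOT NULL, then the VARCHAR type
    }
}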
Use of org.apache.flink.table.catalog.Column in project flink by apache.
The class DynamicSinkUtils, method validateAndApplyMetadata.
private static void validateAndApplyMetadata(
        String tableDebugName,
        DynamicTableSink sink,
        ResolvedSchema schema,
        List<SinkAbilitySpec> sinkAbilitySpecs) {
    final List<Column> columns = schema.getColumns();
    final List<Integer> metadataColumns = extractPersistedMetadataColumns(schema);
    if (metadataColumns.isEmpty()) {
        return;
    }
    if (!(sink instanceof SupportsWritingMetadata)) {
        throw new ValidationException(String.format(
                "Table '%s' declares persistable metadata columns, but the underlying %s "
                        + "doesn't implement the %s interface. If the column should not "
                        + "be persisted, it can be declared with the VIRTUAL keyword.",
                tableDebugName,
                DynamicTableSink.class.getSimpleName(),
                SupportsWritingMetadata.class.getSimpleName()));
    }
    final Map<String, DataType> metadataMap =
            ((SupportsWritingMetadata) sink).listWritableMetadata();
    metadataColumns.forEach(pos -> {
        final MetadataColumn metadataColumn = (MetadataColumn) columns.get(pos);
        final String metadataKey =
                metadataColumn.getMetadataKey().orElse(metadataColumn.getName());
        final LogicalType metadataType = metadataColumn.getDataType().getLogicalType();
        final DataType expectedMetadataDataType = metadataMap.get(metadataKey);
        // check that metadata key is valid
        if (expectedMetadataDataType == null) {
            throw new ValidationException(String.format(
                    "Invalid metadata key '%s' in column '%s' of table '%s'. "
                            + "The %s class '%s' supports the following metadata keys for writing:\n%s",
                    metadataKey,
                    metadataColumn.getName(),
                    tableDebugName,
                    DynamicTableSink.class.getSimpleName(),
                    sink.getClass().getName(),
                    String.join("\n", metadataMap.keySet())));
        }
        // check that types are compatible
        if (!supportsExplicitCast(metadataType, expectedMetadataDataType.getLogicalType())) {
            if (metadataKey.equals(metadataColumn.getName())) {
                throw new ValidationException(String.format(
                        "Invalid data type for metadata column '%s' of table '%s'. "
                                + "The column cannot be declared as '%s' because the type must be "
                                + "castable to metadata type '%s'.",
                        metadataColumn.getName(),
                        tableDebugName,
                        metadataType,
                        expectedMetadataDataType.getLogicalType()));
            } else {
                throw new ValidationException(String.format(
                        "Invalid data type for metadata column '%s' with metadata key '%s' of table '%s'. "
                                + "The column cannot be declared as '%s' because the type must be "
                                + "castable to metadata type '%s'.",
                        metadataColumn.getName(),
                        metadataKey,
                        tableDebugName,
                        metadataType,
                        expectedMetadataDataType.getLogicalType()));
            }
        }
    });
    sinkAbilitySpecs.add(new WritingMetadataSpec(
            createRequiredMetadataKeys(schema, sink), createConsumedType(schema, sink)));
}
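As a rough illustration of the contract being validated here, the sketch below shows the sink side of SupportsWritingMetadata. The class name and metadata keys are made up, and a real sink would also implement the remaining DynamicTableSink methods.

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.connector.sink.DynamicTableSink;
import org.apache.flink.table.connector.sink.abilities.SupportsWritingMetadata;
import org.apache.flink.table.types.DataType;

// Hypothetical sink skeleton; only the metadata-related methods are shown.
public abstract class HeaderAwareSink implements DynamicTableSink, SupportsWritingMetadata {

    @Override
    public Map<String, DataType> listWritableMetadata() {
        // These keys are what validateAndApplyMetadata matches metadata columns against.
        Map<String, DataType> metadata = new HashMap<>();
        metadata.put("timestamp", DataTypes.TIMESTAMP_LTZ(3));
        metadata.put("headers", DataTypes.MAP(DataTypes.STRING(), DataTypes.BYTES()));
        return metadata;
    }

    @Override
    public void applyWritableMetadata(List<String> metadataKeys, DataType consumedDataType) {
        // Invoked later through the WritingMetadataSpec created above, with only the
        // keys that are actually persisted and the final consumed row type.
    }
}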
Use of org.apache.flink.table.catalog.Column in project flink by apache.
The class CatalogSchemaTable, method getRowType.
@Override
public RelDataType getRowType(RelDataTypeFactory typeFactory) {
    final FlinkTypeFactory flinkTypeFactory = (FlinkTypeFactory) typeFactory;
    final ResolvedSchema schema = contextResolvedTable.getResolvedSchema();
    // The following block is a workaround to support tables defined by
    // TableEnvironment.connect() whose actual table sources implement
    // DefinedProctimeAttribute/DefinedRowtimeAttributes. It should be removed
    // once DefinedProctimeAttribute/DefinedRowtimeAttributes are removed.
    Optional<TableSource<?>> sourceOpt = findAndCreateTableSource();
    if (isStreamingMode
            && sourceOpt.isPresent()
            && schema.getColumns().stream().allMatch(Column::isPhysical)
            && schema.getWatermarkSpecs().isEmpty()) {
        TableSchema tableSchema = TableSchema.fromResolvedSchema(schema);
        TableSource<?> source = sourceOpt.get();
        if (TableSourceValidation.hasProctimeAttribute(source)
                || TableSourceValidation.hasRowtimeAttribute(source)) {
            // If the table is defined by TableEnvironment.connect() and uses the
            // legacy proctime and rowtime descriptors, the TableSchema should fall
            // back to ConnectorCatalogTable#calculateSourceSchema.
            tableSchema = ConnectorCatalogTable.calculateSourceSchema(source, false);
        }
        return TableSourceUtil.getSourceRowType(
                flinkTypeFactory, tableSchema, scala.Option.empty(), true);
    }
    final List<String> fieldNames = schema.getColumnNames();
    final List<LogicalType> fieldTypes =
            schema.getColumnDataTypes().stream()
                    .map(DataType::getLogicalType)
                    .map(PlannerTypeUtils::removeLegacyTypes)
                    .collect(Collectors.toList());
    return flinkTypeFactory.buildRelNodeRowType(fieldNames, fieldTypes);
}
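To make the all-physical check concrete, here is a small standalone sketch; the schema and column names are invented for illustration.

import java.util.stream.Collectors;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.catalog.ResolvedSchema;
import org.apache.flink.table.types.DataType;

public final class PhysicalColumnCheckSketch {
    public static void main(String[] args) {
        // Hypothetical schema mixing a physical column and a persisted metadata column.
        ResolvedSchema schema =
                ResolvedSchema.of(
                        Column.physical("id", DataTypes.BIGINT()),
                        Column.metadata("ts", DataTypes.TIMESTAMP_LTZ(3), "timestamp", false));
        // The legacy-source workaround above only applies when this is true.
        System.out.println(schema.getColumns().stream().allMatch(Column::isPhysical)); // false
        // The regular path maps each column's data type to its logical type.
        System.out.println(
                schema.getColumnDataTypes().stream()
                        .map(DataType::getLogicalType)
                        .collect(Collectors.toList()));
    }
}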
Use of org.apache.flink.table.catalog.Column in project flink by apache.
The class DynamicSourceUtils, method pushGeneratedProjection.
/**
 * Creates a projection that adds computed columns and finalizes the table schema.
 */
private static void pushGeneratedProjection(FlinkRelBuilder relBuilder, ResolvedSchema schema) {
    final ExpressionConverter converter = new ExpressionConverter(relBuilder);
    final List<RexNode> projection =
            schema.getColumns().stream()
                    .map(c -> {
                        if (c instanceof ComputedColumn) {
                            final ComputedColumn computedColumn = (ComputedColumn) c;
                            return computedColumn.getExpression().accept(converter);
                        } else {
                            return relBuilder.field(c.getName());
                        }
                    })
                    .collect(Collectors.toList());
    relBuilder.projectNamed(
            projection,
            schema.getColumns().stream().map(Column::getName).collect(Collectors.toList()),
            true);
}
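Because constructing a ComputedColumn requires a ResolvedExpression produced by the planner, the sketch below takes an already-resolved schema and simply distinguishes the two branches of the projection above. The helper class and method names are ours, not Flink's.

import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.catalog.Column.ComputedColumn;
import org.apache.flink.table.catalog.ResolvedSchema;

final class ProjectionExplainer {
    // Hypothetical helper mirroring the branch logic of pushGeneratedProjection.
    static void explain(ResolvedSchema schema) {
        for (Column c : schema.getColumns()) {
            if (c instanceof ComputedColumn) {
                // Computed columns are materialized from their resolved expression.
                System.out.println(c.getName() + " <- "
                        + ((ComputedColumn) c).getExpression().asSummaryString());
            } else {
                // Physical and metadata columns are passed through as field references.
                System.out.println(c.getName() + " (field reference)");
            }
        }
    }
}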
Use of org.apache.flink.table.catalog.Column in project flink by apache.
The class DataStreamJavaITCase, method testFromAndToChangelogStreamEventTime.
@Test
public void testFromAndToChangelogStreamEventTime() throws Exception {
    final StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);
    final DataStream<Tuple3<Long, Integer, String>> dataStream = getWatermarkedDataStream();
    final DataStream<Row> changelogStream =
            dataStream.map(t -> Row.ofKind(RowKind.INSERT, t.f1, t.f2))
                    .returns(Types.ROW(Types.INT, Types.STRING));
    // derive physical columns and add a rowtime
    final Table table = tableEnv.fromChangelogStream(
            changelogStream,
            Schema.newBuilder()
                    .columnByMetadata("rowtime", TIMESTAMP_LTZ(3))
                    .columnByExpression("computed", $("f1").upperCase())
                    .watermark("rowtime", sourceWatermark())
                    .build());
    tableEnv.createTemporaryView("t", table);
    // access and reorder columns
    final Table reordered = tableEnv.sqlQuery("SELECT computed, rowtime, f0 FROM t");
    // write out the rowtime column with a fully declared schema
    final DataStream<Row> result = tableEnv.toChangelogStream(
            reordered,
            Schema.newBuilder()
                    .column("f1", STRING())
                    .columnByMetadata("rowtime", TIMESTAMP_LTZ(3))
                    .columnByExpression("ignored", $("f1").upperCase())
                    .column("f0", INT())
                    .build());
    // test event-time window and field access
    testResult(
            result.keyBy(k -> k.getField("f1"))
                    .window(TumblingEventTimeWindows.of(Time.milliseconds(5)))
                    .<Row>apply((key, window, input, out) -> {
                        int sum = 0;
                        for (Row row : input) {
                            sum += row.<Integer>getFieldAs("f0");
                        }
                        out.collect(Row.of(key, sum));
                    })
                    .returns(Types.ROW(Types.STRING, Types.INT)),
            Row.of("A", 47),
            Row.of("C", 1000),
            Row.of("C", 1000));
}
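One could append a small helper like the following to inspect how the builder-declared columns resolve, e.g. by calling it with the "table" from the test above. The class and method are hypothetical debugging aids, not part of the original test.

import org.apache.flink.table.api.Table;
import org.apache.flink.table.catalog.Column;

final class ResolvedColumnPrinter {
    // Hypothetical helper: prints each resolved column and whether it is physical.
    static void print(Table table) {
        for (Column column : table.getResolvedSchema().getColumns()) {
            // Prints entries such as "`rowtime` TIMESTAMP_LTZ(3) METADATA" plus the flag.
            System.out.println(column.asSummaryString() + " physical=" + column.isPhysical());
        }
    }
}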