Use of org.apache.flink.table.catalog.ResolvedSchema in project flink by Apache.
The class DynamicSinkUtils, method convertCollectToRel.
/**
* Converts a {@link TableResult#collect()} sink to a {@link RelNode}.
*/
public static RelNode convertCollectToRel(
        FlinkRelBuilder relBuilder,
        RelNode input,
        CollectModifyOperation collectModifyOperation,
        ReadableConfig configuration,
        ClassLoader classLoader) {
    final DataTypeFactory dataTypeFactory =
            unwrapContext(relBuilder).getCatalogManager().getDataTypeFactory();
    final ResolvedSchema childSchema = collectModifyOperation.getChild().getResolvedSchema();
    final ResolvedSchema schema =
            ResolvedSchema.physical(childSchema.getColumnNames(), childSchema.getColumnDataTypes());
    final ResolvedCatalogTable catalogTable =
            new ResolvedCatalogTable(
                    new ExternalCatalogTable(Schema.newBuilder().fromResolvedSchema(schema).build()),
                    schema);
    final ContextResolvedTable contextResolvedTable =
            ContextResolvedTable.anonymous("collect", catalogTable);
    final DataType consumedDataType = fixCollectDataType(dataTypeFactory, schema);
    final String zone = configuration.get(TableConfigOptions.LOCAL_TIME_ZONE);
    final ZoneId zoneId =
            TableConfigOptions.LOCAL_TIME_ZONE.defaultValue().equals(zone)
                    ? ZoneId.systemDefault()
                    : ZoneId.of(zone);
    final CollectDynamicSink tableSink =
            new CollectDynamicSink(
                    contextResolvedTable.getIdentifier(),
                    consumedDataType,
                    configuration.get(CollectSinkOperatorFactory.MAX_BATCH_SIZE),
                    configuration.get(CollectSinkOperatorFactory.SOCKET_TIMEOUT),
                    classLoader,
                    zoneId,
                    configuration.get(ExecutionConfigOptions.TABLE_EXEC_LEGACY_CAST_BEHAVIOUR)
                            .isEnabled());
    collectModifyOperation.setSelectResultProvider(tableSink.getSelectResultProvider());
    collectModifyOperation.setConsumedDataType(consumedDataType);
    return convertSinkToRel(
            relBuilder,
            input,
            Collections.emptyMap(), // dynamicOptions
            contextResolvedTable,
            Collections.emptyMap(), // staticPartitions
            false,
            tableSink);
}
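For context, a minimal sketch of the user-facing path that ends up in this conversion: TableResult#collect() on an arbitrary query. The VALUES query and the local TableEnvironment are illustrative only, not part of the snippet above.

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.api.TableResult;
import org.apache.flink.types.Row;
import org.apache.flink.util.CloseableIterator;

public class CollectExample {
    public static void main(String[] args) throws Exception {
        TableEnvironment tableEnv =
                TableEnvironment.create(EnvironmentSettings.inStreamingMode());
        TableResult result =
                tableEnv.executeSql("SELECT * FROM (VALUES (1, 'a'), (2, 'b')) AS t(id, name)");
        // collect() is the call that the planner lowers through
        // DynamicSinkUtils#convertCollectToRel into a CollectDynamicSink.
        try (CloseableIterator<Row> it = result.collect()) {
            while (it.hasNext()) {
                System.out.println(it.next());
            }
        }
    }
}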
Use of org.apache.flink.table.catalog.ResolvedSchema in project flink by Apache.
The class CatalogSchemaTable, method getRowType.
@Override
public RelDataType getRowType(RelDataTypeFactory typeFactory) {
    final FlinkTypeFactory flinkTypeFactory = (FlinkTypeFactory) typeFactory;
    final ResolvedSchema schema = contextResolvedTable.getResolvedSchema();
    // The following block is a workaround to support tables defined by
    // TableEnvironment.connect() whose actual table sources implement
    // DefinedProctimeAttribute/DefinedRowtimeAttributes. It should be removed
    // once DefinedProctimeAttribute/DefinedRowtimeAttributes are removed.
    Optional<TableSource<?>> sourceOpt = findAndCreateTableSource();
    if (isStreamingMode
            && sourceOpt.isPresent()
            && schema.getColumns().stream().allMatch(Column::isPhysical)
            && schema.getWatermarkSpecs().isEmpty()) {
        TableSchema tableSchema = TableSchema.fromResolvedSchema(schema);
        TableSource<?> source = sourceOpt.get();
        if (TableSourceValidation.hasProctimeAttribute(source)
                || TableSourceValidation.hasRowtimeAttribute(source)) {
            // If the table is defined by TableEnvironment.connect() and uses the
            // legacy proctime and rowtime descriptors, the TableSchema should fall
            // back to ConnectorCatalogTable#calculateSourceSchema.
            tableSchema = ConnectorCatalogTable.calculateSourceSchema(source, false);
        }
        return TableSourceUtil.getSourceRowType(flinkTypeFactory, tableSchema, scala.Option.empty(), true);
    }
    final List<String> fieldNames = schema.getColumnNames();
    final List<LogicalType> fieldTypes =
            schema.getColumnDataTypes().stream()
                    .map(DataType::getLogicalType)
                    .map(PlannerTypeUtils::removeLegacyTypes)
                    .collect(Collectors.toList());
    return flinkTypeFactory.buildRelNodeRowType(fieldNames, fieldTypes);
}
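To illustrate the non-legacy branch above, here is a small, self-contained sketch (using only the public ResolvedSchema API) of how column names and data types are read from a purely physical schema; the column names are made up for the example.

import java.util.List;

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.catalog.ResolvedSchema;
import org.apache.flink.table.types.DataType;

public class ResolvedSchemaExample {
    public static void main(String[] args) {
        // Physical columns only, mirroring the fallback branch of getRowType.
        ResolvedSchema schema =
                ResolvedSchema.of(
                        Column.physical("id", DataTypes.BIGINT()),
                        Column.physical("name", DataTypes.STRING()));
        List<String> fieldNames = schema.getColumnNames();
        List<DataType> fieldTypes = schema.getColumnDataTypes();
        System.out.println(fieldNames); // [id, name]
        System.out.println(fieldTypes); // [BIGINT, STRING]
    }
}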
Use of org.apache.flink.table.catalog.ResolvedSchema in project flink by Apache.
The class DynamicSourceUtils, method pushGeneratedProjection.
/**
* Creates a projection that adds computed columns and finalizes the table schema.
*/
private static void pushGeneratedProjection(FlinkRelBuilder relBuilder, ResolvedSchema schema) {
    final ExpressionConverter converter = new ExpressionConverter(relBuilder);
    final List<RexNode> projection =
            schema.getColumns().stream()
                    .map(c -> {
                        if (c instanceof ComputedColumn) {
                            final ComputedColumn computedColumn = (ComputedColumn) c;
                            return computedColumn.getExpression().accept(converter);
                        } else {
                            return relBuilder.field(c.getName());
                        }
                    })
                    .collect(Collectors.toList());
    relBuilder.projectNamed(
            projection,
            schema.getColumns().stream().map(Column::getName).collect(Collectors.toList()),
            true);
}
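As an illustration of the kind of schema this projection handles, the following sketch builds a table schema with a computed column via the public Schema builder; the column names and the expression are invented for the example.

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.Schema;

public class ComputedColumnSchemaExample {
    public static void main(String[] args) {
        // Once resolved, "total" becomes a ComputedColumn, which the projection
        // above evaluates on top of the physical scan.
        Schema schema =
                Schema.newBuilder()
                        .column("price", DataTypes.DECIMAL(10, 2))
                        .column("quantity", DataTypes.INT())
                        .columnByExpression("total", "price * quantity")
                        .build();
        System.out.println(schema);
    }
}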
Use of org.apache.flink.table.catalog.ResolvedSchema in project flink by Apache.
The class DynamicSourceUtils, method prepareDynamicSource.
/**
* Prepares the given {@link DynamicTableSource}. It checks whether the source is compatible with
* the given schema and applies initial parameters.
*/
public static void prepareDynamicSource(
        String tableDebugName,
        ResolvedCatalogTable table,
        DynamicTableSource source,
        boolean isBatchMode,
        ReadableConfig config) {
    final ResolvedSchema schema = table.getResolvedSchema();
    validateAndApplyMetadata(tableDebugName, schema, source);
    if (source instanceof ScanTableSource) {
        validateScanSource(tableDebugName, schema, (ScanTableSource) source, isBatchMode, config);
    }
    // A lookup table source is validated in the LookupJoin node.
}
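For illustration, a minimal sketch of a schema whose metadata column would be checked by validateAndApplyMetadata; the "timestamp" metadata key assumes a connector (such as Kafka) that actually exposes it.

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.Schema;

public class MetadataColumnSchemaExample {
    public static void main(String[] args) {
        // "ts" is a virtual metadata column; validation verifies that the source
        // implements SupportsReadingMetadata and offers the "timestamp" key.
        Schema schema =
                Schema.newBuilder()
                        .column("user_id", DataTypes.BIGINT())
                        .columnByMetadata("ts", DataTypes.TIMESTAMP_LTZ(3), "timestamp", true)
                        .build();
        System.out.println(schema);
    }
}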
Use of org.apache.flink.table.catalog.ResolvedSchema in project flink by Apache.
The class RawFormatFactoryTest, method testInvalidSchema.
@Test
public void testInvalidSchema() {
    ResolvedSchema invalidSchema =
            ResolvedSchema.of(
                    Column.physical("f0", DataTypes.STRING()),
                    Column.physical("f1", DataTypes.BIGINT()));
    String expectedError =
            "The 'raw' format only supports single physical column. "
                    + "However the defined schema contains multiple physical columns: "
                    + "[`f0` STRING, `f1` BIGINT]";
    try {
        createDeserializationSchema(invalidSchema, getBasicOptions());
        fail();
    } catch (Exception e) {
        assertThat(e, hasMessage(equalTo(expectedError)));
    }
    try {
        createSerializationSchema(invalidSchema, getBasicOptions());
        fail();
    } catch (Exception e) {
        assertThat(e, hasMessage(equalTo(expectedError)));
    }
}
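For contrast with the failing schema above, a sketch of the shape the 'raw' format does accept, a single physical column; the column name is arbitrary.

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.catalog.ResolvedSchema;

public class RawFormatValidSchemaExample {
    public static void main(String[] args) {
        // Exactly one physical column, so the 'raw' format validation passes.
        ResolvedSchema validSchema = ResolvedSchema.of(Column.physical("f0", DataTypes.STRING()));
        System.out.println(validSchema);
    }
}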