Use of org.apache.flink.table.types.DataType in project flink by apache.
The class TableFormatFactoryBase, method deriveSchema.
/**
* Finds the table schema that can be used for a format schema (without time attributes and
* generated columns).
*/
public static TableSchema deriveSchema(Map<String, String> properties) {
    final DescriptorProperties descriptorProperties = new DescriptorProperties();
    descriptorProperties.putProperties(properties);

    final TableSchema.Builder builder = TableSchema.builder();
    final TableSchema tableSchema = descriptorProperties.getTableSchema(SCHEMA);
    for (int i = 0; i < tableSchema.getFieldCount(); i++) {
        final TableColumn tableColumn = tableSchema.getTableColumns().get(i);
        final String fieldName = tableColumn.getName();
        final DataType dataType = tableColumn.getType();
        if (!tableColumn.isPhysical()) {
            // skip non-physical columns
            continue;
        }
        final boolean isProctime =
                descriptorProperties
                        .getOptionalBoolean(SCHEMA + '.' + i + '.' + SCHEMA_PROCTIME)
                        .orElse(false);
        final String timestampKey = SCHEMA + '.' + i + '.' + ROWTIME_TIMESTAMPS_TYPE;
        final boolean isRowtime = descriptorProperties.containsKey(timestampKey);
        if (!isProctime && !isRowtime) {
            // check for aliasing
            final String aliasName =
                    descriptorProperties
                            .getOptionalString(SCHEMA + '.' + i + '.' + SCHEMA_FROM)
                            .orElse(fieldName);
            builder.field(aliasName, dataType);
        } else if (isRowtime
                && descriptorProperties.isValue(timestampKey, ROWTIME_TIMESTAMPS_TYPE_VALUE_FROM_FIELD)) {
            // only use the rowtime attribute if it references a field
            final String aliasName =
                    descriptorProperties.getString(SCHEMA + '.' + i + '.' + ROWTIME_TIMESTAMPS_FROM);
            builder.field(aliasName, dataType);
        }
    }
    return builder.build();
}
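For context, a minimal sketch of calling deriveSchema directly. The property keys follow the schema.#.name / schema.#.data-type / schema.#.proctime convention implied by the constants above; the exact key names are an assumption and may differ between Flink versions.

import java.util.HashMap;
import java.util.Map;

import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.factories.TableFormatFactoryBase;

public class DeriveSchemaExample {
    public static void main(String[] args) {
        // Hypothetical descriptor properties for a two-column schema where the
        // second column is a proctime attribute.
        final Map<String, String> properties = new HashMap<>();
        properties.put("schema.0.name", "user_id");
        properties.put("schema.0.data-type", "BIGINT");
        properties.put("schema.1.name", "proc");
        properties.put("schema.1.data-type", "TIMESTAMP(3)");
        properties.put("schema.1.proctime", "true");

        // The proctime column is skipped by the loop above, so the derived
        // format schema contains only 'user_id'.
        final TableSchema formatSchema = TableFormatFactoryBase.deriveSchema(properties);
        System.out.println(formatSchema);
    }
}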
Use of org.apache.flink.table.types.DataType in project flink by apache.
The class AndArgumentTypeStrategy, method inferArgumentType.
@Override
public Optional<DataType> inferArgumentType(
        CallContext callContext, int argumentPos, boolean throwOnFailure) {
    final DataType actualDataType = callContext.getArgumentDataTypes().get(argumentPos);
    final LogicalType actualType = actualDataType.getLogicalType();

    Optional<DataType> closestDataType = Optional.empty();
    for (ArgumentTypeStrategy strategy : argumentStrategies) {
        final Optional<DataType> inferredDataType =
                strategy.inferArgumentType(callContext, argumentPos, throwOnFailure);
        // argument type does not match at all
        if (!inferredDataType.isPresent()) {
            return Optional.empty();
        }
        final LogicalType inferredType = inferredDataType.get().getLogicalType();
        // a more specific, casted argument type is available
        if (!supportsAvoidingCast(actualType, inferredType) && !closestDataType.isPresent()) {
            closestDataType = inferredDataType;
        }
    }

    if (closestDataType.isPresent()) {
        return closestDataType;
    }
    return Optional.of(actualDataType);
}
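A sketch of how this strategy is usually obtained in practice. InputTypeStrategies.and(...) is the factory behind AndArgumentTypeStrategy; the concrete conjunction below (literal and numeric) is an illustrative choice, not taken from the snippet.

import org.apache.flink.table.types.inference.ArgumentTypeStrategy;
import org.apache.flink.table.types.inference.InputTypeStrategies;
import org.apache.flink.table.types.inference.InputTypeStrategy;
import org.apache.flink.table.types.logical.LogicalTypeFamily;

public class AndStrategyExample {
    // The argument must satisfy ALL child strategies: it has to be a literal
    // AND belong to the NUMERIC type family. Each child strategy is probed in
    // the loop shown above; if any of them returns Optional.empty(), the
    // whole conjunction fails.
    static final ArgumentTypeStrategy LITERAL_NUMERIC =
            InputTypeStrategies.and(
                    InputTypeStrategies.LITERAL,
                    InputTypeStrategies.logical(LogicalTypeFamily.NUMERIC));

    // Wrapped into an input strategy for a single-argument function.
    static final InputTypeStrategy INPUT = InputTypeStrategies.sequence(LITERAL_NUMERIC);
}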
Use of org.apache.flink.table.types.DataType in project flink by apache.
The class DynamicSinkUtils, method validateAndApplyMetadata.
private static void validateAndApplyMetadata(
        String tableDebugName,
        DynamicTableSink sink,
        ResolvedSchema schema,
        List<SinkAbilitySpec> sinkAbilitySpecs) {
    final List<Column> columns = schema.getColumns();
    final List<Integer> metadataColumns = extractPersistedMetadataColumns(schema);
    if (metadataColumns.isEmpty()) {
        return;
    }

    if (!(sink instanceof SupportsWritingMetadata)) {
        throw new ValidationException(String.format(
                "Table '%s' declares persistable metadata columns, but the underlying %s "
                        + "doesn't implement the %s interface. If the column should not "
                        + "be persisted, it can be declared with the VIRTUAL keyword.",
                tableDebugName,
                DynamicTableSink.class.getSimpleName(),
                SupportsWritingMetadata.class.getSimpleName()));
    }

    final Map<String, DataType> metadataMap =
            ((SupportsWritingMetadata) sink).listWritableMetadata();
    metadataColumns.forEach(pos -> {
        final MetadataColumn metadataColumn = (MetadataColumn) columns.get(pos);
        final String metadataKey = metadataColumn.getMetadataKey().orElse(metadataColumn.getName());
        final LogicalType metadataType = metadataColumn.getDataType().getLogicalType();
        final DataType expectedMetadataDataType = metadataMap.get(metadataKey);
        // check that metadata key is valid
        if (expectedMetadataDataType == null) {
            throw new ValidationException(String.format(
                    "Invalid metadata key '%s' in column '%s' of table '%s'. "
                            + "The %s class '%s' supports the following metadata keys for writing:\n%s",
                    metadataKey, metadataColumn.getName(), tableDebugName,
                    DynamicTableSink.class.getSimpleName(), sink.getClass().getName(),
                    String.join("\n", metadataMap.keySet())));
        }
        // check that types are compatible
        if (!supportsExplicitCast(metadataType, expectedMetadataDataType.getLogicalType())) {
            if (metadataKey.equals(metadataColumn.getName())) {
                throw new ValidationException(String.format(
                        "Invalid data type for metadata column '%s' of table '%s'. "
                                + "The column cannot be declared as '%s' because the type must be "
                                + "castable to metadata type '%s'.",
                        metadataColumn.getName(), tableDebugName, metadataType,
                        expectedMetadataDataType.getLogicalType()));
            } else {
                throw new ValidationException(String.format(
                        "Invalid data type for metadata column '%s' with metadata key '%s' of table '%s'. "
                                + "The column cannot be declared as '%s' because the type must be "
                                + "castable to metadata type '%s'.",
                        metadataColumn.getName(), metadataKey, tableDebugName, metadataType,
                        expectedMetadataDataType.getLogicalType()));
            }
        }
    });

    sinkAbilitySpecs.add(new WritingMetadataSpec(
            createRequiredMetadataKeys(schema, sink), createConsumedType(schema, sink)));
}
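To make the validation concrete, a minimal sketch of a sink that would satisfy it. The class name and the single 'timestamp' key are hypothetical; only listWritableMetadata() is filled in, all other methods stay abstract.

import java.util.HashMap;
import java.util.Map;

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.connector.sink.DynamicTableSink;
import org.apache.flink.table.connector.sink.abilities.SupportsWritingMetadata;
import org.apache.flink.table.types.DataType;

// Hypothetical sink. With this declaration, a column such as
//   ts TIMESTAMP_LTZ(3) METADATA FROM 'timestamp'
// resolves against the 'timestamp' key and passes both checks above.
public abstract class ExampleMetadataSink
        implements DynamicTableSink, SupportsWritingMetadata {

    @Override
    public Map<String, DataType> listWritableMetadata() {
        final Map<String, DataType> metadata = new HashMap<>();
        metadata.put("timestamp", DataTypes.TIMESTAMP_WITH_LOCAL_TIME_ZONE(3));
        return metadata;
    }
}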
Use of org.apache.flink.table.types.DataType in project flink by apache.
The class DynamicSinkUtils, method createConsumedType.
/**
 * Returns the {@link RowType} that a sink should consume as the output from the runtime.
 *
 * <p>The format looks as follows: {@code PHYSICAL COLUMNS + PERSISTED METADATA COLUMNS}
 */
private static RowType createConsumedType(ResolvedSchema schema, DynamicTableSink sink) {
    final Map<String, DataType> metadataMap = extractMetadataMap(sink);

    final Stream<RowField> physicalFields =
            schema.getColumns().stream()
                    .filter(Column::isPhysical)
                    .map(c -> new RowField(c.getName(), c.getDataType().getLogicalType()));

    final Stream<RowField> metadataFields =
            createRequiredMetadataKeys(schema, sink).stream()
                    .map(k -> new RowField(k, metadataMap.get(k).getLogicalType()));

    final List<RowField> rowFields =
            Stream.concat(physicalFields, metadataFields).collect(Collectors.toList());

    return new RowType(false, rowFields);
}
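As an illustration of the resulting layout, the consumed type for a hypothetical table with physical columns (id BIGINT, name STRING) and one persisted metadata column 'timestamp', spelled out by hand:

import java.util.Arrays;

import org.apache.flink.table.types.logical.BigIntType;
import org.apache.flink.table.types.logical.RowType;
import org.apache.flink.table.types.logical.RowType.RowField;
import org.apache.flink.table.types.logical.TimestampType;
import org.apache.flink.table.types.logical.VarCharType;

public class ConsumedTypeExample {
    // PHYSICAL COLUMNS first, PERSISTED METADATA COLUMNS appended at the end,
    // exactly as Stream.concat(physicalFields, metadataFields) orders them.
    static final RowType CONSUMED =
            new RowType(
                    false, // the consumed row itself is never nullable
                    Arrays.asList(
                            new RowField("id", new BigIntType()),
                            new RowField("name", new VarCharType(VarCharType.MAX_LENGTH)),
                            new RowField("timestamp", new TimestampType(3))));
}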
Use of org.apache.flink.table.types.DataType in project flink by apache.
The class DynamicSinkUtils, method convertCollectToRel.
/**
 * Converts a {@link TableResult#collect()} sink to a {@link RelNode}.
 */
public static RelNode convertCollectToRel(
        FlinkRelBuilder relBuilder,
        RelNode input,
        CollectModifyOperation collectModifyOperation,
        ReadableConfig configuration,
        ClassLoader classLoader) {
    final DataTypeFactory dataTypeFactory =
            unwrapContext(relBuilder).getCatalogManager().getDataTypeFactory();
    final ResolvedSchema childSchema = collectModifyOperation.getChild().getResolvedSchema();
    final ResolvedSchema schema =
            ResolvedSchema.physical(childSchema.getColumnNames(), childSchema.getColumnDataTypes());
    final ResolvedCatalogTable catalogTable =
            new ResolvedCatalogTable(
                    new ExternalCatalogTable(Schema.newBuilder().fromResolvedSchema(schema).build()),
                    schema);
    final ContextResolvedTable contextResolvedTable =
            ContextResolvedTable.anonymous("collect", catalogTable);

    final DataType consumedDataType = fixCollectDataType(dataTypeFactory, schema);

    final String zone = configuration.get(TableConfigOptions.LOCAL_TIME_ZONE);
    final ZoneId zoneId =
            TableConfigOptions.LOCAL_TIME_ZONE.defaultValue().equals(zone)
                    ? ZoneId.systemDefault()
                    : ZoneId.of(zone);

    final CollectDynamicSink tableSink =
            new CollectDynamicSink(
                    contextResolvedTable.getIdentifier(),
                    consumedDataType,
                    configuration.get(CollectSinkOperatorFactory.MAX_BATCH_SIZE),
                    configuration.get(CollectSinkOperatorFactory.SOCKET_TIMEOUT),
                    classLoader,
                    zoneId,
                    configuration.get(ExecutionConfigOptions.TABLE_EXEC_LEGACY_CAST_BEHAVIOUR).isEnabled());
    collectModifyOperation.setSelectResultProvider(tableSink.getSelectResultProvider());
    collectModifyOperation.setConsumedDataType(consumedDataType);

    return convertSinkToRel(
            relBuilder,
            input,
            Collections.emptyMap(), // dynamicOptions
            contextResolvedTable,
            Collections.emptyMap(), // staticPartitions
            false,
            tableSink);
}
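For reference, the user-facing entry point that ends in this conversion is TableResult#collect(). A minimal sketch, assuming a recent Flink version where EnvironmentSettings.inStreamingMode() returns the settings directly:

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.api.TableResult;
import org.apache.flink.types.Row;
import org.apache.flink.util.CloseableIterator;

public class CollectExample {
    public static void main(String[] args) throws Exception {
        final TableEnvironment env =
                TableEnvironment.create(EnvironmentSettings.inStreamingMode());

        // Planning this query routes the implicit collect sink through
        // convertCollectToRel, which wires up the CollectDynamicSink above.
        final TableResult result = env.executeSql("SELECT 1 AS one");
        try (CloseableIterator<Row> it = result.collect()) {
            while (it.hasNext()) {
                System.out.println(it.next());
            }
        }
    }
}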