Use of org.apache.flink.table.types.DataType in project flink by apache.
From the class CanalJsonDecodingFormat, method createRuntimeDecoder:
@Override
public DeserializationSchema<RowData> createRuntimeDecoder(
        DynamicTableSource.Context context, DataType physicalDataType, int[][] projections) {
    // apply the field projection pushed down by the planner
    physicalDataType = Projection.of(projections).project(physicalDataType);
    // resolve the requested metadata keys against the format's ReadableMetadata enum
    final List<ReadableMetadata> readableMetadata =
            metadataKeys.stream()
                    .map(
                            k ->
                                    Stream.of(ReadableMetadata.values())
                                            .filter(rm -> rm.key.equals(k))
                                            .findFirst()
                                            .orElseThrow(IllegalStateException::new))
                    .collect(Collectors.toList());
    // the produced type is the projected physical type plus the metadata columns
    final List<DataTypes.Field> metadataFields =
            readableMetadata.stream()
                    .map(m -> DataTypes.FIELD(m.key, m.dataType))
                    .collect(Collectors.toList());
    final DataType producedDataType =
            DataTypeUtils.appendRowFields(physicalDataType, metadataFields);
    final TypeInformation<RowData> producedTypeInfo =
            context.createTypeInformation(producedDataType);
    return CanalJsonDeserializationSchema.builder(
                    physicalDataType, readableMetadata, producedTypeInfo)
            .setDatabase(database)
            .setTable(table)
            .setIgnoreParseErrors(ignoreParseErrors)
            .setTimestampFormat(timestampFormat)
            .build();
}
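The projections argument is the planner's pushed-down field projection: each inner int[] is a path of field indices into the physical row, so nested fields can be selected as well. The following is a minimal, self-contained sketch (the three-field row type is invented for illustration) of what the Projection.of(projections).project(physicalDataType) step computes:

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.connector.Projection;
import org.apache.flink.table.types.DataType;

public class ProjectionSketch {
    public static void main(String[] args) {
        // hypothetical physical row type of a source table
        DataType physical = DataTypes.ROW(
                DataTypes.FIELD("id", DataTypes.BIGINT()),
                DataTypes.FIELD("name", DataTypes.STRING()),
                DataTypes.FIELD("price", DataTypes.DECIMAL(10, 2)));
        // keep only fields 0 and 2
        int[][] projections = {{0}, {2}};
        DataType projected = Projection.of(projections).project(physical);
        // prints a two-field row type: ROW<`id` BIGINT, `price` DECIMAL(10, 2)>
        System.out.println(projected);
    }
}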
Use of org.apache.flink.table.types.DataType in project flink by apache.
From the class JsonFormatFactory, method createEncodingFormat:
@Override
public EncodingFormat<SerializationSchema<RowData>> createEncodingFormat(
        DynamicTableFactory.Context context, ReadableConfig formatOptions) {
    FactoryUtil.validateFactoryOptions(this, formatOptions);
    JsonFormatOptionsUtil.validateEncodingFormatOptions(formatOptions);
    TimestampFormat timestampOption = JsonFormatOptionsUtil.getTimestampFormat(formatOptions);
    JsonFormatOptions.MapNullKeyMode mapNullKeyMode =
            JsonFormatOptionsUtil.getMapNullKeyMode(formatOptions);
    String mapNullKeyLiteral = formatOptions.get(MAP_NULL_KEY_LITERAL);
    final boolean encodeDecimalAsPlainNumber =
            formatOptions.get(ENCODE_DECIMAL_AS_PLAIN_NUMBER);
    return new EncodingFormat<SerializationSchema<RowData>>() {

        @Override
        public SerializationSchema<RowData> createRuntimeEncoder(
                DynamicTableSink.Context context, DataType consumedDataType) {
            final RowType rowType = (RowType) consumedDataType.getLogicalType();
            return new JsonRowDataSerializationSchema(
                    rowType,
                    timestampOption,
                    mapNullKeyMode,
                    mapNullKeyLiteral,
                    encodeDecimalAsPlainNumber);
        }

        @Override
        public ChangelogMode getChangelogMode() {
            return ChangelogMode.insertOnly();
        }
    };
}
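For illustration, the runtime encoder built above can also be constructed and invoked by hand. This is a sketch, not Flink source code: the option values are hand-picked, and the package of TimestampFormat differs across Flink versions (org.apache.flink.formats.json in older releases, org.apache.flink.formats.common in newer ones). In a real job, Flink also calls open() on the schema before serialize().

import org.apache.flink.formats.common.TimestampFormat;
import org.apache.flink.formats.json.JsonFormatOptions;
import org.apache.flink.formats.json.JsonRowDataSerializationSchema;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.StringData;
import org.apache.flink.table.types.logical.RowType;

public class JsonEncoderSketch {
    public static void main(String[] args) {
        RowType rowType = (RowType) DataTypes.ROW(
                DataTypes.FIELD("id", DataTypes.INT()),
                DataTypes.FIELD("name", DataTypes.STRING())).getLogicalType();
        JsonRowDataSerializationSchema schema = new JsonRowDataSerializationSchema(
                rowType,
                TimestampFormat.SQL,                    // render timestamps as SQL strings
                JsonFormatOptions.MapNullKeyMode.FAIL,  // fail on null map keys
                "null",                                 // literal, only used in LITERAL mode
                false);                                 // do not force plain-number decimals
        byte[] json = schema.serialize(GenericRowData.of(1, StringData.fromString("flink")));
        System.out.println(new String(json)); // expected: {"id":1,"name":"flink"}
    }
}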
Use of org.apache.flink.table.types.DataType in project flink by apache.
From the class DependencyTest, method testTableFactoryDiscovery:
@Test
public void testTableFactoryDiscovery() throws Exception {
    final LocalExecutor executor = createLocalExecutor();
    try {
        final TableResult tableResult = executeSql(executor, SESSION_ID, "DESCRIBE TableNumber1");
        // JUnit's assertEquals takes the expected value first
        assertEquals(
                ResolvedSchema.physical(
                        new String[] {"name", "type", "null", "key", "extras", "watermark"},
                        new DataType[] {
                            DataTypes.STRING(),
                            DataTypes.STRING(),
                            DataTypes.BOOLEAN(),
                            DataTypes.STRING(),
                            DataTypes.STRING(),
                            DataTypes.STRING()
                        }),
                tableResult.getResolvedSchema());
        List<Row> schemaData =
                Arrays.asList(
                        Row.of("IntegerField1", "INT", true, null, null, null),
                        Row.of("StringField1", "STRING", true, null, null, null),
                        Row.of(
                                "rowtimeField",
                                "TIMESTAMP(3) *ROWTIME*",
                                true,
                                null,
                                null,
                                "`rowtimeField`"));
        assertEquals(schemaData, CollectionUtil.iteratorToList(tableResult.collect()));
    } finally {
        executor.closeSession(SESSION_ID);
    }
}
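The same six-column DESCRIBE shape (name, type, null, key, extras, watermark) can be reproduced outside the SQL Client with a plain TableEnvironment. A hypothetical sketch; the datagen table below is an invented stand-in for TableNumber1:

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;

public class DescribeSketch {
    public static void main(String[] args) {
        TableEnvironment env = TableEnvironment.create(EnvironmentSettings.inStreamingMode());
        env.executeSql(
                "CREATE TABLE TableNumber1 ("
                        + "  IntegerField1 INT,"
                        + "  StringField1 STRING,"
                        + "  rowtimeField TIMESTAMP(3),"
                        + "  WATERMARK FOR rowtimeField AS rowtimeField - INTERVAL '5' SECOND"
                        + ") WITH ('connector' = 'datagen')");
        // prints the same six columns asserted in the test; the rowtime field
        // shows up as TIMESTAMP(3) *ROWTIME* with its watermark expression
        env.executeSql("DESCRIBE TableNumber1").print();
    }
}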
Use of org.apache.flink.table.types.DataType in project flink by apache.
From the class BaseMaterializedResultTest, method createInternalBinaryRowDataConverter:
static Function<Row, BinaryRowData> createInternalBinaryRowDataConverter(DataType dataType) {
    DataStructureConverter<Object, Object> converter =
            DataStructureConverters.getConverter(dataType);
    RowDataSerializer serializer = new RowDataSerializer((RowType) dataType.getLogicalType());
    // copy() detaches the result from the serializer's reused buffer
    return row -> serializer.toBinaryRow((RowData) converter.toInternalOrNull(row)).copy();
}
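A short usage sketch, assuming access to the helper above (the two-field row type and values are invented):

// inside a test method of the same class
DataType dataType = DataTypes.ROW(
        DataTypes.FIELD("id", DataTypes.INT()),
        DataTypes.FIELD("name", DataTypes.STRING()));
Function<Row, BinaryRowData> toBinary = createInternalBinaryRowDataConverter(dataType);
// external Row -> internal RowData -> compact binary representation
BinaryRowData binary = toBinary.apply(Row.of(1, "flink"));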
Use of org.apache.flink.table.types.DataType in project flink by apache.
From the class DescriptorProperties, method getOptionalTableSchema:
/**
* Returns a table schema under the given key if it exists.
*/
public Optional<TableSchema> getOptionalTableSchema(String key) {
    // count the declared fields ("key." is the prefix of every column property)
    final int fieldCount =
            properties.keySet().stream()
                    .filter(
                            k ->
                                    k.startsWith(key)
                                            && SCHEMA_COLUMN_NAME_SUFFIX
                                                    .matcher(k.substring(key.length() + 1))
                                                    .matches())
                    .mapToInt(k -> 1)
                    .sum();
    if (fieldCount == 0) {
        return Optional.empty();
    }
    // validate fields and build the schema
    final TableSchema.Builder schemaBuilder = TableSchema.builder();
    for (int i = 0; i < fieldCount; i++) {
        final String nameKey = key + '.' + i + '.' + NAME;
        final String legacyTypeKey = key + '.' + i + '.' + TYPE;
        final String typeKey = key + '.' + i + '.' + DATA_TYPE;
        final String exprKey = key + '.' + i + '.' + EXPR;
        final String metadataKey = key + '.' + i + '.' + METADATA;
        final String virtualKey = key + '.' + i + '.' + VIRTUAL;
        final String name = optionalGet(nameKey).orElseThrow(exceptionSupplier(nameKey));
        final DataType type;
        if (containsKey(typeKey)) {
            type = getDataType(typeKey);
        } else if (containsKey(legacyTypeKey)) {
            // fall back to the legacy TypeInformation-based type string
            type = TypeConversions.fromLegacyInfoToDataType(getType(legacyTypeKey));
        } else {
            throw exceptionSupplier(typeKey).get();
        }
        final Optional<String> expr = optionalGet(exprKey);
        final Optional<String> metadata = optionalGet(metadataKey);
        final boolean virtual = getOptionalBoolean(virtualKey).orElse(false);
        if (expr.isPresent()) {
            // computed column
            schemaBuilder.add(TableColumn.computed(name, type, expr.get()));
        } else if (metadata.isPresent()) {
            // metadata column
            final String metadataAlias = metadata.get();
            if (metadataAlias.equals(name)) {
                schemaBuilder.add(TableColumn.metadata(name, type, virtual));
            } else {
                schemaBuilder.add(TableColumn.metadata(name, type, metadataAlias, virtual));
            }
        } else {
            // physical column
            schemaBuilder.add(TableColumn.physical(name, type));
        }
    }
    // extract watermark information, counting the declared watermark entries
    String watermarkPrefixKey = key + '.' + WATERMARK;
    final int watermarkCount =
            properties.keySet().stream()
                    .filter(
                            k ->
                                    k.startsWith(watermarkPrefixKey)
                                            && k.endsWith('.' + WATERMARK_ROWTIME))
                    .mapToInt(k -> 1)
                    .sum();
    if (watermarkCount > 0) {
        for (int i = 0; i < watermarkCount; i++) {
            final String rowtimeKey = watermarkPrefixKey + '.' + i + '.' + WATERMARK_ROWTIME;
            final String exprKey = watermarkPrefixKey + '.' + i + '.' + WATERMARK_STRATEGY_EXPR;
            final String typeKey =
                    watermarkPrefixKey + '.' + i + '.' + WATERMARK_STRATEGY_DATA_TYPE;
            final String rowtime =
                    optionalGet(rowtimeKey).orElseThrow(exceptionSupplier(rowtimeKey));
            final String exprString =
                    optionalGet(exprKey).orElseThrow(exceptionSupplier(exprKey));
            final String typeString =
                    optionalGet(typeKey).orElseThrow(exceptionSupplier(typeKey));
            final DataType exprType =
                    TypeConversions.fromLogicalToDataType(LogicalTypeParser.parse(typeString));
            schemaBuilder.watermark(rowtime, exprString, exprType);
        }
    }
    // extract the primary key constraint
    String pkConstraintNameKey = key + '.' + PRIMARY_KEY_NAME;
    final Optional<String> pkConstraintNameOpt = optionalGet(pkConstraintNameKey);
    if (pkConstraintNameOpt.isPresent()) {
        final String pkColumnsKey = key + '.' + PRIMARY_KEY_COLUMNS;
        final String columns =
                optionalGet(pkColumnsKey).orElseThrow(exceptionSupplier(pkColumnsKey));
        schemaBuilder.primaryKey(pkConstraintNameOpt.get(), columns.split(","));
    }
    return Optional.of(schemaBuilder.build());
}
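To make the expected key layout concrete, the following hypothetical property set would be assembled by getOptionalTableSchema("schema") into a two-column schema with a watermark and a primary key. The key suffixes follow the constants referenced above (NAME = "name", DATA_TYPE = "data-type", and so on); the column names and types are invented:

DescriptorProperties properties = new DescriptorProperties();
properties.putString("schema.0.name", "id");
properties.putString("schema.0.data-type", "INT NOT NULL");
properties.putString("schema.1.name", "ts");
properties.putString("schema.1.data-type", "TIMESTAMP(3)");
properties.putString("schema.watermark.0.rowtime", "ts");
properties.putString("schema.watermark.0.strategy.expr", "`ts` - INTERVAL '5' SECOND");
properties.putString("schema.watermark.0.strategy.data-type", "TIMESTAMP(3)");
properties.putString("schema.primary-key.name", "PK_1");
properties.putString("schema.primary-key.columns", "id");
// yields a schema with columns id (INT NOT NULL) and ts (TIMESTAMP(3)),
// a watermark on ts, and primary key PK_1 on id
Optional<TableSchema> schema = properties.getOptionalTableSchema("schema");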