Example usage of org.apache.flink.table.api.DataTypes in the Apache Flink project:
the extractElementTypeFromValues method of the ValueDataTypeConverter class.
/**
 * Derives a common {@code ARRAY} data type from the values of the given array.
 *
 * @param array candidate array elements; {@code null} entries are ignored
 * @return the array data type, or {@link Optional#empty()} if no element type could be
 *     extracted or the elements disagree on their type
 */
private static Optional<DataType> extractElementTypeFromValues(Object[] array) {
    DataType commonType = null;
    for (Object value : array) {
        // null entries act as wildcards and are compatible with any element type
        if (value == null) {
            continue;
        }
        final Optional<DataType> extracted = extractDataType(value);
        // a single unclassifiable element makes the whole array unclassifiable
        if (!extracted.isPresent()) {
            return Optional.empty();
        }
        // treat every element as nullable; otherwise mismatches could occur when
        // dealing with nested arrays
        final DataType candidate = extracted.get().nullable();
        // all non-null elements must agree on one type
        if (commonType != null && !candidate.equals(commonType)) {
            return Optional.empty();
        }
        commonType = candidate;
    }
    // all-null (or empty) input yields empty; otherwise wrap as ARRAY
    return Optional.ofNullable(commonType).map(DataTypes::ARRAY);
}
Example usage of org.apache.flink.table.api.DataTypes in the Apache Flink project:
the createCompactReaderFactory method of the FileSystemTableSink class.
/**
 * Builds a {@link CompactReader.Factory} for re-reading written files during compaction.
 *
 * <p>The produced type includes partition fields while the physical type excludes them,
 * since partition values live in the file path rather than in the file content.
 *
 * @param context the sink context used to create decoders and type information
 * @return a reader factory, or {@link Optional#empty()} if neither a bulk reader format
 *     nor a deserialization format is available
 */
private Optional<CompactReader.Factory<RowData>> createCompactReaderFactory(Context context) {
    final DataType producedDataType = physicalRowDataType;
    // strip partition keys to obtain the on-disk (physical) row type
    final DataType physicalDataType =
            DataType.getFields(producedDataType).stream()
                    .filter(field -> !partitionKeys.contains(field.getName()))
                    .collect(Collectors.collectingAndThen(Collectors.toList(), DataTypes::ROW));
    // no format configured -> compaction reading is not supported
    if (bulkReaderFormat == null && deserializationFormat == null) {
        return Optional.empty();
    }
    // pick the inner reader: prefer the bulk format, otherwise adapt the
    // record-at-a-time deserialization schema to the bulk interface
    final BulkFormat<RowData, FileSourceSplit> innerReader =
            bulkReaderFormat != null
                    ? bulkReaderFormat.createRuntimeDecoder(
                            createSourceContext(context), physicalDataType)
                    : new DeserializationSchemaAdapter(
                            deserializationFormat.createRuntimeDecoder(
                                    createSourceContext(context), physicalDataType));
    // enrich the rows with partition values extracted from the file info
    final BulkFormat<RowData, FileSourceSplit> format =
            new FileInfoExtractorBulkFormat(
                    innerReader,
                    producedDataType,
                    context.createTypeInformation(producedDataType),
                    Collections.emptyMap(),
                    partitionKeys,
                    defaultPartName);
    return Optional.of(CompactBulkReader.factory(format));
}
Aggregations