Use of org.apache.flink.table.types.logical.LogicalType in project flink by apache.
The class KafkaConnectorOptionsUtil, method createValueFormatProjection.
/**
* Creates an array of indices that determine which physical fields of the table schema to
* include in the value format.
*
* <p>See {@link KafkaConnectorOptions#VALUE_FORMAT}, {@link
* KafkaConnectorOptions#VALUE_FIELDS_INCLUDE}, and {@link
* KafkaConnectorOptions#KEY_FIELDS_PREFIX} for more information.
*/
public static int[] createValueFormatProjection(
        ReadableConfig options, DataType physicalDataType) {
    final LogicalType physicalType = physicalDataType.getLogicalType();
    Preconditions.checkArgument(
            physicalType.is(LogicalTypeRoot.ROW), "Row data type expected.");
    final int physicalFieldCount = LogicalTypeChecks.getFieldCount(physicalType);
    final IntStream physicalFields = IntStream.range(0, physicalFieldCount);
    final String keyPrefix = options.getOptional(KEY_FIELDS_PREFIX).orElse("");
    final ValueFieldsStrategy strategy = options.get(VALUE_FIELDS_INCLUDE);
    if (strategy == ValueFieldsStrategy.ALL) {
        if (keyPrefix.length() > 0) {
            throw new ValidationException(
                    String.format(
                            "A key prefix is not allowed when option '%s' is set to '%s'. "
                                    + "Set it to '%s' instead to avoid field overlaps.",
                            VALUE_FIELDS_INCLUDE.key(),
                            ValueFieldsStrategy.ALL,
                            ValueFieldsStrategy.EXCEPT_KEY));
        }
        return physicalFields.toArray();
    } else if (strategy == ValueFieldsStrategy.EXCEPT_KEY) {
        final int[] keyProjection = createKeyFormatProjection(options, physicalDataType);
        return physicalFields
                .filter(pos -> IntStream.of(keyProjection).noneMatch(k -> k == pos))
                .toArray();
    }
    throw new TableException("Unknown value fields strategy: " + strategy);
}
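For context, a hedged sketch of how this projection behaves end to end. The option keys ('key.format', 'key.fields', 'value.fields-include') and the sample schema are assumptions for illustration, not taken from the snippet above.

import org.apache.flink.configuration.Configuration;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.types.DataType;

// Physical schema: id (index 0), name (index 1), payload (index 2).
DataType physicalDataType = DataTypes.ROW(
        DataTypes.FIELD("id", DataTypes.BIGINT()),
        DataTypes.FIELD("name", DataTypes.STRING()),
        DataTypes.FIELD("payload", DataTypes.STRING()));

Configuration options = new Configuration();
options.setString("key.format", "json");                 // assumed option key
options.setString("key.fields", "id");                   // assumed option key
options.setString("value.fields-include", "EXCEPT_KEY"); // assumed option key

// With EXCEPT_KEY, the key field 'id' (index 0) is filtered out, so the
// value format receives only indices [1, 2].
int[] projection =
        KafkaConnectorOptionsUtil.createValueFormatProjection(options, physicalDataType);
// projection == {1, 2}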
Use of org.apache.flink.table.types.logical.LogicalType in project flink by apache.
The class RowDataToJsonConverters, method createArrayConverter.
private RowDataToJsonConverter createArrayConverter(ArrayType type) {
    final LogicalType elementType = type.getElementType();
    final RowDataToJsonConverter elementConverter = createConverter(elementType);
    final ArrayData.ElementGetter elementGetter = ArrayData.createElementGetter(elementType);
    return (mapper, reuse, value) -> {
        ArrayNode node;
        // reuse could be a NullNode if the last record was null.
        if (reuse == null || reuse.isNull()) {
            node = mapper.createArrayNode();
        } else {
            node = (ArrayNode) reuse;
            node.removeAll();
        }
        ArrayData array = (ArrayData) value;
        int numElements = array.size();
        for (int i = 0; i < numElements; i++) {
            Object element = elementGetter.getElementOrNull(array, i);
            node.add(elementConverter.convert(mapper, null, element));
        }
        return node;
    };
}
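A minimal sketch of exercising the resulting converter. Since createArrayConverter is private, the sketch goes through the public createConverter entry point; the constructor arguments and the shaded Jackson imports reflect recent Flink versions and should be treated as assumptions.

import org.apache.flink.formats.common.TimestampFormat;
import org.apache.flink.formats.json.JsonFormatOptions;
import org.apache.flink.formats.json.RowDataToJsonConverters;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.JsonNode;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.flink.table.data.GenericArrayData;
import org.apache.flink.table.types.logical.ArrayType;
import org.apache.flink.table.types.logical.IntType;

ObjectMapper mapper = new ObjectMapper();
// ARRAY<INT> is routed to createArrayConverter internally.
RowDataToJsonConverters.RowDataToJsonConverter converter =
        new RowDataToJsonConverters(
                        TimestampFormat.SQL, JsonFormatOptions.MapNullKeyMode.FAIL, "null")
                .createConverter(new ArrayType(new IntType()));

// First call: 'reuse' is null, so a fresh ArrayNode is created.
JsonNode node = converter.convert(mapper, null, new GenericArrayData(new Object[] {1, 2, 3}));
// node.toString() -> "[1,2,3]"

// Second call: the previous node is cleared via removeAll() and refilled.
node = converter.convert(mapper, node, new GenericArrayData(new Object[] {4}));
// node.toString() -> "[4]"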
Use of org.apache.flink.table.types.logical.LogicalType in project flink by apache.
The class TableSchema, method validateAndCreateNameToTypeMapping.
/**
 * Creates a mapping from field name to data type, where a field name may refer to a nested
 * field. This is mainly used for validating whether the rowtime attribute (which might be
 * nested) exists in the schema. While creating the mapping, it also validates that there are
 * no duplicate field names.
*
* <p>For example, a "f0" field of ROW type has two nested fields "q1" and "q2". Then the
* mapping will be ["f0" -> ROW, "f0.q1" -> INT, "f0.q2" -> STRING].
*
* <pre>{@code
* f0 ROW<q1 INT, q2 STRING>
* }</pre>
*
 * @param fieldNameToType Field name to type mapping to update
* @param fieldName Name of this field, e.g. "q1" or "q2" in the above example
* @param fieldType Data type of this field
* @param parentFieldName Field name of parent type, e.g. "f0" in the above example
*/
private static void validateAndCreateNameToTypeMapping(
        Map<String, LogicalType> fieldNameToType,
        String fieldName,
        LogicalType fieldType,
        String parentFieldName) {
    String fullFieldName =
            parentFieldName.isEmpty() ? fieldName : parentFieldName + "." + fieldName;
    LogicalType oldType = fieldNameToType.put(fullFieldName, fieldType);
    if (oldType != null) {
        throw new ValidationException(
                "Field names must be unique. Duplicate field: '" + fullFieldName + "'");
    }
    if (isCompositeType(fieldType) && !(fieldType instanceof LegacyTypeInformationType)) {
        final List<String> fieldNames = LogicalTypeChecks.getFieldNames(fieldType);
        final List<LogicalType> fieldTypes = fieldType.getChildren();
        IntStream.range(0, fieldNames.size())
                .forEach(i -> validateAndCreateNameToTypeMapping(
                        fieldNameToType, fieldNames.get(i), fieldTypes.get(i), fullFieldName));
    }
}
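For the ROW&lt;q1 INT, q2 STRING&gt; example from the Javadoc, here is a hedged sketch of the mapping this method builds. Since the method is private, assume the call runs inside TableSchema; the type construction is illustrative.

import java.util.HashMap;
import java.util.Map;
import org.apache.flink.table.types.logical.IntType;
import org.apache.flink.table.types.logical.LogicalType;
import org.apache.flink.table.types.logical.RowType;
import org.apache.flink.table.types.logical.VarCharType;

Map<String, LogicalType> mapping = new HashMap<>();
RowType f0 = RowType.of(
        new LogicalType[] {new IntType(), new VarCharType(VarCharType.MAX_LENGTH)},
        new String[] {"q1", "q2"});

// An empty parent name marks a top-level field.
validateAndCreateNameToTypeMapping(mapping, "f0", f0, "");
// mapping now contains:
//   "f0"    -> ROW<q1 INT, q2 STRING>
//   "f0.q1" -> INT
//   "f0.q2" -> STRING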
Use of org.apache.flink.table.types.logical.LogicalType in project flink by apache.
The class DataTypeUtilsTest, method testExpandDistinctType.
@Test
public void testExpandDistinctType() {
    FieldsDataType dataType = (FieldsDataType) ROW(
            FIELD("f0", INT()),
            FIELD("f1", STRING()),
            FIELD("f2", TIMESTAMP(5).bridgedTo(Timestamp.class)),
            FIELD("f3", TIMESTAMP(3)));
    LogicalType originalLogicalType = dataType.getLogicalType();
    DistinctType distinctLogicalType = DistinctType.newBuilder(
                    ObjectIdentifier.of("catalog", "database", "type"), originalLogicalType)
            .build();
    DataType distinctDataType = new FieldsDataType(distinctLogicalType, dataType.getChildren());
    ResolvedSchema schema = DataTypeUtils.expandCompositeTypeToSchema(distinctDataType);
    assertThat(schema).isEqualTo(ResolvedSchema.of(
            Column.physical("f0", INT()),
            Column.physical("f1", STRING()),
            Column.physical("f2", TIMESTAMP(5).bridgedTo(Timestamp.class)),
            Column.physical("f3", TIMESTAMP(3).bridgedTo(LocalDateTime.class))));
}
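The same expansion on a plain ROW (without the DISTINCT wrapper) may make the behavior easier to see; this sketch reuses only APIs that appear in the test above, with illustrative field names.

import static org.apache.flink.table.api.DataTypes.*;
import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.catalog.ResolvedSchema;
import org.apache.flink.table.types.DataType;
import org.apache.flink.table.types.utils.DataTypeUtils;

DataType rowType = ROW(FIELD("a", INT()), FIELD("b", STRING()));
ResolvedSchema expanded = DataTypeUtils.expandCompositeTypeToSchema(rowType);
// expanded == ResolvedSchema.of(
//         Column.physical("a", INT()),
//         Column.physical("b", STRING()))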
Use of org.apache.flink.table.types.logical.LogicalType in project flink by apache.
The class InternalDataUtils, method toGenericRow.
static GenericRowData toGenericRow(RowData rowData, LogicalType logicalType) {
    final List<LogicalType> fieldTypes = LogicalTypeChecks.getFieldTypes(logicalType);
    final GenericRowData row = new GenericRowData(fieldTypes.size());
    row.setRowKind(rowData.getRowKind());
    for (int i = 0; i < fieldTypes.size(); i++) {
        if (rowData.isNullAt(i)) {
            row.setField(i, null);
        } else {
            LogicalType fieldType = fieldTypes.get(i);
            RowData.FieldGetter fieldGetter = RowData.createFieldGetter(fieldType, i);
            row.setField(i, toGenericInternalData(fieldGetter.getFieldOrNull(rowData), fieldType));
        }
    }
    return row;
}
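A hedged usage sketch: materializing an arbitrary RowData into a GenericRowData, the typical step before equality assertions in test utilities. The method is package-private, so assume same-package access; the schema is illustrative.

import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.data.StringData;
import org.apache.flink.table.types.logical.BigIntType;
import org.apache.flink.table.types.logical.LogicalType;
import org.apache.flink.table.types.logical.RowType;
import org.apache.flink.table.types.logical.VarCharType;

RowType rowType = RowType.of(
        new LogicalType[] {new BigIntType(), new VarCharType(VarCharType.MAX_LENGTH)},
        new String[] {"id", "name"});

RowData input = GenericRowData.of(42L, StringData.fromString("flink"));
GenericRowData copy = InternalDataUtils.toGenericRow(input, rowType);
// copy.getLong(0) == 42L
// copy.getString(1).toString().equals("flink")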