use of org.apache.flink.table.types.utils.TypeConversions in project flink by apache.
the class ParquetColumnarRowSplitReaderTest method innerTestPartitionValues.
private void innerTestPartitionValues(
        Path testPath, Map<String, Object> partSpec, boolean nullPartValue) throws IOException {
    LogicalType[] fieldTypes = new LogicalType[] {
        new VarCharType(VarCharType.MAX_LENGTH), new BooleanType(), new TinyIntType(),
        new SmallIntType(), new IntType(), new BigIntType(), new FloatType(),
        new DoubleType(), new TimestampType(9), new DecimalType(5, 0),
        new DecimalType(15, 0), new DecimalType(20, 0), new DecimalType(5, 0),
        new DecimalType(15, 0), new DecimalType(20, 0), new BooleanType(),
        new DateType(), new TimestampType(9), new DoubleType(), new TinyIntType(),
        new SmallIntType(), new IntType(), new BigIntType(), new FloatType(),
        new DecimalType(5, 0), new DecimalType(15, 0), new DecimalType(20, 0),
        new VarCharType(VarCharType.MAX_LENGTH)
    };
    ParquetColumnarRowSplitReader reader =
            ParquetSplitReaderUtil.genPartColumnarRowReader(
                    false,
                    true,
                    new Configuration(),
                    IntStream.range(0, 28).mapToObj(i -> "f" + i).toArray(String[]::new),
                    Arrays.stream(fieldTypes)
                            .map(TypeConversions::fromLogicalToDataType)
                            .toArray(DataType[]::new),
                    partSpec,
                    new int[] {7, 2, 4, 15, 19, 20, 21, 22, 23, 18, 16, 17, 24, 25, 26, 27},
                    rowGroupSize,
                    new Path(testPath.getPath()),
                    0,
                    Long.MAX_VALUE);
    int i = 0;
    while (!reader.reachedEnd()) {
        ColumnarRowData row = reader.nextRecord();
        // common values
        assertEquals(i, row.getDouble(0), 0);
        assertEquals((byte) i, row.getByte(1));
        assertEquals(i, row.getInt(2));
        // partition values
        if (nullPartValue) {
            for (int j = 3; j < 16; j++) {
                assertTrue(row.isNullAt(j));
            }
        } else {
            assertTrue(row.getBoolean(3));
            assertEquals(9, row.getByte(4));
            assertEquals(10, row.getShort(5));
            assertEquals(11, row.getInt(6));
            assertEquals(12, row.getLong(7));
            assertEquals(13, row.getFloat(8), 0);
            assertEquals(6.6, row.getDouble(9), 0);
            assertEquals(DateTimeUtils.toInternal(Date.valueOf("2020-11-23")), row.getInt(10));
            assertEquals(
                    LocalDateTime.of(1999, 1, 1, 1, 1),
                    row.getTimestamp(11, 9).toLocalDateTime());
            assertEquals(DecimalData.fromBigDecimal(new BigDecimal(24), 5, 0), row.getDecimal(12, 5, 0));
            assertEquals(DecimalData.fromBigDecimal(new BigDecimal(25), 15, 0), row.getDecimal(13, 15, 0));
            assertEquals(DecimalData.fromBigDecimal(new BigDecimal(26), 20, 0), row.getDecimal(14, 20, 0));
            assertEquals("f27", row.getString(15).toString());
        }
        i++;
    }
    reader.close();
}
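The recurring pattern in this test is the element-wise conversion from LogicalType to DataType before the produced types are handed to the reader. Below is a minimal, self-contained sketch of just that step; the types and class name are illustrative and not taken from the test above.

import java.util.Arrays;

import org.apache.flink.table.types.DataType;
import org.apache.flink.table.types.logical.IntType;
import org.apache.flink.table.types.logical.LogicalType;
import org.apache.flink.table.types.logical.VarCharType;
import org.apache.flink.table.types.utils.TypeConversions;

public class LogicalToDataTypeSketch {

    public static void main(String[] args) {
        // Arbitrary logical types; the test above declares 28 of them.
        LogicalType[] fieldTypes = {new VarCharType(VarCharType.MAX_LENGTH), new IntType()};

        // Same conversion the reader setup uses: one DataType per LogicalType.
        DataType[] dataTypes = Arrays.stream(fieldTypes)
                .map(TypeConversions::fromLogicalToDataType)
                .toArray(DataType[]::new);

        System.out.println(Arrays.toString(dataTypes)); // e.g. [VARCHAR(2147483647), INT]
    }
}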
use of org.apache.flink.table.types.utils.TypeConversions in project flink by apache.
the class StringConcatTypeStrategy method inferType.
@Override
public Optional<DataType> inferType(CallContext callContext) {
    final List<DataType> argumentDataTypes = callContext.getArgumentDataTypes();
    final LogicalType type1 = argumentDataTypes.get(0).getLogicalType();
    final LogicalType type2 = argumentDataTypes.get(1).getLogicalType();
    int length = getLength(type1) + getLength(type2);
    // handle overflow
    if (length < 0) {
        length = CharType.MAX_LENGTH;
    }
    final LogicalType minimumType;
    if (type1.is(LogicalTypeFamily.CHARACTER_STRING)
            || type2.is(LogicalTypeFamily.CHARACTER_STRING)) {
        minimumType = new CharType(false, length);
    } else if (type1.is(LogicalTypeFamily.BINARY_STRING)
            || type2.is(LogicalTypeFamily.BINARY_STRING)) {
        minimumType = new BinaryType(false, length);
    } else {
        return Optional.empty();
    }
    // deal with nullability handling and varying semantics
    return findCommonType(Arrays.asList(type1, type2, minimumType))
            .map(TypeConversions::fromLogicalToDataType);
}
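The strategy only converts back to a DataType if a common logical type actually exists. The sketch below isolates that last step, assuming findCommonType refers to LogicalTypeMerging.findCommonType as in the strategy above; the two input types are arbitrary stand-ins for the call's arguments.

import java.util.Arrays;
import java.util.Optional;

import org.apache.flink.table.types.DataType;
import org.apache.flink.table.types.logical.CharType;
import org.apache.flink.table.types.logical.LogicalType;
import org.apache.flink.table.types.logical.VarCharType;
import org.apache.flink.table.types.logical.utils.LogicalTypeMerging;
import org.apache.flink.table.types.utils.TypeConversions;

public class CommonTypeSketch {

    public static void main(String[] args) {
        // Arbitrary character-string types standing in for the two arguments.
        LogicalType type1 = new CharType(false, 3);
        LogicalType type2 = new VarCharType(10);

        // Find the common logical type, then convert it to a DataType,
        // mirroring the final line of inferType above.
        Optional<DataType> result =
                LogicalTypeMerging.findCommonType(Arrays.asList(type1, type2))
                        .map(TypeConversions::fromLogicalToDataType);

        result.ifPresent(System.out::println); // e.g. VARCHAR(10)
    }
}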
use of org.apache.flink.table.types.utils.TypeConversions in project flink by apache.
the class ParquetRowDataWriterTest method innerTest.
private void innerTest(Configuration conf, boolean utcTimestamp) throws IOException {
    Path path = new Path(TEMPORARY_FOLDER.newFolder().getPath(), UUID.randomUUID().toString());
    int number = 1000;
    List<Row> rows = new ArrayList<>(number);
    for (int i = 0; i < number; i++) {
        Integer v = i;
        rows.add(
                Row.of(
                        String.valueOf(v),
                        String.valueOf(v).getBytes(StandardCharsets.UTF_8),
                        v % 2 == 0,
                        v.byteValue(),
                        v.shortValue(),
                        v,
                        v.longValue(),
                        v.floatValue(),
                        v.doubleValue(),
                        toDateTime(v),
                        BigDecimal.valueOf(v),
                        BigDecimal.valueOf(v),
                        BigDecimal.valueOf(v)));
    }
    ParquetWriterFactory<RowData> factory =
            ParquetRowDataBuilder.createWriterFactory(ROW_TYPE, conf, utcTimestamp);
    BulkWriter<RowData> writer =
            factory.create(path.getFileSystem().create(path, FileSystem.WriteMode.OVERWRITE));
    for (int i = 0; i < number; i++) {
        writer.addElement(CONVERTER.toInternal(rows.get(i)));
    }
    writer.flush();
    writer.finish();
    // verify
    ParquetColumnarRowSplitReader reader =
            ParquetSplitReaderUtil.genPartColumnarRowReader(
                    utcTimestamp,
                    true,
                    conf,
                    ROW_TYPE.getFieldNames().toArray(new String[0]),
                    ROW_TYPE.getChildren().stream()
                            .map(TypeConversions::fromLogicalToDataType)
                            .toArray(DataType[]::new),
                    new HashMap<>(),
                    IntStream.range(0, ROW_TYPE.getFieldCount()).toArray(),
                    50,
                    path,
                    0,
                    Long.MAX_VALUE);
    int cnt = 0;
    while (!reader.reachedEnd()) {
        Row row = CONVERTER.toExternal(reader.nextRecord());
        Assert.assertEquals(rows.get(cnt), row);
        cnt++;
    }
    Assert.assertEquals(number, cnt);
}
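Here the same conversion is driven by a RowType: its children are the field LogicalTypes, and each one is mapped to a DataType for the reader. A short sketch of that step against a small, hypothetical row type (the field names and types are made up, not the test's ROW_TYPE):

import org.apache.flink.table.types.DataType;
import org.apache.flink.table.types.logical.IntType;
import org.apache.flink.table.types.logical.LogicalType;
import org.apache.flink.table.types.logical.RowType;
import org.apache.flink.table.types.logical.VarCharType;
import org.apache.flink.table.types.utils.TypeConversions;

public class RowTypeToDataTypesSketch {

    public static void main(String[] args) {
        // A two-field stand-in for the test's ROW_TYPE.
        RowType rowType = RowType.of(
                new LogicalType[] {new IntType(), new VarCharType(VarCharType.MAX_LENGTH)},
                new String[] {"id", "name"});

        // getChildren() returns the field types; convert each to a DataType
        // exactly as the reader setup above does.
        DataType[] fieldDataTypes = rowType.getChildren().stream()
                .map(TypeConversions::fromLogicalToDataType)
                .toArray(DataType[]::new);

        String[] fieldNames = rowType.getFieldNames().toArray(new String[0]);
        System.out.println(fieldNames.length + " fields, " + fieldDataTypes.length + " data types");
    }
}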
use of org.apache.flink.table.types.utils.TypeConversions in project flink by apache.
the class AggregateOperationFactory method extractAggregateResultDataTypes.
/**
 * Extracts the result types for the aggregate or the table aggregate expression. For a table
 * aggregate, it may return multiple result types when the composite return type is flattened.
 */
private Stream<DataType> extractAggregateResultDataTypes(ResolvedExpression expression) {
    if (isFunctionOfKind(expression, TABLE_AGGREGATE)) {
        final DataType outputDataType = expression.getOutputDataType();
        final LogicalType outputType = expression.getOutputDataType().getLogicalType();
        // legacy
        if (outputType instanceof LegacyTypeInformationType) {
            final TypeInformation<?> legacyInfo =
                    TypeConversions.fromDataTypeToLegacyInfo(expression.getOutputDataType());
            return Stream.of(FieldInfoUtils.getFieldTypes(legacyInfo))
                    .map(TypeConversions::fromLegacyInfoToDataType);
        }
        return DataTypeUtils.flattenToDataTypes(outputDataType).stream();
    } else {
        return Stream.of(expression.getOutputDataType());
    }
}
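For the legacy branch, TypeConversions bridges between the old TypeInformation stack and DataType in both directions. A minimal sketch of that round trip on a simple type; apart from the two conversion calls, nothing here comes from the factory above.

import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.types.DataType;
import org.apache.flink.table.types.utils.TypeConversions;

public class LegacyConversionSketch {

    public static void main(String[] args) {
        // DataType -> legacy TypeInformation, as done for the table aggregate's output type.
        DataType dataType = DataTypes.STRING();
        TypeInformation<?> legacyInfo = TypeConversions.fromDataTypeToLegacyInfo(dataType);

        // Legacy TypeInformation -> DataType, as done per extracted field.
        DataType back = TypeConversions.fromLegacyInfoToDataType(legacyInfo);

        System.out.println(legacyInfo + " -> " + back);
    }
}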