Use of org.apache.flink.table.types.logical.FloatType in project flink by apache.
From the class ParquetColumnarRowSplitReaderTest, method innerTestPartitionValues:
private void innerTestPartitionValues(
        Path testPath, Map<String, Object> partSpec, boolean nullPartValue) throws IOException {
    LogicalType[] fieldTypes = new LogicalType[] {
            new VarCharType(VarCharType.MAX_LENGTH), new BooleanType(), new TinyIntType(),
            new SmallIntType(), new IntType(), new BigIntType(), new FloatType(),
            new DoubleType(), new TimestampType(9), new DecimalType(5, 0),
            new DecimalType(15, 0), new DecimalType(20, 0), new DecimalType(5, 0),
            new DecimalType(15, 0), new DecimalType(20, 0), new BooleanType(),
            new DateType(), new TimestampType(9), new DoubleType(), new TinyIntType(),
            new SmallIntType(), new IntType(), new BigIntType(), new FloatType(),
            new DecimalType(5, 0), new DecimalType(15, 0), new DecimalType(20, 0),
            new VarCharType(VarCharType.MAX_LENGTH)};
    ParquetColumnarRowSplitReader reader = ParquetSplitReaderUtil.genPartColumnarRowReader(
            false,
            true,
            new Configuration(),
            IntStream.range(0, 28).mapToObj(i -> "f" + i).toArray(String[]::new),
            Arrays.stream(fieldTypes)
                    .map(TypeConversions::fromLogicalToDataType)
                    .toArray(DataType[]::new),
            partSpec,
            // Projection: positions 0-2 select f7, f2, f4 from the file;
            // positions 3-15 select the partition columns f15..f27.
            new int[] {7, 2, 4, 15, 19, 20, 21, 22, 23, 18, 16, 17, 24, 25, 26, 27},
            rowGroupSize,
            new Path(testPath.getPath()),
            0,
            Long.MAX_VALUE);
    int i = 0;
    while (!reader.reachedEnd()) {
        ColumnarRowData row = reader.nextRecord();
        // Common values read from the Parquet file itself.
        assertEquals(i, row.getDouble(0), 0);
        assertEquals((byte) i, row.getByte(1));
        assertEquals(i, row.getInt(2));
        // Partition values injected by the reader from partSpec.
        if (nullPartValue) {
            for (int j = 3; j < 16; j++) {
                assertTrue(row.isNullAt(j));
            }
        } else {
            assertTrue(row.getBoolean(3));
            assertEquals(9, row.getByte(4));
            assertEquals(10, row.getShort(5));
            assertEquals(11, row.getInt(6));
            assertEquals(12, row.getLong(7));
            assertEquals(13, row.getFloat(8), 0);
            assertEquals(6.6, row.getDouble(9), 0);
            assertEquals(DateTimeUtils.toInternal(Date.valueOf("2020-11-23")), row.getInt(10));
            assertEquals(
                    LocalDateTime.of(1999, 1, 1, 1, 1),
                    row.getTimestamp(11, 9).toLocalDateTime());
            assertEquals(DecimalData.fromBigDecimal(new BigDecimal(24), 5, 0), row.getDecimal(12, 5, 0));
            assertEquals(DecimalData.fromBigDecimal(new BigDecimal(25), 15, 0), row.getDecimal(13, 15, 0));
            assertEquals(DecimalData.fromBigDecimal(new BigDecimal(26), 20, 0), row.getDecimal(14, 20, 0));
            assertEquals("f27", row.getString(15).toString());
        }
        i++;
    }
    reader.close();
}
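The reader construction above turns each LogicalType into a DataType through TypeConversions.fromLogicalToDataType. A minimal, self-contained sketch of that conversion for FloatType (the class this page tracks), using only the flink-table-common API; the class name FloatTypeConversionSketch is ours:

import org.apache.flink.table.types.DataType;
import org.apache.flink.table.types.logical.FloatType;
import org.apache.flink.table.types.utils.TypeConversions;

public class FloatTypeConversionSketch {
    public static void main(String[] args) {
        // FloatType is the logical 4-byte single-precision type (SQL FLOAT).
        DataType dataType = TypeConversions.fromLogicalToDataType(new FloatType());
        System.out.println(dataType);                      // FLOAT
        System.out.println(dataType.getConversionClass()); // class java.lang.Float
    }
}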
Use of org.apache.flink.table.types.logical.FloatType in project flink by apache.
From the class ParquetColumnarRowInputFormatTest, method innerTestPartitionValues:
private void innerTestPartitionValues(
        Path testPath, List<String> partitionKeys, boolean nullPartValue) throws IOException {
    LogicalType[] fieldTypes = new LogicalType[] {
            new VarCharType(VarCharType.MAX_LENGTH), new BooleanType(), new TinyIntType(),
            new SmallIntType(), new IntType(), new BigIntType(), new FloatType(),
            new DoubleType(), new TimestampType(9), new DecimalType(5, 0),
            new DecimalType(15, 0), new DecimalType(20, 0), new DecimalType(5, 0),
            new DecimalType(15, 0), new DecimalType(20, 0), new BooleanType(),
            new DateType(), new TimestampType(9), new DoubleType(), new TinyIntType(),
            new SmallIntType(), new IntType(), new BigIntType(), new FloatType(),
            new DecimalType(5, 0), new DecimalType(15, 0), new DecimalType(20, 0),
            new VarCharType(VarCharType.MAX_LENGTH)};
    RowType rowType =
            RowType.of(fieldTypes, IntStream.range(0, 28).mapToObj(i -> "f" + i).toArray(String[]::new));
    // Projection: positions 0-2 select f7, f2, f4 from the file;
    // positions 3-15 select the partition columns f15..f27.
    int[] projected = new int[] {7, 2, 4, 15, 19, 20, 21, 22, 23, 18, 16, 17, 24, 25, 26, 27};
    RowType producedType = new RowType(
            Arrays.stream(projected)
                    .mapToObj(i -> rowType.getFields().get(i))
                    .collect(Collectors.toList()));
    ParquetColumnarRowInputFormat<FileSourceSplit> format =
            ParquetColumnarRowInputFormat.createPartitionedFormat(
                    new Configuration(),
                    producedType,
                    InternalTypeInfo.of(producedType),
                    partitionKeys,
                    PartitionFieldExtractor.forFileSystem("my_default_value"),
                    500,
                    false,
                    true);
    FileStatus fileStatus = testPath.getFileSystem().getFileStatus(testPath);
    AtomicInteger cnt = new AtomicInteger(0);
    forEachRemaining(
            format.createReader(
                    EMPTY_CONF,
                    new FileSourceSplit(
                            "id", testPath, 0, Long.MAX_VALUE,
                            fileStatus.getModificationTime(), fileStatus.getLen())),
            row -> {
                int i = cnt.get();
                // Common values read from the Parquet file itself.
                assertEquals(i, row.getDouble(0), 0);
                assertEquals((byte) i, row.getByte(1));
                assertEquals(i, row.getInt(2));
                // Partition values extracted from the file path.
                if (nullPartValue) {
                    for (int j = 3; j < 16; j++) {
                        assertTrue(row.isNullAt(j));
                    }
                } else {
                    assertTrue(row.getBoolean(3));
                    assertEquals(9, row.getByte(4));
                    assertEquals(10, row.getShort(5));
                    assertEquals(11, row.getInt(6));
                    assertEquals(12, row.getLong(7));
                    assertEquals(13, row.getFloat(8), 0);
                    assertEquals(6.6, row.getDouble(9), 0);
                    assertEquals(DateTimeUtils.toInternal(Date.valueOf("2020-11-23")), row.getInt(10));
                    assertEquals(
                            LocalDateTime.of(1999, 1, 1, 1, 1),
                            row.getTimestamp(11, 9).toLocalDateTime());
                    assertEquals(DecimalData.fromBigDecimal(new BigDecimal(24), 5, 0), row.getDecimal(12, 5, 0));
                    assertEquals(DecimalData.fromBigDecimal(new BigDecimal(25), 15, 0), row.getDecimal(13, 15, 0));
                    assertEquals(DecimalData.fromBigDecimal(new BigDecimal(26), 20, 0), row.getDecimal(14, 20, 0));
                    assertEquals("f27", row.getString(15).toString());
                }
                cnt.incrementAndGet();
            });
}
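The producedType construction above (select and reorder fields by index) generalizes to a small helper. A sketch under that reading; the project method and class name are ours, not Flink API:

import java.util.Arrays;
import java.util.stream.Collectors;

import org.apache.flink.table.types.logical.RowType;

final class RowTypeProjection {
    // Returns a RowType containing exactly the fields at the given indices,
    // in the given order, mirroring the inline stream used in the test above.
    static RowType project(RowType rowType, int[] fieldIndices) {
        return new RowType(
                Arrays.stream(fieldIndices)
                        .mapToObj(i -> rowType.getFields().get(i))
                        .collect(Collectors.toList()));
    }
}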
Use of org.apache.flink.table.types.logical.FloatType in project flink by apache.
From the class ParquetColumnarRowInputFormatTest, method testReadingSplit:
private int testReadingSplit(
        List<Integer> expected, Path path, long splitStart, long splitLength, long seekToRow)
        throws IOException {
    LogicalType[] fieldTypes = new LogicalType[] {
            new VarCharType(VarCharType.MAX_LENGTH), new BooleanType(), new TinyIntType(),
            new SmallIntType(), new IntType(), new BigIntType(), new FloatType(),
            new DoubleType(), new TimestampType(9), new DecimalType(5, 0),
            new DecimalType(15, 0), new DecimalType(20, 0), new DecimalType(5, 0),
            new DecimalType(15, 0), new DecimalType(20, 0)};
    ParquetColumnarRowInputFormat<FileSourceSplit> format =
            new ParquetColumnarRowInputFormat<>(
                    new Configuration(),
                    RowType.of(
                            fieldTypes,
                            new String[] {
                                "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
                                "f8", "f9", "f10", "f11", "f12", "f13", "f14"
                            }),
                    null,
                    500,
                    false,
                    true);
    // Validate Java serialization of the format.
    try {
        InstantiationUtil.clone(format);
    } catch (ClassNotFoundException e) {
        throw new IOException(e);
    }
    FileStatus fileStatus = path.getFileSystem().getFileStatus(path);
    BulkFormat.Reader<RowData> reader =
            format.restoreReader(
                    EMPTY_CONF,
                    new FileSourceSplit(
                            "id",
                            path,
                            splitStart,
                            splitLength,
                            fileStatus.getModificationTime(),
                            fileStatus.getLen(),
                            new String[0],
                            new CheckpointedPosition(CheckpointedPosition.NO_OFFSET, seekToRow)));
    AtomicInteger cnt = new AtomicInteger(0);
    final AtomicReference<RowData> previousRow = new AtomicReference<>();
    forEachRemaining(reader, row -> {
        if (previousRow.get() == null) {
            previousRow.set(row);
        } else {
            // ParquetColumnarRowInputFormat should only ever hand out one reused row instance.
            assertSame(previousRow.get(), row);
        }
        Integer v = expected.get(cnt.get());
        if (v == null) {
            // A null entry in expected means every field of the row is null.
            for (int j = 0; j < 15; j++) {
                assertTrue(row.isNullAt(j));
            }
        } else {
            assertEquals("" + v, row.getString(0).toString());
            assertEquals(v % 2 == 0, row.getBoolean(1));
            assertEquals(v.byteValue(), row.getByte(2));
            assertEquals(v.shortValue(), row.getShort(3));
            assertEquals(v.intValue(), row.getInt(4));
            assertEquals(v.longValue(), row.getLong(5));
            assertEquals(v.floatValue(), row.getFloat(6), 0);
            assertEquals(v.doubleValue(), row.getDouble(7), 0);
            assertEquals(toDateTime(v), row.getTimestamp(8, 9).toLocalDateTime());
            assertEquals(BigDecimal.valueOf(v), row.getDecimal(9, 5, 0).toBigDecimal());
            assertEquals(BigDecimal.valueOf(v), row.getDecimal(10, 15, 0).toBigDecimal());
            assertEquals(BigDecimal.valueOf(v), row.getDecimal(11, 20, 0).toBigDecimal());
            assertEquals(BigDecimal.valueOf(v), row.getDecimal(12, 5, 0).toBigDecimal());
            assertEquals(BigDecimal.valueOf(v), row.getDecimal(13, 15, 0).toBigDecimal());
            assertEquals(BigDecimal.valueOf(v), row.getDecimal(14, 20, 0).toBigDecimal());
        }
        cnt.incrementAndGet();
    });
    return cnt.get();
}
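Both tests drain the reader through a forEachRemaining helper defined in the test class. A plausible sketch of such a helper, assuming Flink's BulkFormat batch API (readBatch / RecordIterator / RecordAndPosition); the actual helper may differ. Because the columnar format reuses a single row instance (the assertSame check above), the consumer must finish with each row before the next one is fetched:

import java.io.IOException;
import java.util.function.Consumer;

import org.apache.flink.connector.file.src.reader.BulkFormat;
import org.apache.flink.connector.file.src.util.RecordAndPosition;

final class ReaderDrain {
    // Drains the reader batch by batch and hands each record to the consumer.
    static <T> void forEachRemaining(BulkFormat.Reader<T> reader, Consumer<? super T> consumer)
            throws IOException {
        try {
            BulkFormat.RecordIterator<T> batch;
            while ((batch = reader.readBatch()) != null) {
                RecordAndPosition<T> record;
                while ((record = batch.next()) != null) {
                    consumer.accept(record.getRecord());
                }
                // Return the batch's resources (e.g. pooled memory) to the format.
                batch.releaseBatch();
            }
        } finally {
            reader.close();
        }
    }
}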
Use of org.apache.flink.table.types.logical.FloatType in project flink by apache.
From the class LogicalTypeJsonSerdeTest, method testLogicalTypeSerde:
private static List<LogicalType> testLogicalTypeSerde() {
    final List<LogicalType> types = Arrays.asList(
            new BooleanType(), new TinyIntType(), new SmallIntType(), new IntType(),
            new BigIntType(), new FloatType(), new DoubleType(),
            new DecimalType(10), new DecimalType(15, 5),
            CharType.ofEmptyLiteral(), new CharType(), new CharType(5),
            VarCharType.ofEmptyLiteral(), new VarCharType(), new VarCharType(5),
            BinaryType.ofEmptyLiteral(), new BinaryType(), new BinaryType(100),
            VarBinaryType.ofEmptyLiteral(), new VarBinaryType(), new VarBinaryType(100),
            new DateType(), new TimeType(), new TimeType(3),
            new TimestampType(), new TimestampType(3),
            new LocalZonedTimestampType(false, TimestampKind.PROCTIME, 3),
            new TimestampType(false, TimestampKind.ROWTIME, 3),
            new ZonedTimestampType(), new ZonedTimestampType(3),
            new ZonedTimestampType(false, TimestampKind.ROWTIME, 3),
            new LocalZonedTimestampType(), new LocalZonedTimestampType(3),
            new LocalZonedTimestampType(false, TimestampKind.PROCTIME, 3),
            new LocalZonedTimestampType(false, TimestampKind.ROWTIME, 3),
            new DayTimeIntervalType(DayTimeIntervalType.DayTimeResolution.DAY_TO_HOUR),
            new DayTimeIntervalType(false, DayTimeIntervalType.DayTimeResolution.DAY_TO_HOUR, 3, 6),
            new YearMonthIntervalType(YearMonthIntervalType.YearMonthResolution.YEAR_TO_MONTH),
            new YearMonthIntervalType(false, YearMonthIntervalType.YearMonthResolution.MONTH, 2),
            new ZonedTimestampType(), new LocalZonedTimestampType(),
            new LocalZonedTimestampType(false, TimestampKind.PROCTIME, 3),
            new SymbolType<>(),
            new ArrayType(new IntType(false)),
            new ArrayType(new LocalZonedTimestampType(false, TimestampKind.ROWTIME, 3)),
            new ArrayType(new ZonedTimestampType(false, TimestampKind.ROWTIME, 3)),
            new ArrayType(new TimestampType()),
            new ArrayType(CharType.ofEmptyLiteral()), new ArrayType(VarCharType.ofEmptyLiteral()),
            new ArrayType(BinaryType.ofEmptyLiteral()), new ArrayType(VarBinaryType.ofEmptyLiteral()),
            new MapType(new BigIntType(), new IntType(false)),
            new MapType(new TimestampType(false, TimestampKind.ROWTIME, 3), new ZonedTimestampType()),
            new MapType(CharType.ofEmptyLiteral(), CharType.ofEmptyLiteral()),
            new MapType(VarCharType.ofEmptyLiteral(), VarCharType.ofEmptyLiteral()),
            new MapType(BinaryType.ofEmptyLiteral(), BinaryType.ofEmptyLiteral()),
            new MapType(VarBinaryType.ofEmptyLiteral(), VarBinaryType.ofEmptyLiteral()),
            new MultisetType(new IntType(false)), new MultisetType(new TimestampType()),
            new MultisetType(new TimestampType(true, TimestampKind.ROWTIME, 3)),
            new MultisetType(CharType.ofEmptyLiteral()), new MultisetType(VarCharType.ofEmptyLiteral()),
            new MultisetType(BinaryType.ofEmptyLiteral()), new MultisetType(VarBinaryType.ofEmptyLiteral()),
            RowType.of(new BigIntType(), new IntType(false), new VarCharType(200)),
            RowType.of(
                    new LogicalType[] {new BigIntType(), new IntType(false), new VarCharType(200)},
                    new String[] {"f1", "f2", "f3"}),
            RowType.of(
                    new TimestampType(false, TimestampKind.ROWTIME, 3),
                    new TimestampType(false, TimestampKind.REGULAR, 3),
                    new ZonedTimestampType(false, TimestampKind.ROWTIME, 3),
                    new ZonedTimestampType(false, TimestampKind.REGULAR, 3),
                    new LocalZonedTimestampType(false, TimestampKind.ROWTIME, 3),
                    new LocalZonedTimestampType(false, TimestampKind.PROCTIME, 3),
                    new LocalZonedTimestampType(false, TimestampKind.REGULAR, 3)),
            RowType.of(
                    CharType.ofEmptyLiteral(), VarCharType.ofEmptyLiteral(),
                    BinaryType.ofEmptyLiteral(), VarBinaryType.ofEmptyLiteral()),
            // registered structured type
            StructuredType.newBuilder(ObjectIdentifier.of("cat", "db", "structuredType"), PojoClass.class)
                    .attributes(Arrays.asList(
                            new StructuredType.StructuredAttribute("f0", new IntType(true)),
                            new StructuredType.StructuredAttribute("f1", new BigIntType(true)),
                            new StructuredType.StructuredAttribute("f2", new VarCharType(200), "desc")))
                    .comparison(StructuredType.StructuredComparison.FULL)
                    .setFinal(false)
                    .setInstantiable(false)
                    .superType(StructuredType.newBuilder(ObjectIdentifier.of("cat", "db", "structuredType2"))
                            .attributes(Collections.singletonList(
                                    new StructuredType.StructuredAttribute("f0", new BigIntType(false))))
                            .build())
                    .description("description for StructuredType")
                    .build(),
            // unregistered structured type
            StructuredType.newBuilder(PojoClass.class)
                    .attributes(Arrays.asList(
                            new StructuredType.StructuredAttribute("f0", new IntType(true)),
                            new StructuredType.StructuredAttribute("f1", new BigIntType(true)),
                            new StructuredType.StructuredAttribute("f2", new VarCharType(200), "desc")))
                    .build(),
            // registered distinct type
            DistinctType.newBuilder(ObjectIdentifier.of("cat", "db", "distinctType"), new VarCharType(5)).build(),
            DistinctType.newBuilder(ObjectIdentifier.of("cat", "db", "distinctType"), new VarCharType(false, 5)).build(),
            // custom RawType
            new RawType<>(LocalDateTime.class, LocalDateTimeSerializer.INSTANCE),
            // external RawType
            new RawType<>(Row.class, ExternalSerializer.of(DataTypes.ROW(DataTypes.INT(), DataTypes.STRING()))));
    final List<LogicalType> mutableTypes = new ArrayList<>(types);
    // RawType for MapView
    addRawTypesForMapView(mutableTypes, new VarCharType(100), new VarCharType(100));
    addRawTypesForMapView(mutableTypes, new VarCharType(100), new BigIntType());
    addRawTypesForMapView(mutableTypes, new BigIntType(), new VarCharType(100));
    addRawTypesForMapView(mutableTypes, new BigIntType(), new BigIntType());
    // RawType for ListView
    addRawTypesForListView(mutableTypes, new VarCharType(100));
    addRawTypesForListView(mutableTypes, new BigIntType());
    // RawType for custom MapView
    mutableTypes.add(
            DataViewUtils.adjustDataViews(
                            MapView.newMapViewDataType(
                                    DataTypes.STRING().toInternal(),
                                    DataTypes.STRING().bridgedTo(byte[].class)),
                            false)
                    .getLogicalType());
    final List<LogicalType> allTypes = new ArrayList<>();
    // consider nullable
    for (LogicalType type : mutableTypes) {
        allTypes.add(type.copy(true));
        allTypes.add(type.copy(false));
    }
    // ignore nullable for NullType
    allTypes.add(new NullType());
    return allTypes;
}
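The final loop expands every type into a nullable and a non-nullable variant via copy(boolean). For FloatType the two variants render as follows (a minimal sketch using only LogicalType's public API; the class name NullableVariantsSketch is ours):

import org.apache.flink.table.types.logical.FloatType;
import org.apache.flink.table.types.logical.LogicalType;

public class NullableVariantsSketch {
    public static void main(String[] args) {
        LogicalType floatType = new FloatType();
        // copy(boolean) returns a copy of the type with the requested nullability.
        System.out.println(floatType.copy(true).asSerializableString());  // FLOAT
        System.out.println(floatType.copy(false).asSerializableString()); // FLOAT NOT NULL
    }
}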
Use of org.apache.flink.table.types.logical.FloatType in project flink by apache.
From the class ArrowReaderWriterTest, method init:
@BeforeClass
public static void init() {
    fieldTypes.add(new TinyIntType());
    fieldTypes.add(new SmallIntType());
    fieldTypes.add(new IntType());
    fieldTypes.add(new BigIntType());
    fieldTypes.add(new BooleanType());
    fieldTypes.add(new FloatType());
    fieldTypes.add(new DoubleType());
    fieldTypes.add(new VarCharType());
    fieldTypes.add(new VarBinaryType());
    fieldTypes.add(new DecimalType(10, 3));
    fieldTypes.add(new DateType());
    fieldTypes.add(new TimeType(0));
    fieldTypes.add(new TimeType(2));
    fieldTypes.add(new TimeType(4));
    fieldTypes.add(new TimeType(8));
    fieldTypes.add(new LocalZonedTimestampType(0));
    fieldTypes.add(new LocalZonedTimestampType(2));
    fieldTypes.add(new LocalZonedTimestampType(4));
    fieldTypes.add(new LocalZonedTimestampType(8));
    fieldTypes.add(new TimestampType(0));
    fieldTypes.add(new TimestampType(2));
    fieldTypes.add(new TimestampType(4));
    fieldTypes.add(new TimestampType(8));
    fieldTypes.add(new ArrayType(new VarCharType()));
    rowFieldType = new RowType(Arrays.asList(
            new RowType.RowField("a", new IntType()),
            new RowType.RowField("b", new VarCharType()),
            new RowType.RowField("c", new ArrayType(new VarCharType())),
            new RowType.RowField("d", new TimestampType(2)),
            new RowType.RowField("e", new RowType(Arrays.asList(
                    new RowType.RowField("e1", new IntType()),
                    new RowType.RowField("e2", new VarCharType()))))));
    fieldTypes.add(rowFieldType);
    List<RowType.RowField> rowFields = new ArrayList<>();
    for (int i = 0; i < fieldTypes.size(); i++) {
        rowFields.add(new RowType.RowField("f" + i, fieldTypes.get(i)));
    }
    rowType = new RowType(rowFields);
    allocator = ArrowUtils.getRootAllocator().newChildAllocator("stdout", 0, Long.MAX_VALUE);
}
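For the Arrow reader/writer, each LogicalType in rowType needs an Arrow counterpart. A hedged sketch of that mapping, assuming a toArrowSchema(RowType) helper on the same ArrowUtils class the test uses for its allocator; verify the exact signature against your Flink version:

import org.apache.arrow.vector.types.pojo.Schema;

import org.apache.flink.table.runtime.arrow.ArrowUtils;
import org.apache.flink.table.types.logical.FloatType;
import org.apache.flink.table.types.logical.RowType;

public class ArrowSchemaSketch {
    public static void main(String[] args) {
        // A single-field row; FloatType should map to Arrow's 32-bit FloatingPoint type.
        RowType rowType = RowType.of(new FloatType());
        Schema schema = ArrowUtils.toArrowSchema(rowType);
        System.out.println(schema);
    }
}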