Use of org.apache.flink.table.types.logical.IntType in project flink by apache.
The class AvroSchemaConverter, method extractValueTypeToAvroMap.
public static LogicalType extractValueTypeToAvroMap(LogicalType type) {
    LogicalType keyType;
    LogicalType valueType;
    if (type instanceof MapType) {
        MapType mapType = (MapType) type;
        keyType = mapType.getKeyType();
        valueType = mapType.getValueType();
    } else {
        // a multiset is mapped to an Avro map from element to its count, hence INT values
        MultisetType multisetType = (MultisetType) type;
        keyType = multisetType.getElementType();
        valueType = new IntType();
    }
    if (!keyType.is(LogicalTypeFamily.CHARACTER_STRING)) {
        throw new UnsupportedOperationException(
                "Avro format doesn't support non-string as key type of map. The key type is: "
                        + keyType.asSummaryString());
    }
    return valueType;
}
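A minimal usage sketch of the helper above; the inputs are built from the same Flink logical types the method handles, and the expected results follow directly from the code shown (nothing beyond it is assumed):

// MAP<STRING, BIGINT>: the declared value type is returned as-is.
LogicalType mapValue = AvroSchemaConverter.extractValueTypeToAvroMap(
        new MapType(new VarCharType(VarCharType.MAX_LENGTH), new BigIntType()));
// mapValue is BigIntType

// MULTISET<STRING>: modeled as a map from element to its count, so INT is returned.
LogicalType multisetValue = AvroSchemaConverter.extractValueTypeToAvroMap(
        new MultisetType(new VarCharType(VarCharType.MAX_LENGTH)));
// multisetValue is IntType

// MAP<INT, BIGINT>: a non-string key type throws UnsupportedOperationException.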
Use of org.apache.flink.table.types.logical.IntType in project flink by apache.
The class ParquetColumnarRowSplitReaderTest, method testProject.
@Test
public void testProject() throws IOException {
    // prepare parquet file
    int number = 1000;
    List<Row> records = new ArrayList<>(number);
    for (int i = 0; i < number; i++) {
        Integer v = i;
        records.add(newRow(v));
    }
    Path testPath = createTempParquetFile(TEMPORARY_FOLDER.newFolder(), PARQUET_SCHEMA, records, rowGroupSize);
    // test reader
    LogicalType[] fieldTypes = new LogicalType[] { new DoubleType(), new TinyIntType(), new IntType() };
    ParquetColumnarRowSplitReader reader = new ParquetColumnarRowSplitReader(
            false,
            true,
            new Configuration(),
            fieldTypes,
            new String[] { "f7", "f2", "f4" },
            VectorizedColumnBatch::new,
            500,
            new org.apache.hadoop.fs.Path(testPath.getPath()),
            0,
            Long.MAX_VALUE);
    int i = 0;
    while (!reader.reachedEnd()) {
        ColumnarRowData row = reader.nextRecord();
        assertEquals(i, row.getDouble(0), 0);
        assertEquals((byte) i, row.getByte(1));
        assertEquals(i, row.getInt(2));
        i++;
    }
    reader.close();
}
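The projection contract the test relies on is positional: the i-th entry of fieldTypes describes the i-th projected column name, and result columns come back in projection order rather than file order. A restated sketch of the pairing used above (no new API assumed):

LogicalType[] projectedTypes = new LogicalType[] {
        new DoubleType(),  // "f7" -> row.getDouble(0)
        new TinyIntType(), // "f2" -> row.getByte(1)
        new IntType()      // "f4" -> row.getInt(2)
};
String[] projectedNames = new String[] { "f7", "f2", "f4" };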
Use of org.apache.flink.table.types.logical.IntType in project flink by apache.
The class ParquetColumnarRowSplitReaderTest, method innerTestPartitionValues.
private void innerTestPartitionValues(Path testPath, Map<String, Object> partSpec, boolean nullPartValue) throws IOException {
    LogicalType[] fieldTypes = new LogicalType[] {
            // f0..f14: columns physically stored in the parquet file
            new VarCharType(VarCharType.MAX_LENGTH), new BooleanType(), new TinyIntType(),
            new SmallIntType(), new IntType(), new BigIntType(), new FloatType(),
            new DoubleType(), new TimestampType(9), new DecimalType(5, 0),
            new DecimalType(15, 0), new DecimalType(20, 0), new DecimalType(5, 0),
            new DecimalType(15, 0), new DecimalType(20, 0),
            // f15..f27: partition columns supplied through partSpec
            new BooleanType(), new DateType(), new TimestampType(9), new DoubleType(),
            new TinyIntType(), new SmallIntType(), new IntType(), new BigIntType(),
            new FloatType(), new DecimalType(5, 0), new DecimalType(15, 0),
            new DecimalType(20, 0), new VarCharType(VarCharType.MAX_LENGTH) };
    ParquetColumnarRowSplitReader reader = ParquetSplitReaderUtil.genPartColumnarRowReader(
            false,
            true,
            new Configuration(),
            IntStream.range(0, 28).mapToObj(i -> "f" + i).toArray(String[]::new),
            Arrays.stream(fieldTypes).map(TypeConversions::fromLogicalToDataType).toArray(DataType[]::new),
            partSpec,
            // project three regular columns (f7, f2, f4) followed by the partition columns
            new int[] { 7, 2, 4, 15, 19, 20, 21, 22, 23, 18, 16, 17, 24, 25, 26, 27 },
            rowGroupSize,
            new Path(testPath.getPath()),
            0,
            Long.MAX_VALUE);
    int i = 0;
    while (!reader.reachedEnd()) {
        ColumnarRowData row = reader.nextRecord();
        // common values
        assertEquals(i, row.getDouble(0), 0);
        assertEquals((byte) i, row.getByte(1));
        assertEquals(i, row.getInt(2));
        // partition values
        if (nullPartValue) {
            for (int j = 3; j < 16; j++) {
                assertTrue(row.isNullAt(j));
            }
        } else {
            assertTrue(row.getBoolean(3));
            assertEquals(9, row.getByte(4));
            assertEquals(10, row.getShort(5));
            assertEquals(11, row.getInt(6));
            assertEquals(12, row.getLong(7));
            assertEquals(13, row.getFloat(8), 0);
            assertEquals(6.6, row.getDouble(9), 0);
            assertEquals(DateTimeUtils.toInternal(Date.valueOf("2020-11-23")), row.getInt(10));
            assertEquals(LocalDateTime.of(1999, 1, 1, 1, 1), row.getTimestamp(11, 9).toLocalDateTime());
            assertEquals(DecimalData.fromBigDecimal(new BigDecimal(24), 5, 0), row.getDecimal(12, 5, 0));
            assertEquals(DecimalData.fromBigDecimal(new BigDecimal(25), 15, 0), row.getDecimal(13, 15, 0));
            assertEquals(DecimalData.fromBigDecimal(new BigDecimal(26), 20, 0), row.getDecimal(14, 20, 0));
            assertEquals("f27", row.getString(15).toString());
        }
        i++;
    }
    reader.close();
}
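The asserted partition values must come in through partSpec. The map the enclosing test actually builds is not shown here, so the following is a hypothetical spec, with key names and value classes inferred from the field types and the asserted values:

Map<String, Object> partSpec = new HashMap<>();
partSpec.put("f15", true);                                // row.getBoolean(3)
partSpec.put("f16", Date.valueOf("2020-11-23"));          // row.getInt(10) as epoch days
partSpec.put("f17", LocalDateTime.of(1999, 1, 1, 1, 1));  // row.getTimestamp(11, 9)
partSpec.put("f18", 6.6);                                 // row.getDouble(9)
partSpec.put("f19", (byte) 9);                            // row.getByte(4)
partSpec.put("f20", (short) 10);                          // row.getShort(5)
partSpec.put("f21", 11);                                  // row.getInt(6)
partSpec.put("f22", 12L);                                 // row.getLong(7)
partSpec.put("f23", 13f);                                 // row.getFloat(8)
partSpec.put("f24", new BigDecimal(24));                  // row.getDecimal(12, 5, 0)
partSpec.put("f25", new BigDecimal(25));                  // row.getDecimal(13, 15, 0)
partSpec.put("f26", new BigDecimal(26));                  // row.getDecimal(14, 20, 0)
partSpec.put("f27", "f27");                               // row.getString(15)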
Use of org.apache.flink.table.types.logical.IntType in project flink by apache.
The class ParquetColumnarRowInputFormatTest, method innerTestPartitionValues.
private void innerTestPartitionValues(Path testPath, List<String> partitionKeys, boolean nullPartValue) throws IOException {
    LogicalType[] fieldTypes = new LogicalType[] {
            // f0..f14: columns physically stored in the parquet file
            new VarCharType(VarCharType.MAX_LENGTH), new BooleanType(), new TinyIntType(),
            new SmallIntType(), new IntType(), new BigIntType(), new FloatType(),
            new DoubleType(), new TimestampType(9), new DecimalType(5, 0),
            new DecimalType(15, 0), new DecimalType(20, 0), new DecimalType(5, 0),
            new DecimalType(15, 0), new DecimalType(20, 0),
            // f15..f27: partition columns resolved from the file path via partitionKeys
            new BooleanType(), new DateType(), new TimestampType(9), new DoubleType(),
            new TinyIntType(), new SmallIntType(), new IntType(), new BigIntType(),
            new FloatType(), new DecimalType(5, 0), new DecimalType(15, 0),
            new DecimalType(20, 0), new VarCharType(VarCharType.MAX_LENGTH) };
    RowType rowType = RowType.of(fieldTypes, IntStream.range(0, 28).mapToObj(i -> "f" + i).toArray(String[]::new));
    // project three regular columns (f7, f2, f4) followed by the partition columns
    int[] projected = new int[] { 7, 2, 4, 15, 19, 20, 21, 22, 23, 18, 16, 17, 24, 25, 26, 27 };
    RowType producedType = new RowType(
            Arrays.stream(projected).mapToObj(i -> rowType.getFields().get(i)).collect(Collectors.toList()));
    ParquetColumnarRowInputFormat<FileSourceSplit> format =
            ParquetColumnarRowInputFormat.createPartitionedFormat(
                    new Configuration(),
                    producedType,
                    InternalTypeInfo.of(producedType),
                    partitionKeys,
                    PartitionFieldExtractor.forFileSystem("my_default_value"),
                    500,
                    false,
                    true);
    FileStatus fileStatus = testPath.getFileSystem().getFileStatus(testPath);
    AtomicInteger cnt = new AtomicInteger(0);
    forEachRemaining(
            format.createReader(
                    EMPTY_CONF,
                    new FileSourceSplit(
                            "id", testPath, 0, Long.MAX_VALUE,
                            fileStatus.getModificationTime(), fileStatus.getLen())),
            row -> {
                int i = cnt.get();
                // common values
                assertEquals(i, row.getDouble(0), 0);
                assertEquals((byte) i, row.getByte(1));
                assertEquals(i, row.getInt(2));
                // partition values
                if (nullPartValue) {
                    for (int j = 3; j < 16; j++) {
                        assertTrue(row.isNullAt(j));
                    }
                } else {
                    assertTrue(row.getBoolean(3));
                    assertEquals(9, row.getByte(4));
                    assertEquals(10, row.getShort(5));
                    assertEquals(11, row.getInt(6));
                    assertEquals(12, row.getLong(7));
                    assertEquals(13, row.getFloat(8), 0);
                    assertEquals(6.6, row.getDouble(9), 0);
                    assertEquals(DateTimeUtils.toInternal(Date.valueOf("2020-11-23")), row.getInt(10));
                    assertEquals(LocalDateTime.of(1999, 1, 1, 1, 1), row.getTimestamp(11, 9).toLocalDateTime());
                    assertEquals(DecimalData.fromBigDecimal(new BigDecimal(24), 5, 0), row.getDecimal(12, 5, 0));
                    assertEquals(DecimalData.fromBigDecimal(new BigDecimal(25), 15, 0), row.getDecimal(13, 15, 0));
                    assertEquals(DecimalData.fromBigDecimal(new BigDecimal(26), 20, 0), row.getDecimal(14, 20, 0));
                    assertEquals("f27", row.getString(15).toString());
                }
                cnt.incrementAndGet();
            });
}
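Unlike the split-reader variant, this format resolves partition values from the file path. The list the test passes in is not shown, so this is a hypothetical partition key list consistent with the projected partition columns above; PartitionFieldExtractor.forFileSystem("my_default_value") presumably maps the default partition name in the path back to null.

List<String> partitionKeys = Arrays.asList(
        "f15", "f16", "f17", "f18", "f19", "f20", "f21",
        "f22", "f23", "f24", "f25", "f26", "f27");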
Use of org.apache.flink.table.types.logical.IntType in project flink by apache.
The class ParquetColumnarRowInputFormatTest, method testReadingSplit.
private int testReadingSplit(List<Integer> expected, Path path, long splitStart, long splitLength, long seekToRow) throws IOException {
    LogicalType[] fieldTypes = new LogicalType[] {
            new VarCharType(VarCharType.MAX_LENGTH), new BooleanType(), new TinyIntType(),
            new SmallIntType(), new IntType(), new BigIntType(), new FloatType(),
            new DoubleType(), new TimestampType(9), new DecimalType(5, 0),
            new DecimalType(15, 0), new DecimalType(20, 0), new DecimalType(5, 0),
            new DecimalType(15, 0), new DecimalType(20, 0) };
    ParquetColumnarRowInputFormat format = new ParquetColumnarRowInputFormat(
            new Configuration(),
            RowType.of(fieldTypes, IntStream.range(0, 15).mapToObj(i -> "f" + i).toArray(String[]::new)),
            null,
            500,
            false,
            true);
    // validate java serialization
    try {
        InstantiationUtil.clone(format);
    } catch (ClassNotFoundException e) {
        throw new IOException(e);
    }
    FileStatus fileStatus = path.getFileSystem().getFileStatus(path);
    BulkFormat.Reader<RowData> reader = format.restoreReader(
            EMPTY_CONF,
            new FileSourceSplit(
                    "id",
                    path,
                    splitStart,
                    splitLength,
                    fileStatus.getModificationTime(),
                    fileStatus.getLen(),
                    new String[0],
                    new CheckpointedPosition(CheckpointedPosition.NO_OFFSET, seekToRow)));
    AtomicInteger cnt = new AtomicInteger(0);
    final AtomicReference<RowData> previousRow = new AtomicReference<>();
    forEachRemaining(reader, row -> {
        if (previousRow.get() == null) {
            previousRow.set(row);
        } else {
            // ParquetColumnarRowInputFormat should only have one row instance.
            assertSame(previousRow.get(), row);
        }
        Integer v = expected.get(cnt.get());
        if (v == null) {
            for (int j = 0; j < 15; j++) {
                assertTrue(row.isNullAt(j));
            }
        } else {
            assertEquals("" + v, row.getString(0).toString());
            assertEquals(v % 2 == 0, row.getBoolean(1));
            assertEquals(v.byteValue(), row.getByte(2));
            assertEquals(v.shortValue(), row.getShort(3));
            assertEquals(v.intValue(), row.getInt(4));
            assertEquals(v.longValue(), row.getLong(5));
            assertEquals(v.floatValue(), row.getFloat(6), 0);
            assertEquals(v.doubleValue(), row.getDouble(7), 0);
            assertEquals(toDateTime(v), row.getTimestamp(8, 9).toLocalDateTime());
            assertEquals(BigDecimal.valueOf(v), row.getDecimal(9, 5, 0).toBigDecimal());
            assertEquals(BigDecimal.valueOf(v), row.getDecimal(10, 15, 0).toBigDecimal());
            assertEquals(BigDecimal.valueOf(v), row.getDecimal(11, 20, 0).toBigDecimal());
            assertEquals(BigDecimal.valueOf(v), row.getDecimal(12, 5, 0).toBigDecimal());
            assertEquals(BigDecimal.valueOf(v), row.getDecimal(13, 15, 0).toBigDecimal());
            assertEquals(BigDecimal.valueOf(v), row.getDecimal(14, 20, 0).toBigDecimal());
        }
        cnt.incrementAndGet();
    });
    return cnt.get();
}
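The assertSame check above pins down an important property: the format reuses one ColumnarRowData instance across records, so rows must be copied before being buffered. A minimal sketch of such a copy, assuming InternalSerializers from flink-table-runtime and the fieldTypes array built in this method:

TypeSerializer<RowData> serializer = InternalSerializers.create(RowType.of(fieldTypes));
List<RowData> buffered = new ArrayList<>();
forEachRemaining(reader, row -> buffered.add(serializer.copy(row)));
// each element of buffered is now an independent copy, safe to keep after reading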