Use of org.apache.flink.table.types.logical.TimestampType in project flink by apache.
The class StreamExecDataStreamScan, method getRowtimeExpression:
private Optional<RexNode> getRowtimeExpression(FlinkRelBuilder relBuilder) {
    final List<Integer> fields = Arrays.stream(fieldIndexes).boxed().collect(Collectors.toList());
    if (!fields.contains(ROWTIME_STREAM_MARKER)) {
        return Optional.empty();
    } else {
        String rowtimeField = fieldNames[fields.indexOf(ROWTIME_STREAM_MARKER)];
        // get the expression that extracts the timestamp
        LogicalType logicalType = fromDataTypeToLogicalType(sourceType);
        if (logicalType instanceof RowType) {
            RowType rowType = (RowType) logicalType;
            if (rowType.getFieldNames().contains(rowtimeField)
                    && TypeCheckUtils.isRowTime(rowType.getTypeAt(rowType.getFieldIndex(rowtimeField)))) {
                // if the rowtime field already exists in the data stream, use the default rowtime
                return Optional.empty();
            }
        }
        // otherwise, read the timestamp from the StreamRecord and cast it to a rowtime TIMESTAMP(3)
        return Optional.of(
                relBuilder.cast(
                        relBuilder.call(FlinkSqlOperatorTable.STREAMRECORD_TIMESTAMP),
                        relBuilder
                                .getTypeFactory()
                                .createFieldTypeFromLogicalType(
                                        new TimestampType(true, TimestampKind.ROWTIME, 3))
                                .getSqlTypeName()));
    }
}
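The three-argument TimestampType constructor in the cast above is what attaches the rowtime attribute to the type. As a minimal, self-contained sketch of that constructor (the class name RowtimeTypeSketch is ours for illustration, assuming only flink-table-common on the classpath):

import org.apache.flink.table.types.logical.TimestampKind;
import org.apache.flink.table.types.logical.TimestampType;

public class RowtimeTypeSketch {
    public static void main(String[] args) {
        // a nullable TIMESTAMP(3) carrying the ROWTIME attribute, as in the cast above
        TimestampType rowtime = new TimestampType(true, TimestampKind.ROWTIME, 3);
        System.out.println(rowtime.getPrecision()); // 3
        System.out.println(rowtime.getKind());      // ROWTIME
        System.out.println(rowtime.isNullable());   // true
    }
}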
Use of org.apache.flink.table.types.logical.TimestampType in project flink by apache.
The class AbstractJdbcRowConverterTest, method testExternalLocalDateTimeToTimestamp:
@Test
public void testExternalLocalDateTimeToTimestamp() throws Exception {
    RowType rowType = RowType.of(new IntType(), new TimestampType(3));
    JdbcRowConverter rowConverter = new AbstractJdbcRowConverter(rowType) {
        private static final long serialVersionUID = 1L;

        @Override
        public String converterName() {
            return "test";
        }
    };
    ResultSet resultSet = Mockito.mock(ResultSet.class);
    Mockito.when(resultSet.getObject(1)).thenReturn(123);
    Mockito.when(resultSet.getObject(2)).thenReturn(LocalDateTime.parse("2021-04-07T00:00:05.999"));
    RowData res = rowConverter.toInternal(resultSet);
    assertEquals(123, res.getInt(0));
    assertEquals(
            LocalDateTime.parse("2021-04-07T00:00:05.999"),
            res.getTimestamp(1, 3).toLocalDateTime());
}
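The assertions above depend on the LocalDateTime -> TimestampData -> LocalDateTime round trip preserving millisecond precision, matching the TIMESTAMP(3) column. A minimal sketch of that round trip (ours, independent of the converter classes; the class name is hypothetical):

import java.time.LocalDateTime;

import org.apache.flink.table.data.TimestampData;

public class TimestampRoundTripSketch {
    public static void main(String[] args) {
        LocalDateTime ldt = LocalDateTime.parse("2021-04-07T00:00:05.999");
        // TimestampData stores milliseconds plus nanos-of-millisecond, so the .999 survives
        TimestampData ts = TimestampData.fromLocalDateTime(ldt);
        System.out.println(ts.toLocalDateTime().equals(ldt)); // true
    }
}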
Use of org.apache.flink.table.types.logical.TimestampType in project flink by apache.
The class OrcBulkRowDataWriterTest, method initInput:
@Before
public void initInput() {
    input = new ArrayList<>();
    fieldTypes = new LogicalType[4];
    fieldTypes[0] = new VarCharType();
    fieldTypes[1] = new IntType();
    List<RowType.RowField> arrayRowFieldList =
            Collections.singletonList(new RowType.RowField("_col2_col0", new VarCharType()));
    fieldTypes[2] = new ArrayType(new RowType(arrayRowFieldList));
    List<RowType.RowField> mapRowFieldList =
            Arrays.asList(
                    new RowType.RowField("_col3_col0", new VarCharType()),
                    new RowType.RowField("_col3_col1", new TimestampType()));
    fieldTypes[3] = new MapType(new VarCharType(), new RowType(mapRowFieldList));
    // first test row
    {
        GenericRowData rowData = new GenericRowData(4);
        rowData.setField(0, new BinaryStringData("_col_0_string_1"));
        rowData.setField(1, 1);
        GenericRowData arrayValue1 = new GenericRowData(1);
        arrayValue1.setField(0, new BinaryStringData("_col_2_row_0_string_1"));
        GenericRowData arrayValue2 = new GenericRowData(1);
        arrayValue2.setField(0, new BinaryStringData("_col_2_row_1_string_1"));
        GenericArrayData arrayData = new GenericArrayData(new Object[] {arrayValue1, arrayValue2});
        rowData.setField(2, arrayData);
        GenericRowData mapValue1 = new GenericRowData(2);
        mapValue1.setField(0, new BinaryStringData("_col_3_map_value_string_1"));
        mapValue1.setField(1, TimestampData.fromTimestamp(new Timestamp(3600000)));
        Map<StringData, RowData> mapDataMap = new HashMap<>();
        mapDataMap.put(new BinaryStringData("_col_3_map_key_1"), mapValue1);
        GenericMapData mapData = new GenericMapData(mapDataMap);
        rowData.setField(3, mapData);
        input.add(rowData);
    }
    // second test row
    {
        GenericRowData rowData = new GenericRowData(4);
        rowData.setField(0, new BinaryStringData("_col_0_string_2"));
        rowData.setField(1, 2);
        GenericRowData arrayValue1 = new GenericRowData(1);
        arrayValue1.setField(0, new BinaryStringData("_col_2_row_0_string_2"));
        GenericRowData arrayValue2 = new GenericRowData(1);
        arrayValue2.setField(0, new BinaryStringData("_col_2_row_1_string_2"));
        GenericArrayData arrayData = new GenericArrayData(new Object[] {arrayValue1, arrayValue2});
        rowData.setField(2, arrayData);
        GenericRowData mapValue1 = new GenericRowData(2);
        mapValue1.setField(0, new BinaryStringData("_col_3_map_value_string_2"));
        mapValue1.setField(1, TimestampData.fromTimestamp(new Timestamp(3600000)));
        Map<StringData, RowData> mapDataMap = new HashMap<>();
        mapDataMap.put(new BinaryStringData("_col_3_map_key_2"), mapValue1);
        GenericMapData mapData = new GenericMapData(mapDataMap);
        rowData.setField(3, mapData);
        input.add(rowData);
    }
}
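Worth noting: the RowField lists above can also be written with the RowType.of(LogicalType[], String[]) factory. A sketch (ours; the class name is hypothetical) of the same four-column schema built that way:

import org.apache.flink.table.types.logical.ArrayType;
import org.apache.flink.table.types.logical.IntType;
import org.apache.flink.table.types.logical.LogicalType;
import org.apache.flink.table.types.logical.MapType;
import org.apache.flink.table.types.logical.RowType;
import org.apache.flink.table.types.logical.TimestampType;
import org.apache.flink.table.types.logical.VarCharType;

public class NestedSchemaSketch {
    public static void main(String[] args) {
        RowType schema =
                RowType.of(
                        new LogicalType[] {
                            new VarCharType(),
                            new IntType(),
                            new ArrayType(
                                    RowType.of(
                                            new LogicalType[] {new VarCharType()},
                                            new String[] {"_col2_col0"})),
                            new MapType(
                                    new VarCharType(),
                                    RowType.of(
                                            new LogicalType[] {new VarCharType(), new TimestampType()},
                                            new String[] {"_col3_col0", "_col3_col1"}))
                        },
                        new String[] {"_col0", "_col1", "_col2", "_col3"});
        System.out.println(schema); // summary string of the nested row type
    }
}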
Use of org.apache.flink.table.types.logical.TimestampType in project flink by apache.
The class OrcFileSystemITCase, method initNestedTypesFile:
private String initNestedTypesFile(List<RowData> data) throws Exception {
    LogicalType[] fieldTypes = new LogicalType[4];
    fieldTypes[0] = new VarCharType();
    fieldTypes[1] = new IntType();
    List<RowType.RowField> arrayRowFieldList =
            Collections.singletonList(new RowType.RowField("_col2_col0", new VarCharType()));
    fieldTypes[2] = new ArrayType(new RowType(arrayRowFieldList));
    List<RowType.RowField> mapRowFieldList =
            Arrays.asList(
                    new RowType.RowField("_col3_col0", new VarCharType()),
                    new RowType.RowField("_col3_col1", new TimestampType()));
    fieldTypes[3] = new MapType(new VarCharType(), new RowType(mapRowFieldList));
    String schema =
            "struct<_col0:string,_col1:int,_col2:array<struct<_col2_col0:string>>,"
                    + "_col3:map<string,struct<_col3_col0:string,_col3_col1:timestamp>>>";
    File outDir = TEMPORARY_FOLDER.newFolder();
    Properties writerProps = new Properties();
    writerProps.setProperty("orc.compress", "LZ4");
    final OrcBulkWriterFactory<RowData> writer =
            new OrcBulkWriterFactory<>(
                    new RowDataVectorizer(schema, fieldTypes), writerProps, new Configuration());
    StreamingFileSink<RowData> sink =
            StreamingFileSink.forBulkFormat(new org.apache.flink.core.fs.Path(outDir.toURI()), writer)
                    .withBucketCheckInterval(10000)
                    .build();
    try (OneInputStreamOperatorTestHarness<RowData, Object> testHarness =
            new OneInputStreamOperatorTestHarness<>(new StreamSink<>(sink), 1, 1, 0)) {
        testHarness.setup();
        testHarness.open();
        int time = 0;
        for (final RowData record : data) {
            testHarness.processElement(record, ++time);
        }
        testHarness.snapshot(1, ++time);
        testHarness.notifyOfCompletedCheckpoint(1);
    }
    return outDir.getAbsolutePath();
}
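Note that the ORC schema string pairs the "timestamp" column with new TimestampType(), i.e. the default precision. A quick check of that default (ours; to our understanding the default precision is 6):

import org.apache.flink.table.types.logical.TimestampType;

public class DefaultPrecisionSketch {
    public static void main(String[] args) {
        System.out.println(new TimestampType().getPrecision()); // 6
    }
}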
Use of org.apache.flink.table.types.logical.TimestampType in project flink by apache.
The class ArrowReaderWriterTest, method init:
@BeforeClass
public static void init() {
    fieldTypes.add(new TinyIntType());
    fieldTypes.add(new SmallIntType());
    fieldTypes.add(new IntType());
    fieldTypes.add(new BigIntType());
    fieldTypes.add(new BooleanType());
    fieldTypes.add(new FloatType());
    fieldTypes.add(new DoubleType());
    fieldTypes.add(new VarCharType());
    fieldTypes.add(new VarBinaryType());
    fieldTypes.add(new DecimalType(10, 3));
    fieldTypes.add(new DateType());
    fieldTypes.add(new TimeType(0));
    fieldTypes.add(new TimeType(2));
    fieldTypes.add(new TimeType(4));
    fieldTypes.add(new TimeType(8));
    fieldTypes.add(new LocalZonedTimestampType(0));
    fieldTypes.add(new LocalZonedTimestampType(2));
    fieldTypes.add(new LocalZonedTimestampType(4));
    fieldTypes.add(new LocalZonedTimestampType(8));
    fieldTypes.add(new TimestampType(0));
    fieldTypes.add(new TimestampType(2));
    fieldTypes.add(new TimestampType(4));
    fieldTypes.add(new TimestampType(8));
    fieldTypes.add(new ArrayType(new VarCharType()));
    rowFieldType =
            new RowType(
                    Arrays.asList(
                            new RowType.RowField("a", new IntType()),
                            new RowType.RowField("b", new VarCharType()),
                            new RowType.RowField("c", new ArrayType(new VarCharType())),
                            new RowType.RowField("d", new TimestampType(2)),
                            new RowType.RowField(
                                    "e",
                                    new RowType(
                                            Arrays.asList(
                                                    new RowType.RowField("e1", new IntType()),
                                                    new RowType.RowField("e2", new VarCharType()))))));
    fieldTypes.add(rowFieldType);
    List<RowType.RowField> rowFields = new ArrayList<>();
    for (int i = 0; i < fieldTypes.size(); i++) {
        rowFields.add(new RowType.RowField("f" + i, fieldTypes.get(i)));
    }
    rowType = new RowType(rowFields);
    allocator = ArrowUtils.getRootAllocator().newChildAllocator("stdout", 0, Long.MAX_VALUE);
}
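The precisions sampled above (0, 2, 4, 8) stay inside TimestampType's allowed range of 0 to 9. A sketch of the bounds (ours; to our understanding an out-of-range precision is rejected by the constructor with a ValidationException):

import org.apache.flink.table.types.logical.TimestampType;

public class PrecisionBoundsSketch {
    public static void main(String[] args) {
        for (int p : new int[] {0, 2, 4, 8}) {
            System.out.println(new TimestampType(p)); // TIMESTAMP(0), TIMESTAMP(2), ...
        }
        try {
            new TimestampType(10); // precision outside [0, 9]
        } catch (Exception e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}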