Use of org.apache.flink.table.types.logical.BigIntType in project flink by apache.
The class RowTimeSortOperatorTest, method testSortOnTwoFields:
@Test
public void testSortOnTwoFields() throws Exception {
    InternalTypeInfo<RowData> inputRowType =
            InternalTypeInfo.ofFields(
                    new IntType(), new BigIntType(), VarCharType.STRING_TYPE, new IntType());
    // Note: rowTimeIdx must be 0 in a production environment; it is 1 here
    // only to simplify the testing.
    int rowTimeIdx = 1;
    GeneratedRecordComparator gComparator =
            new GeneratedRecordComparator("", "", new Object[0]) {

                private static final long serialVersionUID = -6067266199060901331L;

                @Override
                public RecordComparator newInstance(ClassLoader classLoader) {
                    return IntRecordComparator.INSTANCE;
                }
            };
    RowDataHarnessAssertor assertor = new RowDataHarnessAssertor(inputRowType.toRowFieldTypes());
    RowTimeSortOperator operator = createSortOperator(inputRowType, rowTimeIdx, gComparator);
    OneInputStreamOperatorTestHarness<RowData, RowData> testHarness = createTestHarness(operator);
    testHarness.open();
    testHarness.processElement(insertRecord(3, 3L, "Hello world", 3));
    testHarness.processElement(insertRecord(2, 2L, "Hello", 2));
    testHarness.processElement(insertRecord(6, 2L, "Luke Skywalker", 6));
    testHarness.processElement(insertRecord(5, 3L, "I am fine.", 5));
    testHarness.processElement(insertRecord(7, 1L, "Comment#1", 7));
    testHarness.processElement(insertRecord(9, 4L, "Comment#3", 9));
    testHarness.processElement(insertRecord(10, 4L, "Comment#4", 10));
    testHarness.processElement(insertRecord(8, 4L, "Comment#2", 8));
    testHarness.processElement(insertRecord(1, 1L, "Hi", 2));
    testHarness.processElement(insertRecord(1, 1L, "Hi", 1));
    testHarness.processElement(insertRecord(4, 3L, "Helloworld, how are you?", 4));
    testHarness.processElement(insertRecord(4, 5L, "Hello, how are you?", 4));
    testHarness.processWatermark(new Watermark(4L));
    List<Object> expectedOutput = new ArrayList<>();
    expectedOutput.add(insertRecord(1, 1L, "Hi", 2));
    expectedOutput.add(insertRecord(1, 1L, "Hi", 1));
    expectedOutput.add(insertRecord(7, 1L, "Comment#1", 7));
    expectedOutput.add(insertRecord(2, 2L, "Hello", 2));
    expectedOutput.add(insertRecord(6, 2L, "Luke Skywalker", 6));
    expectedOutput.add(insertRecord(3, 3L, "Hello world", 3));
    expectedOutput.add(insertRecord(4, 3L, "Helloworld, how are you?", 4));
    expectedOutput.add(insertRecord(5, 3L, "I am fine.", 5));
    expectedOutput.add(insertRecord(8, 4L, "Comment#2", 8));
    expectedOutput.add(insertRecord(9, 4L, "Comment#3", 9));
    expectedOutput.add(insertRecord(10, 4L, "Comment#4", 10));
    expectedOutput.add(new Watermark(4L));
    // do a snapshot; the data can be recovered from this state
    OperatorSubtaskState snapshot = testHarness.snapshot(0L, 0);
    assertor.assertOutputEquals("output wrong.", expectedOutput, testHarness.getOutput());
    testHarness.close();
    expectedOutput.clear();
    operator = createSortOperator(inputRowType, rowTimeIdx, gComparator);
    testHarness = createTestHarness(operator);
    testHarness.initializeState(snapshot);
    testHarness.open();
    // late data will be dropped
    testHarness.processElement(insertRecord(5, 3L, "I am fine.", 6));
    testHarness.processWatermark(new Watermark(5L));
    expectedOutput.add(insertRecord(4, 5L, "Hello, how are you?", 4));
    expectedOutput.add(new Watermark(5L));
    assertor.assertOutputEquals("output wrong.", expectedOutput, testHarness.getOutput());
    // these watermarks have no effect
    testHarness.processWatermark(new Watermark(11L));
    testHarness.processWatermark(new Watermark(12L));
    expectedOutput.add(new Watermark(11L));
    expectedOutput.add(new Watermark(12L));
    assertor.assertOutputEquals("output wrong.", expectedOutput, testHarness.getOutput());
}
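In the test above, the second field is declared as BigIntType and carries the rowtime. A minimal sketch of how such a record carries its BIGINT field through Flink's internal row format, assuming only the standard flink-table-common classes GenericRowData, StringData, and RowData (the class name BigIntRowtimeSketch is invented for illustration):

import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.data.StringData;

public class BigIntRowtimeSketch {
    public static void main(String[] args) {
        // Same field layout as the test records: (INT, BIGINT, STRING, INT).
        RowData row = GenericRowData.of(3, 3L, StringData.fromString("Hello world"), 3);
        // A field declared with BigIntType is read as a primitive long.
        long rowtime = row.getLong(1);
        System.out.println(rowtime); // 3
    }
}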
Use of org.apache.flink.table.types.logical.BigIntType in project flink by apache.
The class WindowOperatorTest, method testTumblingCountWindow:
@Test
public void testTumblingCountWindow() throws Exception {
    if (!UTC_ZONE_ID.equals(shiftTimeZone)) {
        return;
    }
    closeCalled.set(0);
    final int windowSize = 3;
    LogicalType[] windowTypes = new LogicalType[] { new BigIntType() };
    WindowOperator operator =
            WindowOperatorBuilder.builder()
                    .withInputFields(inputFieldTypes)
                    .withShiftTimezone(shiftTimeZone)
                    .countWindow(windowSize)
                    .aggregateAndBuild(
                            getCountWindowAggFunction(), equaliser, accTypes, aggResultTypes, windowTypes);
    OneInputStreamOperatorTestHarness<RowData, RowData> testHarness = createTestHarness(operator);
    ConcurrentLinkedQueue<Object> expectedOutput = new ConcurrentLinkedQueue<>();
    testHarness.open();
    testHarness.processElement(insertRecord("key2", 1, 0L));
    testHarness.processElement(insertRecord("key2", 2, 1000L));
    testHarness.processElement(insertRecord("key2", 3, 2500L));
    testHarness.processElement(insertRecord("key1", 1, 10L));
    testHarness.processElement(insertRecord("key1", 2, 1000L));
    testHarness.processWatermark(new Watermark(12000));
    testHarness.setProcessingTime(12000L);
    expectedOutput.addAll(doubleRecord(isTableAggregate, insertRecord("key2", 6L, 3L, 0L)));
    expectedOutput.add(new Watermark(12000));
    assertor.assertOutputEqualsSorted("Output was not correct.", expectedOutput, testHarness.getOutput());
    // do a snapshot, close and restore again
    OperatorSubtaskState snapshotV2 = testHarness.snapshot(0L, 0);
    testHarness.close();
    expectedOutput.clear();
    testHarness = createTestHarness(operator);
    testHarness.setup();
    testHarness.initializeState(snapshotV2);
    testHarness.open();
    testHarness.processElement(insertRecord("key1", 2, 2500L));
    expectedOutput.addAll(doubleRecord(isTableAggregate, insertRecord("key1", 5L, 3L, 0L)));
    assertor.assertOutputEqualsSorted("Output was not correct.", expectedOutput, testHarness.getOutput());
    testHarness.processElement(insertRecord("key2", 4, 5501L));
    testHarness.processElement(insertRecord("key2", 5, 6000L));
    testHarness.processElement(insertRecord("key2", 5, 6000L));
    testHarness.processElement(insertRecord("key2", 6, 6050L));
    expectedOutput.addAll(doubleRecord(isTableAggregate, insertRecord("key2", 14L, 3L, 1L)));
    assertor.assertOutputEqualsSorted("Output was not correct.", expectedOutput, testHarness.getOutput());
    testHarness.processElement(insertRecord("key1", 3, 4000L));
    testHarness.processElement(insertRecord("key2", 10, 15000L));
    testHarness.processElement(insertRecord("key2", 20, 15000L));
    expectedOutput.addAll(doubleRecord(isTableAggregate, insertRecord("key2", 36L, 3L, 2L)));
    assertor.assertOutputEqualsSorted("Output was not correct.", expectedOutput, testHarness.getOutput());
    testHarness.processElement(insertRecord("key1", 2, 2500L));
    testHarness.processElement(insertRecord("key1", 2, 2500L));
    expectedOutput.addAll(doubleRecord(isTableAggregate, insertRecord("key1", 7L, 3L, 1L)));
    assertor.assertOutputEqualsSorted("Output was not correct.", expectedOutput, testHarness.getOutput());
    testHarness.close();
    // we close once in the rest...
    assertEquals("Close was not called.", 2, closeCalled.get());
}
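The single windowTypes entry above is a BigIntType, which matches the trailing long column (0L, 1L, 2L) in the expected records, presumably the window property emitted per count window. A short, hedged sketch of how this logical type relates to the DataTypes API (the class name is invented for illustration; DataTypes.BIGINT(), getLogicalType(), and asSerializableString() are standard flink-table APIs):

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.types.logical.BigIntType;

public class BigIntTypeSketch {
    public static void main(String[] args) {
        // DataTypes.BIGINT() resolves to the same logical type as new BigIntType().
        System.out.println(DataTypes.BIGINT().getLogicalType().equals(new BigIntType())); // true
        // Nullability is part of the type and shows up in its serializable string.
        System.out.println(new BigIntType(false).asSerializableString()); // BIGINT NOT NULL
    }
}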
Use of org.apache.flink.table.types.logical.BigIntType in project flink by apache.
The class DynamicTableSourceSpecSerdeTest, method testDynamicTableSourceSpecSerde:
public static Stream<DynamicTableSourceSpec> testDynamicTableSourceSpecSerde() {
    Map<String, String> options1 = new HashMap<>();
    options1.put("connector", FileSystemTableFactory.IDENTIFIER);
    options1.put("format", TestCsvFormatFactory.IDENTIFIER);
    options1.put("path", "/tmp");
    final ResolvedSchema resolvedSchema1 =
            new ResolvedSchema(
                    Collections.singletonList(Column.physical("a", DataTypes.BIGINT())),
                    Collections.emptyList(),
                    null);
    final CatalogTable catalogTable1 =
            CatalogTable.of(
                    Schema.newBuilder().fromResolvedSchema(resolvedSchema1).build(),
                    null, Collections.emptyList(), options1);
    DynamicTableSourceSpec spec1 =
            new DynamicTableSourceSpec(
                    ContextResolvedTable.temporary(
                            ObjectIdentifier.of(DEFAULT_BUILTIN_CATALOG, DEFAULT_BUILTIN_DATABASE, "MyTable"),
                            new ResolvedCatalogTable(catalogTable1, resolvedSchema1)),
                    null);
    Map<String, String> options2 = new HashMap<>();
    options2.put("connector", TestValuesTableFactory.IDENTIFIER);
    options2.put("disable-lookup", "true");
    options2.put("enable-watermark-push-down", "true");
    options2.put("filterable-fields", "b");
    options2.put("bounded", "false");
    options2.put("readable-metadata", "m1:INT, m2:STRING");
    final ResolvedSchema resolvedSchema2 =
            new ResolvedSchema(
                    Arrays.asList(
                            Column.physical("a", DataTypes.BIGINT()),
                            Column.physical("b", DataTypes.INT()),
                            Column.physical("c", DataTypes.STRING()),
                            Column.physical("p", DataTypes.STRING()),
                            Column.metadata("m1", DataTypes.INT(), null, false),
                            Column.metadata("m2", DataTypes.STRING(), null, false),
                            Column.physical("ts", DataTypes.TIMESTAMP(3))),
                    Collections.emptyList(),
                    null);
    final CatalogTable catalogTable2 =
            CatalogTable.of(
                    Schema.newBuilder().fromResolvedSchema(resolvedSchema2).build(),
                    null, Collections.emptyList(), options2);
    FlinkTypeFactory factory = FlinkTypeFactory.INSTANCE();
    RexBuilder rexBuilder = new RexBuilder(factory);
    DynamicTableSourceSpec spec2 =
            new DynamicTableSourceSpec(
                    ContextResolvedTable.temporary(
                            ObjectIdentifier.of(DEFAULT_BUILTIN_CATALOG, DEFAULT_BUILTIN_DATABASE, "MyTable"),
                            new ResolvedCatalogTable(catalogTable2, resolvedSchema2)),
                    Arrays.asList(
                            new ProjectPushDownSpec(
                                    new int[][] { { 0 }, { 1 }, { 4 }, { 6 } },
                                    RowType.of(
                                            new LogicalType[] {
                                                new BigIntType(), new IntType(), new IntType(), new TimestampType(3)
                                            },
                                            new String[] { "a", "b", "m1", "ts" })),
                            new ReadingMetadataSpec(
                                    Arrays.asList("m1", "m2"),
                                    RowType.of(
                                            new LogicalType[] {
                                                new BigIntType(), new IntType(), new IntType(), new TimestampType(3)
                                            },
                                            new String[] { "a", "b", "m1", "ts" })),
                            new FilterPushDownSpec(
                                    Collections.singletonList(
                                            // b >= 10
                                            rexBuilder.makeCall(
                                                    SqlStdOperatorTable.GREATER_THAN_OR_EQUAL,
                                                    rexBuilder.makeInputRef(factory.createSqlType(SqlTypeName.INTEGER), 1),
                                                    rexBuilder.makeExactLiteral(new BigDecimal(10))))),
                            new WatermarkPushDownSpec(
                                    rexBuilder.makeCall(
                                            SqlStdOperatorTable.MINUS,
                                            rexBuilder.makeInputRef(factory.createSqlType(SqlTypeName.TIMESTAMP, 3), 3),
                                            rexBuilder.makeIntervalLiteral(
                                                    BigDecimal.valueOf(1000),
                                                    new SqlIntervalQualifier(
                                                            TimeUnit.SECOND, 2, TimeUnit.SECOND, 6, SqlParserPos.ZERO))),
                                    5000,
                                    RowType.of(
                                            new BigIntType(),
                                            new IntType(),
                                            new IntType(),
                                            new TimestampType(false, TimestampKind.ROWTIME, 3))),
                            new SourceWatermarkSpec(
                                    true,
                                    RowType.of(
                                            new BigIntType(),
                                            new IntType(),
                                            new IntType(),
                                            new TimestampType(false, TimestampKind.ROWTIME, 3))),
                            new LimitPushDownSpec(100),
                            new PartitionPushDownSpec(
                                    Arrays.asList(
                                            new HashMap<String, String>() {{ put("p", "A"); }},
                                            new HashMap<String, String>() {{ put("p", "B"); }}))));
    return Stream.of(spec1, spec2);
}
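Both ProjectPushDownSpec and ReadingMetadataSpec above describe their produced schema with the two-argument RowType.of overload, pairing BigIntType and friends with explicit field names. A standalone sketch of just that call, assuming only the flink-table logical-type classes (the class name is invented for illustration, and the printed format is approximate):

import org.apache.flink.table.types.logical.BigIntType;
import org.apache.flink.table.types.logical.IntType;
import org.apache.flink.table.types.logical.LogicalType;
import org.apache.flink.table.types.logical.RowType;
import org.apache.flink.table.types.logical.TimestampType;

public class ProjectedRowTypeSketch {
    public static void main(String[] args) {
        RowType producedType =
                RowType.of(
                        new LogicalType[] {
                            new BigIntType(), new IntType(), new IntType(), new TimestampType(3)
                        },
                        new String[] { "a", "b", "m1", "ts" });
        // Should print roughly: ROW<`a` BIGINT, `b` INT, `m1` INT, `ts` TIMESTAMP(3)>
        System.out.println(producedType.asSerializableString());
    }
}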
Use of org.apache.flink.table.types.logical.BigIntType in project flink by apache.
The class LogicalTypeJsonSerdeTest, method testLogicalTypeSerde:
private static List<LogicalType> testLogicalTypeSerde() {
    final List<LogicalType> types =
            Arrays.asList(
                    new BooleanType(), new TinyIntType(), new SmallIntType(), new IntType(),
                    new BigIntType(), new FloatType(), new DoubleType(),
                    new DecimalType(10), new DecimalType(15, 5),
                    CharType.ofEmptyLiteral(), new CharType(), new CharType(5),
                    VarCharType.ofEmptyLiteral(), new VarCharType(), new VarCharType(5),
                    BinaryType.ofEmptyLiteral(), new BinaryType(), new BinaryType(100),
                    VarBinaryType.ofEmptyLiteral(), new VarBinaryType(), new VarBinaryType(100),
                    new DateType(), new TimeType(), new TimeType(3),
                    new TimestampType(), new TimestampType(3),
                    new LocalZonedTimestampType(false, TimestampKind.PROCTIME, 3),
                    new TimestampType(false, TimestampKind.ROWTIME, 3),
                    new ZonedTimestampType(), new ZonedTimestampType(3),
                    new ZonedTimestampType(false, TimestampKind.ROWTIME, 3),
                    new LocalZonedTimestampType(), new LocalZonedTimestampType(3),
                    new LocalZonedTimestampType(false, TimestampKind.PROCTIME, 3),
                    new LocalZonedTimestampType(false, TimestampKind.ROWTIME, 3),
                    new DayTimeIntervalType(DayTimeIntervalType.DayTimeResolution.DAY_TO_HOUR),
                    new DayTimeIntervalType(false, DayTimeIntervalType.DayTimeResolution.DAY_TO_HOUR, 3, 6),
                    new YearMonthIntervalType(YearMonthIntervalType.YearMonthResolution.YEAR_TO_MONTH),
                    new YearMonthIntervalType(false, YearMonthIntervalType.YearMonthResolution.MONTH, 2),
                    new ZonedTimestampType(), new LocalZonedTimestampType(),
                    new LocalZonedTimestampType(false, TimestampKind.PROCTIME, 3),
                    new SymbolType<>(),
                    new ArrayType(new IntType(false)),
                    new ArrayType(new LocalZonedTimestampType(false, TimestampKind.ROWTIME, 3)),
                    new ArrayType(new ZonedTimestampType(false, TimestampKind.ROWTIME, 3)),
                    new ArrayType(new TimestampType()),
                    new ArrayType(CharType.ofEmptyLiteral()),
                    new ArrayType(VarCharType.ofEmptyLiteral()),
                    new ArrayType(BinaryType.ofEmptyLiteral()),
                    new ArrayType(VarBinaryType.ofEmptyLiteral()),
                    new MapType(new BigIntType(), new IntType(false)),
                    new MapType(new TimestampType(false, TimestampKind.ROWTIME, 3), new ZonedTimestampType()),
                    new MapType(CharType.ofEmptyLiteral(), CharType.ofEmptyLiteral()),
                    new MapType(VarCharType.ofEmptyLiteral(), VarCharType.ofEmptyLiteral()),
                    new MapType(BinaryType.ofEmptyLiteral(), BinaryType.ofEmptyLiteral()),
                    new MapType(VarBinaryType.ofEmptyLiteral(), VarBinaryType.ofEmptyLiteral()),
                    new MultisetType(new IntType(false)),
                    new MultisetType(new TimestampType()),
                    new MultisetType(new TimestampType(true, TimestampKind.ROWTIME, 3)),
                    new MultisetType(CharType.ofEmptyLiteral()),
                    new MultisetType(VarCharType.ofEmptyLiteral()),
                    new MultisetType(BinaryType.ofEmptyLiteral()),
                    new MultisetType(VarBinaryType.ofEmptyLiteral()),
                    RowType.of(new BigIntType(), new IntType(false), new VarCharType(200)),
                    RowType.of(
                            new LogicalType[] { new BigIntType(), new IntType(false), new VarCharType(200) },
                            new String[] { "f1", "f2", "f3" }),
                    RowType.of(
                            new TimestampType(false, TimestampKind.ROWTIME, 3),
                            new TimestampType(false, TimestampKind.REGULAR, 3),
                            new ZonedTimestampType(false, TimestampKind.ROWTIME, 3),
                            new ZonedTimestampType(false, TimestampKind.REGULAR, 3),
                            new LocalZonedTimestampType(false, TimestampKind.ROWTIME, 3),
                            new LocalZonedTimestampType(false, TimestampKind.PROCTIME, 3),
                            new LocalZonedTimestampType(false, TimestampKind.REGULAR, 3)),
                    RowType.of(
                            CharType.ofEmptyLiteral(), VarCharType.ofEmptyLiteral(),
                            BinaryType.ofEmptyLiteral(), VarBinaryType.ofEmptyLiteral()),
                    // registered structured type
                    StructuredType.newBuilder(ObjectIdentifier.of("cat", "db", "structuredType"), PojoClass.class)
                            .attributes(
                                    Arrays.asList(
                                            new StructuredType.StructuredAttribute("f0", new IntType(true)),
                                            new StructuredType.StructuredAttribute("f1", new BigIntType(true)),
                                            new StructuredType.StructuredAttribute("f2", new VarCharType(200), "desc")))
                            .comparison(StructuredType.StructuredComparison.FULL)
                            .setFinal(false)
                            .setInstantiable(false)
                            .superType(
                                    StructuredType.newBuilder(ObjectIdentifier.of("cat", "db", "structuredType2"))
                                            .attributes(
                                                    Collections.singletonList(
                                                            new StructuredType.StructuredAttribute("f0", new BigIntType(false))))
                                            .build())
                            .description("description for StructuredType")
                            .build(),
                    // unregistered structured type
                    StructuredType.newBuilder(PojoClass.class)
                            .attributes(
                                    Arrays.asList(
                                            new StructuredType.StructuredAttribute("f0", new IntType(true)),
                                            new StructuredType.StructuredAttribute("f1", new BigIntType(true)),
                                            new StructuredType.StructuredAttribute("f2", new VarCharType(200), "desc")))
                            .build(),
                    // registered distinct type
                    DistinctType.newBuilder(ObjectIdentifier.of("cat", "db", "distinctType"), new VarCharType(5)).build(),
                    DistinctType.newBuilder(ObjectIdentifier.of("cat", "db", "distinctType"), new VarCharType(false, 5)).build(),
                    // custom RawType
                    new RawType<>(LocalDateTime.class, LocalDateTimeSerializer.INSTANCE),
                    // external RawType
                    new RawType<>(Row.class, ExternalSerializer.of(DataTypes.ROW(DataTypes.INT(), DataTypes.STRING()))));
    final List<LogicalType> mutableTypes = new ArrayList<>(types);
    // RawType for MapView
    addRawTypesForMapView(mutableTypes, new VarCharType(100), new VarCharType(100));
    addRawTypesForMapView(mutableTypes, new VarCharType(100), new BigIntType());
    addRawTypesForMapView(mutableTypes, new BigIntType(), new VarCharType(100));
    addRawTypesForMapView(mutableTypes, new BigIntType(), new BigIntType());
    // RawType for ListView
    addRawTypesForListView(mutableTypes, new VarCharType(100));
    addRawTypesForListView(mutableTypes, new BigIntType());
    // RawType for custom MapView
    mutableTypes.add(
            DataViewUtils.adjustDataViews(
                            MapView.newMapViewDataType(
                                    DataTypes.STRING().toInternal(),
                                    DataTypes.STRING().bridgedTo(byte[].class)),
                            false)
                    .getLogicalType());
    final List<LogicalType> allTypes = new ArrayList<>();
    // consider both the nullable and the non-nullable variant of every type
    for (LogicalType type : mutableTypes) {
        allTypes.add(type.copy(true));
        allTypes.add(type.copy(false));
    }
    // nullability is ignored for NullType
    allTypes.add(new NullType());
    return allTypes;
}
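The nullability loop at the end relies on LogicalType.copy(boolean), which returns a copy of the type with the requested nullability. A short sketch of what this produces for BigIntType, using only standard flink-table APIs (the class name is invented for illustration):

import org.apache.flink.table.types.logical.BigIntType;
import org.apache.flink.table.types.logical.LogicalType;

public class NullabilityCopySketch {
    public static void main(String[] args) {
        LogicalType nullable = new BigIntType().copy(true);
        LogicalType notNull = nullable.copy(false);
        System.out.println(nullable.asSerializableString()); // BIGINT
        System.out.println(notNull.asSerializableString());  // BIGINT NOT NULL
        System.out.println(nullable.isNullable());           // true
    }
}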
Use of org.apache.flink.table.types.logical.BigIntType in project flink by apache.
The class DynamicTableSinkSpecSerdeTest, method testDynamicTableSinkSpecSerde:
static Stream<DynamicTableSinkSpec> testDynamicTableSinkSpecSerde() {
    Map<String, String> options1 = new HashMap<>();
    options1.put("connector", FileSystemTableFactory.IDENTIFIER);
    options1.put("format", TestCsvFormatFactory.IDENTIFIER);
    options1.put("path", "/tmp");
    final ResolvedSchema resolvedSchema1 =
            new ResolvedSchema(
                    Collections.singletonList(Column.physical("a", DataTypes.BIGINT())),
                    Collections.emptyList(),
                    null);
    final CatalogTable catalogTable1 =
            CatalogTable.of(
                    Schema.newBuilder().fromResolvedSchema(resolvedSchema1).build(),
                    null, Collections.emptyList(), options1);
    DynamicTableSinkSpec spec1 =
            new DynamicTableSinkSpec(
                    ContextResolvedTable.temporary(
                            ObjectIdentifier.of(DEFAULT_BUILTIN_CATALOG, DEFAULT_BUILTIN_DATABASE, "MyTable"),
                            new ResolvedCatalogTable(catalogTable1, resolvedSchema1)),
                    null);
    Map<String, String> options2 = new HashMap<>();
    options2.put("connector", FileSystemTableFactory.IDENTIFIER);
    options2.put("format", TestCsvFormatFactory.IDENTIFIER);
    options2.put("path", "/tmp");
    final ResolvedSchema resolvedSchema2 =
            new ResolvedSchema(
                    Arrays.asList(
                            Column.physical("a", DataTypes.BIGINT()),
                            Column.physical("b", DataTypes.INT()),
                            Column.physical("p", DataTypes.STRING())),
                    Collections.emptyList(),
                    null);
    final CatalogTable catalogTable2 =
            CatalogTable.of(
                    Schema.newBuilder().fromResolvedSchema(resolvedSchema2).build(),
                    null, Collections.emptyList(), options2);
    DynamicTableSinkSpec spec2 =
            new DynamicTableSinkSpec(
                    ContextResolvedTable.temporary(
                            ObjectIdentifier.of(DEFAULT_BUILTIN_CATALOG, DEFAULT_BUILTIN_DATABASE, "MyTable"),
                            new ResolvedCatalogTable(catalogTable2, resolvedSchema2)),
                    Arrays.asList(
                            new OverwriteSpec(true),
                            new PartitioningSpec(new HashMap<String, String>() {{ put("p", "A"); }})));
    Map<String, String> options3 = new HashMap<>();
    options3.put("connector", TestValuesTableFactory.IDENTIFIER);
    options3.put("writable-metadata", "m:STRING");
    final ResolvedSchema resolvedSchema3 =
            new ResolvedSchema(
                    Arrays.asList(
                            Column.physical("a", DataTypes.BIGINT()),
                            Column.physical("b", DataTypes.INT()),
                            Column.metadata("m", DataTypes.STRING(), null, false)),
                    Collections.emptyList(),
                    null);
    final CatalogTable catalogTable3 =
            CatalogTable.of(
                    Schema.newBuilder().fromResolvedSchema(resolvedSchema3).build(),
                    null, Collections.emptyList(), options3);
    DynamicTableSinkSpec spec3 =
            new DynamicTableSinkSpec(
                    ContextResolvedTable.temporary(
                            ObjectIdentifier.of(DEFAULT_BUILTIN_CATALOG, DEFAULT_BUILTIN_DATABASE, "MyTable"),
                            new ResolvedCatalogTable(catalogTable3, resolvedSchema3)),
                    Collections.singletonList(
                            new WritingMetadataSpec(
                                    Collections.singletonList("m"),
                                    RowType.of(new BigIntType(), new IntType()))));
    return Stream.of(spec1, spec2, spec3);
}
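The WritingMetadataSpec above builds its consumed row type with the varargs RowType.of overload, which, unlike the two-argument overload used in the source-spec test, generates default field names. A hedged sketch using only the flink-table logical-type classes (the class name is invented for illustration):

import org.apache.flink.table.types.logical.BigIntType;
import org.apache.flink.table.types.logical.IntType;
import org.apache.flink.table.types.logical.RowType;

public class DefaultFieldNamesSketch {
    public static void main(String[] args) {
        // RowType.of(LogicalType...) assigns default field names f0, f1, ...
        RowType consumedType = RowType.of(new BigIntType(), new IntType());
        System.out.println(consumedType.getFieldNames()); // [f0, f1]
    }
}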