Search in sources:

Example 6 with DataStructureConverter

Usage of org.apache.flink.table.data.conversion.DataStructureConverter in the Apache Flink project.

From the class MaterializedCollectStreamResultTest, method testLimitedSnapshot:

/**
 * Verifies snapshotting of a materialized collect-stream result that is limited to 2 rows
 * with an overcommitment threshold of 3 rows: over-committed slots appear as {@code null}
 * until the limit clean-up evicts them, and a DELETE triggers regular clean-up.
 */
@Test
public void testLimitedSnapshot() throws Exception {
    final ResolvedSchema tableSchema =
            ResolvedSchema.physical(
                    new String[] {"f0", "f1"},
                    new DataType[] {DataTypes.STRING(), DataTypes.INT()});

    @SuppressWarnings({"unchecked", "rawtypes"})
    final DataStructureConverter<RowData, Row> converter =
            (DataStructureConverter)
                    DataStructureConverters.getConverter(tableSchema.toPhysicalRowDataType());

    // Result limited to 2 rows, allowing 3 rows of overcommitment.
    try (TestMaterializedCollectStreamResult materialized =
            new TestMaterializedCollectStreamResult(
                    new TestTableResult(ResultKind.SUCCESS_WITH_CONTENT, tableSchema),
                    2,
                    3,
                    createInternalBinaryRowDataConverter(tableSchema.toPhysicalRowDataType()))) {
        materialized.isRetrieving = true;

        materialized.processRecord(Row.ofKind(RowKind.INSERT, "D", 1));
        materialized.processRecord(Row.ofKind(RowKind.INSERT, "A", 1));
        materialized.processRecord(Row.ofKind(RowKind.INSERT, "B", 1));
        materialized.processRecord(Row.ofKind(RowKind.INSERT, "A", 1));

        // The first two slots are over-committed rows (null); the last two hold real rows.
        assertRowEquals(
                Arrays.asList(
                        null,
                        null,
                        Row.ofKind(RowKind.INSERT, "B", 1),
                        Row.ofKind(RowKind.INSERT, "A", 1)),
                materialized.getMaterializedTable(),
                converter);

        // Snapshot with page size 1 yields 2 pages.
        assertEquals(TypedResult.payload(2), materialized.snapshot(1));

        assertRowEquals(
                Collections.singletonList(Row.ofKind(RowKind.INSERT, "B", 1)),
                materialized.retrievePage(1),
                converter);
        assertRowEquals(
                Collections.singletonList(Row.ofKind(RowKind.INSERT, "A", 1)),
                materialized.retrievePage(2),
                converter);

        materialized.processRecord(Row.ofKind(RowKind.INSERT, "C", 1));

        // Limit clean-up has taken place: only the last two rows remain.
        assertRowEquals(
                Arrays.asList(
                        Row.ofKind(RowKind.INSERT, "A", 1),
                        Row.ofKind(RowKind.INSERT, "C", 1)),
                materialized.getMaterializedTable(),
                converter);

        materialized.processRecord(Row.ofKind(RowKind.DELETE, "A", 1));

        // Regular clean-up has taken place after the DELETE; only row C remains.
        assertRowEquals(
                Collections.singletonList(Row.ofKind(RowKind.INSERT, "C", 1)),
                materialized.getMaterializedTable(),
                converter);
    }
}
Also used : RowData(org.apache.flink.table.data.RowData) BinaryRowData(org.apache.flink.table.data.binary.BinaryRowData) DataStructureConverter(org.apache.flink.table.data.conversion.DataStructureConverter) Row(org.apache.flink.types.Row) TestTableResult(org.apache.flink.table.client.cli.utils.TestTableResult) ResolvedSchema(org.apache.flink.table.catalog.ResolvedSchema) Test(org.junit.Test)

Aggregations

RowData (org.apache.flink.table.data.RowData)6 DataStructureConverter (org.apache.flink.table.data.conversion.DataStructureConverter)6 BinaryRowData (org.apache.flink.table.data.binary.BinaryRowData)5 Row (org.apache.flink.types.Row)5 ResolvedSchema (org.apache.flink.table.catalog.ResolvedSchema)4 TestTableResult (org.apache.flink.table.client.cli.utils.TestTableResult)4 Test (org.junit.Test)4 RowType (org.apache.flink.table.types.logical.RowType)2 List (java.util.List)1 Function (java.util.function.Function)1 Collectors (java.util.stream.Collectors)1 FlatMapFunction (org.apache.flink.api.common.functions.FlatMapFunction)1 AsyncFunction (org.apache.flink.streaming.api.functions.async.AsyncFunction)1 AsyncWaitOperatorFactory (org.apache.flink.streaming.api.operators.async.AsyncWaitOperatorFactory)1 DataTypeFactory (org.apache.flink.table.catalog.DataTypeFactory)1 DataStructureConverters (org.apache.flink.table.data.conversion.DataStructureConverters)1 LookupJoinCodeGenerator (org.apache.flink.table.planner.codegen.LookupJoinCodeGenerator)1 TableFunctionResultFuture (org.apache.flink.table.runtime.collector.TableFunctionResultFuture)1 AsyncLookupJoinRunner (org.apache.flink.table.runtime.operators.join.lookup.AsyncLookupJoinRunner)1 AsyncLookupJoinWithCalcRunner (org.apache.flink.table.runtime.operators.join.lookup.AsyncLookupJoinWithCalcRunner)1