Example 46 with ResolvedSchema

Use of org.apache.flink.table.catalog.ResolvedSchema in project flink by apache.

From the class UpsertKafkaDynamicTableFactory, method createKeyValueProjections:

private Tuple2<int[], int[]> createKeyValueProjections(ResolvedCatalogTable catalogTable) {
    ResolvedSchema schema = catalogTable.getResolvedSchema();
    // the primary key should have been validated earlier
    List<String> keyFields = schema.getPrimaryKey().get().getColumns();
    DataType physicalDataType = schema.toPhysicalRowDataType();
    Configuration tableOptions = Configuration.fromMap(catalogTable.getOptions());
    // upsert-kafka will set key.fields to primary key fields by default
    tableOptions.set(KEY_FIELDS, keyFields);
    int[] keyProjection = createKeyFormatProjection(tableOptions, physicalDataType);
    int[] valueProjection = createValueFormatProjection(tableOptions, physicalDataType);
    return Tuple2.of(keyProjection, valueProjection);
}
Also used: Configuration(org.apache.flink.configuration.Configuration) DataType(org.apache.flink.table.types.DataType) ResolvedSchema(org.apache.flink.table.catalog.ResolvedSchema)
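
A minimal, self-contained sketch of the two ResolvedSchema accessors the projection code above relies on. This is not Flink code: the class name ResolvedSchemaPrimaryKeyDemo and the column names are invented for illustration. getPrimaryKey() returns an Optional of the declared constraint, and toPhysicalRowDataType() collapses the physical columns into a single ROW data type.

import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.catalog.ResolvedSchema;
import org.apache.flink.table.catalog.UniqueConstraint;
import org.apache.flink.table.types.DataType;

public class ResolvedSchemaPrimaryKeyDemo {
    public static void main(String[] args) {
        // Two physical columns with a primary key on "id" (PK columns must be NOT NULL).
        ResolvedSchema schema = new ResolvedSchema(
                Arrays.asList(
                        Column.physical("id", DataTypes.STRING().notNull()),
                        Column.physical("amount", DataTypes.INT())),
                Collections.emptyList(), // no watermark specs
                UniqueConstraint.primaryKey("pk", Collections.singletonList("id")));

        // The same accessors used by createKeyValueProjections above.
        List<String> keyFields = schema.getPrimaryKey().get().getColumns();
        DataType physicalDataType = schema.toPhysicalRowDataType();

        System.out.println(keyFields);        // [id]
        System.out.println(physicalDataType); // roughly ROW<`id` STRING NOT NULL, `amount` INT>
    }
}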

Example 47 with ResolvedSchema

Use of org.apache.flink.table.catalog.ResolvedSchema in project zeppelin by apache.

From the class Flink113Shims, method rowToString:

@Override
public String[] rowToString(Object row, Object table, Object tableConfig) {
    final String zone = ((TableConfig) tableConfig).getConfiguration().get(TableConfigOptions.LOCAL_TIME_ZONE);
    ZoneId zoneId = TableConfigOptions.LOCAL_TIME_ZONE.defaultValue().equals(zone) ? ZoneId.systemDefault() : ZoneId.of(zone);
    ResolvedSchema resolvedSchema = ((Table) table).getResolvedSchema();
    return PrintUtils.rowToString((Row) row, resolvedSchema, zoneId);
}
Also used: Table(org.apache.flink.table.api.Table) ZoneId(java.time.ZoneId) AttributedString(org.jline.utils.AttributedString) ResolvedSchema(org.apache.flink.table.catalog.ResolvedSchema)
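
The zone lookup in rowToString is worth spelling out: TableConfigOptions.LOCAL_TIME_ZONE defaults to the sentinel string "default", which is taken to mean the JVM's zone; any other value is parsed as an explicit ZoneId. A minimal sketch of just that rule, assuming a plain Flink 1.13 TableConfig (the class name ZoneResolutionDemo is invented):

import java.time.ZoneId;
import org.apache.flink.table.api.TableConfig;
import org.apache.flink.table.api.config.TableConfigOptions;

public class ZoneResolutionDemo {
    public static void main(String[] args) {
        TableConfig tableConfig = new TableConfig();
        tableConfig.getConfiguration().setString("table.local-time-zone", "UTC");

        // Same resolution rule as rowToString above.
        String zone = tableConfig.getConfiguration().get(TableConfigOptions.LOCAL_TIME_ZONE);
        ZoneId zoneId = TableConfigOptions.LOCAL_TIME_ZONE.defaultValue().equals(zone)
                ? ZoneId.systemDefault()
                : ZoneId.of(zone);

        System.out.println(zoneId); // UTC
    }
}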

Example 48 with ResolvedSchema

Use of org.apache.flink.table.catalog.ResolvedSchema in project flink by apache.

From the class CliResultViewTest, method testResultViewClearResult:

private void testResultViewClearResult(TypedResult<?> typedResult, boolean isTableMode, int expectedCancellationCount) throws Exception {
    final CountDownLatch cancellationCounterLatch = new CountDownLatch(expectedCancellationCount);
    final MockExecutor executor = new MockExecutor(typedResult, cancellationCounterLatch);
    final Configuration testConfig = new Configuration();
    testConfig.set(EXECUTION_RESULT_MODE, ResultMode.TABLE);
    testConfig.set(RUNTIME_MODE, RuntimeExecutionMode.STREAMING);
    String sessionId = executor.openSession("test-session");
    ResolvedSchema schema = ResolvedSchema.of(Column.physical("Null Field", DataTypes.STRING()));
    final ResultDescriptor descriptor = new ResultDescriptor("result-id", schema, false, testConfig, new RowDataToStringConverterImpl(schema.toPhysicalRowDataType()));
    try (CliClient cli = new TestingCliClient(TerminalUtils.createDumbTerminal(), sessionId, executor, File.createTempFile("history", "tmp").toPath(), null)) {
        Thread resultViewRunner = new Thread(new TestingCliResultView(cli, descriptor, isTableMode));
        resultViewRunner.start();
        if (!resultViewRunner.isInterrupted()) {
            resultViewRunner.interrupt();
        }
        // wait for the result view thread to exit before closing the client
        while (resultViewRunner.isAlive()) {
            Thread.sleep(100);
        }
    }
    assertTrue("Invalid number of cancellations.", cancellationCounterLatch.await(10, TimeUnit.SECONDS));
}
Also used: Configuration(org.apache.flink.configuration.Configuration) ResultDescriptor(org.apache.flink.table.client.gateway.ResultDescriptor) AttributedString(org.jline.utils.AttributedString) CountDownLatch(java.util.concurrent.CountDownLatch) ResolvedSchema(org.apache.flink.table.catalog.ResolvedSchema) RowDataToStringConverterImpl(org.apache.flink.table.planner.functions.casting.RowDataToStringConverterImpl)
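
ResolvedSchema.of(Column...) is the shorthand factory the test above uses for an ad-hoc schema. A small sketch of what it produces (the class name SingleColumnSchemaDemo is invented for illustration):

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.catalog.ResolvedSchema;

public class SingleColumnSchemaDemo {
    public static void main(String[] args) {
        // The same one-column schema the test feeds into its ResultDescriptor.
        ResolvedSchema schema = ResolvedSchema.of(Column.physical("Null Field", DataTypes.STRING()));

        System.out.println(schema.getColumnNames());        // [Null Field]
        System.out.println(schema.getColumnDataTypes());    // [STRING]
        System.out.println(schema.toPhysicalRowDataType()); // roughly ROW<`Null Field` STRING>
    }
}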

Example 49 with ResolvedSchema

Use of org.apache.flink.table.catalog.ResolvedSchema in project flink by apache.

From the class MaterializedCollectBatchResultTest, method testSnapshot:

@Test
public void testSnapshot() throws Exception {
    final ResolvedSchema schema = ResolvedSchema.physical(new String[] { "f0", "f1" }, new DataType[] { DataTypes.STRING(), DataTypes.INT() });
    @SuppressWarnings({ "unchecked", "rawtypes" }) final DataStructureConverter<RowData, Row> rowConverter = (DataStructureConverter) DataStructureConverters.getConverter(schema.toPhysicalRowDataType());
    try (TestMaterializedCollectBatchResult result = new TestMaterializedCollectBatchResult(new TestTableResult(ResultKind.SUCCESS_WITH_CONTENT, schema), Integer.MAX_VALUE, createInternalBinaryRowDataConverter(schema.toPhysicalRowDataType()))) {
        result.isRetrieving = true;
        result.processRecord(Row.of("A", 1));
        result.processRecord(Row.of("B", 1));
        result.processRecord(Row.of("A", 1));
        result.processRecord(Row.of("C", 2));
        assertEquals(TypedResult.payload(4), result.snapshot(1));
        assertRowEquals(Collections.singletonList(Row.of("A", 1)), result.retrievePage(1), rowConverter);
        assertRowEquals(Collections.singletonList(Row.of("B", 1)), result.retrievePage(2), rowConverter);
        assertRowEquals(Collections.singletonList(Row.of("A", 1)), result.retrievePage(3), rowConverter);
        assertRowEquals(Collections.singletonList(Row.of("C", 2)), result.retrievePage(4), rowConverter);
        result.processRecord(Row.of("A", 1));
        assertEquals(TypedResult.payload(5), result.snapshot(1));
        assertRowEquals(Collections.singletonList(Row.of("A", 1)), result.retrievePage(1), rowConverter);
        assertRowEquals(Collections.singletonList(Row.of("B", 1)), result.retrievePage(2), rowConverter);
        assertRowEquals(Collections.singletonList(Row.of("A", 1)), result.retrievePage(3), rowConverter);
        assertRowEquals(Collections.singletonList(Row.of("C", 2)), result.retrievePage(4), rowConverter);
        assertRowEquals(Collections.singletonList(Row.of("A", 1)), result.retrievePage(5), rowConverter);
    }
}
Also used: RowData(org.apache.flink.table.data.RowData) BinaryRowData(org.apache.flink.table.data.binary.BinaryRowData) DataStructureConverter(org.apache.flink.table.data.conversion.DataStructureConverter) Row(org.apache.flink.types.Row) TestTableResult(org.apache.flink.table.client.cli.utils.TestTableResult) ResolvedSchema(org.apache.flink.table.catalog.ResolvedSchema) Test(org.junit.Test)
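
The schema-to-converter pattern at the top of this test stands on its own: ResolvedSchema.physical builds a schema from parallel name and type arrays, and DataStructureConverters bridges external Row values and internal RowData. A self-contained sketch of that round trip (the class name SchemaConverterDemo is invented; everything else is the same API the test uses):

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.catalog.ResolvedSchema;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.data.conversion.DataStructureConverter;
import org.apache.flink.table.data.conversion.DataStructureConverters;
import org.apache.flink.table.types.DataType;
import org.apache.flink.types.Row;

public class SchemaConverterDemo {
    public static void main(String[] args) {
        ResolvedSchema schema = ResolvedSchema.physical(
                new String[] { "f0", "f1" },
                new DataType[] { DataTypes.STRING(), DataTypes.INT() });

        @SuppressWarnings({ "unchecked", "rawtypes" })
        DataStructureConverter<RowData, Row> converter =
                (DataStructureConverter) DataStructureConverters.getConverter(schema.toPhysicalRowDataType());
        // Converters must be opened before first use.
        converter.open(SchemaConverterDemo.class.getClassLoader());

        // Round trip: external Row -> internal RowData -> external Row.
        RowData internal = converter.toInternalOrNull(Row.of("A", 1));
        Row external = converter.toExternalOrNull(internal);
        System.out.println(external); // prints something like +I[A, 1]
    }
}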

Example 50 with ResolvedSchema

Use of org.apache.flink.table.catalog.ResolvedSchema in project flink by apache.

From the class MaterializedCollectBatchResultTest, method testLimitedSnapshot:

@Test
public void testLimitedSnapshot() throws Exception {
    final ResolvedSchema schema = ResolvedSchema.physical(new String[] { "f0", "f1" }, new DataType[] { DataTypes.STRING(), DataTypes.INT() });
    @SuppressWarnings({ "unchecked", "rawtypes" }) final DataStructureConverter<RowData, Row> rowConverter = (DataStructureConverter) DataStructureConverters.getConverter(schema.toPhysicalRowDataType());
    try (TestMaterializedCollectBatchResult result = new TestMaterializedCollectBatchResult(
            new TestTableResult(ResultKind.SUCCESS_WITH_CONTENT, schema),
            // limit the materialized table to 2 rows, with 3 rows of overcommitment
            2, 3,
            createInternalBinaryRowDataConverter(schema.toPhysicalRowDataType()))) {
        result.isRetrieving = true;
        result.processRecord(Row.of("D", 1));
        result.processRecord(Row.of("A", 1));
        result.processRecord(Row.of("B", 1));
        result.processRecord(Row.of("A", 1));
        assertRowEquals(
                Arrays.asList(null, null, Row.of("B", 1), Row.of("A", 1)), // two over-committed rows
                result.getMaterializedTable(),
                rowConverter);
        assertEquals(TypedResult.payload(2), result.snapshot(1));
        assertRowEquals(Collections.singletonList(Row.of("B", 1)), result.retrievePage(1), rowConverter);
        assertRowEquals(Collections.singletonList(Row.of("A", 1)), result.retrievePage(2), rowConverter);
        result.processRecord(Row.of("C", 1));
        // the limit clean-up has taken place
        assertRowEquals(Arrays.asList(Row.of("A", 1), Row.of("C", 1)), result.getMaterializedTable(), rowConverter);
        result.processRecord(Row.of("A", 1));
        assertRowEquals(Arrays.asList(null, Row.of("C", 1), Row.of("A", 1)), result.getMaterializedTable(), rowConverter);
    }
}
Also used: RowData(org.apache.flink.table.data.RowData) BinaryRowData(org.apache.flink.table.data.binary.BinaryRowData) DataStructureConverter(org.apache.flink.table.data.conversion.DataStructureConverter) Row(org.apache.flink.types.Row) TestTableResult(org.apache.flink.table.client.cli.utils.TestTableResult) ResolvedSchema(org.apache.flink.table.catalog.ResolvedSchema) Test(org.junit.Test)

Aggregations

ResolvedSchema (org.apache.flink.table.catalog.ResolvedSchema): 84
Test (org.junit.Test): 50
DynamicTableSink (org.apache.flink.table.connector.sink.DynamicTableSink): 20
DataType (org.apache.flink.table.types.DataType): 20
RowData (org.apache.flink.table.data.RowData): 17
ValidationException (org.apache.flink.table.api.ValidationException): 14
ResolvedCatalogTable (org.apache.flink.table.catalog.ResolvedCatalogTable): 14
List (java.util.List): 11
SinkRuntimeProviderContext (org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext): 11
DynamicTableSource (org.apache.flink.table.connector.source.DynamicTableSource): 10
Column (org.apache.flink.table.catalog.Column): 9
LogicalType (org.apache.flink.table.types.logical.LogicalType): 9
RowType (org.apache.flink.table.types.logical.RowType): 9
HashMap (java.util.HashMap): 8
Collectors (java.util.stream.Collectors): 8
RelDataType (org.apache.calcite.rel.type.RelDataType): 8
Internal (org.apache.flink.annotation.Internal): 8
HBaseWriteOptions (org.apache.flink.connector.hbase.options.HBaseWriteOptions): 6
FlinkTypeFactory (org.apache.flink.table.planner.calcite.FlinkTypeFactory): 6
Row (org.apache.flink.types.Row): 6