use of org.apache.flink.table.catalog.ResolvedSchema in project flink by apache.
the class UpsertKafkaDynamicTableFactory method createKeyValueProjections.
private Tuple2<int[], int[]> createKeyValueProjections(ResolvedCatalogTable catalogTable) {
    ResolvedSchema schema = catalogTable.getResolvedSchema();
    // the primary key should have been validated earlier
    List<String> keyFields = schema.getPrimaryKey().get().getColumns();
    DataType physicalDataType = schema.toPhysicalRowDataType();
    Configuration tableOptions = Configuration.fromMap(catalogTable.getOptions());
    // upsert-kafka sets 'key.fields' to the primary key fields by default
    tableOptions.set(KEY_FIELDS, keyFields);
    int[] keyProjection = createKeyFormatProjection(tableOptions, physicalDataType);
    int[] valueProjection = createValueFormatProjection(tableOptions, physicalDataType);
    return Tuple2.of(keyProjection, valueProjection);
}
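
The projections above are derived entirely from what the ResolvedSchema exposes: the already-validated primary key and the physical row data type. A minimal, self-contained sketch of that schema inspection, assuming the standard three-argument ResolvedSchema constructor (the column names are illustrative):

import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.catalog.ResolvedSchema;
import org.apache.flink.table.catalog.UniqueConstraint;
import org.apache.flink.table.types.DataType;

public class SchemaInspectionSketch {
    public static void main(String[] args) {
        // two physical columns, with "id" declared as the primary key
        ResolvedSchema schema =
                new ResolvedSchema(
                        Arrays.asList(
                                Column.physical("id", DataTypes.BIGINT().notNull()),
                                Column.physical("name", DataTypes.STRING())),
                        Collections.emptyList(), // no watermark specs
                        UniqueConstraint.primaryKey("pk", Collections.singletonList("id")));

        // the factory reads the key fields from the primary key constraint ...
        List<String> keyFields = schema.getPrimaryKey().get().getColumns();
        // ... and computes both projections against the physical row type
        DataType physicalDataType = schema.toPhysicalRowDataType();

        System.out.println(keyFields); // [id]
        System.out.println(physicalDataType); // the ROW type of the two physical columns
    }
}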
use of org.apache.flink.table.catalog.ResolvedSchema in project zeppelin by apache.
the class Flink113Shims method rowToString.
@Override
public String[] rowToString(Object row, Object table, Object tableConfig) {
    final String zone =
            ((TableConfig) tableConfig).getConfiguration().get(TableConfigOptions.LOCAL_TIME_ZONE);
    ZoneId zoneId =
            TableConfigOptions.LOCAL_TIME_ZONE.defaultValue().equals(zone)
                    ? ZoneId.systemDefault()
                    : ZoneId.of(zone);
    ResolvedSchema resolvedSchema = ((Table) table).getResolvedSchema();
    return PrintUtils.rowToString((Row) row, resolvedSchema, zoneId);
}
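
Apart from the zone handling, the shim is a straight delegation to PrintUtils.rowToString. The interesting part is the fallback: LOCAL_TIME_ZONE defaults to a sentinel string rather than a real zone id, and the shim maps that sentinel to the JVM default zone. A small sketch of that fallback in isolation, reading from a plain Configuration:

import java.time.ZoneId;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.table.api.config.TableConfigOptions;

public class SessionZoneSketch {
    // mirrors the fallback above: the option's default value means "use the JVM zone"
    static ZoneId resolveZone(Configuration conf) {
        String zone = conf.get(TableConfigOptions.LOCAL_TIME_ZONE);
        return TableConfigOptions.LOCAL_TIME_ZONE.defaultValue().equals(zone)
                ? ZoneId.systemDefault()
                : ZoneId.of(zone);
    }

    public static void main(String[] args) {
        Configuration conf = new Configuration();
        System.out.println(resolveZone(conf)); // the JVM default zone

        conf.set(TableConfigOptions.LOCAL_TIME_ZONE, "UTC");
        System.out.println(resolveZone(conf)); // UTC
    }
}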
use of org.apache.flink.table.catalog.ResolvedSchema in project flink by apache.
the class CliResultViewTest method testResultViewClearResult.
private void testResultViewClearResult(
        TypedResult<?> typedResult, boolean isTableMode, int expectedCancellationCount)
        throws Exception {
    final CountDownLatch cancellationCounterLatch =
            new CountDownLatch(expectedCancellationCount);
    final MockExecutor executor = new MockExecutor(typedResult, cancellationCounterLatch);
    final Configuration testConfig = new Configuration();
    testConfig.set(EXECUTION_RESULT_MODE, ResultMode.TABLE);
    testConfig.set(RUNTIME_MODE, RuntimeExecutionMode.STREAMING);
    String sessionId = executor.openSession("test-session");
    ResolvedSchema schema = ResolvedSchema.of(Column.physical("Null Field", DataTypes.STRING()));
    final ResultDescriptor descriptor =
            new ResultDescriptor(
                    "result-id",
                    schema,
                    false,
                    testConfig,
                    new RowDataToStringConverterImpl(schema.toPhysicalRowDataType()));
    try (CliClient cli =
            new TestingCliClient(
                    TerminalUtils.createDumbTerminal(),
                    sessionId,
                    executor,
                    File.createTempFile("history", "tmp").toPath(),
                    null)) {
        Thread resultViewRunner = new Thread(new TestingCliResultView(cli, descriptor, isTableMode));
        resultViewRunner.start();
        // interrupt the view so it shuts down (which should trigger the cancellation)
        if (!resultViewRunner.isInterrupted()) {
            resultViewRunner.interrupt();
        }
        // wait until the result view has exited before closing the client
        while (resultViewRunner.isAlive()) {
            Thread.sleep(100);
        }
    }
    assertTrue(
            "Invalid number of cancellations.",
            cancellationCounterLatch.await(10, TimeUnit.SECONDS));
}
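
The expected cancellation count is enforced with a CountDownLatch that MockExecutor decrements on every cancellation, and the final assertTrue only passes if the count reaches zero within the timeout. A stripped-down, hypothetical sketch of that pattern, with the executor replaced by a plain Runnable:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

public class CancellationLatchSketch {
    public static void main(String[] args) throws InterruptedException {
        // expect exactly two cancellations
        CountDownLatch latch = new CountDownLatch(2);

        // stand-in for the mock executor's cancellation hook
        Runnable onCancel = latch::countDown;
        onCancel.run();
        onCancel.run();

        // await() returns true only if the count reached zero within the timeout,
        // which is the shape of the assertion at the end of the test above
        System.out.println(latch.await(10, TimeUnit.SECONDS)); // true
    }
}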
use of org.apache.flink.table.catalog.ResolvedSchema in project flink by apache.
the class MaterializedCollectBatchResultTest method testSnapshot.
@Test
public void testSnapshot() throws Exception {
    final ResolvedSchema schema =
            ResolvedSchema.physical(
                    new String[] { "f0", "f1" },
                    new DataType[] { DataTypes.STRING(), DataTypes.INT() });
    @SuppressWarnings({ "unchecked", "rawtypes" })
    final DataStructureConverter<RowData, Row> rowConverter =
            (DataStructureConverter)
                    DataStructureConverters.getConverter(schema.toPhysicalRowDataType());
    try (TestMaterializedCollectBatchResult result =
            new TestMaterializedCollectBatchResult(
                    new TestTableResult(ResultKind.SUCCESS_WITH_CONTENT, schema),
                    Integer.MAX_VALUE,
                    createInternalBinaryRowDataConverter(schema.toPhysicalRowDataType()))) {
        result.isRetrieving = true;
        result.processRecord(Row.of("A", 1));
        result.processRecord(Row.of("B", 1));
        result.processRecord(Row.of("A", 1));
        result.processRecord(Row.of("C", 2));
        // with a page size of 1, the snapshot reports one page per materialized row
        assertEquals(TypedResult.payload(4), result.snapshot(1));
        assertRowEquals(Collections.singletonList(Row.of("A", 1)), result.retrievePage(1), rowConverter);
        assertRowEquals(Collections.singletonList(Row.of("B", 1)), result.retrievePage(2), rowConverter);
        assertRowEquals(Collections.singletonList(Row.of("A", 1)), result.retrievePage(3), rowConverter);
        assertRowEquals(Collections.singletonList(Row.of("C", 2)), result.retrievePage(4), rowConverter);
        result.processRecord(Row.of("A", 1));
        assertEquals(TypedResult.payload(5), result.snapshot(1));
        assertRowEquals(Collections.singletonList(Row.of("A", 1)), result.retrievePage(1), rowConverter);
        assertRowEquals(Collections.singletonList(Row.of("B", 1)), result.retrievePage(2), rowConverter);
        assertRowEquals(Collections.singletonList(Row.of("A", 1)), result.retrievePage(3), rowConverter);
        assertRowEquals(Collections.singletonList(Row.of("C", 2)), result.retrievePage(4), rowConverter);
        assertRowEquals(Collections.singletonList(Row.of("A", 1)), result.retrievePage(5), rowConverter);
    }
}
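
The assertions hinge on the snapshot/page contract: snapshot(pageSize) returns the resulting page count as a typed payload, and retrievePage(n) then serves the n-th page, here a single row each since the page size is 1. A hypothetical sketch of that contract over a plain list (names are ours, not Flink's):

import java.util.Arrays;
import java.util.List;

public class PagingSketch {
    static <T> List<T> retrievePage(List<T> snapshot, int page, int pageSize) {
        int from = (page - 1) * pageSize;
        int to = Math.min(from + pageSize, snapshot.size());
        return snapshot.subList(from, to);
    }

    public static void main(String[] args) {
        List<String> rows = Arrays.asList("A,1", "B,1", "A,1", "C,2");
        int pageSize = 1;
        // the page count reported by the snapshot: ceil(rowCount / pageSize)
        int pageCount = (rows.size() + pageSize - 1) / pageSize;
        System.out.println(pageCount); // 4
        System.out.println(retrievePage(rows, 2, pageSize)); // [B,1]
    }
}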
use of org.apache.flink.table.catalog.ResolvedSchema in project flink by apache.
the class MaterializedCollectBatchResultTest method testLimitedSnapshot.
@Test
public void testLimitedSnapshot() throws Exception {
    final ResolvedSchema schema =
            ResolvedSchema.physical(
                    new String[] { "f0", "f1" },
                    new DataType[] { DataTypes.STRING(), DataTypes.INT() });
    @SuppressWarnings({ "unchecked", "rawtypes" })
    final DataStructureConverter<RowData, Row> rowConverter =
            (DataStructureConverter)
                    DataStructureConverters.getConverter(schema.toPhysicalRowDataType());
    // limit the materialized table to 2 rows, with 3 rows overcommitment
    try (TestMaterializedCollectBatchResult result =
            new TestMaterializedCollectBatchResult(
                    new TestTableResult(ResultKind.SUCCESS_WITH_CONTENT, schema),
                    2,
                    3,
                    createInternalBinaryRowDataConverter(schema.toPhysicalRowDataType()))) {
        result.isRetrieving = true;
        result.processRecord(Row.of("D", 1));
        result.processRecord(Row.of("A", 1));
        result.processRecord(Row.of("B", 1));
        result.processRecord(Row.of("A", 1));
        // the first two rows are over-committed and have been invalidated
        assertRowEquals(
                Arrays.asList(null, null, Row.of("B", 1), Row.of("A", 1)),
                result.getMaterializedTable(),
                rowConverter);
        assertEquals(TypedResult.payload(2), result.snapshot(1));
        assertRowEquals(Collections.singletonList(Row.of("B", 1)), result.retrievePage(1), rowConverter);
        assertRowEquals(Collections.singletonList(Row.of("A", 1)), result.retrievePage(2), rowConverter);
        result.processRecord(Row.of("C", 1));
        // the batched limit clean-up has taken place
        assertRowEquals(
                Arrays.asList(Row.of("A", 1), Row.of("C", 1)),
                result.getMaterializedTable(),
                rowConverter);
        result.processRecord(Row.of("A", 1));
        assertRowEquals(
                Arrays.asList(null, Row.of("C", 1), Row.of("A", 1)),
                result.getMaterializedTable(),
                rowConverter);
    }
}
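
The nulls in the expected tables are not noise: once the 2-row limit is hit, the result invalidates the oldest valid row in place instead of shifting the backing list, and only drops the null prefix in one batch when it reaches the 3-row overcommit threshold. A hypothetical re-implementation of that behavior, inferred from the assertions above (names and structure are ours):

import java.util.ArrayList;
import java.util.List;

public class LimitedMaterializationSketch {
    static final int LIMIT = 2; // maximum number of valid rows
    static final int OVERCOMMIT = 3; // batch size for the deferred clean-up

    final List<String> table = new ArrayList<>();
    int validStart = 0; // index of the first valid (non-null) row

    void process(String row) {
        if (table.size() - validStart >= LIMIT) {
            // invalidate the oldest valid row cheaply instead of shifting the list
            table.set(validStart++, null);
            // once enough rows are overcommitted, drop the null prefix in one batch
            if (validStart >= OVERCOMMIT) {
                table.subList(0, validStart).clear();
                validStart = 0;
            }
        }
        table.add(row);
    }

    public static void main(String[] args) {
        LimitedMaterializationSketch result = new LimitedMaterializationSketch();
        for (String row : new String[] { "D", "A", "B", "A" }) {
            result.process(row);
        }
        System.out.println(result.table); // [null, null, B, A]
        result.process("C");
        System.out.println(result.table); // [A, C] -- the batched clean-up ran
        result.process("A");
        System.out.println(result.table); // [null, C, A]
    }
}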