Use of org.apache.flink.table.catalog.ResolvedSchema in project flink by apache.
From the class MaterializedCollectStreamResultTest, method testSnapshot:
@Test
public void testSnapshot() throws Exception {
    final ResolvedSchema schema = ResolvedSchema.physical(
            new String[] {"f0", "f1"},
            new DataType[] {DataTypes.STRING(), DataTypes.INT()});

    @SuppressWarnings({"unchecked", "rawtypes"})
    final DataStructureConverter<RowData, Row> rowConverter =
            (DataStructureConverter) DataStructureConverters.getConverter(schema.toPhysicalRowDataType());

    try (TestMaterializedCollectStreamResult result = new TestMaterializedCollectStreamResult(
            new TestTableResult(ResultKind.SUCCESS_WITH_CONTENT, schema),
            Integer.MAX_VALUE,
            createInternalBinaryRowDataConverter(schema.toPhysicalRowDataType()))) {
        result.isRetrieving = true;

        // Four inserts materialize four rows.
        result.processRecord(Row.ofKind(RowKind.INSERT, "A", 1));
        result.processRecord(Row.ofKind(RowKind.INSERT, "B", 1));
        result.processRecord(Row.ofKind(RowKind.INSERT, "A", 1));
        result.processRecord(Row.ofKind(RowKind.INSERT, "C", 2));

        assertEquals(TypedResult.payload(4), result.snapshot(1));
        assertRowEquals(Collections.singletonList(Row.of("A", 1)), result.retrievePage(1), rowConverter);
        assertRowEquals(Collections.singletonList(Row.of("B", 1)), result.retrievePage(2), rowConverter);
        assertRowEquals(Collections.singletonList(Row.of("A", 1)), result.retrievePage(3), rowConverter);
        assertRowEquals(Collections.singletonList(Row.of("C", 2)), result.retrievePage(4), rowConverter);

        // A retraction removes one matching row.
        result.processRecord(Row.ofKind(RowKind.UPDATE_BEFORE, "A", 1));

        assertEquals(TypedResult.payload(3), result.snapshot(1));
        assertRowEquals(Collections.singletonList(Row.of("A", 1)), result.retrievePage(1), rowConverter);
        assertRowEquals(Collections.singletonList(Row.of("B", 1)), result.retrievePage(2), rowConverter);
        assertRowEquals(Collections.singletonList(Row.of("C", 2)), result.retrievePage(3), rowConverter);

        // Two more retractions and one update leave two rows.
        result.processRecord(Row.ofKind(RowKind.UPDATE_BEFORE, "C", 2));
        result.processRecord(Row.ofKind(RowKind.UPDATE_BEFORE, "A", 1));
        result.processRecord(Row.ofKind(RowKind.UPDATE_AFTER, "D", 1));

        assertEquals(TypedResult.payload(2), result.snapshot(1));
        assertRowEquals(Collections.singletonList(Row.of("B", 1)), result.retrievePage(1), rowConverter);
        assertRowEquals(Collections.singletonList(Row.of("D", 1)), result.retrievePage(2), rowConverter);
    }
}
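The snapshot size drops from four to three to two rows: each UPDATE_BEFORE retracts one previously materialized row, and the final UPDATE_AFTER materializes its replacement ("D", 1), so only ("B", 1) and ("D", 1) survive the net changelog.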
Use of org.apache.flink.table.catalog.ResolvedSchema in project flink by apache.
From the class MaterializedCollectStreamResultTest, method testLimitedSnapshot:
@Test
public void testLimitedSnapshot() throws Exception {
    final ResolvedSchema schema = ResolvedSchema.physical(
            new String[] {"f0", "f1"},
            new DataType[] {DataTypes.STRING(), DataTypes.INT()});

    @SuppressWarnings({"unchecked", "rawtypes"})
    final DataStructureConverter<RowData, Row> rowConverter =
            (DataStructureConverter) DataStructureConverters.getConverter(schema.toPhysicalRowDataType());

    // Limit the materialized table to 2 rows, with 3 rows overcommitment.
    try (TestMaterializedCollectStreamResult result = new TestMaterializedCollectStreamResult(
            new TestTableResult(ResultKind.SUCCESS_WITH_CONTENT, schema),
            2,
            3,
            createInternalBinaryRowDataConverter(schema.toPhysicalRowDataType()))) {
        result.isRetrieving = true;

        result.processRecord(Row.ofKind(RowKind.INSERT, "D", 1));
        result.processRecord(Row.ofKind(RowKind.INSERT, "A", 1));
        result.processRecord(Row.ofKind(RowKind.INSERT, "B", 1));
        result.processRecord(Row.ofKind(RowKind.INSERT, "A", 1));

        // Two over-committed rows remain as null placeholders.
        assertRowEquals(
                Arrays.asList(null, null, Row.ofKind(RowKind.INSERT, "B", 1), Row.ofKind(RowKind.INSERT, "A", 1)),
                result.getMaterializedTable(), rowConverter);

        assertEquals(TypedResult.payload(2), result.snapshot(1));
        assertRowEquals(Collections.singletonList(Row.ofKind(RowKind.INSERT, "B", 1)), result.retrievePage(1), rowConverter);
        assertRowEquals(Collections.singletonList(Row.ofKind(RowKind.INSERT, "A", 1)), result.retrievePage(2), rowConverter);

        result.processRecord(Row.ofKind(RowKind.INSERT, "C", 1));

        // Limit clean-up has taken place.
        assertRowEquals(
                Arrays.asList(Row.ofKind(RowKind.INSERT, "A", 1), Row.ofKind(RowKind.INSERT, "C", 1)),
                result.getMaterializedTable(), rowConverter);

        result.processRecord(Row.ofKind(RowKind.DELETE, "A", 1));

        // Regular clean-up has taken place.
        assertRowEquals(
                Collections.singletonList(Row.ofKind(RowKind.INSERT, "C", 1)),
                result.getMaterializedTable(), rowConverter);
    }
}
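As the assertions show, rows evicted by the two-row limit first linger as null placeholders; inserting ("C", 1) pushes the table past the overcommitment threshold and triggers the limit clean-up, and the subsequent DELETE triggers the regular clean-up of the retracted row.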
Use of org.apache.flink.table.catalog.ResolvedSchema in project flink by apache.
From the class TimestampExtractorUtils, method getAccessedFields:
/**
* Retrieves all field accesses needed for the given {@link TimestampExtractor}.
*
* @param timestampExtractor Extractor for which to construct array of field accesses.
* @param physicalInputType Physical input type that the timestamp extractor accesses.
* @param nameRemapping Additional remapping of a logical to a physical field name. The
*     TimestampExtractor works with logical names but accesses physical fields.
* @return Array of physical field references.
*/
public static ResolvedFieldReference[] getAccessedFields(
        TimestampExtractor timestampExtractor,
        DataType physicalInputType,
        Function<String, String> nameRemapping) {
    final Function<String, ResolvedFieldReference> fieldMapping;
    if (LogicalTypeChecks.isCompositeType(physicalInputType.getLogicalType())) {
        ResolvedSchema schema = DataTypeUtils.expandCompositeTypeToSchema(physicalInputType);
        fieldMapping = (arg) -> mapToResolvedField(nameRemapping, schema, arg);
    } else {
        fieldMapping = (arg) -> new ResolvedFieldReference(
                arg, TypeConversions.fromDataTypeToLegacyInfo(physicalInputType), 0);
    }
    return getAccessedFields(timestampExtractor, fieldMapping);
}
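For illustration, a minimal usage sketch: it assumes the legacy ExistingField extractor (from the same deprecated TimestampExtractor API) and an identity name remapping, and the row type and field names are made up for the example:

// Illustrative row type with a timestamp field "ts".
DataType physicalInputType = DataTypes.ROW(
        DataTypes.FIELD("ts", DataTypes.TIMESTAMP(3)),
        DataTypes.FIELD("v", DataTypes.INT()));

// ExistingField reads the rowtime from an existing field by its logical name;
// Function.identity() means logical and physical names coincide here.
ResolvedFieldReference[] accessed = TimestampExtractorUtils.getAccessedFields(
        new ExistingField("ts"), physicalInputType, Function.identity());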
Use of org.apache.flink.table.catalog.ResolvedSchema in project flink by apache.
From the class TableSchema, method fromResolvedSchema:
/**
 * Helps to migrate the new {@link ResolvedSchema} to old API methods.
 */
public static TableSchema fromResolvedSchema(ResolvedSchema resolvedSchema) {
    final TableSchema.Builder builder = TableSchema.builder();
    resolvedSchema.getColumns().stream()
            .map(column -> {
                if (column instanceof Column.PhysicalColumn) {
                    final Column.PhysicalColumn c = (Column.PhysicalColumn) column;
                    return TableColumn.physical(c.getName(), c.getDataType());
                } else if (column instanceof Column.MetadataColumn) {
                    final Column.MetadataColumn c = (Column.MetadataColumn) column;
                    return TableColumn.metadata(
                            c.getName(), c.getDataType(), c.getMetadataKey().orElse(null), c.isVirtual());
                } else if (column instanceof Column.ComputedColumn) {
                    final Column.ComputedColumn c = (Column.ComputedColumn) column;
                    return TableColumn.computed(
                            c.getName(), c.getDataType(), c.getExpression().asSerializableString());
                }
                throw new IllegalArgumentException("Unsupported column type: " + column);
            })
            .forEach(builder::add);
    resolvedSchema.getWatermarkSpecs()
            .forEach(spec -> builder.watermark(
                    spec.getRowtimeAttribute(),
                    spec.getWatermarkExpression().asSerializableString(),
                    spec.getWatermarkExpression().getOutputDataType()));
    resolvedSchema.getPrimaryKey()
            .ifPresent(pk -> builder.primaryKey(pk.getName(), pk.getColumns().toArray(new String[0])));
    return builder.build();
}
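A minimal usage sketch of this migration helper, using the same ResolvedSchema.physical factory as the tests above (the column names are illustrative):

ResolvedSchema resolved = ResolvedSchema.physical(
        new String[] {"id", "name"},
        new DataType[] {DataTypes.BIGINT(), DataTypes.STRING()});
// Bridge the new schema class to the deprecated TableSchema for old API methods.
TableSchema legacy = TableSchema.fromResolvedSchema(resolved);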
Use of org.apache.flink.table.catalog.ResolvedSchema in project flink by apache.
From the class QueryOperationTest, method testSummaryString:
@Test
public void testSummaryString() {
    ResolvedSchema schema = ResolvedSchema.physical(
            Collections.singletonList("a"), Collections.singletonList(DataTypes.INT()));
    ProjectQueryOperation tableOperation =
            new ProjectQueryOperation(
                    Collections.singletonList(new FieldReferenceExpression("a", DataTypes.INT(), 0, 0)),
                    new SourceQueryOperation(
                            ContextResolvedTable.temporary(
                                    ObjectIdentifier.of("cat1", "db1", "tab1"),
                                    new ResolvedCatalogTable(
                                            CatalogTable.of(
                                                    Schema.newBuilder().build(),
                                                    null,
                                                    Collections.emptyList(),
                                                    Collections.emptyMap()),
                                            schema))),
                    schema);
    SetQueryOperation unionQueryOperation =
            new SetQueryOperation(
                    tableOperation, tableOperation, SetQueryOperation.SetQueryOperationType.UNION, true, schema);
    assertEquals(
            "Union: (all: [true])\n"
                    + "    Project: (projections: [a])\n"
                    + "        CatalogTable: (identifier: [cat1.db1.tab1], fields: [a])\n"
                    + "    Project: (projections: [a])\n"
                    + "        CatalogTable: (identifier: [cat1.db1.tab1], fields: [a])",
            unionQueryOperation.asSummaryString());
}
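The expected string shows the format of asSummaryString: the operation tree is printed one node per line, each nesting level indented beneath its parent, with both identical Project inputs of the UNION listed in order.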