Use of org.apache.flink.table.api.TableSchema in project flink by apache.
From the class MergeTableLikeUtilTest, method mergeConstraintsFromDerivedTable.
@Test
public void mergeConstraintsFromDerivedTable() {
    TableSchema sourceSchema =
            TableSchema.builder()
                    .add(TableColumn.physical("one", DataTypes.INT().notNull()))
                    .add(TableColumn.physical("two", DataTypes.STRING().notNull()))
                    .add(TableColumn.physical("three", DataTypes.FLOAT()))
                    .build();

    TableSchema mergedSchema =
            util.mergeTables(
                    getDefaultMergingStrategies(),
                    sourceSchema,
                    Collections.emptyList(),
                    Collections.emptyList(),
                    primaryKey("one", "two"));

    TableSchema expectedSchema =
            TableSchema.builder()
                    .add(TableColumn.physical("one", DataTypes.INT().notNull()))
                    .add(TableColumn.physical("two", DataTypes.STRING().notNull()))
                    .add(TableColumn.physical("three", DataTypes.FLOAT()))
                    .primaryKey("PK_3531879", new String[] {"one", "two"})
                    .build();
assertThat(mergedSchema, equalTo(expectedSchema));
}
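For context, MergeTableLikeUtil implements the schema merging behind Flink's CREATE TABLE ... LIKE clause. Below is a minimal, hypothetical DDL sketch of the situation this test simulates: the derived table contributes the primary key itself, and Flink assigns a generated constraint name (such as the PK_3531879 expected above) when none is given. The table names and the datagen connector are illustrative assumptions, not part of the test.

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;

public class LikeDerivedPrimaryKeySketch {
    public static void main(String[] args) {
        TableEnvironment env =
                TableEnvironment.create(EnvironmentSettings.newInstance().inStreamingMode().build());
        // Base table without a primary key; the connector choice is arbitrary.
        env.executeSql(
                "CREATE TABLE base (one INT NOT NULL, two STRING NOT NULL, three FLOAT)"
                        + " WITH ('connector' = 'datagen')");
        // The derived table adds the unnamed constraint; Flink generates its name.
        env.executeSql("CREATE TABLE derived (PRIMARY KEY (one, two) NOT ENFORCED) LIKE base");
    }
}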
Use of org.apache.flink.table.api.TableSchema in project flink by apache.
From the class MergeTableLikeUtilTest, method mergeOverwritingWatermarksDuplicate.
@Test
public void mergeOverwritingWatermarksDuplicate() {
    TableSchema sourceSchema =
            TableSchema.builder()
                    .add(TableColumn.physical("one", DataTypes.INT()))
                    .add(TableColumn.physical("timestamp", DataTypes.TIMESTAMP()))
                    .watermark("timestamp", "timestamp - INTERVAL '5' SECOND", DataTypes.TIMESTAMP())
                    .build();

    List<SqlWatermark> derivedWatermarkSpecs =
            Collections.singletonList(
                    new SqlWatermark(
                            SqlParserPos.ZERO,
                            identifier("timestamp"),
                            boundedStrategy("timestamp", "10")));

    Map<FeatureOption, MergingStrategy> mergingStrategies = getDefaultMergingStrategies();
    mergingStrategies.put(FeatureOption.WATERMARKS, MergingStrategy.OVERWRITING);

    TableSchema mergedSchema =
            util.mergeTables(
                    mergingStrategies,
                    sourceSchema,
                    Collections.emptyList(),
                    derivedWatermarkSpecs,
                    null);

    TableSchema expectedSchema =
            TableSchema.builder()
                    .add(TableColumn.physical("one", DataTypes.INT()))
                    .add(TableColumn.physical("timestamp", DataTypes.TIMESTAMP()))
                    .watermark("timestamp", "`timestamp` - INTERVAL '10' SECOND", DataTypes.TIMESTAMP())
                    .build();
assertThat(mergedSchema, equalTo(expectedSchema));
}
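At the SQL level this corresponds to a derived table that redefines the base table's watermark on the same column, which is only legal with the OVERWRITING WATERMARKS like-option; the default INCLUDING strategy would reject the duplicate definition. A hedged sketch, with illustrative table names, a TIMESTAMP(3) column (DDL watermarks require millisecond precision), and an arbitrary connector:

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;

public class LikeOverwritingWatermarkSketch {
    public static void main(String[] args) {
        TableEnvironment env =
                TableEnvironment.create(EnvironmentSettings.newInstance().inStreamingMode().build());
        env.executeSql(
                "CREATE TABLE base (one INT, `timestamp` TIMESTAMP(3),"
                        + " WATERMARK FOR `timestamp` AS `timestamp` - INTERVAL '5' SECOND)"
                        + " WITH ('connector' = 'datagen')");
        // Overwrites the inherited 5-second watermark with a 10-second one.
        env.executeSql(
                "CREATE TABLE derived ("
                        + " WATERMARK FOR `timestamp` AS `timestamp` - INTERVAL '10' SECOND"
                        + ") LIKE base (OVERWRITING WATERMARKS)");
    }
}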
Use of org.apache.flink.table.api.TableSchema in project flink by apache.
From the class MergeTableLikeUtilTest, method mergeConstraintsFromBaseTable.
@Test
public void mergeConstraintsFromBaseTable() {
    TableSchema sourceSchema =
            TableSchema.builder()
                    .add(TableColumn.physical("one", DataTypes.INT().notNull()))
                    .add(TableColumn.physical("two", DataTypes.STRING().notNull()))
                    .add(TableColumn.physical("three", DataTypes.FLOAT()))
                    .primaryKey("constraint-42", new String[] {"one", "two"})
                    .build();

    TableSchema mergedSchema =
            util.mergeTables(
                    getDefaultMergingStrategies(),
                    sourceSchema,
                    Collections.emptyList(),
                    Collections.emptyList(),
                    null);

    TableSchema expectedSchema =
            TableSchema.builder()
                    .add(TableColumn.physical("one", DataTypes.INT().notNull()))
                    .add(TableColumn.physical("two", DataTypes.STRING().notNull()))
                    .add(TableColumn.physical("three", DataTypes.FLOAT()))
                    .primaryKey("constraint-42", new String[] {"one", "two"})
                    .build();
assertThat(mergedSchema, equalTo(expectedSchema));
}
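With the default strategies, the base table's own constraint is carried over verbatim, including its explicit name. The test's getDefaultMergingStrategies() helper is not shown here; a hypothetical reconstruction, assuming every feature defaults to INCLUDING in line with CREATE TABLE ... LIKE semantics, could look like this:

import java.util.EnumMap;
import java.util.Map;

import org.apache.flink.sql.parser.ddl.SqlTableLike.FeatureOption;
import org.apache.flink.sql.parser.ddl.SqlTableLike.MergingStrategy;

class DefaultStrategiesSketch {
    // Hypothetical: every feature (constraints, watermarks, options, ...) defaults to INCLUDING.
    static Map<FeatureOption, MergingStrategy> defaultMergingStrategies() {
        Map<FeatureOption, MergingStrategy> strategies = new EnumMap<>(FeatureOption.class);
        for (FeatureOption option : FeatureOption.values()) {
            strategies.put(option, MergingStrategy.INCLUDING);
        }
        return strategies;
    }
}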
Use of org.apache.flink.table.api.TableSchema in project flink by apache.
From the class SchemaValidator, method deriveFieldMapping.
/**
 * Finds a table source field mapping.
 *
 * @param properties The properties describing a schema.
 * @param inputType The input type that a connector and/or format produces. This parameter can
 *     be used to resolve a rowtime field against an input field.
 * @return mapping from table schema field names to source field names
 */
public static Map<String, String> deriveFieldMapping(
        DescriptorProperties properties, Optional<TypeInformation<?>> inputType) {
    Map<String, String> mapping = new HashMap<>();

    TableSchema schema = properties.getTableSchema(SCHEMA);

    List<String> columnNames = new ArrayList<>();
    inputType.ifPresent(
            t -> columnNames.addAll(Arrays.asList(((CompositeType) t).getFieldNames())));

    // add all source fields first because rowtime might reference one of them
    columnNames.forEach(name -> mapping.put(name, name));

    // add all schema fields with an implicit (identity) mapping;
    // explicit mappings below override these entries
    Arrays.stream(schema.getFieldNames()).forEach(name -> mapping.put(name, name));

    Map<String, String> names = properties.getIndexedProperty(SCHEMA, SCHEMA_NAME);
    for (int i = 0; i < names.size(); i++) {
        String name = properties.getString(SCHEMA + "." + i + "." + SCHEMA_NAME);
        Optional<String> source =
                properties.getOptionalString(SCHEMA + "." + i + "." + SCHEMA_FROM);
        if (source.isPresent()) {
            // add explicit mapping
            mapping.put(name, source.get());
        } else {
            // implicit mapping, time attribute, or generated column
            boolean isProctime =
                    properties
                            .getOptionalBoolean(SCHEMA + "." + i + "." + SCHEMA_PROCTIME)
                            .orElse(false);
            boolean isRowtime =
                    properties.containsKey(SCHEMA + "." + i + "." + ROWTIME_TIMESTAMPS_TYPE);
            boolean isGeneratedColumn = properties.containsKey(SCHEMA + "." + i + "." + EXPR);
            if (isProctime || isRowtime || isGeneratedColumn) {
                // remove proctime/rowtime attributes and generated columns from the mapping
                mapping.remove(name);
            } else if (!columnNames.contains(name)) {
                // reject schema fields that cannot be resolved against the input type
                throw new ValidationException(
                        format(
                                "Could not map the schema field '%s' to a field from source."
                                        + " Please specify the source field from which it"
                                        + " can be derived.",
                                name));
            }
        }
    }
    return mapping;
}
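A hedged usage sketch of deriveFieldMapping with the legacy descriptor format. The literal key names (schema.#.name, schema.#.data-type, schema.#.from) and the example fields are assumptions written out in place of the SCHEMA_* constants used above:

import java.util.Map;
import java.util.Optional;

import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.table.descriptors.DescriptorProperties;
import org.apache.flink.table.descriptors.SchemaValidator;

public class FieldMappingSketch {
    public static void main(String[] args) {
        DescriptorProperties properties = new DescriptorProperties();
        // "user_id" maps implicitly; "uid" is explicitly derived from "user_id".
        properties.putString("schema.0.name", "user_id");
        properties.putString("schema.0.data-type", "INT");
        properties.putString("schema.1.name", "uid");
        properties.putString("schema.1.data-type", "INT");
        properties.putString("schema.1.from", "user_id");

        // The connector/format produces a row with a single field "user_id".
        TypeInformation<?> inputType = Types.ROW_NAMED(new String[] {"user_id"}, Types.INT);

        Map<String, String> mapping =
                SchemaValidator.deriveFieldMapping(properties, Optional.of(inputType));
        // Expected contents: {user_id=user_id, uid=user_id}
        System.out.println(mapping);
    }
}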
Use of org.apache.flink.table.api.TableSchema in project flink by apache.
From the class CatalogTableImpTest, method testFromProperties.
@Test
public void testFromProperties() {
TableSchema schema = createTableSchema();
Map<String, String> prop = createProperties();
CatalogTable table = new CatalogTableImpl(schema, createPartitionKeys(), prop, TEST);
CatalogTableImpl tableFromProperties = CatalogTableImpl.fromProperties(table.toProperties());
assertEquals(tableFromProperties.getOptions(), table.getOptions());
assertEquals(tableFromProperties.getPartitionKeys(), table.getPartitionKeys());
assertEquals(tableFromProperties.getSchema(), table.getSchema());
}
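The round trip works because CatalogTableImpl.toProperties() flattens the schema, partition keys, and options into a single string map that a catalog can persist, and fromProperties() rebuilds the table from it. A minimal sketch with made-up schema, partition key, and options (the test's createTableSchema(), createProperties(), and createPartitionKeys() helpers are not shown in this excerpt):

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.catalog.CatalogTable;
import org.apache.flink.table.catalog.CatalogTableImpl;

public class CatalogTableRoundTripSketch {
    public static void main(String[] args) {
        TableSchema schema = TableSchema.builder()
                .field("id", DataTypes.INT())
                .field("region", DataTypes.STRING())
                .build();
        Map<String, String> options = new HashMap<>();
        options.put("connector", "filesystem");

        CatalogTable original = new CatalogTableImpl(
                schema, Collections.singletonList("region"), options, "an example table");
        // Serialize to a flat property map and rebuild; the copy should be
        // value-equal on schema, partition keys, and options.
        CatalogTableImpl copy = CatalogTableImpl.fromProperties(original.toProperties());
        System.out.println(copy.getSchema().equals(original.getSchema()));
    }
}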