
Example 56 with ResolvedSchema

Use of org.apache.flink.table.catalog.ResolvedSchema in the Apache Flink project.

From class SetOperationFactory, method validateSetOperation.

private void validateSetOperation(
        SetQueryOperationType operationType, QueryOperation left, QueryOperation right) {
    ResolvedSchema leftSchema = left.getResolvedSchema();
    int leftFieldCount = leftSchema.getColumnCount();
    ResolvedSchema rightSchema = right.getResolvedSchema();
    int rightFieldCount = rightSchema.getColumnCount();
    if (leftFieldCount != rightFieldCount) {
        throw new ValidationException(
                format(
                        "The %s operation on two tables of different column sizes: %d and %d is not supported",
                        operationType.toString().toLowerCase(),
                        leftFieldCount,
                        rightFieldCount));
    }
    final List<DataType> leftDataTypes = leftSchema.getColumnDataTypes();
    final List<DataType> rightDataTypes = rightSchema.getColumnDataTypes();
    IntStream.range(0, leftFieldCount)
            .forEach(
                    idx -> {
                        if (!findCommonColumnType(leftDataTypes, rightDataTypes, idx).isPresent()) {
                            throw new ValidationException(
                                    format(
                                            "Incompatible types for %s operation. "
                                                    + "Could not find a common type at position %s for '%s' and '%s'.",
                                            operationType.toString().toLowerCase(),
                                            idx,
                                            leftDataTypes.get(idx),
                                            rightDataTypes.get(idx)));
                        }
                    });
}
Also used : ValidationException(org.apache.flink.table.api.ValidationException) DataType(org.apache.flink.table.types.DataType) ResolvedSchema(org.apache.flink.table.catalog.ResolvedSchema)
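
The column-count guard above can be exercised against the public ResolvedSchema API directly. Below is a minimal standalone sketch, assuming nothing beyond the Flink table-common classes; the schemas and the class name are illustrative, not from the Flink source.

import java.util.Arrays;

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.ValidationException;
import org.apache.flink.table.catalog.ResolvedSchema;

public class SetOperationCheckSketch {
    public static void main(String[] args) {
        // Two physical schemas with different column counts.
        ResolvedSchema left = ResolvedSchema.physical(
                Arrays.asList("id", "name"),
                Arrays.asList(DataTypes.INT(), DataTypes.STRING()));
        ResolvedSchema right = ResolvedSchema.physical(
                Arrays.asList("id"),
                Arrays.asList(DataTypes.INT()));
        // Mirrors the guard in validateSetOperation: a union of these
        // two operations would be rejected because 2 != 1.
        if (left.getColumnCount() != right.getColumnCount()) {
            throw new ValidationException(
                    String.format(
                            "The union operation on two tables of different column sizes: %d and %d is not supported",
                            left.getColumnCount(), right.getColumnCount()));
        }
    }
}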

Example 57 with ResolvedSchema

Use of org.apache.flink.table.catalog.ResolvedSchema in the Apache Flink project.

From class SetOperationFactory, method createCommonTableSchema.

private ResolvedSchema createCommonTableSchema(QueryOperation left, QueryOperation right) {
    final ResolvedSchema leftSchema = left.getResolvedSchema();
    final List<DataType> leftDataTypes = leftSchema.getColumnDataTypes();
    final List<DataType> rightDataTypes = right.getResolvedSchema().getColumnDataTypes();
    final List<DataType> resultDataTypes =
            IntStream.range(0, leftSchema.getColumnCount())
                    .mapToObj(idx ->
                            findCommonColumnType(leftDataTypes, rightDataTypes, idx)
                                    .orElseThrow(AssertionError::new))
                    .map(TypeConversions::fromLogicalToDataType)
                    .collect(Collectors.toList());
    return ResolvedSchema.physical(leftSchema.getColumnNames(), resultDataTypes);
}
Also used : DataType(org.apache.flink.table.types.DataType) ResolvedSchema(org.apache.flink.table.catalog.ResolvedSchema)
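
findCommonColumnType ultimately asks LogicalTypeMerging for a common type per column position, and an empty result is what raises the ValidationException in validateSetOperation. A minimal sketch of merging one column pair, assuming only public Flink type utilities (the class and variable names are illustrative):

import java.util.Arrays;
import java.util.Optional;

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.types.DataType;
import org.apache.flink.table.types.logical.LogicalType;
import org.apache.flink.table.types.logical.utils.LogicalTypeMerging;
import org.apache.flink.table.types.utils.TypeConversions;

public class CommonTypeSketch {
    public static void main(String[] args) {
        LogicalType leftType = DataTypes.INT().getLogicalType();
        LogicalType rightType = DataTypes.BIGINT().getLogicalType();
        // INT and BIGINT widen to BIGINT; an empty Optional here would mean
        // the set operation has incompatible column types at this position.
        Optional<LogicalType> common =
                LogicalTypeMerging.findCommonType(Arrays.asList(leftType, rightType));
        DataType result =
                TypeConversions.fromLogicalToDataType(common.orElseThrow(AssertionError::new));
        System.out.println(result); // BIGINT
    }
}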

Example 58 with ResolvedSchema

Use of org.apache.flink.table.catalog.ResolvedSchema in the Apache Flink project.

From class ValuesOperationFactory, method create.

/**
 * Creates a valid {@link ValuesQueryOperation} operation.
 *
 * <p>It derives a row type based on {@link LogicalTypeMerging}. It flattens any row
 * constructors. It does not flatten ROWs that result from e.g. a function call.
 *
 * <p>The resulting schema can be provided manually. If it is not, the schema will be
 * automatically derived from the types of the expressions.
 */
QueryOperation create(
        @Nullable ResolvedSchema expectedSchema,
        List<ResolvedExpression> resolvedExpressions,
        ExpressionResolver.PostResolverFactory postResolverFactory) {
    List<List<ResolvedExpression>> resolvedRows = unwrapFromRowConstructor(resolvedExpressions);
    if (expectedSchema != null) {
        verifyAllSameSize(resolvedRows, expectedSchema.getColumnCount());
    }
    ResolvedSchema schema =
            Optional.ofNullable(expectedSchema).orElseGet(() -> extractSchema(resolvedRows));
    List<List<ResolvedExpression>> castedExpressions =
            resolvedRows.stream()
                    .map(row ->
                            convertTopLevelExpressionToExpectedRowType(
                                    postResolverFactory, schema.getColumnDataTypes(), row))
                    .collect(Collectors.toList());
    return new ValuesQueryOperation(castedExpressions, schema);
}
Also used : IntStream(java.util.stream.IntStream) DataType(org.apache.flink.table.types.DataType) QueryOperation(org.apache.flink.table.operations.QueryOperation) ApiExpressionUtils.valueLiteral(org.apache.flink.table.expressions.ApiExpressionUtils.valueLiteral) KeyValueDataType(org.apache.flink.table.types.KeyValueDataType) CallExpression(org.apache.flink.table.expressions.CallExpression) ResolvedSchema(org.apache.flink.table.catalog.ResolvedSchema) LogicalTypeMerging(org.apache.flink.table.types.logical.utils.LogicalTypeMerging) Expression(org.apache.flink.table.expressions.Expression) ExpressionDefaultVisitor(org.apache.flink.table.expressions.ExpressionDefaultVisitor) ArrayList(java.util.ArrayList) NULL(org.apache.flink.table.types.logical.LogicalTypeRoot.NULL) ResolvedExpression(org.apache.flink.table.expressions.ResolvedExpression) FieldsDataType(org.apache.flink.table.types.FieldsDataType) ExpressionResolver(org.apache.flink.table.expressions.resolver.ExpressionResolver) MAP(org.apache.flink.table.types.logical.LogicalTypeRoot.MAP) LogicalTypeCasts.supportsExplicitCast(org.apache.flink.table.types.logical.utils.LogicalTypeCasts.supportsExplicitCast) ValuesQueryOperation(org.apache.flink.table.operations.ValuesQueryOperation) Nullable(javax.annotation.Nullable) LinkedHashSet(java.util.LinkedHashSet) ROW(org.apache.flink.table.types.logical.LogicalTypeRoot.ROW) FunctionDefinition(org.apache.flink.table.functions.FunctionDefinition) BuiltInFunctionDefinitions(org.apache.flink.table.functions.BuiltInFunctionDefinitions) TableException(org.apache.flink.table.api.TableException) Set(java.util.Set) ValueLiteralExpression(org.apache.flink.table.expressions.ValueLiteralExpression) Collectors(java.util.stream.Collectors) List(java.util.List) CollectionDataType(org.apache.flink.table.types.CollectionDataType) LogicalType(org.apache.flink.table.types.logical.LogicalType) ARRAY(org.apache.flink.table.types.logical.LogicalTypeRoot.ARRAY) ValidationException(org.apache.flink.table.api.ValidationException) Optional(java.util.Optional) Internal(org.apache.flink.annotation.Internal) TypeConversions(org.apache.flink.table.types.utils.TypeConversions) Collections(java.util.Collections)
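
In user code this factory sits behind TableEnvironment.fromValues, which covers both branches of create: a schema derived per column via LogicalTypeMerging, or an expected schema supplied up front that each row is cast to. A minimal sketch, assuming a Flink 1.15-era Table API (the environment setup and values are illustrative):

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.TableEnvironment;

import static org.apache.flink.table.api.Expressions.row;

public class FromValuesSketch {
    public static void main(String[] args) {
        TableEnvironment env =
                TableEnvironment.create(EnvironmentSettings.inStreamingMode());
        // No expected schema: the second column mixes INT and DOUBLE,
        // which the factory merges to DOUBLE.
        Table derived = env.fromValues(row(1, 1), row(2, 2.5));
        // Expected schema provided: every row is cast to BIGINT/DOUBLE.
        Table explicit = env.fromValues(
                DataTypes.ROW(
                        DataTypes.FIELD("id", DataTypes.BIGINT()),
                        DataTypes.FIELD("score", DataTypes.DOUBLE())),
                row(1, 1), row(2, 2.5));
        derived.printSchema();
        explicit.printSchema();
    }
}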

Example 59 with ResolvedSchema

Use of org.apache.flink.table.catalog.ResolvedSchema in the Apache Flink project.

From class TableEnvironmentImpl, method buildResult.

private TableResultInternal buildResult(String[] headers, DataType[] types, Object[][] rows) {
    ResolvedSchema schema = ResolvedSchema.physical(headers, types);
    ResultProvider provider =
            new StaticResultProvider(
                    Arrays.stream(rows).map(Row::of).collect(Collectors.toList()));
    return TableResultImpl.builder()
            .resultKind(ResultKind.SUCCESS_WITH_CONTENT)
            .schema(schema)
            .resultProvider(provider)
            .setPrintStyle(
                    PrintStyle.tableauWithDataInferredColumnWidths(
                            schema,
                            provider.getRowDataStringConverter(),
                            Integer.MAX_VALUE,
                            true,
                            false))
            .build();
}
Also used : Row(org.apache.flink.types.Row) ResolvedSchema(org.apache.flink.table.catalog.ResolvedSchema)
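
The array-based ResolvedSchema.physical overload used by buildResult is public API, so the schema half of this helper is easy to reproduce. A minimal sketch with illustrative headers and types (StaticResultProvider itself is internal and omitted here):

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.catalog.ResolvedSchema;
import org.apache.flink.table.types.DataType;

public class BuildResultSchemaSketch {
    public static void main(String[] args) {
        String[] headers = {"table name", "comment"};
        DataType[] types = {DataTypes.STRING(), DataTypes.STRING()};
        // Same construction as buildResult: a purely physical schema
        // (no computed columns or watermarks) for a SHOW-style result.
        ResolvedSchema schema = ResolvedSchema.physical(headers, types);
        System.out.println(schema.getColumnNames());     // [table name, comment]
        System.out.println(schema.getColumnDataTypes()); // [STRING, STRING]
    }
}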

Example 60 with ResolvedSchema

Use of org.apache.flink.table.catalog.ResolvedSchema in the Apache Flink project.

From class KinesisDynamicTableSinkFactoryTest, method testBadTableSinkForCustomPartitionerForPartitionedTable.

@Test
public void testBadTableSinkForCustomPartitionerForPartitionedTable() {
    ResolvedSchema sinkSchema = defaultSinkSchema();
    Map<String, String> sinkOptions =
            defaultTableOptions()
                    .withTableOption(KinesisConnectorOptions.SINK_PARTITIONER, "random")
                    .build();
    Assertions.assertThatExceptionOfType(ValidationException.class)
            .isThrownBy(
                    () -> createTableSink(sinkSchema, Arrays.asList("name", "curr_id"), sinkOptions))
            .havingCause()
            .withMessageContaining(
                    String.format(
                            "Cannot set %s option for a table defined with a PARTITIONED BY clause",
                            KinesisConnectorOptions.SINK_PARTITIONER.key()));
}
Also used : ValidationException(org.apache.flink.table.api.ValidationException) ResolvedSchema(org.apache.flink.table.catalog.ResolvedSchema) Test(org.junit.Test)
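
The same validation can be reached from SQL once the sink is actually planned. A hypothetical sketch, assuming the option keys resolve as in the Kinesis connector docs ('sink.partitioner', 'stream', 'aws.region'); the table name, fields, and values are illustrative:

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;

public class KinesisPartitionerSketch {
    public static void main(String[] args) {
        TableEnvironment env =
                TableEnvironment.create(EnvironmentSettings.inStreamingMode());
        env.executeSql(
                "CREATE TABLE kinesis_sink (\n"
                        + "  name STRING,\n"
                        + "  curr_id BIGINT\n"
                        + ") PARTITIONED BY (name, curr_id) WITH (\n"
                        + "  'connector' = 'kinesis',\n"
                        + "  'stream' = 'my-stream',\n"
                        + "  'aws.region' = 'us-east-1',\n"
                        + "  'format' = 'json',\n"
                        + "  'sink.partitioner' = 'random'\n"
                        + ")");
        // The factory runs when the sink is planned, so this INSERT is what
        // surfaces the "Cannot set ... PARTITIONED BY" ValidationException.
        env.executeSql("INSERT INTO kinesis_sink VALUES ('a', 1)");
    }
}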

Aggregations

ResolvedSchema (org.apache.flink.table.catalog.ResolvedSchema): 84
Test (org.junit.Test): 50
DynamicTableSink (org.apache.flink.table.connector.sink.DynamicTableSink): 20
DataType (org.apache.flink.table.types.DataType): 20
RowData (org.apache.flink.table.data.RowData): 17
ValidationException (org.apache.flink.table.api.ValidationException): 14
ResolvedCatalogTable (org.apache.flink.table.catalog.ResolvedCatalogTable): 14
List (java.util.List): 11
SinkRuntimeProviderContext (org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext): 11
DynamicTableSource (org.apache.flink.table.connector.source.DynamicTableSource): 10
Column (org.apache.flink.table.catalog.Column): 9
LogicalType (org.apache.flink.table.types.logical.LogicalType): 9
RowType (org.apache.flink.table.types.logical.RowType): 9
HashMap (java.util.HashMap): 8
Collectors (java.util.stream.Collectors): 8
RelDataType (org.apache.calcite.rel.type.RelDataType): 8
Internal (org.apache.flink.annotation.Internal): 8
HBaseWriteOptions (org.apache.flink.connector.hbase.options.HBaseWriteOptions): 6
FlinkTypeFactory (org.apache.flink.table.planner.calcite.FlinkTypeFactory): 6
Row (org.apache.flink.types.Row): 6