Use of org.apache.flink.table.catalog.ResolvedSchema in project flink by apache.
The class SetOperationFactory, method validateSetOperation:
private void validateSetOperation(
        SetQueryOperationType operationType, QueryOperation left, QueryOperation right) {
    ResolvedSchema leftSchema = left.getResolvedSchema();
    int leftFieldCount = leftSchema.getColumnCount();
    ResolvedSchema rightSchema = right.getResolvedSchema();
    int rightFieldCount = rightSchema.getColumnCount();
    if (leftFieldCount != rightFieldCount) {
        throw new ValidationException(
                format(
                        "The %s operation on two tables of different column sizes: %d and %d is not supported",
                        operationType.toString().toLowerCase(),
                        leftFieldCount,
                        rightFieldCount));
    }
    final List<DataType> leftDataTypes = leftSchema.getColumnDataTypes();
    final List<DataType> rightDataTypes = rightSchema.getColumnDataTypes();
    IntStream.range(0, leftFieldCount)
            .forEach(
                    idx -> {
                        if (!findCommonColumnType(leftDataTypes, rightDataTypes, idx).isPresent()) {
                            throw new ValidationException(
                                    format(
                                            "Incompatible types for %s operation. Could not find a common type at position %s for '%s' and '%s'.",
                                            operationType.toString().toLowerCase(),
                                            idx,
                                            leftDataTypes.get(idx),
                                            rightDataTypes.get(idx)));
                        }
                    });
}
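The private helper findCommonColumnType is not shown in this excerpt. A minimal sketch of its likely shape, assuming it delegates to LogicalTypeMerging#findCommonType from org.apache.flink.table.types.logical.utils (the same utility the ValuesOperationFactory Javadoc below refers to):

// Hypothetical sketch of the private helper used above; it is not part of this
// excerpt. Assumes it delegates to LogicalTypeMerging.findCommonType, which
// returns Optional.empty() when no common type exists for the two columns.
private Optional<LogicalType> findCommonColumnType(
        List<DataType> leftDataTypes, List<DataType> rightDataTypes, int idx) {
    LogicalType leftType = leftDataTypes.get(idx).getLogicalType();
    LogicalType rightType = rightDataTypes.get(idx).getLogicalType();
    return LogicalTypeMerging.findCommonType(Arrays.asList(leftType, rightType));
}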
Use of org.apache.flink.table.catalog.ResolvedSchema in project flink by apache.
The class SetOperationFactory, method createCommonTableSchema:
private ResolvedSchema createCommonTableSchema(QueryOperation left, QueryOperation right) {
    final ResolvedSchema leftSchema = left.getResolvedSchema();
    final List<DataType> leftDataTypes = leftSchema.getColumnDataTypes();
    final List<DataType> rightDataTypes = right.getResolvedSchema().getColumnDataTypes();
    final List<DataType> resultDataTypes =
            IntStream.range(0, leftSchema.getColumnCount())
                    .mapToObj(
                            idx ->
                                    findCommonColumnType(leftDataTypes, rightDataTypes, idx)
                                            .orElseThrow(AssertionError::new))
                    .map(TypeConversions::fromLogicalToDataType)
                    .collect(Collectors.toList());
    return ResolvedSchema.physical(leftSchema.getColumnNames(), resultDataTypes);
}
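To see the widening behavior in isolation, here is a small, self-contained example (not from the Flink sources) that calls LogicalTypeMerging directly; INT and BIGINT share the common type BIGINT, so a UNION of an INT column with a BIGINT column yields a BIGINT column:

import java.util.Arrays;
import java.util.Optional;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.types.logical.LogicalType;
import org.apache.flink.table.types.logical.utils.LogicalTypeMerging;

public class CommonTypeDemo {
    public static void main(String[] args) {
        // Ask the merging rules for the common type of INT and BIGINT.
        Optional<LogicalType> common =
                LogicalTypeMerging.findCommonType(
                        Arrays.asList(
                                DataTypes.INT().getLogicalType(),
                                DataTypes.BIGINT().getLogicalType()));
        System.out.println(common); // Optional[BIGINT]
    }
}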
Use of org.apache.flink.table.catalog.ResolvedSchema in project flink by apache.
The class ValuesOperationFactory, method create:
/**
* Creates a valid {@link ValuesQueryOperation} operation.
*
 * <p>It derives a row type based on {@link LogicalTypeMerging}. It flattens any row
 * constructors but does not flatten ROWs that are the result of, e.g., a function call.
*
* <p>The resulting schema can be provided manually. If it is not, the schema will be
* automatically derived from the types of the expressions.
*/
QueryOperation create(
        @Nullable ResolvedSchema expectedSchema,
        List<ResolvedExpression> resolvedExpressions,
        ExpressionResolver.PostResolverFactory postResolverFactory) {
    List<List<ResolvedExpression>> resolvedRows = unwrapFromRowConstructor(resolvedExpressions);
    if (expectedSchema != null) {
        verifyAllSameSize(resolvedRows, expectedSchema.getColumnCount());
    }
    ResolvedSchema schema =
            Optional.ofNullable(expectedSchema).orElseGet(() -> extractSchema(resolvedRows));
    List<List<ResolvedExpression>> castedExpressions =
            resolvedRows.stream()
                    .map(row -> convertTopLevelExpressionToExpectedRowType(
                            postResolverFactory, schema.getColumnDataTypes(), row))
                    .collect(Collectors.toList());
    return new ValuesQueryOperation(castedExpressions, schema);
}
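From the user-facing Table API, this factory is reached through TableEnvironment#fromValues. A short usage sketch, assuming a previously created TableEnvironment named tableEnv (not shown) and the usual org.apache.flink.types.Row import:

// The explicit row type becomes the expectedSchema in the create method above,
// so each literal row below is cast to the declared BIGINT and STRING columns.
Table values =
        tableEnv.fromValues(
                DataTypes.ROW(
                        DataTypes.FIELD("id", DataTypes.BIGINT()),
                        DataTypes.FIELD("name", DataTypes.STRING())),
                Row.of(1, "ABC"),
                Row.of(2L, "ABCDE"));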
Use of org.apache.flink.table.catalog.ResolvedSchema in project flink by apache.
The class TableEnvironmentImpl, method buildResult:
private TableResultInternal buildResult(String[] headers, DataType[] types, Object[][] rows) {
    ResolvedSchema schema = ResolvedSchema.physical(headers, types);
    ResultProvider provider =
            new StaticResultProvider(
                    Arrays.stream(rows).map(Row::of).collect(Collectors.toList()));
    return TableResultImpl.builder()
            .resultKind(ResultKind.SUCCESS_WITH_CONTENT)
            .schema(schema)
            .resultProvider(provider)
            .setPrintStyle(
                    PrintStyle.tableauWithDataInferredColumnWidths(
                            schema,
                            provider.getRowDataStringConverter(),
                            Integer.MAX_VALUE,
                            true,
                            false))
            .build();
}
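On its own, the schema construction is straightforward to reproduce. A minimal, self-contained sketch; the class and column names are illustrative, not from the excerpt:

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.catalog.ResolvedSchema;
import org.apache.flink.table.types.DataType;

public class SchemaDemo {
    public static void main(String[] args) {
        // Headers and types are paired positionally, as in buildResult above.
        String[] headers = {"result"};
        DataType[] types = {DataTypes.STRING()};
        ResolvedSchema schema = ResolvedSchema.physical(headers, types);
        System.out.println(schema); // a single physical column: `result` STRING
    }
}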
Use of org.apache.flink.table.catalog.ResolvedSchema in project flink by apache.
The class KinesisDynamicTableSinkFactoryTest, method testBadTableSinkForCustomPartitionerForPartitionedTable:
@Test
public void testBadTableSinkForCustomPartitionerForPartitionedTable() {
    ResolvedSchema sinkSchema = defaultSinkSchema();
    Map<String, String> sinkOptions =
            defaultTableOptions()
                    .withTableOption(KinesisConnectorOptions.SINK_PARTITIONER, "random")
                    .build();
    Assertions.assertThatExceptionOfType(ValidationException.class)
            .isThrownBy(
                    () -> createTableSink(sinkSchema, Arrays.asList("name", "curr_id"), sinkOptions))
            .havingCause()
            .withMessageContaining(
                    String.format(
                            "Cannot set %s option for a table defined with a PARTITIONED BY clause",
                            KinesisConnectorOptions.SINK_PARTITIONER.key()));
}
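The defaultSinkSchema() fixture is defined elsewhere in the test class and is not part of this excerpt. A hypothetical sketch of its shape, inferred only from the partition keys ("name", "curr_id") that the test passes to createTableSink:

// Hypothetical fixture; the real helper lives elsewhere in the test class.
// Column is org.apache.flink.table.catalog.Column; names match the partition
// keys used in createTableSink above, the types are assumptions.
private ResolvedSchema defaultSinkSchema() {
    return ResolvedSchema.of(
            Column.physical("name", DataTypes.STRING()),
            Column.physical("curr_id", DataTypes.BIGINT()));
}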