Use of org.apache.calcite.plan.RelOptCluster in project flink by apache.
Class LogicalDistribution, method create.
public static LogicalDistribution create(RelNode input, RelCollation collation, List<Integer> distKeys) {
    RelOptCluster cluster = input.getCluster();
    collation = RelCollationTraitDef.INSTANCE.canonize(collation);
    RelTraitSet traitSet = input.getTraitSet().replace(Convention.NONE).replace(collation);
    return new LogicalDistribution(cluster, traitSet, input, collation, distKeys);
}
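As a usage sketch (not taken from the Flink sources), the hypothetical helper below builds a LogicalDistribution that sorts ascending on field 0 and hash-distributes on fields 0 and 1; the helper name and field indexes are assumptions, and it is assumed to sit next to LogicalDistribution so only Calcite imports are shown.

import java.util.Arrays;
import java.util.List;
import org.apache.calcite.rel.RelCollation;
import org.apache.calcite.rel.RelCollations;
import org.apache.calcite.rel.RelFieldCollation;
import org.apache.calcite.rel.RelNode;

// Hypothetical caller: sort ascending on field 0, hash-distribute on fields 0 and 1.
static LogicalDistribution distributeAndSort(RelNode input) {
    RelCollation collation =
            RelCollations.of(new RelFieldCollation(0, RelFieldCollation.Direction.ASCENDING));
    List<Integer> distKeys = Arrays.asList(0, 1);
    return LogicalDistribution.create(input, collation, distKeys);
}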
Use of org.apache.calcite.plan.RelOptCluster in project flink by apache.
Class SubQueryDecorrelator, method decorrelateQuery.
/**
 * Decorrelates the subqueries found under the given root node.
 *
 * <p>This is the main entry point to {@code SubQueryDecorrelator}.
 *
 * @param rootRel The root node of the plan that contains SubQuery expressions.
 * @return The decorrelation result; {@code Result.EMPTY} if the plan has no correlation, or
 *     {@code null} if the correlation cannot be handled.
 */
public static Result decorrelateQuery(RelNode rootRel) {
    int maxCnfNodeCount = FlinkRelOptUtil.getMaxCnfNodeCount(rootRel);
    final CorelMapBuilder builder = new CorelMapBuilder(maxCnfNodeCount);
    final CorelMap corelMap = builder.build(rootRel);
    if (builder.hasNestedCorScope || builder.hasUnsupportedCorCondition) {
        return null;
    }
    if (!corelMap.hasCorrelation()) {
        return Result.EMPTY;
    }
    RelOptCluster cluster = rootRel.getCluster();
    RelBuilder relBuilder = new FlinkRelBuilder(cluster.getPlanner().getContext(), cluster, null);
    RexBuilder rexBuilder = cluster.getRexBuilder();
    final SubQueryDecorrelator decorrelator =
            new SubQueryDecorrelator(
                    new SubQueryRelDecorrelator(corelMap, relBuilder, rexBuilder, maxCnfNodeCount),
                    relBuilder);
    rootRel.accept(decorrelator);
    return new Result(decorrelator.subQueryMap);
}
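A hypothetical caller (not part of the Flink sources) would distinguish the three possible outcomes as follows; the method name is an assumption, and Result is assumed to be the nested result type returned above.

import org.apache.calcite.rel.RelNode;

// Hypothetical caller: run decorrelation and branch on the three possible outcomes.
static void rewriteSubQueries(RelNode rootRel) {
    SubQueryDecorrelator.Result result = SubQueryDecorrelator.decorrelateQuery(rootRel);
    if (result == null) {
        // Nested correlation scope or unsupported correlated condition: keep the original plan.
    } else if (result == SubQueryDecorrelator.Result.EMPTY) {
        // No correlation at all: nothing to rewrite.
    } else {
        // Use the result to look up the decorrelated equivalent of each SubQuery expression.
    }
}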
Use of org.apache.calcite.plan.RelOptCluster in project flink by apache.
Class StreamPhysicalPythonGroupWindowAggregateRule, method convert.
@Override
public RelNode convert(RelNode rel) {
    FlinkLogicalWindowAggregate agg = (FlinkLogicalWindowAggregate) rel;
    LogicalWindow window = agg.getWindow();
    List<AggregateCall> aggCalls = agg.getAggCallList();
    boolean isPandasPythonUDAF =
            aggCalls.stream()
                    .anyMatch(x -> PythonUtil.isPythonAggregate(x, PythonFunctionKind.PANDAS));
    if (isPandasPythonUDAF && window instanceof SessionGroupWindow) {
        throw new TableException("Session Group Window is currently not supported for Pandas UDAF.");
    }
    RelNode input = agg.getInput();
    RelOptCluster cluster = rel.getCluster();
    FlinkRelDistribution requiredDistribution;
    if (agg.getGroupCount() != 0) {
        requiredDistribution = FlinkRelDistribution.hash(agg.getGroupSet().asList(), true);
    } else {
        requiredDistribution = FlinkRelDistribution.SINGLETON();
    }
    RelTraitSet requiredTraitSet =
            input.getTraitSet()
                    .replace(FlinkConventions.STREAM_PHYSICAL())
                    .replace(requiredDistribution);
    RelTraitSet providedTraitSet = rel.getTraitSet().replace(FlinkConventions.STREAM_PHYSICAL());
    RelNode newInput = RelOptRule.convert(input, requiredTraitSet);
    ReadableConfig config = ShortcutUtils.unwrapTableConfig(rel);
    WindowEmitStrategy emitStrategy = WindowEmitStrategy.apply(config, agg.getWindow());
    if (emitStrategy.produceUpdates()) {
        throw new TableException(
                "Python Group Window Aggregate Function is currently not supported for early fired or lately fired.");
    }
    return new StreamPhysicalPythonGroupWindowAggregate(
            cluster,
            providedTraitSet,
            newInput,
            rel.getRowType(),
            agg.getGroupSet().toArray(),
            JavaScalaConversionUtil.toScala(aggCalls),
            agg.getWindow(),
            agg.getNamedProperties(),
            emitStrategy);
}
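As a usage sketch, a converter rule like this is typically applied by bundling it into the rule set used during the stream-physical optimization phase; the INSTANCE field name and the wrapper class below are assumptions rather than code from the Flink sources.

import org.apache.calcite.tools.RuleSet;
import org.apache.calcite.tools.RuleSets;

// Hypothetical registration: collect the converter rule into a Calcite RuleSet
// that the planner applies when translating logical nodes to stream-physical ones.
final class PythonWindowAggregateRules {
    static RuleSet ruleSet() {
        return RuleSets.ofList(StreamPhysicalPythonGroupWindowAggregateRule.INSTANCE);
    }
}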
Use of org.apache.calcite.plan.RelOptCluster in project flink by apache.
Class ProjectWindowTableFunctionTransposeRule, method createNewTableFunctionScan.
private LogicalTableFunctionScan createNewTableFunctionScan(
        RelBuilder relBuilder, LogicalTableFunctionScan oldScan, LogicalType timeAttributeType,
        RelNode newInput, Map<Integer, Integer> mapping) {
    relBuilder.push(newInput);
    RexNode newCall = rewriteWindowCall((RexCall) oldScan.getCall(), mapping, relBuilder);
    RelOptCluster cluster = oldScan.getCluster();
    FlinkTypeFactory typeFactory = (FlinkTypeFactory) cluster.getTypeFactory();
    RelDataType newScanOutputType = SqlWindowTableFunction.inferRowType(
            typeFactory, newInput.getRowType(),
            typeFactory.createFieldTypeFromLogicalType(timeAttributeType));
    return LogicalTableFunctionScan.create(
            cluster, new ArrayList<>(Collections.singleton(newInput)), newCall,
            oldScan.getElementType(), newScanOutputType, oldScan.getColumnMappings());
}
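For context, the mapping argument maps field indexes of the old scan input to field indexes of the pruned input. A minimal sketch of how such a mapping could be produced follows; the helper name and its input are assumptions, not Flink code.

import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Hypothetical helper: the i-th field kept by the pruned input moves to position i,
// so each kept old index maps to its new, compacted index.
static Map<Integer, Integer> buildFieldMapping(List<Integer> keptOldFieldIndexes) {
    Map<Integer, Integer> mapping = new HashMap<>();
    for (int newIndex = 0; newIndex < keptOldFieldIndexes.size(); newIndex++) {
        mapping.put(keptOldFieldIndexes.get(newIndex), newIndex);
    }
    return mapping;
}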
Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.plan.RelOptCluster in project beam by apache.
Class RelConverter, method createOneRow.
// This function creates a single dummy input row for queries that don't read from a table.
// For example: SELECT "hello"
// The code is copy-pasted from Calcite's LogicalValues.createOneRow() with a single line
// change: SqlTypeName.INTEGER replaced by SqlTypeName.BIGINT.
// Would like to call LogicalValues.createOneRow() directly, but it uses type SqlTypeName.INTEGER
// which corresponds to TypeKind.TYPE_INT32 in ZetaSQL, a type not supported in ZetaSQL
// PRODUCT_EXTERNAL mode. See
// https://github.com/google/zetasql/blob/c610a21ffdc110293c1c7bd255a2674ebc7ec7a8/java/com/google/zetasql/TypeFactory.java#L61
static LogicalValues createOneRow(RelOptCluster cluster) {
    final RelDataType rowType =
            cluster.getTypeFactory().builder().add("ZERO", SqlTypeName.BIGINT).nullable(false).build();
    final ImmutableList<ImmutableList<RexLiteral>> tuples =
            ImmutableList.of(ImmutableList.of(cluster.getRexBuilder()
                    .makeExactLiteral(BigDecimal.ZERO, rowType.getFieldList().get(0).getType())));
    return LogicalValues.create(cluster, rowType, tuples);
}
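As a usage sketch, the dummy row can serve as the input of a projection that yields the constant being selected; the helper below is an assumption and not code from the Beam sources, though RelFactories.LOGICAL_BUILDER and the RelBuilder calls are standard Calcite.

import org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.plan.RelOptCluster;
import org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.rel.RelNode;
import org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.rel.core.RelFactories;
import org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.rel.logical.LogicalValues;
import org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.tools.RelBuilder;

// Hypothetical caller: model SELECT 'hello' by projecting a string literal
// over the single dummy row produced by createOneRow.
static RelNode selectHello(RelOptCluster cluster) {
    LogicalValues oneRow = createOneRow(cluster);
    RelBuilder relBuilder = RelFactories.LOGICAL_BUILDER.create(cluster, null);
    return relBuilder.push(oneRow).project(relBuilder.literal("hello")).build();
}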