use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.rel.core.Project in project storm by apache.
the class StreamsProjectRule method convert.
@Override
public RelNode convert(RelNode rel) {
    final Project project = (Project) rel;
    final RelNode input = project.getInput();
    return new StreamsProjectRel(
            project.getCluster(),
            project.getTraitSet().replace(StreamsLogicalConvention.INSTANCE),
            convert(input, input.getTraitSet().replace(StreamsLogicalConvention.INSTANCE)),
            project.getProjects(),
            project.getRowType());
}
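For orientation, a method like this usually lives in a class that extends Calcite's ConverterRule and exposes a singleton INSTANCE. The sketch below is an assumption about that surrounding class, using the classic four-argument ConverterRule constructor; it is illustrative, not code copied from Storm:

public class StreamsProjectRule extends ConverterRule {

    public static final StreamsProjectRule INSTANCE = new StreamsProjectRule();

    private StreamsProjectRule() {
        // hypothetical registration: match LogicalProject nodes in Convention.NONE
        // and convert them to StreamsLogicalConvention
        super(LogicalProject.class, Convention.NONE, StreamsLogicalConvention.INSTANCE, "StreamsProjectRule");
    }

    // the convert(RelNode) method shown above goes here
}

A planner with this rule registered rewrites every matched LogicalProject into the StreamsProjectRel built above.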
use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.rel.core.Project in project flink by apache.
the class HiveParserDMLHelper method handleDestSchema.
private RelNode handleDestSchema(SingleRel queryRelNode, Table destTable, List<String> destSchema, Set<String> staticParts) throws SemanticException {
    if (destSchema == null || destSchema.isEmpty()) {
        return queryRelNode;
    }
    // natural schema should contain regular cols + dynamic cols
    List<FieldSchema> naturalSchema = new ArrayList<>(destTable.getCols());
    if (destTable.isPartitioned()) {
        naturalSchema.addAll(destTable.getTTable().getPartitionKeys().stream()
                .filter(f -> !staticParts.contains(f.getName()))
                .collect(Collectors.toList()));
    }
    // we don't need to do anything if the dest schema is the same as natural schema
    if (destSchema.equals(HiveCatalog.getFieldNames(naturalSchema))) {
        return queryRelNode;
    }
    // build a list to create a Project on top of original Project
    // for each col in dest table, if it's in dest schema, store its corresponding index in the
    // dest schema, otherwise store its type and we'll create NULL for it
    List<Object> updatedIndices = new ArrayList<>(naturalSchema.size());
    for (FieldSchema col : naturalSchema) {
        int index = destSchema.indexOf(col.getName());
        if (index < 0) {
            updatedIndices.add(HiveParserTypeConverter.convert(
                    TypeInfoUtils.getTypeInfoFromTypeString(col.getType()), plannerContext.getTypeFactory()));
        } else {
            updatedIndices.add(index);
        }
    }
    if (queryRelNode instanceof Project) {
        return addProjectForDestSchema((Project) queryRelNode, updatedIndices);
    } else if (queryRelNode instanceof Sort) {
        Sort sort = (Sort) queryRelNode;
        RelNode sortInput = sort.getInput();
        // DIST + LIMIT
        if (sortInput instanceof LogicalDistribution) {
            RelNode newDist = handleDestSchemaForDist((LogicalDistribution) sortInput, updatedIndices);
            sort.replaceInput(0, newDist);
            return sort;
        }
        // PROJECT + SORT
        RelNode addedProject = addProjectForDestSchema((Project) sortInput, updatedIndices);
        // we may need to update the field collations
        List<RelFieldCollation> fieldCollations = sort.getCollation().getFieldCollations();
        if (!fieldCollations.isEmpty()) {
            sort.replaceInput(0, null);
            sort = LogicalSort.create(addedProject, updateRelCollation(sort.getCollation(), updatedIndices), sort.offset, sort.fetch);
        }
        sort.replaceInput(0, addedProject);
        return sort;
    } else {
        // PROJECT + DIST
        return handleDestSchemaForDist((LogicalDistribution) queryRelNode, updatedIndices);
    }
}
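To make the updatedIndices encoding concrete, here is a standalone sketch of the loop above, using plain strings in place of FieldSchema and RelDataType (the table and column names are hypothetical):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class UpdatedIndicesDemo {
    public static void main(String[] args) {
        // the dest table's natural schema is (a, b, c); the statement writes only (c, a)
        List<String> naturalSchema = Arrays.asList("a", "b", "c");
        List<String> destSchema = Arrays.asList("c", "a");
        List<Object> updatedIndices = new ArrayList<>();
        for (String col : naturalSchema) {
            int index = destSchema.indexOf(col);
            // a sits at position 1 of destSchema and c at position 0; b is absent,
            // so the real code would store b's type and later project NULL for it
            updatedIndices.add(index < 0 ? "NULL(" + col + ")" : index);
        }
        System.out.println(updatedIndices); // prints [1, NULL(b), 0]
    }
}

The Project built from this list thus reorders the written columns into the table's natural order and fills NULL for any column the statement did not mention.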
use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.rel.core.Project in project flink by apache.
the class HiveParserDMLHelper method handleDestSchemaForDist.
private RelNode handleDestSchemaForDist(LogicalDistribution hiveDist, List<Object> updatedIndices) throws SemanticException {
    Project project = (Project) hiveDist.getInput();
    RelNode addedProject = addProjectForDestSchema(project, updatedIndices);
    // disconnect the original LogicalDistribution
    hiveDist.replaceInput(0, null);
    return LogicalDistribution.create(
            addedProject,
            updateRelCollation(hiveDist.getCollation(), updatedIndices),
            updateDistKeys(hiveDist.getDistKeys(), updatedIndices));
}
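Note the replaceInput(0, null) call: it detaches the Project from the original LogicalDistribution before that subtree is re-parented under the freshly created node, presumably so the discarded distribution no longer holds a reference to the reused input.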
use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.rel.core.Project in project flink by apache.
the class HiveParserCalcitePlanner method genLogicalPlan.
private RelNode genLogicalPlan(HiveParserQB qb, boolean outerMostQB, Map<String, Integer> outerNameToPosMap, HiveParserRowResolver outerRR) throws SemanticException {
    RelNode res;
    // First generate all the opInfos for the elements in the from clause
    Map<String, RelNode> aliasToRel = new HashMap<>();
    // 0. Check if we can handle the SubQuery;
    // canHandleQbForCbo returns null if the query can be handled.
    String reason = HiveParserUtils.canHandleQbForCbo(semanticAnalyzer.getQueryProperties());
    if (reason != null) {
        String msg = "CBO can not handle Sub Query because it: " + reason;
        throw new SemanticException(msg);
    }
    // 1.1. Recurse over the subqueries to fill the subquery part of the plan
    for (String subqAlias : qb.getSubqAliases()) {
        HiveParserQBExpr qbexpr = qb.getSubqForAlias(subqAlias);
        RelNode relNode = genLogicalPlan(qbexpr);
        aliasToRel.put(subqAlias, relNode);
        if (qb.getViewToTabSchema().containsKey(subqAlias)) {
            if (!(relNode instanceof Project)) {
                throw new SemanticException("View " + subqAlias + " is corresponding to "
                        + relNode.toString() + ", rather than a Project.");
            }
        }
    }
    // 1.2 Recurse over all the source tables
    for (String tableAlias : qb.getTabAliases()) {
        RelNode op = genTableLogicalPlan(tableAlias, qb);
        aliasToRel.put(tableAlias, op);
    }
    if (aliasToRel.isEmpty()) {
        RelNode dummySrc = LogicalValues.createOneRow(cluster);
        aliasToRel.put(HiveParserSemanticAnalyzer.DUMMY_TABLE, dummySrc);
        HiveParserRowResolver dummyRR = new HiveParserRowResolver();
        dummyRR.put(HiveParserSemanticAnalyzer.DUMMY_TABLE, "dummy_col",
                new ColumnInfo(getColumnInternalName(0), TypeInfoFactory.intTypeInfo,
                        HiveParserSemanticAnalyzer.DUMMY_TABLE, false));
        relToRowResolver.put(dummySrc, dummyRR);
        relToHiveColNameCalcitePosMap.put(dummySrc, buildHiveToCalciteColumnMap(dummyRR));
    }
    if (!qb.getParseInfo().getAliasToLateralViews().isEmpty()) {
        // process lateral views
        res = genLateralViewPlan(qb, aliasToRel);
    } else if (qb.getParseInfo().getJoinExpr() != null) {
        // 1.3 process join
        res = genJoinLogicalPlan(qb.getParseInfo().getJoinExpr(), aliasToRel);
    } else {
        // If no join then there should only be either 1 TS or 1 SubQuery
        res = aliasToRel.values().iterator().next();
    }
    // 2. Build Rel for where Clause
    RelNode filterRel = genFilterLogicalPlan(qb, res, outerNameToPosMap, outerRR);
    res = (filterRel == null) ? res : filterRel;
    RelNode starSrcRel = res;
    // 3. Build Rel for GB Clause
    RelNode gbRel = genGBLogicalPlan(qb, res);
    res = gbRel == null ? res : gbRel;
    // 4. Build Rel for GB Having Clause
    RelNode gbHavingRel = genGBHavingLogicalPlan(qb, res);
    res = gbHavingRel == null ? res : gbHavingRel;
    // 5. Build Rel for Select Clause
    RelNode selectRel = genSelectLogicalPlan(qb, res, starSrcRel, outerNameToPosMap, outerRR);
    res = selectRel == null ? res : selectRel;
    // 6. Build Rel for OB Clause
    Pair<Sort, RelNode> obAndTopProj = genOBLogicalPlan(qb, res, outerMostQB);
    Sort orderRel = obAndTopProj.getKey();
    RelNode topConstrainingProjRel = obAndTopProj.getValue();
    res = orderRel == null ? res : orderRel;
    // Build Rel for SortBy/ClusterBy/DistributeBy. It can happen only if we don't have OrderBy.
    if (orderRel == null) {
        Pair<RelNode, RelNode> distAndTopProj = genDistSortBy(qb, res, outerMostQB);
        RelNode distRel = distAndTopProj.getKey();
        topConstrainingProjRel = distAndTopProj.getValue();
        res = distRel == null ? res : distRel;
    }
    // 7. Build Rel for Limit Clause
    Sort limitRel = genLimitLogicalPlan(qb, res);
    if (limitRel != null) {
        if (orderRel != null) {
            // merge limit into the order-by node
            HiveParserRowResolver orderRR = relToRowResolver.remove(orderRel);
            Map<String, Integer> orderColNameToPos = relToHiveColNameCalcitePosMap.remove(orderRel);
            res = LogicalSort.create(orderRel.getInput(), orderRel.collation, limitRel.offset, limitRel.fetch);
            relToRowResolver.put(res, orderRR);
            relToHiveColNameCalcitePosMap.put(res, orderColNameToPos);
            relToRowResolver.remove(limitRel);
            relToHiveColNameCalcitePosMap.remove(limitRel);
        } else {
            res = limitRel;
        }
    }
    // 8. Introduce top constraining select if needed.
    if (topConstrainingProjRel != null) {
        List<RexNode> originalInputRefs = topConstrainingProjRel.getRowType().getFieldList().stream()
                .map(input -> new RexInputRef(input.getIndex(), input.getType()))
                .collect(Collectors.toList());
        HiveParserRowResolver topConstrainingProjRR = new HiveParserRowResolver();
        if (!HiveParserRowResolver.add(topConstrainingProjRR, relToRowResolver.get(topConstrainingProjRel))) {
            LOG.warn("Duplicates detected when adding columns to RR: see previous message");
        }
        res = genSelectRelNode(originalInputRefs, topConstrainingProjRR, res);
    }
    // TODO: cleanup this
    if (qb.getParseInfo().getAlias() != null) {
        HiveParserRowResolver rr = relToRowResolver.get(res);
        HiveParserRowResolver newRR = new HiveParserRowResolver();
        String alias = qb.getParseInfo().getAlias();
        for (ColumnInfo colInfo : rr.getColumnInfos()) {
            String name = colInfo.getInternalName();
            String[] tmp = rr.reverseLookup(name);
            if ("".equals(tmp[0]) || tmp[1] == null) {
                // ast expression is not a valid column name for table
                tmp[1] = colInfo.getInternalName();
            }
            ColumnInfo newColInfo = new ColumnInfo(colInfo);
            newColInfo.setTabAlias(alias);
            newRR.put(alias, tmp[1], newColInfo);
        }
        relToRowResolver.put(res, newRR);
        relToHiveColNameCalcitePosMap.put(res, buildHiveToCalciteColumnMap(newRR));
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("Created Plan for Query Block " + qb.getId());
    }
    semanticAnalyzer.setQB(qb);
    return res;
}
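As a rough orientation (an illustration of the numbered steps above, not literal planner output), a query block that uses every optional clause, e.g. SELECT ... WHERE ... GROUP BY ... HAVING ... ORDER BY ... LIMIT, would leave this method as an operator stack along these lines:

LogicalSort (order-by collation plus the limit's offset/fetch)   // steps 6 and 7, merged
  LogicalProject                                                 // step 5, select clause
    LogicalFilter                                                // step 4, having
      LogicalAggregate                                           // step 3, group-by
        LogicalFilter                                            // step 2, where
          LogicalTableScan or subquery plan                      // steps 1.1/1.2, from clause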
use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.rel.core.Project in project calcite by apache.
the class CalcitePrepareImpl method analyze_.
private AnalyzeViewResult analyze_(SqlValidator validator, String sql, SqlNode sqlNode, RelRoot root, boolean fail) {
    final RexBuilder rexBuilder = root.rel.getCluster().getRexBuilder();
    RelNode rel = root.rel;
    final RelNode viewRel = rel;
    Project project;
    if (rel instanceof Project) {
        project = (Project) rel;
        rel = project.getInput();
    } else {
        project = null;
    }
    Filter filter;
    if (rel instanceof Filter) {
        filter = (Filter) rel;
        rel = filter.getInput();
    } else {
        filter = null;
    }
    TableScan scan;
    if (rel instanceof TableScan) {
        scan = (TableScan) rel;
    } else {
        scan = null;
    }
    if (scan == null) {
        if (fail) {
            throw validator.newValidationError(sqlNode, RESOURCE.modifiableViewMustBeBasedOnSingleTable());
        }
        return new AnalyzeViewResult(this, validator, sql, sqlNode,
                validator.getValidatedNodeType(sqlNode), root, null, null, null, null, false);
    }
    final RelOptTable targetRelTable = scan.getTable();
    final RelDataType targetRowType = targetRelTable.getRowType();
    final Table table = targetRelTable.unwrap(Table.class);
    final List<String> tablePath = targetRelTable.getQualifiedName();
    assert table != null;
    List<Integer> columnMapping;
    final Map<Integer, RexNode> projectMap = new HashMap<>();
    if (project == null) {
        columnMapping = ImmutableIntList.range(0, targetRowType.getFieldCount());
    } else {
        columnMapping = new ArrayList<>();
        for (Ord<RexNode> node : Ord.zip(project.getProjects())) {
            if (node.e instanceof RexInputRef) {
                RexInputRef rexInputRef = (RexInputRef) node.e;
                int index = rexInputRef.getIndex();
                if (projectMap.get(index) != null) {
                    if (fail) {
                        throw validator.newValidationError(sqlNode,
                                RESOURCE.moreThanOneMappedColumn(
                                        targetRowType.getFieldList().get(index).getName(),
                                        Util.last(tablePath)));
                    }
                    return new AnalyzeViewResult(this, validator, sql, sqlNode,
                            validator.getValidatedNodeType(sqlNode), root, null, null, null, null, false);
                }
                projectMap.put(index, rexBuilder.makeInputRef(viewRel, node.i));
                columnMapping.add(index);
            } else {
                columnMapping.add(-1);
            }
        }
    }
    final RexNode constraint;
    if (filter != null) {
        constraint = filter.getCondition();
    } else {
        constraint = rexBuilder.makeLiteral(true);
    }
    final List<RexNode> filters = new ArrayList<>();
    // If we put a constraint in projectMap above, then filters will not be empty despite
    // being a modifiable view.
    final List<RexNode> filters2 = new ArrayList<>();
    boolean retry = false;
    RelOptUtil.inferViewPredicates(projectMap, filters, constraint);
    if (fail && !filters.isEmpty()) {
        final Map<Integer, RexNode> projectMap2 = new HashMap<>();
        RelOptUtil.inferViewPredicates(projectMap2, filters2, constraint);
        if (!filters2.isEmpty()) {
            throw validator.newValidationError(sqlNode, RESOURCE.modifiableViewMustHaveOnlyEqualityPredicates());
        }
        retry = true;
    }
    // Check that all columns that are not projected have a constant value
    for (RelDataTypeField field : targetRowType.getFieldList()) {
        final int x = columnMapping.indexOf(field.getIndex());
        if (x >= 0) {
            assert Util.skip(columnMapping, x + 1).indexOf(field.getIndex()) < 0
                    : "column projected more than once; should have checked above";
            // target column is projected
            continue;
        }
        if (projectMap.get(field.getIndex()) != null) {
            // constant expression
            continue;
        }
        if (field.getType().isNullable()) {
            // don't need expression for nullable columns; NULL suffices
            continue;
        }
        if (fail) {
            throw validator.newValidationError(sqlNode,
                    RESOURCE.noValueSuppliedForViewColumn(field.getName(), Util.last(tablePath)));
        }
        return new AnalyzeViewResult(this, validator, sql, sqlNode,
                validator.getValidatedNodeType(sqlNode), root, null, null, null, null, false);
    }
    final boolean modifiable = filters.isEmpty() || retry && filters2.isEmpty();
    return new AnalyzeViewResult(this, validator, sql, sqlNode, validator.getValidatedNodeType(sqlNode),
            root, modifiable ? table : null, ImmutableList.copyOf(tablePath), constraint,
            ImmutableIntList.copyOf(columnMapping), modifiable);
}
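To see how the pieces combine, consider a hypothetical view defined as SELECT a, b FROM t WHERE c = 10 over a table t(a, b, c). The walk above peels off project = [$0, $1] and filter c = 10, so columnMapping becomes [0, 1] and constraint is =($2, 10). RelOptUtil.inferViewPredicates then moves the equality into projectMap (column 2 mapped to the literal 10) and leaves filters empty, so the unprojected column c passes the "constant expression" check and modifiable stays true: an INSERT through the view supplies a and b explicitly and c implicitly via the constraint. Had the filter been c > 10 instead, no value for c could be inferred, filters would be non-empty, and with fail set the modifiableViewMustHaveOnlyEqualityPredicates error above would fire.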