Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.rel.core.Project in project drill by apache.
The class DrillPushProjectIntoScanRule, method onMatch:
@Override
public void onMatch(RelOptRuleCall call) {
  Project project = call.rel(0);
  TableScan scan = call.rel(1);
  try {
    if (scan.getRowType().getFieldList().isEmpty()) {
      return;
    }
    ProjectPushInfo projectPushInfo =
        DrillRelOptUtil.getFieldsInformation(scan.getRowType(), project.getProjects());
    if (!canPushProjectIntoScan(scan.getTable(), projectPushInfo)
        || skipScanConversion(
            projectPushInfo.createNewRowType(project.getCluster().getTypeFactory()), scan)) {
      // project above scan may be removed in ProjectRemoveRule for the case when it is trivial
      return;
    }
    TableScan newScan = createScan(scan, projectPushInfo);
    List<RexNode> newProjects = new ArrayList<>();
    for (RexNode n : project.getChildExps()) {
      newProjects.add(n.accept(projectPushInfo.getInputReWriter()));
    }
    Project newProject = createProject(project, newScan, newProjects);
    if (ProjectRemoveRule.isTrivial(newProject)) {
      call.transformTo(newScan);
    } else {
      call.transformTo(newProject);
    }
  } catch (IOException e) {
    throw new DrillRuntimeException(e);
  }
}
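For context, here is a minimal sketch of how a rule matching a Project over a TableScan is typically declared with Calcite's RelOptRule API, so that call.rel(0) and call.rel(1) in onMatch resolve as above. The class name is hypothetical; this is not Drill's actual rule declaration.

// Hypothetical sketch: match Project(TableScan) so that in onMatch
// call.rel(0) is the Project and call.rel(1) is the TableScan.
public class ProjectIntoScanRuleSketch extends RelOptRule {
  public ProjectIntoScanRuleSketch() {
    super(operand(Project.class, operand(TableScan.class, none())),
        "ProjectIntoScanRuleSketch");
  }

  @Override
  public void onMatch(RelOptRuleCall call) {
    Project project = call.rel(0);
    TableScan scan = call.rel(1);
    // ... push the referenced fields into the scan, as the Drill rule above does
  }
}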
Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.rel.core.Project in project drill by apache.
The class ElasticsearchProjectRule, method convert:
@Override
public RelNode convert(RelNode relNode) {
  Project project = (Project) relNode;
  NodeTypeFinder projectFinder = new NodeTypeFinder(ElasticsearchProject.class);
  project.getInput().accept(projectFinder);
  if (projectFinder.containsNode) {
    // Calcite adapter allows only a single Elasticsearch project per tree
    return null;
  }
  RelTraitSet traitSet = project.getTraitSet().replace(out);
  List<RexNode> innerProjections = new ArrayList<>();
  RelDataType rowType = project.getInput().getRowType();
  // check whether the project expressions reference any input fields,
  // or consist of literals only
  DrillRelOptUtil.InputRefVisitor collectRefs = new DrillRelOptUtil.InputRefVisitor();
  project.getChildExps().forEach(exp -> exp.accept(collectRefs));
  if (!collectRefs.getInputRefs().isEmpty()) {
    for (RelDataTypeField relDataTypeField : rowType.getFieldList()) {
      innerProjections.add(project.getCluster().getRexBuilder()
          .makeInputRef(project.getInput(), relDataTypeField.getIndex()));
    }
  }
  boolean allExprsInputRefs =
      project.getChildExps().stream().allMatch(rexNode -> rexNode instanceof RexInputRef);
  if (collectRefs.getInputRefs().isEmpty() || allExprsInputRefs) {
    return CalciteUtils.createProject(
        traitSet, convert(project.getInput(), out), project.getProjects(), project.getRowType());
  } else {
    Project elasticsearchProject = CalciteUtils.createProject(
        traitSet, convert(project.getInput(), out), innerProjections,
        project.getInput().getRowType());
    return project.copy(
        project.getTraitSet(), elasticsearchProject, project.getProjects(), project.getRowType());
  }
}
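The NodeTypeFinder visitor above guards against nesting a second ElasticsearchProject in the same tree. A hedged sketch of how such a detector can be written with Calcite's RelShuttleImpl follows; ContainsNodeFinder is a hypothetical stand-in, not Drill's actual NodeTypeFinder.

// Hypothetical sketch: record whether a plan subtree contains a node
// of a given class by walking it with Calcite's RelShuttleImpl.
class ContainsNodeFinder extends RelShuttleImpl {
  private final Class<? extends RelNode> nodeClass;
  boolean containsNode;

  ContainsNodeFinder(Class<? extends RelNode> nodeClass) {
    this.nodeClass = nodeClass;
  }

  @Override
  public RelNode visit(RelNode other) {
    if (nodeClass.isInstance(other)) {
      containsNode = true;
    }
    return super.visit(other); // continue into the children
  }
}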
Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.rel.core.Project in project drill by apache.
The class PluginProjectRule, method convert:
@Override
public RelNode convert(RelNode rel) {
  Project project = (Project) rel;
  if (!getPluginImplementor().splitProject(project)) {
    return new PluginProjectRel(
        getOutConvention(),
        project.getCluster(),
        project.getTraitSet().replace(getOutConvention()),
        convert(project.getInput(), project.getTraitSet().replace(getOutConvention())),
        project.getProjects(),
        project.getRowType());
  }
  RelDataType inputRowType = project.getInput().getRowType();
  if (inputRowType.getFieldList().isEmpty()) {
    return null;
  }
  DrillRelOptUtil.ProjectPushInfo projectPushInfo =
      DrillRelOptUtil.getFieldsInformation(inputRowType, project.getProjects());
  Project pluginProject = createPluginProject(project, projectPushInfo);
  if (Utilities.isStarQuery(projectPushInfo.getFields())
      || pluginProject.getRowType().equals(inputRowType)) {
    return null;
  }
  List<RexNode> newProjects = project.getChildExps().stream()
      .map(n -> n.accept(projectPushInfo.getInputReWriter()))
      .collect(Collectors.toList());
  Project newProject = createProject(project, pluginProject, newProjects);
  if (ProjectRemoveRule.isTrivial(newProject)) {
    return pluginProject;
  } else {
    return newProject;
  }
}
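Both Drill rules above finish by testing ProjectRemoveRule.isTrivial. A minimal sketch of the identity check this boils down to: a project is trivial when it passes its input fields through unchanged. Calcite's real implementation delegates to RexUtil.isIdentity; the standalone helper below is for illustration only.

// Sketch of the identity test behind ProjectRemoveRule.isTrivial:
// a project is trivial when expression i is exactly RexInputRef(i),
// so removing the project changes nothing.
static boolean isIdentityProject(List<RexNode> exps, RelDataType inputRowType) {
  if (exps.size() != inputRowType.getFieldCount()) {
    return false;
  }
  for (int i = 0; i < exps.size(); i++) {
    RexNode exp = exps.get(i);
    if (!(exp instanceof RexInputRef) || ((RexInputRef) exp).getIndex() != i) {
      return false;
    }
  }
  return true;
}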
Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.rel.core.Project in project flink by apache.
The class HiveParserCalcitePlanner, method genOBLogicalPlan:
private Pair<Sort, RelNode> genOBLogicalPlan(
    HiveParserQB qb, RelNode srcRel, boolean outermostOB) throws SemanticException {
  Sort sortRel = null;
  RelNode originalOBInput = null;
  HiveParserQBParseInfo qbp = qb.getParseInfo();
  String dest = qbp.getClauseNames().iterator().next();
  HiveParserASTNode obAST = qbp.getOrderByForClause(dest);
  if (obAST != null) {
    // 1. OB Expr sanity test
    // in strict mode, in the presence of order by, limit must be specified
    Integer limit = qb.getParseInfo().getDestLimit(dest);
    if (limit == null) {
      String mapRedMode = semanticAnalyzer.getConf().getVar(HiveConf.ConfVars.HIVEMAPREDMODE);
      boolean banLargeQuery = Boolean.parseBoolean(
          semanticAnalyzer.getConf().get("hive.strict.checks.large.query", "false"));
      if ("strict".equalsIgnoreCase(mapRedMode) || banLargeQuery) {
        throw new SemanticException(generateErrorMessage(obAST, "Order by-s without limit"));
      }
    }
    // 2. Walk through OB exprs and extract field collations and additional
    // virtual columns needed
    final List<RexNode> virtualCols = new ArrayList<>();
    final List<RelFieldCollation> fieldCollations = new ArrayList<>();
    int fieldIndex;
    List<Node> obASTExprLst = obAST.getChildren();
    HiveParserASTNode obASTExpr;
    HiveParserASTNode nullOrderASTExpr;
    List<Pair<HiveParserASTNode, TypeInfo>> vcASTAndType = new ArrayList<>();
    HiveParserRowResolver inputRR = relToRowResolver.get(srcRel);
    HiveParserRowResolver outputRR = new HiveParserRowResolver();
    HiveParserRexNodeConverter converter = new HiveParserRexNodeConverter(
        cluster,
        srcRel.getRowType(),
        relToHiveColNameCalcitePosMap.get(srcRel),
        0,
        false,
        funcConverter);
    int numSrcFields = srcRel.getRowType().getFieldCount();
    for (Node node : obASTExprLst) {
      // 2.1 Convert AST Expr to ExprNode
      obASTExpr = (HiveParserASTNode) node;
      nullOrderASTExpr = (HiveParserASTNode) obASTExpr.getChild(0);
      HiveParserASTNode ref = (HiveParserASTNode) nullOrderASTExpr.getChild(0);
      Map<HiveParserASTNode, ExprNodeDesc> astToExprNodeDesc =
          semanticAnalyzer.genAllExprNodeDesc(ref, inputRR);
      ExprNodeDesc obExprNodeDesc = astToExprNodeDesc.get(ref);
      if (obExprNodeDesc == null) {
        throw new SemanticException("Invalid order by expression: " + obASTExpr.toString());
      }
      // 2.2 Convert ExprNode to RexNode
      RexNode rexNode = converter.convert(obExprNodeDesc).accept(funcConverter);
      // 2.3 Determine the index of the OB expr in the child schema; Calcite cannot
      // sort on a compound expr unless that expr is
      // present in the child (& hence we add a child Project Rel)
      if (rexNode instanceof RexInputRef) {
        fieldIndex = ((RexInputRef) rexNode).getIndex();
      } else {
        fieldIndex = numSrcFields + virtualCols.size();
        virtualCols.add(rexNode);
        vcASTAndType.add(new Pair<>(ref, obExprNodeDesc.getTypeInfo()));
      }
      // 2.4 Determine the Direction of order by
      RelFieldCollation.Direction direction = RelFieldCollation.Direction.DESCENDING;
      if (obASTExpr.getType() == HiveASTParser.TOK_TABSORTCOLNAMEASC) {
        direction = RelFieldCollation.Direction.ASCENDING;
      }
      RelFieldCollation.NullDirection nullOrder;
      if (nullOrderASTExpr.getType() == HiveASTParser.TOK_NULLS_FIRST) {
        nullOrder = RelFieldCollation.NullDirection.FIRST;
      } else if (nullOrderASTExpr.getType() == HiveASTParser.TOK_NULLS_LAST) {
        nullOrder = RelFieldCollation.NullDirection.LAST;
      } else {
        throw new SemanticException(
            "Unexpected null ordering option: " + nullOrderASTExpr.getType());
      }
      // 2.5 Add to field collations
      fieldCollations.add(new RelFieldCollation(fieldIndex, direction, nullOrder));
    }
    // 3. Add Child Project Rel if needed, Generate Output RR, input Sel Rel
    // for top constraining Sel
    RelNode obInputRel = srcRel;
    if (!virtualCols.isEmpty()) {
      List<RexNode> originalInputRefs = srcRel.getRowType().getFieldList().stream()
          .map(input -> new RexInputRef(input.getIndex(), input.getType()))
          .collect(Collectors.toList());
      HiveParserRowResolver obSyntheticProjectRR = new HiveParserRowResolver();
      if (!HiveParserRowResolver.add(obSyntheticProjectRR, inputRR)) {
        throw new SemanticException(
            "Duplicates detected when adding columns to RR: see previous message");
      }
      int vcolPos = inputRR.getRowSchema().getSignature().size();
      for (Pair<HiveParserASTNode, TypeInfo> astTypePair : vcASTAndType) {
        obSyntheticProjectRR.putExpression(
            astTypePair.getKey(),
            new ColumnInfo(getColumnInternalName(vcolPos), astTypePair.getValue(), null, false));
        vcolPos++;
      }
      obInputRel = genSelectRelNode(
          CompositeList.of(originalInputRefs, virtualCols), obSyntheticProjectRR, srcRel);
      if (outermostOB) {
        if (!HiveParserRowResolver.add(outputRR, inputRR)) {
          throw new SemanticException(
              "Duplicates detected when adding columns to RR: see previous message");
        }
      } else {
        if (!HiveParserRowResolver.add(outputRR, obSyntheticProjectRR)) {
          throw new SemanticException(
              "Duplicates detected when adding columns to RR: see previous message");
        }
      }
      originalOBInput = srcRel;
    } else {
      if (!HiveParserRowResolver.add(outputRR, inputRR)) {
        throw new SemanticException(
            "Duplicates detected when adding columns to RR: see previous message");
      }
    }
    // 4. Construct SortRel
    RelTraitSet traitSet = cluster.traitSet();
    RelCollation canonizedCollation = traitSet.canonize(RelCollationImpl.of(fieldCollations));
    sortRel = LogicalSort.create(obInputRel, canonizedCollation, null, null);
    // 5. Update the maps
    Map<String, Integer> hiveColNameCalcitePosMap = buildHiveToCalciteColumnMap(outputRR);
    relToRowResolver.put(sortRel, outputRR);
    relToHiveColNameCalcitePosMap.put(sortRel, hiveColNameCalcitePosMap);
  }
  return new Pair<>(sortRel, originalOBInput);
}
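For reference, a self-contained sketch of the Calcite API used in step 4: building a RelCollation from field collations and creating a LogicalSort. The input rel and the field indices here are hypothetical.

// Sort `input` by field 1 ascending (NULLS FIRST), then field 0 descending
// (NULLS LAST); the null offset and fetch mean no OFFSET/LIMIT.
List<RelFieldCollation> collations = Arrays.asList(
    new RelFieldCollation(1, RelFieldCollation.Direction.ASCENDING,
        RelFieldCollation.NullDirection.FIRST),
    new RelFieldCollation(0, RelFieldCollation.Direction.DESCENDING,
        RelFieldCollation.NullDirection.LAST));
RelCollation collation = RelCollations.of(collations);
LogicalSort sort = LogicalSort.create(input, collation, null, null);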
Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.rel.core.Project in project flink by apache.
The class HiveParserDMLHelper, method createInsertOperationInfo:
public Tuple4<ObjectIdentifier, QueryOperation, Map<String, String>, Boolean> createInsertOperationInfo(
    RelNode queryRelNode,
    Table destTable,
    Map<String, String> staticPartSpec,
    List<String> destSchema,
    boolean overwrite) throws SemanticException {
  // sanity check
  Preconditions.checkArgument(
      queryRelNode instanceof Project
          || queryRelNode instanceof Sort
          || queryRelNode instanceof LogicalDistribution,
      "Expect top RelNode to be Project, Sort, or LogicalDistribution, actually got "
          + queryRelNode);
  if (!(queryRelNode instanceof Project)) {
    RelNode parent = ((SingleRel) queryRelNode).getInput();
    // SEL + SORT or SEL + DIST + LIMIT
    Preconditions.checkArgument(
        parent instanceof Project || parent instanceof LogicalDistribution,
        "Expect input to be a Project or LogicalDistribution, actually got " + parent);
    if (parent instanceof LogicalDistribution) {
      RelNode grandParent = ((LogicalDistribution) parent).getInput();
      Preconditions.checkArgument(
          grandParent instanceof Project,
          "Expect input of LogicalDistribution to be a Project, actually got " + grandParent);
    }
  }
  // handle dest schema, e.g. insert into dest(.,.,.) select ...
  queryRelNode =
      handleDestSchema((SingleRel) queryRelNode, destTable, destSchema, staticPartSpec.keySet());
  // track each target col and its expected type
  RelDataTypeFactory typeFactory = plannerContext.getTypeFactory();
  LinkedHashMap<String, RelDataType> targetColToCalcType = new LinkedHashMap<>();
  List<TypeInfo> targetHiveTypes = new ArrayList<>();
  List<FieldSchema> allCols = new ArrayList<>(destTable.getCols());
  allCols.addAll(destTable.getPartCols());
  for (FieldSchema col : allCols) {
    TypeInfo hiveType = TypeInfoUtils.getTypeInfoFromTypeString(col.getType());
    targetHiveTypes.add(hiveType);
    targetColToCalcType.put(col.getName(), HiveParserTypeConverter.convert(hiveType, typeFactory));
  }
  // add static partitions to query source
  if (!staticPartSpec.isEmpty()) {
    if (queryRelNode instanceof Project) {
      queryRelNode = replaceProjectForStaticPart(
          (Project) queryRelNode, staticPartSpec, destTable, targetColToCalcType);
    } else if (queryRelNode instanceof Sort) {
      Sort sort = (Sort) queryRelNode;
      RelNode oldInput = sort.getInput();
      RelNode newInput;
      if (oldInput instanceof LogicalDistribution) {
        newInput = replaceDistForStaticParts(
            (LogicalDistribution) oldInput, destTable, staticPartSpec, targetColToCalcType);
      } else {
        newInput = replaceProjectForStaticPart(
            (Project) oldInput, staticPartSpec, destTable, targetColToCalcType);
        // we may need to shift the field collations
        final int numDynmPart =
            destTable.getTTable().getPartitionKeys().size() - staticPartSpec.size();
        if (!sort.getCollation().getFieldCollations().isEmpty() && numDynmPart > 0) {
          sort.replaceInput(0, null);
          sort = LogicalSort.create(
              newInput,
              shiftRelCollation(
                  sort.getCollation(), (Project) oldInput, staticPartSpec.size(), numDynmPart),
              sort.offset,
              sort.fetch);
        }
      }
      sort.replaceInput(0, newInput);
      queryRelNode = sort;
    } else {
      queryRelNode = replaceDistForStaticParts(
          (LogicalDistribution) queryRelNode, destTable, staticPartSpec, targetColToCalcType);
    }
  }
  // add type conversions
  queryRelNode = addTypeConversions(
      plannerContext.getCluster().getRexBuilder(),
      queryRelNode,
      new ArrayList<>(targetColToCalcType.values()),
      targetHiveTypes,
      funcConverter);
  // create identifier
  List<String> targetTablePath = Arrays.asList(destTable.getDbName(), destTable.getTableName());
  UnresolvedIdentifier unresolvedIdentifier = UnresolvedIdentifier.of(targetTablePath);
  ObjectIdentifier identifier = catalogManager.qualifyIdentifier(unresolvedIdentifier);
  return Tuple4.of(identifier, new PlannerQueryOperation(queryRelNode), staticPartSpec, overwrite);
}
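The "add type conversions" step casts each projected expression to the target column's Calcite type. Below is a hedged sketch of such a conversion pass using Calcite's RexBuilder.makeCast; castToTargetTypes is a hypothetical helper, and Flink's actual addTypeConversions additionally routes Hive-specific conversions through funcConverter.

// Hypothetical helper: for each output position, cast the expression to
// the target type when the types differ; otherwise keep it as-is.
static List<RexNode> castToTargetTypes(
    RexBuilder rexBuilder, List<RexNode> exprs, List<RelDataType> targetTypes) {
  List<RexNode> converted = new ArrayList<>(exprs.size());
  for (int i = 0; i < exprs.size(); i++) {
    RexNode expr = exprs.get(i);
    RelDataType target = targetTypes.get(i);
    converted.add(expr.getType().equals(target) ? expr : rexBuilder.makeCast(target, expr));
  }
  return converted;
}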