Use of org.apache.calcite.plan.hep.HepProgramBuilder in project drill by apache.
Class DefaultSqlHandler, method transform:
/**
 * Transform a RelNode into a new RelNode, targeting the provided set of traits. Optionally logs the outcome.
 *
 * @param plannerType The type of planner to use.
 * @param phase The transformation phase we're running.
 * @param input The original RelNode.
 * @param targetTraits The traits we are targeting for output.
 * @param log Whether to log the planning phase.
 * @return The transformed RelNode.
 */
protected RelNode transform(PlannerType plannerType, PlannerPhase phase, RelNode input,
    RelTraitSet targetTraits, boolean log) {
  final Stopwatch watch = Stopwatch.createStarted();
  final RuleSet rules = config.getRules(phase);
  final RelTraitSet toTraits = targetTraits.simplify();
  final RelNode output;
  switch (plannerType) {
    case HEP_BOTTOM_UP:
    case HEP: {
      final HepProgramBuilder hepPgmBldr = new HepProgramBuilder();
      if (plannerType == PlannerType.HEP_BOTTOM_UP) {
        hepPgmBldr.addMatchOrder(HepMatchOrder.BOTTOM_UP);
      }
      for (RelOptRule rule : rules) {
        hepPgmBldr.addRuleInstance(rule);
      }
      // Set noDag = true to avoid caching problems which lead to incorrect Drill work.
      final HepPlanner planner = new HepPlanner(hepPgmBldr.build(), context.getPlannerSettings(),
          true, null, RelOptCostImpl.FACTORY);
      JaninoRelMetadataProvider relMetadataProvider = Utilities.registerJaninoRelMetadataProvider();
      // Modify the RelMetadataProvider for every RelNode in the SQL operator Rel tree.
      input.accept(new MetaDataProviderModifier(relMetadataProvider));
      planner.setRoot(input);
      if (!input.getTraitSet().equals(targetTraits)) {
        planner.changeTraits(input, toTraits);
      }
      output = planner.findBestExp();
      break;
    }
    case VOLCANO:
    default: {
      // As weird as it seems, the cluster's only planner is the Volcano planner.
      final RelOptPlanner planner = input.getCluster().getPlanner();
      final Program program = Programs.of(rules);
      Preconditions.checkArgument(planner instanceof VolcanoPlanner,
          "Cluster is expected to be constructed using VolcanoPlanner. Was actually of type %s.",
          planner.getClass().getName());
      output = program.run(planner, input, toTraits, ImmutableList.of(), ImmutableList.of());
      break;
    }
  }
  if (log) {
    log(plannerType, phase, output, logger, watch);
  }
  return output;
}
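
Distilled from the Drill method above, a minimal sketch of the bottom-up HEP pass in isolation. The helper class and the null planner context are assumptions; any collection of Calcite rules can be passed in.

import java.util.Collection;

import org.apache.calcite.plan.RelOptCostImpl;
import org.apache.calcite.plan.RelOptRule;
import org.apache.calcite.plan.hep.HepMatchOrder;
import org.apache.calcite.plan.hep.HepPlanner;
import org.apache.calcite.plan.hep.HepProgramBuilder;
import org.apache.calcite.rel.RelNode;

public final class HepBottomUpSketch {
  // Hypothetical helper: run the given rules bottom-up over the input plan.
  static RelNode runBottomUp(RelNode input, Collection<RelOptRule> rules) {
    HepProgramBuilder builder = new HepProgramBuilder();
    builder.addMatchOrder(HepMatchOrder.BOTTOM_UP); // match leaves before roots
    rules.forEach(builder::addRuleInstance);
    // noDag = true treats the plan as a tree, mirroring the Drill workaround above;
    // a null Context stands in for Drill's planner settings.
    HepPlanner planner = new HepPlanner(builder.build(), null, true, null, RelOptCostImpl.FACTORY);
    planner.setRoot(input);
    return planner.findBestExp();
  }
}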
Use of org.apache.calcite.plan.hep.HepProgramBuilder in project druid by druid-io.
Class Rules, method buildHepProgram:
private static Program buildHepProgram(Iterable<? extends RelOptRule> rules, boolean noDag,
    RelMetadataProvider metadataProvider, int matchLimit) {
  final HepProgramBuilder builder = HepProgram.builder();
  builder.addMatchLimit(matchLimit);
  for (RelOptRule rule : rules) {
    builder.addRuleInstance(rule);
  }
  return Programs.of(builder.build(), noDag, metadataProvider);
}
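
The same pattern can be wrapped for reuse outside Druid. A hedged sketch, assuming Calcite 1.23+ so CoreRules is available; the FILTER_MERGE rule and the match limit of 100 are arbitrary example choices.

import org.apache.calcite.plan.hep.HepProgram;
import org.apache.calcite.plan.hep.HepProgramBuilder;
import org.apache.calcite.rel.metadata.DefaultRelMetadataProvider;
import org.apache.calcite.rel.rules.CoreRules;
import org.apache.calcite.tools.Program;
import org.apache.calcite.tools.Programs;

public final class HepAsProgramSketch {
  // Hypothetical factory: a Program that repeatedly merges adjacent filters.
  public static Program filterMergePass() {
    HepProgramBuilder builder = HepProgram.builder();
    builder.addMatchLimit(100); // cap the total number of rule firings
    builder.addRuleInstance(CoreRules.FILTER_MERGE);
    // noDag = true: treat the plan as a tree rather than a shared DAG.
    return Programs.of(builder.build(), true, DefaultRelMetadataProvider.INSTANCE);
  }
}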
Use of org.apache.calcite.plan.hep.HepProgramBuilder in project hive by apache.
Class HiveMaterializedViewUtils, method augmentMaterializationWithTimeInformation:
/**
 * Method to enrich the materialization query contained in the input with
 * the transaction information needed to determine its invalidation.
 */
public static HiveRelOptMaterialization augmentMaterializationWithTimeInformation(
    HiveRelOptMaterialization materialization, String validTxnsList,
    ValidTxnWriteIdList materializationTxnList) throws LockException {
  // Extract tables used by the query, which will in turn be used to generate
  // the corresponding txn write ids
  List<String> tablesUsed = new ArrayList<>();
  new RelVisitor() {
    @Override
    public void visit(RelNode node, int ordinal, RelNode parent) {
      if (node instanceof TableScan) {
        TableScan ts = (TableScan) node;
        tablesUsed.add(((RelOptHiveTable) ts.getTable()).getHiveTableMD().getFullyQualifiedName());
      }
      super.visit(node, ordinal, parent);
    }
  }.go(materialization.queryRel);
  ValidTxnWriteIdList currentTxnList =
      SessionState.get().getTxnMgr().getValidWriteIds(tablesUsed, validTxnsList);
  // Augment the materialization query with the time information
  final RexBuilder rexBuilder = materialization.queryRel.getCluster().getRexBuilder();
  final HepProgramBuilder augmentMaterializationProgram = new HepProgramBuilder()
      .addRuleInstance(new HiveAugmentMaterializationRule(rexBuilder, currentTxnList, materializationTxnList));
  final HepPlanner augmentMaterializationPlanner =
      new HepPlanner(augmentMaterializationProgram.build());
  augmentMaterializationPlanner.setRoot(materialization.queryRel);
  final RelNode modifiedQueryRel = augmentMaterializationPlanner.findBestExp();
  return new HiveRelOptMaterialization(materialization.tableRel, modifiedQueryRel, null,
      materialization.qualifiedTableName, materialization.getScope(), materialization.getRebuildMode());
}
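
Stripped of the Hive-specific pieces, the planning core of the method is a generic one-rule HEP rewrite. A minimal sketch, assuming the caller supplies the rule (HiveAugmentMaterializationRule plays that role above); the helper name is hypothetical.

import org.apache.calcite.plan.RelOptRule;
import org.apache.calcite.plan.hep.HepPlanner;
import org.apache.calcite.plan.hep.HepProgramBuilder;
import org.apache.calcite.rel.RelNode;

public final class SingleRuleRewrite {
  // Hypothetical helper: apply a single rewrite rule to a plan.
  static RelNode apply(RelNode root, RelOptRule rule) {
    HepPlanner planner = new HepPlanner(new HepProgramBuilder().addRuleInstance(rule).build());
    planner.setRoot(root);
    return planner.findBestExp(); // the rewritten plan, or the input if the rule never fired
  }
}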
Use of org.apache.calcite.plan.hep.HepProgramBuilder in project hive by apache.
Class TestHivePointLookupOptimizerRule, method before:
@Before
public void before() {
  HepProgramBuilder programBuilder = new HepProgramBuilder();
  programBuilder.addRuleInstance(new HivePointLookupOptimizerRule.FilterCondition(2));
  planner = new HepPlanner(programBuilder.build());
  JavaTypeFactoryImpl typeFactory = new JavaTypeFactoryImpl();
  RexBuilder rexBuilder = new RexBuilder(typeFactory);
  final RelOptCluster optCluster = RelOptCluster.create(planner, rexBuilder);
  RelDataType rowTypeMock = typeFactory.createStructType(MyRecord.class);
  doReturn(rowTypeMock).when(tableMock).getRowType();
  LogicalTableScan tableScan = LogicalTableScan.create(optCluster, tableMock, Collections.emptyList());
  doReturn(tableScan).when(tableMock).toRel(ArgumentMatchers.any());
  doReturn(tableMock).when(schemaMock).getTableForMember(any());
  lenient().doReturn(hiveTableMDMock).when(tableMock).getHiveTableMD();
  builder = HiveRelFactories.HIVE_BUILDER.create(optCluster, schemaMock);
}
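
The same fixture can be assembled without Mockito. A mock-free sketch of the planner-plus-cluster setup; the helper name is hypothetical, and the rule under test is passed in rather than hard-coded.

import org.apache.calcite.jdbc.JavaTypeFactoryImpl;
import org.apache.calcite.plan.RelOptCluster;
import org.apache.calcite.plan.RelOptRule;
import org.apache.calcite.plan.hep.HepPlanner;
import org.apache.calcite.plan.hep.HepProgramBuilder;
import org.apache.calcite.rex.RexBuilder;

public final class PlannerFixtureSketch {
  // Hypothetical helper: a cluster whose planner holds exactly the rule under test.
  static RelOptCluster newCluster(RelOptRule ruleUnderTest) {
    HepProgramBuilder programBuilder = new HepProgramBuilder();
    programBuilder.addRuleInstance(ruleUnderTest);
    HepPlanner planner = new HepPlanner(programBuilder.build());
    // The cluster ties the planner to a RexBuilder so tests can construct RelNodes.
    return RelOptCluster.create(planner, new RexBuilder(new JavaTypeFactoryImpl()));
  }
}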
Use of org.apache.calcite.plan.hep.HepProgramBuilder in project hive by apache.
Class TestCBORuleFiredOnlyOnce, method testRuleFiredOnlyOnce:
@Test
public void testRuleFiredOnlyOnce() {
  HiveConf conf = new HiveConf();
  // Create HepPlanner
  HepProgramBuilder programBuilder = new HepProgramBuilder();
  programBuilder.addMatchOrder(HepMatchOrder.TOP_DOWN);
  programBuilder = programBuilder.addRuleCollection(ImmutableList.<RelOptRule>of(DummyRule.INSTANCE));
  // Create a rules registry so that a rule is not triggered more than once
  HiveRulesRegistry registry = new HiveRulesRegistry();
  HivePlannerContext context = new HivePlannerContext(null, registry, null, null, null, null);
  HepPlanner planner = new HepPlanner(programBuilder.build(), context);
  // Cluster
  RexBuilder rexBuilder = new RexBuilder(new JavaTypeFactoryImpl());
  RelOptCluster cluster = RelOptCluster.create(planner, rexBuilder);
  // Create the metadata provider
  HiveDefaultRelMetadataProvider mdProvider = new HiveDefaultRelMetadataProvider(conf, null);
  List<RelMetadataProvider> list = Lists.newArrayList();
  list.add(mdProvider.getMetadataProvider());
  planner.registerMetadataProviders(list);
  RelMetadataProvider chainedProvider = ChainedRelMetadataProvider.of(list);
  final RelNode node = new DummyNode(cluster, cluster.traitSet());
  node.getCluster().setMetadataProvider(new CachingRelMetadataProvider(chainedProvider, planner));
  planner.setRoot(node);
  planner.findBestExp();
  // The rule matches twice: once on the original node and once on the node it creates
  assertEquals(2, DummyRule.INSTANCE.numberMatches);
  // But it fires only once: on the original node
  assertEquals(1, DummyRule.INSTANCE.numberOnMatch);
}
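
The planner construction used by the test, extracted as a standalone sketch: TOP_DOWN match order plus a rule collection. The helper name and its parameter are assumptions; the Hive rules registry and planner context are omitted.

import com.google.common.collect.ImmutableList;

import org.apache.calcite.plan.RelOptRule;
import org.apache.calcite.plan.hep.HepMatchOrder;
import org.apache.calcite.plan.hep.HepPlanner;
import org.apache.calcite.plan.hep.HepProgramBuilder;

public final class TopDownPlannerSketch {
  // Hypothetical factory: a HepPlanner that matches rules from the root down.
  static HepPlanner topDownPlanner(ImmutableList<RelOptRule> rules) {
    HepProgramBuilder programBuilder = new HepProgramBuilder();
    programBuilder.addMatchOrder(HepMatchOrder.TOP_DOWN); // match roots before leaves
    programBuilder.addRuleCollection(rules);
    return new HepPlanner(programBuilder.build());
  }
}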