Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.rel.core.TableScan in project drill by axbaretto.
Class HivePushPartitionFilterIntoScan, method getFilterOnProject:
public static final StoragePluginOptimizerRule getFilterOnProject(OptimizerRulesContext optimizerRulesContext,
    final String defaultPartitionValue) {
  return new PruneScanRule(
      RelOptHelper.some(DrillFilterRel.class,
          RelOptHelper.some(DrillProjectRel.class, RelOptHelper.any(DrillScanRel.class))),
      "HivePushPartitionFilterIntoScan:Filter_On_Project_Hive", optimizerRulesContext) {

    @Override
    public PartitionDescriptor getPartitionDescriptor(PlannerSettings settings, TableScan scanRel) {
      return new HivePartitionDescriptor(settings, (DrillScanRel) scanRel,
          getOptimizerRulesContext().getManagedBuffer(), defaultPartitionValue);
    }

    @Override
    public boolean matches(RelOptRuleCall call) {
      final DrillScanRel scan = (DrillScanRel) call.rel(2);
      GroupScan groupScan = scan.getGroupScan();
      // This rule is applicable only for Hive-based partition pruning.
      if (PrelUtil.getPlannerSettings(scan.getCluster().getPlanner()).isHepPartitionPruningEnabled()) {
        return groupScan instanceof HiveScan && groupScan.supportsPartitionFilterPushdown()
            && !scan.partitionFilterPushdown();
      } else {
        return groupScan instanceof HiveScan && groupScan.supportsPartitionFilterPushdown();
      }
    }

    @Override
    public void onMatch(RelOptRuleCall call) {
      final DrillFilterRel filterRel = call.rel(0);
      final DrillProjectRel projectRel = call.rel(1);
      final DrillScanRel scanRel = call.rel(2);
      doOnMatch(call, filterRel, projectRel, scanRel);
    }
  };
}
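For orientation, here is a minimal sketch of how such a rule could be handed to the planner. The getLogicalOptimizerRules hook and the "__HIVE_DEFAULT_PARTITION__" literal are illustrative assumptions, not verbatim Drill API:

// A sketch, assuming a storage-plugin hook that supplies optimizer rules.
// Hook name and default-partition literal are assumptions for illustration.
public Set<StoragePluginOptimizerRule> getLogicalOptimizerRules(OptimizerRulesContext context) {
  return ImmutableSet.of(
      HivePushPartitionFilterIntoScan.getFilterOnProject(context, "__HIVE_DEFAULT_PARTITION__"));
}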
Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.rel.core.TableScan in project drill by axbaretto.
Class HivePushPartitionFilterIntoScan, method getFilterOnScan:
public static final StoragePluginOptimizerRule getFilterOnScan(OptimizerRulesContext optimizerRulesContext,
    final String defaultPartitionValue) {
  return new PruneScanRule(
      RelOptHelper.some(DrillFilterRel.class, RelOptHelper.any(DrillScanRel.class)),
      "HivePushPartitionFilterIntoScan:Filter_On_Scan_Hive", optimizerRulesContext) {

    @Override
    public PartitionDescriptor getPartitionDescriptor(PlannerSettings settings, TableScan scanRel) {
      return new HivePartitionDescriptor(settings, (DrillScanRel) scanRel,
          getOptimizerRulesContext().getManagedBuffer(), defaultPartitionValue);
    }

    @Override
    public boolean matches(RelOptRuleCall call) {
      final DrillScanRel scan = (DrillScanRel) call.rel(1);
      GroupScan groupScan = scan.getGroupScan();
      // This rule is applicable only for Hive-based partition pruning.
      if (PrelUtil.getPlannerSettings(scan.getCluster().getPlanner()).isHepPartitionPruningEnabled()) {
        return groupScan instanceof HiveScan && groupScan.supportsPartitionFilterPushdown()
            && !scan.partitionFilterPushdown();
      } else {
        return groupScan instanceof HiveScan && groupScan.supportsPartitionFilterPushdown();
      }
    }

    @Override
    public void onMatch(RelOptRuleCall call) {
      final DrillFilterRel filterRel = call.rel(0);
      final DrillScanRel scanRel = call.rel(1);
      // No project in this operand pattern, hence the null argument.
      doOnMatch(call, filterRel, null, scanRel);
    }
  };
}
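The two factories differ only in the operand pattern (Filter over Project over Scan versus Filter directly over Scan) and in the null project forwarded to doOnMatch. A hedged sketch, reusing the hypothetical hook from above, of offering both shapes to the planner side by side:

// Illustrative only: register both variants so either plan shape gets pruned.
return ImmutableSet.of(
    HivePushPartitionFilterIntoScan.getFilterOnProject(context, defaultPartitionValue),
    HivePushPartitionFilterIntoScan.getFilterOnScan(context, defaultPartitionValue));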
Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.rel.core.TableScan in project drill by axbaretto.
Class ParquetPruneScanRule, method getFilterOnProjectParquet:
public static final RelOptRule getFilterOnProjectParquet(OptimizerRulesContext optimizerRulesContext) {
  return new PruneScanRule(
      RelOptHelper.some(DrillFilterRel.class,
          RelOptHelper.some(DrillProjectRel.class, RelOptHelper.any(DrillScanRel.class))),
      "PruneScanRule:Filter_On_Project_Parquet", optimizerRulesContext) {

    @Override
    public PartitionDescriptor getPartitionDescriptor(PlannerSettings settings, TableScan scanRel) {
      return new ParquetPartitionDescriptor(settings, (DrillScanRel) scanRel);
    }

    @Override
    public boolean matches(RelOptRuleCall call) {
      final DrillScanRel scan = call.rel(2);
      GroupScan groupScan = scan.getGroupScan();
      // This rule is applicable only for Parquet-based partition pruning.
      if (PrelUtil.getPlannerSettings(scan.getCluster().getPlanner()).isHepPartitionPruningEnabled()) {
        return groupScan instanceof ParquetGroupScan && groupScan.supportsPartitionFilterPushdown()
            && !scan.partitionFilterPushdown();
      } else {
        return groupScan instanceof ParquetGroupScan && groupScan.supportsPartitionFilterPushdown();
      }
    }

    @Override
    public void onMatch(RelOptRuleCall call) {
      final DrillFilterRel filterRel = call.rel(0);
      final DrillProjectRel projectRel = call.rel(1);
      final DrillScanRel scanRel = call.rel(2);
      doOnMatch(call, filterRel, projectRel, scanRel);
    }
  };
}
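The matches() gate is worth spelling out: under HEP-driven pruning the rule can re-fire on its own output, so the scan's partitionFilterPushdown flag acts as a re-entry guard. A restatement as a hypothetical helper (not Drill API):

// Hypothetical helper restating the predicate in matches() above.
static boolean applicable(DrillScanRel scan, PlannerSettings settings) {
  GroupScan groupScan = scan.getGroupScan();
  boolean base = groupScan instanceof ParquetGroupScan && groupScan.supportsPartitionFilterPushdown();
  // Under the HEP planner, skip scans that were already rewritten by this rule.
  return settings.isHepPartitionPruningEnabled() ? base && !scan.partitionFilterPushdown() : base;
}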
Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.rel.core.TableScan in project drill by axbaretto.
Class DrillPushProjIntoScan, method onMatch:
@Override
public void onMatch(RelOptRuleCall call) {
  final Project proj = call.rel(0);
  final TableScan scan = call.rel(1);
  try {
    ProjectPushInfo columnInfo = PrelUtil.getColumns(scan.getRowType(), proj.getProjects());
    // Get the DrillTable, wrapped in either RelOptTable or DrillTranslatableTable.
    DrillTable table = scan.getTable().unwrap(DrillTable.class);
    if (table == null) {
      table = scan.getTable().unwrap(DrillTranslatableTable.class).getDrillTable();
    }
    if (columnInfo == null || columnInfo.isStarQuery()
        || !table.getGroupScan().canPushdownProjects(columnInfo.columns)) {
      return;
    }
    final DrillScanRel newScan = new DrillScanRel(scan.getCluster(),
        scan.getTraitSet().plus(DrillRel.DRILL_LOGICAL), scan.getTable(),
        columnInfo.createNewRowType(proj.getInput().getCluster().getTypeFactory()),
        columnInfo.columns);
    List<RexNode> newProjects = Lists.newArrayList();
    for (RexNode n : proj.getChildExps()) {
      newProjects.add(n.accept(columnInfo.getInputRewriter()));
    }
    final DrillProjectRel newProj = new DrillProjectRel(proj.getCluster(),
        proj.getTraitSet().plus(DrillRel.DRILL_LOGICAL), newScan, newProjects, proj.getRowType());
    if (ProjectRemoveRule.isTrivial(newProj)) {
      // The rewritten project is a no-op; expose the new scan directly.
      call.transformTo(newScan);
    } else {
      call.transformTo(newProj);
    }
  } catch (IOException e) {
    throw new DrillRuntimeException(e);
  }
}
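A minimal sketch of exercising a rule like this through Calcite's HepPlanner; PUSH_PROJ_INTO_SCAN stands in for however the project actually exposes the rule instance, and the surrounding plumbing is illustrative:

// Illustrative driver: apply a single rule instance with the heuristic planner.
// PUSH_PROJ_INTO_SCAN is a placeholder for the real rule instance.
HepProgram program = new HepProgramBuilder()
    .addRuleInstance(PUSH_PROJ_INTO_SCAN)
    .build();
HepPlanner hepPlanner = new HepPlanner(program);
hepPlanner.setRoot(inputRel); // inputRel: a Project-over-TableScan plan
RelNode transformed = hepPlanner.findBestExp();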
Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.rel.core.TableScan in project hive by apache.
Class HiveAugmentMaterializationRule, method onMatch:
@Override
public void onMatch(RelOptRuleCall call) {
  final TableScan tableScan = call.rel(0);
  if (!visited.add(tableScan)) {
    // Already visited.
    return;
  }
  final String tableQName =
      ((RelOptHiveTable) tableScan.getTable()).getHiveTableMD().getFullyQualifiedName();
  final ValidWriteIdList tableCurrentTxnList =
      currentTxnList.getTableValidWriteIdList(tableQName);
  final ValidWriteIdList tableMaterializationTxnList =
      materializationTxnList.getTableValidWriteIdList(tableQName);
  if (TxnIdUtils.checkEquivalentWriteIds(tableCurrentTxnList, tableMaterializationTxnList)) {
    // Nothing to do.
    return;
  }
  // ROW__ID: struct<transactionid:bigint,bucketid:int,rowid:bigint>
  int rowIDPos = tableScan.getTable().getRowType()
      .getField(VirtualColumn.ROWID.getName(), false, false).getIndex();
  RexNode rowIDFieldAccess = rexBuilder.makeFieldAccess(
      rexBuilder.makeInputRef(
          tableScan.getTable().getRowType().getFieldList().get(rowIDPos).getType(), rowIDPos),
      0);
  // Now we create the filter with the transaction information.
  // In particular, each table in the materialization will only have contents such that:
  //   ROW_ID.writeid <= high_watermark and ROW_ID.writeid not in (open/invalid_ids)
  // Hence, we add that condition on top of the source table.
  // The rewriting will then have the possibility to create partial rewritings that read
  // the materialization and the source tables, and hence produce an incremental
  // rebuild that is more efficient than the full rebuild.
  final RelBuilder relBuilder = call.builder();
  relBuilder.push(tableScan);
  List<RexNode> conds = new ArrayList<>();
  RelDataType bigIntType = relBuilder.getTypeFactory().createSqlType(SqlTypeName.BIGINT);
  final RexNode literalHighWatermark =
      rexBuilder.makeLiteral(tableMaterializationTxnList.getHighWatermark(), bigIntType, false);
  conds.add(rexBuilder.makeCall(SqlStdOperatorTable.LESS_THAN_OR_EQUAL,
      ImmutableList.of(rowIDFieldAccess, literalHighWatermark)));
  for (long invalidTxn : tableMaterializationTxnList.getInvalidWriteIds()) {
    final RexNode literalInvalidTxn = rexBuilder.makeLiteral(invalidTxn, bigIntType, false);
    conds.add(rexBuilder.makeCall(SqlStdOperatorTable.NOT_EQUALS,
        ImmutableList.of(rowIDFieldAccess, literalInvalidTxn)));
  }
  relBuilder.filter(conds);
  call.transformTo(relBuilder.build());
}
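Note that relBuilder.filter(conds) conjoins the supplied conditions with AND. A sketch of the equivalent explicit construction, restating rather than quoting Hive code:

// Equivalent explicit form of the appended predicate (illustrative):
//   ROW__ID.writeid <= highWatermark AND ROW__ID.writeid <> invalidId_1 AND ...
RexNode combined = RexUtil.composeConjunction(rexBuilder, conds);
relBuilder.filter(combined);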