Use of org.apache.drill.exec.physical.base.IndexGroupScan in project drill by apache: class CoveringPlanNoFilterGenerator, method convertChild.
public RelNode convertChild() throws InvalidRelException {
  Preconditions.checkNotNull(indexContext.getSort());
  if (indexGroupScan == null) {
    logger.error("Null indexgroupScan in CoveringIndexPlanGenerator.convertChild");
    return null;
  }
  // update sort expressions in context
  IndexPlanUtils.updateSortExpression(indexContext,
      indexContext.getSort() != null ? indexContext.getCollation().getFieldCollations() : null);
  ScanPrel indexScanPrel =
      IndexPlanUtils.buildCoveringIndexScan(origScan, indexGroupScan, indexContext, indexDesc);
  ((IndexGroupScan) indexScanPrel.getGroupScan())
      .setStatistics(((DbGroupScan) IndexPlanUtils.getGroupScan(origScan)).getStatistics());
  RelTraitSet indexScanTraitSet = indexScanPrel.getTraitSet();
  RelNode finalRel = indexScanPrel;
  if (indexContext.getLowerProject() != null) {
    RelCollation collation = IndexPlanUtils.buildCollationProject(
        indexContext.getLowerProject().getProjects(), null, indexContext.getScan(), functionInfo, indexContext);
    finalRel = new ProjectPrel(indexContext.getScan().getCluster(), indexScanTraitSet.plus(collation),
        indexScanPrel, indexContext.getLowerProject().getProjects(), indexContext.getLowerProject().getRowType());
    if (functionInfo.hasFunctional()) {
      // If there is a functional index field, a rewrite may be needed in upperProject/indexProject:
      // merge upperProject with indexProjectPrel (from origProject) if both exist, then rewrite
      // the functional expressions in the new Project.
      ProjectPrel newProject = (ProjectPrel) finalRel;
      List<RexNode> newProjects = Lists.newArrayList();
      DrillParseContext parseContxt = new DrillParseContext(PrelUtil.getPlannerSettings(newProject.getCluster()));
      for (RexNode projectRex : newProject.getProjects()) {
        RexNode newRex = IndexPlanUtils.rewriteFunctionalRex(indexContext, parseContxt, null, origScan,
            projectRex, indexScanPrel.getRowType(), functionInfo);
        newProjects.add(newRex);
      }
      ProjectPrel rewrittenProject = new ProjectPrel(newProject.getCluster(),
          collation == null ? newProject.getTraitSet() : newProject.getTraitSet().plus(collation),
          indexScanPrel, newProjects, newProject.getRowType());
      finalRel = rewrittenProject;
    }
  }
  finalRel = getSortNode(indexContext, finalRel, true, isSingletonSortedStream, indexContext.getExchange() != null);
  if (finalRel == null) {
    return null;
  }
  finalRel = Prule.convert(finalRel, finalRel.getTraitSet().plus(Prel.DRILL_PHYSICAL));
  logger.debug("CoveringPlanNoFilterGenerator got finalRel {} from origScan {}, original digest {}, new digest {}.",
      finalRel.toString(), indexContext.getScan().toString(),
      indexContext.getLowerProject() != null ? indexContext.getLowerProject().getDigest() : indexContext.getScan().getDigest(),
      finalRel.getDigest());
  return finalRel;
}
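Every generator in this listing performs the same preparatory step before costing kicks in: the index's group scan is handed the primary table's statistics (the setStatistics calls above and in DbScanToIndexScanPrule further down), so index plans and full-table plans are costed from the same numbers. A minimal sketch of that pattern, assuming the helper name and the DrillScanRel parameter type are illustrative only:

// Hypothetical helper illustrating the statistics-copy pattern; not part of Drill's source.
private static void copyPrimaryStatsToIndexScan(DrillScanRel origScan, IndexGroupScan indexGroupScan) {
  GroupScan primary = IndexPlanUtils.getGroupScan(origScan);
  if (primary instanceof DbGroupScan) {
    // cost the index scan with the primary table's statistics
    indexGroupScan.setStatistics(((DbGroupScan) primary).getStatistics());
  }
}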
Use of org.apache.drill.exec.physical.base.IndexGroupScan in project drill by apache: class NonCoveringIndexPlanGenerator, method convertChild.
@Override
public RelNode convertChild(final RelNode topRel, final RelNode input) throws InvalidRelException {
  if (indexGroupScan == null) {
    logger.error("Null indexgroupScan in NonCoveringIndexPlanGenerator.convertChild");
    return null;
  }
  RelDataType dbscanRowType = convertRowType(origScan.getRowType(), origScan.getCluster().getTypeFactory());
  RelDataType indexScanRowType = FunctionalIndexHelper.convertRowTypeForIndexScan(
      origScan, indexContext.getOrigMarker(), indexGroupScan, functionInfo);
  DrillDistributionTrait partition = IndexPlanUtils.scanIsPartition(IndexPlanUtils.getGroupScan(origScan))
      ? DrillDistributionTrait.RANDOM_DISTRIBUTED : DrillDistributionTrait.SINGLETON;
  ScanPrel indexScanPrel = new ScanPrel(origScan.getCluster(),
      origScan.getTraitSet().plus(Prel.DRILL_PHYSICAL).plus(partition), indexGroupScan,
      indexScanRowType, origScan.getTable());
  DbGroupScan origDbGroupScan = (DbGroupScan) IndexPlanUtils.getGroupScan(origScan);
  // right (build) side of the rowkey join: do a distribution of the project-filter-indexscan subplan
  RexNode convertedIndexCondition = FunctionalIndexHelper.convertConditionForIndexScan(
      indexCondition, origScan, indexScanRowType, builder, functionInfo);
  FilterPrel rightIndexFilterPrel = new FilterPrel(indexScanPrel.getCluster(), indexScanPrel.getTraitSet(),
      indexScanPrel, convertedIndexCondition);
  double finalRowCount = indexGroupScan.getRowCount(indexContext.getOrigCondition(), origScan);
  // project the rowkey column from the index scan
  List<RexNode> rightProjectExprs = Lists.newArrayList();
  // indexGroupScan.getRowKeyOrdinal();
  int rightRowKeyIndex = getRowKeyIndex(indexScanPrel.getRowType(), origScan);
  assert rightRowKeyIndex >= 0;
  rightProjectExprs.add(RexInputRef.of(rightRowKeyIndex, indexScanPrel.getRowType()));
  final List<RelDataTypeField> indexScanFields = indexScanPrel.getRowType().getFieldList();
  final RelDataTypeFactory.FieldInfoBuilder rightFieldTypeBuilder =
      indexScanPrel.getCluster().getTypeFactory().builder();
  // build the row type for the right Project
  final RelDataTypeField rightRowKeyField = indexScanFields.get(rightRowKeyIndex);
  rightFieldTypeBuilder.add(rightRowKeyField);
  final RelDataType rightProjectRowType = rightFieldTypeBuilder.build();
  final ProjectPrel rightIndexProjectPrel = new ProjectPrel(indexScanPrel.getCluster(),
      indexScanPrel.getTraitSet(), rightIndexFilterPrel, rightProjectExprs, rightProjectRowType);
  // Create a RANGE PARTITION on the right side (this could be removed later during the
  // ExcessiveExchangeIdentifier phase if the estimated row count is smaller than slice_target).
  final RelNode rangeDistRight = createRangeDistRight(rightIndexProjectPrel, rightRowKeyField, origDbGroupScan);
  // The range partitioning adds an extra column for the partition id, but in the final plan we already have a
  // renaming Project for the _id field inserted as part of the JoinPrelRenameVisitor. Thus, we are not inserting
  // a separate Project here.
  final RelNode convertedRight = rangeDistRight;
  // left (probe) side of the rowkey join
  List<SchemaPath> cols = new ArrayList<SchemaPath>(origDbGroupScan.getColumns());
  if (!checkRowKey(cols)) {
    cols.add(origDbGroupScan.getRowKeyPath());
  }
  // Create a restricted groupscan from the primary table's groupscan
  DbGroupScan restrictedGroupScan = (DbGroupScan) origDbGroupScan.getRestrictedScan(cols);
  if (restrictedGroupScan == null) {
    logger.error("Null restricted groupscan in NonCoveringIndexPlanGenerator.convertChild");
    return null;
  }
  // Set the left side (restricted scan) row count to the number of rows returned from the right side (index scan)
  DrillScanRel rightIdxRel = new DrillScanRel(origScan.getCluster(), origScan.getTraitSet(),
      origScan.getTable(), origScan.getRowType(), indexContext.getScanColumns());
  double rightIdxRowCount = indexGroupScan.getRowCount(indexCondition, rightIdxRel);
  restrictedGroupScan.setRowCount(null, rightIdxRowCount, rightIdxRowCount);
  RelTraitSet origScanTraitSet = origScan.getTraitSet();
  RelTraitSet restrictedScanTraitSet = origScanTraitSet.plus(Prel.DRILL_PHYSICAL);
  // Create the collation traits for the restricted scan based on the index columns, under the
  // conditions that (a) the index actually has a collation property (e.g. hash indexes don't)
  // and (b) an explicit sort operation is not enforced.
  RelCollation collation = null;
  if (indexDesc.getCollation() != null && !settings.isIndexForceSortNonCovering()) {
    collation = IndexPlanUtils.buildCollationNonCoveringIndexScan(indexDesc, indexScanRowType, dbscanRowType, indexContext);
    if (restrictedScanTraitSet.contains(RelCollationTraitDef.INSTANCE)) {
      // replace the existing trait
      restrictedScanTraitSet = restrictedScanTraitSet.plus(partition).replace(collation);
    } else {
      // add a new one
      restrictedScanTraitSet = restrictedScanTraitSet.plus(partition).plus(collation);
    }
  }
  ScanPrel dbScan = new ScanPrel(origScan.getCluster(), restrictedScanTraitSet, restrictedGroupScan,
      dbscanRowType, origScan.getTable());
  RelNode lastLeft = dbScan;
  // build the row type for the left Project
  List<RexNode> leftProjectExprs = Lists.newArrayList();
  int leftRowKeyIndex = getRowKeyIndex(dbScan.getRowType(), origScan);
  final RelDataTypeField leftRowKeyField = dbScan.getRowType().getFieldList().get(leftRowKeyIndex);
  final RelDataTypeFactory.FieldInfoBuilder leftFieldTypeBuilder = dbScan.getCluster().getTypeFactory().builder();
  // We apply the same index condition to the primary table's restricted scan. The reason is that the index may be
  // an async index, i.e. it is not synchronously updated along with the primary table update as part of a single
  // transaction, so it is possible that after or during the index scan the primary table rows have been updated
  // and no longer satisfy the index condition. By re-applying the index condition here, we ensure that
  // non-qualifying records are filtered out. The remainder condition will be applied on top of the RowKeyJoin.
  FilterPrel leftIndexFilterPrel = null;
  if (indexDesc.isAsyncIndex()) {
    leftIndexFilterPrel = new FilterPrel(dbScan.getCluster(), dbScan.getTraitSet(), dbScan, indexContext.getOrigCondition());
    lastLeft = leftIndexFilterPrel;
  }
  RelDataType origRowType = origProject == null ? origScan.getRowType() : origProject.getRowType();
  if (origProject != null) {
    // If there was no original Project, we also don't need a Project here.
    // The new Project's rowtype is the original Project's rowtype [plus the rowkey if the rowkey is not in the original rowtype].
    List<RelDataTypeField> origProjFields = origRowType.getFieldList();
    leftFieldTypeBuilder.addAll(origProjFields);
    // get the exprs from the original Project
    leftProjectExprs.addAll(IndexPlanUtils.getProjects(origProject));
    // add the rowkey IFF the rowkey is not in the original scan
    if (getRowKeyIndex(origRowType, origScan) < 0) {
      leftFieldTypeBuilder.add(leftRowKeyField);
      leftProjectExprs.add(RexInputRef.of(leftRowKeyIndex, dbScan.getRowType()));
    }
    final RelDataType leftProjectRowType = leftFieldTypeBuilder.build();
    // build the collation in the Project
    if (!settings.isIndexForceSortNonCovering()) {
      collation = IndexPlanUtils.buildCollationProject(leftProjectExprs, null, dbScan, functionInfo, indexContext);
    }
    final ProjectPrel leftIndexProjectPrel = new ProjectPrel(dbScan.getCluster(),
        collation != null ? dbScan.getTraitSet().plus(collation) : dbScan.getTraitSet(),
        leftIndexFilterPrel == null ? dbScan : leftIndexFilterPrel, leftProjectExprs, leftProjectRowType);
    lastLeft = leftIndexProjectPrel;
  }
  final RelTraitSet leftTraits = dbScan.getTraitSet().plus(Prel.DRILL_PHYSICAL);
  // final RelNode convertedLeft = convert(leftIndexProjectPrel, leftTraits);
  final RelNode convertedLeft = Prule.convert(lastLeft, leftTraits);
  // find the rowkey column on the left side of the join
  final int leftRowKeyIdx = getRowKeyIndex(convertedLeft.getRowType(), origScan);
  // only the rowkey field is projected from the right side
  final int rightRowKeyIdx = 0;
  assert leftRowKeyIdx >= 0;
  List<Integer> leftJoinKeys = ImmutableList.of(leftRowKeyIdx);
  List<Integer> rightJoinKeys = ImmutableList.of(rightRowKeyIdx);
  RexNode joinCondition = RelOptUtil.createEquiJoinCondition(convertedLeft, leftJoinKeys, convertedRight, rightJoinKeys, builder);
  RelNode newRel;
  if (settings.isIndexUseHashJoinNonCovering()) {
    // for a hash join, the collation will be cleared
    HashJoinPrel hjPrel = new HashJoinPrel(topRel.getCluster(), leftTraits, convertedLeft, convertedRight,
        joinCondition, JoinRelType.INNER, false, /* no swap */
        null, /* no runtime filter */
        true, /* useful for join-restricted scans */
        JoinControl.DEFAULT);
    newRel = hjPrel;
  } else {
    // if there is a collation, add it to the rowkey join
    RowKeyJoinPrel rjPrel = new RowKeyJoinPrel(topRel.getCluster(),
        collation != null ? leftTraits.plus(collation) : leftTraits,
        convertedLeft, convertedRight, joinCondition, JoinRelType.INNER);
    rjPrel.setEstimatedRowCount(finalRowCount);
    newRel = rjPrel;
  }
  final RelDataTypeFactory.FieldInfoBuilder finalFieldTypeBuilder = origScan.getCluster().getTypeFactory().builder();
  List<RelDataTypeField> rjRowFields = newRel.getRowType().getFieldList();
  int toRemoveRowKeyCount = 1;
  if (getRowKeyIndex(origRowType, origScan) < 0) {
    toRemoveRowKeyCount = 2;
  }
  finalFieldTypeBuilder.addAll(rjRowFields.subList(0, rjRowFields.size() - toRemoveRowKeyCount));
  final RelDataType finalProjectRowType = finalFieldTypeBuilder.build();
  List<RexNode> resetExprs = Lists.newArrayList();
  for (int idx = 0; idx < rjRowFields.size() - toRemoveRowKeyCount; ++idx) {
    resetExprs.add(RexInputRef.of(idx, newRel.getRowType()));
  }
  // rewrite the collation for this ProjectPrel
  final ProjectPrel resetProjectPrel = new ProjectPrel(newRel.getCluster(), newRel.getTraitSet(),
      newRel, resetExprs, finalProjectRowType);
  newRel = resetProjectPrel;
  if (upperProject != null) {
    RelCollation newCollation = RelCollations.of(RelCollations.EMPTY.getFieldCollations());
    DrillDistributionTrait newDist =
        upperProject.getInput().getTraitSet().getTrait(DrillDistributionTraitDef.INSTANCE);
    if (!settings.isIndexForceSortNonCovering()) {
      newCollation = IndexPlanUtils.buildCollationProject(IndexPlanUtils.getProjects(upperProject),
          origProject, origScan, functionInfo, indexContext);
    }
    RelTraitSet newProjectTraits = newTraitSet(Prel.DRILL_PHYSICAL, newDist, newCollation);
    ProjectPrel cap = new ProjectPrel(upperProject.getCluster(), newProjectTraits, newRel,
        IndexPlanUtils.getProjects(upperProject), upperProject.getRowType());
    newRel = cap;
  }
  // decide whether to remove the sort
  if (indexContext.getSort() != null) {
    // Limit the index scan to a single parallel width when the sort can be dropped, since ordering
    // is not guaranteed across different parallel inputs.
    if (toRemoveSort(indexContext.getCollation(), newRel.getTraitSet().getTrait(RelCollationTraitDef.INSTANCE))) {
      ((IndexGroupScan) indexScanPrel.getGroupScan()).setParallelizationWidth(1);
    }
    newRel = getSortNode(indexContext, newRel, false, true, true);
    Preconditions.checkArgument(newRel != null);
  }
  RelNode finalRel = Prule.convert(newRel, newRel.getTraitSet());
  logger.debug("NonCoveringIndexPlanGenerator got finalRel {} from origScan {}", finalRel.toString(), origScan.toString());
  return finalRel;
}
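The left (probe) side above hinges on the primary table supporting a restricted scan: only the needed columns (plus the rowkey) are requested, and its row count is set to the number of rowkeys expected back from the index side. A condensed sketch of that step, assuming the helper name is illustrative and that cols.contains(...) stands in for the checkRowKey helper used above:

// Illustrative sketch of the restricted-scan setup; helper name and the contains() check are assumptions.
private static DbGroupScan buildRestrictedScan(DbGroupScan origDbGroupScan, double indexRowCount) {
  List<SchemaPath> cols = new ArrayList<>(origDbGroupScan.getColumns());
  if (!cols.contains(origDbGroupScan.getRowKeyPath())) {
    cols.add(origDbGroupScan.getRowKeyPath());  // the rowkey is needed as the join key
  }
  DbGroupScan restricted = (DbGroupScan) origDbGroupScan.getRestrictedScan(cols);
  if (restricted != null) {
    // the restricted scan only reads the rows whose keys come back from the index side
    restricted.setRowCount(null, indexRowCount, indexRowCount);
  }
  return restricted;
}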
Use of org.apache.drill.exec.physical.base.IndexGroupScan in project drill by apache: class IndexIntersectPlanGenerator, method buildIntersectPlan.
public RelNode buildIntersectPlan(Map.Entry<IndexDescriptor, RexNode> pair, RelNode right,
    boolean generateDistribution) throws InvalidRelException {
  IndexDescriptor index = pair.getKey();
  RexNode condition = pair.getValue();
  FunctionalIndexInfo functionInfo = getFunctionalIndexInfo(index);
  IndexGroupScan indexScan = index.getIndexGroupScan();
  RelDataType indexScanRowType = FunctionalIndexHelper.convertRowTypeForIndexScan(
      origScan, indexContext.getOrigMarker(), indexScan, functionInfo);
  DrillDistributionTrait partition = IndexPlanUtils.scanIsPartition(IndexPlanUtils.getGroupScan(origScan))
      ? DrillDistributionTrait.RANDOM_DISTRIBUTED : DrillDistributionTrait.SINGLETON;
  ScanPrel indexScanPrel = new ScanPrel(origScan.getCluster(),
      origScan.getTraitSet().plus(Prel.DRILL_PHYSICAL).plus(partition), indexScan, indexScanRowType, origScan.getTable());
  FilterPrel indexFilterPrel = new FilterPrel(indexScanPrel.getCluster(), indexScanPrel.getTraitSet(), indexScanPrel,
      FunctionalIndexHelper.convertConditionForIndexScan(condition, origScan, indexScanRowType, builder, functionInfo));
  // project the rowkey column from the index scan
  List<RexNode> indexProjectExprs = Lists.newArrayList();
  int rowKeyIndex = getRowKeyIndex(indexScanPrel.getRowType(), origScan);
  assert rowKeyIndex >= 0;
  indexProjectExprs.add(RexInputRef.of(rowKeyIndex, indexScanPrel.getRowType()));
  final RelDataTypeFactory.FieldInfoBuilder rightFieldTypeBuilder = indexScanPrel.getCluster().getTypeFactory().builder();
  // build the row type for the right Project
  final List<RelDataTypeField> indexScanFields = indexScanPrel.getRowType().getFieldList();
  final RelDataTypeField rightRowKeyField = indexScanFields.get(rowKeyIndex);
  rightFieldTypeBuilder.add(rightRowKeyField);
  final RelDataType indexProjectRowType = rightFieldTypeBuilder.build();
  final ProjectPrel indexProjectPrel = new ProjectPrel(indexScanPrel.getCluster(), indexScanPrel.getTraitSet(),
      indexFilterPrel, indexProjectExprs, indexProjectRowType);
  RelTraitSet rightSideTraits = newTraitSet().plus(Prel.DRILL_PHYSICAL);
  // if the build (right) side does not exist, this index scan is the rightmost
  if (right == null) {
    if (partition == DrillDistributionTrait.RANDOM_DISTRIBUTED && settings.getSliceTarget() < indexProjectPrel.getRows()) {
      final DrillDistributionTrait distRight = new DrillDistributionTrait(DistributionType.BROADCAST_DISTRIBUTED);
      rightSideTraits = newTraitSet(distRight).plus(Prel.DRILL_PHYSICAL);
    }
  }
  RelNode converted = Prule.convert(indexProjectPrel, rightSideTraits);
  if (right == null) {
    return converted;
  }
  // if the build (right) side exists, the plan we got in 'converted' is the left (probe) side;
  // intersect it with the right (build) side
  RelNode finalRel = buildRowKeyJoin(converted, right, false, JoinControl.INTERSECT_DISTINCT);
  if (generateDistribution &&
      right.getTraitSet().getTrait(DrillDistributionTraitDef.INSTANCE) != DrillDistributionTrait.SINGLETON) {
    final DrillDistributionTrait distRight = new DrillDistributionTrait(DistributionType.BROADCAST_DISTRIBUTED);
    rightSideTraits = newTraitSet(distRight).plus(Prel.DRILL_PHYSICAL);
    // this join will serve as the right side for the next intersection join, if applicable
    finalRel = Prule.convert(finalRel, rightSideTraits);
  }
  logger.trace("IndexIntersectPlanGenerator got finalRel {} from origScan {}", finalRel.toString(), origScan.toString());
  return finalRel;
}
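buildIntersectPlan is designed to be called once per index, right to left: the first call (right == null) returns just the rowkey projection of that index scan, and each subsequent call joins the new index's rowkeys with the plan built so far under INTERSECT_DISTINCT join control, as the trailing comment notes. A hedged sketch of such a driver loop, where the map variable and the generateDistribution choice are assumptions, not Drill source:

// Illustrative driver; 'conditionPerIndex' is an assumed map from each candidate index to its pushed-down condition.
RelNode right = null;
for (Map.Entry<IndexDescriptor, RexNode> pair : conditionPerIndex.entrySet()) {
  right = buildIntersectPlan(pair, right, true /* may broadcast this side for the next join */);
}
RelNode intersectPlan = right;  // rowkeys satisfying all of the index conditions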
Use of org.apache.drill.exec.physical.base.IndexGroupScan in project drill by apache: class DbScanToIndexScanPrule, method processWithIndexSelection.
private void processWithIndexSelection(IndexLogicalPlanCallContext indexContext, PlannerSettings settings,
    RexNode condition, IndexCollection collection, RexBuilder builder) {
  double totalRows = 0;
  double filterRows = totalRows;
  DrillScanRel scan = indexContext.scan;
  if (!(indexContext.scan.getGroupScan() instanceof DbGroupScan)) {
    return;
  }
  IndexConditionInfo.Builder infoBuilder = IndexConditionInfo.newBuilder(condition, collection, builder, indexContext.scan);
  IndexConditionInfo cInfo = infoBuilder.getCollectiveInfo(indexContext);
  boolean isValidIndexHint = infoBuilder.isValidIndexHint(indexContext);
  if (!cInfo.hasIndexCol) {
    logger.info("index_plan_info: No index columns are projected from the scan..continue.");
    return;
  }
  if (cInfo.indexCondition == null) {
    logger.info("index_plan_info: No conditions were found eligible for applying index lookup.");
    return;
  }
  if (!indexContext.indexHint.equals("") && !isValidIndexHint) {
    logger.warn("index_plan_info: Index Hint {} is not useful as index with that name is not available", indexContext.indexHint);
  }
  RexNode indexCondition = cInfo.indexCondition;
  RexNode remainderCondition = cInfo.remainderCondition;
  if (remainderCondition.isAlwaysTrue()) {
    remainderCondition = null;
  }
  logger.debug("index_plan_info: condition split into indexcondition: {} and remaindercondition: {}",
      indexCondition, remainderCondition);
  IndexableExprMarker indexableExprMarker = new IndexableExprMarker(indexContext.scan);
  indexCondition.accept(indexableExprMarker);
  indexContext.origMarker = indexableExprMarker;
  if (scan.getGroupScan() instanceof DbGroupScan) {
    // Initialize statistics
    DbGroupScan dbScan = ((DbGroupScan) scan.getGroupScan());
    if (settings.isStatisticsEnabled()) {
      dbScan.getStatistics().initialize(condition, scan, indexContext);
    }
    totalRows = dbScan.getRowCount(null, scan);
    filterRows = dbScan.getRowCount(condition, scan);
    double sel = filterRows / totalRows;
    if (totalRows != Statistics.ROWCOUNT_UNKNOWN &&
        filterRows != Statistics.ROWCOUNT_UNKNOWN &&
        !settings.isDisableFullTableScan() && !isValidIndexHint &&
        sel > Math.max(settings.getIndexCoveringSelThreshold(), settings.getIndexNonCoveringSelThreshold())) {
      // If the full table scan is not disabled and the selectivity is greater than both the covering and
      // non-covering selectivity thresholds, generate only the full-table-scan plan.
      logger.info("index_plan_info: Skip index planning because filter selectivity: {} is greater than thresholds {}, {}",
          sel, settings.getIndexCoveringSelThreshold(), settings.getIndexNonCoveringSelThreshold());
      return;
    }
  }
  if (totalRows == Statistics.ROWCOUNT_UNKNOWN || totalRows == 0 || filterRows == Statistics.ROWCOUNT_UNKNOWN) {
    logger.warn("index_plan_info: Total row count is UNKNOWN or 0, or filterRows UNKNOWN; skip index planning");
    return;
  }
  List<IndexGroup> coveringIndexes = Lists.newArrayList();
  List<IndexGroup> nonCoveringIndexes = Lists.newArrayList();
  List<IndexGroup> intersectIndexes = Lists.newArrayList();
  // Update the sort expressions in the context; they are needed for computing collation, so do this before the IndexSelector.
  IndexPlanUtils.updateSortExpression(indexContext,
      indexContext.sort != null ? indexContext.sort.collation.getFieldCollations() : null);
  IndexSelector selector = new IndexSelector(indexCondition, remainderCondition, indexContext, collection, builder, totalRows);
  for (IndexDescriptor indexDesc : collection) {
    logger.info("index_plan_info indexDescriptor: {}", indexDesc.toString());
    // check if any of the indexed fields of the index are present in the filter condition
    if (IndexPlanUtils.conditionIndexed(indexableExprMarker, indexDesc) != IndexPlanUtils.ConditionIndexed.NONE) {
      if (isValidIndexHint && !indexContext.indexHint.equals(indexDesc.getIndexName())) {
        logger.info("index_plan_info: Index {} is being discarded due to index Hint", indexDesc.getIndexName());
        continue;
      }
      FunctionalIndexInfo functionInfo = indexDesc.getFunctionalInfo();
      selector.addIndex(indexDesc, IndexPlanUtils.isCoveringIndex(indexContext, functionInfo),
          indexContext.lowerProject != null ? indexContext.lowerProject.getRowType().getFieldCount()
              : scan.getRowType().getFieldCount());
    }
  }
  // get the candidate indexes based on selection
  selector.getCandidateIndexes(infoBuilder, coveringIndexes, nonCoveringIndexes, intersectIndexes);
  if (logger.isDebugEnabled()) {
    StringBuilder strb = new StringBuilder();
    if (coveringIndexes.size() > 0) {
      strb.append("Covering indexes:");
      for (IndexGroup index : coveringIndexes) {
        strb.append(index.getIndexProps().get(0).getIndexDesc().getIndexName()).append(", ");
      }
    }
    if (nonCoveringIndexes.size() > 0) {
      strb.append("Non-covering indexes:");
      for (IndexGroup index : nonCoveringIndexes) {
        strb.append(index.getIndexProps().get(0).getIndexDesc().getIndexName()).append(", ");
      }
    }
    logger.debug("index_plan_info: IndexSelector return: {}", strb.toString());
  }
  GroupScan primaryTableScan = indexContext.scan.getGroupScan();
  // TODO: this logic for intersect should eventually be migrated to the IndexSelector
  if (coveringIndexes.size() == 0 && nonCoveringIndexes.size() > 1) {
    List<IndexDescriptor> indexList = Lists.newArrayList();
    for (IndexGroup index : nonCoveringIndexes) {
      IndexDescriptor indexDesc = index.getIndexProps().get(0).getIndexDesc();
      IndexGroupScan idxScan = indexDesc.getIndexGroupScan();
      // Copy primary table statistics to the index table
      idxScan.setStatistics(((DbGroupScan) primaryTableScan).getStatistics());
      indexList.add(index.getIndexProps().get(0).getIndexDesc());
    }
    Map<IndexDescriptor, IndexConditionInfo> indexInfoMap = infoBuilder.getIndexConditionMap(indexList);
    // no usable index
    if (indexInfoMap == null || indexInfoMap.size() == 0) {
      logger.info("index_plan_info: skipping intersect plan generation as there is no usable index");
      return;
    }
    // Some part of the filter condition needs to be applied on the primary table.
    if (indexInfoMap.size() > 1) {
      logger.info("index_plan_info: intersect plan is generated");
      if (logger.isDebugEnabled()) {
        List<String> indices = new ArrayList<>(nonCoveringIndexes.size());
        for (IndexGroup index : nonCoveringIndexes) {
          indices.add(index.getIndexProps().get(0).getIndexDesc().getIndexName());
        }
        logger.debug("index_plan_info: intersect plan is generated on index list {}", indices);
      }
      boolean intersectPlanGenerated = false;
      // TODO: make sure the smallest selectivity of these indexes times the rowcount is smaller than the broadcast threshold
      for (IndexGroup index : intersectIndexes) {
        List<IndexDescriptor> candidateDesc = Lists.newArrayList();
        for (IndexProperties candProp : index.getIndexProps()) {
          candidateDesc.add(candProp.getIndexDesc());
        }
        Map<IndexDescriptor, IndexConditionInfo> intersectIdxInfoMap = infoBuilder.getIndexConditionMap(candidateDesc);
        IndexIntersectPlanGenerator planGen = new IndexIntersectPlanGenerator(indexContext, intersectIdxInfoMap, builder, settings);
        try {
          planGen.go();
          intersectPlanGenerated = true;
        } catch (Exception e) {
          // If an error occurs while generating intersect plans, continue on to generating non-covering plans
          logger.warn("index_plan_info: Exception while trying to generate intersect index plan", e);
        }
      }
      // If intersect plans are preferred, do not generate further non-covering plans
      if (intersectPlanGenerated && settings.isIndexIntersectPlanPreferred()) {
        return;
      }
    }
  }
  try {
    for (IndexGroup index : coveringIndexes) {
      IndexProperties indexProps = index.getIndexProps().get(0);
      IndexDescriptor indexDesc = indexProps.getIndexDesc();
      IndexGroupScan idxScan = indexDesc.getIndexGroupScan();
      FunctionalIndexInfo indexInfo = indexDesc.getFunctionalInfo();
      indexCondition = indexProps.getLeadingColumnsFilter();
      remainderCondition = indexProps.getTotalRemainderFilter();
      // Copy primary table statistics to the index table
      idxScan.setStatistics(((DbGroupScan) scan.getGroupScan()).getStatistics());
      logger.info("index_plan_info: Generating covering index plan for index: {}, query condition {}",
          indexDesc.getIndexName(), indexCondition.toString());
      CoveringIndexPlanGenerator planGen = new CoveringIndexPlanGenerator(indexContext, indexInfo, idxScan,
          indexCondition, remainderCondition, builder, settings);
      planGen.go();
    }
  } catch (Exception e) {
    logger.warn("Exception while trying to generate covering index plan", e);
  }
  // First, check if the primary table scan supports creating a restricted scan
  if (primaryTableScan instanceof DbGroupScan && (((DbGroupScan) primaryTableScan).supportsRestrictedScan())) {
    try {
      for (IndexGroup index : nonCoveringIndexes) {
        IndexProperties indexProps = index.getIndexProps().get(0);
        IndexDescriptor indexDesc = indexProps.getIndexDesc();
        IndexGroupScan idxScan = indexDesc.getIndexGroupScan();
        indexCondition = indexProps.getLeadingColumnsFilter();
        remainderCondition = indexProps.getTotalRemainderFilter();
        // Copy primary table statistics to the index table
        idxScan.setStatistics(((DbGroupScan) primaryTableScan).getStatistics());
        logger.info("index_plan_info: Generating non-covering index plan for index: {}, query condition {}",
            indexDesc.getIndexName(), indexCondition.toString());
        NonCoveringIndexPlanGenerator planGen = new NonCoveringIndexPlanGenerator(indexContext, indexDesc, idxScan,
            indexCondition, remainderCondition, builder, settings);
        planGen.go();
      }
    } catch (Exception e) {
      logger.warn("Exception while trying to generate non-covering index access plan", e);
    }
  }
}
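Before any generator runs, the rule applies a selectivity gate. With hypothetical numbers: if the table has totalRows = 1,000,000 and the filter keeps filterRows = 400,000, then sel = 0.4; if both thresholds were, say, 0.05, index planning would be skipped and only the full-table-scan plan would survive. Restated as a small sketch (the calls are the ones used above, the numbers in the comments are illustrative only):

// Sketch of the selectivity gate; the example values in the comments are hypothetical.
double totalRows = dbScan.getRowCount(null, scan);        // e.g. 1,000,000
double filterRows = dbScan.getRowCount(condition, scan);  // e.g. 400,000
double sel = filterRows / totalRows;                      // 0.4
boolean skipIndexPlanning =
    totalRows != Statistics.ROWCOUNT_UNKNOWN
    && filterRows != Statistics.ROWCOUNT_UNKNOWN
    && !settings.isDisableFullTableScan()
    && sel > Math.max(settings.getIndexCoveringSelThreshold(),
                      settings.getIndexNonCoveringSelThreshold());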
Use of org.apache.drill.exec.physical.base.IndexGroupScan in project drill by apache: class DrillIndexDescriptor, method getIndexGroupScan.
@Override
public IndexGroupScan getIndexGroupScan() {
  try {
    final DrillTable idxTable = getDrillTable();
    GroupScan scan = idxTable.getGroupScan();
    if (!(scan instanceof IndexGroupScan)) {
      logger.error("The Groupscan from table {} is not an IndexGroupScan", idxTable.toString());
      return null;
    }
    return (IndexGroupScan) scan;
  } catch (IOException e) {
    logger.error("Error in getIndexGroupScan ", e);
  }
  return null;
}
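Because getIndexGroupScan can return null (when the index table's group scan is not an IndexGroupScan, or when the table lookup throws an IOException), callers such as DbScanToIndexScanPrule above would need to guard for it before copying statistics. A hedged usage sketch; the variable names follow the listing above, and the explicit null-handling branch is an assumption rather than code shown in the rule:

// Assumed guard around the calls used in DbScanToIndexScanPrule above; not Drill source.
IndexGroupScan idxScan = indexDesc.getIndexGroupScan();
if (idxScan == null) {
  logger.info("index_plan_info: skipping index {} because no IndexGroupScan is available", indexDesc.getIndexName());
} else {
  // cost the index scan with the primary table's statistics
  idxScan.setStatistics(((DbGroupScan) primaryTableScan).getStatistics());
}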