Use of org.apache.drill.exec.planner.physical.PlannerSettings in project drill by axbaretto.
From the class HivePushPartitionFilterIntoScan, the method getFilterOnScan:
public static final StoragePluginOptimizerRule getFilterOnScan(OptimizerRulesContext optimizerRulesContext, final String defaultPartitionValue) {
  return new PruneScanRule(RelOptHelper.some(DrillFilterRel.class, RelOptHelper.any(DrillScanRel.class)), "HivePushPartitionFilterIntoScan:Filter_On_Scan_Hive", optimizerRulesContext) {

    @Override
    public PartitionDescriptor getPartitionDescriptor(PlannerSettings settings, TableScan scanRel) {
      return new HivePartitionDescriptor(settings, (DrillScanRel) scanRel, getOptimizerRulesContext().getManagedBuffer(), defaultPartitionValue);
    }

    @Override
    public boolean matches(RelOptRuleCall call) {
      final DrillScanRel scan = (DrillScanRel) call.rel(1);
      GroupScan groupScan = scan.getGroupScan();
      // this rule is applicable only for Hive based partition pruning
      if (PrelUtil.getPlannerSettings(scan.getCluster().getPlanner()).isHepPartitionPruningEnabled()) {
        return groupScan instanceof HiveScan && groupScan.supportsPartitionFilterPushdown() && !scan.partitionFilterPushdown();
      } else {
        return groupScan instanceof HiveScan && groupScan.supportsPartitionFilterPushdown();
      }
    }

    @Override
    public void onMatch(RelOptRuleCall call) {
      final DrillFilterRel filterRel = call.rel(0);
      final DrillScanRel scanRel = call.rel(1);
      doOnMatch(call, filterRel, null, scanRel);
    }
  };
}
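For context, here is a minimal sketch of how a storage plugin could hand this rule to the planner. The method shape follows Drill's storage-plugin optimizer-rule hook, but the rule-set construction and the "__HIVE_DEFAULT_PARTITION__" literal are assumptions for illustration, not taken from the snippet above:

// Hypothetical wiring inside a Hive storage plugin; exact hook names may differ by Drill version.
public Set<StoragePluginOptimizerRule> getPhysicalOptimizerRules(OptimizerRulesContext optimizerRulesContext) {
  // "__HIVE_DEFAULT_PARTITION__" is Hive's conventional default-partition name (an assumption here).
  return ImmutableSet.of(
      HivePushPartitionFilterIntoScan.getFilterOnScan(optimizerRulesContext, "__HIVE_DEFAULT_PARTITION__"));
}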
Use of org.apache.drill.exec.planner.physical.PlannerSettings in project drill by axbaretto.
From the class ParquetPruneScanRule, the method getFilterOnProjectParquet:
public static final RelOptRule getFilterOnProjectParquet(OptimizerRulesContext optimizerRulesContext) {
  return new PruneScanRule(RelOptHelper.some(DrillFilterRel.class, RelOptHelper.some(DrillProjectRel.class, RelOptHelper.any(DrillScanRel.class))), "PruneScanRule:Filter_On_Project_Parquet", optimizerRulesContext) {

    @Override
    public PartitionDescriptor getPartitionDescriptor(PlannerSettings settings, TableScan scanRel) {
      return new ParquetPartitionDescriptor(settings, (DrillScanRel) scanRel);
    }

    @Override
    public boolean matches(RelOptRuleCall call) {
      final DrillScanRel scan = call.rel(2);
      GroupScan groupScan = scan.getGroupScan();
      // this rule is applicable only for parquet based partition pruning
      if (PrelUtil.getPlannerSettings(scan.getCluster().getPlanner()).isHepPartitionPruningEnabled()) {
        return groupScan instanceof ParquetGroupScan && groupScan.supportsPartitionFilterPushdown() && !scan.partitionFilterPushdown();
      } else {
        return groupScan instanceof ParquetGroupScan && groupScan.supportsPartitionFilterPushdown();
      }
    }

    @Override
    public void onMatch(RelOptRuleCall call) {
      final DrillFilterRel filterRel = call.rel(0);
      final DrillProjectRel projectRel = call.rel(1);
      final DrillScanRel scanRel = call.rel(2);
      doOnMatch(call, filterRel, projectRel, scanRel);
    }
  };
}
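The only structural difference from the Hive variant above is the three-operand pattern Filter(Project(Scan)), which is why matches() and onMatch() read the scan at call.rel(2). As a rough, hedged sketch, a rule like this could be exercised in isolation with Calcite's HepPlanner; inputRel and optimizerRulesContext are assumed to already exist:

// Sketch only: drive the rule over a Filter -> Project -> Scan tree with a HepPlanner.
HepProgramBuilder builder = new HepProgramBuilder();
builder.addRuleInstance(ParquetPruneScanRule.getFilterOnProjectParquet(optimizerRulesContext));
HepPlanner hepPlanner = new HepPlanner(builder.build());
hepPlanner.setRoot(inputRel);      // the Filter(Project(Scan)) plan fragment to prune
RelNode pruned = hepPlanner.findBestExp();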
Use of org.apache.drill.exec.planner.physical.PlannerSettings in project drill by axbaretto.
From the class PruneScanRule, the method doOnMatch:
protected void doOnMatch(RelOptRuleCall call, Filter filterRel, Project projectRel, TableScan scanRel) {
  final String pruningClassName = getClass().getName();
  logger.info("Beginning partition pruning, pruning class: {}", pruningClassName);
  Stopwatch totalPruningTime = Stopwatch.createStarted();

  final PlannerSettings settings = PrelUtil.getPlannerSettings(call.getPlanner());
  PartitionDescriptor descriptor = getPartitionDescriptor(settings, scanRel);
  final BufferAllocator allocator = optimizerContext.getAllocator();

  final Object selection = getDrillTable(scanRel).getSelection();
  MetadataContext metaContext = null;
  if (selection instanceof FormatSelection) {
    metaContext = ((FormatSelection) selection).getSelection().getMetaContext();
  }

  RexNode condition;
  if (projectRel == null) {
    condition = filterRel.getCondition();
  } else {
    // get the filter as if it were below the projection.
    condition = RelOptUtil.pushFilterPastProject(filterRel.getCondition(), projectRel);
  }

  RewriteAsBinaryOperators visitor = new RewriteAsBinaryOperators(true, filterRel.getCluster().getRexBuilder());
  condition = condition.accept(visitor);

  Map<Integer, String> fieldNameMap = Maps.newHashMap();
  List<String> fieldNames = scanRel.getRowType().getFieldNames();
  BitSet columnBitset = new BitSet();
  BitSet partitionColumnBitSet = new BitSet();
  Map<Integer, Integer> partitionMap = Maps.newHashMap();
  int relColIndex = 0;
  for (String field : fieldNames) {
    final Integer partitionIndex = descriptor.getIdIfValid(field);
    if (partitionIndex != null) {
      fieldNameMap.put(partitionIndex, field);
      partitionColumnBitSet.set(partitionIndex);
      columnBitset.set(relColIndex);
      // mapping between the relColIndex and partitionIndex
      partitionMap.put(relColIndex, partitionIndex);
    }
    relColIndex++;
  }

  if (partitionColumnBitSet.isEmpty()) {
    logger.info("No partition columns are projected from the scan; continuing. Total pruning elapsed time: {} ms", totalPruningTime.elapsed(TimeUnit.MILLISECONDS));
    setPruneStatus(metaContext, PruneStatus.NOT_PRUNED);
    return;
  }
  // stop watch to track how long we spend in different phases of pruning
  Stopwatch miscTimer = Stopwatch.createUnstarted();

  // track how long we spend building the filter tree
  miscTimer.start();
  FindPartitionConditions c = new FindPartitionConditions(columnBitset, filterRel.getCluster().getRexBuilder());
  c.analyze(condition);
  RexNode pruneCondition = c.getFinalCondition();
  BitSet referencedDirsBitSet = c.getReferencedDirs();
  logger.info("Total elapsed time to build and analyze filter tree: {} ms", miscTimer.elapsed(TimeUnit.MILLISECONDS));
  miscTimer.reset();

  if (pruneCondition == null) {
    logger.info("No conditions were found eligible for partition pruning. Total pruning elapsed time: {} ms", totalPruningTime.elapsed(TimeUnit.MILLISECONDS));
    setPruneStatus(metaContext, PruneStatus.NOT_PRUNED);
    return;
  }
  // set up the partitions
  List<PartitionLocation> newPartitions = Lists.newArrayList();
  // total number of partitions
  long numTotal = 0;
  int batchIndex = 0;
  PartitionLocation firstLocation = null;
  LogicalExpression materializedExpr = null;
  String[] spInfo = null;
  int maxIndex = -1;
  BitSet matchBitSet = new BitSet();

  // Outer loop: iterate over a list of batches of PartitionLocations
  for (List<PartitionLocation> partitions : descriptor) {
    numTotal += partitions.size();
    logger.debug("Evaluating partition pruning for batch {}", batchIndex);
    if (batchIndex == 0) {
      // save the first location in case everything is pruned
      firstLocation = partitions.get(0);
    }
    final NullableBitVector output = new NullableBitVector(MaterializedField.create("", Types.optional(MinorType.BIT)), allocator);
    final VectorContainer container = new VectorContainer();

    try {
      final ValueVector[] vectors = new ValueVector[descriptor.getMaxHierarchyLevel()];
      for (int partitionColumnIndex : BitSets.toIter(partitionColumnBitSet)) {
        SchemaPath column = SchemaPath.getSimplePath(fieldNameMap.get(partitionColumnIndex));
        MajorType type = descriptor.getVectorType(column, settings);
        MaterializedField field = MaterializedField.create(column.getLastSegment().getNameSegment().getPath(), type);
        ValueVector v = TypeHelper.getNewVector(field, allocator);
        v.allocateNew();
        vectors[partitionColumnIndex] = v;
        container.add(v);
      }

      // track how long we spend populating partition column vectors
      miscTimer.start();
      // populate partition vectors.
      descriptor.populatePartitionVectors(vectors, partitions, partitionColumnBitSet, fieldNameMap);
      logger.info("Elapsed time to populate partitioning column vectors: {} ms within batchIndex: {}", miscTimer.elapsed(TimeUnit.MILLISECONDS), batchIndex);
      miscTimer.reset();

      // materialize the expression; only need to do this once
      if (batchIndex == 0) {
        materializedExpr = materializePruneExpr(pruneCondition, settings, scanRel, container);
        if (materializedExpr == null) {
          // continue without partition pruning; no need to log anything here since
          // materializePruneExpr logs it already
          logger.info("Total pruning elapsed time: {} ms", totalPruningTime.elapsed(TimeUnit.MILLISECONDS));
          setPruneStatus(metaContext, PruneStatus.NOT_PRUNED);
          return;
        }
      }

      output.allocateNew(partitions.size());

      // start the timer to evaluate how long we spend in the interpreter evaluation
      miscTimer.start();
      InterpreterEvaluator.evaluate(partitions.size(), optimizerContext, container, output, materializedExpr);
      logger.info("Elapsed time in interpreter evaluation: {} ms within batchIndex: {} with # of partitions : {}", miscTimer.elapsed(TimeUnit.MILLISECONDS), batchIndex, partitions.size());
      miscTimer.reset();

      int recordCount = 0;
      int qualifiedCount = 0;

      // apply single partition check only for composite partitions
      if (descriptor.supportsMetadataCachePruning() && partitions.get(0).isCompositePartition()) {
        // Inner loop: within each batch iterate over the PartitionLocations
        for (PartitionLocation part : partitions) {
          assert part.isCompositePartition();
          if (!output.getAccessor().isNull(recordCount) && output.getAccessor().get(recordCount) == 1) {
            newPartitions.add(part);
            // Rather than using the PartitionLocation, get the array of partition values for the directories that are
            // referenced by the filter since we are not interested in directory references in other parts of the query.
            Pair<String[], Integer> p = composePartition(referencedDirsBitSet, partitionMap, vectors, recordCount);
            String[] parts = p.getLeft();
            int tmpIndex = p.getRight();
            maxIndex = Math.max(maxIndex, tmpIndex);
            if (spInfo == null) {
              // initialization
              spInfo = parts;
              for (int j = 0; j <= tmpIndex; j++) {
                if (parts[j] != null) {
                  matchBitSet.set(j);
                }
              }
            } else {
              // compare the new partition with existing partition
              for (int j = 0; j <= tmpIndex; j++) {
                if (parts[j] == null || spInfo[j] == null) {
                  // nulls don't match
                  matchBitSet.clear(j);
                } else if (!parts[j].equals(spInfo[j])) {
                  matchBitSet.clear(j);
                }
              }
            }
            qualifiedCount++;
          }
          recordCount++;
        }
      } else {
        // Inner loop: within each batch iterate over the PartitionLocations
        for (PartitionLocation part : partitions) {
          if (!output.getAccessor().isNull(recordCount) && output.getAccessor().get(recordCount) == 1) {
            newPartitions.add(part);
            qualifiedCount++;
          }
          recordCount++;
        }
      }
      logger.debug("Within batch {}: total records: {}, qualified records: {}", batchIndex, recordCount, qualifiedCount);
      batchIndex++;
    } catch (Exception e) {
      logger.warn("Exception while trying to prune partition.", e);
      logger.info("Total pruning elapsed time: {} ms", totalPruningTime.elapsed(TimeUnit.MILLISECONDS));
      setPruneStatus(metaContext, PruneStatus.NOT_PRUNED);
      // continue without partition pruning
      return;
    } finally {
      container.clear();
      if (output != null) {
        output.clear();
      }
    }
  }
  try {
    if (newPartitions.size() == numTotal) {
      logger.info("No partitions were eligible for pruning");
      return;
    }

    // handle the case where all partitions are filtered out.
    boolean canDropFilter = true;
    boolean wasAllPartitionsPruned = false;
    String cacheFileRoot = null;

    if (newPartitions.isEmpty()) {
      assert firstLocation != null;
      // Add the first non-composite partition location, since execution requires a schema.
      // In such a case, we should not drop the filter.
      newPartitions.add(firstLocation.getPartitionLocationRecursive().get(0));
      canDropFilter = false;
      // NOTE: with DRILL-4530, the PruneScanRule may be called with only a list of
      // directories first and the non-composite partition location will still return
      // directories, not files. So, additional processing is done depending on this flag.
      wasAllPartitionsPruned = true;
      logger.info("All {} partitions were pruned; added back a single partition to allow creating a schema", numTotal);

      // set the cacheFileRoot appropriately
      if (firstLocation.isCompositePartition()) {
        cacheFileRoot = descriptor.getBaseTableLocation() + firstLocation.getCompositePartitionPath();
      }
    }

    logger.info("Pruned {} partitions down to {}", numTotal, newPartitions.size());

    List<RexNode> conjuncts = RelOptUtil.conjunctions(condition);
    List<RexNode> pruneConjuncts = RelOptUtil.conjunctions(pruneCondition);
    conjuncts.removeAll(pruneConjuncts);
    RexNode newCondition = RexUtil.composeConjunction(filterRel.getCluster().getRexBuilder(), conjuncts, false);

    RewriteCombineBinaryOperators reverseVisitor = new RewriteCombineBinaryOperators(true, filterRel.getCluster().getRexBuilder());
    condition = condition.accept(reverseVisitor);
    pruneCondition = pruneCondition.accept(reverseVisitor);

    if (descriptor.supportsMetadataCachePruning() && !wasAllPartitionsPruned) {
      // if the metadata cache file could potentially be used, then assign a proper cacheFileRoot
      int index = -1;
      if (!matchBitSet.isEmpty()) {
        StringBuilder path = new StringBuilder();
        index = matchBitSet.length() - 1;
        for (int j = 0; j < matchBitSet.length(); j++) {
          if (!matchBitSet.get(j)) {
            // stop at the first index with no match and use the immediately
            // previous index
            index = j - 1;
            break;
          }
        }
        for (int j = 0; j <= index; j++) {
          path.append("/").append(spInfo[j]);
        }
        cacheFileRoot = descriptor.getBaseTableLocation() + path.toString();
      }
      if (index != maxIndex) {
        // if multiple partitions are being selected, we should not drop the filter
        // since we are reading the cache file at a parent/ancestor level
        canDropFilter = false;
      }
    }

    RelNode inputRel = descriptor.supportsMetadataCachePruning()
        ? descriptor.createTableScan(newPartitions, cacheFileRoot, wasAllPartitionsPruned, metaContext)
        : descriptor.createTableScan(newPartitions, wasAllPartitionsPruned);

    if (projectRel != null) {
      inputRel = projectRel.copy(projectRel.getTraitSet(), Collections.singletonList(inputRel));
    }

    if (newCondition.isAlwaysTrue() && canDropFilter) {
      call.transformTo(inputRel);
    } else {
      final RelNode newFilter = filterRel.copy(filterRel.getTraitSet(), Collections.singletonList(inputRel));
      call.transformTo(newFilter);
    }

    setPruneStatus(metaContext, PruneStatus.PRUNED);
  } catch (Exception e) {
    logger.warn("Exception while using the pruned partitions.", e);
  } finally {
    logger.info("Total pruning elapsed time: {} ms", totalPruningTime.elapsed(TimeUnit.MILLISECONDS));
  }
}
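The spInfo/matchBitSet bookkeeping in the composite-partition branch computes the deepest directory prefix shared by every qualifying partition; that prefix later becomes cacheFileRoot. A standalone toy version of that comparison, with invented data, may make the intent clearer:

// Toy illustration (invented data): two qualifying partitions with year/month directory values.
String[][] qualifying = { {"2021", "01"}, {"2021", "02"} };
String[] spInfo = qualifying[0];
BitSet matchBitSet = new BitSet();
for (int j = 0; j < spInfo.length; j++) {
  if (spInfo[j] != null) {
    matchBitSet.set(j);            // initialize from the first qualifying partition
  }
}
for (int i = 1; i < qualifying.length; i++) {
  for (int j = 0; j < spInfo.length; j++) {
    if (qualifying[i][j] == null || !qualifying[i][j].equals(spInfo[j])) {
      matchBitSet.clear(j);        // this directory level differs, so it cannot be shared
    }
  }
}
// Only bit 0 survives: both partitions sit under "/2021", so the metadata cache
// root would be <baseTableLocation>/2021; and because the shared prefix stops above
// the deepest selected level (maxIndex), the filter must be kept (canDropFilter = false).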
Use of org.apache.drill.exec.planner.physical.PlannerSettings in project drill by axbaretto.
From the class DrillReduceAggregatesRule, the method reduceSum:
private RexNode reduceSum(Aggregate oldAggRel, AggregateCall oldCall, List<AggregateCall> newCalls, Map<AggregateCall, RexNode> aggCallMapping) {
  final PlannerSettings plannerSettings = (PlannerSettings) oldAggRel.getCluster().getPlanner().getContext();
  final boolean isInferenceEnabled = plannerSettings.isTypeInferenceEnabled();
  final int nGroups = oldAggRel.getGroupCount();
  RelDataTypeFactory typeFactory = oldAggRel.getCluster().getTypeFactory();
  RexBuilder rexBuilder = oldAggRel.getCluster().getRexBuilder();
  int arg = oldCall.getArgList().get(0);
  RelDataType argType = getFieldType(oldAggRel.getInput(), arg);

  final RelDataType sumType;
  final SqlAggFunction sumZeroAgg;
  if (isInferenceEnabled) {
    sumType = oldCall.getType();
  } else {
    sumType = typeFactory.createTypeWithNullability(oldCall.getType(), argType.isNullable());
  }
  sumZeroAgg = new DrillCalciteSqlAggFunctionWrapper(new SqlSumEmptyIsZeroAggFunction(), sumType);

  AggregateCall sumZeroCall = AggregateCall.create(sumZeroAgg, oldCall.isDistinct(), oldCall.isApproximate(), oldCall.getArgList(), -1, sumType, null);
  final SqlCountAggFunction countAgg = (SqlCountAggFunction) SqlStdOperatorTable.COUNT;
  final RelDataType countType = countAgg.getReturnType(typeFactory);
  AggregateCall countCall = AggregateCall.create(countAgg, oldCall.isDistinct(), oldCall.isApproximate(), oldCall.getArgList(), -1, countType, null);

  // NOTE: these references are with respect to the output
  // of newAggRel
  RexNode sumZeroRef = rexBuilder.addAggCall(sumZeroCall, nGroups, oldAggRel.indicator, newCalls, aggCallMapping, ImmutableList.of(argType));
  if (!oldCall.getType().isNullable()) {
    // If SUM(x) is not nullable, the validator must have determined that
    // nulls are impossible (because the group is never empty and x is never
    // null). Therefore we translate to SUM0(x).
    return sumZeroRef;
  }
  RexNode countRef = rexBuilder.addAggCall(countCall, nGroups, oldAggRel.indicator, newCalls, aggCallMapping, ImmutableList.of(argType));
  return rexBuilder.makeCall(SqlStdOperatorTable.CASE, rexBuilder.makeCall(SqlStdOperatorTable.EQUALS, countRef, rexBuilder.makeExactLiteral(BigDecimal.ZERO)), rexBuilder.constantNull(), sumZeroRef);
}
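In SQL terms, the rewrite this method produces for a nullable SUM is, paraphrasing the RexBuilder calls above: SUM(x) becomes CASE WHEN COUNT(x) = 0 THEN NULL ELSE $SUM0(x) END, where $SUM0 is the sum-empty-is-zero aggregate. When the original SUM is non-nullable, the CASE wrapper is unnecessary and the $SUM0 reference is returned directly.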
Use of org.apache.drill.exec.planner.physical.PlannerSettings in project drill by axbaretto.
From the class PlanningBase, the method testSqlPlan:
protected void testSqlPlan(String sqlCommands) throws Exception {
  final DrillbitContext dbContext = mock(DrillbitContext.class);
  final QueryContext context = mock(QueryContext.class);
  final String[] sqlStrings = sqlCommands.split(";");
  final LocalPersistentStoreProvider provider = new LocalPersistentStoreProvider(config);
  provider.start();
  final ScanResult scanResult = ClassPathScanner.fromPrescan(config);
  final LogicalPlanPersistence logicalPlanPersistence = new LogicalPlanPersistence(config, scanResult);
  final SystemOptionManager systemOptions = new SystemOptionManager(logicalPlanPersistence, provider, config);
  systemOptions.init();
  @SuppressWarnings("resource")
  final UserSession userSession = UserSession.Builder.newBuilder().withOptionManager(systemOptions).build();
  final SessionOptionManager sessionOptions = userSession.getOptions();
  final QueryOptionManager queryOptions = new QueryOptionManager(sessionOptions);
  final ExecutionControls executionControls = new ExecutionControls(queryOptions, DrillbitEndpoint.getDefaultInstance());

  when(dbContext.getMetrics()).thenReturn(new MetricRegistry());
  when(dbContext.getAllocator()).thenReturn(allocator);
  when(dbContext.getConfig()).thenReturn(config);
  when(dbContext.getOptionManager()).thenReturn(systemOptions);
  when(dbContext.getStoreProvider()).thenReturn(provider);
  when(dbContext.getClasspathScan()).thenReturn(scanResult);
  when(dbContext.getLpPersistence()).thenReturn(logicalPlanPersistence);

  final StoragePluginRegistry registry = new StoragePluginRegistryImpl(dbContext);
  registry.init();
  final FunctionImplementationRegistry functionRegistry = new FunctionImplementationRegistry(config);
  final DrillOperatorTable table = new DrillOperatorTable(functionRegistry, systemOptions);
  final SchemaPlus root = CalciteSchema.createRootSchema(false, false).plus();
  registry.getSchemaFactory().registerSchemas(SchemaConfig.newBuilder("foo", context).build(), root);

  when(context.getNewDefaultSchema()).thenReturn(root);
  when(context.getLpPersistence()).thenReturn(new LogicalPlanPersistence(config, ClassPathScanner.fromPrescan(config)));
  when(context.getStorage()).thenReturn(registry);
  when(context.getFunctionRegistry()).thenReturn(functionRegistry);
  when(context.getSession()).thenReturn(UserSession.Builder.newBuilder().withOptionManager(sessionOptions).setSupportComplexTypes(true).build());
  when(context.getCurrentEndpoint()).thenReturn(DrillbitEndpoint.getDefaultInstance());
  when(context.getActiveEndpoints()).thenReturn(ImmutableList.of(DrillbitEndpoint.getDefaultInstance()));
  when(context.getPlannerSettings()).thenReturn(new PlannerSettings(queryOptions, functionRegistry));
  when(context.getOptions()).thenReturn(queryOptions);
  when(context.getConfig()).thenReturn(config);
  when(context.getDrillOperatorTable()).thenReturn(table);
  when(context.getAllocator()).thenReturn(allocator);
  when(context.getExecutionControls()).thenReturn(executionControls);
  when(context.getLpPersistence()).thenReturn(logicalPlanPersistence);

  for (final String sql : sqlStrings) {
    if (sql.trim().isEmpty()) {
      continue;
    }
    @SuppressWarnings("unused")
    final PhysicalPlan p = DrillSqlWorker.getPlan(context, sql);
  }
}
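A hedged example of how a concrete test might drive this helper; the test-method name is invented, and cp.`employee.json` is a classpath table commonly used in Drill's test resources (an assumption here):

// Hypothetical subclass of PlanningBase exercising the planner end to end.
@Test
public void testSimpleProjectPlan() throws Exception {
  // Each semicolon-separated statement is planned via DrillSqlWorker.getPlan().
  testSqlPlan("SELECT employee_id FROM cp.`employee.json` LIMIT 5;");
}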