Use of org.apache.drill.exec.planner.physical.PlannerSettings in project drill by apache.
The class ConvertHiveMapRDBJsonScanToDrillMapRDBJsonScan, method onMatch.
@Override
public void onMatch(RelOptRuleCall call) {
try {
DrillScanRel hiveScanRel = call.rel(0);
PlannerSettings settings = PrelUtil.getPlannerSettings(call.getPlanner());
HiveScan hiveScan = (HiveScan) hiveScanRel.getGroupScan();
HiveReadEntry hiveReadEntry = hiveScan.getHiveReadEntry();
HiveMetadataProvider hiveMetadataProvider = new HiveMetadataProvider(hiveScan.getUserName(), hiveReadEntry, hiveScan.getHiveConf());
if (hiveMetadataProvider.getInputSplits(hiveReadEntry).isEmpty()) {
// table is empty, use original scan
return;
}
if (hiveScan.getHiveReadEntry().getTable().isSetPartitionKeys()) {
logger.warn("Hive MapR-DB JSON Handler doesn't support table partitioning. Consider recreating table without " + "partitions");
}
DrillScanRel nativeScanRel = createNativeScanRel(hiveScanRel, settings);
call.transformTo(nativeScanRel);
/*
Drill native scan should take precedence over Hive since it's more efficient and faster.
Hive does not always give correct costing (e.g. for external tables Hive does not have the number of rows
and we estimate it approximately). On the contrary, Drill calculates the number of rows exactly,
so the Hive scan could be chosen over the Drill native scan because its cost is allegedly lower.
To ensure the Drill MapR-DB JSON scan is chosen, reduce the Hive scan's importance to 0.
*/
call.getPlanner().setImportance(hiveScanRel, 0.0);
} catch (Exception e) {
// TODO: Improve error handling after allowing to throw IOException from StoragePlugin.getFormatPlugin()
logger.warn("Failed to convert HiveScan to JsonScanSpec. Fallback to HiveMapR-DB connector.", e);
}
}
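The setImportance call at the end is what actually tips the planner toward the converted scan. Below is a minimal sketch of that pattern against the generic Calcite planner API; the class and method names are illustrative, and Drill's real rule registration goes through its own planner phases rather than a helper like this.

import org.apache.calcite.plan.RelOptPlanner;
import org.apache.calcite.plan.RelOptRule;
import org.apache.calcite.rel.RelNode;

public final class PreferConvertedScanSketch {

    private PreferConvertedScanSketch() {
    }

    // Register the conversion rule, then zero out the original rel's importance
    // so the planner stops exploring plans rooted at it.
    public static void preferConverted(RelOptPlanner planner, RelOptRule conversionRule, RelNode originalScan) {
        planner.addRule(conversionRule);
        planner.setImportance(originalScan, 0.0);
    }
}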
Use of org.apache.drill.exec.planner.physical.PlannerSettings in project drill by apache.
The class DrillConstExecutor, method reduce.
@Override
@SuppressWarnings("deprecation")
public void reduce(RexBuilder rexBuilder, List<RexNode> constExps, List<RexNode> reducedValues) {
for (RexNode newCall : constExps) {
LogicalExpression logEx = DrillOptiq.toDrill(new DrillParseContext(plannerSettings), (RelNode) null /* input rel */, newCall);
ErrorCollectorImpl errors = new ErrorCollectorImpl();
LogicalExpression materializedExpr = ExpressionTreeMaterializer.materialize(logEx, null, errors, funcImplReg);
if (errors.getErrorCount() != 0) {
String message = String.format("Failure while materializing expression in constant expression evaluator [%s]. Errors: %s", newCall.toString(), errors.toString());
throw UserException.planError().message(message).build(logger);
}
if (NON_REDUCIBLE_TYPES.contains(materializedExpr.getMajorType().getMinorType())) {
logger.debug("Constant expression not folded due to return type {}, complete expression: {}", materializedExpr.getMajorType(), ExpressionStringBuilder.toString(materializedExpr));
reducedValues.add(newCall);
continue;
}
ValueHolder output = InterpreterEvaluator.evaluateConstantExpr(udfUtilities, materializedExpr);
RelDataTypeFactory typeFactory = rexBuilder.getTypeFactory();
if (materializedExpr.getMajorType().getMode() == TypeProtos.DataMode.OPTIONAL && TypeHelper.isNull(output)) {
SqlTypeName sqlTypeName = TypeInferenceUtils.getCalciteTypeFromDrillType(materializedExpr.getMajorType().getMinorType());
if (sqlTypeName == null) {
String message = String.format("Error reducing constant expression, unsupported type: %s.", materializedExpr.getMajorType().getMinorType());
throw UserException.unsupportedError().message(message).build(logger);
}
RelDataType type = TypeInferenceUtils.createCalciteTypeWithNullability(typeFactory, sqlTypeName, true);
reducedValues.add(rexBuilder.makeNullLiteral(type));
continue;
}
Function<ValueHolder, RexNode> literator = valueHolder -> {
switch(materializedExpr.getMajorType().getMinorType()) {
case INT:
{
int value = (materializedExpr.getMajorType().getMode() == TypeProtos.DataMode.OPTIONAL) ? ((NullableIntHolder) valueHolder).value : ((IntHolder) valueHolder).value;
return rexBuilder.makeLiteral(new BigDecimal(value), TypeInferenceUtils.createCalciteTypeWithNullability(typeFactory, SqlTypeName.INTEGER, newCall.getType().isNullable()), false);
}
case BIGINT:
{
long value = (materializedExpr.getMajorType().getMode() == TypeProtos.DataMode.OPTIONAL) ? ((NullableBigIntHolder) valueHolder).value : ((BigIntHolder) valueHolder).value;
return rexBuilder.makeLiteral(new BigDecimal(value), TypeInferenceUtils.createCalciteTypeWithNullability(typeFactory, SqlTypeName.BIGINT, newCall.getType().isNullable()), false);
}
case FLOAT4:
{
float value = (materializedExpr.getMajorType().getMode() == TypeProtos.DataMode.OPTIONAL) ? ((NullableFloat4Holder) valueHolder).value : ((Float4Holder) valueHolder).value;
// Special handling for NaN and Infinity: BigDecimal cannot represent them.
if (!Float.isFinite(value)) {
return rexBuilder.makeLiteral(Float.toString(value));
}
return rexBuilder.makeLiteral(new BigDecimal(value), TypeInferenceUtils.createCalciteTypeWithNullability(typeFactory, SqlTypeName.FLOAT, newCall.getType().isNullable()), false);
}
case FLOAT8:
{
double value = (materializedExpr.getMajorType().getMode() == TypeProtos.DataMode.OPTIONAL) ? ((NullableFloat8Holder) valueHolder).value : ((Float8Holder) valueHolder).value;
// Special handling for NaN and Infinity: BigDecimal cannot represent them.
if (!Double.isFinite(value)) {
return rexBuilder.makeLiteral(Double.toString(value));
}
return rexBuilder.makeLiteral(new BigDecimal(value), TypeInferenceUtils.createCalciteTypeWithNullability(typeFactory, SqlTypeName.DOUBLE, newCall.getType().isNullable()), false);
}
case VARCHAR:
{
String value = (materializedExpr.getMajorType().getMode() == TypeProtos.DataMode.OPTIONAL) ? StringFunctionHelpers.getStringFromVarCharHolder((NullableVarCharHolder) valueHolder) : StringFunctionHelpers.getStringFromVarCharHolder((VarCharHolder) valueHolder);
RelDataType type = typeFactory.createSqlType(SqlTypeName.VARCHAR, newCall.getType().getPrecision());
RelDataType typeWithNullability = typeFactory.createTypeWithNullability(type, newCall.getType().isNullable());
return rexBuilder.makeLiteral(value, typeWithNullability, false);
}
case BIT:
{
boolean value = (materializedExpr.getMajorType().getMode() == TypeProtos.DataMode.OPTIONAL) ? ((NullableBitHolder) valueHolder).value == 1 : ((BitHolder) valueHolder).value == 1;
return rexBuilder.makeLiteral(value, TypeInferenceUtils.createCalciteTypeWithNullability(typeFactory, SqlTypeName.BOOLEAN, newCall.getType().isNullable()), false);
}
case DATE:
{
Calendar value = (materializedExpr.getMajorType().getMode() == TypeProtos.DataMode.OPTIONAL) ? new DateTime(((NullableDateHolder) valueHolder).value, DateTimeZone.UTC).toCalendar(null) : new DateTime(((DateHolder) valueHolder).value, DateTimeZone.UTC).toCalendar(null);
return rexBuilder.makeLiteral(DateString.fromCalendarFields(value), TypeInferenceUtils.createCalciteTypeWithNullability(typeFactory, SqlTypeName.DATE, newCall.getType().isNullable()), false);
}
case DECIMAL9:
{
long value;
int scale;
if (materializedExpr.getMajorType().getMode() == TypeProtos.DataMode.OPTIONAL) {
NullableDecimal9Holder decimal9Out = (NullableDecimal9Holder) valueHolder;
value = decimal9Out.value;
scale = decimal9Out.scale;
} else {
Decimal9Holder decimal9Out = (Decimal9Holder) valueHolder;
value = decimal9Out.value;
scale = decimal9Out.scale;
}
return rexBuilder.makeLiteral(new BigDecimal(BigInteger.valueOf(value), scale), TypeInferenceUtils.createCalciteTypeWithNullability(typeFactory, SqlTypeName.DECIMAL, newCall.getType().isNullable()), false);
}
case DECIMAL18:
{
long value;
int scale;
if (materializedExpr.getMajorType().getMode() == TypeProtos.DataMode.OPTIONAL) {
NullableDecimal18Holder decimal18Out = (NullableDecimal18Holder) valueHolder;
value = decimal18Out.value;
scale = decimal18Out.scale;
} else {
Decimal18Holder decimal18Out = (Decimal18Holder) valueHolder;
value = decimal18Out.value;
scale = decimal18Out.scale;
}
return rexBuilder.makeLiteral(new BigDecimal(BigInteger.valueOf(value), scale), TypeInferenceUtils.createCalciteTypeWithNullability(typeFactory, SqlTypeName.DECIMAL, newCall.getType().isNullable()), false);
}
case VARDECIMAL:
{
DrillBuf buffer;
int start;
int end;
int scale;
int precision;
if (materializedExpr.getMajorType().getMode() == TypeProtos.DataMode.OPTIONAL) {
NullableVarDecimalHolder varDecimalHolder = (NullableVarDecimalHolder) valueHolder;
buffer = varDecimalHolder.buffer;
start = varDecimalHolder.start;
end = varDecimalHolder.end;
scale = varDecimalHolder.scale;
precision = varDecimalHolder.precision;
} else {
VarDecimalHolder varDecimalHolder = (VarDecimalHolder) valueHolder;
buffer = varDecimalHolder.buffer;
start = varDecimalHolder.start;
end = varDecimalHolder.end;
scale = varDecimalHolder.scale;
precision = varDecimalHolder.precision;
}
return rexBuilder.makeLiteral(org.apache.drill.exec.util.DecimalUtility.getBigDecimalFromDrillBuf(buffer, start, end - start, scale), typeFactory.createSqlType(SqlTypeName.DECIMAL, precision, scale), false);
}
case DECIMAL28SPARSE:
{
DrillBuf buffer;
int start;
int scale;
if (materializedExpr.getMajorType().getMode() == TypeProtos.DataMode.OPTIONAL) {
NullableDecimal28SparseHolder decimal28Out = (NullableDecimal28SparseHolder) valueHolder;
buffer = decimal28Out.buffer;
start = decimal28Out.start;
scale = decimal28Out.scale;
} else {
Decimal28SparseHolder decimal28Out = (Decimal28SparseHolder) valueHolder;
buffer = decimal28Out.buffer;
start = decimal28Out.start;
scale = decimal28Out.scale;
}
return rexBuilder.makeLiteral(org.apache.drill.exec.util.DecimalUtility.getBigDecimalFromSparse(buffer, start * 20, 5, scale), TypeInferenceUtils.createCalciteTypeWithNullability(typeFactory, SqlTypeName.DECIMAL, newCall.getType().isNullable()), false);
}
case DECIMAL38SPARSE:
{
DrillBuf buffer;
int start;
int scale;
if (materializedExpr.getMajorType().getMode() == TypeProtos.DataMode.OPTIONAL) {
NullableDecimal38SparseHolder decimal38Out = (NullableDecimal38SparseHolder) valueHolder;
buffer = decimal38Out.buffer;
start = decimal38Out.start;
scale = decimal38Out.scale;
} else {
Decimal38SparseHolder decimal38Out = (Decimal38SparseHolder) valueHolder;
buffer = decimal38Out.buffer;
start = decimal38Out.start;
scale = decimal38Out.scale;
}
return rexBuilder.makeLiteral(org.apache.drill.exec.util.DecimalUtility.getBigDecimalFromSparse(buffer, start * 24, 6, scale), TypeInferenceUtils.createCalciteTypeWithNullability(typeFactory, SqlTypeName.DECIMAL, newCall.getType().isNullable()), false);
}
case TIME:
{
Calendar value = (materializedExpr.getMajorType().getMode() == TypeProtos.DataMode.OPTIONAL) ? new DateTime(((NullableTimeHolder) valueHolder).value, DateTimeZone.UTC).toCalendar(null) : new DateTime(((TimeHolder) valueHolder).value, DateTimeZone.UTC).toCalendar(null);
RelDataType type = typeFactory.createSqlType(SqlTypeName.TIME, newCall.getType().getPrecision());
RelDataType typeWithNullability = typeFactory.createTypeWithNullability(type, newCall.getType().isNullable());
return rexBuilder.makeLiteral(TimeString.fromCalendarFields(value), typeWithNullability, false);
}
case TIMESTAMP:
{
Calendar value = (materializedExpr.getMajorType().getMode() == TypeProtos.DataMode.OPTIONAL) ? new DateTime(((NullableTimeStampHolder) valueHolder).value, DateTimeZone.UTC).toCalendar(null) : new DateTime(((TimeStampHolder) valueHolder).value, DateTimeZone.UTC).toCalendar(null);
RelDataType type = typeFactory.createSqlType(SqlTypeName.TIMESTAMP, newCall.getType().getPrecision());
RelDataType typeWithNullability = typeFactory.createTypeWithNullability(type, newCall.getType().isNullable());
return rexBuilder.makeLiteral(TimestampString.fromCalendarFields(value), typeWithNullability, false);
}
case INTERVALYEAR:
{
BigDecimal value = (materializedExpr.getMajorType().getMode() == TypeProtos.DataMode.OPTIONAL) ? new BigDecimal(((NullableIntervalYearHolder) valueHolder).value) : new BigDecimal(((IntervalYearHolder) valueHolder).value);
return rexBuilder.makeLiteral(value, TypeInferenceUtils.createCalciteTypeWithNullability(typeFactory, SqlTypeName.INTERVAL_YEAR_MONTH, newCall.getType().isNullable()), false);
}
case INTERVALDAY:
{
int days;
int milliseconds;
if (materializedExpr.getMajorType().getMode() == TypeProtos.DataMode.OPTIONAL) {
NullableIntervalDayHolder intervalDayOut = (NullableIntervalDayHolder) valueHolder;
days = intervalDayOut.days;
milliseconds = intervalDayOut.milliseconds;
} else {
IntervalDayHolder intervalDayOut = (IntervalDayHolder) valueHolder;
days = intervalDayOut.days;
milliseconds = intervalDayOut.milliseconds;
}
return rexBuilder.makeLiteral(new BigDecimal(days * (long) DateUtilities.daysToStandardMillis + milliseconds), TypeInferenceUtils.createCalciteTypeWithNullability(typeFactory, SqlTypeName.INTERVAL_DAY, newCall.getType().isNullable()), false);
}
// Fall back to the original expression for any other type, as new types may be added in the future.
default:
logger.debug("Constant expression not folded due to return type {}, complete expression: {}", materializedExpr.getMajorType(), ExpressionStringBuilder.toString(materializedExpr));
return newCall;
}
};
reducedValues.add(literator.apply(output));
}
}
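Every branch of the literator above follows one recipe: unwrap the value from the (nullable or required) holder, build a Calcite type that carries the original expression's nullability, and pass both to RexBuilder.makeLiteral. Here is a self-contained sketch of that recipe for the BIGINT and typed-null cases, assuming a plain JavaTypeFactoryImpl outside any Drill context:

import java.math.BigDecimal;

import org.apache.calcite.jdbc.JavaTypeFactoryImpl;
import org.apache.calcite.rel.type.RelDataType;
import org.apache.calcite.rel.type.RelDataTypeFactory;
import org.apache.calcite.rex.RexBuilder;
import org.apache.calcite.rex.RexNode;
import org.apache.calcite.sql.type.SqlTypeName;

public class LiteralRecipeSketch {

    public static void main(String[] args) {
        RelDataTypeFactory typeFactory = new JavaTypeFactoryImpl();
        RexBuilder rexBuilder = new RexBuilder(typeFactory);
        // Numeric values go through BigDecimal; nullability lives on the type, not the value.
        RelDataType nullableBigint = typeFactory.createTypeWithNullability(typeFactory.createSqlType(SqlTypeName.BIGINT), true);
        RexNode literal = rexBuilder.makeLiteral(new BigDecimal(42L), nullableBigint, false);
        // A null constant becomes a typed null literal, mirroring the TypeHelper.isNull branch above.
        RexNode nullLiteral = rexBuilder.makeNullLiteral(nullableBigint);
        System.out.println(literal + " / " + nullLiteral);
    }
}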
Use of org.apache.drill.exec.planner.physical.PlannerSettings in project drill by apache.
The class DbScanToIndexScanPrule, method doOnMatch.
protected void doOnMatch(IndexLogicalPlanCallContext indexContext) {
Stopwatch indexPlanTimer = Stopwatch.createStarted();
final PlannerSettings settings = PrelUtil.getPlannerSettings(indexContext.call.getPlanner());
final IndexCollection indexCollection = getIndexCollection(settings, indexContext.scan);
if (indexCollection == null) {
return;
}
logger.debug("Index Rule {} starts", this.description);
RexBuilder builder = indexContext.filter.getCluster().getRexBuilder();
RexNode condition = null;
if (indexContext.lowerProject == null) {
condition = indexContext.filter.getCondition();
} else {
// get the filter as if it were below the projection.
condition = RelOptUtil.pushFilterPastProject(indexContext.filter.getCondition(), indexContext.lowerProject);
}
// save this pushed down condition, in case it is needed later to build filter when joining back primary table
indexContext.origPushedCondition = condition;
RewriteAsBinaryOperators visitor = new RewriteAsBinaryOperators(true, builder);
condition = condition.accept(visitor);
if (indexCollection.supportsIndexSelection()) {
try {
processWithIndexSelection(indexContext, settings, condition, indexCollection, builder);
} catch (Exception e) {
logger.warn("Exception while doing index planning ", e);
}
} else {
throw new UnsupportedOperationException("Index collection must support index selection");
}
indexPlanTimer.stop();
logger.info("index_plan_info: Index Planning took {} ms", indexPlanTimer.elapsed(TimeUnit.MILLISECONDS));
}
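The Stopwatch bracketing the rule body is ordinary Guava (Drill uses a shaded copy). A runnable sketch of just that timing-and-logging pattern, with a sleep standing in for the planning work:

import java.util.concurrent.TimeUnit;

import com.google.common.base.Stopwatch;

public class IndexPlanTimingSketch {

    public static void main(String[] args) throws InterruptedException {
        Stopwatch indexPlanTimer = Stopwatch.createStarted();
        // stand-in for the index selection work done in doOnMatch
        Thread.sleep(25);
        indexPlanTimer.stop();
        System.out.printf("index_plan_info: Index Planning took %d ms%n", indexPlanTimer.elapsed(TimeUnit.MILLISECONDS));
    }
}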
Use of org.apache.drill.exec.planner.physical.PlannerSettings in project drill by apache.
The class ParquetPruneScanRule, method getFilterOnScanParquet.
public static RelOptRule getFilterOnScanParquet(OptimizerRulesContext optimizerRulesContext) {
return new PruneScanRule(RelOptHelper.some(DrillFilterRel.class, RelOptHelper.any(DrillScanRel.class)), "PruneScanRule:Filter_On_Scan_Parquet", optimizerRulesContext) {
@Override
public PartitionDescriptor getPartitionDescriptor(PlannerSettings settings, TableScan scanRel) {
return new ParquetPartitionDescriptor(settings, (DrillScanRel) scanRel);
}
@Override
public boolean matches(RelOptRuleCall call) {
final DrillScanRel scan = call.rel(1);
GroupScan groupScan = scan.getGroupScan();
// this rule is applicable only for Parquet-based partition pruning
if (PrelUtil.getPlannerSettings(scan.getCluster().getPlanner()).isHepPartitionPruningEnabled()) {
return groupScan instanceof AbstractParquetGroupScan && groupScan.supportsPartitionFilterPushdown() && !scan.partitionFilterPushdown();
} else {
return groupScan instanceof AbstractParquetGroupScan && groupScan.supportsPartitionFilterPushdown();
}
}
@Override
public void onMatch(RelOptRuleCall call) {
final DrillFilterRel filterRel = call.rel(0);
final DrillScanRel scanRel = call.rel(1);
doOnMatch(call, filterRel, null, scanRel);
}
};
}
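This rule and the Hive variant below share one shape: the operand tree pins down which rel pattern fires the rule, matches() gates applicability, and onMatch() pulls the matched rels out by position. A generic, Calcite-only sketch of that shape, using a hypothetical rule and no Drill types:

import org.apache.calcite.plan.RelOptRule;
import org.apache.calcite.plan.RelOptRuleCall;
import org.apache.calcite.rel.core.Filter;
import org.apache.calcite.rel.core.TableScan;

public class FilterOnScanRuleSketch {

    public static RelOptRule create() {
        // operand(Filter, operand(TableScan)) plays the role of RelOptHelper.some(..., RelOptHelper.any(...))
        return new RelOptRule(RelOptRule.operand(Filter.class, RelOptRule.operand(TableScan.class, RelOptRule.none())), "Sketch:Filter_On_Scan") {

            @Override
            public boolean matches(RelOptRuleCall call) {
                // a real rule checks group-scan capabilities here
                final TableScan scan = call.rel(1);
                return scan.getTable() != null;
            }

            @Override
            public void onMatch(RelOptRuleCall call) {
                final Filter filterRel = call.rel(0);
                final TableScan scanRel = call.rel(1);
                // a real implementation would build a pruned scan from filterRel
                // and scanRel, then hand it back via call.transformTo(prunedRel)
            }
        };
    }
}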
Use of org.apache.drill.exec.planner.physical.PlannerSettings in project drill by axbaretto.
The class HivePushPartitionFilterIntoScan, method getFilterOnProject.
public static final StoragePluginOptimizerRule getFilterOnProject(OptimizerRulesContext optimizerRulesContext, final String defaultPartitionValue) {
return new PruneScanRule(RelOptHelper.some(DrillFilterRel.class, RelOptHelper.some(DrillProjectRel.class, RelOptHelper.any(DrillScanRel.class))), "HivePushPartitionFilterIntoScan:Filter_On_Project_Hive", optimizerRulesContext) {
@Override
public PartitionDescriptor getPartitionDescriptor(PlannerSettings settings, TableScan scanRel) {
return new HivePartitionDescriptor(settings, (DrillScanRel) scanRel, getOptimizerRulesContext().getManagedBuffer(), defaultPartitionValue);
}
@Override
public boolean matches(RelOptRuleCall call) {
final DrillScanRel scan = call.rel(2);
GroupScan groupScan = scan.getGroupScan();
// this rule is applicable only for Hive-based partition pruning
if (PrelUtil.getPlannerSettings(scan.getCluster().getPlanner()).isHepPartitionPruningEnabled()) {
return groupScan instanceof HiveScan && groupScan.supportsPartitionFilterPushdown() && !scan.partitionFilterPushdown();
} else {
return groupScan instanceof HiveScan && groupScan.supportsPartitionFilterPushdown();
}
}
@Override
public void onMatch(RelOptRuleCall call) {
final DrillFilterRel filterRel = call.rel(0);
final DrillProjectRel projectRel = call.rel(1);
final DrillScanRel scanRel = call.rel(2);
doOnMatch(call, filterRel, projectRel, scanRel);
}
};
}
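Both factory methods return fully configured rules, so a plugin only needs to gather them into the collection it hands the planner. A hedged sketch of that step, assuming an OptimizerRulesContext supplied by the Drill environment; __HIVE_DEFAULT_PARTITION__ is Hive's standard default-partition label, used here as an illustrative argument:

import java.util.Arrays;
import java.util.List;

import org.apache.calcite.plan.RelOptRule;
import org.apache.drill.exec.ops.OptimizerRulesContext;
// plus imports for the ParquetPruneScanRule and HivePushPartitionFilterIntoScan classes shown above

public final class PruningRuleSetSketch {

    private PruningRuleSetSketch() {
    }

    public static List<RelOptRule> pruningRules(OptimizerRulesContext context) {
        return Arrays.asList(ParquetPruneScanRule.getFilterOnScanParquet(context), HivePushPartitionFilterIntoScan.getFilterOnProject(context, "__HIVE_DEFAULT_PARTITION__"));
    }
}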