Example 11 with ExprMacroTable

Use of org.apache.druid.math.expr.ExprMacroTable in project druid by druid-io.

From the class DruidAvaticaHandlerTest, method testMaxRowsPerFrame.

@Test
public void testMaxRowsPerFrame() throws Exception {
    final AvaticaServerConfig smallFrameConfig = new AvaticaServerConfig() {

        @Override
        public int getMaxConnections() {
            return 2;
        }

        @Override
        public int getMaxStatementsPerConnection() {
            return 4;
        }

        @Override
        public int getMaxRowsPerFrame() {
            return 2;
        }
    };
    final PlannerConfig plannerConfig = new PlannerConfig();
    final DruidOperatorTable operatorTable = CalciteTests.createOperatorTable();
    final ExprMacroTable macroTable = CalciteTests.createExprMacroTable();
    final List<Meta.Frame> frames = new ArrayList<>();
    DruidSchemaCatalog rootSchema = CalciteTests.createMockRootSchema(conglomerate, walker, plannerConfig, AuthTestUtils.TEST_AUTHORIZER_MAPPER);
    DruidMeta smallFrameDruidMeta = new DruidMeta(
        CalciteTests.createSqlLifecycleFactory(
            new PlannerFactory(
                rootSchema,
                CalciteTests.createMockQueryMakerFactory(walker, conglomerate),
                operatorTable,
                macroTable,
                plannerConfig,
                AuthTestUtils.TEST_AUTHORIZER_MAPPER,
                CalciteTests.getJsonMapper(),
                CalciteTests.DRUID_SCHEMA_NAME
            )
        ),
        smallFrameConfig,
        new ErrorHandler(new ServerConfig()),
        injector
    ) {

        @Override
        public Frame fetch(final StatementHandle statement, final long offset, final int fetchMaxRowCount) throws NoSuchStatementException, MissingResultsException {
            // overriding fetch allows us to track how many frames are processed after the first frame
            Frame frame = super.fetch(statement, offset, fetchMaxRowCount);
            frames.add(frame);
            return frame;
        }
    };
    final AbstractAvaticaHandler handler = this.getAvaticaHandler(smallFrameDruidMeta);
    final int port = ThreadLocalRandom.current().nextInt(9999) + 20000;
    Server smallFrameServer = new Server(new InetSocketAddress("127.0.0.1", port));
    smallFrameServer.setHandler(handler);
    smallFrameServer.start();
    String smallFrameUrl = this.getJdbcConnectionString(port);
    Connection smallFrameClient = DriverManager.getConnection(smallFrameUrl, "regularUser", "druid");
    final ResultSet resultSet = smallFrameClient.createStatement().executeQuery("SELECT dim1 FROM druid.foo");
    List<Map<String, Object>> rows = getRows(resultSet);
    Assert.assertEquals(2, frames.size());
    Assert.assertEquals(
        ImmutableList.of(
            ImmutableMap.of("dim1", ""),
            ImmutableMap.of("dim1", "10.1"),
            ImmutableMap.of("dim1", "2"),
            ImmutableMap.of("dim1", "1"),
            ImmutableMap.of("dim1", "def"),
            ImmutableMap.of("dim1", "abc")
        ),
        rows
    );
}
Also used : Server(org.eclipse.jetty.server.Server) InetSocketAddress(java.net.InetSocketAddress) ArrayList(java.util.ArrayList) Connection(java.sql.Connection) AbstractAvaticaHandler(org.apache.calcite.avatica.server.AbstractAvaticaHandler) DruidOperatorTable(org.apache.druid.sql.calcite.planner.DruidOperatorTable) ExprMacroTable(org.apache.druid.math.expr.ExprMacroTable) ServerConfig(org.apache.druid.server.initialization.ServerConfig) PlannerConfig(org.apache.druid.sql.calcite.planner.PlannerConfig) DruidSchemaCatalog(org.apache.druid.sql.calcite.schema.DruidSchemaCatalog) ResultSet(java.sql.ResultSet) PlannerFactory(org.apache.druid.sql.calcite.planner.PlannerFactory) Map(java.util.Map) ImmutableMap(com.google.common.collect.ImmutableMap) HashMap(java.util.HashMap) Test(org.junit.Test)
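
In this test the ExprMacroTable built by CalciteTests is only threaded through to the PlannerFactory, but the table itself is simply the registry that Druid's expression parser consults when resolving macro functions (for example timestamp_floor). A minimal sketch of that lower-level use, assuming the stock org.apache.druid.math.expr.Parser.parse(String, ExprMacroTable) entry point and the empty table returned by ExprMacroTable.nil():

// Minimal sketch, not taken from the test above: parse an expression against a macro table.
// ExprMacroTable.nil() is assumed to provide a table with no macros registered; a populated
// table (such as the one from CalciteTests.createExprMacroTable()) resolves macro functions
// at parse time.
ExprMacroTable macroTable = ExprMacroTable.nil();
Expr expr = Parser.parse("x + 1", macroTable);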

Example 12 with ExprMacroTable

Use of org.apache.druid.math.expr.ExprMacroTable in project druid by druid-io.

From the class DruidAvaticaHandlerTest, method testMinRowsPerFrame.

@Test
public void testMinRowsPerFrame() throws Exception {
    final int minFetchSize = 1000;
    final AvaticaServerConfig smallFrameConfig = new AvaticaServerConfig() {

        @Override
        public int getMaxConnections() {
            return 2;
        }

        @Override
        public int getMaxStatementsPerConnection() {
            return 4;
        }

        @Override
        public int getMinRowsPerFrame() {
            return minFetchSize;
        }
    };
    final PlannerConfig plannerConfig = new PlannerConfig();
    final DruidOperatorTable operatorTable = CalciteTests.createOperatorTable();
    final ExprMacroTable macroTable = CalciteTests.createExprMacroTable();
    final List<Meta.Frame> frames = new ArrayList<>();
    DruidSchemaCatalog rootSchema = CalciteTests.createMockRootSchema(conglomerate, walker, plannerConfig, AuthTestUtils.TEST_AUTHORIZER_MAPPER);
    DruidMeta smallFrameDruidMeta = new DruidMeta(
        CalciteTests.createSqlLifecycleFactory(
            new PlannerFactory(
                rootSchema,
                CalciteTests.createMockQueryMakerFactory(walker, conglomerate),
                operatorTable,
                macroTable,
                plannerConfig,
                AuthTestUtils.TEST_AUTHORIZER_MAPPER,
                CalciteTests.getJsonMapper(),
                CalciteTests.DRUID_SCHEMA_NAME
            )
        ),
        smallFrameConfig,
        new ErrorHandler(new ServerConfig()),
        injector
    ) {

        @Override
        public Frame fetch(final StatementHandle statement, final long offset, final int fetchMaxRowCount) throws NoSuchStatementException, MissingResultsException {
            // overriding fetch allows us to track how many frames are processed after the first frame, and also fetch size
            Assert.assertEquals(minFetchSize, fetchMaxRowCount);
            Frame frame = super.fetch(statement, offset, fetchMaxRowCount);
            frames.add(frame);
            return frame;
        }
    };
    final AbstractAvaticaHandler handler = this.getAvaticaHandler(smallFrameDruidMeta);
    final int port = ThreadLocalRandom.current().nextInt(9999) + 20000;
    Server smallFrameServer = new Server(new InetSocketAddress("127.0.0.1", port));
    smallFrameServer.setHandler(handler);
    smallFrameServer.start();
    String smallFrameUrl = this.getJdbcConnectionString(port);
    Connection smallFrameClient = DriverManager.getConnection(smallFrameUrl, "regularUser", "druid");
    // use a prepared statement because avatica currently ignores fetchSize on the initial fetch of a Statement
    PreparedStatement statement = smallFrameClient.prepareStatement("SELECT dim1 FROM druid.foo");
    // set a fetch size below the minimum configured threshold
    statement.setFetchSize(2);
    final ResultSet resultSet = statement.executeQuery();
    List<Map<String, Object>> rows = getRows(resultSet);
    // expect minimum threshold to be used, which should be enough to do this all in first fetch
    Assert.assertEquals(0, frames.size());
    Assert.assertEquals(
        ImmutableList.of(
            ImmutableMap.of("dim1", ""),
            ImmutableMap.of("dim1", "10.1"),
            ImmutableMap.of("dim1", "2"),
            ImmutableMap.of("dim1", "1"),
            ImmutableMap.of("dim1", "def"),
            ImmutableMap.of("dim1", "abc")
        ),
        rows
    );
}
Also used : Server(org.eclipse.jetty.server.Server) InetSocketAddress(java.net.InetSocketAddress) ArrayList(java.util.ArrayList) Connection(java.sql.Connection) AbstractAvaticaHandler(org.apache.calcite.avatica.server.AbstractAvaticaHandler) PreparedStatement(java.sql.PreparedStatement) DruidOperatorTable(org.apache.druid.sql.calcite.planner.DruidOperatorTable) ExprMacroTable(org.apache.druid.math.expr.ExprMacroTable) ServerConfig(org.apache.druid.server.initialization.ServerConfig) PlannerConfig(org.apache.druid.sql.calcite.planner.PlannerConfig) DruidSchemaCatalog(org.apache.druid.sql.calcite.schema.DruidSchemaCatalog) ResultSet(java.sql.ResultSet) PlannerFactory(org.apache.druid.sql.calcite.planner.PlannerFactory) Map(java.util.Map) ImmutableMap(com.google.common.collect.ImmutableMap) HashMap(java.util.HashMap) Test(org.junit.Test)
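
The client-side pattern in this test carries over to ordinary JDBC code: the fetch size requested by the client is only a hint, and the broker's configured minRowsPerFrame takes precedence when the hint is smaller. A hedged sketch of that usage, assuming an already open Avatica JDBC connection named connection and the same hypothetical query:

// Hypothetical client-side usage; only standard JDBC API calls are shown.
try (PreparedStatement ps = connection.prepareStatement("SELECT dim1 FROM druid.foo")) {
    // client hint below the configured minRowsPerFrame; the server still returns
    // at least minRowsPerFrame rows per frame
    ps.setFetchSize(2);
    try (ResultSet rs = ps.executeQuery()) {
        while (rs.next()) {
            System.out.println(rs.getString("dim1"));
        }
    }
}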

Example 13 with ExprMacroTable

Use of org.apache.druid.math.expr.ExprMacroTable in project druid by druid-io.

From the class StringSqlAggregator, method toDruidAggregation.

@Nullable
@Override
public Aggregation toDruidAggregation(PlannerContext plannerContext, RowSignature rowSignature, VirtualColumnRegistry virtualColumnRegistry, RexBuilder rexBuilder, String name, AggregateCall aggregateCall, Project project, List<Aggregation> existingAggregations, boolean finalizeAggregations) {
    final List<DruidExpression> arguments = aggregateCall.getArgList()
        .stream()
        .map(i -> Expressions.fromFieldAccess(rowSignature, project, i))
        .map(rexNode -> Expressions.toDruidExpression(plannerContext, rowSignature, rexNode))
        .collect(Collectors.toList());
    if (arguments.stream().anyMatch(Objects::isNull)) {
        return null;
    }
    RexNode separatorNode = Expressions.fromFieldAccess(rowSignature, project, aggregateCall.getArgList().get(1));
    if (!separatorNode.isA(SqlKind.LITERAL)) {
        // separator must be a literal
        return null;
    }
    String separator = RexLiteral.stringValue(separatorNode);
    if (separator == null) {
        // separator must not be null
        return null;
    }
    Integer maxSizeBytes = null;
    if (arguments.size() > 2) {
        RexNode maxBytes = Expressions.fromFieldAccess(rowSignature, project, aggregateCall.getArgList().get(2));
        if (!maxBytes.isA(SqlKind.LITERAL)) {
            // maxBytes must be a literal
            return null;
        }
        maxSizeBytes = ((Number) RexLiteral.value(maxBytes)).intValue();
    }
    final DruidExpression arg = arguments.get(0);
    final ExprMacroTable macroTable = plannerContext.getExprMacroTable();
    final String initialvalue = "[]";
    final ColumnType elementType = ColumnType.STRING;
    final String fieldName;
    if (arg.isDirectColumnAccess()) {
        fieldName = arg.getDirectColumn();
    } else {
        fieldName = virtualColumnRegistry.getOrCreateVirtualColumnForExpression(arg, elementType);
    }
    final String finalizer = StringUtils.format("if(array_length(o) == 0, null, array_to_string(o, '%s'))", separator);
    final NotDimFilter dimFilter = new NotDimFilter(new SelectorDimFilter(fieldName, null, null));
    if (aggregateCall.isDistinct()) {
        // string_agg ignores nulls
        return Aggregation.create(
            new FilteredAggregatorFactory(
                new ExpressionLambdaAggregatorFactory(
                    name, ImmutableSet.of(fieldName), null, initialvalue, null,
                    true, false, false,
                    StringUtils.format("array_set_add(\"__acc\", \"%s\")", fieldName),
                    StringUtils.format("array_set_add_all(\"__acc\", \"%s\")", name),
                    null, finalizer,
                    maxSizeBytes != null ? new HumanReadableBytes(maxSizeBytes) : null,
                    macroTable
                ),
                dimFilter
            )
        );
    } else {
        // string_agg ignores nulls
        return Aggregation.create(
            new FilteredAggregatorFactory(
                new ExpressionLambdaAggregatorFactory(
                    name, ImmutableSet.of(fieldName), null, initialvalue, null,
                    true, false, false,
                    StringUtils.format("array_append(\"__acc\", \"%s\")", fieldName),
                    StringUtils.format("array_concat(\"__acc\", \"%s\")", name),
                    null, finalizer,
                    maxSizeBytes != null ? new HumanReadableBytes(maxSizeBytes) : null,
                    macroTable
                ),
                dimFilter
            )
        );
    }
}
Also used : Project(org.apache.calcite.rel.core.Project) SqlAggregator(org.apache.druid.sql.calcite.aggregation.SqlAggregator) FilteredAggregatorFactory(org.apache.druid.query.aggregation.FilteredAggregatorFactory) RowSignatures(org.apache.druid.sql.calcite.table.RowSignatures) UnsupportedSQLQueryException(org.apache.druid.sql.calcite.planner.UnsupportedSQLQueryException) DruidExpression(org.apache.druid.sql.calcite.expression.DruidExpression) HumanReadableBytes(org.apache.druid.java.util.common.HumanReadableBytes) Optionality(org.apache.calcite.util.Optionality) SelectorDimFilter(org.apache.druid.query.filter.SelectorDimFilter) RexNode(org.apache.calcite.rex.RexNode) VirtualColumnRegistry(org.apache.druid.sql.calcite.rel.VirtualColumnRegistry) PlannerContext(org.apache.druid.sql.calcite.planner.PlannerContext) Nullable(javax.annotation.Nullable) SqlOperatorBinding(org.apache.calcite.sql.SqlOperatorBinding) RelDataType(org.apache.calcite.rel.type.RelDataType) ImmutableSet(com.google.common.collect.ImmutableSet) SqlKind(org.apache.calcite.sql.SqlKind) SqlTypeFamily(org.apache.calcite.sql.type.SqlTypeFamily) ExpressionLambdaAggregatorFactory(org.apache.druid.query.aggregation.ExpressionLambdaAggregatorFactory) NotDimFilter(org.apache.druid.query.filter.NotDimFilter) SqlTypeName(org.apache.calcite.sql.type.SqlTypeName) InferTypes(org.apache.calcite.sql.type.InferTypes) RexBuilder(org.apache.calcite.rex.RexBuilder) RexLiteral(org.apache.calcite.rex.RexLiteral) SqlFunctionCategory(org.apache.calcite.sql.SqlFunctionCategory) StringUtils(org.apache.druid.java.util.common.StringUtils) Aggregation(org.apache.druid.sql.calcite.aggregation.Aggregation) Collectors(java.util.stream.Collectors) ExprMacroTable(org.apache.druid.math.expr.ExprMacroTable) Objects(java.util.Objects) SqlReturnTypeInference(org.apache.calcite.sql.type.SqlReturnTypeInference) List(java.util.List) RowSignature(org.apache.druid.segment.column.RowSignature) OperandTypes(org.apache.calcite.sql.type.OperandTypes) ColumnType(org.apache.druid.segment.column.ColumnType) AggregateCall(org.apache.calcite.rel.core.AggregateCall) SqlAggFunction(org.apache.calcite.sql.SqlAggFunction) Calcites(org.apache.druid.sql.calcite.planner.Calcites) Expressions(org.apache.druid.sql.calcite.expression.Expressions)
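
For orientation, the StringUtils.format calls above only build plain Druid expression strings; the ExpressionLambdaAggregatorFactory later parses them against the supplied macroTable. Purely as an illustration, assuming a fieldName of "dim1", an aggregator name of "a0", and a ',' separator, the non-distinct branch is wired with expressions roughly like these:

// Illustrative only: concrete expression strings implied by the formats above,
// assuming fieldName = "dim1", name = "a0", separator = ",".
String fold = "array_append(\"__acc\", \"dim1\")"; // per-row accumulation
String combine = "array_concat(\"__acc\", \"a0\")"; // merging partial aggregates
String finalize = "if(array_length(o) == 0, null, array_to_string(o, ','))"; // the finalizer built above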

Example 14 with ExprMacroTable

Use of org.apache.druid.math.expr.ExprMacroTable in project druid by druid-io.

From the class BitwiseSqlAggregator, method toDruidAggregation.

@Nullable
@Override
public Aggregation toDruidAggregation(PlannerContext plannerContext, RowSignature rowSignature, VirtualColumnRegistry virtualColumnRegistry, RexBuilder rexBuilder, String name, AggregateCall aggregateCall, Project project, List<Aggregation> existingAggregations, boolean finalizeAggregations) {
    final List<DruidExpression> arguments = aggregateCall.getArgList()
        .stream()
        .map(i -> Expressions.fromFieldAccess(rowSignature, project, i))
        .map(rexNode -> Expressions.toDruidExpression(plannerContext, rowSignature, rexNode))
        .collect(Collectors.toList());
    if (arguments.stream().anyMatch(Objects::isNull)) {
        return null;
    }
    final DruidExpression arg = arguments.get(0);
    final ExprMacroTable macroTable = plannerContext.getExprMacroTable();
    final String fieldName;
    if (arg.isDirectColumnAccess()) {
        fieldName = arg.getDirectColumn();
    } else {
        fieldName = virtualColumnRegistry.getOrCreateVirtualColumnForExpression(arg, ColumnType.LONG);
    }
    return Aggregation.create(
        new FilteredAggregatorFactory(
            new ExpressionLambdaAggregatorFactory(
                name, ImmutableSet.of(fieldName), null, "0", null, null,
                false, false,
                StringUtils.format("%s(\"__acc\", \"%s\")", op.getDruidFunction(), fieldName),
                null, null, null, null,
                macroTable
            ),
            new NotDimFilter(new SelectorDimFilter(fieldName, null, null))
        )
    );
}
Also used : Project(org.apache.calcite.rel.core.Project) SqlAggregator(org.apache.druid.sql.calcite.aggregation.SqlAggregator) FilteredAggregatorFactory(org.apache.druid.query.aggregation.FilteredAggregatorFactory) ReturnTypes(org.apache.calcite.sql.type.ReturnTypes) DruidExpression(org.apache.druid.sql.calcite.expression.DruidExpression) Optionality(org.apache.calcite.util.Optionality) SelectorDimFilter(org.apache.druid.query.filter.SelectorDimFilter) VirtualColumnRegistry(org.apache.druid.sql.calcite.rel.VirtualColumnRegistry) PlannerContext(org.apache.druid.sql.calcite.planner.PlannerContext) Nullable(javax.annotation.Nullable) ImmutableSet(com.google.common.collect.ImmutableSet) SqlKind(org.apache.calcite.sql.SqlKind) ExpressionLambdaAggregatorFactory(org.apache.druid.query.aggregation.ExpressionLambdaAggregatorFactory) NotDimFilter(org.apache.druid.query.filter.NotDimFilter) SqlTypeName(org.apache.calcite.sql.type.SqlTypeName) InferTypes(org.apache.calcite.sql.type.InferTypes) RexBuilder(org.apache.calcite.rex.RexBuilder) SqlFunctionCategory(org.apache.calcite.sql.SqlFunctionCategory) StringUtils(org.apache.druid.java.util.common.StringUtils) Aggregation(org.apache.druid.sql.calcite.aggregation.Aggregation) Collectors(java.util.stream.Collectors) ExprMacroTable(org.apache.druid.math.expr.ExprMacroTable) Objects(java.util.Objects) List(java.util.List) SqlStdOperatorTable(org.apache.calcite.sql.fun.SqlStdOperatorTable) RowSignature(org.apache.druid.segment.column.RowSignature) OperandTypes(org.apache.calcite.sql.type.OperandTypes) ColumnType(org.apache.druid.segment.column.ColumnType) AggregateCall(org.apache.calcite.rel.core.AggregateCall) SqlAggFunction(org.apache.calcite.sql.SqlAggFunction) Expressions(org.apache.druid.sql.calcite.expression.Expressions)
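
Here the fold expression is assembled from op.getDruidFunction(), so one aggregator class covers all of the bitwise operations. Purely as an illustration, assuming the operator maps to a Druid expression function named bitwiseAnd and the resolved column is "l1", the format above yields:

// Illustrative only: the fold expression produced by the format above,
// assuming op.getDruidFunction() returns "bitwiseAnd" and fieldName is "l1".
String fold = "bitwiseAnd(\"__acc\", \"l1\")";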

Example 15 with ExprMacroTable

Use of org.apache.druid.math.expr.ExprMacroTable in project druid by druid-io.

From the class ArraySqlAggregator, method toDruidAggregation.

@Nullable
@Override
public Aggregation toDruidAggregation(PlannerContext plannerContext, RowSignature rowSignature, VirtualColumnRegistry virtualColumnRegistry, RexBuilder rexBuilder, String name, AggregateCall aggregateCall, Project project, List<Aggregation> existingAggregations, boolean finalizeAggregations) {
    final List<RexNode> arguments = aggregateCall.getArgList().stream().map(i -> Expressions.fromFieldAccess(rowSignature, project, i)).collect(Collectors.toList());
    Integer maxSizeBytes = null;
    if (arguments.size() > 1) {
        RexNode maxBytes = arguments.get(1);
        if (!maxBytes.isA(SqlKind.LITERAL)) {
            // maxBytes must be a literal
            return null;
        }
        maxSizeBytes = ((Number) RexLiteral.value(maxBytes)).intValue();
    }
    final DruidExpression arg = Expressions.toDruidExpression(plannerContext, rowSignature, arguments.get(0));
    if (arg == null) {
        // can't translate argument
        return null;
    }
    final ExprMacroTable macroTable = plannerContext.getExprMacroTable();
    final String fieldName;
    final String initialvalue;
    final ColumnType druidType = Calcites.getValueTypeForRelDataTypeFull(aggregateCall.getType());
    final ColumnType elementType;
    if (druidType == null || !druidType.isArray()) {
        initialvalue = "[]";
        elementType = ColumnType.STRING;
    } else {
        initialvalue = ExpressionType.fromColumnTypeStrict(druidType).asTypeString() + "[]";
        elementType = (ColumnType) druidType.getElementType();
    }
    if (arg.isDirectColumnAccess()) {
        fieldName = arg.getDirectColumn();
    } else {
        fieldName = virtualColumnRegistry.getOrCreateVirtualColumnForExpression(arg, elementType);
    }
    if (aggregateCall.isDistinct()) {
        return Aggregation.create(
            new ExpressionLambdaAggregatorFactory(
                name, ImmutableSet.of(fieldName), null, initialvalue, null,
                true, true, false,
                StringUtils.format("array_set_add(\"__acc\", \"%s\")", fieldName),
                StringUtils.format("array_set_add_all(\"__acc\", \"%s\")", name),
                null, null,
                maxSizeBytes != null ? new HumanReadableBytes(maxSizeBytes) : null,
                macroTable
            )
        );
    } else {
        return Aggregation.create(
            new ExpressionLambdaAggregatorFactory(
                name, ImmutableSet.of(fieldName), null, initialvalue, null,
                true, true, false,
                StringUtils.format("array_append(\"__acc\", \"%s\")", fieldName),
                StringUtils.format("array_concat(\"__acc\", \"%s\")", name),
                null, null,
                maxSizeBytes != null ? new HumanReadableBytes(maxSizeBytes) : null,
                macroTable
            )
        );
    }
}
Also used : Project(org.apache.calcite.rel.core.Project) SqlAggregator(org.apache.druid.sql.calcite.aggregation.SqlAggregator) RowSignatures(org.apache.druid.sql.calcite.table.RowSignatures) UnsupportedSQLQueryException(org.apache.druid.sql.calcite.planner.UnsupportedSQLQueryException) DruidExpression(org.apache.druid.sql.calcite.expression.DruidExpression) HumanReadableBytes(org.apache.druid.java.util.common.HumanReadableBytes) Optionality(org.apache.calcite.util.Optionality) RexNode(org.apache.calcite.rex.RexNode) ExpressionType(org.apache.druid.math.expr.ExpressionType) VirtualColumnRegistry(org.apache.druid.sql.calcite.rel.VirtualColumnRegistry) PlannerContext(org.apache.druid.sql.calcite.planner.PlannerContext) Nullable(javax.annotation.Nullable) SqlOperatorBinding(org.apache.calcite.sql.SqlOperatorBinding) RelDataType(org.apache.calcite.rel.type.RelDataType) ImmutableSet(com.google.common.collect.ImmutableSet) SqlKind(org.apache.calcite.sql.SqlKind) SqlTypeFamily(org.apache.calcite.sql.type.SqlTypeFamily) ExpressionLambdaAggregatorFactory(org.apache.druid.query.aggregation.ExpressionLambdaAggregatorFactory) InferTypes(org.apache.calcite.sql.type.InferTypes) RexBuilder(org.apache.calcite.rex.RexBuilder) RexLiteral(org.apache.calcite.rex.RexLiteral) SqlFunctionCategory(org.apache.calcite.sql.SqlFunctionCategory) StringUtils(org.apache.druid.java.util.common.StringUtils) Aggregation(org.apache.druid.sql.calcite.aggregation.Aggregation) Collectors(java.util.stream.Collectors) ExprMacroTable(org.apache.druid.math.expr.ExprMacroTable) SqlReturnTypeInference(org.apache.calcite.sql.type.SqlReturnTypeInference) List(java.util.List) RowSignature(org.apache.druid.segment.column.RowSignature) OperandTypes(org.apache.calcite.sql.type.OperandTypes) ColumnType(org.apache.druid.segment.column.ColumnType) AggregateCall(org.apache.calcite.rel.core.AggregateCall) SqlAggFunction(org.apache.calcite.sql.SqlAggFunction) Calcites(org.apache.druid.sql.calcite.planner.Calcites) Expressions(org.apache.druid.sql.calcite.expression.Expressions)
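
The only difference between the two branches above is the fold/combine pair: the DISTINCT path accumulates into a set, while the plain path appends and concatenates. Purely as an illustration, assuming a fieldName of "dim1" and an aggregator name of "a0":

// Illustrative only: fold and combine expressions implied by the two branches,
// assuming fieldName = "dim1" and name = "a0".
String distinctFold = "array_set_add(\"__acc\", \"dim1\")"; // ARRAY_AGG(DISTINCT dim1) de-duplicates
String distinctCombine = "array_set_add_all(\"__acc\", \"a0\")";
String plainFold = "array_append(\"__acc\", \"dim1\")"; // ARRAY_AGG(dim1) keeps duplicates
String plainCombine = "array_concat(\"__acc\", \"a0\")";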

Aggregations

ExprMacroTable (org.apache.druid.math.expr.ExprMacroTable): 16
Nullable (javax.annotation.Nullable): 7
DruidExpression (org.apache.druid.sql.calcite.expression.DruidExpression): 7
ImmutableSet (com.google.common.collect.ImmutableSet): 5
DruidOperatorTable (org.apache.druid.sql.calcite.planner.DruidOperatorTable): 5
PlannerConfig (org.apache.druid.sql.calcite.planner.PlannerConfig): 5
DruidSchemaCatalog (org.apache.druid.sql.calcite.schema.DruidSchemaCatalog): 5
ArrayList (java.util.ArrayList): 4
List (java.util.List): 4
Collectors (java.util.stream.Collectors): 4
AggregateCall (org.apache.calcite.rel.core.AggregateCall): 4
Project (org.apache.calcite.rel.core.Project): 4
RexBuilder (org.apache.calcite.rex.RexBuilder): 4
RexNode (org.apache.calcite.rex.RexNode): 4
SqlAggFunction (org.apache.calcite.sql.SqlAggFunction): 4
SqlFunctionCategory (org.apache.calcite.sql.SqlFunctionCategory): 4
SqlKind (org.apache.calcite.sql.SqlKind): 4
InferTypes (org.apache.calcite.sql.type.InferTypes): 4
OperandTypes (org.apache.calcite.sql.type.OperandTypes): 4
Optionality (org.apache.calcite.util.Optionality): 4