Use of org.apache.druid.math.expr.ExprMacroTable in project druid by druid-io, from class DruidAvaticaHandlerTest, method testMaxRowsPerFrame.
@Test
public void testMaxRowsPerFrame() throws Exception {
final AvaticaServerConfig smallFrameConfig = new AvaticaServerConfig() {
@Override
public int getMaxConnections() {
return 2;
}
@Override
public int getMaxStatementsPerConnection() {
return 4;
}
@Override
public int getMaxRowsPerFrame() {
return 2;
}
};
final PlannerConfig plannerConfig = new PlannerConfig();
final DruidOperatorTable operatorTable = CalciteTests.createOperatorTable();
final ExprMacroTable macroTable = CalciteTests.createExprMacroTable();
final List<Meta.Frame> frames = new ArrayList<>();
DruidSchemaCatalog rootSchema = CalciteTests.createMockRootSchema(conglomerate, walker, plannerConfig, AuthTestUtils.TEST_AUTHORIZER_MAPPER);
DruidMeta smallFrameDruidMeta = new DruidMeta(
    CalciteTests.createSqlLifecycleFactory(
        new PlannerFactory(
            rootSchema,
            CalciteTests.createMockQueryMakerFactory(walker, conglomerate),
            operatorTable,
            macroTable,
            plannerConfig,
            AuthTestUtils.TEST_AUTHORIZER_MAPPER,
            CalciteTests.getJsonMapper(),
            CalciteTests.DRUID_SCHEMA_NAME
        )
    ),
    smallFrameConfig,
    new ErrorHandler(new ServerConfig()),
    injector
) {
@Override
public Frame fetch(final StatementHandle statement, final long offset, final int fetchMaxRowCount) throws NoSuchStatementException, MissingResultsException {
// overriding fetch allows us to track how many frames are processed after the first frame
Frame frame = super.fetch(statement, offset, fetchMaxRowCount);
frames.add(frame);
return frame;
}
};
final AbstractAvaticaHandler handler = this.getAvaticaHandler(smallFrameDruidMeta);
final int port = ThreadLocalRandom.current().nextInt(9999) + 20000;
Server smallFrameServer = new Server(new InetSocketAddress("127.0.0.1", port));
smallFrameServer.setHandler(handler);
smallFrameServer.start();
String smallFrameUrl = this.getJdbcConnectionString(port);
Connection smallFrameClient = DriverManager.getConnection(smallFrameUrl, "regularUser", "druid");
final ResultSet resultSet = smallFrameClient.createStatement().executeQuery("SELECT dim1 FROM druid.foo");
List<Map<String, Object>> rows = getRows(resultSet);
// 6 rows with maxRowsPerFrame = 2 means 3 frames; the first frame is returned
// with the execute response, so the fetch override only sees the remaining 2
Assert.assertEquals(2, frames.size());
Assert.assertEquals(
    ImmutableList.of(
        ImmutableMap.of("dim1", ""),
        ImmutableMap.of("dim1", "10.1"),
        ImmutableMap.of("dim1", "2"),
        ImmutableMap.of("dim1", "1"),
        ImmutableMap.of("dim1", "def"),
        ImmutableMap.of("dim1", "abc")
    ),
    rows
);
}
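A quick way to sanity-check the assertion above: Avatica delivers the first frame with the execute response, so the overridden fetch only observes later frames. A minimal standalone sketch of that arithmetic (the class and helper names are ours, not part of Druid or Avatica):
class FrameMath {
    // Frames beyond the first are served by fetch(); the first frame rides
    // along with the execute response, so the override never sees it.
    static int expectedFetchCalls(int totalRows, int maxRowsPerFrame) {
        int totalFrames = (totalRows + maxRowsPerFrame - 1) / maxRowsPerFrame; // ceiling division
        return Math.max(0, totalFrames - 1);
    }

    public static void main(String[] args) {
        // druid.foo has 6 rows and maxRowsPerFrame is 2: 3 frames, 2 fetch() calls
        System.out.println(expectedFetchCalls(6, 2)); // prints 2
    }
}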
Use of org.apache.druid.math.expr.ExprMacroTable in project druid by druid-io, from class DruidAvaticaHandlerTest, method testMinRowsPerFrame.
@Test
public void testMinRowsPerFrame() throws Exception {
final int minFetchSize = 1000;
final AvaticaServerConfig smallFrameConfig = new AvaticaServerConfig() {
@Override
public int getMaxConnections() {
return 2;
}
@Override
public int getMaxStatementsPerConnection() {
return 4;
}
@Override
public int getMinRowsPerFrame() {
return minFetchSize;
}
};
final PlannerConfig plannerConfig = new PlannerConfig();
final DruidOperatorTable operatorTable = CalciteTests.createOperatorTable();
final ExprMacroTable macroTable = CalciteTests.createExprMacroTable();
final List<Meta.Frame> frames = new ArrayList<>();
DruidSchemaCatalog rootSchema = CalciteTests.createMockRootSchema(conglomerate, walker, plannerConfig, AuthTestUtils.TEST_AUTHORIZER_MAPPER);
DruidMeta smallFrameDruidMeta = new DruidMeta(
    CalciteTests.createSqlLifecycleFactory(
        new PlannerFactory(
            rootSchema,
            CalciteTests.createMockQueryMakerFactory(walker, conglomerate),
            operatorTable,
            macroTable,
            plannerConfig,
            AuthTestUtils.TEST_AUTHORIZER_MAPPER,
            CalciteTests.getJsonMapper(),
            CalciteTests.DRUID_SCHEMA_NAME
        )
    ),
    smallFrameConfig,
    new ErrorHandler(new ServerConfig()),
    injector
) {
@Override
public Frame fetch(final StatementHandle statement, final long offset, final int fetchMaxRowCount) throws NoSuchStatementException, MissingResultsException {
// overriding fetch allows us to track how many frames are processed after the first frame, and also fetch size
Assert.assertEquals(minFetchSize, fetchMaxRowCount);
Frame frame = super.fetch(statement, offset, fetchMaxRowCount);
frames.add(frame);
return frame;
}
};
final AbstractAvaticaHandler handler = this.getAvaticaHandler(smallFrameDruidMeta);
final int port = ThreadLocalRandom.current().nextInt(9999) + 20000;
Server smallFrameServer = new Server(new InetSocketAddress("127.0.0.1", port));
smallFrameServer.setHandler(handler);
smallFrameServer.start();
String smallFrameUrl = this.getJdbcConnectionString(port);
Connection smallFrameClient = DriverManager.getConnection(smallFrameUrl, "regularUser", "druid");
// use a prepared statement because avatica currently ignores fetchSize on the initial fetch of a Statement
PreparedStatement statement = smallFrameClient.prepareStatement("SELECT dim1 FROM druid.foo");
// set a fetch size below the minimum configured threshold
statement.setFetchSize(2);
final ResultSet resultSet = statement.executeQuery();
List<Map<String, Object>> rows = getRows(resultSet);
// expect minimum threshold to be used, which should be enough to do this all in first fetch
Assert.assertEquals(0, frames.size());
Assert.assertEquals(
    ImmutableList.of(
        ImmutableMap.of("dim1", ""),
        ImmutableMap.of("dim1", "10.1"),
        ImmutableMap.of("dim1", "2"),
        ImmutableMap.of("dim1", "1"),
        ImmutableMap.of("dim1", "def"),
        ImmutableMap.of("dim1", "abc")
    ),
    rows
);
}
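The contract this test exercises can be summarized as a clamp: a client-requested fetch size below the configured minimum is raised to the minimum, so all six rows fit in the first frame and the fetch override is never invoked. A minimal sketch of that reading (the helper name is ours; the real clamping happens inside DruidMeta):
class FetchSizeClamp {
    static int effectiveFetchSize(int requested, int minRowsPerFrame) {
        return Math.max(requested, minRowsPerFrame);
    }

    public static void main(String[] args) {
        // statement.setFetchSize(2) with minRowsPerFrame = 1000 -> 1000 rows per frame,
        // which is why the test asserts fetchMaxRowCount == minFetchSize and expects 0 frames
        System.out.println(effectiveFetchSize(2, 1000)); // prints 1000
    }
}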
Use of org.apache.druid.math.expr.ExprMacroTable in project druid by druid-io, from class StringSqlAggregator, method toDruidAggregation.
@Nullable
@Override
public Aggregation toDruidAggregation(
    PlannerContext plannerContext,
    RowSignature rowSignature,
    VirtualColumnRegistry virtualColumnRegistry,
    RexBuilder rexBuilder,
    String name,
    AggregateCall aggregateCall,
    Project project,
    List<Aggregation> existingAggregations,
    boolean finalizeAggregations
) {
final List<DruidExpression> arguments = aggregateCall.getArgList()
    .stream()
    .map(i -> Expressions.fromFieldAccess(rowSignature, project, i))
    .map(rexNode -> Expressions.toDruidExpression(plannerContext, rowSignature, rexNode))
    .collect(Collectors.toList());
if (arguments.stream().anyMatch(Objects::isNull)) {
return null;
}
RexNode separatorNode = Expressions.fromFieldAccess(rowSignature, project, aggregateCall.getArgList().get(1));
if (!separatorNode.isA(SqlKind.LITERAL)) {
// separator must be a literal
return null;
}
String separator = RexLiteral.stringValue(separatorNode);
if (separator == null) {
// separator must not be null
return null;
}
Integer maxSizeBytes = null;
if (arguments.size() > 2) {
RexNode maxBytes = Expressions.fromFieldAccess(rowSignature, project, aggregateCall.getArgList().get(2));
if (!maxBytes.isA(SqlKind.LITERAL)) {
// maxBytes must be a literal
return null;
}
maxSizeBytes = ((Number) RexLiteral.value(maxBytes)).intValue();
}
final DruidExpression arg = arguments.get(0);
final ExprMacroTable macroTable = plannerContext.getExprMacroTable();
final String initialvalue = "[]";
final ColumnType elementType = ColumnType.STRING;
final String fieldName;
if (arg.isDirectColumnAccess()) {
fieldName = arg.getDirectColumn();
} else {
fieldName = virtualColumnRegistry.getOrCreateVirtualColumnForExpression(arg, elementType);
}
final String finalizer = StringUtils.format("if(array_length(o) == 0, null, array_to_string(o, '%s'))", separator);
final NotDimFilter dimFilter = new NotDimFilter(new SelectorDimFilter(fieldName, null, null));
if (aggregateCall.isDistinct()) {
// string_agg ignores nulls
return Aggregation.create(
    new FilteredAggregatorFactory(
        new ExpressionLambdaAggregatorFactory(
            name,
            ImmutableSet.of(fieldName),
            null,
            initialvalue,
            null,
            true,
            false,
            false,
            StringUtils.format("array_set_add(\"__acc\", \"%s\")", fieldName),
            StringUtils.format("array_set_add_all(\"__acc\", \"%s\")", name),
            null,
            finalizer,
            maxSizeBytes != null ? new HumanReadableBytes(maxSizeBytes) : null,
            macroTable
        ),
        dimFilter
    )
);
} else {
// string_agg ignores nulls
return Aggregation.create(
    new FilteredAggregatorFactory(
        new ExpressionLambdaAggregatorFactory(
            name,
            ImmutableSet.of(fieldName),
            null,
            initialvalue,
            null,
            true,
            false,
            false,
            StringUtils.format("array_append(\"__acc\", \"%s\")", fieldName),
            StringUtils.format("array_concat(\"__acc\", \"%s\")", name),
            null,
            finalizer,
            maxSizeBytes != null ? new HumanReadableBytes(maxSizeBytes) : null,
            macroTable
        ),
        dimFilter
    )
);
}
}
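To see what this translation actually produces, the snippet below reconstructs the fold, combine, and finalizer expression strings for an example column and separator, using the same format strings as the non-DISTINCT branch above (the column name dim1, aggregator name myAgg, and the separator are example inputs, not values taken from the source):
class StringAggExpressions {
    public static void main(String[] args) {
        String fieldName = "dim1";   // example column
        String name = "myAgg";       // example aggregator name
        String separator = ",";      // example separator literal
        // fold: append each not-null row value to the accumulator array
        String fold = String.format("array_append(\"__acc\", \"%s\")", fieldName);
        // combine: concatenate per-segment accumulator arrays
        String combine = String.format("array_concat(\"__acc\", \"%s\")", name);
        // finalize: join the accumulated array with the separator, or null if empty
        String finalizer = String.format(
            "if(array_length(o) == 0, null, array_to_string(o, '%s'))",
            separator
        );
        System.out.println(fold);    // array_append("__acc", "dim1")
        System.out.println(combine); // array_concat("__acc", "myAgg")
        System.out.println(finalizer);
    }
}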
Use of org.apache.druid.math.expr.ExprMacroTable in project druid by druid-io, from class BitwiseSqlAggregator, method toDruidAggregation.
@Nullable
@Override
public Aggregation toDruidAggregation(
    PlannerContext plannerContext,
    RowSignature rowSignature,
    VirtualColumnRegistry virtualColumnRegistry,
    RexBuilder rexBuilder,
    String name,
    AggregateCall aggregateCall,
    Project project,
    List<Aggregation> existingAggregations,
    boolean finalizeAggregations
) {
final List<DruidExpression> arguments = aggregateCall.getArgList()
    .stream()
    .map(i -> Expressions.fromFieldAccess(rowSignature, project, i))
    .map(rexNode -> Expressions.toDruidExpression(plannerContext, rowSignature, rexNode))
    .collect(Collectors.toList());
if (arguments.stream().anyMatch(Objects::isNull)) {
return null;
}
final DruidExpression arg = arguments.get(0);
final ExprMacroTable macroTable = plannerContext.getExprMacroTable();
final String fieldName;
if (arg.isDirectColumnAccess()) {
fieldName = arg.getDirectColumn();
} else {
fieldName = virtualColumnRegistry.getOrCreateVirtualColumnForExpression(arg, ColumnType.LONG);
}
return Aggregation.create(
    new FilteredAggregatorFactory(
        new ExpressionLambdaAggregatorFactory(
            name,
            ImmutableSet.of(fieldName),
            null,
            "0",
            null,
            null,
            false,
            false,
            StringUtils.format("%s(\"__acc\", \"%s\")", op.getDruidFunction(), fieldName),
            null,
            null,
            null,
            null,
            macroTable
        ),
        new NotDimFilter(new SelectorDimFilter(fieldName, null, null))
    )
);
}
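The fold expression this builds is a single Druid expression function applied to the accumulator and the input column, with "0" as the initial accumulator and a not-null filter wrapped around the whole aggregator. A small sketch of the resulting string (we assume op.getDruidFunction() returns a Druid expression function name such as bitwiseAnd for BIT_AND; check BitwiseSqlAggregator.Op for the exact names, and the column name is an example):
class BitwiseFold {
    public static void main(String[] args) {
        String druidFunction = "bitwiseAnd"; // assumed result of op.getDruidFunction() for BIT_AND
        String fieldName = "longCol";        // example LONG column
        String fold = String.format("%s(\"__acc\", \"%s\")", druidFunction, fieldName);
        System.out.println(fold); // bitwiseAnd("__acc", "longCol")
    }
}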
Use of org.apache.druid.math.expr.ExprMacroTable in project druid by druid-io, from class ArraySqlAggregator, method toDruidAggregation.
@Nullable
@Override
public Aggregation toDruidAggregation(
    PlannerContext plannerContext,
    RowSignature rowSignature,
    VirtualColumnRegistry virtualColumnRegistry,
    RexBuilder rexBuilder,
    String name,
    AggregateCall aggregateCall,
    Project project,
    List<Aggregation> existingAggregations,
    boolean finalizeAggregations
) {
final List<RexNode> arguments = aggregateCall.getArgList()
    .stream()
    .map(i -> Expressions.fromFieldAccess(rowSignature, project, i))
    .collect(Collectors.toList());
Integer maxSizeBytes = null;
if (arguments.size() > 1) {
RexNode maxBytes = arguments.get(1);
if (!maxBytes.isA(SqlKind.LITERAL)) {
// maxBytes must be a literal
return null;
}
maxSizeBytes = ((Number) RexLiteral.value(maxBytes)).intValue();
}
final DruidExpression arg = Expressions.toDruidExpression(plannerContext, rowSignature, arguments.get(0));
if (arg == null) {
// can't translate argument
return null;
}
final ExprMacroTable macroTable = plannerContext.getExprMacroTable();
final String fieldName;
final String initialvalue;
final ColumnType druidType = Calcites.getValueTypeForRelDataTypeFull(aggregateCall.getType());
final ColumnType elementType;
if (druidType == null || !druidType.isArray()) {
initialvalue = "[]";
elementType = ColumnType.STRING;
} else {
initialvalue = ExpressionType.fromColumnTypeStrict(druidType).asTypeString() + "[]";
elementType = (ColumnType) druidType.getElementType();
}
if (arg.isDirectColumnAccess()) {
fieldName = arg.getDirectColumn();
} else {
fieldName = virtualColumnRegistry.getOrCreateVirtualColumnForExpression(arg, elementType);
}
if (aggregateCall.isDistinct()) {
return Aggregation.create(
    new ExpressionLambdaAggregatorFactory(
        name,
        ImmutableSet.of(fieldName),
        null,
        initialvalue,
        null,
        true,
        true,
        false,
        StringUtils.format("array_set_add(\"__acc\", \"%s\")", fieldName),
        StringUtils.format("array_set_add_all(\"__acc\", \"%s\")", name),
        null,
        null,
        maxSizeBytes != null ? new HumanReadableBytes(maxSizeBytes) : null,
        macroTable
    )
);
} else {
return Aggregation.create(
    new ExpressionLambdaAggregatorFactory(
        name,
        ImmutableSet.of(fieldName),
        null,
        initialvalue,
        null,
        true,
        true,
        false,
        StringUtils.format("array_append(\"__acc\", \"%s\")", fieldName),
        StringUtils.format("array_concat(\"__acc\", \"%s\")", name),
        null,
        null,
        maxSizeBytes != null ? new HumanReadableBytes(maxSizeBytes) : null,
        macroTable
    )
);
}
}
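The branch that picks the initial accumulator is worth spelling out: when the output type resolves to a known array type, the empty-array literal is typed; otherwise the code falls back to an untyped [] with STRING elements. A sketch of that decision (the typeString argument stands in for ExpressionType.fromColumnTypeStrict(druidType).asTypeString(), whose exact output we don't reproduce here; the sample value is illustrative):
class ArrayAggInitialValue {
    static String initialValue(boolean isKnownArrayType, String typeString) {
        // mirrors the if/else above: typed empty-array literal when possible,
        // untyped "[]" (with STRING elements) as the fallback
        return isKnownArrayType ? typeString + "[]" : "[]";
    }

    public static void main(String[] args) {
        System.out.println(initialValue(false, null));         // []
        System.out.println(initialValue(true, "ARRAY<LONG>")); // ARRAY<LONG>[]
    }
}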