Use of org.apache.druid.java.util.common.guava.Sequence in project druid by druid-io.
From the class DefaultLimitSpecTest, method testBuildWithExplicitOrder:
@Test
public void testBuildWithExplicitOrder() {
  DefaultLimitSpec limitSpec = new DefaultLimitSpec(
      ImmutableList.of(new OrderByColumnSpec("k1", OrderByColumnSpec.Direction.ASCENDING)),
      2
  );
  Function<Sequence<ResultRow>, Sequence<ResultRow>> limitFn = limitSpec.build(
      GroupByQuery.builder()
                  .setDataSource("dummy")
                  .setInterval("1000/3000")
                  .setDimensions(new DefaultDimensionSpec("k1", "k1"))
                  .setAggregatorSpecs(new LongSumAggregatorFactory("k2", "k2"))
                  .setPostAggregatorSpecs(ImmutableList.of(new ConstantPostAggregator("k3", 1L)))
                  .setGranularity(Granularities.NONE)
                  .build()
  );
  Assert.assertEquals(
      ImmutableList.of(testRowsList.get(0), testRowsList.get(1)),
      limitFn.apply(Sequences.simple(testRowsList)).toList()
  );
}
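The limit function asserted above is built from plain Sequence primitives. A minimal sketch of those primitives, assuming the same imports as the snippet (the list of strings is hypothetical, not the test's ResultRows):

// Sequences.simple wraps an Iterable lazily; limit truncates; toList materializes.
List<String> rows = ImmutableList.of("a", "b", "c");
Sequence<String> seq = Sequences.simple(rows);
List<String> firstTwo = seq.limit(2).toList(); // ["a", "b"]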
Use of org.apache.druid.java.util.common.guava.Sequence in project druid by druid-io.
From the class DefaultLimitSpecTest, method testWithSortByDimsFirst:
@Test
public void testWithSortByDimsFirst() {
  DefaultLimitSpec limitSpec = new DefaultLimitSpec(
      ImmutableList.of(new OrderByColumnSpec("k1", OrderByColumnSpec.Direction.ASCENDING, StringComparators.NUMERIC)),
      2
  );
  Function<Sequence<ResultRow>, Sequence<ResultRow>> limitFn = limitSpec.build(
      GroupByQuery.builder()
                  .setDataSource("dummy")
                  .setInterval("1000/3000")
                  .setDimensions(new DefaultDimensionSpec("k1", "k1", ColumnType.DOUBLE))
                  .setGranularity(Granularities.NONE)
                  .overrideContext(ImmutableMap.of(GroupByQuery.CTX_KEY_SORT_BY_DIMS_FIRST, true))
                  .build()
  );
  Assert.assertEquals(
      ImmutableList.of(testRowsWithTimestampList.get(2), testRowsWithTimestampList.get(0)),
      limitFn.apply(Sequences.simple(testRowsWithTimestampList)).toList()
  );
}
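The StringComparators.NUMERIC argument is what makes the expected ordering numeric rather than lexicographic. A hedged illustration of the difference (the values are hypothetical):

// NUMERIC compares dimension values as numbers; LEXICOGRAPHIC compares them as strings.
int numeric = StringComparators.NUMERIC.compare("9", "10");             // negative: 9 < 10
int lexicographic = StringComparators.LEXICOGRAPHIC.compare("9", "10"); // positive: "9" > "10"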
Use of org.apache.druid.java.util.common.guava.Sequence in project druid by druid-io.
From the class QueryLifecycle, method execute:
/**
* Execute the query. Can only be called if the query has been authorized. Note that query logs and metrics will
* not be emitted automatically when the Sequence is fully iterated. It is the caller's responsibility to call
* {@link #emitLogsAndMetrics(Throwable, String, long)} to emit logs and metrics.
*
* @return result sequence and response context
*/
public QueryResponse execute() {
  transition(State.AUTHORIZED, State.EXECUTING);
  final ResponseContext responseContext = DirectDruidClient.makeResponseContextForQuery();
  final Sequence res = QueryPlus.wrap(baseQuery)
                                .withIdentity(authenticationResult.getIdentity())
                                .run(texasRanger, responseContext);
  return new QueryResponse(res == null ? Sequences.empty() : res, responseContext);
}
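As the Javadoc states, the caller must emit logs and metrics after consuming the sequence. A minimal sketch of that contract, assuming a QueryLifecycle instance and an accessor such as getResults() on QueryResponse (the variable names and the accessor are assumptions, not verified against this code):

QueryResponse response = queryLifecycle.execute();
Throwable error = null;
try {
  // Results stream only while the sequence is iterated; toList() drains it fully.
  List<?> results = response.getResults().toList();
} catch (Throwable t) {
  error = t;
  throw t;
} finally {
  // emitLogsAndMetrics(Throwable, String, long) per the Javadoc; remoteAddress
  // and bytesWritten are hypothetical values supplied by the caller.
  queryLifecycle.emitLogsAndMetrics(error, remoteAddress, bytesWritten);
}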
Use of org.apache.druid.java.util.common.guava.Sequence in project druid by druid-io.
From the class DumpSegment, method runDump:
private void runDump(final Injector injector, final QueryableIndex index) throws IOException {
  final ObjectMapper objectMapper = injector.getInstance(Key.get(ObjectMapper.class, Json.class));
  final QueryableIndexStorageAdapter adapter = new QueryableIndexStorageAdapter(index);
  final List<String> columnNames = getColumnsToInclude(index);
  final DimFilter filter = filterJson != null ? objectMapper.readValue(filterJson, DimFilter.class) : null;
  final Sequence<Cursor> cursors = adapter.makeCursors(
      Filters.toFilter(filter),
      index.getDataInterval().withChronology(ISOChronology.getInstanceUTC()),
      VirtualColumns.EMPTY,
      Granularities.ALL,
      false,
      null
  );
  withOutputStream(new Function<OutputStream, Object>() {
    @Override
    public Object apply(final OutputStream out) {
      // Each cursor is mapped to a side-effecting function that streams its rows to the output.
      final Sequence<Object> sequence = Sequences.map(cursors, new Function<Cursor, Object>() {
        @Override
        public Object apply(Cursor cursor) {
          ColumnSelectorFactory columnSelectorFactory = cursor.getColumnSelectorFactory();
          final List<BaseObjectColumnValueSelector> selectors =
              columnNames.stream()
                         .map(columnSelectorFactory::makeColumnValueSelector)
                         .collect(Collectors.toList());
          while (!cursor.isDone()) {
            final Map<String, Object> row = Maps.newLinkedHashMap();
            for (int i = 0; i < columnNames.size(); i++) {
              final String columnName = columnNames.get(i);
              final Object value = selectors.get(i).getObject();
              if (timeISO8601 && columnName.equals(ColumnHolder.TIME_COLUMN_NAME)) {
                row.put(columnName, new DateTime(value, DateTimeZone.UTC).toString());
              } else {
                row.put(columnName, value);
              }
            }
            try {
              out.write(objectMapper.writeValueAsBytes(row));
              out.write('\n');
            } catch (IOException e) {
              throw new RuntimeException(e);
            }
            cursor.advance();
          }
          return null;
        }
      });
      // The mapped sequence is consumed purely for its side effects (the writes above).
      evaluateSequenceForSideEffects(sequence);
      return null;
    }
  });
}
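Everything inside the mapping function, including the writes, runs only when the sequence is consumed; evaluateSequenceForSideEffects exists precisely to drive that consumption while discarding the null results. A minimal sketch of the same laziness, assuming the same Sequences API as above (the integers and print statement are illustrative only):

// Nothing prints when the mapped sequence is defined: Sequences.map is lazy.
Sequence<Integer> mapped = Sequences.map(
    Sequences.simple(ImmutableList.of(1, 2, 3)),
    i -> {
      System.out.println("visiting " + i); // side effect happens only on consumption
      return i;
    }
);
// Consuming the sequence, here via accumulate, is what triggers the iteration.
Integer count = mapped.accumulate(0, (n, value) -> n + 1); // prints three lines; count == 3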
Use of org.apache.druid.java.util.common.guava.Sequence in project druid by druid-io.
From the class NestedQueryPushDownTest, method runNestedQueryWithForcePushDown:
private Sequence<ResultRow> runNestedQueryWithForcePushDown(GroupByQuery nestedQuery) {
  ResponseContext context = ResponseContext.createEmpty();
  QueryToolChest<ResultRow, GroupByQuery> toolChest = groupByFactory.getToolchest();
  GroupByQuery pushDownQuery = nestedQuery;
  QueryRunner<ResultRow> segment1Runner = new FinalizeResultsQueryRunner<ResultRow>(
      toolChest.mergeResults(groupByFactory.mergeRunners(executorService, getQueryRunnerForSegment1())),
      (QueryToolChest) toolChest
  );
  QueryRunner<ResultRow> segment2Runner = new FinalizeResultsQueryRunner<ResultRow>(
      toolChest.mergeResults(groupByFactory2.mergeRunners(executorService, getQueryRunnerForSegment2())),
      (QueryToolChest) toolChest
  );
  // Combine the two per-segment sequences into one, ordered by the query's result ordering.
  QueryRunner<ResultRow> queryRunnerForSegments = new FinalizeResultsQueryRunner<>(
      toolChest.mergeResults(
          (queryPlus, responseContext) -> Sequences
              .simple(ImmutableList.of(
                  Sequences.map(
                      segment1Runner.run(queryPlus, responseContext),
                      toolChest.makePreComputeManipulatorFn((GroupByQuery) queryPlus.getQuery(), MetricManipulatorFns.deserializing())
                  ),
                  Sequences.map(
                      segment2Runner.run(queryPlus, responseContext),
                      toolChest.makePreComputeManipulatorFn((GroupByQuery) queryPlus.getQuery(), MetricManipulatorFns.deserializing())
                  )
              ))
              .flatMerge(Function.identity(), queryPlus.getQuery().getResultOrdering())
      ),
      (QueryToolChest) toolChest
  );
  GroupByStrategy strategy = ((GroupByQueryRunnerFactory) groupByFactory).getStrategySelector().strategize(nestedQuery);
  // Historicals execute the query with the force-push-down flag set to false
  GroupByQuery queryWithPushDownDisabled = pushDownQuery.withOverriddenContext(
      ImmutableMap.of(GroupByQueryConfig.CTX_KEY_FORCE_PUSH_DOWN_NESTED_QUERY, false)
  );
  Sequence<ResultRow> pushDownQueryResults = strategy.mergeResults(queryRunnerForSegments, queryWithPushDownDisabled, context);
  return toolChest.mergeResults((queryPlus, responseContext) -> pushDownQueryResults)
                  .run(QueryPlus.wrap(nestedQuery), context);
}
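The flatMerge step above is what interleaves the two per-segment sequences by the query's result ordering; the rest of the runner plumbing only prepares its inputs. A standalone sketch of the same primitive, assuming Guava's Ordering is imported and the inner sequences are already sorted (the integer inputs are hypothetical):

// flatMerge performs an ordered merge across inner sequences that are
// individually sorted by the given ordering.
Sequence<Sequence<Integer>> parts = Sequences.simple(ImmutableList.of(
    Sequences.simple(ImmutableList.of(1, 3, 5)),
    Sequences.simple(ImmutableList.of(2, 4, 6))
));
List<Integer> merged = parts.flatMerge(Function.identity(), Ordering.natural()).toList();
// merged == [1, 2, 3, 4, 5, 6]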