Use of io.druid.query.select.PagingSpec in project druid by druid-io.
From the class CachingClusteredClientTest, method testSelectCaching:
@Test
public void testSelectCaching() throws Exception {
  final Set<String> dimensions = Sets.<String>newHashSet("a");
  final Set<String> metrics = Sets.<String>newHashSet("rows");
  Druids.SelectQueryBuilder builder = Druids.newSelectQueryBuilder()
      .dataSource(DATA_SOURCE)
      .intervals(SEG_SPEC)
      .filters(DIM_FILTER)
      .granularity(GRANULARITY)
      .dimensions(Arrays.asList("a"))
      .metrics(Arrays.asList("rows"))
      .pagingSpec(new PagingSpec(null, 3))
      .context(CONTEXT);
  testQueryCaching(
      client,
      builder.build(),
      new Interval("2011-01-01/2011-01-02"),
      makeSelectResults(
          dimensions, metrics,
          new DateTime("2011-01-01"), ImmutableMap.of("a", "b", "rows", 1)
      ),
      new Interval("2011-01-02/2011-01-03"),
      makeSelectResults(
          dimensions, metrics,
          new DateTime("2011-01-02"), ImmutableMap.of("a", "c", "rows", 5)
      ),
      new Interval("2011-01-05/2011-01-10"),
      makeSelectResults(
          dimensions, metrics,
          new DateTime("2011-01-05"), ImmutableMap.of("a", "d", "rows", 5),
          new DateTime("2011-01-06"), ImmutableMap.of("a", "e", "rows", 6),
          new DateTime("2011-01-07"), ImmutableMap.of("a", "f", "rows", 7),
          new DateTime("2011-01-08"), ImmutableMap.of("a", "g", "rows", 8),
          new DateTime("2011-01-09"), ImmutableMap.of("a", "h", "rows", 9)
      ),
      new Interval("2011-01-05/2011-01-10"),
      makeSelectResults(
          dimensions, metrics,
          new DateTime("2011-01-05T01"), ImmutableMap.of("a", "d", "rows", 5),
          new DateTime("2011-01-06T01"), ImmutableMap.of("a", "e", "rows", 6),
          new DateTime("2011-01-07T01"), ImmutableMap.of("a", "f", "rows", 7),
          new DateTime("2011-01-08T01"), ImmutableMap.of("a", "g", "rows", 8),
          new DateTime("2011-01-09T01"), ImmutableMap.of("a", "h", "rows", 9)
      )
  );
  QueryRunner runner = new FinalizeResultsQueryRunner(
      client,
      new SelectQueryQueryToolChest(
          jsonMapper,
          QueryRunnerTestHelper.NoopIntervalChunkingQueryRunnerDecorator(),
          selectConfigSupplier
      )
  );
  HashMap<String, Object> context = new HashMap<String, Object>();
  TestHelper.assertExpectedResults(
      makeSelectResults(
          dimensions, metrics,
          new DateTime("2011-01-01"), ImmutableMap.of("a", "b", "rows", 1),
          new DateTime("2011-01-02"), ImmutableMap.of("a", "c", "rows", 5),
          new DateTime("2011-01-05"), ImmutableMap.of("a", "d", "rows", 5),
          new DateTime("2011-01-05T01"), ImmutableMap.of("a", "d", "rows", 5),
          new DateTime("2011-01-06"), ImmutableMap.of("a", "e", "rows", 6),
          new DateTime("2011-01-06T01"), ImmutableMap.of("a", "e", "rows", 6),
          new DateTime("2011-01-07"), ImmutableMap.of("a", "f", "rows", 7),
          new DateTime("2011-01-07T01"), ImmutableMap.of("a", "f", "rows", 7),
          new DateTime("2011-01-08"), ImmutableMap.of("a", "g", "rows", 8),
          new DateTime("2011-01-08T01"), ImmutableMap.of("a", "g", "rows", 8),
          new DateTime("2011-01-09"), ImmutableMap.of("a", "h", "rows", 9),
          new DateTime("2011-01-09T01"), ImmutableMap.of("a", "h", "rows", 9)
      ),
      runner.run(builder.intervals("2011-01-01/2011-01-10").build(), context)
  );
}
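In the test above, new PagingSpec(null, 3) requests the first page: a null identifier map means "start from the beginning of every segment", and 3 is the per-segment row threshold. A minimal sketch of the two common ways to build a spec, where the segment identifier string is a hypothetical placeholder:

// First page: no paging identifiers yet, up to 3 rows per segment.
PagingSpec firstPage = new PagingSpec(null, 3);
// Subsequent page: resume from recorded per-segment offsets
// ("segment_2011-01-01" is a hypothetical segment identifier).
Map<String, Integer> offsets = ImmutableMap.of("segment_2011-01-01", 2);
PagingSpec nextPage = new PagingSpec(offsets, 3);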
Use of io.druid.query.select.PagingSpec in project druid by druid-io.
From the class CachingClusteredClientTest, method testSelectCachingRenamedOutputName:
@Test
public void testSelectCachingRenamedOutputName() throws Exception {
  final Set<String> dimensions = Sets.<String>newHashSet("a");
  final Set<String> metrics = Sets.<String>newHashSet("rows");
  Druids.SelectQueryBuilder builder = Druids.newSelectQueryBuilder()
      .dataSource(DATA_SOURCE)
      .intervals(SEG_SPEC)
      .filters(DIM_FILTER)
      .granularity(GRANULARITY)
      .dimensions(Arrays.asList("a"))
      .metrics(Arrays.asList("rows"))
      .pagingSpec(new PagingSpec(null, 3))
      .context(CONTEXT);
  testQueryCaching(
      client,
      builder.build(),
      new Interval("2011-01-01/2011-01-02"),
      makeSelectResults(
          dimensions, metrics,
          new DateTime("2011-01-01"), ImmutableMap.of("a", "b", "rows", 1)
      ),
      new Interval("2011-01-02/2011-01-03"),
      makeSelectResults(
          dimensions, metrics,
          new DateTime("2011-01-02"), ImmutableMap.of("a", "c", "rows", 5)
      ),
      new Interval("2011-01-05/2011-01-10"),
      makeSelectResults(
          dimensions, metrics,
          new DateTime("2011-01-05"), ImmutableMap.of("a", "d", "rows", 5),
          new DateTime("2011-01-06"), ImmutableMap.of("a", "e", "rows", 6),
          new DateTime("2011-01-07"), ImmutableMap.of("a", "f", "rows", 7),
          new DateTime("2011-01-08"), ImmutableMap.of("a", "g", "rows", 8),
          new DateTime("2011-01-09"), ImmutableMap.of("a", "h", "rows", 9)
      ),
      new Interval("2011-01-05/2011-01-10"),
      makeSelectResults(
          dimensions, metrics,
          new DateTime("2011-01-05T01"), ImmutableMap.of("a", "d", "rows", 5),
          new DateTime("2011-01-06T01"), ImmutableMap.of("a", "e", "rows", 6),
          new DateTime("2011-01-07T01"), ImmutableMap.of("a", "f", "rows", 7),
          new DateTime("2011-01-08T01"), ImmutableMap.of("a", "g", "rows", 8),
          new DateTime("2011-01-09T01"), ImmutableMap.of("a", "h", "rows", 9)
      )
  );
  QueryRunner runner = new FinalizeResultsQueryRunner(
      client,
      new SelectQueryQueryToolChest(
          jsonMapper,
          QueryRunnerTestHelper.NoopIntervalChunkingQueryRunnerDecorator(),
          selectConfigSupplier
      )
  );
  HashMap<String, Object> context = new HashMap<String, Object>();
  TestHelper.assertExpectedResults(
      makeSelectResults(
          dimensions, metrics,
          new DateTime("2011-01-01"), ImmutableMap.of("a", "b", "rows", 1),
          new DateTime("2011-01-02"), ImmutableMap.of("a", "c", "rows", 5),
          new DateTime("2011-01-05"), ImmutableMap.of("a", "d", "rows", 5),
          new DateTime("2011-01-05T01"), ImmutableMap.of("a", "d", "rows", 5),
          new DateTime("2011-01-06"), ImmutableMap.of("a", "e", "rows", 6),
          new DateTime("2011-01-06T01"), ImmutableMap.of("a", "e", "rows", 6),
          new DateTime("2011-01-07"), ImmutableMap.of("a", "f", "rows", 7),
          new DateTime("2011-01-07T01"), ImmutableMap.of("a", "f", "rows", 7),
          new DateTime("2011-01-08"), ImmutableMap.of("a", "g", "rows", 8),
          new DateTime("2011-01-08T01"), ImmutableMap.of("a", "g", "rows", 8),
          new DateTime("2011-01-09"), ImmutableMap.of("a", "h", "rows", 9),
          new DateTime("2011-01-09T01"), ImmutableMap.of("a", "h", "rows", 9)
      ),
      runner.run(builder.intervals("2011-01-01/2011-01-10").build(), context)
  );
  TestHelper.assertExpectedResults(
      makeSelectResults(
          dimensions, metrics,
          new DateTime("2011-01-01"), ImmutableMap.of("a2", "b", "rows", 1),
          new DateTime("2011-01-02"), ImmutableMap.of("a2", "c", "rows", 5),
          new DateTime("2011-01-05"), ImmutableMap.of("a2", "d", "rows", 5),
          new DateTime("2011-01-05T01"), ImmutableMap.of("a2", "d", "rows", 5),
          new DateTime("2011-01-06"), ImmutableMap.of("a2", "e", "rows", 6),
          new DateTime("2011-01-06T01"), ImmutableMap.of("a2", "e", "rows", 6),
          new DateTime("2011-01-07"), ImmutableMap.of("a2", "f", "rows", 7),
          new DateTime("2011-01-07T01"), ImmutableMap.of("a2", "f", "rows", 7),
          new DateTime("2011-01-08"), ImmutableMap.of("a2", "g", "rows", 8),
          new DateTime("2011-01-08T01"), ImmutableMap.of("a2", "g", "rows", 8),
          new DateTime("2011-01-09"), ImmutableMap.of("a2", "h", "rows", 9),
          new DateTime("2011-01-09T01"), ImmutableMap.of("a2", "h", "rows", 9)
      ),
      runner.run(
          builder.intervals("2011-01-01/2011-01-10")
                 .dimensionSpecs(Lists.<DimensionSpec>newArrayList(new DefaultDimensionSpec("a", "a2")))
                 .build(),
          context
      )
  );
}
Use of io.druid.query.select.PagingSpec in project druid by druid-io.
From the class SelectBenchmark, method incrementQueryPagination:
// Don't run this benchmark with a query that doesn't use QueryGranularities.ALL;
// this pagination function probably doesn't work correctly in that case.
private SelectQuery incrementQueryPagination(SelectQuery query, SelectResultValue prevResult) {
  Map<String, Integer> pagingIdentifiers = prevResult.getPagingIdentifiers();
  Map<String, Integer> newPagingIdentifiers = new HashMap<>();
  for (Map.Entry<String, Integer> entry : pagingIdentifiers.entrySet()) {
    // Advance each segment's offset past the last row returned on the previous page.
    newPagingIdentifiers.put(entry.getKey(), entry.getValue() + 1);
  }
  return query.withPagingSpec(new PagingSpec(newPagingIdentifiers, pagingThreshold));
}
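A driving loop for this helper might look like the hedged sketch below; runOneQuery is a hypothetical stand-in for however the benchmark actually executes the SelectQuery. The +1 works because each returned paging identifier points at the last row read in that segment:

// Hedged sketch: page through a Select query until a page comes back empty.
SelectQuery query = baseQuery; // assumed to start with PagingSpec(null, pagingThreshold)
while (true) {
  Result<SelectResultValue> result = runOneQuery(query); // hypothetical executor
  if (result.getValue().getEvents().isEmpty()) {
    break; // the previous page was the last one
  }
  query = incrementQueryPagination(query, result.getValue());
}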
Use of io.druid.query.select.PagingSpec in project druid by druid-io.
From the class QueryMaker, method executeSelect:
private Sequence<Object[]> executeSelect(final DruidQueryBuilder queryBuilder, final SelectQuery baseQuery) {
  Preconditions.checkState(queryBuilder.getGrouping() == null, "grouping must be null");
  final List<RelDataTypeField> fieldList = queryBuilder.getRowType().getFieldList();
  final Integer limit = queryBuilder.getLimitSpec() != null ? queryBuilder.getLimitSpec().getLimit() : null;
  // Select is paginated; we may need to issue multiple queries to read all pages.
  final Sequence<Sequence<Object[]>> sequenceOfSequences = Sequences.simple(
      new Iterable<Sequence<Object[]>>() {
        @Override
        public Iterator<Sequence<Object[]>> iterator() {
          final AtomicBoolean morePages = new AtomicBoolean(true);
          final AtomicReference<Map<String, Integer>> pagingIdentifiers = new AtomicReference<>();
          final AtomicLong rowsRead = new AtomicLong();
          // Each Sequence<Object[]> is one page.
          return new Iterator<Sequence<Object[]>>() {
            @Override
            public boolean hasNext() {
              return morePages.get();
            }

            @Override
            public Sequence<Object[]> next() {
              final SelectQuery queryWithPagination = baseQuery.withPagingSpec(
                  new PagingSpec(pagingIdentifiers.get(), plannerContext.getPlannerConfig().getSelectThreshold(), true)
              );
              Hook.QUERY_PLAN.run(queryWithPagination);
              morePages.set(false);
              final AtomicBoolean gotResult = new AtomicBoolean();
              return Sequences.concat(
                  Sequences.map(
                      queryWithPagination.run(walker, Maps.<String, Object>newHashMap()),
                      new Function<Result<SelectResultValue>, Sequence<Object[]>>() {
                        @Override
                        public Sequence<Object[]> apply(final Result<SelectResultValue> result) {
                          if (!gotResult.compareAndSet(false, true)) {
                            throw new ISE("WTF?! Expected single result from Select query but got multiple!");
                          }
                          pagingIdentifiers.set(result.getValue().getPagingIdentifiers());
                          final List<Object[]> retVals = new ArrayList<>();
                          for (EventHolder holder : result.getValue().getEvents()) {
                            morePages.set(true);
                            final Map<String, Object> map = holder.getEvent();
                            final Object[] retVal = new Object[fieldList.size()];
                            for (RelDataTypeField field : fieldList) {
                              final String outputName = queryBuilder.getRowOrder().get(field.getIndex());
                              if (outputName.equals(Column.TIME_COLUMN_NAME)) {
                                retVal[field.getIndex()] = coerce(holder.getTimestamp().getMillis(), field.getType().getSqlTypeName());
                              } else {
                                retVal[field.getIndex()] = coerce(map.get(outputName), field.getType().getSqlTypeName());
                              }
                            }
                            if (limit == null || rowsRead.incrementAndGet() <= limit) {
                              retVals.add(retVal);
                            } else {
                              morePages.set(false);
                              return Sequences.simple(retVals);
                            }
                          }
                          return Sequences.simple(retVals);
                        }
                      }
                  )
              );
            }

            @Override
            public void remove() {
              throw new UnsupportedOperationException();
            }
          };
        }
      }
  );
  return Sequences.concat(sequenceOfSequences);
}
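Stripped of the Sequence plumbing, the paging protocol in executeSelect reduces to the hedged sketch below (runPage is a hypothetical executor, selectThreshold a stand-in for the planner config value): each iteration re-issues the query with the previous page's identifiers and fromNext=true, and stops once a page yields no events.

Map<String, Integer> identifiers = null; // null means "first page"
while (true) {
  SelectQuery page = baseQuery.withPagingSpec(
      new PagingSpec(identifiers, selectThreshold, true)); // fromNext=true: start after these offsets
  Result<SelectResultValue> result = runPage(page); // hypothetical executor
  if (result.getValue().getEvents().isEmpty()) {
    break;
  }
  identifiers = result.getValue().getPagingIdentifiers();
}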
Use of io.druid.query.select.PagingSpec in project druid by druid-io.
From the class DruidQueryBuilder, method toSelectQuery:
/**
 * Return this query as a Select query, or null if this query is not compatible with Select.
 *
 * @param dataSource         data source to query
 * @param sourceRowSignature row signature of the dataSource
 * @param context            query context
 *
 * @return query or null
 */
public SelectQuery toSelectQuery(
    final DataSource dataSource,
    final RowSignature sourceRowSignature,
    final Map<String, Object> context
) {
  if (grouping != null) {
    return null;
  }
  final Filtration filtration = Filtration.create(filter).optimize(sourceRowSignature);
  final boolean descending;
  if (limitSpec != null) {
    // Safe to assume limitSpec has zero or one entry; DruidSelectSortRule wouldn't push in anything else.
    if (limitSpec.getColumns().size() > 0) {
      final OrderByColumnSpec orderBy = Iterables.getOnlyElement(limitSpec.getColumns());
      if (!orderBy.getDimension().equals(Column.TIME_COLUMN_NAME)) {
        throw new ISE("WTF?! Got select with non-time orderBy[%s]", orderBy);
      }
      descending = orderBy.getDirection() == OrderByColumnSpec.Direction.DESCENDING;
    } else {
      descending = false;
    }
  } else {
    descending = false;
  }
  return new SelectQuery(
      dataSource,
      filtration.getQuerySegmentSpec(),
      descending,
      filtration.getDimFilter(),
      Granularities.ALL,
      selectProjection != null ? selectProjection.getDimensions() : ImmutableList.<DimensionSpec>of(),
      selectProjection != null ? selectProjection.getMetrics() : ImmutableList.<String>of(),
      null,
      new PagingSpec(null, 0), /* dummy -- will be replaced */
      context
  );
}
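The PagingSpec(null, 0) placeholder is never meant to run as-is; a caller swaps in a real spec before execution, as QueryMaker.executeSelect above does. A one-line sketch of that replacement, with selectThreshold standing in for the planner config value:

SelectQuery runnable = selectQuery.withPagingSpec(new PagingSpec(null, selectThreshold, true));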