Use of org.apache.druid.query.SegmentDescriptor in project druid by druid-io.
From the class BackgroundCachePopulatorTest, method testWrap.
/**
 * Method: wrap(final Sequence<T> sequence, final Function<T, CacheType> cacheFn,
 *         final Cache cache, final Cache.NamedKey cacheKey)
 */
@Test
public void testWrap()
{
  String cacheId = "segment";
  SegmentDescriptor segmentDescriptor = new SegmentDescriptor(Intervals.of("2011/2012"), "version", 0);
  CacheStrategy cacheStrategy = toolchest.getCacheStrategy(query);
  Cache.NamedKey cacheKey = CacheUtil.computeSegmentCacheKey(
      cacheId,
      segmentDescriptor,
      cacheStrategy.computeCacheKey(query)
  );
  Sequence res = this.backgroundCachePopulator.wrap(
      this.baseRunner.run(QueryPlus.wrap(query), ResponseContext.createEmpty()),
      (value) -> cacheStrategy.prepareForSegmentLevelCache().apply(value),
      cache,
      cacheKey
  );
  Assert.assertFalse("sequence must not be closed", closable.isClosed());
  Assert.assertNull("cache must be empty", cache.get(cacheKey));
  List results = res.toList();
  Assert.assertTrue(closable.isClosed());
  List<Result> expectedRes = makeTopNResults(false, OBJECTS);
  Assert.assertEquals(expectedRes.toString(), results.toString());
  Assert.assertEquals(5, results.size());
}
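The cache key above composes three parts: a cache id, a SegmentDescriptor (interval, version, partition number), and the query-specific key bytes. Here is a minimal sketch of the same composition outside the test harness; the class name and the queryCacheKey parameter are ours, standing in for CacheStrategy.computeCacheKey(query), and the import paths are assumed from the druid-server module.

// Hypothetical helper, not test code: queryCacheKey stands in for the
// bytes produced by CacheStrategy.computeCacheKey(query) in the test.
import org.apache.druid.client.CacheUtil;
import org.apache.druid.client.cache.Cache;
import org.apache.druid.java.util.common.Intervals;
import org.apache.druid.query.SegmentDescriptor;

public class SegmentCacheKeySketch
{
  public static Cache.NamedKey segmentKey(String cacheId, byte[] queryCacheKey)
  {
    // A SegmentDescriptor pins down one segment: time interval, version, partition number.
    SegmentDescriptor descriptor = new SegmentDescriptor(Intervals.of("2011/2012"), "version", 0);
    return CacheUtil.computeSegmentCacheKey(cacheId, descriptor, queryCacheKey);
  }
}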
Use of org.apache.druid.query.SegmentDescriptor in project druid by druid-io.
From the class BackgroundCachePopulatorTest, method testWrapOnFailure.
@Test
public void testWrapOnFailure()
{
  String cacheId = "segment";
  SegmentDescriptor segmentDescriptor = new SegmentDescriptor(Intervals.of("2011/2012"), "version", 0);
  CacheStrategy cacheStrategy = toolchest.getCacheStrategy(query);
  Cache.NamedKey cacheKey = CacheUtil.computeSegmentCacheKey(
      cacheId,
      segmentDescriptor,
      cacheStrategy.computeCacheKey(query)
  );
  Sequence res = this.backgroundCachePopulator.wrap(
      this.baseRunner.run(QueryPlus.wrap(query), ResponseContext.createEmpty()),
      (value) -> {
        throw new RuntimeException("Error");
      },
      cache,
      cacheKey
  );
  Assert.assertFalse("sequence must not be closed", closable.isClosed());
  Assert.assertNull("cache must be empty", cache.get(cacheKey));
  List results = res.toList();
  Assert.assertTrue(closable.isClosed());
  List<Result> expectedRes = makeTopNResults(false, OBJECTS);
  Assert.assertEquals(expectedRes.toString(), results.toString());
  Assert.assertEquals(5, results.size());
}
Use of org.apache.druid.query.SegmentDescriptor in project druid by druid-io.
From the class CachingClusteredClientTest, method testNoSegmentPruningForHashPartitionedSegments.
private void testNoSegmentPruningForHashPartitionedSegments(
    boolean enableSegmentPruning,
    @Nullable HashPartitionFunction partitionFunction,
    boolean useEmptyPartitionDimensions
)
{
  DimFilter filter = new AndDimFilter(
      new SelectorDimFilter("dim1", "a", null),
      new BoundDimFilter("dim2", "e", "zzz", true, true, false, null, StringComparators.LEXICOGRAPHIC),
      // Equivalent filter of dim3 below is InDimFilter("dim3", Arrays.asList("c"), null)
      new AndDimFilter(
          new InDimFilter("dim3", Arrays.asList("a", "c", "e", "g"), null),
          new BoundDimFilter("dim3", "aaa", "ddd", false, false, false, null, StringComparators.LEXICOGRAPHIC)
      )
  );
  final Map<String, Object> context = new HashMap<>(CONTEXT);
  context.put(QueryContexts.SECONDARY_PARTITION_PRUNING_KEY, enableSegmentPruning);
  final Druids.TimeseriesQueryBuilder builder = Druids.newTimeseriesQueryBuilder()
      .dataSource(DATA_SOURCE)
      .filters(filter)
      .granularity(GRANULARITY)
      .intervals(SEG_SPEC)
      .intervals("2011-01-05/2011-01-10")
      .aggregators(RENAMED_AGGS)
      .postAggregators(RENAMED_POST_AGGS)
      .context(context)
      .randomQueryId();
  TimeseriesQuery query = builder.build();
  QueryRunner runner = new FinalizeResultsQueryRunner(getDefaultQueryRunner(), new TimeseriesQueryQueryToolChest());
  final Interval interval1 = Intervals.of("2011-01-06/2011-01-07");
  final Interval interval2 = Intervals.of("2011-01-07/2011-01-08");
  final Interval interval3 = Intervals.of("2011-01-08/2011-01-09");
  final DruidServer lastServer = servers[random.nextInt(servers.length)];
  List<String> partitionDimensions = useEmptyPartitionDimensions ? ImmutableList.of() : ImmutableList.of("dim1");
  final int numPartitions1 = 6;
  for (int i = 0; i < numPartitions1; i++) {
    ServerSelector selector = makeMockHashBasedSelector(lastServer, partitionDimensions, partitionFunction, i, numPartitions1);
    timeline.add(interval1, "v", new NumberedPartitionChunk<>(i, numPartitions1, selector));
  }
  partitionDimensions = useEmptyPartitionDimensions ? ImmutableList.of() : ImmutableList.of("dim2");
  final int numPartitions2 = 3;
  for (int i = 0; i < numPartitions2; i++) {
    ServerSelector selector = makeMockHashBasedSelector(lastServer, partitionDimensions, partitionFunction, i, numPartitions2);
    timeline.add(interval2, "v", new NumberedPartitionChunk<>(i, numPartitions2, selector));
  }
  partitionDimensions = useEmptyPartitionDimensions ? ImmutableList.of() : ImmutableList.of("dim1", "dim3");
  final int numPartitions3 = 4;
  for (int i = 0; i < numPartitions3; i++) {
    ServerSelector selector = makeMockHashBasedSelector(lastServer, partitionDimensions, partitionFunction, i, numPartitions3);
    timeline.add(interval3, "v", new NumberedPartitionChunk<>(i, numPartitions3, selector));
  }
  final Capture<QueryPlus> capture = Capture.newInstance();
  final Capture<ResponseContext> contextCap = Capture.newInstance();
  QueryRunner mockRunner = EasyMock.createNiceMock(QueryRunner.class);
  EasyMock.expect(mockRunner.run(EasyMock.capture(capture), EasyMock.capture(contextCap)))
          .andReturn(Sequences.empty())
          .anyTimes();
  EasyMock.expect(serverView.getQueryRunner(lastServer)).andReturn(mockRunner).anyTimes();
  EasyMock.replay(serverView);
  EasyMock.replay(mockRunner);
  // Expected to read all segments
  Set<SegmentDescriptor> expectedDescriptors = new HashSet<>();
  IntStream.range(0, numPartitions1).forEach(i -> expectedDescriptors.add(new SegmentDescriptor(interval1, "v", i)));
  IntStream.range(0, numPartitions2).forEach(i -> expectedDescriptors.add(new SegmentDescriptor(interval2, "v", i)));
  IntStream.range(0, numPartitions3).forEach(i -> expectedDescriptors.add(new SegmentDescriptor(interval3, "v", i)));
  runner.run(QueryPlus.wrap(query)).toList();
  QuerySegmentSpec querySegmentSpec = ((TimeseriesQuery) capture.getValue().getQuery()).getQuerySegmentSpec();
  Assert.assertSame(MultipleSpecificSegmentSpec.class, querySegmentSpec.getClass());
  final Set<SegmentDescriptor> actualDescriptors = new HashSet<>(((MultipleSpecificSegmentSpec) querySegmentSpec).getDescriptors());
  Assert.assertEquals(expectedDescriptors, actualDescriptors);
}
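The three partition loops and the three IntStream blocks above repeat one pattern: one SegmentDescriptor per partition of an (interval, version) pair. A small sketch of that pattern in isolation; the helper name allPartitions is ours, not the test's.

// Sketch of the enumeration pattern used above; the helper name is ours.
import java.util.HashSet;
import java.util.Set;
import java.util.stream.IntStream;
import org.apache.druid.query.SegmentDescriptor;
import org.joda.time.Interval;

public class DescriptorSets
{
  public static Set<SegmentDescriptor> allPartitions(Interval interval, String version, int numPartitions)
  {
    Set<SegmentDescriptor> descriptors = new HashSet<>();
    IntStream.range(0, numPartitions)
             .forEach(i -> descriptors.add(new SegmentDescriptor(interval, version, i)));
    return descriptors;
  }
}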
Use of org.apache.druid.query.SegmentDescriptor in project druid by druid-io.
From the class ResponseContextTest, method mergeResponseContextTest.
@Test
public void mergeResponseContextTest()
{
  final ResponseContext ctx1 = ResponseContext.createEmpty();
  ctx1.putEntityTag("dummy-etag-1");
  ctx1.putUncoveredIntervals(Collections.singletonList(INTERVAL_01), false);
  ctx1.addRowScanCount(1L);
  final ResponseContext ctx2 = ResponseContext.createEmpty();
  ctx2.putEntityTag("dummy-etag-2");
  ctx2.putUncoveredIntervals(Collections.singletonList(INTERVAL_12), false);
  final SegmentDescriptor sd01 = new SegmentDescriptor(INTERVAL_01, "01", 0);
  ctx2.addMissingSegments(Collections.singletonList(sd01));
  ctx2.addRowScanCount(2L);
  ctx1.merge(ctx2);
  Assert.assertEquals("dummy-etag-2", ctx1.getEntityTag());
  Assert.assertEquals((Long) 3L, ctx1.getRowScanCount());
  Assert.assertArrayEquals(Arrays.asList(INTERVAL_01, INTERVAL_12).toArray(), ctx1.getUncoveredIntervals().toArray());
  Assert.assertArrayEquals(Collections.singletonList(sd01).toArray(), ctx1.getMissingSegments().toArray());
}
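Distilled from the assertions above, merge() treats keys by kind: single-valued keys such as the entity tag take the incoming value, counters are summed, and list-valued keys (uncovered intervals, missing segments) are concatenated. A minimal sketch under those semantics, using only calls that appear in the test:

// Sketch of ResponseContext.merge() semantics, distilled from the test above.
import org.apache.druid.query.context.ResponseContext;

public class MergeSketch
{
  public static void main(String[] args)
  {
    ResponseContext base = ResponseContext.createEmpty();
    base.putEntityTag("etag-a");
    base.addRowScanCount(1L);

    ResponseContext incoming = ResponseContext.createEmpty();
    incoming.putEntityTag("etag-b");
    incoming.addRowScanCount(2L);

    base.merge(incoming);
    // base.getEntityTag()    -> "etag-b" (incoming single value wins)
    // base.getRowScanCount() -> 3L       (counts are summed)
  }
}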
Use of org.apache.druid.query.SegmentDescriptor in project druid by druid-io.
From the class ClientQuerySegmentWalkerTest, method initWalker.
/**
* Initialize (or reinitialize) our {@link #walker} and {@link #closer}.
*/
private void initWalker(final Map<String, String> serverProperties, QueryScheduler schedulerForTest)
{
  final ObjectMapper jsonMapper = TestHelper.makeJsonMapper();
  final ServerConfig serverConfig = jsonMapper.convertValue(serverProperties, ServerConfig.class);
  final SegmentWrangler segmentWrangler = new MapSegmentWrangler(
      ImmutableMap.<Class<? extends DataSource>, SegmentWrangler>builder()
                  .put(InlineDataSource.class, new InlineSegmentWrangler())
                  .build()
  );
  final JoinableFactory globalFactory = new JoinableFactory()
  {
    @Override
    public boolean isDirectlyJoinable(DataSource dataSource)
    {
      // The cast is safe in context: the MapJoinableFactory below routes
      // only GlobalTableDataSource instances to this factory.
      return ((GlobalTableDataSource) dataSource).getName().equals(GLOBAL);
    }

    @Override
    public Optional<Joinable> build(DataSource dataSource, JoinConditionAnalysis condition)
    {
      return Optional.empty();
    }
  };
  final JoinableFactory joinableFactory = new MapJoinableFactory(
      ImmutableSet.of(globalFactory, new InlineJoinableFactory()),
      ImmutableMap.<Class<? extends JoinableFactory>, Class<? extends DataSource>>builder()
                  .put(InlineJoinableFactory.class, InlineDataSource.class)
                  .put(globalFactory.getClass(), GlobalTableDataSource.class)
                  .build()
  );
  class CapturingWalker implements QuerySegmentWalker
  {
    private QuerySegmentWalker baseWalker;
    private ClusterOrLocal how;

    CapturingWalker(QuerySegmentWalker baseWalker, ClusterOrLocal how)
    {
      this.baseWalker = baseWalker;
      this.how = how;
    }

    @Override
    public <T> QueryRunner<T> getQueryRunnerForIntervals(Query<T> query, Iterable<Interval> intervals)
    {
      final QueryRunner<T> baseRunner = baseWalker.getQueryRunnerForIntervals(query, intervals);
      return (queryPlus, responseContext) -> {
        log.info("Query (%s): %s", how, queryPlus.getQuery());
        issuedQueries.add(new ExpectedQuery(queryPlus.getQuery(), how));
        return baseRunner.run(queryPlus, responseContext);
      };
    }

    @Override
    public <T> QueryRunner<T> getQueryRunnerForSegments(Query<T> query, Iterable<SegmentDescriptor> specs)
    {
      final QueryRunner<T> baseRunner = baseWalker.getQueryRunnerForSegments(query, specs);
      return (queryPlus, responseContext) -> {
        log.info("Query (%s): %s", how, queryPlus.getQuery());
        issuedQueries.add(new ExpectedQuery(queryPlus.getQuery(), how));
        return baseRunner.run(queryPlus, responseContext);
      };
    }
  }

  walker = QueryStackTests.createClientQuerySegmentWalker(
      new CapturingWalker(
          QueryStackTests.createClusterQuerySegmentWalker(
              ImmutableMap.<String, VersionedIntervalTimeline<String, ReferenceCountingSegment>>builder()
                          .put(FOO, makeTimeline(FOO, FOO_INLINE))
                          .put(BAR, makeTimeline(BAR, BAR_INLINE))
                          .put(MULTI, makeTimeline(MULTI, MULTI_VALUE_INLINE))
                          .put(GLOBAL, makeTimeline(GLOBAL, FOO_INLINE))
                          .put(ARRAY, makeTimeline(ARRAY, ARRAY_INLINE))
                          .put(ARRAY_UNKNOWN, makeTimeline(ARRAY_UNKNOWN, ARRAY_INLINE_UNKNOWN))
                          .build(),
              joinableFactory,
              conglomerate,
              schedulerForTest
          ),
          ClusterOrLocal.CLUSTER
      ),
      new CapturingWalker(
          QueryStackTests.createLocalQuerySegmentWalker(conglomerate, segmentWrangler, joinableFactory, schedulerForTest),
          ClusterOrLocal.LOCAL
      ),
      conglomerate,
      joinableFactory,
      serverConfig
  );
}
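CapturingWalker wraps each base runner in the same observe-then-delegate lambda twice. The idiom generalizes to any QueryRunner decoration; here is a sketch with a hypothetical helper, whose Consumer parameter stands in for the test's issuedQueries bookkeeping.

// Generic form of the decorator idiom used twice above; recording() and
// its Consumer parameter are ours, standing in for issuedQueries::add.
import java.util.function.Consumer;
import org.apache.druid.query.Query;
import org.apache.druid.query.QueryRunner;

public class RecordingRunners
{
  public static <T> QueryRunner<T> recording(QueryRunner<T> base, Consumer<Query<?>> observer)
  {
    return (queryPlus, responseContext) -> {
      observer.accept(queryPlus.getQuery()); // observe before delegating
      return base.run(queryPlus, responseContext);
    };
  }
}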