use of org.joda.time.Interval in project druid by druid-io.
the class SpecificSegmentQueryRunnerTest method testRetry2.
@SuppressWarnings("unchecked")
@Test
public void testRetry2() throws Exception {
  final ObjectMapper mapper = new DefaultObjectMapper();
  SegmentDescriptor descriptor = new SegmentDescriptor(new Interval("2012-01-01T00:00:00Z/P1D"), "version", 0);
  TimeseriesResultBuilder builder = new TimeseriesResultBuilder(new DateTime("2012-01-01T00:00:00Z"));
  CountAggregator rows = new CountAggregator();
  rows.aggregate();
  builder.addMetric("rows", rows);
  final Result<TimeseriesResultValue> value = builder.build();
  final SpecificSegmentQueryRunner queryRunner = new SpecificSegmentQueryRunner(
      new QueryRunner() {
        @Override
        public Sequence run(Query query, Map responseContext) {
          return Sequences.withEffect(
              Sequences.simple(Arrays.asList(value)),
              new Runnable() {
                @Override
                public void run() {
                  throw new SegmentMissingException("FAILSAUCE");
                }
              },
              MoreExecutors.sameThreadExecutor()
          );
        }
      },
      new SpecificSegmentSpec(descriptor)
  );
  final Map<String, Object> responseContext = Maps.newHashMap();
  TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
      .dataSource("foo")
      .granularity(Granularities.ALL)
      .intervals(ImmutableList.of(new Interval("2012-01-01T00:00:00Z/P1D")))
      .aggregators(ImmutableList.<AggregatorFactory>of(new CountAggregatorFactory("rows")))
      .build();
  Sequence results = queryRunner.run(query, responseContext);
  List<Result<TimeseriesResultValue>> res = Sequences.toList(results, Lists.<Result<TimeseriesResultValue>>newArrayList());
  Assert.assertEquals(1, res.size());
  Result<TimeseriesResultValue> theVal = res.get(0);
  Assert.assertTrue(1L == theVal.getValue().getLongMetric("rows"));
  validate(mapper, descriptor, responseContext);
}
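For reference, a minimal, self-contained sketch (not Druid code) of how the ISO-8601 interval string used above, "2012-01-01T00:00:00Z/P1D", parses into a Joda-Time Interval; only joda-time is assumed on the classpath, and the class name is hypothetical.

import org.joda.time.DateTime;
import org.joda.time.Interval;

// Sketch only: shows the "instant/period" form used by the test above.
public class IntervalParsingSketch {
  public static void main(String[] args) {
    // Starts at the given instant and spans one day (P1D).
    Interval day = new Interval("2012-01-01T00:00:00Z/P1D");

    // Joda-Time intervals are half-open: [start, end).
    System.out.println(day.getStart()); // the 2012-01-01T00:00:00Z instant
    System.out.println(day.getEnd());   // exactly one day later

    System.out.println(day.contains(new DateTime("2012-01-01T12:00:00Z"))); // true
    System.out.println(day.contains(day.getEnd()));                         // false: the end is exclusive
  }
}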
use of org.joda.time.Interval in project druid by druid-io.
the class TimeBoundaryQueryQueryToolChestTest method testFilteredFilterSegments.
@Test
public void testFilteredFilterSegments() throws Exception {
  List<LogicalSegment> segments = new TimeBoundaryQueryQueryToolChest().filterSegments(
      FILTERED_BOUNDARY_QUERY,
      Arrays.asList(
          createLogicalSegment(new Interval("2013-01-01/P1D")),
          createLogicalSegment(new Interval("2013-01-01T01/PT1H")),
          createLogicalSegment(new Interval("2013-01-01T02/PT1H")),
          createLogicalSegment(new Interval("2013-01-02/P1D")),
          createLogicalSegment(new Interval("2013-01-03T01/PT1H")),
          createLogicalSegment(new Interval("2013-01-03T02/PT1H")),
          createLogicalSegment(new Interval("2013-01-03/P1D"))));
  Assert.assertEquals(7, segments.size());
}
use of org.joda.time.Interval in project druid by druid-io.
the class TimeBoundaryQueryQueryToolChestTest method testMinTimeFilterSegments.
@Test
public void testMinTimeFilterSegments() throws Exception {
  List<LogicalSegment> segments = new TimeBoundaryQueryQueryToolChest().filterSegments(
      MINTIME_BOUNDARY_QUERY,
      Arrays.asList(
          createLogicalSegment(new Interval("2013-01-01/P1D")),
          createLogicalSegment(new Interval("2013-01-01T01/PT1H")),
          createLogicalSegment(new Interval("2013-01-01T02/PT1H")),
          createLogicalSegment(new Interval("2013-01-02/P1D")),
          createLogicalSegment(new Interval("2013-01-03T01/PT1H")),
          createLogicalSegment(new Interval("2013-01-03T02/PT1H")),
          createLogicalSegment(new Interval("2013-01-03/P1D"))));
  Assert.assertEquals(3, segments.size());
  List<LogicalSegment> expected = Arrays.asList(
      createLogicalSegment(new Interval("2013-01-01/P1D")),
      createLogicalSegment(new Interval("2013-01-01T01/PT1H")),
      createLogicalSegment(new Interval("2013-01-01T02/PT1H")));
  for (int i = 0; i < segments.size(); i++) {
    Assert.assertEquals(segments.get(i).getInterval(), expected.get(i).getInterval());
  }
}
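Why the test expects three segments: for a minTime boundary, only segments that overlap the earliest segment's interval need to be scanned, which leaves exactly the three 2013-01-01 entries. The sketch below illustrates that pruning idea on the same intervals; it is not the actual TimeBoundaryQueryQueryToolChest implementation, and the class and method names are hypothetical. It assumes, as in the test, that the segments arrive sorted by interval start.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.joda.time.Interval;

// Illustration only: keep the intervals that overlap the earliest one.
public class MinTimePruningSketch {
  static List<Interval> overlappingEarliest(List<Interval> sortedByStart) {
    List<Interval> kept = new ArrayList<>();
    if (sortedByStart.isEmpty()) {
      return kept;
    }
    Interval first = sortedByStart.get(0);
    for (Interval candidate : sortedByStart) {
      // Half-open intervals: an abutting next-day interval does not overlap.
      if (first.overlaps(candidate)) {
        kept.add(candidate);
      }
    }
    return kept;
  }

  public static void main(String[] args) {
    List<Interval> pruned = overlappingEarliest(Arrays.asList(
        new Interval("2013-01-01/P1D"),
        new Interval("2013-01-01T01/PT1H"),
        new Interval("2013-01-01T02/PT1H"),
        new Interval("2013-01-02/P1D"),
        new Interval("2013-01-03T01/PT1H"),
        new Interval("2013-01-03T02/PT1H"),
        new Interval("2013-01-03/P1D")));
    System.out.println(pruned.size()); // 3
  }
}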
use of org.joda.time.Interval in project druid by druid-io.
the class TimeBoundaryQueryQueryToolChestTest method testCacheStrategy.
@Test
public void testCacheStrategy() throws Exception {
  CacheStrategy<Result<TimeBoundaryResultValue>, Object, TimeBoundaryQuery> strategy =
      new TimeBoundaryQueryQueryToolChest().getCacheStrategy(
          new TimeBoundaryQuery(
              new TableDataSource("dummy"),
              new MultipleIntervalSegmentSpec(ImmutableList.of(new Interval("2015-01-01/2015-01-02"))),
              null,
              null,
              null));
  final Result<TimeBoundaryResultValue> result = new Result<>(
      new DateTime(123L),
      new TimeBoundaryResultValue(
          ImmutableMap.of(
              TimeBoundaryQuery.MIN_TIME, new DateTime(0L).toString(),
              TimeBoundaryQuery.MAX_TIME, new DateTime("2015-01-01").toString())));
  Object preparedValue = strategy.prepareForCache().apply(result);
  ObjectMapper objectMapper = new DefaultObjectMapper();
  Object fromCacheValue = objectMapper.readValue(
      objectMapper.writeValueAsBytes(preparedValue),
      strategy.getCacheObjectClazz());
  Result<TimeBoundaryResultValue> fromCacheResult = strategy.pullFromCache().apply(fromCacheValue);
  Assert.assertEquals(result, fromCacheResult);
}
use of org.joda.time.Interval in project druid by druid-io.
the class TopNQueryQueryToolChestTest method testComputeCacheKeyWithDifferentPostAgg.
@Test
public void testComputeCacheKeyWithDifferentPostAgg() throws Exception {
  final TopNQuery query1 = new TopNQuery(
      new TableDataSource("dummy"),
      VirtualColumns.EMPTY,
      new DefaultDimensionSpec("test", "test"),
      new NumericTopNMetricSpec("post"),
      3,
      new MultipleIntervalSegmentSpec(ImmutableList.of(new Interval("2015-01-01/2015-01-02"))),
      null,
      Granularities.ALL,
      ImmutableList.<AggregatorFactory>of(new CountAggregatorFactory("metric1")),
      ImmutableList.<PostAggregator>of(new ConstantPostAggregator("post", 10)),
      null);
  final TopNQuery query2 = new TopNQuery(
      new TableDataSource("dummy"),
      VirtualColumns.EMPTY,
      new DefaultDimensionSpec("test", "test"),
      new NumericTopNMetricSpec("post"),
      3,
      new MultipleIntervalSegmentSpec(ImmutableList.of(new Interval("2015-01-01/2015-01-02"))),
      null,
      Granularities.ALL,
      ImmutableList.<AggregatorFactory>of(new CountAggregatorFactory("metric1")),
      ImmutableList.<PostAggregator>of(
          new ArithmeticPostAggregator(
              "post",
              "+",
              ImmutableList.<PostAggregator>of(
                  new FieldAccessPostAggregator(null, "metric1"),
                  new FieldAccessPostAggregator(null, "metric1")))),
      null);
  final CacheStrategy<Result<TopNResultValue>, Object, TopNQuery> strategy1 =
      new TopNQueryQueryToolChest(null, null).getCacheStrategy(query1);
  final CacheStrategy<Result<TopNResultValue>, Object, TopNQuery> strategy2 =
      new TopNQueryQueryToolChest(null, null).getCacheStrategy(query2);
  Assert.assertFalse(Arrays.equals(strategy1.computeCacheKey(query1), strategy2.computeCacheKey(query2)));
}