Use of org.apache.druid.query.TableDataSource in project druid by druid-io.
From the class TimeBoundaryQueryQueryToolChestTest, method testCacheStrategy.
@Test
public void testCacheStrategy() throws Exception {
  CacheStrategy<Result<TimeBoundaryResultValue>, Object, TimeBoundaryQuery> strategy =
      new TimeBoundaryQueryQueryToolChest().getCacheStrategy(
          new TimeBoundaryQuery(
              new TableDataSource("dummy"),
              new MultipleIntervalSegmentSpec(ImmutableList.of(Intervals.of("2015-01-01/2015-01-02"))),
              null, null, null));
  final Result<TimeBoundaryResultValue> result = new Result<>(
      DateTimes.utc(123L),
      new TimeBoundaryResultValue(ImmutableMap.of(
          TimeBoundaryQuery.MIN_TIME, DateTimes.EPOCH.toString(),
          TimeBoundaryQuery.MAX_TIME, DateTimes.of("2015-01-01").toString())));
  Object preparedValue = strategy.prepareForSegmentLevelCache().apply(result);
  ObjectMapper objectMapper = new DefaultObjectMapper();
  Object fromCacheValue = objectMapper.readValue(
      objectMapper.writeValueAsBytes(preparedValue), strategy.getCacheObjectClazz());
  Result<TimeBoundaryResultValue> fromCacheResult =
      strategy.pullFromSegmentLevelCache().apply(fromCacheValue);
  Assert.assertEquals(result, fromCacheResult);
}
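For reference, the assertions above reduce to a serialize/deserialize round trip through the strategy's cache object class. Here is a minimal sketch of that round trip as a reusable helper; the helper name and generic signature are illustrative and not part of Druid's test code.
// Hedged sketch: prepare a result for the segment-level cache, push it through JSON
// serialization the way a cache would, and pull it back out. The round-tripped value
// is expected to equal the original result.
static <T, CacheType> T roundTripThroughSegmentCache(
    CacheStrategy<T, CacheType, ?> strategy,
    T result,
    ObjectMapper objectMapper
) throws java.io.IOException {
  CacheType prepared = strategy.prepareForSegmentLevelCache().apply(result);
  CacheType fromCache = objectMapper.readValue(
      objectMapper.writeValueAsBytes(prepared),
      strategy.getCacheObjectClazz());
  return strategy.pullFromSegmentLevelCache().apply(fromCache);
}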
Use of org.apache.druid.query.TableDataSource in project druid by druid-io.
From the class SegmentManager, method getIndexedTables.
/**
 * Returns the collection of {@link IndexedTable} for the entire timeline (since join conditions do not currently
 * consider the query's intervals), if the timeline exists, for each of its segments that are joinable.
 */
public Optional<Stream<ReferenceCountingIndexedTable>> getIndexedTables(DataSourceAnalysis analysis) {
  return getTimeline(analysis).map(timeline -> {
    // join doesn't currently consider intervals, so just consider all segments
    final Stream<ReferenceCountingSegment> segments =
        timeline.lookup(Intervals.ETERNITY)
                .stream()
                .flatMap(x -> StreamSupport.stream(x.getObject().payloads().spliterator(), false));
    final TableDataSource tableDataSource = getTableDataSource(analysis);
    ConcurrentHashMap<SegmentId, ReferenceCountingIndexedTable> tables =
        Optional.ofNullable(dataSources.get(tableDataSource.getName()))
                .map(DataSourceState::getTablesLookup)
                .orElseThrow(() -> new ISE("Datasource %s does not have IndexedTables", tableDataSource.getName()));
    return segments.map(segment -> tables.get(segment.getId())).filter(Objects::nonNull);
  });
}
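A hypothetical caller sketch showing how the Optional<Stream<...>> return value might be consumed, treating a missing timeline as an empty result; segmentManager and analysis are assumed to be in scope, and Collectors/Collections come from java.util.
// Hedged caller sketch: materialize the joinable tables, or an empty list if the
// datasource has no timeline at all.
List<ReferenceCountingIndexedTable> joinableTables = segmentManager
    .getIndexedTables(analysis)
    .map(stream -> stream.collect(Collectors.toList()))
    .orElse(Collections.emptyList());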
Use of org.apache.druid.query.TableDataSource in project druid by druid-io.
From the class UnifiedIndexerAppenderatorsManager, method getBundle.
@VisibleForTesting
<T> DatasourceBundle getBundle(final Query<T> query) {
  final DataSourceAnalysis analysis = DataSourceAnalysis.forDataSource(query.getDataSource());
  final TableDataSource table = analysis.getBaseTableDataSource()
      .orElseThrow(() -> new ISE("Cannot handle datasource: %s", analysis.getDataSource()));
  final DatasourceBundle bundle;
  synchronized (this) {
    bundle = datasourceBundles.get(table.getName());
  }
  if (bundle == null) {
    throw new IAE("Could not find segment walker for datasource [%s]", table.getName());
  }
  return bundle;
}
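A minimal sketch of the extraction this method depends on, using only calls that appear above; the datasource name "wikipedia" is illustrative. A plain table datasource yields a present getBaseTableDataSource(), whose name then serves as the lookup key into datasourceBundles.
// Hedged sketch: analyze a plain table datasource and pull out its base table name.
DataSourceAnalysis analysis = DataSourceAnalysis.forDataSource(new TableDataSource("wikipedia"));
TableDataSource base = analysis.getBaseTableDataSource()
    .orElseThrow(() -> new ISE("Cannot handle datasource: %s", analysis.getDataSource()));
String bundleKey = base.getName(); // "wikipedia" -> key into datasourceBundles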
Use of org.apache.druid.query.TableDataSource in project druid by druid-io.
From the class SegmentMetadataQueryQueryToolChestTest, method testCacheStrategy.
@Test
public void testCacheStrategy() throws Exception {
  SegmentMetadataQuery query = new SegmentMetadataQuery(
      new TableDataSource("dummy"),
      new LegacySegmentSpec("2015-01-01/2015-01-02"),
      null, null, null, null, false, false);
  CacheStrategy<SegmentAnalysis, SegmentAnalysis, SegmentMetadataQuery> strategy =
      new SegmentMetadataQueryQueryToolChest(new SegmentMetadataQueryConfig()).getCacheStrategy(query);
  // Test cache key generation
  byte[] expectedKey = {0x04, 0x09, 0x01, 0x0A, 0x00, 0x00, 0x00, 0x03, 0x00, 0x02, 0x04};
  byte[] actualKey = strategy.computeCacheKey(query);
  Assert.assertArrayEquals(expectedKey, actualKey);
  SegmentAnalysis result = new SegmentAnalysis(
      "testSegment",
      ImmutableList.of(Intervals.of("2011-01-12T00:00:00.000Z/2011-04-15T00:00:00.001Z")),
      ImmutableMap.of(
          "placement",
          new ColumnAnalysis(ColumnType.STRING, ValueType.STRING.name(), true, false, 10881, 1, "preferred", "preferred", null)),
      71982, 100, null, null, null, null);
  Object preparedValue = strategy.prepareForSegmentLevelCache().apply(result);
  ObjectMapper objectMapper = new DefaultObjectMapper();
  SegmentAnalysis fromCacheValue = objectMapper.readValue(
      objectMapper.writeValueAsBytes(preparedValue), strategy.getCacheObjectClazz());
  SegmentAnalysis fromCacheResult = strategy.pullFromSegmentLevelCache().apply(fromCacheValue);
  Assert.assertEquals(result, fromCacheResult);
}
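The serde half of this test follows the same pattern as the TimeBoundary round trip above; with the hypothetical helper sketched there in scope, it collapses to:
// Hedged usage of the roundTripThroughSegmentCache sketch shown earlier (not Druid code).
SegmentAnalysis roundTripped = roundTripThroughSegmentCache(strategy, result, objectMapper);
Assert.assertEquals(result, roundTripped);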
Use of org.apache.druid.query.TableDataSource in project druid by druid-io.
From the class ResultRowTest, method testMapBasedRowWithNullValues.
@Test
public void testMapBasedRowWithNullValues() {
  GroupByQuery query = new GroupByQuery(
      new TableDataSource(QueryRunnerTestHelper.DATA_SOURCE),
      new MultipleIntervalSegmentSpec(ImmutableList.of(Intervals.of("2011/2012"))),
      null,
      null,
      Granularities.ALL,
      ImmutableList.of(
          new DefaultDimensionSpec("dim1", "dim1"),
          new DefaultDimensionSpec("dim2", "dim2"),
          new DefaultDimensionSpec("dim3", "dim3")),
      ImmutableList.of(new CountAggregatorFactory("count")),
      null, null, null, null, null);
  final ResultRow row = ResultRow.of("1", "2", null);
  MapBasedRow mapBasedRow = row.toMapBasedRow(query);
  // Let's make sure values are there as expected
  Assert.assertEquals("1", mapBasedRow.getRaw("dim1"));
  Assert.assertEquals("2", mapBasedRow.getRaw("dim2"));
  Assert.assertNull(mapBasedRow.getRaw("dim3"));
  // Also, let's make sure that the dimension with null value is actually present in the map
  Assert.assertTrue(mapBasedRow.getEvent().containsKey("dim3"));
}
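A small hedged illustration of the distinction the last two assertions draw: a null from getRaw() alone cannot separate "present with a null value" from "absent", so the test checks the underlying event map directly. Here "dim4" is a hypothetical key that was never added to the row.
// Hedged illustration: containsKey() distinguishes a null-valued key from a missing key.
Map<String, Object> event = mapBasedRow.getEvent();
Assert.assertTrue(event.containsKey("dim3"));  // present, value is null
Assert.assertNull(event.get("dim3"));
Assert.assertFalse(event.containsKey("dim4")); // hypothetical key, never added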