Use of org.apache.druid.query.TableDataSource in project druid by druid-io.
From the class TopNQueryQueryToolChestTest, the method doTestCacheStrategy:
private void doTestCacheStrategy(final ColumnType valueType, final Object dimValue) throws IOException {
  CacheStrategy<Result<TopNResultValue>, Object, TopNQuery> strategy = new TopNQueryQueryToolChest(null, null).getCacheStrategy(
      new TopNQuery(
          new TableDataSource("dummy"),
          VirtualColumns.EMPTY,
          new DefaultDimensionSpec("test", "test", valueType),
          new NumericTopNMetricSpec("metric1"),
          3,
          new MultipleIntervalSegmentSpec(ImmutableList.of(Intervals.of("2015-01-01/2015-01-02"))),
          null,
          Granularities.ALL,
          ImmutableList.of(new CountAggregatorFactory("metric1"), getComplexAggregatorFactoryForValueType(valueType.getType())),
          ImmutableList.of(new ConstantPostAggregator("post", 10)),
          null));

  // test timestamps that result in integer size millis
  final Result<TopNResultValue> result1 = new Result<>(
      DateTimes.utc(123L),
      new TopNResultValue(Collections.singletonList(
          ImmutableMap.of("test", dimValue, "metric1", 2, "complexMetric", getIntermediateComplexValue(valueType.getType(), dimValue)))));

  Object preparedValue = strategy.prepareForSegmentLevelCache().apply(result1);
  ObjectMapper objectMapper = TestHelper.makeJsonMapper();
  Object fromCacheValue = objectMapper.readValue(objectMapper.writeValueAsBytes(preparedValue), strategy.getCacheObjectClazz());
  Result<TopNResultValue> fromCacheResult = strategy.pullFromSegmentLevelCache().apply(fromCacheValue);
  Assert.assertEquals(result1, fromCacheResult);

  // test timestamps that result in integer size millis
  final Result<TopNResultValue> result2 = new Result<>(
      DateTimes.utc(123L),
      new TopNResultValue(Collections.singletonList(
          ImmutableMap.of("test", dimValue, "metric1", 2, "complexMetric", dimValue, "post", 10))));

  // Please see the comments on aggregator serde and type handling in CacheStrategy.fetchAggregatorsFromCache()
  final Result<TopNResultValue> typeAdjustedResult2;
  if (valueType.is(ValueType.FLOAT)) {
    typeAdjustedResult2 = new Result<>(
        DateTimes.utc(123L),
        new TopNResultValue(Collections.singletonList(
            ImmutableMap.of("test", dimValue, "metric1", 2, "complexMetric", 2.1d, "post", 10))));
  } else if (valueType.is(ValueType.LONG)) {
    typeAdjustedResult2 = new Result<>(
        DateTimes.utc(123L),
        new TopNResultValue(Collections.singletonList(
            ImmutableMap.of("test", dimValue, "metric1", 2, "complexMetric", 2, "post", 10))));
  } else {
    typeAdjustedResult2 = result2;
  }

  Object preparedResultCacheValue = strategy.prepareForCache(true).apply(result2);
  Object fromResultCacheValue = objectMapper.readValue(objectMapper.writeValueAsBytes(preparedResultCacheValue), strategy.getCacheObjectClazz());
  Result<TopNResultValue> fromResultCacheResult = strategy.pullFromCache(true).apply(fromResultCacheValue);
  Assert.assertEquals(typeAdjustedResult2, fromResultCacheResult);
}
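The segment-level round trip above follows the same three-step pattern regardless of result type: prepare the result for caching, serialize and deserialize it through the mapper, then pull it back out of the cache form. A minimal sketch of that pattern, assuming the strategy and result objects are built as in doTestCacheStrategy; the helper name roundTripSegmentCache is illustrative only.

private static Result<TopNResultValue> roundTripSegmentCache(
    CacheStrategy<Result<TopNResultValue>, Object, TopNQuery> strategy,
    Result<TopNResultValue> result) throws IOException {
  // Convert the result into its cacheable form, write it to bytes, read it back as the
  // strategy's declared cache object class, and restore it to a Result.
  final ObjectMapper objectMapper = TestHelper.makeJsonMapper();
  final Object prepared = strategy.prepareForSegmentLevelCache().apply(result);
  final Object fromCache = objectMapper.readValue(objectMapper.writeValueAsBytes(prepared), strategy.getCacheObjectClazz());
  return strategy.pullFromSegmentLevelCache().apply(fromCache);
}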
Use of org.apache.druid.query.TableDataSource in project druid by druid-io.
From the class LoggingRequestLogger, the method logNativeQuery:
@Override
public void logNativeQuery(RequestLogLine requestLogLine) throws IOException {
  final Map mdc = MDC.getCopyOfContextMap();
  // MDC must be set during the `LOG.info` call at the end of the try block.
  try {
    if (setMDC) {
      try {
        final Query query = requestLogLine.getQuery();
        MDC.put("queryId", query.getId());
        MDC.put(BaseQuery.SQL_QUERY_ID, StringUtils.nullToEmptyNonDruidDataString(query.getSqlQueryId()));
        MDC.put("dataSource", String.join(",", query.getDataSource().getTableNames()));
        MDC.put("queryType", query.getType());
        MDC.put("isNested", String.valueOf(!(query.getDataSource() instanceof TableDataSource)));
        MDC.put("hasFilters", Boolean.toString(query.hasFilters()));
        MDC.put("remoteAddr", requestLogLine.getRemoteAddr());
        MDC.put("duration", query.getDuration().toString());
        MDC.put("descending", Boolean.toString(query.isDescending()));
        if (setContextMDC) {
          final Iterable<Map.Entry<String, Object>> entries =
              query.getContext() == null ? ImmutableList.of() : query.getContext().entrySet();
          for (Map.Entry<String, Object> entry : entries) {
            MDC.put(entry.getKey(), entry.getValue() == null ? "NULL" : entry.getValue().toString());
          }
        }
      } catch (RuntimeException re) {
        LOG.error(re, "Error preparing MDC");
      }
    }
    final String line = requestLogLine.getNativeQueryLine(mapper);
    // MDC must be set here
    LOG.info("%s", line);
  } finally {
    if (setMDC) {
      if (mdc != null) {
        MDC.setContextMap(mdc);
      } else {
        MDC.clear();
      }
    }
  }
}
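The "isNested" entry above is derived directly from the data source type: a query is flagged as nested whenever its data source is anything other than a plain TableDataSource (for example a query or join data source). A minimal sketch of that check in isolation; the helper name isNestedDataSource is hypothetical.

private static boolean isNestedDataSource(Query<?> query) {
  // TableDataSource is a direct reference to a physical table; any other DataSource
  // implementation implies the query is nested.
  return !(query.getDataSource() instanceof TableDataSource);
}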
Use of org.apache.druid.query.TableDataSource in project druid by druid-io.
From the class TimeBoundaryQueryRunnerTest, the method testMergeResults:
@Test
public void testMergeResults() {
  List<Result<TimeBoundaryResultValue>> results = Arrays.asList(
      new Result<>(DateTimes.nowUtc(), new TimeBoundaryResultValue(ImmutableMap.of("maxTime", "2012-01-01", "minTime", "2011-01-01"))),
      new Result<>(DateTimes.nowUtc(), new TimeBoundaryResultValue(ImmutableMap.of("maxTime", "2012-02-01", "minTime", "2011-01-01"))));
  TimeBoundaryQuery query = new TimeBoundaryQuery(new TableDataSource("test"), null, null, null, null);
  Iterable<Result<TimeBoundaryResultValue>> actual = query.mergeResults(results);
  Assert.assertTrue(actual.iterator().next().getValue().getMaxTime().equals(DateTimes.of("2012-02-01")));
}
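mergeResults collapses the per-segment boundaries into a single result, so the merged maxTime is the latest value among the inputs. By the same reasoning the merged minTime should be the earliest value seen; a hedged follow-up assertion for the data above, assuming TimeBoundaryResultValue exposes getMinTime alongside getMaxTime:

  // With both inputs reporting minTime 2011-01-01, the merged result should keep that bound.
  Assert.assertEquals(DateTimes.of("2011-01-01"), actual.iterator().next().getValue().getMinTime());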
Use of org.apache.druid.query.TableDataSource in project druid by druid-io.
From the class TimeBoundaryQueryRunnerTest, the method testMergeResultsEmptyResults:
@Test
public void testMergeResultsEmptyResults() {
  List<Result<TimeBoundaryResultValue>> results = new ArrayList<>();
  TimeBoundaryQuery query = new TimeBoundaryQuery(new TableDataSource("test"), null, null, null, null);
  Iterable<Result<TimeBoundaryResultValue>> actual = query.mergeResults(results);
  Assert.assertFalse(actual.iterator().hasNext());
}
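Both tests build the query through the five-argument TimeBoundaryQuery constructor, passing a TableDataSource directly. A minimal sketch of the equivalent construction through the Druids builder, assuming Druids.newTimeBoundaryQueryBuilder() is available on the test classpath; this is an illustration, not the form used in the tests above.

  // Equivalent query built via the builder; unspecified fields default to null as in the constructor call.
  TimeBoundaryQuery builderQuery = Druids.newTimeBoundaryQueryBuilder()
      .dataSource(new TableDataSource("test"))
      .build();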
Use of org.apache.druid.query.TableDataSource in project druid by druid-io.
From the class ScanQuerySpecTest, the method testSerialization:
@Test
public void testSerialization() throws Exception {
  String legacy =
      "{\"queryType\":\"scan\",\"dataSource\":{\"type\":\"table\",\"name\":\"testing\"},"
      + "\"intervals\":{\"type\":\"LegacySegmentSpec\",\"intervals\":[\"2011-01-12T00:00:00.000Z/2011-01-14T00:00:00.000Z\"]},"
      + "\"filter\":null,"
      + "\"columns\":[\"market\",\"quality\",\"index\"],"
      + "\"limit\":3,"
      + "\"context\":null}";
  String current =
      "{\"queryType\":\"scan\",\"dataSource\":{\"type\":\"table\",\"name\":\"testing\"},"
      + "\"intervals\":{\"type\":\"LegacySegmentSpec\",\"intervals\":[\"2011-01-12T00:00:00.000Z/2011-01-14T00:00:00.000Z\"]},"
      + "\"virtualColumns\":[],"
      + "\"resultFormat\":\"list\","
      + "\"batchSize\":20480,"
      + "\"limit\":3,"
      + "\"filter\":null,"
      + "\"columns\":[\"market\",\"quality\",\"index\"],"
      + "\"context\":null,"
      + "\"descending\":false,"
      + "\"granularity\":{\"type\":\"all\"}}";
  ScanQuery query = new ScanQuery(
      new TableDataSource(QueryRunnerTestHelper.DATA_SOURCE),
      new LegacySegmentSpec(Intervals.of("2011-01-12/2011-01-14")),
      VirtualColumns.EMPTY,
      ScanQuery.ResultFormat.RESULT_FORMAT_LIST,
      0,
      0,
      3,
      ScanQuery.Order.NONE,
      null,
      null,
      Arrays.asList("market", "quality", "index"),
      null,
      null);
  String actual = JSON_MAPPER.writeValueAsString(query);
  Assert.assertEquals(current, actual);
  Assert.assertEquals(query, JSON_MAPPER.readValue(actual, ScanQuery.class));
  Assert.assertEquals(query, JSON_MAPPER.readValue(legacy, ScanQuery.class));
}
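The dataSource fragment in both JSON strings, {"type":"table","name":"testing"}, is the serialized form of TableDataSource itself. A minimal sketch of deserializing just that fragment with the same test mapper, assuming DataSource is the polymorphic base type registered with the mapper; variable names are illustrative.

  // The "table" type tag resolves to TableDataSource, which compares equal by table name.
  DataSource ds = JSON_MAPPER.readValue("{\"type\":\"table\",\"name\":\"testing\"}", DataSource.class);
  Assert.assertEquals(new TableDataSource("testing"), ds);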