Use of io.druid.query.TableDataSource in project druid by druid-io.
In class TimeBoundaryQueryRunnerTest, the method testMergeResultsEmptyResults:
@Test
public void testMergeResultsEmptyResults() throws Exception
{
  List<Result<TimeBoundaryResultValue>> results = Lists.newArrayList();
  TimeBoundaryQuery query = new TimeBoundaryQuery(new TableDataSource("test"), null, null, null, null);
  Iterable<Result<TimeBoundaryResultValue>> actual = query.mergeResults(results);
  Assert.assertFalse(actual.iterator().hasNext());
}
Use of io.druid.query.TableDataSource in project druid by druid-io.
In class TimeBoundaryQueryRunnerTest, the method testMergeResults:
@Test
public void testMergeResults() throws Exception
{
  List<Result<TimeBoundaryResultValue>> results = Arrays.asList(
      new Result<>(
          new DateTime(),
          new TimeBoundaryResultValue(ImmutableMap.of("maxTime", "2012-01-01", "minTime", "2011-01-01"))
      ),
      new Result<>(
          new DateTime(),
          new TimeBoundaryResultValue(ImmutableMap.of("maxTime", "2012-02-01", "minTime", "2011-01-01"))
      )
  );
  TimeBoundaryQuery query = new TimeBoundaryQuery(new TableDataSource("test"), null, null, null, null);
  Iterable<Result<TimeBoundaryResultValue>> actual = query.mergeResults(results);
  Assert.assertEquals(new DateTime("2012-02-01"), actual.iterator().next().getValue().getMaxTime());
}
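The five-argument constructor used in both tests is easy to get wrong by position. A minimal sketch of a gentler alternative, assuming the Druids.newTimeBoundaryQueryBuilder() factory from io.druid.query.Druids and its dataSource(DataSource) overload; testMergeResultsViaBuilder is a hypothetical test name, not part of the original class:

@Test
public void testMergeResultsViaBuilder() throws Exception
{
  // Hypothetical test; assumes Druids.newTimeBoundaryQueryBuilder().
  // Fields left unset by the builder default to null, matching the
  // raw constructor calls above.
  TimeBoundaryQuery query = Druids.newTimeBoundaryQueryBuilder()
                                  .dataSource(new TableDataSource("test"))
                                  .build();
  Iterable<Result<TimeBoundaryResultValue>> merged =
      query.mergeResults(Lists.<Result<TimeBoundaryResultValue>>newArrayList());
  Assert.assertFalse(merged.iterator().hasNext());
}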
Use of io.druid.query.TableDataSource in project druid by druid-io.
In class CoordinatorServerViewTest, the method testSingleServerAddedRemovedSegment:
@Test
public void testSingleServerAddedRemovedSegment() throws Exception
{
  segmentViewInitLatch = new CountDownLatch(1);
  segmentAddedLatch = new CountDownLatch(1);
  segmentRemovedLatch = new CountDownLatch(1);
  setupViews();

  final DruidServer druidServer = new DruidServer(
      "localhost:1234",
      "localhost:1234",
      10000000L,
      "historical",
      "default_tier",
      0
  );
  setupZNodeForServer(druidServer, zkPathsConfig, jsonMapper);

  final DataSegment segment = dataSegmentWithIntervalAndVersion("2014-10-20T00:00:00Z/P1D", "v1");
  announceSegmentForServer(druidServer, segment, zkPathsConfig, jsonMapper);
  Assert.assertTrue(timing.forWaiting().awaitLatch(segmentViewInitLatch));
  Assert.assertTrue(timing.forWaiting().awaitLatch(segmentAddedLatch));

  // Once the announcement is seen, the timeline for the data source should
  // contain exactly one complete holder covering the announced interval.
  TimelineLookup timeline = overlordServerView.getTimeline(new TableDataSource("test_overlord_server_view"));
  List<TimelineObjectHolder> serverLookupRes =
      (List<TimelineObjectHolder>) timeline.lookup(new Interval("2014-10-20T00:00:00Z/P1D"));
  Assert.assertEquals(1, serverLookupRes.size());

  TimelineObjectHolder<String, SegmentLoadInfo> actualTimelineObjectHolder = serverLookupRes.get(0);
  Assert.assertEquals(new Interval("2014-10-20T00:00:00Z/P1D"), actualTimelineObjectHolder.getInterval());
  Assert.assertEquals("v1", actualTimelineObjectHolder.getVersion());

  PartitionHolder<SegmentLoadInfo> actualPartitionHolder = actualTimelineObjectHolder.getObject();
  Assert.assertTrue(actualPartitionHolder.isComplete());
  Assert.assertEquals(1, Iterables.size(actualPartitionHolder));

  SegmentLoadInfo segmentLoadInfo = actualPartitionHolder.iterator().next().getObject();
  Assert.assertFalse(segmentLoadInfo.isEmpty());
  Assert.assertEquals(
      druidServer.getMetadata(),
      Iterables.getOnlyElement(segmentLoadInfo.toImmutableSegmentLoadInfo().getServers())
  );

  // After unannouncing, the segment must disappear from both lookup() and findEntry().
  unannounceSegmentForServer(druidServer, segment);
  Assert.assertTrue(timing.forWaiting().awaitLatch(segmentRemovedLatch));
  Assert.assertEquals(0, ((List<TimelineObjectHolder>) timeline.lookup(new Interval("2014-10-20T00:00:00Z/P1D"))).size());
  Assert.assertNull(timeline.findEntry(new Interval("2014-10-20T00:00:00Z/P1D"), "v1"));
}
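The lookup() and findEntry() calls above go through the server view's per-data-source timeline. A standalone sketch of that structure, assuming io.druid.timeline.VersionedIntervalTimeline and io.druid.timeline.partition.SingleElementPartitionChunk, with a String payload standing in for the SegmentLoadInfo the view actually stores:

// Build a timeline holding one complete chunk for the test interval.
VersionedIntervalTimeline<String, String> timeline =
    new VersionedIntervalTimeline<>(Ordering.natural());
timeline.add(
    new Interval("2014-10-20T00:00:00Z/P1D"),
    "v1",
    new SingleElementPartitionChunk<>("segment-payload")
);
// lookup() returns holders for complete partition sets overlapping the
// interval, which is why the test sees exactly one holder after the
// announcement and zero after the unannouncement.
List<TimelineObjectHolder<String, String>> holders =
    timeline.lookup(new Interval("2014-10-20T00:00:00Z/P1D"));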
Use of io.druid.query.TableDataSource in project druid by druid-io.
In class DruidSchema, the method computeTable:
private DruidTable computeTable(final String dataSource) {
  final SegmentMetadataQuery segmentMetadataQuery = new SegmentMetadataQuery(
      new TableDataSource(dataSource),
      null,
      null,
      false,
      ImmutableMap.<String, Object>of("useCache", false, "populateCache", false),
      EnumSet.of(SegmentMetadataQuery.AnalysisType.INTERVAL),
      null,
      true
  );
  final Sequence<SegmentAnalysis> sequence = segmentMetadataQuery.run(walker, Maps.<String, Object>newHashMap());
  final List<SegmentAnalysis> results = Sequences.toList(sequence, Lists.<SegmentAnalysis>newArrayList());
  if (results.isEmpty()) {
    return null;
  }
  final Map<String, ValueType> columnTypes = Maps.newLinkedHashMap();
  // Resolve conflicts by taking the latest metadata. This aids in gradual schema evolution.
  long maxTimestamp = JodaUtils.MIN_INSTANT;
  for (SegmentAnalysis analysis : results) {
    final long timestamp;
    if (analysis.getIntervals() != null && analysis.getIntervals().size() > 0) {
      timestamp = analysis.getIntervals().get(analysis.getIntervals().size() - 1).getEndMillis();
    } else {
      timestamp = JodaUtils.MIN_INSTANT;
    }
    for (Map.Entry<String, ColumnAnalysis> entry : analysis.getColumns().entrySet()) {
      if (entry.getValue().isError()) {
        // Skip columns with analysis errors.
        continue;
      }
      if (!columnTypes.containsKey(entry.getKey()) || timestamp >= maxTimestamp) {
        ValueType valueType;
        try {
          valueType = ValueType.valueOf(entry.getValue().getType().toUpperCase());
        } catch (IllegalArgumentException e) {
          // Assume unrecognized types are some flavor of COMPLEX. This throws away information about exactly
          // what kind of complex column it is, which we may want to preserve some day.
          valueType = ValueType.COMPLEX;
        }
        columnTypes.put(entry.getKey(), valueType);
        maxTimestamp = timestamp;
      }
    }
  }
  final RowSignature.Builder rowSignature = RowSignature.builder();
  for (Map.Entry<String, ValueType> entry : columnTypes.entrySet()) {
    rowSignature.add(entry.getKey(), entry.getValue());
  }
  return new DruidTable(new TableDataSource(dataSource), rowSignature.build());
}
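The try/catch in the inner loop is the whole type-mapping policy, so it can be read in isolation. A minimal sketch; toValueType is a hypothetical helper, and "hyperUnique" is just an example of a segment type name with no matching ValueType constant:

static ValueType toValueType(String typeName)
{
  try {
    // Known names ("STRING", "FLOAT", "LONG", ...) map to their enum constant.
    return ValueType.valueOf(typeName.toUpperCase());
  }
  catch (IllegalArgumentException e) {
    // Anything else (e.g. "hyperUnique") is treated as an opaque COMPLEX column.
    return ValueType.COMPLEX;
  }
}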
Use of io.druid.query.TableDataSource in project druid by druid-io.
In class ScanQuerySpecTest, the method testSerializationLegacyString:
@Test
public void testSerializationLegacyString() throws Exception
{
  String legacy = "{\"queryType\":\"scan\",\"dataSource\":{\"type\":\"table\",\"name\":\"testing\"},"
                  + "\"intervals\":{\"type\":\"LegacySegmentSpec\",\"intervals\":[\"2011-01-12T00:00:00.000Z/2011-01-14T00:00:00.000Z\"]},"
                  + "\"filter\":null,"
                  + "\"columns\":[\"market\",\"quality\",\"index\"],"
                  + "\"limit\":3,"
                  + "\"context\":null}";
  String current = "{\"queryType\":\"scan\",\"dataSource\":{\"type\":\"table\",\"name\":\"testing\"},"
                   + "\"intervals\":{\"type\":\"LegacySegmentSpec\",\"intervals\":[\"2011-01-12T00:00:00.000Z/2011-01-14T00:00:00.000Z\"]},"
                   + "\"resultFormat\":\"list\","
                   + "\"batchSize\":20480,"
                   + "\"limit\":3,"
                   + "\"filter\":null,"
                   + "\"columns\":[\"market\",\"quality\",\"index\"],"
                   + "\"context\":null,"
                   + "\"descending\":false}";
  ScanQuery query = new ScanQuery(
      new TableDataSource(QueryRunnerTestHelper.dataSource),
      new LegacySegmentSpec(new Interval("2011-01-12/2011-01-14")),
      null,
      0,
      3,
      null,
      Arrays.<String>asList("market", "quality", "index"),
      null
  );
  // The writer emits only the current layout...
  String actual = jsonMapper.writeValueAsString(query);
  Assert.assertEquals(current, actual);
  // ...while the reader must accept both the current and the legacy layouts.
  Assert.assertEquals(query, jsonMapper.readValue(actual, ScanQuery.class));
  Assert.assertEquals(query, jsonMapper.readValue(legacy, ScanQuery.class));
}
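The three assertions encode a general backwards-compatibility pattern: serialize to the current wire format only, but keep deserializing the old one. A compact sketch of that pattern; checkBackwardsCompatible is a hypothetical helper, and the ObjectMapper is assumed to be configured with Druid's Jackson modules like the test's jsonMapper:

static <T> void checkBackwardsCompatible(ObjectMapper mapper, T query, String legacyJson, Class<T> clazz)
    throws Exception
{
  // Round-trip through the current serialized form...
  String currentJson = mapper.writeValueAsString(query);
  Assert.assertEquals(query, mapper.readValue(currentJson, clazz));
  // ...and confirm the old wire format still deserializes to an equal query.
  Assert.assertEquals(query, mapper.readValue(legacyJson, clazz));
}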