
Example 6 with TableDataSource

Use of io.druid.query.TableDataSource in project druid by druid-io.

From class TimeBoundaryQueryRunnerTest, method testMergeResultsEmptyResults.

@Test
public void testMergeResultsEmptyResults() throws Exception {
    List<Result<TimeBoundaryResultValue>> results = Lists.newArrayList();
    TimeBoundaryQuery query = new TimeBoundaryQuery(new TableDataSource("test"), null, null, null, null);
    Iterable<Result<TimeBoundaryResultValue>> actual = query.mergeResults(results);
    Assert.assertFalse(actual.iterator().hasNext());
}
Also used: TableDataSource (io.druid.query.TableDataSource), Result (io.druid.query.Result), Test (org.junit.Test)
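
For comparison, the same query can also be constructed through the Druids builder rather than the five-argument constructor. The following is a minimal sketch, assuming the builder's dataSource(String) overload wraps the name in a TableDataSource as the other Druids builders do; the test name testBuilderConstruction is hypothetical.

@Test
public void testBuilderConstruction() {
    // Hypothetical companion check: the builder-produced query should carry
    // a data source equal to an explicitly constructed TableDataSource.
    TimeBoundaryQuery built = Druids.newTimeBoundaryQueryBuilder()
        .dataSource("test")
        .build();
    Assert.assertEquals(new TableDataSource("test"), built.getDataSource());
}

(Druids is io.druid.query.Druids.)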

Example 7 with TableDataSource

Use of io.druid.query.TableDataSource in project druid by druid-io.

From class TimeBoundaryQueryRunnerTest, method testMergeResults.

@Test
public void testMergeResults() throws Exception {
    List<Result<TimeBoundaryResultValue>> results = Arrays.asList(
        new Result<>(new DateTime(), new TimeBoundaryResultValue(
            ImmutableMap.of("maxTime", "2012-01-01", "minTime", "2011-01-01"))),
        new Result<>(new DateTime(), new TimeBoundaryResultValue(
            ImmutableMap.of("maxTime", "2012-02-01", "minTime", "2011-01-01"))));
    TimeBoundaryQuery query = new TimeBoundaryQuery(new TableDataSource("test"), null, null, null, null);
    Iterable<Result<TimeBoundaryResultValue>> actual = query.mergeResults(results);
    Assert.assertEquals(new DateTime("2012-02-01"), actual.iterator().next().getValue().getMaxTime());
}
Also used: TableDataSource (io.druid.query.TableDataSource), DateTime (org.joda.time.DateTime), Result (io.druid.query.Result), Test (org.junit.Test)
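
The assertion above only inspects maxTime; mergeResults should also keep the earliest minTime across the inputs. A minimal companion sketch, assuming TimeBoundaryResultValue exposes getMinTime() alongside the getMaxTime() accessor used above; the test name is hypothetical.

@Test
public void testMergeResultsMinTime() throws Exception {
    List<Result<TimeBoundaryResultValue>> results = Arrays.asList(
        new Result<>(new DateTime(), new TimeBoundaryResultValue(
            ImmutableMap.of("maxTime", "2012-01-01", "minTime", "2011-03-01"))),
        new Result<>(new DateTime(), new TimeBoundaryResultValue(
            ImmutableMap.of("maxTime", "2012-02-01", "minTime", "2011-01-01"))));
    TimeBoundaryQuery query = new TimeBoundaryQuery(new TableDataSource("test"), null, null, null, null);
    Result<TimeBoundaryResultValue> merged = query.mergeResults(results).iterator().next();
    // The merged boundary spans the union of the inputs: earliest minTime wins.
    Assert.assertEquals(new DateTime("2011-01-01"), merged.getValue().getMinTime());
}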

Example 8 with TableDataSource

Use of io.druid.query.TableDataSource in project druid by druid-io.

From class CoordinatorServerViewTest, method testSingleServerAddedRemovedSegment.

@Test
public void testSingleServerAddedRemovedSegment() throws Exception {
    segmentViewInitLatch = new CountDownLatch(1);
    segmentAddedLatch = new CountDownLatch(1);
    segmentRemovedLatch = new CountDownLatch(1);
    setupViews();
    // Announce a single historical server and one segment served by it.
    final DruidServer druidServer = new DruidServer(
        "localhost:1234", "localhost:1234", 10000000L, "historical", "default_tier", 0);
    setupZNodeForServer(druidServer, zkPathsConfig, jsonMapper);
    final DataSegment segment = dataSegmentWithIntervalAndVersion("2014-10-20T00:00:00Z/P1D", "v1");
    announceSegmentForServer(druidServer, segment, zkPathsConfig, jsonMapper);
    // Wait for the inventory view to initialize and register the added segment.
    Assert.assertTrue(timing.forWaiting().awaitLatch(segmentViewInitLatch));
    Assert.assertTrue(timing.forWaiting().awaitLatch(segmentAddedLatch));
    TimelineLookup timeline = overlordServerView.getTimeline(new TableDataSource("test_overlord_server_view"));
    List<TimelineObjectHolder> serverLookupRes = (List<TimelineObjectHolder>) timeline.lookup(new Interval("2014-10-20T00:00:00Z/P1D"));
    Assert.assertEquals(1, serverLookupRes.size());
    TimelineObjectHolder<String, SegmentLoadInfo> actualTimelineObjectHolder = serverLookupRes.get(0);
    Assert.assertEquals(new Interval("2014-10-20T00:00:00Z/P1D"), actualTimelineObjectHolder.getInterval());
    Assert.assertEquals("v1", actualTimelineObjectHolder.getVersion());
    PartitionHolder<SegmentLoadInfo> actualPartitionHolder = actualTimelineObjectHolder.getObject();
    Assert.assertTrue(actualPartitionHolder.isComplete());
    Assert.assertEquals(1, Iterables.size(actualPartitionHolder));
    SegmentLoadInfo segmentLoadInfo = actualPartitionHolder.iterator().next().getObject();
    Assert.assertFalse(segmentLoadInfo.isEmpty());
    Assert.assertEquals(druidServer.getMetadata(), Iterables.getOnlyElement(segmentLoadInfo.toImmutableSegmentLoadInfo().getServers()));
    // Unannounce the segment and verify it disappears from the timeline.
    unannounceSegmentForServer(druidServer, segment);
    Assert.assertTrue(timing.forWaiting().awaitLatch(segmentRemovedLatch));
    Assert.assertEquals(0, ((List<TimelineObjectHolder>) timeline.lookup(new Interval("2014-10-20T00:00:00Z/P1D"))).size());
    Assert.assertNull(timeline.findEntry(new Interval("2014-10-20T00:00:00Z/P1D"), "v1"));
}
Also used: CountDownLatch (java.util.concurrent.CountDownLatch), DataSegment (io.druid.timeline.DataSegment), TimelineObjectHolder (io.druid.timeline.TimelineObjectHolder), TableDataSource (io.druid.query.TableDataSource), ImmutableList (com.google.common.collect.ImmutableList), List (java.util.List), TimelineLookup (io.druid.timeline.TimelineLookup), Interval (org.joda.time.Interval), Test (org.junit.Test)
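
The helper dataSegmentWithIntervalAndVersion is referenced but not shown in this excerpt. Below is a plausible reconstruction using DataSegment.builder(); the data source name is grounded in the getTimeline() lookup above, while the loadSpec contents and the NoneShardSpec (io.druid.timeline.partition.NoneShardSpec) single-shard choice are assumptions.

private DataSegment dataSegmentWithIntervalAndVersion(String intervalStr, String version) {
    return DataSegment.builder()
        // Must match the data source queried via getTimeline() above.
        .dataSource("test_overlord_server_view")
        .interval(new Interval(intervalStr))
        // Assumed placeholder load spec; any local path works for the test.
        .loadSpec(ImmutableMap.<String, Object>of("type", "local", "path", "somewhere"))
        .version(version)
        .dimensions(ImmutableList.<String>of())
        .metrics(ImmutableList.<String>of())
        // Assumed single-shard segment, so the partition holder reports complete.
        .shardSpec(NoneShardSpec.instance())
        .size(0)
        .build();
}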

Example 9 with TableDataSource

Use of io.druid.query.TableDataSource in project druid by druid-io.

From class DruidSchema, method computeTable.

private DruidTable computeTable(final String dataSource) {
    final SegmentMetadataQuery segmentMetadataQuery = new SegmentMetadataQuery(
        new TableDataSource(dataSource),
        null,
        null,
        false,
        ImmutableMap.<String, Object>of("useCache", false, "populateCache", false),
        EnumSet.of(SegmentMetadataQuery.AnalysisType.INTERVAL),
        null,
        true);
    final Sequence<SegmentAnalysis> sequence = segmentMetadataQuery.run(walker, Maps.<String, Object>newHashMap());
    final List<SegmentAnalysis> results = Sequences.toList(sequence, Lists.<SegmentAnalysis>newArrayList());
    if (results.isEmpty()) {
        return null;
    }
    final Map<String, ValueType> columnTypes = Maps.newLinkedHashMap();
    // Resolve conflicts by taking the latest metadata. This aids in gradual schema evolution.
    long maxTimestamp = JodaUtils.MIN_INSTANT;
    for (SegmentAnalysis analysis : results) {
        final long timestamp;
        if (analysis.getIntervals() != null && analysis.getIntervals().size() > 0) {
            timestamp = analysis.getIntervals().get(analysis.getIntervals().size() - 1).getEndMillis();
        } else {
            timestamp = JodaUtils.MIN_INSTANT;
        }
        for (Map.Entry<String, ColumnAnalysis> entry : analysis.getColumns().entrySet()) {
            if (entry.getValue().isError()) {
                // Skip columns with analysis errors.
                continue;
            }
            if (!columnTypes.containsKey(entry.getKey()) || timestamp >= maxTimestamp) {
                ValueType valueType;
                try {
                    valueType = ValueType.valueOf(entry.getValue().getType().toUpperCase());
                } catch (IllegalArgumentException e) {
                    // Assume unrecognized types are some flavor of COMPLEX. This throws away information about exactly
                    // what kind of complex column it is, which we may want to preserve some day.
                    valueType = ValueType.COMPLEX;
                }
                columnTypes.put(entry.getKey(), valueType);
                maxTimestamp = timestamp;
            }
        }
    }
    final RowSignature.Builder rowSignature = RowSignature.builder();
    for (Map.Entry<String, ValueType> entry : columnTypes.entrySet()) {
        rowSignature.add(entry.getKey(), entry.getValue());
    }
    return new DruidTable(new TableDataSource(dataSource), rowSignature.build());
}
Also used: ValueType (io.druid.segment.column.ValueType), DruidTable (io.druid.sql.calcite.table.DruidTable), TableDataSource (io.druid.query.TableDataSource), SegmentMetadataQuery (io.druid.query.metadata.metadata.SegmentMetadataQuery), ColumnAnalysis (io.druid.query.metadata.metadata.ColumnAnalysis), SegmentAnalysis (io.druid.query.metadata.metadata.SegmentAnalysis), ConcurrentMap (java.util.concurrent.ConcurrentMap), Map (java.util.Map), ImmutableMap (com.google.common.collect.ImmutableMap), RowSignature (io.druid.sql.calcite.table.RowSignature)
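
The conflict rule inside the loop is easy to miss: a column's type is recorded on first sight, then overwritten whenever a segment with an equal-or-newer end timestamp re-reports it. The fragment below isolates just that rule with placeholder data; the reports array is illustrative scaffolding, not a Druid API.

static ValueType resolveColumnType() {
    Map<String, ValueType> columnTypes = new LinkedHashMap<>();
    long maxTimestamp = Long.MIN_VALUE;
    // (segment end millis, column name, reported type) stand-ins for SegmentAnalysis rows.
    Object[][] reports = {
        { 1000L, "dim", ValueType.LONG },    // older segment reports LONG
        { 2000L, "dim", ValueType.STRING }   // newer segment reports STRING
    };
    for (Object[] r : reports) {
        long timestamp = (Long) r[0];
        String name = (String) r[1];
        ValueType type = (ValueType) r[2];
        // Same rule as computeTable: first sighting wins, unless an
        // equal-or-newer segment re-reports the column.
        if (!columnTypes.containsKey(name) || timestamp >= maxTimestamp) {
            columnTypes.put(name, type);
            maxTimestamp = timestamp;
        }
    }
    return columnTypes.get("dim");  // STRING: the newer report won
}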

Example 10 with TableDataSource

Use of io.druid.query.TableDataSource in project druid by druid-io.

From class ScanQuerySpecTest, method testSerializationLegacyString.

@Test
public void testSerializationLegacyString() throws Exception {
    String legacy = "{\"queryType\":\"scan\",\"dataSource\":{\"type\":\"table\",\"name\":\"testing\"},"
        + "\"intervals\":{\"type\":\"LegacySegmentSpec\",\"intervals\":[\"2011-01-12T00:00:00.000Z/2011-01-14T00:00:00.000Z\"]},"
        + "\"filter\":null,"
        + "\"columns\":[\"market\",\"quality\",\"index\"],"
        + "\"limit\":3,"
        + "\"context\":null}";
    String current = "{\"queryType\":\"scan\",\"dataSource\":{\"type\":\"table\",\"name\":\"testing\"},"
        + "\"intervals\":{\"type\":\"LegacySegmentSpec\",\"intervals\":[\"2011-01-12T00:00:00.000Z/2011-01-14T00:00:00.000Z\"]},"
        + "\"resultFormat\":\"list\","
        + "\"batchSize\":20480,"
        + "\"limit\":3,"
        + "\"filter\":null,"
        + "\"columns\":[\"market\",\"quality\",\"index\"],"
        + "\"context\":null,"
        + "\"descending\":false}";
    ScanQuery query = new ScanQuery(
        new TableDataSource(QueryRunnerTestHelper.dataSource),
        new LegacySegmentSpec(new Interval("2011-01-12/2011-01-14")),
        null,
        0,
        3,
        null,
        Arrays.<String>asList("market", "quality", "index"),
        null);
    String actual = jsonMapper.writeValueAsString(query);
    Assert.assertEquals(current, actual);
    Assert.assertEquals(query, jsonMapper.readValue(actual, ScanQuery.class));
    Assert.assertEquals(query, jsonMapper.readValue(legacy, ScanQuery.class));
}
Also used: TableDataSource (io.druid.query.TableDataSource), LegacySegmentSpec (io.druid.query.spec.LegacySegmentSpec), Interval (org.joda.time.Interval), Test (org.junit.Test)
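
The interesting part of this test is that the legacy JSON omits resultFormat, batchSize, and descending yet deserializes to the same query, which implies those fields have defaults matching the current serialization. A minimal sketch of checking the defaults directly, reusing the legacy string and jsonMapper from the test above; the accessor names getResultFormat(), getBatchSize(), and isDescending() are assumptions about the ScanQuery API.

    ScanQuery fromLegacy = jsonMapper.readValue(legacy, ScanQuery.class);
    // Fields absent from the legacy form should fall back to defaults that
    // match the "current" serialization above.
    Assert.assertEquals("list", fromLegacy.getResultFormat());
    Assert.assertEquals(20480, fromLegacy.getBatchSize());
    Assert.assertFalse(fromLegacy.isDescending());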

Aggregations (type usage counts across the full example set)

TableDataSource (io.druid.query.TableDataSource): 25
Interval (org.joda.time.Interval): 18
Test (org.junit.Test): 17
Result (io.druid.query.Result): 8
MultipleIntervalSegmentSpec (io.druid.query.spec.MultipleIntervalSegmentSpec): 7
TimelineObjectHolder (io.druid.timeline.TimelineObjectHolder): 7
DateTime (org.joda.time.DateTime): 7
ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper): 6
DefaultObjectMapper (io.druid.jackson.DefaultObjectMapper): 5
DataSegment (io.druid.timeline.DataSegment): 5
Pair (io.druid.java.util.common.Pair): 4
CountAggregatorFactory (io.druid.query.aggregation.CountAggregatorFactory): 4
TimelineLookup (io.druid.timeline.TimelineLookup): 4
CountDownLatch (java.util.concurrent.CountDownLatch): 4
Function (com.google.common.base.Function): 3
ImmutableMap (com.google.common.collect.ImmutableMap): 3
Query (io.druid.query.Query): 3
QueryRunner (io.druid.query.QueryRunner): 3
SegmentDescriptor (io.druid.query.SegmentDescriptor): 3
AggregatorFactory (io.druid.query.aggregation.AggregatorFactory): 3