Example 36 with DataSegment

Use of io.druid.timeline.DataSegment in project druid by druid-io.

In class CachingClusteredClientTest, the method makeMockSingleDimensionSelector:

private ServerSelector makeMockSingleDimensionSelector(DruidServer server, String dimension, String start, String end, int partitionNum) {
    DataSegment segment = EasyMock.createNiceMock(DataSegment.class);
    EasyMock.expect(segment.getIdentifier()).andReturn(DATA_SOURCE).anyTimes();
    EasyMock.expect(segment.getShardSpec()).andReturn(new SingleDimensionShardSpec(dimension, start, end, partitionNum)).anyTimes();
    EasyMock.replay(segment);
    ServerSelector selector = new ServerSelector(segment, new HighestPriorityTierSelectorStrategy(new RandomServerSelectorStrategy()));
    selector.addServerAndUpdateSegment(new QueryableDruidServer(server, null), segment);
    return selector;
}
Also used: ServerSelector(io.druid.client.selector.ServerSelector) HighestPriorityTierSelectorStrategy(io.druid.client.selector.HighestPriorityTierSelectorStrategy) DataSegment(io.druid.timeline.DataSegment) SingleDimensionShardSpec(io.druid.timeline.partition.SingleDimensionShardSpec) RandomServerSelectorStrategy(io.druid.client.selector.RandomServerSelectorStrategy) QueryableDruidServer(io.druid.client.selector.QueryableDruidServer)
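
For orientation, here is a hedged sketch of how this helper might be consumed inside CachingClusteredClientTest: two adjacent single-dimension partitions of one interval are registered on the test timeline through their shard specs. The names servers, timeline, and interval are assumed from the surrounding test class.

ServerSelector first = makeMockSingleDimensionSelector(servers[0], "dim", null, "m", 0);
ServerSelector second = makeMockSingleDimensionSelector(servers[1], "dim", "m", null, 1);
// Each selector's mocked segment carries its SingleDimensionShardSpec,
// which createChunk uses to place the chunk at the right partition slot.
timeline.add(interval, "v1", first.getSegment().getShardSpec().createChunk(first));
timeline.add(interval, "v1", second.getSegment().getShardSpec().createChunk(second));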

Example 37 with DataSegment

Use of io.druid.timeline.DataSegment in project druid by druid-io.

In class CachingClusteredClientTest, the method populateTimeline:

private List<Map<DruidServer, ServerExpectations>> populateTimeline(List<Interval> queryIntervals, List<List<Iterable<Result<Object>>>> expectedResults, int numQueryIntervals, List<Object> mocks) {
    timeline = new VersionedIntervalTimeline<>(Ordering.natural());
    final List<Map<DruidServer, ServerExpectations>> serverExpectationList = Lists.newArrayList();
    for (int k = 0; k < numQueryIntervals + 1; ++k) {
        final int numChunks = expectedResults.get(k).size();
        final TreeMap<DruidServer, ServerExpectations> serverExpectations = Maps.newTreeMap();
        serverExpectationList.add(serverExpectations);
        for (int j = 0; j < numChunks; ++j) {
            DruidServer lastServer = servers[random.nextInt(servers.length)];
            if (!serverExpectations.containsKey(lastServer)) {
                serverExpectations.put(lastServer, new ServerExpectations(lastServer, makeMock(mocks, QueryRunner.class)));
            }
            DataSegment mockSegment = makeMock(mocks, DataSegment.class);
            ServerExpectation expectation = new ServerExpectation(
                    // interval/chunk identifier, e.g. "0_1" for interval 0, chunk 1
                    String.format("%s_%s", k, j),
                    queryIntervals.get(k),
                    mockSegment,
                    expectedResults.get(k).get(j));
            serverExpectations.get(lastServer).addExpectation(expectation);
            ServerSelector selector = new ServerSelector(expectation.getSegment(), new HighestPriorityTierSelectorStrategy(new RandomServerSelectorStrategy()));
            selector.addServerAndUpdateSegment(new QueryableDruidServer(lastServer, null), selector.getSegment());
            final ShardSpec shardSpec;
            if (numChunks == 1) {
                shardSpec = new SingleDimensionShardSpec("dimAll", null, null, 0);
            } else {
                String start = null;
                String end = null;
                if (j > 0) {
                    start = String.valueOf(j);
                }
                if (j + 1 < numChunks) {
                    end = String.valueOf(j + 1);
                }
                shardSpec = new SingleDimensionShardSpec("dim" + k, start, end, j);
            }
            EasyMock.expect(mockSegment.getShardSpec()).andReturn(shardSpec).anyTimes();
            timeline.add(queryIntervals.get(k), String.valueOf(k), shardSpec.createChunk(selector));
        }
    }
    return serverExpectationList;
}
Also used: QueryableDruidServer(io.druid.client.selector.QueryableDruidServer) DataSegment(io.druid.timeline.DataSegment) SingleDimensionShardSpec(io.druid.timeline.partition.SingleDimensionShardSpec) ShardSpec(io.druid.timeline.partition.ShardSpec) NoneShardSpec(io.druid.timeline.partition.NoneShardSpec) ServerSelector(io.druid.client.selector.ServerSelector) HighestPriorityTierSelectorStrategy(io.druid.client.selector.HighestPriorityTierSelectorStrategy) Map(java.util.Map) TreeMap(java.util.TreeMap) ImmutableMap(com.google.common.collect.ImmutableMap) HashMap(java.util.HashMap) RandomServerSelectorStrategy(io.druid.client.selector.RandomServerSelectorStrategy)
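
The boundary logic above is the subtle part: the first chunk of an interval gets an open start, the last gets an open end, and interior chunks are bounded on both sides. As a sketch, for numChunks = 3 (with "dimK" standing in for "dim" + k) the loop produces:

// j = 0: open start
new SingleDimensionShardSpec("dimK", null, "1", 0);
// j = 1: bounded on both sides
new SingleDimensionShardSpec("dimK", "1", "2", 1);
// j = 2: open end
new SingleDimensionShardSpec("dimK", "2", null, 2);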

Example 38 with DataSegment

Use of io.druid.timeline.DataSegment in project druid by druid-io.

In class DruidCoordinator, the method getReplicationStatus:

public Map<String, CountingMap<String>> getReplicationStatus() {
    final Map<String, CountingMap<String>> retVal = Maps.newHashMap();
    if (segmentReplicantLookup == null) {
        return retVal;
    }
    final DateTime now = new DateTime();
    for (DataSegment segment : getAvailableDataSegments()) {
        List<Rule> rules = metadataRuleManager.getRulesWithDefault(segment.getDataSource());
        for (Rule rule : rules) {
            if (rule instanceof LoadRule && rule.appliesTo(segment, now)) {
                for (Map.Entry<String, Integer> entry : ((LoadRule) rule).getTieredReplicants().entrySet()) {
                    CountingMap<String> dataSourceMap = retVal.get(entry.getKey());
                    if (dataSourceMap == null) {
                        dataSourceMap = new CountingMap<>();
                        retVal.put(entry.getKey(), dataSourceMap);
                    }
                    int diff = Math.max(entry.getValue() - segmentReplicantLookup.getTotalReplicants(segment.getIdentifier(), entry.getKey()), 0);
                    dataSourceMap.add(segment.getDataSource(), diff);
                }
                break;
            }
        }
    }
    return retVal;
}
Also used: DataSegment(io.druid.timeline.DataSegment) DateTime(org.joda.time.DateTime) CountingMap(io.druid.collections.CountingMap) LoadRule(io.druid.server.coordinator.rules.LoadRule) Rule(io.druid.server.coordinator.rules.Rule) Map(java.util.Map) ConcurrentMap(java.util.concurrent.ConcurrentMap)
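
A worked example of the under-replication arithmetic above, with illustrative values (a sketch, not code from the project):

// Suppose a LoadRule asks for 2 replicants in tier "hot" but the
// replicant lookup reports only 1 loaded copy of the segment.
int configured = 2; // entry.getValue()
int loaded = 1;     // segmentReplicantLookup.getTotalReplicants(identifier, "hot")
int diff = Math.max(configured - loaded, 0); // 1: the segment is one replica short
// dataSourceMap.add(dataSource, diff) then accumulates the shortfall per datasource.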

Example 39 with DataSegment

Use of io.druid.timeline.DataSegment in project druid by druid-io.

In class CachingClusteredClientTest, the method testIfNoneMatch:

@Test
public void testIfNoneMatch() throws Exception {
    Interval interval = new Interval("2016/2017");
    final DataSegment dataSegment = new DataSegment("dataSource", interval, "ver", ImmutableMap.<String, Object>of("type", "hdfs", "path", "/tmp"), ImmutableList.of("product"), ImmutableList.of("visited_sum"), NoneShardSpec.instance(), 9, 12334);
    final ServerSelector selector = new ServerSelector(dataSegment, new HighestPriorityTierSelectorStrategy(new RandomServerSelectorStrategy()));
    selector.addServerAndUpdateSegment(new QueryableDruidServer(servers[0], null), dataSegment);
    timeline.add(interval, "ver", new SingleElementPartitionChunk<>(selector));
    TimeBoundaryQuery query = Druids.newTimeBoundaryQueryBuilder().dataSource(DATA_SOURCE).intervals(new MultipleIntervalSegmentSpec(ImmutableList.of(interval))).context(ImmutableMap.<String, Object>of("If-None-Match", "aVJV29CJY93rszVW/QBy0arWZo0=")).build();
    Map<String, String> responseContext = new HashMap<>();
    client.run(query, responseContext);
    Assert.assertEquals("Z/eS4rQz5v477iq7Aashr6JPZa0=", responseContext.get("ETag"));
}
Also used: HashMap(java.util.HashMap) MultipleIntervalSegmentSpec(io.druid.query.spec.MultipleIntervalSegmentSpec) TimeBoundaryQuery(io.druid.query.timeboundary.TimeBoundaryQuery) DataSegment(io.druid.timeline.DataSegment) QueryableDruidServer(io.druid.client.selector.QueryableDruidServer) ServerSelector(io.druid.client.selector.ServerSelector) HighestPriorityTierSelectorStrategy(io.druid.client.selector.HighestPriorityTierSelectorStrategy) RandomServerSelectorStrategy(io.druid.client.selector.RandomServerSelectorStrategy) Interval(org.joda.time.Interval) Test(org.junit.Test) GroupByQueryRunnerTest(io.druid.query.groupby.GroupByQueryRunnerTest)
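
A hedged sketch of the ETag handshake this test exercises: the caller sends the ETag it cached earlier as the "If-None-Match" query context value and compares it with the ETag the client writes back into the response context. The names cachedETag and query are assumptions for illustration.

Map<String, String> responseContext = new HashMap<>();
// query carries context "If-None-Match" -> cachedETag, as in the test above
client.run(query, responseContext);
String freshETag = responseContext.get("ETag");
if (cachedETag.equals(freshETag)) {
    // Nothing relevant changed on the cluster; cached results can be reused.
}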

Example 40 with DataSegment

Use of io.druid.timeline.DataSegment in project druid by druid-io.

In class CoordinatorServerViewTest, the method testSingleServerAddedRemovedSegment:

@Test
public void testSingleServerAddedRemovedSegment() throws Exception {
    segmentViewInitLatch = new CountDownLatch(1);
    segmentAddedLatch = new CountDownLatch(1);
    segmentRemovedLatch = new CountDownLatch(1);
    setupViews();
    final DruidServer druidServer = new DruidServer("localhost:1234", "localhost:1234", 10000000L, "historical", "default_tier", 0);
    setupZNodeForServer(druidServer, zkPathsConfig, jsonMapper);
    final DataSegment segment = dataSegmentWithIntervalAndVersion("2014-10-20T00:00:00Z/P1D", "v1");
    announceSegmentForServer(druidServer, segment, zkPathsConfig, jsonMapper);
    Assert.assertTrue(timing.forWaiting().awaitLatch(segmentViewInitLatch));
    Assert.assertTrue(timing.forWaiting().awaitLatch(segmentAddedLatch));
    TimelineLookup timeline = overlordServerView.getTimeline(new TableDataSource("test_overlord_server_view"));
    List<TimelineObjectHolder> serverLookupRes = (List<TimelineObjectHolder>) timeline.lookup(new Interval("2014-10-20T00:00:00Z/P1D"));
    Assert.assertEquals(1, serverLookupRes.size());
    TimelineObjectHolder<String, SegmentLoadInfo> actualTimelineObjectHolder = serverLookupRes.get(0);
    Assert.assertEquals(new Interval("2014-10-20T00:00:00Z/P1D"), actualTimelineObjectHolder.getInterval());
    Assert.assertEquals("v1", actualTimelineObjectHolder.getVersion());
    PartitionHolder<SegmentLoadInfo> actualPartitionHolder = actualTimelineObjectHolder.getObject();
    Assert.assertTrue(actualPartitionHolder.isComplete());
    Assert.assertEquals(1, Iterables.size(actualPartitionHolder));
    SegmentLoadInfo segmentLoadInfo = actualPartitionHolder.iterator().next().getObject();
    Assert.assertFalse(segmentLoadInfo.isEmpty());
    Assert.assertEquals(druidServer.getMetadata(), Iterables.getOnlyElement(segmentLoadInfo.toImmutableSegmentLoadInfo().getServers()));
    unannounceSegmentForServer(druidServer, segment);
    Assert.assertTrue(timing.forWaiting().awaitLatch(segmentRemovedLatch));
    Assert.assertEquals(0, ((List<TimelineObjectHolder>) timeline.lookup(new Interval("2014-10-20T00:00:00Z/P1D"))).size());
    Assert.assertNull(timeline.findEntry(new Interval("2014-10-20T00:00:00Z/P1D"), "v1"));
}
Also used: CountDownLatch(java.util.concurrent.CountDownLatch) DataSegment(io.druid.timeline.DataSegment) TimelineObjectHolder(io.druid.timeline.TimelineObjectHolder) TableDataSource(io.druid.query.TableDataSource) ImmutableList(com.google.common.collect.ImmutableList) List(java.util.List) TimelineLookup(io.druid.timeline.TimelineLookup) Interval(org.joda.time.Interval) Test(org.junit.Test)
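
The helper dataSegmentWithIntervalAndVersion is not shown in this excerpt. A plausible shape, reusing the nine-argument DataSegment constructor seen in Example 39, could look like the following; the load spec, dimension, and metric values are illustrative only.

private DataSegment dataSegmentWithIntervalAndVersion(String intervalStr, String version) {
    return new DataSegment(
        // matches the TableDataSource the test looks up
        "test_overlord_server_view",
        new Interval(intervalStr),
        version,
        ImmutableMap.<String, Object>of("type", "local", "path", "/tmp"),
        ImmutableList.of("dim"),
        ImmutableList.of("met"),
        NoneShardSpec.instance(),
        9,
        0);
}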

Aggregations

Types most frequently used together with DataSegment (occurrence counts):

DataSegment (io.druid.timeline.DataSegment): 296
Test (org.junit.Test): 154
Interval (org.joda.time.Interval): 136
File (java.io.File): 56
DateTime (org.joda.time.DateTime): 53
IOException (java.io.IOException): 37
DruidServer (io.druid.client.DruidServer): 36
Map (java.util.Map): 35
ImmutableMap (com.google.common.collect.ImmutableMap): 20
DruidDataSource (io.druid.client.DruidDataSource): 19
ListeningExecutorService (com.google.common.util.concurrent.ListeningExecutorService): 18
List (java.util.List): 18
DefaultObjectMapper (io.druid.jackson.DefaultObjectMapper): 16
Rule (io.druid.server.coordinator.rules.Rule): 16
ForeverLoadRule (io.druid.server.coordinator.rules.ForeverLoadRule): 14
IntervalDropRule (io.druid.server.coordinator.rules.IntervalDropRule): 13
IntervalLoadRule (io.druid.server.coordinator.rules.IntervalLoadRule): 13
GET (javax.ws.rs.GET): 13
Produces (javax.ws.rs.Produces): 13
Path (org.apache.hadoop.fs.Path): 13