Use of io.druid.timeline.DataSegment in project druid by druid-io.
From the class CachingClusteredClientTest, method makeMockSingleDimensionSelector:
private ServerSelector makeMockSingleDimensionSelector(
    DruidServer server, String dimension, String start, String end, int partitionNum)
{
  DataSegment segment = EasyMock.createNiceMock(DataSegment.class);
  EasyMock.expect(segment.getIdentifier()).andReturn(DATA_SOURCE).anyTimes();
  EasyMock.expect(segment.getShardSpec())
          .andReturn(new SingleDimensionShardSpec(dimension, start, end, partitionNum))
          .anyTimes();
  EasyMock.replay(segment);

  ServerSelector selector = new ServerSelector(
      segment,
      new HighestPriorityTierSelectorStrategy(new RandomServerSelectorStrategy())
  );
  selector.addServerAndUpdateSegment(new QueryableDruidServer(server, null), segment);
  return selector;
}
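A minimal usage sketch, assuming the timeline field and servers array of the surrounding test class and an arbitrary split point "m"; it reuses the shardSpec.createChunk(...) pattern seen in populateTimeline below:

// Hypothetical usage: register two dimension-range partitions of one interval,
// split at "m", so a query filtering on "dim" can prune one of them.
Interval interval = new Interval("2011-01-01/2011-01-02");
SingleDimensionShardSpec lower = new SingleDimensionShardSpec("dim", null, "m", 0);
SingleDimensionShardSpec upper = new SingleDimensionShardSpec("dim", "m", null, 1);
timeline.add(interval, "v1", lower.createChunk(
    makeMockSingleDimensionSelector(servers[0], "dim", null, "m", 0)));
timeline.add(interval, "v1", upper.createChunk(
    makeMockSingleDimensionSelector(servers[1], "dim", "m", null, 1)));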
Use of io.druid.timeline.DataSegment in project druid by druid-io.
From the class CachingClusteredClientTest, method populateTimeline:
private List<Map<DruidServer, ServerExpectations>> populateTimeline(
    List<Interval> queryIntervals,
    List<List<Iterable<Result<Object>>>> expectedResults,
    int numQueryIntervals,
    List<Object> mocks)
{
  timeline = new VersionedIntervalTimeline<>(Ordering.natural());

  final List<Map<DruidServer, ServerExpectations>> serverExpectationList = Lists.newArrayList();

  for (int k = 0; k < numQueryIntervals + 1; ++k) {
    final int numChunks = expectedResults.get(k).size();
    final TreeMap<DruidServer, ServerExpectations> serverExpectations = Maps.newTreeMap();
    serverExpectationList.add(serverExpectations);

    for (int j = 0; j < numChunks; ++j) {
      DruidServer lastServer = servers[random.nextInt(servers.length)];
      if (!serverExpectations.containsKey(lastServer)) {
        serverExpectations.put(lastServer, new ServerExpectations(lastServer, makeMock(mocks, QueryRunner.class)));
      }

      DataSegment mockSegment = makeMock(mocks, DataSegment.class);
      ServerExpectation expectation = new ServerExpectation(
          String.format("%s_%s", k, j), // interval/chunk
          queryIntervals.get(k),
          mockSegment,
          expectedResults.get(k).get(j)
      );
      serverExpectations.get(lastServer).addExpectation(expectation);

      ServerSelector selector = new ServerSelector(
          expectation.getSegment(),
          new HighestPriorityTierSelectorStrategy(new RandomServerSelectorStrategy())
      );
      selector.addServerAndUpdateSegment(new QueryableDruidServer(lastServer, null), selector.getSegment());

      final ShardSpec shardSpec;
      if (numChunks == 1) {
        shardSpec = new SingleDimensionShardSpec("dimAll", null, null, 0);
      } else {
        String start = null;
        String end = null;
        if (j > 0) {
          start = String.valueOf(j);
        }
        if (j + 1 < numChunks) {
          end = String.valueOf(j + 1);
        }
        shardSpec = new SingleDimensionShardSpec("dim" + k, start, end, j);
      }
      EasyMock.expect(mockSegment.getShardSpec()).andReturn(shardSpec).anyTimes();
      timeline.add(queryIntervals.get(k), String.valueOf(k), shardSpec.createChunk(selector));
    }
  }
  return serverExpectationList;
}
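The else-branch above carves the dimension into half-open string ranges, unbounded at both edges. A standalone sketch of just that boundary logic (the chunkSpec helper name is ours, not Druid's):

// For numChunks = 3 this yields (null,"1"), ("1","2"), ("2",null):
// chunk j covers [j, j+1), with null meaning "unbounded" at either edge.
static SingleDimensionShardSpec chunkSpec(String dimension, int j, int numChunks)
{
  if (numChunks == 1) {
    return new SingleDimensionShardSpec(dimension, null, null, 0);
  }
  String start = j > 0 ? String.valueOf(j) : null;
  String end = j + 1 < numChunks ? String.valueOf(j + 1) : null;
  return new SingleDimensionShardSpec(dimension, start, end, j);
}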
Use of io.druid.timeline.DataSegment in project druid by druid-io.
From the class DruidCoordinator, method getReplicationStatus:
public Map<String, CountingMap<String>> getReplicationStatus()
{
  final Map<String, CountingMap<String>> retVal = Maps.newHashMap();
  if (segmentReplicantLookup == null) {
    return retVal;
  }

  final DateTime now = new DateTime();
  for (DataSegment segment : getAvailableDataSegments()) {
    List<Rule> rules = metadataRuleManager.getRulesWithDefault(segment.getDataSource());
    for (Rule rule : rules) {
      if (rule instanceof LoadRule && rule.appliesTo(segment, now)) {
        for (Map.Entry<String, Integer> entry : ((LoadRule) rule).getTieredReplicants().entrySet()) {
          CountingMap<String> dataSourceMap = retVal.get(entry.getKey());
          if (dataSourceMap == null) {
            dataSourceMap = new CountingMap<>();
            retVal.put(entry.getKey(), dataSourceMap);
          }
          int diff = Math.max(
              entry.getValue() - segmentReplicantLookup.getTotalReplicants(segment.getIdentifier(), entry.getKey()),
              0
          );
          dataSourceMap.add(segment.getDataSource(), diff);
        }
        break; // stop after the first LoadRule that applies to this segment
      }
    }
  }
  return retVal;
}
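Because diff is clamped at zero, the returned map reports only under-replication and never credits over-replicated tiers. A worked sketch of that clamping with assumed counts (plain values, not Druid API calls):

int configuredReplicants = 2; // a LoadRule asking for 2 copies in some tier
int loadedReplicants = 1;     // what segmentReplicantLookup would report
int diff = Math.max(configuredReplicants - loadedReplicants, 0); // 1: one copy missing

loadedReplicants = 3;         // over-replicated segment
diff = Math.max(configuredReplicants - loadedReplicants, 0);     // 0, not -1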
Use of io.druid.timeline.DataSegment in project druid by druid-io.
From the class CachingClusteredClientTest, method testIfNoneMatch:
@Test
public void testIfNoneMatch() throws Exception
{
  Interval interval = new Interval("2016/2017");
  final DataSegment dataSegment = new DataSegment(
      "dataSource",
      interval,
      "ver",
      ImmutableMap.<String, Object>of("type", "hdfs", "path", "/tmp"),
      ImmutableList.of("product"),
      ImmutableList.of("visited_sum"),
      NoneShardSpec.instance(),
      9,
      12334
  );
  final ServerSelector selector = new ServerSelector(
      dataSegment,
      new HighestPriorityTierSelectorStrategy(new RandomServerSelectorStrategy())
  );
  selector.addServerAndUpdateSegment(new QueryableDruidServer(servers[0], null), dataSegment);
  timeline.add(interval, "ver", new SingleElementPartitionChunk<>(selector));

  TimeBoundaryQuery query = Druids.newTimeBoundaryQueryBuilder()
      .dataSource(DATA_SOURCE)
      .intervals(new MultipleIntervalSegmentSpec(ImmutableList.of(interval)))
      .context(ImmutableMap.<String, Object>of("If-None-Match", "aVJV29CJY93rszVW/QBy0arWZo0="))
      .build();

  Map<String, String> responseContext = new HashMap<>();
  client.run(query, responseContext);
  Assert.assertEquals("Z/eS4rQz5v477iq7Aashr6JPZa0=", responseContext.get("ETag"));
}
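The assertion shows that a non-matching If-None-Match value is ignored and the broker still hands back the ETag it computed in the response context. A hedged sketch of the natural follow-up, resending the query with that fresh ETag (standard conditional-request usage; this second step is not part of the test above):

Map<String, String> ctx = new HashMap<>();
client.run(query, ctx);
String etag = ctx.get("ETag"); // computed by the broker on the first run

// Hypothetical second request reusing the fresh ETag so an unchanged
// timeline can be short-circuited:
TimeBoundaryQuery second = Druids.newTimeBoundaryQueryBuilder()
    .dataSource(DATA_SOURCE)
    .intervals(new MultipleIntervalSegmentSpec(ImmutableList.of(interval)))
    .context(ImmutableMap.<String, Object>of("If-None-Match", etag))
    .build();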
Use of io.druid.timeline.DataSegment in project druid by druid-io.
From the class CoordinatorServerViewTest, method testSingleServerAddedRemovedSegment:
@Test
public void testSingleServerAddedRemovedSegment() throws Exception
{
  segmentViewInitLatch = new CountDownLatch(1);
  segmentAddedLatch = new CountDownLatch(1);
  segmentRemovedLatch = new CountDownLatch(1);
  setupViews();

  final DruidServer druidServer = new DruidServer("localhost:1234", "localhost:1234", 10000000L, "historical", "default_tier", 0);
  setupZNodeForServer(druidServer, zkPathsConfig, jsonMapper);

  final DataSegment segment = dataSegmentWithIntervalAndVersion("2014-10-20T00:00:00Z/P1D", "v1");
  announceSegmentForServer(druidServer, segment, zkPathsConfig, jsonMapper);
  Assert.assertTrue(timing.forWaiting().awaitLatch(segmentViewInitLatch));
  Assert.assertTrue(timing.forWaiting().awaitLatch(segmentAddedLatch));

  TimelineLookup timeline = overlordServerView.getTimeline(new TableDataSource("test_overlord_server_view"));
  List<TimelineObjectHolder> serverLookupRes = (List<TimelineObjectHolder>) timeline.lookup(new Interval("2014-10-20T00:00:00Z/P1D"));
  Assert.assertEquals(1, serverLookupRes.size());

  TimelineObjectHolder<String, SegmentLoadInfo> actualTimelineObjectHolder = serverLookupRes.get(0);
  Assert.assertEquals(new Interval("2014-10-20T00:00:00Z/P1D"), actualTimelineObjectHolder.getInterval());
  Assert.assertEquals("v1", actualTimelineObjectHolder.getVersion());

  PartitionHolder<SegmentLoadInfo> actualPartitionHolder = actualTimelineObjectHolder.getObject();
  Assert.assertTrue(actualPartitionHolder.isComplete());
  Assert.assertEquals(1, Iterables.size(actualPartitionHolder));

  SegmentLoadInfo segmentLoadInfo = actualPartitionHolder.iterator().next().getObject();
  Assert.assertFalse(segmentLoadInfo.isEmpty());
  Assert.assertEquals(
      druidServer.getMetadata(),
      Iterables.getOnlyElement(segmentLoadInfo.toImmutableSegmentLoadInfo().getServers())
  );

  unannounceSegmentForServer(druidServer, segment);
  Assert.assertTrue(timing.forWaiting().awaitLatch(segmentRemovedLatch));
  Assert.assertEquals(0, ((List<TimelineObjectHolder>) timeline.lookup(new Interval("2014-10-20T00:00:00Z/P1D"))).size());
  Assert.assertNull(timeline.findEntry(new Interval("2014-10-20T00:00:00Z/P1D"), "v1"));
}
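The three latches are what make this test deterministic: each asynchronous ZooKeeper callback counts one down, and the test thread blocks until it fires. A stripped-down sketch of that handshake (the callback wiring is illustrative, not the harness's actual listener code):

final CountDownLatch segmentAdded = new CountDownLatch(1);

// inside the (assumed) segment-view callback, on the watcher thread:
//   segmentAdded.countDown();

// test thread: turn the async notification into a fixed assertion point.
Assert.assertTrue(
    "timed out waiting for segment announcement",
    segmentAdded.await(60, java.util.concurrent.TimeUnit.SECONDS)
);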