Use of io.druid.client.selector.QueryableDruidServer in project druid by druid-io.
The class BrokerServerView, method addServer.
// Creates a direct query client for the given server and registers it under the
// server's name, warning if (and replacing whatever) a client already existed.
private QueryableDruidServer addServer(DruidServer server) {
  QueryableDruidServer retVal = new QueryableDruidServer(server, makeDirectClient(server));
  QueryableDruidServer exists = clients.put(server.getName(), retVal);
  if (exists != null) {
    log.warn("QueryRunner for server[%s] already existed!? Well it's getting replaced", server);
  }
  return retVal;
}
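The key detail here is that Map.put returns the previous mapping, which is how addServer detects that it is replacing an existing query client. A minimal stand-alone sketch of that pattern, assuming a concurrent map keyed by server name (ClientRegistry and the String-valued client are hypothetical stand-ins, not Druid types):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Hypothetical stand-in for the pattern above: Map.put returns the previous
// value, so a non-null result signals that an existing entry was replaced.
class ClientRegistry {
  private final Map<String, String> clients = new ConcurrentHashMap<>();

  String register(String serverName, String client) {
    String existing = clients.put(serverName, client);
    if (existing != null) {
      System.err.printf("Client for server[%s] already existed; replacing it%n", serverName);
    }
    return client;
  }
}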
Use of io.druid.client.selector.QueryableDruidServer in project druid by druid-io.
The class BrokerServerView, method serverRemovedSegment.
// Drops the server -> segment association. When the segment's last server is
// removed, the selector is discarded and the segment's partition chunk is
// pulled from the dataSource timeline as well.
private void serverRemovedSegment(DruidServerMetadata server, DataSegment segment) {
  String segmentId = segment.getIdentifier();
  final ServerSelector selector;

  synchronized (lock) {
    log.debug("Removing segment[%s] from server[%s].", segmentId, server);

    selector = selectors.get(segmentId);
    if (selector == null) {
      log.warn("Told to remove non-existant segment[%s]", segmentId);
      return;
    }

    QueryableDruidServer queryableDruidServer = clients.get(server.getName());
    if (!selector.removeServer(queryableDruidServer)) {
      log.warn(
          "Asked to disassociate non-existant association between server[%s] and segment[%s]",
          server,
          segmentId
      );
    }

    if (selector.isEmpty()) {
      VersionedIntervalTimeline<String, ServerSelector> timeline = timelines.get(segment.getDataSource());
      selectors.remove(segmentId);

      final PartitionChunk<ServerSelector> removedPartition = timeline.remove(
          segment.getInterval(),
          segment.getVersion(),
          segment.getShardSpec().createChunk(selector)
      );
      if (removedPartition == null) {
        log.warn(
            "Asked to remove timeline entry[interval: %s, version: %s] that doesn't exist",
            segment.getInterval(),
            segment.getVersion()
        );
      }
    }
  }
}
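The method maintains an invariant across two structures: the selectors map and the per-dataSource timeline must be cleaned up together, under the same lock, when a segment loses its last server. A simplified sketch of that invariant, with plain collections standing in for ServerSelector and VersionedIntervalTimeline (SegmentIndex and its fields are hypothetical):

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

// Hypothetical sketch of the "remove the entry when its last member goes away"
// invariant: the segment -> servers index and a secondary structure (the
// timeline, here a plain set) are updated together under one lock.
class SegmentIndex {
  private final Object lock = new Object();
  private final Map<String, Set<String>> serversBySegment = new HashMap<>();
  private final Set<String> queryableSegments = new HashSet<>(); // stand-in for the timeline

  void removeServer(String segmentId, String serverName) {
    synchronized (lock) {
      Set<String> servers = serversBySegment.get(segmentId);
      if (servers == null || !servers.remove(serverName)) {
        return; // unknown segment or association; the real code logs a warning
      }
      if (servers.isEmpty()) {
        serversBySegment.remove(segmentId);
        queryableSegments.remove(segmentId); // mirrors timeline.remove(...)
      }
    }
  }
}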
Use of io.druid.client.selector.QueryableDruidServer in project druid by druid-io.
The class BrokerServerView, method serverAddedSegment.
// Registers the segment under its dataSource timeline (creating the timeline
// and the segment's ServerSelector on first sight), then adds the server to
// the selector, lazily creating its query client if needed.
private void serverAddedSegment(final DruidServerMetadata server, final DataSegment segment) {
  String segmentId = segment.getIdentifier();
  synchronized (lock) {
    log.debug("Adding segment[%s] for server[%s]", segment, server);

    ServerSelector selector = selectors.get(segmentId);
    if (selector == null) {
      selector = new ServerSelector(segment, tierSelectorStrategy);

      VersionedIntervalTimeline<String, ServerSelector> timeline = timelines.get(segment.getDataSource());
      if (timeline == null) {
        timeline = new VersionedIntervalTimeline<>(Ordering.natural());
        timelines.put(segment.getDataSource(), timeline);
      }

      timeline.add(segment.getInterval(), segment.getVersion(), segment.getShardSpec().createChunk(selector));
      selectors.put(segmentId, selector);
    }

    QueryableDruidServer queryableDruidServer = clients.get(server.getName());
    if (queryableDruidServer == null) {
      queryableDruidServer = addServer(baseView.getInventoryValue(server.getName()));
    }
    selector.addServerAndUpdateSegment(queryableDruidServer, segment);
  }
}
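The null-check-then-put on timelines is lazy initialization of the per-dataSource timeline, safe here because it runs under the surrounding lock. On Java 8+ the same step can be expressed with Map.computeIfAbsent, sketched below with a TreeMap standing in for VersionedIntervalTimeline (TimelineRegistry is hypothetical):

import java.util.HashMap;
import java.util.Map;
import java.util.TreeMap;

// Hypothetical sketch: computeIfAbsent collapses the get / null-check / put
// sequence above into one call, creating the timeline on first access.
class TimelineRegistry {
  private final Map<String, TreeMap<String, String>> timelines = new HashMap<>();

  TreeMap<String, String> timelineFor(String dataSource) {
    return timelines.computeIfAbsent(dataSource, ds -> new TreeMap<>());
  }
}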
Use of io.druid.client.selector.QueryableDruidServer in project druid by druid-io.
The class BrokerServerView, method clear.
// Tears down all broker-side state: drains the client map, drops every
// dataSource timeline, and empties each ServerSelector before discarding it.
public void clear() {
  synchronized (lock) {
    final Iterator<String> clientsIter = clients.keySet().iterator();
    while (clientsIter.hasNext()) {
      clientsIter.next(); // must advance before remove(); remove() first throws IllegalStateException
      clientsIter.remove();
    }

    timelines.clear();

    final Iterator<ServerSelector> selectorsIter = selectors.values().iterator();
    while (selectorsIter.hasNext()) {
      final ServerSelector selector = selectorsIter.next();
      selectorsIter.remove();
      while (!selector.isEmpty()) {
        final QueryableDruidServer pick = selector.pick();
        selector.removeServer(pick);
      }
    }
  }
}
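Note that clear() empties each ServerSelector one pick at a time rather than simply dropping the selectors map, presumably so that any caller still holding a selector reference observes it as empty. A compact stand-in showing that drain loop (SimpleSelector is hypothetical; the real ServerSelector picks by tier strategy rather than insertion order):

import java.util.ArrayDeque;
import java.util.Deque;

// Hypothetical stand-in for ServerSelector: drain() removes members via
// pick-then-remove, so the object itself ends up empty instead of merely
// becoming unreachable from the registry.
class SimpleSelector {
  private final Deque<String> servers = new ArrayDeque<>();

  void add(String server) { servers.addLast(server); }
  String pick() { return servers.peekFirst(); }
  boolean removeServer(String s) { return servers.remove(s); }
  boolean isEmpty() { return servers.isEmpty(); }

  void drain() {
    while (!isEmpty()) {
      removeServer(pick());
    }
  }
}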
Use of io.druid.client.selector.QueryableDruidServer in project druid by druid-io.
The class CachingClusteredClientTest, method testCachingOverBulkLimitEnforcesLimit.
@Test
@SuppressWarnings("unchecked")
public void testCachingOverBulkLimitEnforcesLimit() throws Exception {
  final int limit = 10;
  final Interval interval = new Interval("2011-01-01/2011-01-02");
  final TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
                                      .dataSource(DATA_SOURCE)
                                      .intervals(new MultipleIntervalSegmentSpec(ImmutableList.of(interval)))
                                      .filters(DIM_FILTER)
                                      .granularity(GRANULARITY)
                                      .aggregators(AGGS)
                                      .postAggregators(POST_AGGS)
                                      .context(CONTEXT)
                                      .build();

  final Map<String, Object> context = new HashMap<>();
  final Cache cache = EasyMock.createStrictMock(Cache.class);
  final Capture<Iterable<Cache.NamedKey>> cacheKeyCapture = EasyMock.newCapture();
  EasyMock.expect(cache.getBulk(EasyMock.capture(cacheKeyCapture)))
          .andReturn(ImmutableMap.<Cache.NamedKey, byte[]>of())
          .once();
  EasyMock.replay(cache);

  client = makeClient(MoreExecutors.sameThreadExecutor(), cache, limit);

  final DruidServer lastServer = servers[random.nextInt(servers.length)];
  final DataSegment dataSegment = EasyMock.createNiceMock(DataSegment.class);
  EasyMock.expect(dataSegment.getIdentifier()).andReturn(DATA_SOURCE).anyTimes();
  EasyMock.replay(dataSegment);

  final ServerSelector selector = new ServerSelector(
      dataSegment,
      new HighestPriorityTierSelectorStrategy(new RandomServerSelectorStrategy())
  );
  selector.addServerAndUpdateSegment(new QueryableDruidServer(lastServer, null), dataSegment);
  timeline.add(interval, "v", new SingleElementPartitionChunk<>(selector));

  // With a positive limit, getBulk must be called with at most `limit` keys.
  client.run(query, context);
  Assert.assertTrue("Capture cache keys", cacheKeyCapture.hasCaptured());
  Assert.assertTrue("Cache key below limit", ImmutableList.copyOf(cacheKeyCapture.getValue()).size() <= limit);
  EasyMock.verify(cache);

  // With a limit of 0, bulk cache fetching is disabled: no keys are passed.
  EasyMock.reset(cache);
  cacheKeyCapture.reset();
  EasyMock.expect(cache.getBulk(EasyMock.capture(cacheKeyCapture)))
          .andReturn(ImmutableMap.<Cache.NamedKey, byte[]>of())
          .once();
  EasyMock.replay(cache);

  client = makeClient(MoreExecutors.sameThreadExecutor(), cache, 0);
  client.run(query, context);

  EasyMock.verify(cache);
  EasyMock.verify(dataSegment);
  Assert.assertTrue("Capture cache keys", cacheKeyCapture.hasCaptured());
  Assert.assertTrue("Cache Keys empty", ImmutableList.copyOf(cacheKeyCapture.getValue()).isEmpty());
}
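The test leans on EasyMock's Capture to grab the argument passed to cache.getBulk so it can be asserted on after the query runs. A minimal, self-contained sketch of that capture pattern (KeyStore and CapturePatternSketch are hypothetical; the EasyMock calls mirror the ones used above):

import java.util.List;
import org.easymock.Capture;
import org.easymock.EasyMock;

// Hypothetical interface standing in for the Cache dependency.
interface KeyStore {
  List<String> getBulk(List<String> keys);
}

class CapturePatternSketch {
  static void demo() {
    KeyStore store = EasyMock.createStrictMock(KeyStore.class);
    Capture<List<String>> captured = EasyMock.newCapture();

    // The capture matcher records whatever argument the mock receives.
    EasyMock.expect(store.getBulk(EasyMock.capture(captured)))
            .andReturn(List.of())
            .once();
    EasyMock.replay(store);

    store.getBulk(List.of("a", "b")); // the code under test would call this
    EasyMock.verify(store);

    if (!captured.hasCaptured() || captured.getValue().size() != 2) {
      throw new AssertionError("expected both keys to be captured");
    }
  }
}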