Example usage of org.apache.druid.client.ImmutableDruidServer in the druid-io/druid project, taken from the moveSegment method of the BalanceSegments class.
/**
 * Attempts to move {@code segment} onto {@code toServer}.
 * <p>
 * The move is skipped (returning {@code false}) when the segment is already queued to load on the
 * target, already served by the target, or the target lacks free space for it. Otherwise the
 * segment is recorded in the tier's currently-moving map before the coordinator is asked to move
 * it; the registered callback removes that bookkeeping entry once the move completes.
 *
 * @param segment  holder naming the segment and the server it currently lives on
 * @param toServer candidate destination server
 * @param params   coordinator runtime state (supplies the per-server load queue peons)
 * @return {@code true} if the move was initiated, {@code false} if it was skipped or failed
 */
protected boolean moveSegment(final BalancerSegmentHolder segment, final ImmutableDruidServer toServer, final DruidCoordinatorRuntimeParams params) {
  final LoadQueuePeon destinationPeon = params.getLoadManagementPeons().get(toServer.getName());
  final ImmutableDruidServer sourceServer = segment.getFromServer();
  final DataSegment candidate = segment.getSegment();
  final SegmentId candidateId = candidate.getId();

  // Guard clauses (short-circuit order preserved from the original condition):
  // already queued on the target, already served by the target, or insufficient free space.
  if (destinationPeon.getSegmentsToLoad().contains(candidate)
      || toServer.getSegment(candidateId) != null
      || new ServerHolder(toServer, destinationPeon).getAvailableSize() <= candidate.getSize()) {
    return false;
  }

  log.debug("Moving [%s] from [%s] to [%s]", candidateId, sourceServer.getName(), toServer.getName());
  LoadPeonCallback cleanup = null;
  try {
    final ConcurrentMap<SegmentId, BalancerSegmentHolder> inFlight = currentlyMovingSegments.get(toServer.getTier());
    // Register the move before kicking it off so concurrent balancing passes see it.
    inFlight.put(candidateId, segment);
    cleanup = () -> inFlight.remove(candidateId);
    coordinator.moveSegment(params, sourceServer, toServer, candidate, cleanup);
    return true;
  } catch (Exception e) {
    log.makeAlert(e, StringUtils.format("[%s] : Moving exception", candidateId)).emit();
    // Undo the in-flight bookkeeping if the map entry was already registered.
    if (cleanup != null) {
      cleanup.execute();
    }
    return false;
  }
}
Example usage of org.apache.druid.client.ImmutableDruidServer in the druid-io/druid project, taken from the testQuery method of the MovingAverageQueryTest class.
/**
 * Validate that the specified query behaves correctly.
 */
@SuppressWarnings({ "unchecked", "rawtypes" })
@Test
public void testQuery() throws IOException {
// Parse the test-case query JSON and confirm it deserialized to the expected Query subtype.
Query<?> query = jsonMapper.readValue(getQueryString(), Query.class);
Assert.assertThat(query, IsInstanceOf.instanceOf(getExpectedQueryType()));
// Parse the expected results JSON for later comparison against the actual run.
List<MapBasedRow> expectedResults = jsonMapper.readValue(getExpectedResultString(), getExpectedResultType());
Assert.assertNotNull(expectedResults);
Assert.assertThat(expectedResults, IsInstanceOf.instanceOf(List.class));
// Build a CachingClusteredClient backed by a stub TimelineServerView: no timeline, no servers,
// no query runners, and no-op callback registration — the query is served from mocks instead.
CachingClusteredClient baseClient = new CachingClusteredClient(warehouse, new TimelineServerView() {
@Override
public Optional<? extends TimelineLookup<String, ServerSelector>> getTimeline(DataSourceAnalysis analysis) {
// No timeline: there are no real data servers in this test.
return Optional.empty();
}
@Override
public List<ImmutableDruidServer> getDruidServers() {
return null;
}
@Override
public <T> QueryRunner<T> getQueryRunner(DruidServer server) {
return null;
}
@Override
public void registerTimelineCallback(Executor exec, TimelineCallback callback) {
}
@Override
public void registerSegmentCallback(Executor exec, SegmentCallback callback) {
}
@Override
public void registerServerRemovedCallback(Executor exec, ServerRemovedCallback callback) {
}
// Remaining client collaborators: a small local cache, unlimited queued bytes disabled (0L),
// and a processing config with no format string.
}, MapCache.create(100000), jsonMapper, new ForegroundCachePopulator(jsonMapper, new CachePopulatorStats(), -1), new CacheConfig(), new DruidHttpClientConfig() {
@Override
public long getMaxQueuedBytes() {
return 0L;
}
}, new DruidProcessingConfig() {
@Override
public String getFormatString() {
return null;
}
}, ForkJoinPool.commonPool(), QueryStackTests.DEFAULT_NOOP_SCHEDULER, new MapJoinableFactory(ImmutableSet.of(), ImmutableMap.of()), new NoopServiceEmitter());
// Wrap the client in a ClientQuerySegmentWalker with a no-op emitter so no metrics are reported.
ClientQuerySegmentWalker walker = new ClientQuerySegmentWalker(new ServiceEmitter("", "", null) {
@Override
public void emit(Event event) {
}
}, baseClient, null, /* local client; unused in this test, so pass in null */
warehouse, new MapJoinableFactory(ImmutableSet.of(), ImmutableMap.of()), retryConfig, jsonMapper, serverConfig, null, new CacheConfig());
// Install the per-test-case mocks, then run the query through the walker.
defineMocks();
QueryPlus queryPlus = QueryPlus.wrap(query);
final Sequence<?> res = query.getRunner(walker).run(queryPlus);
// Drain the result sequence into a list.
List actualResults = new ArrayList();
actualResults = (List<MapBasedRow>) res.accumulate(actualResults, Accumulators.list());
// Normalize numeric types on both sides before the equality check so that, e.g.,
// Integer vs Long representations of the same value do not cause spurious failures.
expectedResults = consistentTypeCasting(expectedResults);
actualResults = consistentTypeCasting(actualResults);
Assert.assertEquals(expectedResults, actualResults);
}
Example usage of org.apache.druid.client.ImmutableDruidServer in the druid-io/druid project, taken from the setup method of the BalancerStrategyBenchmark class.
/**
 * Prepares the benchmark fixture: configures the segment-sampling strategy according to the
 * {@code mode} parameter, then distributes {@code numberOfSegments} synthetic segments uniformly
 * at random across {@code NUMBER_OF_SERVERS} simulated historical servers.
 */
@Setup(Level.Trial)
public void setup() {
  // Select the sampling strategy under benchmark; any other mode keeps the defaults.
  switch (mode) {
    case "50percentOfSegmentsToConsiderPerMove":
      percentOfSegmentsToConsider = 50;
      useBatchedSegmentSampler = false;
      break;
    case "useBatchedSegmentSampler":
      reservoirSize = maxSegmentsToMove;
      useBatchedSegmentSampler = true;
      break;
    default:
  }
  // One segment list per simulated server.
  final List<List<DataSegment>> segmentsPerServer = new ArrayList<>(NUMBER_OF_SERVERS);
  for (int serverNum = 0; serverNum < NUMBER_OF_SERVERS; serverNum++) {
    segmentsPerServer.add(new ArrayList<>());
  }
  // Assign each synthetic segment (versioned by its index) to a random server.
  for (int segmentNum = 0; segmentNum < numberOfSegments; segmentNum++) {
    final DataSegment dataSegment = new DataSegment("test", TEST_SEGMENT_INTERVAL, String.valueOf(segmentNum), Collections.emptyMap(), Collections.emptyList(), Collections.emptyList(), null, 0, 10L);
    segmentsPerServer.get(RANDOM.nextInt(NUMBER_OF_SERVERS)).add(dataSegment);
  }
  // Wrap each server's segments in an immutable server/datasource pair plus a test load peon.
  for (final List<DataSegment> serverSegments : segmentsPerServer) {
    final ImmutableDruidDataSource dataSource = new ImmutableDruidDataSource("test", Collections.emptyMap(), serverSegments);
    final DruidServerMetadata metadata = new DruidServerMetadata("id", "host", null, 10000000L, ServerType.HISTORICAL, "hot", 1);
    serverHolders.add(new ServerHolder(new ImmutableDruidServer(metadata, 3000L, ImmutableMap.of("test", dataSource), serverSegments.size()), new LoadQueuePeonTester()));
  }
}
Example usage of org.apache.druid.client.ImmutableDruidServer in the druid-io/druid project, taken from the addSegmentsFromServer method of the MarkAsUnusedOvershadowedSegments class.
/**
 * Folds every segment served by {@code serverHolder} into the per-datasource timelines,
 * creating a naturally-ordered timeline for any datasource not yet present in the map.
 */
private void addSegmentsFromServer(ServerHolder serverHolder, Map<String, VersionedIntervalTimeline<String, DataSegment>> timelines) {
  final ImmutableDruidServer druidServer = serverHolder.getServer();
  for (ImmutableDruidDataSource druidDataSource : druidServer.getDataSources()) {
    // Lazily create the timeline for this datasource on first sight.
    final VersionedIntervalTimeline<String, DataSegment> timeline =
        timelines.computeIfAbsent(druidDataSource.getName(), name -> new VersionedIntervalTimeline<>(Comparator.naturalOrder()));
    VersionedIntervalTimeline.addSegments(timeline, druidDataSource.getSegments().iterator());
  }
}
Example usage of org.apache.druid.client.ImmutableDruidServer in the druid-io/druid project, taken from the addHistorical method of the DruidCluster class.
/**
 * Registers a historical server holder under its tier, creating the tier's
 * reverse-ordered server set on first use.
 */
private void addHistorical(ServerHolder serverHolder) {
  final String tier = serverHolder.getServer().getTier();
  final NavigableSet<ServerHolder> serversInTier =
      historicals.computeIfAbsent(tier, ignored -> new TreeSet<>(Collections.reverseOrder()));
  serversInTier.add(serverHolder);
}
Aggregations