Use of org.apache.druid.server.coordination.DruidServerMetadata in the druid project by druid-io.
Example: the setup method of the BalancerStrategyBenchmark class.
/**
 * Initializes the benchmark state once per trial: configures the balancer
 * strategy parameters for the selected {@code mode}, randomly distributes
 * {@code numberOfSegments} test segments across {@code NUMBER_OF_SERVERS}
 * simulated historical servers, and wraps each server in a {@link ServerHolder}.
 */
@Setup(Level.Trial)
public void setup()
{
  // Configure strategy knobs according to the benchmark mode; any other
  // mode leaves the defaults untouched.
  if ("50percentOfSegmentsToConsiderPerMove".equals(mode)) {
    percentOfSegmentsToConsider = 50;
    useBatchedSegmentSampler = false;
  } else if ("useBatchedSegmentSampler".equals(mode)) {
    reservoirSize = maxSegmentsToMove;
    useBatchedSegmentSampler = true;
  }

  // One (initially empty) segment list per simulated server.
  final List<List<DataSegment>> segmentsPerServer = new ArrayList<>(NUMBER_OF_SERVERS);
  for (int server = 0; server < NUMBER_OF_SERVERS; server++) {
    segmentsPerServer.add(new ArrayList<>());
  }

  // Assign each test segment to a uniformly random server.
  for (int i = 0; i < numberOfSegments; i++) {
    final DataSegment segment = new DataSegment(
        "test",
        TEST_SEGMENT_INTERVAL,
        String.valueOf(i),
        Collections.emptyMap(),
        Collections.emptyList(),
        Collections.emptyList(),
        null,
        0,
        10L
    );
    segmentsPerServer.get(RANDOM.nextInt(NUMBER_OF_SERVERS)).add(segment);
  }

  // Wrap every server's segment set in a ServerHolder for the strategy to balance.
  for (List<DataSegment> serverSegments : segmentsPerServer) {
    serverHolders.add(
        new ServerHolder(
            new ImmutableDruidServer(
                new DruidServerMetadata("id", "host", null, 10000000L, ServerType.HISTORICAL, "hot", 1),
                3000L,
                ImmutableMap.of("test", new ImmutableDruidDataSource("test", Collections.emptyMap(), serverSegments)),
                serverSegments.size()
            ),
            new LoadQueuePeonTester()
        )
    );
  }
}
Use of org.apache.druid.server.coordination.DruidServerMetadata in the druid project by druid-io.
Example: the setup method of the DruidSchemaInternRowSignatureBenchmark class.
/**
 * Populates the benchmark schema with 10,000 segments, all reported by a
 * single mocked historical server, so that row-signature interning can be
 * measured against a realistically large segment count.
 */
@Setup
public void setup()
{
  druidSchema = new DruidSchemaForBenchmark(
      EasyMock.mock(QueryLifecycleFactory.class),
      EasyMock.mock(TimelineServerView.class),
      null,
      null,
      EasyMock.mock(PlannerConfig.class),
      null,
      null
  );

  final DruidServerMetadata historical =
      new DruidServerMetadata("dummy", "dummy", "dummy", 42, ServerType.HISTORICAL, "tier-0", 0);

  // Shared builder; only the interval varies per segment.
  final DataSegment.Builder segmentBuilder = DataSegment.builder()
      .dataSource("dummy")
      .shardSpec(new LinearShardSpec(0))
      .dimensions(ImmutableList.of("col1", "col2", "col3", "col4"))
      .version("1")
      .size(0);

  // Intervals "i/i+1" are pairwise disjoint, so every segment is distinct.
  for (int i = 0; i < 10000; ++i) {
    druidSchema.addSegment(historical, segmentBuilder.interval(Intervals.of(i + "/" + (i + 1))).build());
  }
}
Use of org.apache.druid.server.coordination.DruidServerMetadata in the druid project by druid-io.
Example: the getTargetLocations method of the ServerViewUtil class.
/**
 * Locates, for each of the given intervals, every segment of the datasource
 * overlapping that interval, along with up to {@code numCandidates} servers
 * currently able to serve it.
 *
 * @param serverView    view of the cluster's segment timeline
 * @param datasource    datasource whose timeline is consulted
 * @param intervals     intervals to look up
 * @param numCandidates maximum number of candidate servers per segment
 * @return located segment descriptors, or an empty list when the datasource
 *         has no timeline in this view
 */
public static List<LocatedSegmentDescriptor> getTargetLocations(TimelineServerView serverView, DataSource datasource, List<Interval> intervals, int numCandidates) {
  final DataSourceAnalysis analysis = DataSourceAnalysis.forDataSource(datasource);
  final Optional<? extends TimelineLookup<String, ServerSelector>> timeline = serverView.getTimeline(analysis);
  if (!timeline.isPresent()) {
    return Collections.emptyList();
  }
  final List<LocatedSegmentDescriptor> result = new ArrayList<>();
  for (Interval interval : intervals) {
    for (TimelineObjectHolder<String, ServerSelector> holder : timeline.get().lookup(interval)) {
      // Each chunk is one segment (partition) within the holder's interval/version.
      for (PartitionChunk<ServerSelector> chunk : holder.getObject()) {
        final ServerSelector selector = chunk.getObject();
        result.add(
            new LocatedSegmentDescriptor(
                new SegmentDescriptor(holder.getInterval(), holder.getVersion(), chunk.getChunkNumber()),
                selector.getSegment().getSize(),
                selector.getCandidates(numCandidates)
            )
        );
      }
    }
  }
  return result;
}
Use of org.apache.druid.server.coordination.DruidServerMetadata in the druid project by druid-io.
Example: the setupViews method of the BrokerServerViewTest class.
/**
 * Builds the inventory view and broker server view under test. The inventory
 * view decorates every registered segment callback so that the test's latches
 * are counted down only after the wrapped callback has processed the event.
 */
private void setupViews(Set<String> watchedTiers, Set<String> ignoredTiers, boolean watchRealtimeTasks) throws Exception
{
  baseView = new BatchServerInventoryView(zkPathsConfig, curator, jsonMapper, Predicates.alwaysTrue(), "test")
  {
    @Override
    public void registerSegmentCallback(Executor exec, final SegmentCallback callback)
    {
      // Wrap the callback: forward the event first, then release the latch.
      final SegmentCallback latchingCallback = new SegmentCallback()
      {
        @Override
        public CallbackAction segmentViewInitialized()
        {
          final CallbackAction action = callback.segmentViewInitialized();
          segmentViewInitLatch.countDown();
          return action;
        }

        @Override
        public CallbackAction segmentAdded(DruidServerMetadata server, DataSegment segment)
        {
          final CallbackAction action = callback.segmentAdded(server, segment);
          segmentAddedLatch.countDown();
          return action;
        }

        @Override
        public CallbackAction segmentRemoved(DruidServerMetadata server, DataSegment segment)
        {
          final CallbackAction action = callback.segmentRemoved(server, segment);
          segmentRemovedLatch.countDown();
          return action;
        }
      };
      super.registerSegmentCallback(exec, latchingCallback);
    }
  };

  // Watcher config captures the test parameters verbatim.
  final BrokerSegmentWatcherConfig watcherConfig = new BrokerSegmentWatcherConfig()
  {
    @Override
    public Set<String> getWatchedTiers()
    {
      return watchedTiers;
    }

    @Override
    public Set<String> getIgnoredTiers()
    {
      return ignoredTiers;
    }

    @Override
    public boolean isWatchRealtimeTasks()
    {
      return watchRealtimeTasks;
    }
  };

  brokerServerView = new BrokerServerView(
      EasyMock.createMock(QueryToolChestWarehouse.class),
      EasyMock.createMock(QueryWatcher.class),
      getSmileMapper(),
      EasyMock.createMock(HttpClient.class),
      baseView,
      new HighestPriorityTierSelectorStrategy(new RandomServerSelectorStrategy()),
      new NoopServiceEmitter(),
      watcherConfig
  );

  baseView.start();
}
Use of org.apache.druid.server.coordination.DruidServerMetadata in the druid project by druid-io.
Example: the testSimple method of the HttpServerInventoryViewTest class.
/**
 * End-to-end exercise of HttpServerInventoryView against a scripted HTTP client:
 * a single historical node is discovered, serves four pre-canned change-request
 * snapshots (load, drop+load, full reset, full re-sync), and is then removed.
 * Verifies that segment add/drop callbacks fire, that the segment filter
 * excludes the "non-loading-datasource" segment, and that removing the node
 * clears its inventory.
 */
@Test(timeout = 60_000L)
public void testSimple() throws Exception {
ObjectMapper jsonMapper = TestHelper.makeJsonMapper();
// Discovery provider hands out our controllable TestDruidNodeDiscovery, so the
// test can inject node add/remove events directly via its listener.
TestDruidNodeDiscovery druidNodeDiscovery = new TestDruidNodeDiscovery();
DruidNodeDiscoveryProvider druidNodeDiscoveryProvider = EasyMock.createMock(DruidNodeDiscoveryProvider.class);
EasyMock.expect(druidNodeDiscoveryProvider.getForService(DataNodeService.DISCOVERY_SERVICE_KEY)).andReturn(druidNodeDiscovery);
EasyMock.replay(druidNodeDiscoveryProvider);
// Five segments; segment5 belongs to "non-loading-datasource", which the
// inventory view's filter predicate below rejects.
final DataSegment segment1 = new DataSegment("test1", Intervals.of("2014/2015"), "v1", null, null, null, null, 0, 0);
final DataSegment segment2 = new DataSegment("test2", Intervals.of("2014/2015"), "v1", null, null, null, null, 0, 0);
final DataSegment segment3 = new DataSegment("test3", Intervals.of("2014/2015"), "v1", null, null, null, null, 0, 0);
final DataSegment segment4 = new DataSegment("test4", Intervals.of("2014/2015"), "v1", null, null, null, null, 0, 0);
final DataSegment segment5 = new DataSegment("non-loading-datasource", Intervals.of("2014/2015"), "v1", null, null, null, null, 0, 0);
// Scripted responses returned in order:
//  1) load segment1
//  2) drop segment1; load segment2, segment3
//  3) full reset ("force reset counter") with an empty change list
//  4) full sync after the reset: load segment3, segment4, segment5
// (order matters — each await below corresponds to one of these responses).
TestHttpClient httpClient = new TestHttpClient(ImmutableList.of(Futures.immediateFuture(new ByteArrayInputStream(jsonMapper.writerWithType(HttpServerInventoryView.SEGMENT_LIST_RESP_TYPE_REF).writeValueAsBytes(new ChangeRequestsSnapshot(false, null, ChangeRequestHistory.Counter.ZERO, ImmutableList.of(new SegmentChangeRequestLoad(segment1)))))), Futures.immediateFuture(new ByteArrayInputStream(jsonMapper.writerWithType(HttpServerInventoryView.SEGMENT_LIST_RESP_TYPE_REF).writeValueAsBytes(new ChangeRequestsSnapshot(false, null, ChangeRequestHistory.Counter.ZERO, ImmutableList.of(new SegmentChangeRequestDrop(segment1), new SegmentChangeRequestLoad(segment2), new SegmentChangeRequestLoad(segment3)))))), Futures.immediateFuture(new ByteArrayInputStream(jsonMapper.writerWithType(HttpServerInventoryView.SEGMENT_LIST_RESP_TYPE_REF).writeValueAsBytes(new ChangeRequestsSnapshot(true, "force reset counter", ChangeRequestHistory.Counter.ZERO, ImmutableList.of())))), Futures.immediateFuture(new ByteArrayInputStream(jsonMapper.writerWithType(HttpServerInventoryView.SEGMENT_LIST_RESP_TYPE_REF).writeValueAsBytes(new ChangeRequestsSnapshot(false, null, ChangeRequestHistory.Counter.ZERO, ImmutableList.of(new SegmentChangeRequestLoad(segment3), new SegmentChangeRequestLoad(segment4), new SegmentChangeRequestLoad(segment5))))))));
// One historical node at host:8080 advertising the data-node service.
DiscoveryDruidNode druidNode = new DiscoveryDruidNode(new DruidNode("service", "host", false, 8080, null, true, false), NodeRole.HISTORICAL, ImmutableMap.of(DataNodeService.DISCOVERY_SERVICE_KEY, new DataNodeService("tier", 1000, ServerType.HISTORICAL, 0)));
// The filter predicate drops any segment whose datasource is "non-loading-datasource" (i.e. segment5).
HttpServerInventoryView httpServerInventoryView = new HttpServerInventoryView(jsonMapper, httpClient, druidNodeDiscoveryProvider, (pair) -> !pair.rhs.getDataSource().equals("non-loading-datasource"), new HttpServerInventoryViewConfig(null, null, null), "test");
// Latches released by the segment callbacks; awaited below to sequence the test.
// (Note: "segmentAddLathces" is a pre-existing typo in the identifier.)
CountDownLatch initializeCallback1 = new CountDownLatch(1);
Map<SegmentId, CountDownLatch> segmentAddLathces = ImmutableMap.of(segment1.getId(), new CountDownLatch(1), segment2.getId(), new CountDownLatch(1), segment3.getId(), new CountDownLatch(1), segment4.getId(), new CountDownLatch(1));
Map<SegmentId, CountDownLatch> segmentDropLatches = ImmutableMap.of(segment1.getId(), new CountDownLatch(1), segment2.getId(), new CountDownLatch(1));
httpServerInventoryView.registerSegmentCallback(Execs.directExecutor(), new ServerView.SegmentCallback() {
@Override
public ServerView.CallbackAction segmentAdded(DruidServerMetadata server, DataSegment segment) {
segmentAddLathces.get(segment.getId()).countDown();
return ServerView.CallbackAction.CONTINUE;
}
@Override
public ServerView.CallbackAction segmentRemoved(DruidServerMetadata server, DataSegment segment) {
segmentDropLatches.get(segment.getId()).countDown();
return ServerView.CallbackAction.CONTINUE;
}
@Override
public ServerView.CallbackAction segmentViewInitialized() {
initializeCallback1.countDown();
return ServerView.CallbackAction.CONTINUE;
}
});
// Server-removed callback must fire exactly for the discovered node; any other
// server name is a test failure.
final CountDownLatch serverRemovedCalled = new CountDownLatch(1);
httpServerInventoryView.registerServerRemovedCallback(Execs.directExecutor(), new ServerView.ServerRemovedCallback() {
@Override
public ServerView.CallbackAction serverRemoved(DruidServer server) {
if (server.getName().equals("host:8080")) {
serverRemovedCalled.countDown();
return ServerView.CallbackAction.CONTINUE;
} else {
throw new RE("Unknown server [%s]", server.getName());
}
}
});
httpServerInventoryView.start();
// Announce the node; the view then consumes the scripted responses in order.
druidNodeDiscovery.listener.nodesAdded(ImmutableList.of(druidNode));
initializeCallback1.await();
// Response 1: segment1 loaded.
segmentAddLathces.get(segment1.getId()).await();
// Response 2: segment1 dropped, segment2 and segment3 loaded.
segmentDropLatches.get(segment1.getId()).await();
segmentAddLathces.get(segment2.getId()).await();
segmentAddLathces.get(segment3.getId()).await();
// Responses 3 and 4: after the forced reset, the full sync lists only
// segment3/4/5 — segment4 is added and segment2 (absent from the sync) is
// dropped; segment5 never appears because the filter rejects it.
segmentAddLathces.get(segment4.getId()).await();
segmentDropLatches.get(segment2.getId()).await();
DruidServer druidServer = httpServerInventoryView.getInventoryValue("host:8080");
// Final inventory: exactly segment3 and segment4.
Assert.assertEquals(ImmutableMap.of(segment3.getId(), segment3, segment4.getId(), segment4), Maps.uniqueIndex(druidServer.iterateAllSegments(), DataSegment::getId));
// Removing the node must trigger the server-removed callback and clear inventory.
druidNodeDiscovery.listener.nodesRemoved(ImmutableList.of(druidNode));
serverRemovedCalled.await();
Assert.assertNull(httpServerInventoryView.getInventoryValue("host:8080"));
EasyMock.verify(druidNodeDiscoveryProvider);
httpServerInventoryView.stop();
}
Aggregations