Example 26 with DruidServer

Use of org.apache.druid.client.DruidServer in project druid by druid-io.

From class DruidCoordinatorTest, method testComputeUnderReplicationCountsPerDataSourcePerTierForSegmentsWithBroadcastRule.

@Test(timeout = 60_000L)
public void testComputeUnderReplicationCountsPerDataSourcePerTierForSegmentsWithBroadcastRule() throws Exception {
    final String dataSource = "dataSource";
    final String hotTierName = "hot";
    final String coldTierName = "cold";
    final String tierName1 = "tier1";
    final String tierName2 = "tier2";
    final Rule broadcastDistributionRule = new ForeverBroadcastDistributionRule();
    final String loadPathCold = "/druid/loadqueue/cold:1234";
    final String loadPathBroker1 = "/druid/loadqueue/broker1:1234";
    final String loadPathBroker2 = "/druid/loadqueue/broker2:1234";
    final String loadPathPeon = "/druid/loadqueue/peon:1234";
    final DruidServer hotServer = new DruidServer("hot", "hot", null, 5L, ServerType.HISTORICAL, hotTierName, 0);
    final DruidServer coldServer = new DruidServer("cold", "cold", null, 5L, ServerType.HISTORICAL, coldTierName, 0);
    final DruidServer brokerServer1 = new DruidServer("broker1", "broker1", null, 5L, ServerType.BROKER, tierName1, 0);
    final DruidServer brokerServer2 = new DruidServer("broker2", "broker2", null, 5L, ServerType.BROKER, tierName2, 0);
    final DruidServer peonServer = new DruidServer("peon", "peon", null, 5L, ServerType.INDEXER_EXECUTOR, tierName2, 0);
    final Map<String, DataSegment> dataSegments = ImmutableMap.of(
        "2018-01-02T00:00:00.000Z_2018-01-03T00:00:00.000Z",
        new DataSegment(dataSource, Intervals.of("2018-01-02/P1D"), "v1", null, null, null, null, 0x9, 0),
        "2018-01-03T00:00:00.000Z_2018-01-04T00:00:00.000Z",
        new DataSegment(dataSource, Intervals.of("2018-01-03/P1D"), "v1", null, null, null, null, 0x9, 0),
        "2017-01-01T00:00:00.000Z_2017-01-02T00:00:00.000Z",
        new DataSegment(dataSource, Intervals.of("2017-01-01/P1D"), "v1", null, null, null, null, 0x9, 0));
    final LoadQueuePeon loadQueuePeonCold = new CuratorLoadQueuePeon(
        curator, loadPathCold, objectMapper,
        Execs.scheduledSingleThreaded("coordinator_test_load_queue_peon_cold_scheduled-%d"),
        Execs.singleThreaded("coordinator_test_load_queue_peon_cold-%d"),
        druidCoordinatorConfig);
    final LoadQueuePeon loadQueuePeonBroker1 = new CuratorLoadQueuePeon(
        curator, loadPathBroker1, objectMapper,
        Execs.scheduledSingleThreaded("coordinator_test_load_queue_peon_broker1_scheduled-%d"),
        Execs.singleThreaded("coordinator_test_load_queue_peon_broker1-%d"),
        druidCoordinatorConfig);
    final LoadQueuePeon loadQueuePeonBroker2 = new CuratorLoadQueuePeon(
        curator, loadPathBroker2, objectMapper,
        Execs.scheduledSingleThreaded("coordinator_test_load_queue_peon_broker2_scheduled-%d"),
        Execs.singleThreaded("coordinator_test_load_queue_peon_broker2-%d"),
        druidCoordinatorConfig);
    final LoadQueuePeon loadQueuePeonPeonServer = new CuratorLoadQueuePeon(
        curator, loadPathPeon, objectMapper,
        Execs.scheduledSingleThreaded("coordinator_test_load_queue_peon_peon_scheduled-%d"),
        Execs.singleThreaded("coordinator_test_load_queue_peon_peon-%d"),
        druidCoordinatorConfig);
    final PathChildrenCache pathChildrenCacheCold = new PathChildrenCache(
        curator, loadPathCold, true, true,
        Execs.singleThreaded("coordinator_test_path_children_cache_cold-%d"));
    final PathChildrenCache pathChildrenCacheBroker1 = new PathChildrenCache(
        curator, loadPathBroker1, true, true,
        Execs.singleThreaded("coordinator_test_path_children_cache_broker1-%d"));
    final PathChildrenCache pathChildrenCacheBroker2 = new PathChildrenCache(
        curator, loadPathBroker2, true, true,
        Execs.singleThreaded("coordinator_test_path_children_cache_broker2-%d"));
    final PathChildrenCache pathChildrenCachePeon = new PathChildrenCache(
        curator, loadPathPeon, true, true,
        Execs.singleThreaded("coordinator_test_path_children_cache_peon-%d"));
    loadManagementPeons.putAll(ImmutableMap.of(
        "hot", loadQueuePeon,
        "cold", loadQueuePeonCold,
        "broker1", loadQueuePeonBroker1,
        "broker2", loadQueuePeonBroker2,
        "peon", loadQueuePeonPeonServer));
    loadQueuePeonCold.start();
    loadQueuePeonBroker1.start();
    loadQueuePeonBroker2.start();
    loadQueuePeonPeonServer.start();
    pathChildrenCache.start();
    pathChildrenCacheCold.start();
    pathChildrenCacheBroker1.start();
    pathChildrenCacheBroker2.start();
    pathChildrenCachePeon.start();
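    // a single DruidDataSource, wrapped in a one-element array, receives all of the test segments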
    DruidDataSource[] druidDataSources = { new DruidDataSource(dataSource, Collections.emptyMap()) };
    dataSegments.values().forEach(druidDataSources[0]::addSegment);
    setupSegmentsMetadataMock(druidDataSources[0]);
    EasyMock.expect(metadataRuleManager.getRulesWithDefault(EasyMock.anyString())).andReturn(ImmutableList.of(broadcastDistributionRule)).atLeastOnce();
    EasyMock.expect(metadataRuleManager.getAllRules()).andReturn(ImmutableMap.of(dataSource, ImmutableList.of(broadcastDistributionRule))).atLeastOnce();
    EasyMock.expect(serverInventoryView.getInventory()).andReturn(ImmutableList.of(hotServer, coldServer, brokerServer1, brokerServer2, peonServer)).atLeastOnce();
    EasyMock.expect(serverInventoryView.isStarted()).andReturn(true).anyTimes();
    EasyMock.replay(metadataRuleManager, serverInventoryView);
    coordinator.start();
    // Wait for this coordinator to become leader
    leaderAnnouncerLatch.await();
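    // each latch counts down once per assigned segment; the broadcast rule sends all three segments to every server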
    final CountDownLatch assignSegmentLatchHot = createCountDownLatchAndSetPathChildrenCacheListenerWithLatch(3, pathChildrenCache, dataSegments, hotServer);
    final CountDownLatch assignSegmentLatchCold = createCountDownLatchAndSetPathChildrenCacheListenerWithLatch(3, pathChildrenCacheCold, dataSegments, coldServer);
    final CountDownLatch assignSegmentLatchBroker1 = createCountDownLatchAndSetPathChildrenCacheListenerWithLatch(3, pathChildrenCacheBroker1, dataSegments, brokerServer1);
    final CountDownLatch assignSegmentLatchBroker2 = createCountDownLatchAndSetPathChildrenCacheListenerWithLatch(3, pathChildrenCacheBroker2, dataSegments, brokerServer2);
    final CountDownLatch assignSegmentLatchPeon = createCountDownLatchAndSetPathChildrenCacheListenerWithLatch(3, pathChildrenCachePeon, dataSegments, peonServer);
    assignSegmentLatchHot.await();
    assignSegmentLatchCold.await();
    assignSegmentLatchBroker1.await();
    assignSegmentLatchBroker2.await();
    assignSegmentLatchPeon.await();
    final CountDownLatch coordinatorRunLatch = new CountDownLatch(2);
    serviceEmitter.latch = coordinatorRunLatch;
    coordinatorRunLatch.await();
    Assert.assertEquals(ImmutableMap.of(dataSource, 100.0), coordinator.getLoadStatus());
    Map<String, Object2LongMap<String>> underReplicationCountsPerDataSourcePerTier = coordinator.computeUnderReplicationCountsPerDataSourcePerTier();
    Assert.assertEquals(4, underReplicationCountsPerDataSourcePerTier.size());
    Assert.assertEquals(0L, underReplicationCountsPerDataSourcePerTier.get(hotTierName).getLong(dataSource));
    Assert.assertEquals(0L, underReplicationCountsPerDataSourcePerTier.get(coldTierName).getLong(dataSource));
    Assert.assertEquals(0L, underReplicationCountsPerDataSourcePerTier.get(tierName1).getLong(dataSource));
    Assert.assertEquals(0L, underReplicationCountsPerDataSourcePerTier.get(tierName2).getLong(dataSource));
    Map<String, Object2LongMap<String>> underReplicationCountsPerDataSourcePerTierUsingClusterView = coordinator.computeUnderReplicationCountsPerDataSourcePerTierUsingClusterView();
    Assert.assertEquals(4, underReplicationCountsPerDataSourcePerTierUsingClusterView.size());
    Assert.assertEquals(0L, underReplicationCountsPerDataSourcePerTierUsingClusterView.get(hotTierName).getLong(dataSource));
    Assert.assertEquals(0L, underReplicationCountsPerDataSourcePerTierUsingClusterView.get(coldTierName).getLong(dataSource));
    Assert.assertEquals(0L, underReplicationCountsPerDataSourcePerTierUsingClusterView.get(tierName1).getLong(dataSource));
    Assert.assertEquals(0L, underReplicationCountsPerDataSourcePerTierUsingClusterView.get(tierName2).getLong(dataSource));
    coordinator.stop();
    leaderUnannouncerLatch.await();
    EasyMock.verify(serverInventoryView);
    EasyMock.verify(segmentsMetadataManager);
    EasyMock.verify(metadataRuleManager);
}
Also used: ForeverBroadcastDistributionRule(org.apache.druid.server.coordinator.rules.ForeverBroadcastDistributionRule) Object2LongMap(it.unimi.dsi.fastutil.objects.Object2LongMap) DruidServer(org.apache.druid.client.DruidServer) ImmutableDruidServer(org.apache.druid.client.ImmutableDruidServer) CountDownLatch(java.util.concurrent.CountDownLatch) DataSegment(org.apache.druid.timeline.DataSegment) DruidDataSource(org.apache.druid.client.DruidDataSource) ImmutableDruidDataSource(org.apache.druid.client.ImmutableDruidDataSource) PathChildrenCache(org.apache.curator.framework.recipes.cache.PathChildrenCache) IntervalLoadRule(org.apache.druid.server.coordinator.rules.IntervalLoadRule) ForeverLoadRule(org.apache.druid.server.coordinator.rules.ForeverLoadRule) Rule(org.apache.druid.server.coordinator.rules.Rule) Test(org.junit.Test)
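
Both assertion blocks above read from a Map<String, Object2LongMap<String>>, fastutil's primitive-valued map keyed here by tier and then by datasource. A minimal, self-contained sketch of how that nested structure behaves; the names are illustrative, not Druid internals:

import it.unimi.dsi.fastutil.objects.Object2LongMap;
import it.unimi.dsi.fastutil.objects.Object2LongOpenHashMap;

import java.util.HashMap;
import java.util.Map;

public class UnderReplicationMapSketch {
    public static void main(String[] args) {
        // tier -> (dataSource -> count of missing replicas); illustrative only
        Map<String, Object2LongMap<String>> counts = new HashMap<>();
        counts.computeIfAbsent("hot", tier -> new Object2LongOpenHashMap<>())
              .put("dataSource", 0L);
        // getLong returns a primitive long; absent keys yield the map's default return value (0)
        System.out.println(counts.get("hot").getLong("dataSource")); // prints 0
    }
}

The primitive-long values avoid boxing, which is why the test asserts with getLong rather than get.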

Example 27 with DruidServer

Use of org.apache.druid.client.DruidServer in project druid by druid-io.

From class DruidCoordinatorTest, method testMoveSegment.

@Test
public void testMoveSegment() {
    final DataSegment segment = EasyMock.createNiceMock(DataSegment.class);
    EasyMock.expect(segment.getId()).andReturn(SegmentId.dummy("dummySegment"));
    EasyMock.expect(segment.getDataSource()).andReturn("dummyDataSource");
    EasyMock.replay(segment);
    loadQueuePeon = EasyMock.createNiceMock(LoadQueuePeon.class);
    EasyMock.expect(loadQueuePeon.getLoadQueueSize()).andReturn(1L);
    loadQueuePeon.markSegmentToDrop(segment);
    EasyMock.expectLastCall().once();
    Capture<LoadPeonCallback> loadCallbackCapture = Capture.newInstance();
    Capture<LoadPeonCallback> dropCallbackCapture = Capture.newInstance();
    loadQueuePeon.loadSegment(EasyMock.anyObject(DataSegment.class), EasyMock.capture(loadCallbackCapture));
    EasyMock.expectLastCall().once();
    loadQueuePeon.dropSegment(EasyMock.anyObject(DataSegment.class), EasyMock.capture(dropCallbackCapture));
    EasyMock.expectLastCall().once();
    loadQueuePeon.unmarkSegmentToDrop(segment);
    EasyMock.expectLastCall().once();
    EasyMock.expect(loadQueuePeon.getSegmentsToDrop()).andReturn(new HashSet<>()).once();
    EasyMock.replay(loadQueuePeon);
    ImmutableDruidDataSource druidDataSource = EasyMock.createNiceMock(ImmutableDruidDataSource.class);
    EasyMock.expect(druidDataSource.getSegment(EasyMock.anyObject(SegmentId.class))).andReturn(segment);
    EasyMock.replay(druidDataSource);
    EasyMock.expect(segmentsMetadataManager.getImmutableDataSourceWithUsedSegments(EasyMock.anyString())).andReturn(druidDataSource);
    EasyMock.replay(segmentsMetadataManager);
    EasyMock.expect(dataSourcesSnapshot.getDataSource(EasyMock.anyString())).andReturn(druidDataSource).anyTimes();
    EasyMock.replay(dataSourcesSnapshot);
    scheduledExecutorFactory = EasyMock.createNiceMock(ScheduledExecutorFactory.class);
    EasyMock.replay(scheduledExecutorFactory);
    EasyMock.replay(metadataRuleManager);
    ImmutableDruidDataSource dataSource = EasyMock.createMock(ImmutableDruidDataSource.class);
    EasyMock.expect(dataSource.getSegments()).andReturn(Collections.singletonList(segment)).anyTimes();
    EasyMock.replay(dataSource);
    EasyMock.expect(druidServer.toImmutableDruidServer()).andReturn(
        new ImmutableDruidServer(
            new DruidServerMetadata("from", null, null, 5L, ServerType.HISTORICAL, null, 0),
            1L,
            ImmutableMap.of("dummyDataSource", dataSource),
            1)).atLeastOnce();
    EasyMock.replay(druidServer);
    DruidServer druidServer2 = EasyMock.createMock(DruidServer.class);
    EasyMock.expect(druidServer2.toImmutableDruidServer()).andReturn(
        new ImmutableDruidServer(
            new DruidServerMetadata("to", null, null, 5L, ServerType.HISTORICAL, null, 0),
            1L,
            ImmutableMap.of("dummyDataSource", dataSource),
            1)).atLeastOnce();
    EasyMock.replay(druidServer2);
    loadManagementPeons.put("from", loadQueuePeon);
    loadManagementPeons.put("to", loadQueuePeon);
    EasyMock.expect(serverInventoryView.isSegmentLoadedByServer("to", segment)).andReturn(true).once();
    EasyMock.replay(serverInventoryView);
    mockCoordinatorRuntimeParams();
    coordinator.moveSegment(coordinatorRuntimeParams, druidServer.toImmutableDruidServer(), druidServer2.toImmutableDruidServer(), segment, null);
    LoadPeonCallback loadCallback = loadCallbackCapture.getValue();
    loadCallback.execute();
    LoadPeonCallback dropCallback = dropCallbackCapture.getValue();
    dropCallback.execute();
    EasyMock.verify(druidServer, druidServer2, loadQueuePeon, serverInventoryView, metadataRuleManager);
    EasyMock.verify(coordinatorRuntimeParams);
}
Also used: ImmutableDruidDataSource(org.apache.druid.client.ImmutableDruidDataSource) ScheduledExecutorFactory(org.apache.druid.java.util.common.concurrent.ScheduledExecutorFactory) SegmentId(org.apache.druid.timeline.SegmentId) DruidServer(org.apache.druid.client.DruidServer) ImmutableDruidServer(org.apache.druid.client.ImmutableDruidServer) DruidServerMetadata(org.apache.druid.server.coordination.DruidServerMetadata) DataSegment(org.apache.druid.timeline.DataSegment) HashSet(java.util.HashSet) Test(org.junit.Test)
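
The move above relies on EasyMock's Capture to grab the load and drop callbacks at call time so the test can invoke them by hand. A minimal, self-contained sketch of that pattern, assuming only EasyMock on the classpath; the Peon interface here is invented for illustration and is not a Druid type:

import org.easymock.Capture;
import org.easymock.EasyMock;

public class CaptureSketch {
    // hypothetical stand-in for Druid's LoadQueuePeon; not a real Druid interface
    interface Peon {
        void loadSegment(String segmentId, Runnable callback);
    }

    public static void main(String[] args) {
        Peon peon = EasyMock.createMock(Peon.class);
        Capture<Runnable> callbackCapture = Capture.newInstance();
        peon.loadSegment(EasyMock.anyString(), EasyMock.capture(callbackCapture));
        EasyMock.expectLastCall().once();
        EasyMock.replay(peon);

        // the mock records the callback argument when the call happens...
        peon.loadSegment("seg1", () -> System.out.println("load finished"));
        // ...so it can be fired later, exactly as the test above fires loadCallback and dropCallback
        callbackCapture.getValue().run();
        EasyMock.verify(peon);
    }
}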

Example 28 with DruidServer

Use of org.apache.druid.client.DruidServer in project druid by druid-io.

From class CuratorDruidCoordinatorTest, method testMoveSegment.

@Test
public void testMoveSegment() throws Exception {
    segmentViewInitLatch = new CountDownLatch(1);
    segmentAddedLatch = new CountDownLatch(4);
    segmentRemovedLatch = new CountDownLatch(0);
    CountDownLatch destCountdown = new CountDownLatch(1);
    CountDownLatch srcCountdown = new CountDownLatch(1);
    setupView();
    DruidServer source = new DruidServer("localhost:1", "localhost:1", null, 10000000L, ServerType.HISTORICAL, "default_tier", 0);
    DruidServer dest = new DruidServer("localhost:2", "localhost:2", null, 10000000L, ServerType.HISTORICAL, "default_tier", 0);
    setupZNodeForServer(source, zkPathsConfig, jsonMapper);
    setupZNodeForServer(dest, zkPathsConfig, jsonMapper);
    final List<DataSegment> sourceSegments = Lists.transform(
        ImmutableList.of(
            Pair.of("2011-04-01/2011-04-03", "v1"),
            Pair.of("2011-04-03/2011-04-06", "v1"),
            Pair.of("2011-04-06/2011-04-09", "v1")),
        input -> dataSegmentWithIntervalAndVersion(input.lhs, input.rhs));
    final List<DataSegment> destinationSegments = Lists.transform(
        ImmutableList.of(Pair.of("2011-03-31/2011-04-01", "v1")),
        input -> dataSegmentWithIntervalAndVersion(input.lhs, input.rhs));
    DataSegment segmentToMove = sourceSegments.get(2);
    List<String> sourceSegKeys = new ArrayList<>();
    for (DataSegment segment : sourceSegments) {
        sourceSegKeys.add(announceBatchSegmentsForServer(source, ImmutableSet.of(segment), zkPathsConfig, jsonMapper));
    }
    for (DataSegment segment : destinationSegments) {
        announceBatchSegmentsForServer(dest, ImmutableSet.of(segment), zkPathsConfig, jsonMapper);
    }
    Assert.assertTrue(timing.forWaiting().awaitLatch(segmentViewInitLatch));
    Assert.assertTrue(timing.forWaiting().awaitLatch(segmentAddedLatch));
    // these child watchers simulate the actions of historicals: announcing a segment when a load request
    // appears in the destination's load queue, and unannouncing it from the source server on a drop request
    sourceLoadQueueChildrenCache.getListenable().addListener((CuratorFramework curatorFramework, PathChildrenCacheEvent event) -> {
        if (event.getType().equals(PathChildrenCacheEvent.Type.INITIALIZED)) {
            srcCountdown.countDown();
        } else if (CuratorUtils.isChildAdded(event)) {
            // Simulate source server dropping segment
            unannounceSegmentFromBatchForServer(source, segmentToMove, sourceSegKeys.get(2), zkPathsConfig);
        }
    });
    destinationLoadQueueChildrenCache.getListenable().addListener((CuratorFramework curatorFramework, PathChildrenCacheEvent event) -> {
        if (event.getType().equals(PathChildrenCacheEvent.Type.INITIALIZED)) {
            destCountdown.countDown();
        } else if (CuratorUtils.isChildAdded(event)) {
            // Simulate destination server loading segment
            announceBatchSegmentsForServer(dest, ImmutableSet.of(segmentToMove), zkPathsConfig, jsonMapper);
        }
    });
    sourceLoadQueueChildrenCache.start(PathChildrenCache.StartMode.POST_INITIALIZED_EVENT);
    destinationLoadQueueChildrenCache.start(PathChildrenCache.StartMode.POST_INITIALIZED_EVENT);
    Assert.assertTrue(timing.forWaiting().awaitLatch(srcCountdown));
    Assert.assertTrue(timing.forWaiting().awaitLatch(destCountdown));
    loadManagementPeons.put("localhost:1", sourceLoadQueuePeon);
    loadManagementPeons.put("localhost:2", destinationLoadQueuePeon);
    segmentRemovedLatch = new CountDownLatch(1);
    segmentAddedLatch = new CountDownLatch(1);
    ImmutableDruidDataSource druidDataSource = EasyMock.createNiceMock(ImmutableDruidDataSource.class);
    EasyMock.expect(druidDataSource.getSegment(EasyMock.anyObject(SegmentId.class))).andReturn(sourceSegments.get(2));
    EasyMock.replay(druidDataSource);
    EasyMock.expect(segmentsMetadataManager.getImmutableDataSourceWithUsedSegments(EasyMock.anyString())).andReturn(druidDataSource);
    EasyMock.expect(coordinatorRuntimeParams.getDataSourcesSnapshot()).andReturn(dataSourcesSnapshot).anyTimes();
    EasyMock.replay(segmentsMetadataManager, coordinatorRuntimeParams);
    EasyMock.expect(dataSourcesSnapshot.getDataSource(EasyMock.anyString())).andReturn(druidDataSource).anyTimes();
    EasyMock.replay(dataSourcesSnapshot);
    coordinator.moveSegment(coordinatorRuntimeParams, source.toImmutableDruidServer(), dest.toImmutableDruidServer(), sourceSegments.get(2), null);
    // wait for destination server to load segment
    Assert.assertTrue(timing.forWaiting().awaitLatch(segmentAddedLatch));
    // remove load queue key from destination server to trigger adding drop to load queue
    curator.delete().guaranteed().forPath(ZKPaths.makePath(DESTINATION_LOAD_PATH, segmentToMove.getId().toString()));
    // wait for drop
    Assert.assertTrue(timing.forWaiting().awaitLatch(segmentRemovedLatch));
    // clean up drop from load queue
    curator.delete().guaranteed().forPath(ZKPaths.makePath(SOURCE_LOAD_PATH, segmentToMove.getId().toString()));
    List<DruidServer> servers = new ArrayList<>(serverView.getInventory());
    Assert.assertEquals(2, servers.get(0).getTotalSegments());
    Assert.assertEquals(2, servers.get(1).getTotalSegments());
}
Also used: CuratorFramework(org.apache.curator.framework.CuratorFramework) ImmutableDruidDataSource(org.apache.druid.client.ImmutableDruidDataSource) PathChildrenCacheEvent(org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent) SegmentId(org.apache.druid.timeline.SegmentId) ArrayList(java.util.ArrayList) DruidServer(org.apache.druid.client.DruidServer) CountDownLatch(java.util.concurrent.CountDownLatch) DataSegment(org.apache.druid.timeline.DataSegment) Test(org.junit.Test)
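
The listener wiring above follows Curator's standard PathChildrenCache recipe: register a listener, start the cache in POST_INITIALIZED_EVENT mode, and react to child events. A minimal, self-contained sketch of that recipe under the assumption that curator-test's TestingServer is on the classpath; the path and names are illustrative:

import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.framework.recipes.cache.PathChildrenCache;
import org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent;
import org.apache.curator.retry.RetryOneTime;
import org.apache.curator.test.TestingServer;

public class LoadQueueWatcherSketch {
    public static void main(String[] args) throws Exception {
        try (TestingServer zk = new TestingServer();
             CuratorFramework curator = CuratorFrameworkFactory.newClient(zk.getConnectString(), new RetryOneTime(100))) {
            curator.start();
            PathChildrenCache cache = new PathChildrenCache(curator, "/druid/loadqueue/localhost:1", true);
            cache.getListenable().addListener((CuratorFramework client, PathChildrenCacheEvent event) -> {
                // a real historical would load or drop the segment named by the child node here
                if (event.getType() == PathChildrenCacheEvent.Type.CHILD_ADDED) {
                    System.out.println("load request: " + event.getData().getPath());
                }
            });
            cache.start(PathChildrenCache.StartMode.POST_INITIALIZED_EVENT);
            curator.create().creatingParentsIfNeeded().forPath("/druid/loadqueue/localhost:1/segment1");
            Thread.sleep(1000); // crude wait for the async listener; the test above uses latches instead
            cache.close();
        }
    }
}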

Example 29 with DruidServer

Use of org.apache.druid.client.DruidServer in project druid by druid-io.

From class BalancerStrategyTest, method findNewSegmentHomeReplicatorNotEnoughSpace.

@Test
public void findNewSegmentHomeReplicatorNotEnoughSpace() {
    final ServerHolder serverHolder = new ServerHolder(
        new DruidServer("server1", "host1", null, 10L, ServerType.HISTORICAL, DruidServer.DEFAULT_TIER, 0)
            .addDataSegment(proposedDataSegment)
            .toImmutableDruidServer(),
        new LoadQueuePeonTester());
    serverHolders = new ArrayList<>();
    serverHolders.add(serverHolder);
    final ServerHolder foundServerHolder = balancerStrategy.findNewSegmentHomeReplicator(proposedDataSegment, serverHolders);
    // there is not enough space on the server (available size 10L) to host a segment of size 11L, so the result should be null
    Assert.assertNull(foundServerHolder);
}
Also used: DruidServer(org.apache.druid.client.DruidServer) Test(org.junit.Test)
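
The null result follows directly from the sizes involved. A hedged sketch of that mismatch, using only constructors already visible in these examples; it additionally assumes the druid-server test artifact (which provides LoadQueuePeonTester) and ServerHolder's getAvailableSize accessor, and the 11-byte segment stands in for proposedDataSegment:

import org.apache.druid.client.DruidServer;
import org.apache.druid.java.util.common.Intervals;
import org.apache.druid.server.coordination.ServerType;
import org.apache.druid.server.coordinator.LoadQueuePeonTester;
import org.apache.druid.server.coordinator.ServerHolder;
import org.apache.druid.timeline.DataSegment;

public class NotEnoughSpaceSketch {
    public static void main(String[] args) {
        DruidServer server = new DruidServer("server1", "host1", null, 10L, ServerType.HISTORICAL, DruidServer.DEFAULT_TIER, 0);
        DataSegment oversized = new DataSegment("dataSource", Intervals.of("2011-04-01/P1D"), "v1", null, null, null, null, 0x9, 11L);
        ServerHolder holder = new ServerHolder(server.toImmutableDruidServer(), new LoadQueuePeonTester());
        System.out.println(holder.getAvailableSize()); // 10
        System.out.println(oversized.getSize());       // 11, so this server cannot be a new segment home
    }
}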

Example 30 with DruidServer

Use of org.apache.druid.client.DruidServer in project druid by druid-io.

From class BalancerStrategyTest, method findNewSegmentHomeReplicatorNotEnoughNodesForReplication.

@Test(timeout = 5000L)
public void findNewSegmentHomeReplicatorNotEnoughNodesForReplication() {
    final ServerHolder serverHolder1 = new ServerHolder(
        new DruidServer("server1", "host1", null, 1000L, ServerType.HISTORICAL, DruidServer.DEFAULT_TIER, 0)
            .addDataSegment(proposedDataSegment)
            .toImmutableDruidServer(),
        new LoadQueuePeonTester());
    final ServerHolder serverHolder2 = new ServerHolder(
        new DruidServer("server2", "host2", null, 1000L, ServerType.HISTORICAL, DruidServer.DEFAULT_TIER, 0)
            .addDataSegment(proposedDataSegment)
            .toImmutableDruidServer(),
        new LoadQueuePeonTester());
    serverHolders = new ArrayList<>();
    serverHolders.add(serverHolder1);
    serverHolders.add(serverHolder2);
    final ServerHolder foundServerHolder = balancerStrategy.findNewSegmentHomeReplicator(proposedDataSegment, serverHolders);
    // there are not enough nodes to load three replicas of the segment, so the result should be null
    Assert.assertNull(foundServerHolder);
}
Also used: DruidServer(org.apache.druid.client.DruidServer) Test(org.junit.Test)
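
The failure mode here is different from Example 29: both candidates have plenty of space but already serve the proposed segment. A hedged sketch of that check, under the same assumptions as the previous sketch plus ServerHolder's isServingSegment method; the zero-byte segment stands in for proposedDataSegment:

import org.apache.druid.client.DruidServer;
import org.apache.druid.java.util.common.Intervals;
import org.apache.druid.server.coordination.ServerType;
import org.apache.druid.server.coordinator.LoadQueuePeonTester;
import org.apache.druid.server.coordinator.ServerHolder;
import org.apache.druid.timeline.DataSegment;

public class AlreadyReplicatedSketch {
    public static void main(String[] args) {
        DataSegment segment = new DataSegment("dataSource", Intervals.of("2011-04-01/P1D"), "v1", null, null, null, null, 0x9, 0);
        ServerHolder holder = new ServerHolder(
            new DruidServer("server1", "host1", null, 1000L, ServerType.HISTORICAL, DruidServer.DEFAULT_TIER, 0)
                .addDataSegment(segment)
                .toImmutableDruidServer(),
            new LoadQueuePeonTester());
        // a server already serving the segment is not a valid home for another replica
        System.out.println(holder.isServingSegment(segment)); // true
    }
}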

Aggregations

DruidServer (org.apache.druid.client.DruidServer): 73
Test (org.junit.Test): 57
DataSegment (org.apache.druid.timeline.DataSegment): 43
ImmutableDruidServer (org.apache.druid.client.ImmutableDruidServer): 21
IntervalLoadRule (org.apache.druid.server.coordinator.rules.IntervalLoadRule): 18
ListeningExecutorService (com.google.common.util.concurrent.ListeningExecutorService): 17
ServerHolder (org.apache.druid.server.coordinator.ServerHolder): 14
CoordinatorStats (org.apache.druid.server.coordinator.CoordinatorStats): 12
DruidCluster (org.apache.druid.server.coordinator.DruidCluster): 12
LoadQueuePeon (org.apache.druid.server.coordinator.LoadQueuePeon): 11
HashMap (java.util.HashMap): 9
Object2LongMap (it.unimi.dsi.fastutil.objects.Object2LongMap): 8
ArrayList (java.util.ArrayList): 8
Response (javax.ws.rs.core.Response): 8
ForeverLoadRule (org.apache.druid.server.coordinator.rules.ForeverLoadRule): 8
HashSet (java.util.HashSet): 7
Map (java.util.Map): 7
CountDownLatch (java.util.concurrent.CountDownLatch): 7
DirectDruidClient (org.apache.druid.client.DirectDruidClient): 7
ImmutableDruidDataSource (org.apache.druid.client.ImmutableDruidDataSource): 7