use of io.druid.client.ImmutableDruidServer in project druid by druid-io.
the class DruidCoordinatorCleanupUnneeded method run.
@Override
public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
  CoordinatorStats stats = new CoordinatorStats();
  Set<DataSegment> availableSegments = params.getAvailableSegments();
  DruidCluster cluster = params.getDruidCluster();
  // Only run the cleanup once the set of available segments has been populated; otherwise the coordinator
  // could drop every segment if it started running cleanup before it finished polling the metadata storage
  // for available segments for the first time.
  if (!availableSegments.isEmpty()) {
    for (MinMaxPriorityQueue<ServerHolder> serverHolders : cluster.getSortedServersByTier()) {
      for (ServerHolder serverHolder : serverHolders) {
        ImmutableDruidServer server = serverHolder.getServer();
        for (ImmutableDruidDataSource dataSource : server.getDataSources()) {
          for (DataSegment segment : dataSource.getSegments()) {
            if (!availableSegments.contains(segment)) {
              LoadQueuePeon queuePeon = params.getLoadManagementPeons().get(server.getName());
              if (!queuePeon.getSegmentsToDrop().contains(segment)) {
                queuePeon.dropSegment(segment, new LoadPeonCallback() {
                  @Override
                  public void execute() {
                  }
                });
                stats.addToTieredStat("unneededCount", server.getTier(), 1);
              }
            }
          }
        }
      }
    }
  } else {
    log.info("Found 0 availableSegments, skipping the cleanup of segments from historicals. This is done to prevent a race condition in which the coordinator would drop all segments if it started running cleanup before it finished polling the metadata storage for available segments for the first time.");
  }
  return params.buildFromExisting().withCoordinatorStats(stats).build();
}
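The method walks every tier, every server, and every served segment, and asks the server's load-queue peon to drop anything that is no longer in the available set, guarding against an empty available set. As a rough illustration of that pattern only (not Druid's API; the Server record, findUnneeded method, and tier map below are hypothetical stand-ins for DruidCluster, ImmutableDruidServer, and the peon's drop queue), a minimal sketch might look like this:

import java.util.*;

public class UnneededSegmentCleanupSketch {
  // Hypothetical stand-in for ImmutableDruidServer: a name plus the segment ids it currently serves.
  record Server(String name, Set<String> servedSegments) {}

  public static Map<String, List<String>> findUnneeded(Map<String, List<Server>> serversByTier,
                                                       Set<String> availableSegments) {
    Map<String, List<String>> toDrop = new HashMap<>();
    if (availableSegments.isEmpty()) {
      // Mirrors the guard above: an empty "available" set most likely means metadata has not been
      // polled yet, so dropping everything would be a mistake.
      return toDrop;
    }
    for (List<Server> tier : serversByTier.values()) {
      for (Server server : tier) {
        for (String segment : server.servedSegments()) {
          if (!availableSegments.contains(segment)) {
            toDrop.computeIfAbsent(server.name(), k -> new ArrayList<>()).add(segment);
          }
        }
      }
    }
    return toDrop;
  }

  public static void main(String[] args) {
    Map<String, List<Server>> cluster = Map.of(
        "hot", List.of(new Server("historical-1", Set.of("seg-1", "seg-2", "stale-seg")))
    );
    System.out.println(findUnneeded(cluster, Set.of("seg-1", "seg-2"))); // {historical-1=[stale-seg]}
  }
}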
use of io.druid.client.ImmutableDruidServer in project druid by druid-io.
the class DruidCluster method add.
public void add(ServerHolder serverHolder) {
  ImmutableDruidServer server = serverHolder.getServer();
  MinMaxPriorityQueue<ServerHolder> tierServers = cluster.get(server.getTier());
  if (tierServers == null) {
    tierServers = MinMaxPriorityQueue.orderedBy(Ordering.natural().reverse()).create();
    cluster.put(server.getTier(), tierServers);
  }
  tierServers.add(serverHolder);
}
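DruidCluster.add lazily creates a reverse-ordered MinMaxPriorityQueue for the server's tier before inserting the holder. A small self-contained sketch of that get-or-create pattern, using the same Guava collections but with plain Integers standing in for ServerHolders (the TierQueueSketch class and its fields are illustrative, not Druid code), might look like this:

import com.google.common.collect.MinMaxPriorityQueue;
import com.google.common.collect.Ordering;

import java.util.HashMap;
import java.util.Map;

public class TierQueueSketch {
  private final Map<String, MinMaxPriorityQueue<Integer>> cluster = new HashMap<>();

  public void add(String tier, int value) {
    MinMaxPriorityQueue<Integer> tierValues = cluster.get(tier);
    if (tierValues == null) {
      // Largest values first, mirroring Ordering.natural().reverse() in DruidCluster.add.
      tierValues = MinMaxPriorityQueue.orderedBy(Ordering.<Integer>natural().reverse()).create();
      cluster.put(tier, tierValues);
    }
    tierValues.add(value);
  }

  public static void main(String[] args) {
    TierQueueSketch sketch = new TierQueueSketch();
    sketch.add("hot", 3);
    sketch.add("hot", 7);
    sketch.add("_default_tier", 1);
    System.out.println(sketch.cluster); // prints both tiers; queue iteration order is unspecified
  }
}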
use of io.druid.client.ImmutableDruidServer in project druid by druid-io.
the class DruidCoordinatorTest method testMoveSegment.
@Test
public void testMoveSegment() throws Exception {
  loadQueuePeon = EasyMock.createNiceMock(LoadQueuePeon.class);
  EasyMock.expect(loadQueuePeon.getLoadQueueSize()).andReturn(new Long(1));
  EasyMock.replay(loadQueuePeon);
  segment = EasyMock.createNiceMock(DataSegment.class);
  EasyMock.replay(segment);
  scheduledExecutorFactory = EasyMock.createNiceMock(ScheduledExecutorFactory.class);
  EasyMock.replay(scheduledExecutorFactory);
  EasyMock.replay(metadataRuleManager);
  EasyMock.expect(druidServer.toImmutableDruidServer()).andReturn(
      new ImmutableDruidServer(
          new DruidServerMetadata("from", null, 5L, null, null, 0),
          1L,
          null,
          ImmutableMap.of("dummySegment", segment)
      )
  ).atLeastOnce();
  EasyMock.replay(druidServer);
  druidServer2 = EasyMock.createMock(DruidServer.class);
  EasyMock.expect(druidServer2.toImmutableDruidServer()).andReturn(
      new ImmutableDruidServer(
          new DruidServerMetadata("to", null, 5L, null, null, 0),
          1L,
          null,
          ImmutableMap.of("dummySegment2", segment)
      )
  ).atLeastOnce();
  EasyMock.replay(druidServer2);
  loadManagementPeons.put("from", loadQueuePeon);
  loadManagementPeons.put("to", loadQueuePeon);
  EasyMock.expect(serverInventoryView.getInventoryManagerConfig()).andReturn(new InventoryManagerConfig() {
    @Override
    public String getContainerPath() {
      return "";
    }

    @Override
    public String getInventoryPath() {
      return "";
    }
  });
  EasyMock.replay(serverInventoryView);
  coordinator.moveSegment(druidServer.toImmutableDruidServer(), druidServer2.toImmutableDruidServer(), "dummySegment", null);
  EasyMock.verify(druidServer);
  EasyMock.verify(druidServer2);
  EasyMock.verify(loadQueuePeon);
  EasyMock.verify(serverInventoryView);
  EasyMock.verify(metadataRuleManager);
}
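The test follows the standard EasyMock lifecycle: create a mock, record expectations with expect(...).andReturn(...), switch the mock to replay mode, exercise the code under test, then verify that the recorded expectations were met. A minimal self-contained sketch of that lifecycle against a hypothetical collaborator interface (SegmentQueue below is an illustrative stand-in, not a Druid type) could be:

import org.easymock.EasyMock;

public class EasyMockLifecycleSketch {
  // Hypothetical collaborator, standing in for LoadQueuePeon / DruidServer in the test above.
  public interface SegmentQueue {
    long getLoadQueueSize();
  }

  public static void main(String[] args) {
    SegmentQueue queue = EasyMock.createNiceMock(SegmentQueue.class);
    EasyMock.expect(queue.getLoadQueueSize()).andReturn(1L);   // record an expectation
    EasyMock.replay(queue);                                    // switch to replay mode

    System.out.println(queue.getLoadQueueSize());              // 1 -- the recorded answer

    EasyMock.verify(queue);                                    // fails if expectations were not met
  }
}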
use of io.druid.client.ImmutableDruidServer in project druid by druid-io.
the class CostBalancerStrategyTest method setupDummyCluster.
/**
 * Create a Druid cluster with serverCount servers holding maxSegments segments each, plus one server holding
 * only (maxSegments - 2) segments. The cost balancer strategy should assign the next segment to the server
 * with fewer segments.
 */
public static List<ServerHolder> setupDummyCluster(int serverCount, int maxSegments) {
  List<ServerHolder> serverHolderList = Lists.newArrayList();
  // Each of these servers holds maxSegments segments
  for (int i = 0; i < serverCount; i++) {
    LoadQueuePeonTester fromPeon = new LoadQueuePeonTester();
    Map<String, DataSegment> segments = Maps.newHashMap();
    for (int j = 0; j < maxSegments; j++) {
      DataSegment segment = getSegment(j);
      segments.put(segment.getIdentifier(), segment);
    }
    serverHolderList.add(
        new ServerHolder(
            new ImmutableDruidServer(
                new DruidServerMetadata("DruidServer_Name_" + i, "localhost", 10000000L, "hot", "hot", 1),
                3000L,
                ImmutableMap.of("DUMMY", EasyMock.createMock(ImmutableDruidDataSource.class)),
                ImmutableMap.copyOf(segments)
            ),
            fromPeon
        )
    );
  }
  // The best server for the next segment assignment holds only (maxSegments - 2) segments
  LoadQueuePeonTester fromPeon = new LoadQueuePeonTester();
  ImmutableDruidServer druidServer = EasyMock.createMock(ImmutableDruidServer.class);
  EasyMock.expect(druidServer.getName()).andReturn("BEST_SERVER").anyTimes();
  EasyMock.expect(druidServer.getCurrSize()).andReturn(3000L).anyTimes();
  EasyMock.expect(druidServer.getMaxSize()).andReturn(10000000L).anyTimes();
  EasyMock.expect(druidServer.getSegment(EasyMock.<String>anyObject())).andReturn(null).anyTimes();
  Map<String, DataSegment> segments = Maps.newHashMap();
  for (int j = 0; j < (maxSegments - 2); j++) {
    DataSegment segment = getSegment(j);
    segments.put(segment.getIdentifier(), segment);
    EasyMock.expect(druidServer.getSegment(segment.getIdentifier())).andReturn(segment).anyTimes();
  }
  EasyMock.expect(druidServer.getSegments()).andReturn(segments).anyTimes();
  EasyMock.replay(druidServer);
  serverHolderList.add(new ServerHolder(druidServer, fromPeon));
  return serverHolderList;
}
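The fixture gives every ordinary server maxSegments segments and the mocked "BEST_SERVER" two fewer, so a cost-based strategy is expected to place the next segment there. As a simplified, hypothetical stand-in for that decision (nothing like Druid's actual cost function, which also weighs segment intervals and sizes), the expectation reduces to "choose the holder with the fewest segments":

import java.util.Comparator;
import java.util.List;
import java.util.Optional;
import java.util.Set;

public class FewestSegmentsSketch {
  // Hypothetical stand-in for ServerHolder: a server name plus the segment ids it already serves.
  record Holder(String name, Set<String> segments) {}

  static Optional<Holder> pickServerForNextSegment(List<Holder> holders) {
    // The expectation encoded by the test fixture: place the next segment on the emptiest server.
    return holders.stream().min(Comparator.comparingInt(h -> h.segments().size()));
  }

  public static void main(String[] args) {
    List<Holder> holders = List.of(
        new Holder("DruidServer_Name_0", Set.of("seg-0", "seg-1", "seg-2")),
        new Holder("BEST_SERVER", Set.of("seg-0"))
    );
    System.out.println(pickServerForNextSegment(holders).map(Holder::name).orElse("none")); // BEST_SERVER
  }
}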
use of io.druid.client.ImmutableDruidServer in project druid by druid-io.
the class DiskNormalizedCostBalancerStrategyTest method setupDummyCluster.
/**
 * Create a Druid cluster with serverCount servers holding maxSegments segments each, plus one server holding
 * the same number of segments but with a much larger max size. The disk-normalized cost balancer strategy
 * should assign the next segment to the server with more free disk capacity.
 */
public static List<ServerHolder> setupDummyCluster(int serverCount, int maxSegments) {
  List<ServerHolder> serverHolderList = Lists.newArrayList();
  // Each of these servers holds maxSegments segments
  for (int i = 0; i < serverCount; i++) {
    LoadQueuePeonTester fromPeon = new LoadQueuePeonTester();
    Map<String, DataSegment> segments = Maps.newHashMap();
    for (int j = 0; j < maxSegments; j++) {
      DataSegment segment = getSegment(j);
      segments.put(segment.getIdentifier(), segment);
    }
    serverHolderList.add(
        new ServerHolder(
            new ImmutableDruidServer(
                new DruidServerMetadata("DruidServer_Name_" + i, "localhost", 10000000L, "hot", "hot", 1),
                3000L,
                ImmutableMap.of("DUMMY", EasyMock.createMock(ImmutableDruidDataSource.class)),
                ImmutableMap.copyOf(segments)
            ),
            fromPeon
        )
    );
  }
  // The best server for the next segment assignment has a greater max size
  LoadQueuePeonTester fromPeon = new LoadQueuePeonTester();
  ImmutableDruidServer druidServer = EasyMock.createMock(ImmutableDruidServer.class);
  EasyMock.expect(druidServer.getName()).andReturn("BEST_SERVER").anyTimes();
  EasyMock.expect(druidServer.getCurrSize()).andReturn(3000L).anyTimes();
  EasyMock.expect(druidServer.getMaxSize()).andReturn(100000000L).anyTimes();
  EasyMock.expect(druidServer.getSegment(EasyMock.<String>anyObject())).andReturn(null).anyTimes();
  Map<String, DataSegment> segments = Maps.newHashMap();
  for (int j = 0; j < maxSegments; j++) {
    DataSegment segment = getSegment(j);
    segments.put(segment.getIdentifier(), segment);
    EasyMock.expect(druidServer.getSegment(segment.getIdentifier())).andReturn(segment).anyTimes();
  }
  EasyMock.expect(druidServer.getSegments()).andReturn(segments).anyTimes();
  EasyMock.replay(druidServer);
  serverHolderList.add(new ServerHolder(druidServer, fromPeon));
  return serverHolderList;
}
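Here the extra server holds just as many segments as the others but advertises ten times the max size (100000000L vs 10000000L), so a disk-normalized strategy should prefer it. A rough, hypothetical sketch of that preference (not Druid's actual disk-normalization formula) is to pick the server whose disk is proportionally least full:

import java.util.Comparator;
import java.util.List;
import java.util.Optional;

public class DiskNormalizedPickSketch {
  // Hypothetical stand-in for ImmutableDruidServer: current bytes used and maximum capacity.
  record ServerInfo(String name, long currSizeBytes, long maxSizeBytes) {
    double usedFraction() {
      return (double) currSizeBytes / maxSizeBytes;
    }
  }

  static Optional<ServerInfo> pickServerForNextSegment(List<ServerInfo> servers) {
    // Prefer the server whose disk is proportionally least full.
    return servers.stream().min(Comparator.comparingDouble(ServerInfo::usedFraction));
  }

  public static void main(String[] args) {
    List<ServerInfo> servers = List.of(
        new ServerInfo("DruidServer_Name_0", 3000L, 10000000L),
        new ServerInfo("BEST_SERVER", 3000L, 100000000L)   // same usage, 10x the capacity
    );
    System.out.println(pickServerForNextSegment(servers).map(ServerInfo::name).orElse("none")); // BEST_SERVER
  }
}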