Use of com.google.common.collect.MinMaxPriorityQueue in project druid by druid-io.
From the class DruidCoordinatorBalancerTest, method testRun2.
@Test
public void testRun2() throws IOException {
    // Mock four servers with different usages: server 1 holds 30 of 100,
    // servers 2-4 are empty, so the balancer should move segments off server 1.
    EasyMock.expect(druidServer1.getName()).andReturn("1").atLeastOnce();
    EasyMock.expect(druidServer1.getTier()).andReturn("normal").anyTimes();
    EasyMock.expect(druidServer1.getCurrSize()).andReturn(30L).atLeastOnce();
    EasyMock.expect(druidServer1.getMaxSize()).andReturn(100L).atLeastOnce();
    EasyMock.expect(druidServer1.getSegments()).andReturn(segments).anyTimes();
    EasyMock.expect(druidServer1.getSegment(EasyMock.<String>anyObject())).andReturn(null).anyTimes();
    EasyMock.replay(druidServer1);

    EasyMock.expect(druidServer2.getName()).andReturn("2").atLeastOnce();
    EasyMock.expect(druidServer2.getTier()).andReturn("normal").anyTimes();
    EasyMock.expect(druidServer2.getCurrSize()).andReturn(0L).atLeastOnce();
    EasyMock.expect(druidServer2.getMaxSize()).andReturn(100L).atLeastOnce();
    EasyMock.expect(druidServer2.getSegments()).andReturn(new HashMap<String, DataSegment>()).anyTimes();
    EasyMock.expect(druidServer2.getSegment(EasyMock.<String>anyObject())).andReturn(null).anyTimes();
    EasyMock.replay(druidServer2);

    EasyMock.expect(druidServer3.getName()).andReturn("3").atLeastOnce();
    EasyMock.expect(druidServer3.getTier()).andReturn("normal").anyTimes();
    EasyMock.expect(druidServer3.getCurrSize()).andReturn(0L).atLeastOnce();
    EasyMock.expect(druidServer3.getMaxSize()).andReturn(100L).atLeastOnce();
    EasyMock.expect(druidServer3.getSegments()).andReturn(new HashMap<String, DataSegment>()).anyTimes();
    EasyMock.expect(druidServer3.getSegment(EasyMock.<String>anyObject())).andReturn(null).anyTimes();
    EasyMock.replay(druidServer3);

    EasyMock.expect(druidServer4.getName()).andReturn("4").atLeastOnce();
    EasyMock.expect(druidServer4.getTier()).andReturn("normal").anyTimes();
    EasyMock.expect(druidServer4.getCurrSize()).andReturn(0L).atLeastOnce();
    EasyMock.expect(druidServer4.getMaxSize()).andReturn(100L).atLeastOnce();
    EasyMock.expect(druidServer4.getSegments()).andReturn(new HashMap<String, DataSegment>()).anyTimes();
    EasyMock.expect(druidServer4.getSegment(EasyMock.<String>anyObject())).andReturn(null).anyTimes();
    EasyMock.replay(druidServer4);

    // Mock stuff that the coordinator needs
    coordinator.moveSegment(EasyMock.<ImmutableDruidServer>anyObject(), EasyMock.<ImmutableDruidServer>anyObject(), EasyMock.<String>anyObject(), EasyMock.<LoadPeonCallback>anyObject());
    EasyMock.expectLastCall().anyTimes();
    EasyMock.replay(coordinator);

    LoadQueuePeonTester peon1 = new LoadQueuePeonTester();
    LoadQueuePeonTester peon2 = new LoadQueuePeonTester();
    LoadQueuePeonTester peon3 = new LoadQueuePeonTester();
    LoadQueuePeonTester peon4 = new LoadQueuePeonTester();

    ListeningExecutorService exec = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(1));
    BalancerStrategy balancerStrategy = new CostBalancerStrategyFactory().createBalancerStrategy(exec);

    // The "normal" tier's servers are kept in a MinMaxPriorityQueue ordered by
    // percent used, so the fullest and emptiest servers sit at the two ends.
    DruidCoordinatorRuntimeParams params = DruidCoordinatorRuntimeParams
        .newBuilder()
        .withDruidCluster(
            new DruidCluster(
                ImmutableMap.<String, MinMaxPriorityQueue<ServerHolder>>of(
                    "normal",
                    MinMaxPriorityQueue.orderedBy(DruidCoordinatorBalancerTester.percentUsedComparator)
                        .create(
                            Arrays.asList(
                                new ServerHolder(druidServer1, peon1),
                                new ServerHolder(druidServer2, peon2),
                                new ServerHolder(druidServer3, peon3),
                                new ServerHolder(druidServer4, peon4)
                            )
                        )
                )
            )
        )
        .withLoadManagementPeons(ImmutableMap.<String, LoadQueuePeon>of("1", peon1, "2", peon2, "3", peon3, "4", peon4))
        .withAvailableSegments(segments.values())
        .withDynamicConfigs(new CoordinatorDynamicConfig.Builder().withMaxSegmentsToMove(MAX_SEGMENTS_TO_MOVE).build())
        .withBalancerStrategy(balancerStrategy)
        .withBalancerReferenceTimestamp(new DateTime("2013-01-01"))
        .build();

    params = new DruidCoordinatorBalancerTester(coordinator).run(params);
    Assert.assertTrue(params.getCoordinatorStats().getPerTierStats().get("movedCount").get("normal").get() > 0);
    exec.shutdown();
}
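
The DruidCluster above keys each tier to a MinMaxPriorityQueue of ServerHolders ordered by percent used, which lets the balancer inspect both the fullest and emptiest servers from either end without sorting. A minimal standalone sketch of that double-ended behavior (the natural-order comparator and the usage values here are illustrative stand-ins, not Druid's percentUsedComparator):

import com.google.common.collect.MinMaxPriorityQueue;
import java.util.Arrays;
import java.util.Comparator;

public class MinMaxQueueDemo {
    public static void main(String[] args) {
        // Order by percent used, ascending: peekFirst() is the least-loaded
        // entry, peekLast() the most-loaded one.
        MinMaxPriorityQueue<Double> byPercentUsed = MinMaxPriorityQueue
            .orderedBy(Comparator.<Double>naturalOrder())
            .create(Arrays.asList(0.30, 0.00, 0.00, 0.00));

        System.out.println(byPercentUsed.peekFirst()); // 0.0 (emptiest server)
        System.out.println(byPercentUsed.peekLast());  // 0.3 (fullest server)
    }
}

peekFirst() and peekLast() are constant-time on MinMaxPriorityQueue, which is what makes it a convenient holder for "move from the fullest to the emptiest" style balancing.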
Use of com.google.common.collect.MinMaxPriorityQueue in project pinot by linkedin.
From the class PinotLLCRealtimeSegmentManager, method completeCommittingSegmentsInternal.
private void completeCommittingSegmentsInternal(String realtimeTableName, Map<Integer, MinMaxPriorityQueue<LLCSegmentName>> partitionToLatestSegments) {
    IdealState idealState = getTableIdealState(realtimeTableName);
    Set<String> segmentNamesIS = idealState.getPartitionSet();
    final ZNRecord partitionAssignment = getKafkaPartitionAssignment(realtimeTableName);
    for (Map.Entry<Integer, MinMaxPriorityQueue<LLCSegmentName>> entry : partitionToLatestSegments.entrySet()) {
        final LLCSegmentName segmentName = entry.getValue().pollFirst();
        final String segmentId = segmentName.getSegmentName();
        final int partitionId = entry.getKey();
        if (!segmentNamesIS.contains(segmentId)) {
            LOGGER.info("{}:Repairing segment for partition {}. Segment {} not found in idealstate", realtimeTableName, partitionId, segmentId);
            List<String> newInstances = partitionAssignment.getListField(Integer.toString(partitionId));
            LOGGER.info("{}: Assigning segment {} to {}", realtimeTableName, segmentId, newInstances);
            // TODO Re-write num-partitions in metadata if needed.
            // If there was a prev segment in the same partition, then we need to fix it to be ONLINE.
            LLCSegmentName prevSegmentName = entry.getValue().pollLast();
            String prevSegmentNameStr = null;
            if (prevSegmentName != null) {
                prevSegmentNameStr = prevSegmentName.getSegmentName();
            }
            updateIdealState(realtimeTableName, newInstances, prevSegmentNameStr, segmentId);
        }
    }
}
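
The repair logic above relies on each per-partition queue's ordering: pollFirst() yields the latest segment for the partition and pollLast() the one before it. A minimal sketch of that convention, using plain integer sequence numbers in place of LLCSegmentName and assuming the queues are built with reverse ordering capped at the two most recent entries (an assumption about how the caller constructs them; this snippet does not show that code):

import com.google.common.collect.MinMaxPriorityQueue;
import java.util.Comparator;

public class LatestSegmentQueueDemo {
    public static void main(String[] args) {
        // Reverse order + maximumSize(2): the queue retains only the two
        // highest sequence numbers it has seen.
        MinMaxPriorityQueue<Integer> latestTwo = MinMaxPriorityQueue
            .orderedBy(Comparator.<Integer>reverseOrder())
            .maximumSize(2)
            .create();
        for (int seq : new int[] {3, 7, 5, 9}) {
            latestTwo.add(seq);
        }
        System.out.println(latestTwo.pollFirst()); // 9 -- the latest segment
        System.out.println(latestTwo.pollLast());  // 7 -- the previous segment
    }
}

With maximumSize(2), inserting a newer sequence number silently evicts the oldest retained one, so the queue always holds exactly the "latest" and "previous" pair the repair loop needs.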