Search in sources :

Example 6 with MinMaxPriorityQueue

Use of com.google.common.collect.MinMaxPriorityQueue in project druid by druid-io.

From the class DruidCoordinatorBalancerTest, method testRun2.

/**
 * Verifies that the coordinator balancer moves segments off a loaded server onto
 * empty peers: server 1 holds 30 of 100 units while servers 2-4 (all in the
 * "normal" tier) are empty, and the test asserts that at least one move was
 * recorded in the per-tier coordinator stats.
 */
@Test
public void testRun2() throws IOException {
    // Mock some servers of different usages
    // Server 1 is the move source: 30/100 used, owns all segments in `segments`.
    // NOTE(review): unlike servers 2-4, no getTier() expectation is set for
    // server 1 — presumably the balancer never asks the source for its tier;
    // confirm against the balancer implementation.
    EasyMock.expect(druidServer1.getName()).andReturn("1").atLeastOnce();
    EasyMock.expect(druidServer1.getCurrSize()).andReturn(30L).atLeastOnce();
    EasyMock.expect(druidServer1.getMaxSize()).andReturn(100L).atLeastOnce();
    EasyMock.expect(druidServer1.getSegments()).andReturn(segments).anyTimes();
    EasyMock.expect(druidServer1.getSegment(EasyMock.<String>anyObject())).andReturn(null).anyTimes();
    EasyMock.replay(druidServer1);
    // Servers 2-4 are identical empty targets: 0/100 used, "normal" tier, no segments.
    EasyMock.expect(druidServer2.getName()).andReturn("2").atLeastOnce();
    EasyMock.expect(druidServer2.getTier()).andReturn("normal").anyTimes();
    EasyMock.expect(druidServer2.getCurrSize()).andReturn(0L).atLeastOnce();
    EasyMock.expect(druidServer2.getMaxSize()).andReturn(100L).atLeastOnce();
    EasyMock.expect(druidServer2.getSegments()).andReturn(new HashMap<String, DataSegment>()).anyTimes();
    EasyMock.expect(druidServer2.getSegment(EasyMock.<String>anyObject())).andReturn(null).anyTimes();
    EasyMock.replay(druidServer2);
    EasyMock.expect(druidServer3.getName()).andReturn("3").atLeastOnce();
    EasyMock.expect(druidServer3.getTier()).andReturn("normal").anyTimes();
    EasyMock.expect(druidServer3.getCurrSize()).andReturn(0L).atLeastOnce();
    EasyMock.expect(druidServer3.getMaxSize()).andReturn(100L).atLeastOnce();
    EasyMock.expect(druidServer3.getSegments()).andReturn(new HashMap<String, DataSegment>()).anyTimes();
    EasyMock.expect(druidServer3.getSegment(EasyMock.<String>anyObject())).andReturn(null).anyTimes();
    EasyMock.replay(druidServer3);
    EasyMock.expect(druidServer4.getName()).andReturn("4").atLeastOnce();
    EasyMock.expect(druidServer4.getTier()).andReturn("normal").anyTimes();
    EasyMock.expect(druidServer4.getCurrSize()).andReturn(0L).atLeastOnce();
    EasyMock.expect(druidServer4.getMaxSize()).andReturn(100L).atLeastOnce();
    EasyMock.expect(druidServer4.getSegments()).andReturn(new HashMap<String, DataSegment>()).anyTimes();
    EasyMock.expect(druidServer4.getSegment(EasyMock.<String>anyObject())).andReturn(null).anyTimes();
    EasyMock.replay(druidServer4);
    // Mock stuff that the coordinator needs
    // Accept any moveSegment(from, to, segmentId, callback) call, any number of times;
    // the assertion below relies on the tester's stats, not on the mock's bookkeeping.
    coordinator.moveSegment(EasyMock.<ImmutableDruidServer>anyObject(), EasyMock.<ImmutableDruidServer>anyObject(), EasyMock.<String>anyObject(), EasyMock.<LoadPeonCallback>anyObject());
    EasyMock.expectLastCall().anyTimes();
    EasyMock.replay(coordinator);
    // One load-queue peon per server, keyed "1".."4" in the peon map below.
    LoadQueuePeonTester peon1 = new LoadQueuePeonTester();
    LoadQueuePeonTester peon2 = new LoadQueuePeonTester();
    LoadQueuePeonTester peon3 = new LoadQueuePeonTester();
    LoadQueuePeonTester peon4 = new LoadQueuePeonTester();
    // Single-threaded executor backing the cost-based balancer strategy;
    // shut down at the end of the test.
    ListeningExecutorService exec = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(1));
    BalancerStrategy balancerStrategy = new CostBalancerStrategyFactory().createBalancerStrategy(exec);
    // Build a one-tier ("normal") cluster of all four servers, ordered by the
    // tester's percent-used comparator, with a fixed balancer reference timestamp
    // so the run is deterministic.
    DruidCoordinatorRuntimeParams params = DruidCoordinatorRuntimeParams.newBuilder().withDruidCluster(new DruidCluster(ImmutableMap.<String, MinMaxPriorityQueue<ServerHolder>>of("normal", MinMaxPriorityQueue.orderedBy(DruidCoordinatorBalancerTester.percentUsedComparator).create(Arrays.asList(new ServerHolder(druidServer1, peon1), new ServerHolder(druidServer2, peon2), new ServerHolder(druidServer3, peon3), new ServerHolder(druidServer4, peon4)))))).withLoadManagementPeons(ImmutableMap.<String, LoadQueuePeon>of("1", peon1, "2", peon2, "3", peon3, "4", peon4)).withAvailableSegments(segments.values()).withDynamicConfigs(new CoordinatorDynamicConfig.Builder().withMaxSegmentsToMove(MAX_SEGMENTS_TO_MOVE).build()).withBalancerStrategy(balancerStrategy).withBalancerReferenceTimestamp(new DateTime("2013-01-01")).build();
    params = new DruidCoordinatorBalancerTester(coordinator).run(params);
    // The balancer must have moved at least one segment within the "normal" tier.
    Assert.assertTrue(params.getCoordinatorStats().getPerTierStats().get("movedCount").get("normal").get() > 0);
    exec.shutdown();
}
Also used : HashMap(java.util.HashMap) DateTime(org.joda.time.DateTime) MinMaxPriorityQueue(com.google.common.collect.MinMaxPriorityQueue) ListeningExecutorService(com.google.common.util.concurrent.ListeningExecutorService) Test(org.junit.Test)

Example 7 with MinMaxPriorityQueue

Use of com.google.common.collect.MinMaxPriorityQueue in project pinot by linkedin.

From the class PinotLLCRealtimeSegmentManager, method completeCommittingSegmentsInternal.

/**
 * Repairs the table's ideal state for every partition whose latest segment is missing
 * from it, assigning that segment to the instances mapped to its Kafka partition.
 *
 * @param realtimeTableName name of the realtime table being repaired
 * @param partitionToLatestSegments per-partition queue of segment names; the head is
 *        treated as the latest segment and the tail as the previous one
 */
private void completeCommittingSegmentsInternal(String realtimeTableName, Map<Integer, MinMaxPriorityQueue<LLCSegmentName>> partitionToLatestSegments) {
    final IdealState tableIdealState = getTableIdealState(realtimeTableName);
    final Set<String> segmentsInIdealState = tableIdealState.getPartitionSet();
    final ZNRecord partitionAssignment = getKafkaPartitionAssignment(realtimeTableName);
    for (Map.Entry<Integer, MinMaxPriorityQueue<LLCSegmentName>> latestEntry : partitionToLatestSegments.entrySet()) {
        final int partitionId = latestEntry.getKey();
        // Head of the queue is the latest segment for this partition.
        final LLCSegmentName latestSegmentName = latestEntry.getValue().pollFirst();
        final String segmentId = latestSegmentName.getSegmentName();
        if (segmentsInIdealState.contains(segmentId)) {
            // Already present in the ideal state — nothing to repair for this partition.
            continue;
        }
        LOGGER.info("{}:Repairing segment for partition {}. Segment {} not found in idealstate", realtimeTableName, partitionId, segmentId);
        final List<String> newInstances = partitionAssignment.getListField(Integer.toString(partitionId));
        LOGGER.info("{}: Assigning segment {} to {}", realtimeTableName, segmentId, newInstances);
        // TODO Re-write num-partitions in metadata if needed.
        // If there was a prev segment in the same partition, then we need to fix it to be ONLINE.
        final LLCSegmentName previousSegmentName = latestEntry.getValue().pollLast();
        final String prevSegmentNameStr = (previousSegmentName == null) ? null : previousSegmentName.getSegmentName();
        updateIdealState(realtimeTableName, newInstances, prevSegmentNameStr, segmentId);
    }
}
Also used : MinMaxPriorityQueue(com.google.common.collect.MinMaxPriorityQueue) Object2IntLinkedOpenHashMap(it.unimi.dsi.fastutil.objects.Object2IntLinkedOpenHashMap) Map(java.util.Map) HashMap(java.util.HashMap) Object2IntMap(it.unimi.dsi.fastutil.objects.Object2IntMap) LLCSegmentName(com.linkedin.pinot.common.utils.LLCSegmentName) IdealState(org.apache.helix.model.IdealState) ZNRecord(org.apache.helix.ZNRecord)

Aggregations

MinMaxPriorityQueue (com.google.common.collect.MinMaxPriorityQueue)7 HashMap (java.util.HashMap)6 DateTime (org.joda.time.DateTime)4 ListeningExecutorService (com.google.common.util.concurrent.ListeningExecutorService)3 Test (org.junit.Test)3 LLCSegmentName (com.linkedin.pinot.common.utils.LLCSegmentName)2 Object2IntLinkedOpenHashMap (it.unimi.dsi.fastutil.objects.Object2IntLinkedOpenHashMap)2 Map (java.util.Map)2 Stopwatch (com.google.common.base.Stopwatch)1 ImmutableList (com.google.common.collect.ImmutableList)1 DruidServer (io.druid.client.DruidServer)1 ImmutableDruidServer (io.druid.client.ImmutableDruidServer)1 BalancerSegmentHolder (io.druid.server.coordinator.BalancerSegmentHolder)1 BalancerStrategy (io.druid.server.coordinator.BalancerStrategy)1 CoordinatorStats (io.druid.server.coordinator.CoordinatorStats)1 ServerHolder (io.druid.server.coordinator.ServerHolder)1 DruidCoordinatorRuleRunner (io.druid.server.coordinator.helper.DruidCoordinatorRuleRunner)1 DataSegment (io.druid.timeline.DataSegment)1 Object2IntMap (it.unimi.dsi.fastutil.objects.Object2IntMap)1 Comparator (java.util.Comparator)1