Search in sources:

Example 6 with ImmutableDruidServer

Use of io.druid.client.ImmutableDruidServer in project druid by druid-io.

The class DruidCoordinatorBalancerProfiler, method bigProfiler:

public void bigProfiler() {
    Stopwatch watch = Stopwatch.createUnstarted();
    int numSegments = 55000;
    int numServers = 50;
    // Mock the metadata rule manager so every datasource resolves to the test rules.
    EasyMock.expect(manager.getAllRules()).andReturn(ImmutableMap.<String, List<Rule>>of("test", rules)).anyTimes();
    EasyMock.expect(manager.getRules(EasyMock.<String>anyObject())).andReturn(rules).anyTimes();
    EasyMock.expect(manager.getRulesWithDefault(EasyMock.<String>anyObject())).andReturn(rules).anyTimes();
    EasyMock.replay(manager);
    // Stub out real segment moves; the profiler measures only decision-making time.
    coordinator.moveSegment(EasyMock.<ImmutableDruidServer>anyObject(), EasyMock.<ImmutableDruidServer>anyObject(), EasyMock.<String>anyObject(), EasyMock.<LoadPeonCallback>anyObject());
    EasyMock.expectLastCall().anyTimes();
    EasyMock.replay(coordinator);
    List<DruidServer> serverList = Lists.newArrayList();
    Map<String, LoadQueuePeon> peonMap = Maps.newHashMap();
    List<ServerHolder> serverHolderList = Lists.newArrayList();
    Map<String, DataSegment> segmentMap = Maps.newHashMap();
    // Create 55,000 one-hour segments of 4 bytes each, one datasource per segment.
    for (int i = 0; i < numSegments; i++) {
        segmentMap.put("segment" + i, new DataSegment("datasource" + i, new Interval(new DateTime("2012-01-01"), (new DateTime("2012-01-01")).plusHours(1)), (new DateTime("2012-03-01")).toString(), Maps.<String, Object>newHashMap(), Lists.<String>newArrayList(), Lists.<String>newArrayList(), NoneShardSpec.instance(), 0, 4L));
    }
    // Mock 50 servers: server 0 holds every segment, the rest start empty.
    for (int i = 0; i < numServers; i++) {
        ImmutableDruidServer server = EasyMock.createMock(ImmutableDruidServer.class);
        EasyMock.expect(server.getMetadata()).andReturn(null).anyTimes();
        EasyMock.expect(server.getCurrSize()).andReturn(30L).atLeastOnce();
        EasyMock.expect(server.getMaxSize()).andReturn(100L).atLeastOnce();
        EasyMock.expect(server.getTier()).andReturn("normal").anyTimes();
        EasyMock.expect(server.getName()).andReturn(Integer.toString(i)).atLeastOnce();
        EasyMock.expect(server.getHost()).andReturn(Integer.toString(i)).anyTimes();
        if (i == 0) {
            EasyMock.expect(server.getSegments()).andReturn(segmentMap).anyTimes();
        } else {
            EasyMock.expect(server.getSegments()).andReturn(new HashMap<String, DataSegment>()).anyTimes();
        }
        EasyMock.expect(server.getSegment(EasyMock.<String>anyObject())).andReturn(null).anyTimes();
        EasyMock.replay(server);
        LoadQueuePeon peon = new LoadQueuePeonTester();
        peonMap.put(Integer.toString(i), peon);
        serverHolderList.add(new ServerHolder(server, peon));
    }
    DruidCoordinatorRuntimeParams params = DruidCoordinatorRuntimeParams
        .newBuilder()
        .withDruidCluster(
            new DruidCluster(
                ImmutableMap.<String, MinMaxPriorityQueue<ServerHolder>>of(
                    "normal",
                    MinMaxPriorityQueue.orderedBy(DruidCoordinatorBalancerTester.percentUsedComparator).create(serverHolderList))))
        .withLoadManagementPeons(peonMap)
        .withAvailableSegments(segmentMap.values())
        .withDynamicConfigs(
            new CoordinatorDynamicConfig.Builder()
                .withMaxSegmentsToMove(MAX_SEGMENTS_TO_MOVE)
                .withReplicantLifetime(500)
                .withReplicationThrottleLimit(5)
                .build())
        .withBalancerReferenceTimestamp(new DateTime("2013-01-01"))
        .withEmitter(emitter)
        .withDatabaseRuleManager(manager)
        .withReplicationManager(new ReplicationThrottler(2, 500))
        .withSegmentReplicantLookup(
            SegmentReplicantLookup.make(
                new DruidCluster(
                    ImmutableMap.<String, MinMaxPriorityQueue<ServerHolder>>of(
                        "normal",
                        MinMaxPriorityQueue.orderedBy(DruidCoordinatorBalancerTester.percentUsedComparator).create(serverHolderList)))))
        .build();
    DruidCoordinatorBalancerTester tester = new DruidCoordinatorBalancerTester(coordinator);
    DruidCoordinatorRuleRunner runner = new DruidCoordinatorRuleRunner(coordinator);
    watch.start();
    // Run the balancer pass and the rule-runner pass back to back and time them together.
    DruidCoordinatorRuntimeParams balanceParams = tester.run(params);
    DruidCoordinatorRuntimeParams assignParams = runner.run(params);
    System.out.println(watch.stop());
}
Also used : HashMap(java.util.HashMap) Stopwatch(com.google.common.base.Stopwatch) DataSegment(io.druid.timeline.DataSegment) DateTime(org.joda.time.DateTime) MinMaxPriorityQueue(com.google.common.collect.MinMaxPriorityQueue) DruidCoordinatorRuleRunner(io.druid.server.coordinator.helper.DruidCoordinatorRuleRunner) ImmutableList(com.google.common.collect.ImmutableList) List(java.util.List) DruidServer(io.druid.client.DruidServer) ImmutableDruidServer(io.druid.client.ImmutableDruidServer) Interval(org.joda.time.Interval)
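
For readers unpacking the profiler loop: the DataSegment constructor call above is dense, so the sketch below spells it out with one argument per line. The argument roles are inferred from the call site; the wrapper class and the printed identifier are only for illustration.

import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import io.druid.timeline.DataSegment;
import io.druid.timeline.partition.NoneShardSpec;
import org.joda.time.DateTime;
import org.joda.time.Interval;

public class DataSegmentSketch {

    public static void main(String[] args) {
        // One of the 55,000 segments built in the loop above.
        DataSegment segment = new DataSegment(
            // dataSource name
            "datasource0",
            // one-hour interval starting 2012-01-01
            new Interval(new DateTime("2012-01-01"), new DateTime("2012-01-01").plusHours(1)),
            // version string
            new DateTime("2012-03-01").toString(),
            // loadSpec
            Maps.<String, Object>newHashMap(),
            // dimensions
            Lists.<String>newArrayList(),
            // metrics
            Lists.<String>newArrayList(),
            // shardSpec: single, unpartitioned chunk
            NoneShardSpec.instance(),
            // binaryVersion
            0,
            // size in bytes
            4L);
        System.out.println(segment.getIdentifier());
    }
}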

Example 7 with ImmutableDruidServer

Use of io.druid.client.ImmutableDruidServer in project druid by druid-io.

The class SegmentReplicantLookup, method make:

public static SegmentReplicantLookup make(DruidCluster cluster) {
    // (segment identifier, tier) -> replica count: one table for served segments, one for queued segments.
    final Table<String, String, Integer> segmentsInCluster = HashBasedTable.create();
    final Table<String, String, Integer> loadingSegments = HashBasedTable.create();
    for (MinMaxPriorityQueue<ServerHolder> serversByType : cluster.getSortedServersByTier()) {
        for (ServerHolder serverHolder : serversByType) {
            ImmutableDruidServer server = serverHolder.getServer();
            // Count every replica this server currently serves.
            for (DataSegment segment : server.getSegments().values()) {
                Integer numReplicants = segmentsInCluster.get(segment.getIdentifier(), server.getTier());
                if (numReplicants == null) {
                    numReplicants = 0;
                }
                segmentsInCluster.put(segment.getIdentifier(), server.getTier(), ++numReplicants);
            }
            // Also account for queued segments
            for (DataSegment segment : serverHolder.getPeon().getSegmentsToLoad()) {
                Integer numReplicants = loadingSegments.get(segment.getIdentifier(), server.getTier());
                if (numReplicants == null) {
                    numReplicants = 0;
                }
                loadingSegments.put(segment.getIdentifier(), server.getTier(), ++numReplicants);
            }
        }
    }
    return new SegmentReplicantLookup(segmentsInCluster, loadingSegments);
}
Also used : DataSegment(io.druid.timeline.DataSegment) ImmutableDruidServer(io.druid.client.ImmutableDruidServer)
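
The get, null-check, put-back-incremented sequence above is the usual counting idiom for Guava's Table. A minimal standalone illustration, with made-up segment identifier and tier values:

import com.google.common.collect.HashBasedTable;
import com.google.common.collect.Table;

public class ReplicaCountSketch {

    public static void main(String[] args) {
        // (segment identifier, tier) -> replica count, as in SegmentReplicantLookup.make().
        Table<String, String, Integer> counts = HashBasedTable.create();
        String segmentId = "datasource_2012-01-01_v1";
        String tier = "normal";
        // Each pass over a server holding the segment bumps the count by one.
        for (int i = 0; i < 3; i++) {
            Integer numReplicants = counts.get(segmentId, tier);
            counts.put(segmentId, tier, numReplicants == null ? 1 : numReplicants + 1);
        }
        // Prints 3.
        System.out.println(counts.get(segmentId, tier));
    }
}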

Example 8 with ImmutableDruidServer

Use of io.druid.client.ImmutableDruidServer in project druid by druid-io.

The class DruidCoordinatorBalancer, method moveSegment:

protected void moveSegment(final BalancerSegmentHolder segment, final ImmutableDruidServer toServer, final DruidCoordinatorRuntimeParams params) {
    final LoadQueuePeon toPeon = params.getLoadManagementPeons().get(toServer.getName());
    final ImmutableDruidServer fromServer = segment.getFromServer();
    final DataSegment segmentToMove = segment.getSegment();
    final String segmentName = segmentToMove.getIdentifier();
    // Move only if the target server is not already loading or serving the segment
    // and has enough free space for it.
    if (!toPeon.getSegmentsToLoad().contains(segmentToMove)
            && toServer.getSegment(segmentName) == null
            && new ServerHolder(toServer, toPeon).getAvailableSize() > segmentToMove.getSize()) {
        log.info("Moving [%s] from [%s] to [%s]", segmentName, fromServer.getName(), toServer.getName());
        LoadPeonCallback callback = null;
        try {
            // Register the move before issuing it so other passes see the segment as in flight.
            currentlyMovingSegments.get(toServer.getTier()).put(segmentName, segment);
            callback = new LoadPeonCallback() {

                @Override
                public void execute() {
                    Map<String, BalancerSegmentHolder> movingSegments = currentlyMovingSegments.get(toServer.getTier());
                    if (movingSegments != null) {
                        movingSegments.remove(segmentName);
                    }
                }
            };
            coordinator.moveSegment(fromServer, toServer, segmentToMove.getIdentifier(), callback);
        } catch (Exception e) {
            log.makeAlert(e, String.format("[%s] : Moving exception", segmentName)).emit();
            if (callback != null) {
                callback.execute();
            }
        }
    } else {
        // The move is not possible; stop tracking the segment as in flight.
        currentlyMovingSegments.get(toServer.getTier()).remove(segmentName);
    }
}
Also used : LoadPeonCallback(io.druid.server.coordinator.LoadPeonCallback) ServerHolder(io.druid.server.coordinator.ServerHolder) LoadQueuePeon(io.druid.server.coordinator.LoadQueuePeon) DataSegment(io.druid.timeline.DataSegment) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) Map(java.util.Map) ImmutableDruidServer(io.druid.client.ImmutableDruidServer)
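
The bookkeeping around the move (register in currentlyMovingSegments before issuing it, remove in the callback on completion, or immediately on failure) can be sketched independently of the coordinator. The class and method names below are hypothetical; only the shape of the pattern comes from the method above:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class MoveTrackingSketch {

    // tier -> (segment name -> marker), mirroring currentlyMovingSegments above.
    private final Map<String, Map<String, Object>> moving = new ConcurrentHashMap<>();

    public void trackMove(String tier, String segmentName, Runnable issueMove) {
        moving.computeIfAbsent(tier, t -> new ConcurrentHashMap<>()).put(segmentName, new Object());
        // The same cleanup runs from the completion callback and from the catch block.
        Runnable cleanup = () -> {
            Map<String, Object> segments = moving.get(tier);
            if (segments != null) {
                segments.remove(segmentName);
            }
        };
        try {
            // In the real method this is coordinator.moveSegment(...), with `cleanup`
            // wrapped in a LoadPeonCallback that fires when the load finishes.
            issueMove.run();
        } catch (Exception e) {
            cleanup.run();
        }
    }
}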

Example 9 with ImmutableDruidServer

Use of io.druid.client.ImmutableDruidServer in project druid by druid-io.

The class DruidCoordinatorCleanupOvershadowed, method run:

@Override
public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
    CoordinatorStats stats = new CoordinatorStats();
    // Stop serving old partitions once enough time has passed to be sure we aren't flapping with old data
    if (params.hasDeletionWaitTimeElapsed()) {
        DruidCluster cluster = params.getDruidCluster();
        Map<String, VersionedIntervalTimeline<String, DataSegment>> timelines = Maps.newHashMap();
        for (MinMaxPriorityQueue<ServerHolder> serverHolders : cluster.getSortedServersByTier()) {
            for (ServerHolder serverHolder : serverHolders) {
                ImmutableDruidServer server = serverHolder.getServer();
                for (ImmutableDruidDataSource dataSource : server.getDataSources()) {
                    VersionedIntervalTimeline<String, DataSegment> timeline = timelines.get(dataSource.getName());
                    if (timeline == null) {
                        timeline = new VersionedIntervalTimeline<>(Comparators.comparable());
                        timelines.put(dataSource.getName(), timeline);
                    }
                    for (DataSegment segment : dataSource.getSegments()) {
                        timeline.add(segment.getInterval(), segment.getVersion(), segment.getShardSpec().createChunk(segment));
                    }
                }
            }
        }
        // Remove all segments in the DB that are overshadowed by served segments
        for (DataSegment dataSegment : params.getAvailableSegments()) {
            VersionedIntervalTimeline<String, DataSegment> timeline = timelines.get(dataSegment.getDataSource());
            if (timeline != null && timeline.isOvershadowed(dataSegment.getInterval(), dataSegment.getVersion())) {
                coordinator.removeSegment(dataSegment);
                stats.addToGlobalStat("overShadowedCount", 1);
            }
        }
    }
    return params.buildFromExisting().withCoordinatorStats(stats).build();
}
Also used : CoordinatorStats(io.druid.server.coordinator.CoordinatorStats) ImmutableDruidDataSource(io.druid.client.ImmutableDruidDataSource) ServerHolder(io.druid.server.coordinator.ServerHolder) VersionedIntervalTimeline(io.druid.timeline.VersionedIntervalTimeline) DruidCluster(io.druid.server.coordinator.DruidCluster) DataSegment(io.druid.timeline.DataSegment) ImmutableDruidServer(io.druid.client.ImmutableDruidServer)
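
On Java 8 the null-check-then-put block that creates per-datasource timelines collapses into a single computeIfAbsent call. A behavior-equivalent sketch, assuming the io.druid.java.util.common.guava import path for Comparators in this Druid version:

import io.druid.java.util.common.guava.Comparators;
import io.druid.timeline.DataSegment;
import io.druid.timeline.VersionedIntervalTimeline;

import java.util.Map;

public class TimelineLookupSketch {

    // Equivalent to the get, null-check, new-timeline, put block in the loop above.
    static VersionedIntervalTimeline<String, DataSegment> timelineFor(
        Map<String, VersionedIntervalTimeline<String, DataSegment>> timelines,
        String dataSourceName)
    {
        return timelines.computeIfAbsent(
            dataSourceName,
            k -> new VersionedIntervalTimeline<>(Comparators.comparable()));
    }
}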

Example 10 with ImmutableDruidServer

Use of io.druid.client.ImmutableDruidServer in project druid by druid-io.

The class DruidCoordinatorLogger, method run:

@Override
public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
    DruidCluster cluster = params.getDruidCluster();
    CoordinatorStats stats = params.getCoordinatorStats();
    ServiceEmitter emitter = params.getEmitter();
    Map<String, AtomicLong> assigned = stats.getPerTierStats().get("assignedCount");
    if (assigned != null) {
        for (Map.Entry<String, AtomicLong> entry : assigned.entrySet()) {
            log.info("[%s] : Assigned %s segments among %,d servers", entry.getKey(), entry.getValue().get(), cluster.get(entry.getKey()).size());
        }
    }
    emitTieredStats(emitter, "segment/assigned/count", assigned);
    Map<String, AtomicLong> dropped = stats.getPerTierStats().get("droppedCount");
    if (dropped != null) {
        for (Map.Entry<String, AtomicLong> entry : dropped.entrySet()) {
            log.info("[%s] : Dropped %s segments among %,d servers", entry.getKey(), entry.getValue().get(), cluster.get(entry.getKey()).size());
        }
    }
    emitTieredStats(emitter, "segment/dropped/count", dropped);
    emitTieredStats(emitter, "segment/cost/raw", stats.getPerTierStats().get("initialCost"));
    emitTieredStats(emitter, "segment/cost/normalization", stats.getPerTierStats().get("normalization"));
    emitTieredStats(emitter, "segment/moved/count", stats.getPerTierStats().get("movedCount"));
    emitTieredStats(emitter, "segment/deleted/count", stats.getPerTierStats().get("deletedCount"));
    Map<String, AtomicLong> normalized = stats.getPerTierStats().get("normalizedInitialCostTimesOneThousand");
    if (normalized != null) {
        emitTieredStats(emitter, "segment/cost/normalized", Maps.transformEntries(normalized, new Maps.EntryTransformer<String, AtomicLong, Number>() {

            @Override
            public Number transformEntry(String key, AtomicLong value) {
                return value.doubleValue() / 1000d;
            }
        }));
    }
    Map<String, AtomicLong> unneeded = stats.getPerTierStats().get("unneededCount");
    if (unneeded != null) {
        for (Map.Entry<String, AtomicLong> entry : unneeded.entrySet()) {
            log.info("[%s] : Removed %s unneeded segments among %,d servers", entry.getKey(), entry.getValue().get(), cluster.get(entry.getKey()).size());
        }
    }
    emitTieredStats(emitter, "segment/unneeded/count", stats.getPerTierStats().get("unneededCount"));
    emitter.emit(new ServiceMetricEvent.Builder().build("segment/overShadowed/count", stats.getGlobalStats().get("overShadowedCount")));
    Map<String, AtomicLong> moved = stats.getPerTierStats().get("movedCount");
    if (moved != null) {
        for (Map.Entry<String, AtomicLong> entry : moved.entrySet()) {
            log.info("[%s] : Moved %,d segment(s)", entry.getKey(), entry.getValue().get());
        }
    }
    final Map<String, AtomicLong> unmoved = stats.getPerTierStats().get("unmovedCount");
    if (unmoved != null) {
        for (Map.Entry<String, AtomicLong> entry : unmoved.entrySet()) {
            log.info("[%s] : Let alone %,d segment(s)", entry.getKey(), entry.getValue().get());
        }
    }
    log.info("Load Queues:");
    for (MinMaxPriorityQueue<ServerHolder> serverHolders : cluster.getSortedServersByTier()) {
        for (ServerHolder serverHolder : serverHolders) {
            ImmutableDruidServer server = serverHolder.getServer();
            LoadQueuePeon queuePeon = serverHolder.getPeon();
            log.info("Server[%s, %s, %s] has %,d left to load, %,d left to drop, %,d bytes queued, %,d bytes served.", server.getName(), server.getType(), server.getTier(), queuePeon.getSegmentsToLoad().size(), queuePeon.getSegmentsToDrop().size(), queuePeon.getLoadQueueSize(), server.getCurrSize());
            if (log.isDebugEnabled()) {
                for (DataSegment segment : queuePeon.getSegmentsToLoad()) {
                    log.debug("Segment to load[%s]", segment);
                }
                for (DataSegment segment : queuePeon.getSegmentsToDrop()) {
                    log.debug("Segment to drop[%s]", segment);
                }
            }
        }
    }
    // Emit coordinator metrics
    final Set<Map.Entry<String, LoadQueuePeon>> peonEntries = params.getLoadManagementPeons().entrySet();
    for (Map.Entry<String, LoadQueuePeon> entry : peonEntries) {
        String serverName = entry.getKey();
        LoadQueuePeon queuePeon = entry.getValue();
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.SERVER, serverName).build("segment/loadQueue/size", queuePeon.getLoadQueueSize()));
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.SERVER, serverName).build("segment/loadQueue/failed", queuePeon.getAndResetFailedAssignCount()));
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.SERVER, serverName).build("segment/loadQueue/count", queuePeon.getSegmentsToLoad().size()));
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.SERVER, serverName).build("segment/dropQueue/count", queuePeon.getSegmentsToDrop().size()));
    }
    for (Map.Entry<String, AtomicLong> entry : coordinator.getSegmentAvailability().entrySet()) {
        String datasource = entry.getKey();
        Long count = entry.getValue().get();
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, datasource).build("segment/unavailable/count", count));
    }
    for (Map.Entry<String, CountingMap<String>> entry : coordinator.getReplicationStatus().entrySet()) {
        String tier = entry.getKey();
        CountingMap<String> datasourceAvailabilities = entry.getValue();
        for (Map.Entry<String, AtomicLong> datasourceAvailability : datasourceAvailabilities.entrySet()) {
            String datasource = datasourceAvailability.getKey();
            Long count = datasourceAvailability.getValue().get();
            emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.TIER, tier).setDimension(DruidMetrics.DATASOURCE, datasource).build("segment/underReplicated/count", count));
        }
    }
    // Emit segment metrics
    CountingMap<String> segmentSizes = new CountingMap<String>();
    CountingMap<String> segmentCounts = new CountingMap<String>();
    for (DruidDataSource dataSource : params.getDataSources()) {
        for (DataSegment segment : dataSource.getSegments()) {
            segmentSizes.add(dataSource.getName(), segment.getSize());
            segmentCounts.add(dataSource.getName(), 1L);
        }
    }
    for (Map.Entry<String, Long> entry : segmentSizes.snapshot().entrySet()) {
        String dataSource = entry.getKey();
        Long size = entry.getValue();
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("segment/size", size));
    }
    for (Map.Entry<String, Long> entry : segmentCounts.snapshot().entrySet()) {
        String dataSource = entry.getKey();
        Long count = entry.getValue();
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("segment/count", count));
    }
    return params;
}
Also used : ServiceEmitter(com.metamx.emitter.service.ServiceEmitter) CoordinatorStats(io.druid.server.coordinator.CoordinatorStats) DataSegment(io.druid.timeline.DataSegment) LoadQueuePeon(io.druid.server.coordinator.LoadQueuePeon) DruidCluster(io.druid.server.coordinator.DruidCluster) DruidDataSource(io.druid.client.DruidDataSource) CountingMap(io.druid.collections.CountingMap) AtomicLong(java.util.concurrent.atomic.AtomicLong) ServerHolder(io.druid.server.coordinator.ServerHolder) AtomicLong(java.util.concurrent.atomic.AtomicLong) ServiceMetricEvent(com.metamx.emitter.service.ServiceMetricEvent) CountingMap(io.druid.collections.CountingMap) Map(java.util.Map) ImmutableDruidServer(io.druid.client.ImmutableDruidServer)
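
The per-server emission loop repeats the same builder chain four times; a small hypothetical helper captures the shape. The literal "server" dimension key stands in for the DruidMetrics.SERVER constant used above:

import com.metamx.emitter.service.ServiceEmitter;
import com.metamx.emitter.service.ServiceMetricEvent;

public class MetricEmitSketch {

    // One dimensioned metric event per call, same shape as the loop above.
    static void emitServerMetric(ServiceEmitter emitter, String serverName, String metric, Number value) {
        emitter.emit(
            new ServiceMetricEvent.Builder()
                .setDimension("server", serverName)
                .build(metric, value));
    }
}

With such a helper, each of the four emitter.emit(...) statements in the peon loop becomes a one-line call.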

Aggregations

ImmutableDruidServer (io.druid.client.ImmutableDruidServer): 10
DataSegment (io.druid.timeline.DataSegment): 9
ServerHolder (io.druid.server.coordinator.ServerHolder): 4
DruidServerMetadata (io.druid.server.coordination.DruidServerMetadata): 3
CoordinatorStats (io.druid.server.coordinator.CoordinatorStats): 3
DruidCluster (io.druid.server.coordinator.DruidCluster): 3
LoadQueuePeon (io.druid.server.coordinator.LoadQueuePeon): 3
DruidServer (io.druid.client.DruidServer): 2
ImmutableDruidDataSource (io.druid.client.ImmutableDruidDataSource): 2
LoadPeonCallback (io.druid.server.coordinator.LoadPeonCallback): 2
Map (java.util.Map): 2
Stopwatch (com.google.common.base.Stopwatch): 1
ImmutableList (com.google.common.collect.ImmutableList): 1
MinMaxPriorityQueue (com.google.common.collect.MinMaxPriorityQueue): 1
ServiceEmitter (com.metamx.emitter.service.ServiceEmitter): 1
ServiceMetricEvent (com.metamx.emitter.service.ServiceMetricEvent): 1
DruidDataSource (io.druid.client.DruidDataSource): 1
CountingMap (io.druid.collections.CountingMap): 1
InventoryManagerConfig (io.druid.curator.inventory.InventoryManagerConfig): 1
ScheduledExecutorFactory (io.druid.java.util.common.concurrent.ScheduledExecutorFactory): 1