
Example 16 with ServerHolder

use of org.apache.druid.server.coordinator.ServerHolder in project druid by druid-io.

In the class BroadcastDistributionRuleTest, the method setUp:

@Before
public void setUp() {
    smallSegment = new DataSegment("small_source", Intervals.of("0/1000"), DateTimes.nowUtc().toString(), new HashMap<>(), new ArrayList<>(), new ArrayList<>(), NoneShardSpec.instance(), 0, 0);
    for (int i = 0; i < 3; i++) {
        largeSegments.add(new DataSegment("large_source", Intervals.of((i * 1000) + "/" + ((i + 1) * 1000)), DateTimes.nowUtc().toString(), new HashMap<>(), new ArrayList<>(), new ArrayList<>(), NoneShardSpec.instance(), 0, 100));
    }
    for (int i = 0; i < 2; i++) {
        largeSegments2.add(new DataSegment("large_source2", Intervals.of((i * 1000) + "/" + ((i + 1) * 1000)), DateTimes.nowUtc().toString(), new HashMap<>(), new ArrayList<>(), new ArrayList<>(), NoneShardSpec.instance(), 0, 100));
    }
    holderOfSmallSegment = new ServerHolder(new DruidServer("serverHot2", "hostHot2", null, 1000, ServerType.HISTORICAL, "hot", 0).addDataSegment(smallSegment).toImmutableDruidServer(), new LoadQueuePeonTester());
    holdersOfLargeSegments.add(new ServerHolder(new DruidServer("serverHot1", "hostHot1", null, 1000, ServerType.HISTORICAL, "hot", 0).addDataSegment(largeSegments.get(0)).toImmutableDruidServer(), new LoadQueuePeonTester()));
    holdersOfLargeSegments.add(new ServerHolder(new DruidServer("serverNorm1", "hostNorm1", null, 1000, ServerType.HISTORICAL, DruidServer.DEFAULT_TIER, 0).addDataSegment(largeSegments.get(1)).toImmutableDruidServer(), new LoadQueuePeonTester()));
    holdersOfLargeSegments.add(new ServerHolder(new DruidServer("serverNorm2", "hostNorm2", null, 100, ServerType.HISTORICAL, DruidServer.DEFAULT_TIER, 0).addDataSegment(largeSegments.get(2)).toImmutableDruidServer(), new LoadQueuePeonTester()));
    holdersOfLargeSegments2.add(new ServerHolder(new DruidServer("serverHot3", "hostHot3", null, 1000, ServerType.HISTORICAL, "hot", 0).addDataSegment(largeSegments2.get(0)).toImmutableDruidServer(), new LoadQueuePeonTester()));
    holdersOfLargeSegments2.add(new ServerHolder(new DruidServer("serverNorm3", "hostNorm3", null, 100, ServerType.HISTORICAL, DruidServer.DEFAULT_TIER, 0).addDataSegment(largeSegments2.get(1)).toImmutableDruidServer(), new LoadQueuePeonTester()));
    activeServer = new ServerHolder(new DruidServer("active", "host1", null, 100, ServerType.HISTORICAL, "tier1", 0).addDataSegment(largeSegments.get(0)).toImmutableDruidServer(), new LoadQueuePeonTester());
    decommissioningServer1 = new ServerHolder(new DruidServer("decommissioning1", "host2", null, 100, ServerType.HISTORICAL, "tier1", 0).addDataSegment(smallSegment).toImmutableDruidServer(), new LoadQueuePeonTester(), true);
    decommissioningServer2 = new ServerHolder(new DruidServer("decommissioning2", "host3", null, 100, ServerType.HISTORICAL, "tier1", 0).addDataSegment(largeSegments.get(1)).toImmutableDruidServer(), new LoadQueuePeonTester(), true);
    druidCluster = DruidClusterBuilder.newBuilder().addTier("hot", holdersOfLargeSegments.get(0), holderOfSmallSegment, holdersOfLargeSegments2.get(0)).addTier(DruidServer.DEFAULT_TIER, holdersOfLargeSegments.get(1), holdersOfLargeSegments.get(2), holdersOfLargeSegments2.get(1)).build();
    secondCluster = DruidClusterBuilder.newBuilder().addTier("tier1", activeServer, decommissioningServer1, decommissioningServer2).build();
}
Also used : ServerHolder(org.apache.druid.server.coordinator.ServerHolder) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) DruidServer(org.apache.druid.client.DruidServer) DataSegment(org.apache.druid.timeline.DataSegment) LoadQueuePeonTester(org.apache.druid.server.coordinator.LoadQueuePeonTester) Before(org.junit.Before)
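
A minimal sketch of a test that could exercise this fixture, assuming JUnit's Assert is in scope. getHistoricalsByTier appears elsewhere on this page; ServerHolder.isDecommissioning is an assumption based on the boolean constructor argument used above:

@Test
public void testSetUpAssembledTiers() {
    // setUp placed three holders in the "hot" tier and three in the default tier.
    Assert.assertEquals(3, druidCluster.getHistoricalsByTier("hot").size());
    // The trailing boolean constructor argument marks a holder as decommissioning (assumed accessor).
    Assert.assertTrue(decommissioningServer1.isDecommissioning());
}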

Example 17 with ServerHolder

use of org.apache.druid.server.coordinator.ServerHolder in project druid by druid-io.

In the class BalanceSegments, the method balanceServers:

private Pair<Integer, Integer> balanceServers(DruidCoordinatorRuntimeParams params, List<ServerHolder> toMoveFrom, List<ServerHolder> toMoveTo, int maxSegmentsToMove) {
    if (maxSegmentsToMove <= 0) {
        log.debug("maxSegmentsToMove is 0; no balancing work can be performed.");
        return new Pair<>(0, 0);
    } else if (toMoveFrom.isEmpty()) {
        log.debug("toMoveFrom is empty; no balancing work can be performed.");
        return new Pair<>(0, 0);
    } else if (toMoveTo.isEmpty()) {
        log.debug("toMoveTo is empty; no balancing work can be peformed.");
        return new Pair<>(0, 0);
    }
    final BalancerStrategy strategy = params.getBalancerStrategy();
    final int maxIterations = 2 * maxSegmentsToMove;
    final int maxToLoad = params.getCoordinatorDynamicConfig().getMaxSegmentsInNodeLoadingQueue();
    int moved = 0, unmoved = 0;
    Iterator<BalancerSegmentHolder> segmentsToMove;
    // The pick method depends on whether the operator has enabled batched segment sampling in the Coordinator dynamic config.
    if (params.getCoordinatorDynamicConfig().useBatchedSegmentSampler()) {
        segmentsToMove = strategy.pickSegmentsToMove(toMoveFrom, params.getBroadcastDatasources(), maxSegmentsToMove);
    } else {
        segmentsToMove = strategy.pickSegmentsToMove(toMoveFrom, params.getBroadcastDatasources(), params.getCoordinatorDynamicConfig().getPercentOfSegmentsToConsiderPerMove());
    }
    // noinspection ForLoopThatDoesntUseLoopVariable
    for (int iter = 0; (moved + unmoved) < maxSegmentsToMove; ++iter) {
        if (!segmentsToMove.hasNext()) {
            log.info("All servers to move segments from are empty, ending run.");
            break;
        }
        final BalancerSegmentHolder segmentToMoveHolder = segmentsToMove.next();
        // DruidCoordinatorRuntimeParams.getUsedSegments originates from SegmentsMetadataManager, i.e. it is the set of
        // segments that *should* be loaded. segmentToMoveHolder.getSegment originates from ServerInventoryView, i.e. it
        // may be any segment that happens to be loaded on some server, even one that is no longer used. (The Coordinator
        // eventually closes such discrepancies via UnloadUnusedSegments.) Therefore the picked segmentToMoveHolder's
        // segment may not need to be balanced.
        boolean needToBalancePickedSegment = params.getUsedSegments().contains(segmentToMoveHolder.getSegment());
        if (needToBalancePickedSegment) {
            final DataSegment segmentToMove = segmentToMoveHolder.getSegment();
            final ImmutableDruidServer fromServer = segmentToMoveHolder.getFromServer();
            // we want to leave the server the segment is currently on in the list...
            // but filter out replicas that are already serving the segment, and servers with a full load queue
            final List<ServerHolder> toMoveToWithLoadQueueCapacityAndNotServingSegment = toMoveTo.stream().filter(s -> s.getServer().equals(fromServer) || (!s.isServingSegment(segmentToMove) && (maxToLoad <= 0 || s.getNumberOfSegmentsInQueue() < maxToLoad))).collect(Collectors.toList());
            if (toMoveToWithLoadQueueCapacityAndNotServingSegment.size() > 0) {
                final ServerHolder destinationHolder = strategy.findNewSegmentHomeBalancer(segmentToMove, toMoveToWithLoadQueueCapacityAndNotServingSegment);
                if (destinationHolder != null && !destinationHolder.getServer().equals(fromServer)) {
                    if (moveSegment(segmentToMoveHolder, destinationHolder.getServer(), params)) {
                        moved++;
                    } else {
                        unmoved++;
                    }
                } else {
                    log.debug("Segment [%s] is 'optimally' placed.", segmentToMove.getId());
                    unmoved++;
                }
            } else {
                log.debug("No valid movement destinations for segment [%s].", segmentToMove.getId());
                unmoved++;
            }
        }
        if (iter >= maxIterations) {
            log.info("Unable to select %d remaining candidate segments out of %d total to balance " + "after %d iterations, ending run.", (maxSegmentsToMove - moved - unmoved), maxSegmentsToMove, iter);
            break;
        }
    }
    return new Pair<>(moved, unmoved);
}
Also used : EmittingLogger(org.apache.druid.java.util.emitter.EmittingLogger) Iterator(java.util.Iterator) SortedSet(java.util.SortedSet) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) StringUtils(org.apache.druid.java.util.common.StringUtils) DruidCoordinatorRuntimeParams(org.apache.druid.server.coordinator.DruidCoordinatorRuntimeParams) DruidCoordinator(org.apache.druid.server.coordinator.DruidCoordinator) LoadPeonCallback(org.apache.druid.server.coordinator.LoadPeonCallback) HashMap(java.util.HashMap) NavigableSet(java.util.NavigableSet) CoordinatorStats(org.apache.druid.server.coordinator.CoordinatorStats) Collectors(java.util.stream.Collectors) Pair(org.apache.druid.java.util.common.Pair) ConcurrentMap(java.util.concurrent.ConcurrentMap) List(java.util.List) Lists(com.google.common.collect.Lists) BalancerStrategy(org.apache.druid.server.coordinator.BalancerStrategy) Map(java.util.Map) DataSegment(org.apache.druid.timeline.DataSegment) BalancerSegmentHolder(org.apache.druid.server.coordinator.BalancerSegmentHolder) LoadQueuePeon(org.apache.druid.server.coordinator.LoadQueuePeon) ServerHolder(org.apache.druid.server.coordinator.ServerHolder) SegmentId(org.apache.druid.timeline.SegmentId) ImmutableDruidServer(org.apache.druid.client.ImmutableDruidServer)
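
A hedged sketch of how a caller might fold the returned pair into coordinator stats. Druid's Pair exposes its elements as public lhs/rhs fields, and addToTieredStat plus the movedCount/unmovedCount stat names appear in other examples on this page; the caller-side variables tier and stats are hypothetical here, and this exact accumulation is an assumption:

final Pair<Integer, Integer> result = balanceServers(params, toMoveFrom, toMoveTo, maxSegmentsToMove);
// lhs = segments actually moved, rhs = segments considered but left in place.
stats.addToTieredStat("movedCount", tier, result.lhs);
stats.addToTieredStat("unmovedCount", tier, result.rhs);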

Example 18 with ServerHolder

use of org.apache.druid.server.coordinator.ServerHolder in project druid by druid-io.

In the class EmitClusterStatsAndMetrics, the method run:

@Override
public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
    DruidCluster cluster = params.getDruidCluster();
    CoordinatorStats stats = params.getCoordinatorStats();
    ServiceEmitter emitter = params.getEmitter();
    stats.forEachTieredStat("assignedCount", (final String tier, final long count) -> {
        log.info("[%s] : Assigned %s segments among %,d servers", tier, count, cluster.getHistoricalsByTier(tier).size());
        emitTieredStat(emitter, "segment/assigned/count", tier, count);
    });
    stats.forEachTieredStat("droppedCount", (final String tier, final long count) -> {
        log.info("[%s] : Dropped %s segments among %,d servers", tier, count, cluster.getHistoricalsByTier(tier).size());
        emitTieredStat(emitter, "segment/dropped/count", tier, count);
    });
    emitTieredStats(emitter, "segment/cost/raw", stats, "initialCost");
    emitTieredStats(emitter, "segment/cost/normalization", stats, "normalization");
    emitTieredStats(emitter, "segment/moved/count", stats, "movedCount");
    emitTieredStats(emitter, "segment/deleted/count", stats, "deletedCount");
    stats.forEachTieredStat("normalizedInitialCostTimesOneThousand", (final String tier, final long count) -> {
        emitTieredStat(emitter, "segment/cost/normalized", tier, count / 1000d);
    });
    stats.forEachTieredStat("unneededCount", (final String tier, final long count) -> {
        log.info("[%s] : Removed %s unneeded segments among %,d servers", tier, count, cluster.getHistoricalsByTier(tier).size());
        emitTieredStat(emitter, "segment/unneeded/count", tier, count);
    });
    emitter.emit(new ServiceMetricEvent.Builder().build("segment/overShadowed/count", stats.getGlobalStat("overShadowedCount")));
    stats.forEachTieredStat("movedCount", (final String tier, final long count) -> {
        log.info("[%s] : Moved %,d segment(s)", tier, count);
    });
    stats.forEachTieredStat("unmovedCount", (final String tier, final long count) -> {
        log.info("[%s] : Let alone %,d segment(s)", tier, count);
    });
    log.info("Load Queues:");
    for (Iterable<ServerHolder> serverHolders : cluster.getSortedHistoricalsByTier()) {
        for (ServerHolder serverHolder : serverHolders) {
            ImmutableDruidServer server = serverHolder.getServer();
            LoadQueuePeon queuePeon = serverHolder.getPeon();
            log.info("Server[%s, %s, %s] has %,d left to load, %,d left to drop, %,d bytes queued, %,d bytes served.", server.getName(), server.getType().toString(), server.getTier(), queuePeon.getSegmentsToLoad().size(), queuePeon.getSegmentsToDrop().size(), queuePeon.getLoadQueueSize(), server.getCurrSize());
            if (log.isDebugEnabled()) {
                for (DataSegment segment : queuePeon.getSegmentsToLoad()) {
                    log.debug("Segment to load[%s]", segment);
                }
                for (DataSegment segment : queuePeon.getSegmentsToDrop()) {
                    log.debug("Segment to drop[%s]", segment);
                }
            }
            stats.addToTieredStat(TOTAL_CAPACITY, server.getTier(), server.getMaxSize());
            stats.addToTieredStat(TOTAL_HISTORICAL_COUNT, server.getTier(), 1);
        }
    }
    params.getDatabaseRuleManager().getAllRules().values().forEach(rules -> rules.forEach(rule -> {
        if (rule instanceof LoadRule) {
            ((LoadRule) rule).getTieredReplicants().forEach((tier, replica) -> stats.accumulateMaxTieredStat(MAX_REPLICATION_FACTOR, tier, replica));
        }
    }));
    emitTieredStats(emitter, "tier/required/capacity", stats, LoadRule.REQUIRED_CAPACITY);
    emitTieredStats(emitter, "tier/total/capacity", stats, TOTAL_CAPACITY);
    emitTieredStats(emitter, "tier/replication/factor", stats, MAX_REPLICATION_FACTOR);
    emitTieredStats(emitter, "tier/historical/count", stats, TOTAL_HISTORICAL_COUNT);
    // Emit coordinator metrics
    params.getLoadManagementPeons().forEach((final String serverName, final LoadQueuePeon queuePeon) -> {
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.SERVER, serverName).build("segment/loadQueue/size", queuePeon.getLoadQueueSize()));
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.SERVER, serverName).build("segment/loadQueue/failed", queuePeon.getAndResetFailedAssignCount()));
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.SERVER, serverName).build("segment/loadQueue/count", queuePeon.getSegmentsToLoad().size()));
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.SERVER, serverName).build("segment/dropQueue/count", queuePeon.getSegmentsToDrop().size()));
    });
    coordinator.computeNumsUnavailableUsedSegmentsPerDataSource().object2IntEntrySet().forEach((final Object2IntMap.Entry<String> entry) -> {
        final String dataSource = entry.getKey();
        final int numUnavailableUsedSegmentsInDataSource = entry.getIntValue();
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("segment/unavailable/count", numUnavailableUsedSegmentsInDataSource));
    });
    coordinator.computeUnderReplicationCountsPerDataSourcePerTier().forEach((final String tier, final Object2LongMap<String> underReplicationCountsPerDataSource) -> {
        for (final Object2LongMap.Entry<String> entry : underReplicationCountsPerDataSource.object2LongEntrySet()) {
            final String dataSource = entry.getKey();
            final long underReplicationCount = entry.getLongValue();
            emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.TIER, tier).setDimension(DruidMetrics.DATASOURCE, dataSource).build("segment/underReplicated/count", underReplicationCount));
        }
    });
    emitter.emit(new ServiceMetricEvent.Builder().build("compact/task/count", stats.getGlobalStat(CompactSegments.COMPACTION_TASK_COUNT)));
    emitter.emit(new ServiceMetricEvent.Builder().build("compactTask/maxSlot/count", stats.getGlobalStat(CompactSegments.MAX_COMPACTION_TASK_SLOT)));
    emitter.emit(new ServiceMetricEvent.Builder().build("compactTask/availableSlot/count", stats.getGlobalStat(CompactSegments.AVAILABLE_COMPACTION_TASK_SLOT)));
    stats.forEachDataSourceStat(CompactSegments.TOTAL_SIZE_OF_SEGMENTS_AWAITING, (final String dataSource, final long count) -> {
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("segment/waitCompact/bytes", count));
    });
    stats.forEachDataSourceStat(CompactSegments.TOTAL_COUNT_OF_SEGMENTS_AWAITING, (final String dataSource, final long count) -> {
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("segment/waitCompact/count", count));
    });
    stats.forEachDataSourceStat(CompactSegments.TOTAL_INTERVAL_OF_SEGMENTS_AWAITING, (final String dataSource, final long count) -> {
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("interval/waitCompact/count", count));
    });
    stats.forEachDataSourceStat(CompactSegments.TOTAL_SIZE_OF_SEGMENTS_SKIPPED, (final String dataSource, final long count) -> {
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("segment/skipCompact/bytes", count));
    });
    stats.forEachDataSourceStat(CompactSegments.TOTAL_COUNT_OF_SEGMENTS_SKIPPED, (final String dataSource, final long count) -> {
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("segment/skipCompact/count", count));
    });
    stats.forEachDataSourceStat(CompactSegments.TOTAL_INTERVAL_OF_SEGMENTS_SKIPPED, (final String dataSource, final long count) -> {
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("interval/skipCompact/count", count));
    });
    stats.forEachDataSourceStat(CompactSegments.TOTAL_SIZE_OF_SEGMENTS_COMPACTED, (final String dataSource, final long count) -> {
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("segment/compacted/bytes", count));
    });
    stats.forEachDataSourceStat(CompactSegments.TOTAL_COUNT_OF_SEGMENTS_COMPACTED, (final String dataSource, final long count) -> {
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("segment/compacted/count", count));
    });
    stats.forEachDataSourceStat(CompactSegments.TOTAL_INTERVAL_OF_SEGMENTS_COMPACTED, (final String dataSource, final long count) -> {
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("interval/compacted/count", count));
    });
    // Emit segment metrics
    params.getUsedSegmentsTimelinesPerDataSource().forEach((String dataSource, VersionedIntervalTimeline<String, DataSegment> dataSourceWithUsedSegments) -> {
        long totalSizeOfUsedSegments = dataSourceWithUsedSegments.iterateAllObjects().stream().mapToLong(DataSegment::getSize).sum();
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("segment/size", totalSizeOfUsedSegments));
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("segment/count", dataSourceWithUsedSegments.getNumObjects()));
    });
    // Emit coordinator runtime stats
    emitDutyStats(emitter, "coordinator/time", stats, "runtime");
    return params;
}
Also used : ServiceEmitter(org.apache.druid.java.util.emitter.service.ServiceEmitter) Logger(org.apache.druid.java.util.common.logger.Logger) DruidMetrics(org.apache.druid.query.DruidMetrics) VersionedIntervalTimeline(org.apache.druid.timeline.VersionedIntervalTimeline) ServiceMetricEvent(org.apache.druid.java.util.emitter.service.ServiceMetricEvent) DruidCoordinatorRuntimeParams(org.apache.druid.server.coordinator.DruidCoordinatorRuntimeParams) DruidCoordinator(org.apache.druid.server.coordinator.DruidCoordinator) CoordinatorStats(org.apache.druid.server.coordinator.CoordinatorStats) Object2LongMap(it.unimi.dsi.fastutil.objects.Object2LongMap) Object2IntMap(it.unimi.dsi.fastutil.objects.Object2IntMap) DataSegment(org.apache.druid.timeline.DataSegment) DruidCluster(org.apache.druid.server.coordinator.DruidCluster) LoadQueuePeon(org.apache.druid.server.coordinator.LoadQueuePeon) LoadRule(org.apache.druid.server.coordinator.rules.LoadRule) ServerHolder(org.apache.druid.server.coordinator.ServerHolder) ImmutableDruidServer(org.apache.druid.client.ImmutableDruidServer)
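
The emitTieredStat helper invoked throughout this method is not reproduced on this page. A plausible implementation, assuming it only attaches the tier dimension before emitting (the Builder calls mirror those used in the method body above):

private static void emitTieredStat(ServiceEmitter emitter, String metricName, String tier, double value) {
    // Tag the metric with its tier so per-tier values can be distinguished downstream.
    emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.TIER, tier).build(metricName, value));
}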

Example 19 with ServerHolder

use of org.apache.druid.server.coordinator.ServerHolder in project druid by druid-io.

In the class BalanceSegments, the method moveSegment:

protected boolean moveSegment(final BalancerSegmentHolder segment, final ImmutableDruidServer toServer, final DruidCoordinatorRuntimeParams params) {
    final LoadQueuePeon toPeon = params.getLoadManagementPeons().get(toServer.getName());
    final ImmutableDruidServer fromServer = segment.getFromServer();
    final DataSegment segmentToMove = segment.getSegment();
    final SegmentId segmentId = segmentToMove.getId();
    if (!toPeon.getSegmentsToLoad().contains(segmentToMove) && (toServer.getSegment(segmentId) == null) && new ServerHolder(toServer, toPeon).getAvailableSize() > segmentToMove.getSize()) {
        log.debug("Moving [%s] from [%s] to [%s]", segmentId, fromServer.getName(), toServer.getName());
        LoadPeonCallback callback = null;
        try {
            ConcurrentMap<SegmentId, BalancerSegmentHolder> movingSegments = currentlyMovingSegments.get(toServer.getTier());
            movingSegments.put(segmentId, segment);
            callback = () -> movingSegments.remove(segmentId);
            coordinator.moveSegment(params, fromServer, toServer, segmentToMove, callback);
            return true;
        } catch (Exception e) {
            log.makeAlert(e, StringUtils.format("[%s] : Moving exception", segmentId)).emit();
            if (callback != null) {
                callback.execute();
            }
        }
    }
    return false;
}
Also used : LoadPeonCallback(org.apache.druid.server.coordinator.LoadPeonCallback) ServerHolder(org.apache.druid.server.coordinator.ServerHolder) SegmentId(org.apache.druid.timeline.SegmentId) BalancerSegmentHolder(org.apache.druid.server.coordinator.BalancerSegmentHolder) LoadQueuePeon(org.apache.druid.server.coordinator.LoadQueuePeon) DataSegment(org.apache.druid.timeline.DataSegment) ImmutableDruidServer(org.apache.druid.client.ImmutableDruidServer)
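
The guard condition above bundles three checks. As a readability sketch, they could be extracted into a helper; every call here (getSegmentsToLoad, getSegment, getAvailableSize) appears verbatim in the method body, so only the helper itself is new:

private static boolean canMoveTo(ImmutableDruidServer toServer, LoadQueuePeon toPeon, DataSegment segment) {
    // The target must not already be loading the segment...
    return !toPeon.getSegmentsToLoad().contains(segment)
        // ...must not already be serving it...
        && toServer.getSegment(segment.getId()) == null
        // ...and must have enough free space to hold it.
        && new ServerHolder(toServer, toPeon).getAvailableSize() > segment.getSize();
}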

Example 20 with ServerHolder

use of org.apache.druid.server.coordinator.ServerHolder in project druid by druid-io.

In the class UnloadUnusedSegments, the method run:

@Override
public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
    CoordinatorStats stats = new CoordinatorStats();
    Set<DataSegment> usedSegments = params.getUsedSegments();
    DruidCluster cluster = params.getDruidCluster();
    Map<String, Boolean> broadcastStatusByDatasource = new HashMap<>();
    for (String broadcastDatasource : params.getBroadcastDatasources()) {
        broadcastStatusByDatasource.put(broadcastDatasource, true);
    }
    for (SortedSet<ServerHolder> serverHolders : cluster.getSortedHistoricalsByTier()) {
        for (ServerHolder serverHolder : serverHolders) {
            handleUnusedSegmentsForServer(serverHolder, usedSegments, params, stats, false, broadcastStatusByDatasource);
        }
    }
    for (ServerHolder serverHolder : cluster.getBrokers()) {
        handleUnusedSegmentsForServer(serverHolder, usedSegments, params, stats, false, broadcastStatusByDatasource);
    }
    for (ServerHolder serverHolder : cluster.getRealtimes()) {
        handleUnusedSegmentsForServer(serverHolder, usedSegments, params, stats, true, broadcastStatusByDatasource);
    }
    return params.buildFromExisting().withCoordinatorStats(stats).build();
}
Also used : CoordinatorStats(org.apache.druid.server.coordinator.CoordinatorStats) ServerHolder(org.apache.druid.server.coordinator.ServerHolder) HashMap(java.util.HashMap) DruidCluster(org.apache.druid.server.coordinator.DruidCluster) DataSegment(org.apache.druid.timeline.DataSegment)
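
handleUnusedSegmentsForServer is not shown on this page. A heavily simplified sketch of the decision it presumably makes for each loaded segment; it omits the broadcast and dropBroadcastOnly handling the real method performs, and iterateAllSegments on ImmutableDruidServer plus dropSegment on LoadQueuePeon are assumptions about the Druid APIs of this era:

// For each segment the server currently holds, queue a drop if it is no longer used.
for (DataSegment segment : serverHolder.getServer().iterateAllSegments()) {
    if (!usedSegments.contains(segment)) {
        // LoadPeonCallback is a single-method interface, so an empty lambda suffices here.
        serverHolder.getPeon().dropSegment(segment, () -> {
        });
        stats.addToTieredStat("unneededCount", serverHolder.getServer().getTier(), 1);
    }
}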

Aggregations

ServerHolder (org.apache.druid.server.coordinator.ServerHolder) 32
CoordinatorStats (org.apache.druid.server.coordinator.CoordinatorStats) 22
DataSegment (org.apache.druid.timeline.DataSegment) 22
DruidCluster (org.apache.druid.server.coordinator.DruidCluster) 18
Test (org.junit.Test) 16
DruidServer (org.apache.druid.client.DruidServer) 15
ImmutableDruidServer (org.apache.druid.client.ImmutableDruidServer) 15
LoadQueuePeon (org.apache.druid.server.coordinator.LoadQueuePeon) 15
DruidCoordinatorRuntimeParams (org.apache.druid.server.coordinator.DruidCoordinatorRuntimeParams) 7
LoadQueuePeonTester (org.apache.druid.server.coordinator.LoadQueuePeonTester) 5
HashMap (java.util.HashMap) 4
Object2IntMap (it.unimi.dsi.fastutil.objects.Object2IntMap) 3
List (java.util.List) 3
Collectors (java.util.stream.Collectors) 3
ServerType (org.apache.druid.server.coordination.ServerType) 3
DruidCoordinator (org.apache.druid.server.coordinator.DruidCoordinator) 3
SegmentId (org.apache.druid.timeline.SegmentId) 3
Object2LongMap (it.unimi.dsi.fastutil.objects.Object2LongMap) 2
ArrayList (java.util.ArrayList) 2
Map (java.util.Map) 2