
Example 6 with ImmutableDruidServer

use of org.apache.druid.client.ImmutableDruidServer in project druid by druid-io.

the class DruidCoordinatorTest method testMoveSegment.

@Test
public void testMoveSegment() {
    final DataSegment segment = EasyMock.createNiceMock(DataSegment.class);
    EasyMock.expect(segment.getId()).andReturn(SegmentId.dummy("dummySegment"));
    EasyMock.expect(segment.getDataSource()).andReturn("dummyDataSource");
    EasyMock.replay(segment);
    loadQueuePeon = EasyMock.createNiceMock(LoadQueuePeon.class);
    EasyMock.expect(loadQueuePeon.getLoadQueueSize()).andReturn(1L);
    loadQueuePeon.markSegmentToDrop(segment);
    EasyMock.expectLastCall().once();
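    // Capture the load and drop callbacks handed to the peon so the test can fire them manually after moveSegment().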
    Capture<LoadPeonCallback> loadCallbackCapture = Capture.newInstance();
    Capture<LoadPeonCallback> dropCallbackCapture = Capture.newInstance();
    loadQueuePeon.loadSegment(EasyMock.anyObject(DataSegment.class), EasyMock.capture(loadCallbackCapture));
    EasyMock.expectLastCall().once();
    loadQueuePeon.dropSegment(EasyMock.anyObject(DataSegment.class), EasyMock.capture(dropCallbackCapture));
    EasyMock.expectLastCall().once();
    loadQueuePeon.unmarkSegmentToDrop(segment);
    EasyMock.expectLastCall().once();
    EasyMock.expect(loadQueuePeon.getSegmentsToDrop()).andReturn(new HashSet<>()).once();
    EasyMock.replay(loadQueuePeon);
    ImmutableDruidDataSource druidDataSource = EasyMock.createNiceMock(ImmutableDruidDataSource.class);
    EasyMock.expect(druidDataSource.getSegment(EasyMock.anyObject(SegmentId.class))).andReturn(segment);
    EasyMock.replay(druidDataSource);
    EasyMock.expect(segmentsMetadataManager.getImmutableDataSourceWithUsedSegments(EasyMock.anyString())).andReturn(druidDataSource);
    EasyMock.replay(segmentsMetadataManager);
    EasyMock.expect(dataSourcesSnapshot.getDataSource(EasyMock.anyString())).andReturn(druidDataSource).anyTimes();
    EasyMock.replay(dataSourcesSnapshot);
    scheduledExecutorFactory = EasyMock.createNiceMock(ScheduledExecutorFactory.class);
    EasyMock.replay(scheduledExecutorFactory);
    EasyMock.replay(metadataRuleManager);
    ImmutableDruidDataSource dataSource = EasyMock.createMock(ImmutableDruidDataSource.class);
    EasyMock.expect(dataSource.getSegments()).andReturn(Collections.singletonList(segment)).anyTimes();
    EasyMock.replay(dataSource);
    EasyMock.expect(druidServer.toImmutableDruidServer()).andReturn(new ImmutableDruidServer(new DruidServerMetadata("from", null, null, 5L, ServerType.HISTORICAL, null, 0), 1L, ImmutableMap.of("dummyDataSource", dataSource), 1)).atLeastOnce();
    EasyMock.replay(druidServer);
    DruidServer druidServer2 = EasyMock.createMock(DruidServer.class);
    EasyMock.expect(druidServer2.toImmutableDruidServer()).andReturn(new ImmutableDruidServer(new DruidServerMetadata("to", null, null, 5L, ServerType.HISTORICAL, null, 0), 1L, ImmutableMap.of("dummyDataSource", dataSource), 1)).atLeastOnce();
    EasyMock.replay(druidServer2);
    loadManagementPeons.put("from", loadQueuePeon);
    loadManagementPeons.put("to", loadQueuePeon);
    EasyMock.expect(serverInventoryView.isSegmentLoadedByServer("to", segment)).andReturn(true).once();
    EasyMock.replay(serverInventoryView);
    mockCoordinatorRuntimeParams();
    coordinator.moveSegment(coordinatorRuntimeParams, druidServer.toImmutableDruidServer(), druidServer2.toImmutableDruidServer(), segment, null);
    LoadPeonCallback loadCallback = loadCallbackCapture.getValue();
    loadCallback.execute();
    LoadPeonCallback dropCallback = dropCallbackCapture.getValue();
    dropCallback.execute();
    EasyMock.verify(druidServer, druidServer2, loadQueuePeon, serverInventoryView, metadataRuleManager);
    EasyMock.verify(coordinatorRuntimeParams);
}
Also used : ImmutableDruidDataSource(org.apache.druid.client.ImmutableDruidDataSource) ScheduledExecutorFactory(org.apache.druid.java.util.common.concurrent.ScheduledExecutorFactory) SegmentId(org.apache.druid.timeline.SegmentId) DruidServer(org.apache.druid.client.DruidServer) ImmutableDruidServer(org.apache.druid.client.ImmutableDruidServer) DruidServerMetadata(org.apache.druid.server.coordination.DruidServerMetadata) DataSegment(org.apache.druid.timeline.DataSegment) HashSet(java.util.HashSet) Test(org.junit.Test)
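The test above drives asynchronous completion by hand: it captures the LoadPeonCallback instances handed to the mocked peon, then invokes them after moveSegment. A minimal, self-contained sketch of that EasyMock Capture pattern, using hypothetical Callback and Worker types (not Druid classes):

import org.easymock.Capture;
import org.easymock.EasyMock;

public class CaptureSketch {
    // Hypothetical stand-ins for LoadPeonCallback / LoadQueuePeon.
    interface Callback { void execute(); }
    interface Worker { void submit(String task, Callback cb); }

    public static void main(String[] args) {
        Worker worker = EasyMock.createMock(Worker.class);
        Capture<Callback> captured = Capture.newInstance();
        // Expect one submit() call and capture whatever callback is passed in.
        worker.submit(EasyMock.anyString(), EasyMock.capture(captured));
        EasyMock.expectLastCall().once();
        EasyMock.replay(worker);

        worker.submit("move-segment", () -> System.out.println("callback fired"));

        // Retrieve and fire the captured callback, as testMoveSegment does with the load/drop callbacks.
        captured.getValue().execute();
        EasyMock.verify(worker);
    }
}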

Example 7 with ImmutableDruidServer

use of org.apache.druid.client.ImmutableDruidServer in project druid by druid-io.

the class BalanceSegmentsProfiler method bigProfiler.

public void bigProfiler() {
    Stopwatch watch = Stopwatch.createUnstarted();
    int numSegments = 55000;
    int numServers = 50;
    EasyMock.expect(manager.getAllRules()).andReturn(ImmutableMap.of("test", rules)).anyTimes();
    EasyMock.expect(manager.getRules(EasyMock.anyObject())).andReturn(rules).anyTimes();
    EasyMock.expect(manager.getRulesWithDefault(EasyMock.anyObject())).andReturn(rules).anyTimes();
    EasyMock.replay(manager);
    coordinator.moveSegment(EasyMock.anyObject(), EasyMock.anyObject(), EasyMock.anyObject(), EasyMock.anyObject(), EasyMock.anyObject());
    EasyMock.expectLastCall().anyTimes();
    EasyMock.replay(coordinator);
    Map<String, LoadQueuePeon> peonMap = new HashMap<>();
    List<ServerHolder> serverHolderList = new ArrayList<>();
    List<DataSegment> segments = new ArrayList<>();
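    // Create dummy segments; server 0 will hold all of them, giving the balancer work to do.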
    for (int i = 0; i < numSegments; i++) {
        segments.add(new DataSegment("datasource" + i, new Interval(DateTimes.of("2012-01-01"), (DateTimes.of("2012-01-01")).plusHours(1)), (DateTimes.of("2012-03-01")).toString(), new HashMap<>(), new ArrayList<>(), new ArrayList<>(), NoneShardSpec.instance(), 0, 4L));
    }
    for (int i = 0; i < numServers; i++) {
        ImmutableDruidServer server = EasyMock.createMock(ImmutableDruidServer.class);
        EasyMock.expect(server.getMetadata()).andReturn(null).anyTimes();
        EasyMock.expect(server.getCurrSize()).andReturn(30L).atLeastOnce();
        EasyMock.expect(server.getMaxSize()).andReturn(100L).atLeastOnce();
        EasyMock.expect(server.getTier()).andReturn("normal").anyTimes();
        EasyMock.expect(server.getName()).andReturn(Integer.toString(i)).atLeastOnce();
        EasyMock.expect(server.getHost()).andReturn(Integer.toString(i)).anyTimes();
        if (i == 0) {
            ImmutableDruidServerTests.expectSegments(server, segments);
        } else {
            ImmutableDruidServerTests.expectSegments(server, Collections.emptyList());
        }
        EasyMock.expect(server.getSegment(EasyMock.anyObject())).andReturn(null).anyTimes();
        EasyMock.replay(server);
        LoadQueuePeon peon = new LoadQueuePeonTester();
        peonMap.put(Integer.toString(i), peon);
        serverHolderList.add(new ServerHolder(server, peon));
    }
    DruidCluster druidCluster = DruidClusterBuilder.newBuilder().addTier("normal", serverHolderList.toArray(new ServerHolder[0])).build();
    DruidCoordinatorRuntimeParams params = CoordinatorRuntimeParamsTestHelpers.newBuilder(druidCluster).withLoadManagementPeons(peonMap).withUsedSegmentsInTest(segments).withDynamicConfigs(CoordinatorDynamicConfig.builder().withMaxSegmentsToMove(MAX_SEGMENTS_TO_MOVE).withReplicantLifetime(500).withReplicationThrottleLimit(5).build()).withEmitter(emitter).withDatabaseRuleManager(manager).withReplicationManager(new ReplicationThrottler(2, 500, false)).build();
    BalanceSegmentsTester tester = new BalanceSegmentsTester(coordinator);
    RunRules runner = new RunRules(coordinator);
    watch.start();
    DruidCoordinatorRuntimeParams balanceParams = tester.run(params);
    DruidCoordinatorRuntimeParams assignParams = runner.run(params);
    System.out.println(watch.stop());
}
Also used : RunRules(org.apache.druid.server.coordinator.duty.RunRules) HashMap(java.util.HashMap) Stopwatch(com.google.common.base.Stopwatch) ArrayList(java.util.ArrayList) DataSegment(org.apache.druid.timeline.DataSegment) ImmutableDruidServer(org.apache.druid.client.ImmutableDruidServer) Interval(org.joda.time.Interval)
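The profiler brackets the run with Guava's Stopwatch. A minimal sketch of that timing idiom (runWorkload is a hypothetical placeholder for tester.run(params)):

import com.google.common.base.Stopwatch;
import java.util.concurrent.TimeUnit;

public class StopwatchSketch {
    public static void main(String[] args) {
        Stopwatch watch = Stopwatch.createUnstarted();
        watch.start();
        runWorkload();
        // stop() returns the Stopwatch itself; its toString() is human-readable, e.g. "1.234 s".
        System.out.println(watch.stop());
        // Numeric form, for assertions or metrics.
        System.out.println(watch.elapsed(TimeUnit.MILLISECONDS) + " ms");
    }

    private static void runWorkload() {
        // Hypothetical stand-in for the balancing run being profiled.
        double acc = 0;
        for (int i = 0; i < 1_000_000; i++) {
            acc += Math.sqrt(i);
        }
        System.out.println("workload checksum: " + acc);
    }
}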

Example 8 with ImmutableDruidServer

use of org.apache.druid.client.ImmutableDruidServer in project druid by druid-io.

the class BalanceSegments method balanceServers.

private Pair<Integer, Integer> balanceServers(DruidCoordinatorRuntimeParams params, List<ServerHolder> toMoveFrom, List<ServerHolder> toMoveTo, int maxSegmentsToMove) {
    if (maxSegmentsToMove <= 0) {
        log.debug("maxSegmentsToMove is 0; no balancing work can be performed.");
        return new Pair<>(0, 0);
    } else if (toMoveFrom.isEmpty()) {
        log.debug("toMoveFrom is empty; no balancing work can be performed.");
        return new Pair<>(0, 0);
    } else if (toMoveTo.isEmpty()) {
        log.debug("toMoveTo is empty; no balancing work can be peformed.");
        return new Pair<>(0, 0);
    }
    final BalancerStrategy strategy = params.getBalancerStrategy();
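    // Budget twice as many iterations as segments to move: a picked segment that is not in the
    // used-segments set is skipped without counting toward moved or unmoved.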
    final int maxIterations = 2 * maxSegmentsToMove;
    final int maxToLoad = params.getCoordinatorDynamicConfig().getMaxSegmentsInNodeLoadingQueue();
    int moved = 0, unmoved = 0;
    Iterator<BalancerSegmentHolder> segmentsToMove;
    // The pick method depends on whether the operator has enabled batched segment sampling in the Coordinator dynamic config.
    if (params.getCoordinatorDynamicConfig().useBatchedSegmentSampler()) {
        segmentsToMove = strategy.pickSegmentsToMove(toMoveFrom, params.getBroadcastDatasources(), maxSegmentsToMove);
    } else {
        segmentsToMove = strategy.pickSegmentsToMove(toMoveFrom, params.getBroadcastDatasources(), params.getCoordinatorDynamicConfig().getPercentOfSegmentsToConsiderPerMove());
    }
    // noinspection ForLoopThatDoesntUseLoopVariable
    for (int iter = 0; (moved + unmoved) < maxSegmentsToMove; ++iter) {
        if (!segmentsToMove.hasNext()) {
            log.info("All servers to move segments from are empty, ending run.");
            break;
        }
        final BalancerSegmentHolder segmentToMoveHolder = segmentsToMove.next();
        // DruidCoordinatorRuntimeParams.getUsedSegments originates from SegmentsMetadataManager, i.e. it is the set of
        // segments that *should* be loaded. segmentToMoveHolder.getSegment originates from ServerInventoryView, i.e. it may
        // be any segment that happens to be loaded on some server, even if it is not used. (The Coordinator closes such
        // discrepancies eventually via UnloadUnusedSegments.) Therefore the picked segmentToMoveHolder's segment may not
        // need to be balanced.
        boolean needToBalancePickedSegment = params.getUsedSegments().contains(segmentToMoveHolder.getSegment());
        if (needToBalancePickedSegment) {
            final DataSegment segmentToMove = segmentToMoveHolder.getSegment();
            final ImmutableDruidServer fromServer = segmentToMoveHolder.getFromServer();
            // we want to leave the server the segment is currently on in the list...
            // but filter out replicas that are already serving the segment, and servers with a full load queue
            final List<ServerHolder> toMoveToWithLoadQueueCapacityAndNotServingSegment = toMoveTo.stream().filter(s -> s.getServer().equals(fromServer) || (!s.isServingSegment(segmentToMove) && (maxToLoad <= 0 || s.getNumberOfSegmentsInQueue() < maxToLoad))).collect(Collectors.toList());
            if (!toMoveToWithLoadQueueCapacityAndNotServingSegment.isEmpty()) {
                final ServerHolder destinationHolder = strategy.findNewSegmentHomeBalancer(segmentToMove, toMoveToWithLoadQueueCapacityAndNotServingSegment);
                if (destinationHolder != null && !destinationHolder.getServer().equals(fromServer)) {
                    if (moveSegment(segmentToMoveHolder, destinationHolder.getServer(), params)) {
                        moved++;
                    } else {
                        unmoved++;
                    }
                } else {
                    log.debug("Segment [%s] is 'optimally' placed.", segmentToMove.getId());
                    unmoved++;
                }
            } else {
                log.debug("No valid movement destinations for segment [%s].", segmentToMove.getId());
                unmoved++;
            }
        }
        if (iter >= maxIterations) {
            log.info("Unable to select %d remaining candidate segments out of %d total to balance " + "after %d iterations, ending run.", (maxSegmentsToMove - moved - unmoved), maxSegmentsToMove, iter);
            break;
        }
    }
    return new Pair<>(moved, unmoved);
}
Also used : EmittingLogger(org.apache.druid.java.util.emitter.EmittingLogger) Iterator(java.util.Iterator) SortedSet(java.util.SortedSet) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) StringUtils(org.apache.druid.java.util.common.StringUtils) DruidCoordinatorRuntimeParams(org.apache.druid.server.coordinator.DruidCoordinatorRuntimeParams) DruidCoordinator(org.apache.druid.server.coordinator.DruidCoordinator) LoadPeonCallback(org.apache.druid.server.coordinator.LoadPeonCallback) HashMap(java.util.HashMap) NavigableSet(java.util.NavigableSet) CoordinatorStats(org.apache.druid.server.coordinator.CoordinatorStats) Collectors(java.util.stream.Collectors) Pair(org.apache.druid.java.util.common.Pair) ConcurrentMap(java.util.concurrent.ConcurrentMap) List(java.util.List) Lists(com.google.common.collect.Lists) BalancerStrategy(org.apache.druid.server.coordinator.BalancerStrategy) Map(java.util.Map) DataSegment(org.apache.druid.timeline.DataSegment) BalancerSegmentHolder(org.apache.druid.server.coordinator.BalancerSegmentHolder) LoadQueuePeon(org.apache.druid.server.coordinator.LoadQueuePeon) ServerHolder(org.apache.druid.server.coordinator.ServerHolder) SegmentId(org.apache.druid.timeline.SegmentId) ImmutableDruidServer(org.apache.druid.client.ImmutableDruidServer)
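balanceServers reports its tally through Druid's Pair, which exposes public lhs/rhs fields. A hedged sketch of consuming that result (the caller shown is hypothetical, not Druid's actual call site):

import org.apache.druid.java.util.common.Pair;

public class PairTallySketch {
    public static void main(String[] args) {
        // Stand-in result; in BalanceSegments this comes from balanceServers(...).
        Pair<Integer, Integer> tally = new Pair<>(7, 3);
        int moved = tally.lhs;    // left-hand field: segments for which a move was issued
        int unmoved = tally.rhs;  // right-hand field: segments examined but left in place
        System.out.println("moved=" + moved + ", unmoved=" + unmoved);
    }
}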

Example 9 with ImmutableDruidServer

use of org.apache.druid.client.ImmutableDruidServer in project druid by druid-io.

the class EmitClusterStatsAndMetrics method run.

@Override
public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
    DruidCluster cluster = params.getDruidCluster();
    CoordinatorStats stats = params.getCoordinatorStats();
    ServiceEmitter emitter = params.getEmitter();
    stats.forEachTieredStat("assignedCount", (final String tier, final long count) -> {
        log.info("[%s] : Assigned %s segments among %,d servers", tier, count, cluster.getHistoricalsByTier(tier).size());
        emitTieredStat(emitter, "segment/assigned/count", tier, count);
    });
    stats.forEachTieredStat("droppedCount", (final String tier, final long count) -> {
        log.info("[%s] : Dropped %s segments among %,d servers", tier, count, cluster.getHistoricalsByTier(tier).size());
        emitTieredStat(emitter, "segment/dropped/count", tier, count);
    });
    emitTieredStats(emitter, "segment/cost/raw", stats, "initialCost");
    emitTieredStats(emitter, "segment/cost/normalization", stats, "normalization");
    emitTieredStats(emitter, "segment/moved/count", stats, "movedCount");
    emitTieredStats(emitter, "segment/deleted/count", stats, "deletedCount");
    stats.forEachTieredStat("normalizedInitialCostTimesOneThousand", (final String tier, final long count) -> {
        emitTieredStat(emitter, "segment/cost/normalized", tier, count / 1000d);
    });
    stats.forEachTieredStat("unneededCount", (final String tier, final long count) -> {
        log.info("[%s] : Removed %s unneeded segments among %,d servers", tier, count, cluster.getHistoricalsByTier(tier).size());
        emitTieredStat(emitter, "segment/unneeded/count", tier, count);
    });
    emitter.emit(new ServiceMetricEvent.Builder().build("segment/overShadowed/count", stats.getGlobalStat("overShadowedCount")));
    stats.forEachTieredStat("movedCount", (final String tier, final long count) -> {
        log.info("[%s] : Moved %,d segment(s)", tier, count);
    });
    stats.forEachTieredStat("unmovedCount", (final String tier, final long count) -> {
        log.info("[%s] : Let alone %,d segment(s)", tier, count);
    });
    log.info("Load Queues:");
    for (Iterable<ServerHolder> serverHolders : cluster.getSortedHistoricalsByTier()) {
        for (ServerHolder serverHolder : serverHolders) {
            ImmutableDruidServer server = serverHolder.getServer();
            LoadQueuePeon queuePeon = serverHolder.getPeon();
            log.info("Server[%s, %s, %s] has %,d left to load, %,d left to drop, %,d bytes queued, %,d bytes served.", server.getName(), server.getType().toString(), server.getTier(), queuePeon.getSegmentsToLoad().size(), queuePeon.getSegmentsToDrop().size(), queuePeon.getLoadQueueSize(), server.getCurrSize());
            if (log.isDebugEnabled()) {
                for (DataSegment segment : queuePeon.getSegmentsToLoad()) {
                    log.debug("Segment to load[%s]", segment);
                }
                for (DataSegment segment : queuePeon.getSegmentsToDrop()) {
                    log.debug("Segment to drop[%s]", segment);
                }
            }
            stats.addToTieredStat(TOTAL_CAPACITY, server.getTier(), server.getMaxSize());
            stats.addToTieredStat(TOTAL_HISTORICAL_COUNT, server.getTier(), 1);
        }
    }
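    // Derive the maximum replication factor per tier from the configured load rules.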
    params.getDatabaseRuleManager().getAllRules().values().forEach(rules -> rules.forEach(rule -> {
        if (rule instanceof LoadRule) {
            ((LoadRule) rule).getTieredReplicants().forEach((tier, replica) -> stats.accumulateMaxTieredStat(MAX_REPLICATION_FACTOR, tier, replica));
        }
    }));
    emitTieredStats(emitter, "tier/required/capacity", stats, LoadRule.REQUIRED_CAPACITY);
    emitTieredStats(emitter, "tier/total/capacity", stats, TOTAL_CAPACITY);
    emitTieredStats(emitter, "tier/replication/factor", stats, MAX_REPLICATION_FACTOR);
    emitTieredStats(emitter, "tier/historical/count", stats, TOTAL_HISTORICAL_COUNT);
    // Emit coordinator metrics
    params.getLoadManagementPeons().forEach((final String serverName, final LoadQueuePeon queuePeon) -> {
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.SERVER, serverName).build("segment/loadQueue/size", queuePeon.getLoadQueueSize()));
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.SERVER, serverName).build("segment/loadQueue/failed", queuePeon.getAndResetFailedAssignCount()));
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.SERVER, serverName).build("segment/loadQueue/count", queuePeon.getSegmentsToLoad().size()));
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.SERVER, serverName).build("segment/dropQueue/count", queuePeon.getSegmentsToDrop().size()));
    });
    coordinator.computeNumsUnavailableUsedSegmentsPerDataSource().object2IntEntrySet().forEach((final Object2IntMap.Entry<String> entry) -> {
        final String dataSource = entry.getKey();
        final int numUnavailableUsedSegmentsInDataSource = entry.getIntValue();
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("segment/unavailable/count", numUnavailableUsedSegmentsInDataSource));
    });
    coordinator.computeUnderReplicationCountsPerDataSourcePerTier().forEach((final String tier, final Object2LongMap<String> underReplicationCountsPerDataSource) -> {
        for (final Object2LongMap.Entry<String> entry : underReplicationCountsPerDataSource.object2LongEntrySet()) {
            final String dataSource = entry.getKey();
            final long underReplicationCount = entry.getLongValue();
            emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.TIER, tier).setDimension(DruidMetrics.DATASOURCE, dataSource).build("segment/underReplicated/count", underReplicationCount));
        }
    });
    emitter.emit(new ServiceMetricEvent.Builder().build("compact/task/count", stats.getGlobalStat(CompactSegments.COMPACTION_TASK_COUNT)));
    emitter.emit(new ServiceMetricEvent.Builder().build("compactTask/maxSlot/count", stats.getGlobalStat(CompactSegments.MAX_COMPACTION_TASK_SLOT)));
    emitter.emit(new ServiceMetricEvent.Builder().build("compactTask/availableSlot/count", stats.getGlobalStat(CompactSegments.AVAILABLE_COMPACTION_TASK_SLOT)));
    stats.forEachDataSourceStat(CompactSegments.TOTAL_SIZE_OF_SEGMENTS_AWAITING, (final String dataSource, final long count) -> {
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("segment/waitCompact/bytes", count));
    });
    stats.forEachDataSourceStat(CompactSegments.TOTAL_COUNT_OF_SEGMENTS_AWAITING, (final String dataSource, final long count) -> {
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("segment/waitCompact/count", count));
    });
    stats.forEachDataSourceStat(CompactSegments.TOTAL_INTERVAL_OF_SEGMENTS_AWAITING, (final String dataSource, final long count) -> {
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("interval/waitCompact/count", count));
    });
    stats.forEachDataSourceStat(CompactSegments.TOTAL_SIZE_OF_SEGMENTS_SKIPPED, (final String dataSource, final long count) -> {
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("segment/skipCompact/bytes", count));
    });
    stats.forEachDataSourceStat(CompactSegments.TOTAL_COUNT_OF_SEGMENTS_SKIPPED, (final String dataSource, final long count) -> {
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("segment/skipCompact/count", count));
    });
    stats.forEachDataSourceStat(CompactSegments.TOTAL_INTERVAL_OF_SEGMENTS_SKIPPED, (final String dataSource, final long count) -> {
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("interval/skipCompact/count", count));
    });
    stats.forEachDataSourceStat(CompactSegments.TOTAL_SIZE_OF_SEGMENTS_COMPACTED, (final String dataSource, final long count) -> {
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("segment/compacted/bytes", count));
    });
    stats.forEachDataSourceStat(CompactSegments.TOTAL_COUNT_OF_SEGMENTS_COMPACTED, (final String dataSource, final long count) -> {
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("segment/compacted/count", count));
    });
    stats.forEachDataSourceStat(CompactSegments.TOTAL_INTERVAL_OF_SEGMENTS_COMPACTED, (final String dataSource, final long count) -> {
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("interval/compacted/count", count));
    });
    // Emit segment metrics
    params.getUsedSegmentsTimelinesPerDataSource().forEach((String dataSource, VersionedIntervalTimeline<String, DataSegment> dataSourceWithUsedSegments) -> {
        long totalSizeOfUsedSegments = dataSourceWithUsedSegments.iterateAllObjects().stream().mapToLong(DataSegment::getSize).sum();
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("segment/size", totalSizeOfUsedSegments));
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("segment/count", dataSourceWithUsedSegments.getNumObjects()));
    });
    // Emit coordinator runtime stats
    emitDutyStats(emitter, "coordinator/time", stats, "runtime");
    return params;
}
Also used : ServiceEmitter(org.apache.druid.java.util.emitter.service.ServiceEmitter) Logger(org.apache.druid.java.util.common.logger.Logger) DruidMetrics(org.apache.druid.query.DruidMetrics) VersionedIntervalTimeline(org.apache.druid.timeline.VersionedIntervalTimeline) ServiceMetricEvent(org.apache.druid.java.util.emitter.service.ServiceMetricEvent) DruidCoordinatorRuntimeParams(org.apache.druid.server.coordinator.DruidCoordinatorRuntimeParams) DruidCoordinator(org.apache.druid.server.coordinator.DruidCoordinator) CoordinatorStats(org.apache.druid.server.coordinator.CoordinatorStats) Object2LongMap(it.unimi.dsi.fastutil.objects.Object2LongMap) Object2IntMap(it.unimi.dsi.fastutil.objects.Object2IntMap) DataSegment(org.apache.druid.timeline.DataSegment) DruidCluster(org.apache.druid.server.coordinator.DruidCluster) LoadQueuePeon(org.apache.druid.server.coordinator.LoadQueuePeon) LoadRule(org.apache.druid.server.coordinator.rules.LoadRule) ServerHolder(org.apache.druid.server.coordinator.ServerHolder) ImmutableDruidServer(org.apache.druid.client.ImmutableDruidServer)
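run() repeats one emission idiom throughout: build a ServiceMetricEvent with its dimensions set, then hand it to the emitter. A minimal sketch of that idiom factored into a helper (emitDataSourceMetric is a hypothetical name, not a Druid method):

import org.apache.druid.java.util.emitter.service.ServiceEmitter;
import org.apache.druid.java.util.emitter.service.ServiceMetricEvent;
import org.apache.druid.query.DruidMetrics;

public class EmitSketch {
    // Hypothetical helper wrapping the Builder idiom used in run() above.
    static void emitDataSourceMetric(ServiceEmitter emitter, String dataSource, String metric, long value) {
        emitter.emit(
            new ServiceMetricEvent.Builder()
                .setDimension(DruidMetrics.DATASOURCE, dataSource)
                .build(metric, value)
        );
    }
}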

Example 10 with ImmutableDruidServer

use of org.apache.druid.client.ImmutableDruidServer in project druid by druid-io.

the class SegmentReplicantLookup method make.

public static SegmentReplicantLookup make(DruidCluster cluster, boolean replicateAfterLoadTimeout) {
    final Table<SegmentId, String, Integer> segmentsInCluster = HashBasedTable.create();
    /**
     * For each tier, this stores the number of replicants for all the segments presently queued to load in {@code cluster}.
     * Segments that have failed to load due to the load timeout may not be present in this table if {@code replicateAfterLoadTimeout} is true.
     * This is to enable additional replication of the timed-out segments for improved availability.
     */
    final Table<SegmentId, String, Integer> loadingSegments = HashBasedTable.create();
    for (SortedSet<ServerHolder> serversByType : cluster.getSortedHistoricalsByTier()) {
        for (ServerHolder serverHolder : serversByType) {
            ImmutableDruidServer server = serverHolder.getServer();
            for (DataSegment segment : server.iterateAllSegments()) {
                Integer numReplicants = segmentsInCluster.get(segment.getId(), server.getTier());
                if (numReplicants == null) {
                    numReplicants = 0;
                }
                segmentsInCluster.put(segment.getId(), server.getTier(), numReplicants + 1);
            }
            // Also account for queued segments
            for (DataSegment segment : serverHolder.getPeon().getSegmentsToLoad()) {
                Integer numReplicants = loadingSegments.get(segment.getId(), server.getTier());
                if (numReplicants == null) {
                    numReplicants = 0;
                }
                // Skip counting timed-out segments as loading replicas when replicateAfterLoadTimeout is enabled,
                // so that additional replicas may be assigned elsewhere (see the javadoc on loadingSegments above).
                if (!replicateAfterLoadTimeout || !serverHolder.getPeon().getTimedOutSegments().contains(segment)) {
                    loadingSegments.put(segment.getId(), server.getTier(), numReplicants + 1);
                }
            }
        }
    }
    return new SegmentReplicantLookup(segmentsInCluster, loadingSegments, cluster);
}
Also used : SegmentId(org.apache.druid.timeline.SegmentId) DataSegment(org.apache.druid.timeline.DataSegment) ImmutableDruidServer(org.apache.druid.client.ImmutableDruidServer)
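make() counts replicants in Guava HashBasedTables keyed by (segment id, tier). A minimal sketch of that counting idiom with hypothetical keys:

import com.google.common.collect.HashBasedTable;
import com.google.common.collect.Table;

public class TableCountSketch {
    public static void main(String[] args) {
        Table<String, String, Integer> counts = HashBasedTable.create();
        String[][] observations = {{"segA", "hot"}, {"segA", "hot"}, {"segA", "cold"}};
        for (String[] obs : observations) {
            // get() returns null for an absent cell, hence the null checks in make() above.
            Integer n = counts.get(obs[0], obs[1]);
            counts.put(obs[0], obs[1], (n == null ? 0 : n) + 1);
        }
        System.out.println(counts); // e.g. {segA={hot=2, cold=1}}
    }
}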

Aggregations

ImmutableDruidServer (org.apache.druid.client.ImmutableDruidServer): 23 uses
DataSegment (org.apache.druid.timeline.DataSegment): 17 uses
Test (org.junit.Test): 13 uses
DruidServerMetadata (org.apache.druid.server.coordination.DruidServerMetadata): 9 uses
LoadQueuePeon (org.apache.druid.server.coordinator.LoadQueuePeon): 8 uses
List (java.util.List): 7 uses
ImmutableDruidDataSource (org.apache.druid.client.ImmutableDruidDataSource): 7 uses
ServerHolder (org.apache.druid.server.coordinator.ServerHolder): 7 uses
SegmentId (org.apache.druid.timeline.SegmentId): 7 uses
DruidServer (org.apache.druid.client.DruidServer): 6 uses
CoordinatorStats (org.apache.druid.server.coordinator.CoordinatorStats): 6 uses
ArrayList (java.util.ArrayList): 5 uses
Collectors (java.util.stream.Collectors): 5 uses
DruidCluster (org.apache.druid.server.coordinator.DruidCluster): 5 uses
ImmutableMap (com.google.common.collect.ImmutableMap): 4 uses
Map (java.util.Map): 4 uses
HashMap (java.util.HashMap): 3 uses
Intervals (org.apache.druid.java.util.common.Intervals): 3 uses
ServerType (org.apache.druid.server.coordination.ServerType): 3 uses
EasyMock (org.easymock.EasyMock): 3 uses