Example 6 with VersionedIntervalTimeline

Use of org.apache.druid.timeline.VersionedIntervalTimeline in project druid by druid-io.

From class ClientQuerySegmentWalkerTest, method initWalker.

/**
 * Initialize (or reinitialize) our {@link #walker} and {@link #closer}.
 */
private void initWalker(final Map<String, String> serverProperties, QueryScheduler schedulerForTest) {
    final ObjectMapper jsonMapper = TestHelper.makeJsonMapper();
    final ServerConfig serverConfig = jsonMapper.convertValue(serverProperties, ServerConfig.class);
    final SegmentWrangler segmentWrangler = new MapSegmentWrangler(
        ImmutableMap.<Class<? extends DataSource>, SegmentWrangler>builder()
            .put(InlineDataSource.class, new InlineSegmentWrangler())
            .build()
    );
    final JoinableFactory globalFactory = new JoinableFactory() {

        @Override
        public boolean isDirectlyJoinable(DataSource dataSource) {
            // Guard with instanceof: the walker passes arbitrary DataSource types here,
            // and a blind cast would throw ClassCastException for non-global sources.
            return dataSource instanceof GlobalTableDataSource
                    && ((GlobalTableDataSource) dataSource).getName().equals(GLOBAL);
        }

        @Override
        public Optional<Joinable> build(DataSource dataSource, JoinConditionAnalysis condition) {
            return Optional.empty();
        }
    };
    final JoinableFactory joinableFactory = new MapJoinableFactory(
        ImmutableSet.of(globalFactory, new InlineJoinableFactory()),
        ImmutableMap.<Class<? extends JoinableFactory>, Class<? extends DataSource>>builder()
            .put(InlineJoinableFactory.class, InlineDataSource.class)
            .put(globalFactory.getClass(), GlobalTableDataSource.class)
            .build()
    );
    class CapturingWalker implements QuerySegmentWalker {

        private QuerySegmentWalker baseWalker;

        private ClusterOrLocal how;

        CapturingWalker(QuerySegmentWalker baseWalker, ClusterOrLocal how) {
            this.baseWalker = baseWalker;
            this.how = how;
        }

        @Override
        public <T> QueryRunner<T> getQueryRunnerForIntervals(Query<T> query, Iterable<Interval> intervals) {
            final QueryRunner<T> baseRunner = baseWalker.getQueryRunnerForIntervals(query, intervals);
            return (queryPlus, responseContext) -> {
                log.info("Query (%s): %s", how, queryPlus.getQuery());
                issuedQueries.add(new ExpectedQuery(queryPlus.getQuery(), how));
                return baseRunner.run(queryPlus, responseContext);
            };
        }

        @Override
        public <T> QueryRunner<T> getQueryRunnerForSegments(Query<T> query, Iterable<SegmentDescriptor> specs) {
            final QueryRunner<T> baseRunner = baseWalker.getQueryRunnerForSegments(query, specs);
            return (queryPlus, responseContext) -> {
                log.info("Query (%s): %s", how, queryPlus.getQuery());
                issuedQueries.add(new ExpectedQuery(queryPlus.getQuery(), how));
                return baseRunner.run(queryPlus, responseContext);
            };
        }
    }
    walker = QueryStackTests.createClientQuerySegmentWalker(
        new CapturingWalker(
            QueryStackTests.createClusterQuerySegmentWalker(
                ImmutableMap.<String, VersionedIntervalTimeline<String, ReferenceCountingSegment>>builder()
                    .put(FOO, makeTimeline(FOO, FOO_INLINE))
                    .put(BAR, makeTimeline(BAR, BAR_INLINE))
                    .put(MULTI, makeTimeline(MULTI, MULTI_VALUE_INLINE))
                    .put(GLOBAL, makeTimeline(GLOBAL, FOO_INLINE))
                    .put(ARRAY, makeTimeline(ARRAY, ARRAY_INLINE))
                    .put(ARRAY_UNKNOWN, makeTimeline(ARRAY_UNKNOWN, ARRAY_INLINE_UNKNOWN))
                    .build(),
                joinableFactory,
                conglomerate,
                schedulerForTest
            ),
            ClusterOrLocal.CLUSTER
        ),
        new CapturingWalker(
            QueryStackTests.createLocalQuerySegmentWalker(conglomerate, segmentWrangler, joinableFactory, schedulerForTest),
            ClusterOrLocal.LOCAL
        ),
        conglomerate,
        joinableFactory,
        serverConfig
    );
}
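
The makeTimeline helper referenced above is defined elsewhere in the test class. As a rough sketch of what such a helper could look like, assuming it registers a single segment under a one-partition NumberedShardSpec (the signature, interval, and version below are assumptions for illustration, not the actual test code):

private static VersionedIntervalTimeline<String, ReferenceCountingSegment> makeTimeline(
        final String name,
        final Segment segment) {  // hypothetical parameters; the real helper builds the segment from inline data
    final VersionedIntervalTimeline<String, ReferenceCountingSegment> timeline =
            new VersionedIntervalTimeline<>(Ordering.natural());
    final ShardSpec shardSpec = new NumberedShardSpec(0, 1);
    // Wrap the segment so the timeline hands out reference-counted chunks, then
    // register it under a single interval/version pair.
    timeline.add(
            Intervals.ETERNITY,
            "v0",
            shardSpec.createChunk(ReferenceCountingSegment.wrapSegment(segment, shardSpec))
    );
    return timeline;
}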

Example 7 with VersionedIntervalTimeline

Use of org.apache.druid.timeline.VersionedIntervalTimeline in project druid by druid-io.

From class ServerManager, method getQueryRunnerForSegments.

@Override
public <T> QueryRunner<T> getQueryRunnerForSegments(Query<T> query, Iterable<SegmentDescriptor> specs) {
    final QueryRunnerFactory<T, Query<T>> factory = conglomerate.findFactory(query);
    if (factory == null) {
        final QueryUnsupportedException e = new QueryUnsupportedException(StringUtils.format("Unknown query type, [%s]", query.getClass()));
        log.makeAlert(e, "Error while executing a query[%s]", query.getId()).addData("dataSource", query.getDataSource()).emit();
        throw e;
    }
    final QueryToolChest<T, Query<T>> toolChest = factory.getToolchest();
    final DataSourceAnalysis analysis = DataSourceAnalysis.forDataSource(query.getDataSource());
    final AtomicLong cpuTimeAccumulator = new AtomicLong(0L);
    final VersionedIntervalTimeline<String, ReferenceCountingSegment> timeline;
    final Optional<VersionedIntervalTimeline<String, ReferenceCountingSegment>> maybeTimeline = segmentManager.getTimeline(analysis);
    // Make sure this query type can handle the subquery, if present.
    if (analysis.isQuery() && !toolChest.canPerformSubquery(((QueryDataSource) analysis.getDataSource()).getQuery())) {
        throw new ISE("Cannot handle subquery: %s", analysis.getDataSource());
    }
    if (maybeTimeline.isPresent()) {
        timeline = maybeTimeline.get();
    } else {
        return new ReportTimelineMissingSegmentQueryRunner<>(Lists.newArrayList(specs));
    }
    // segmentMapFn maps each base Segment into a joined Segment if necessary.
    final Function<SegmentReference, SegmentReference> segmentMapFn = joinableFactoryWrapper.createSegmentMapFn(
        analysis.getJoinBaseTableFilter().map(Filters::toFilter).orElse(null),
        analysis.getPreJoinableClauses(),
        cpuTimeAccumulator,
        analysis.getBaseQuery().orElse(query)
    );
    // Compute the join cache key up front so it doesn't need to be recomputed for every segment.
    final Optional<byte[]> cacheKeyPrefix = analysis.isJoin()
        ? joinableFactoryWrapper.computeJoinDataSourceCacheKey(analysis)
        : Optional.of(StringUtils.EMPTY_BYTES);
    final FunctionalIterable<QueryRunner<T>> queryRunners = FunctionalIterable
        .create(specs)
        .transformCat(descriptor -> Collections.singletonList(
            buildQueryRunnerForSegment(query, descriptor, factory, toolChest, timeline, segmentMapFn, cpuTimeAccumulator, cacheKeyPrefix)
        ));
    return CPUTimeMetricQueryRunner.safeBuild(
        new FinalizeResultsQueryRunner<>(
            toolChest.mergeResults(factory.mergeRunners(queryProcessingPool, queryRunners)),
            toolChest
        ),
        toolChest,
        emitter,
        cpuTimeAccumulator,
        true
    );
}
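
The buildQueryRunnerForSegment call is not shown in this excerpt; conceptually, it must resolve each SegmentDescriptor back to a chunk in the timeline before wiring it into a runner. A hedged sketch of that resolution step, using only the lookup API visible in these examples (the real method decorates the resolved segment with caching, metrics, and reference counting rather than returning it directly):

private static ReferenceCountingSegment resolveDescriptor(
        final VersionedIntervalTimeline<String, ReferenceCountingSegment> timeline,
        final SegmentDescriptor descriptor) {
    for (TimelineObjectHolder<String, ReferenceCountingSegment> holder : timeline.lookup(descriptor.getInterval())) {
        // A descriptor pins an exact (interval, version, partition) coordinate.
        if (holder.getVersion().equals(descriptor.getVersion())) {
            for (PartitionChunk<ReferenceCountingSegment> chunk : holder.getObject()) {
                if (chunk.getChunkNumber() == descriptor.getPartitionNumber()) {
                    return chunk.getObject();
                }
            }
        }
    }
    // Not found: the production path reports a missing segment instead of returning null.
    return null;
}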

Example 8 with VersionedIntervalTimeline

Use of org.apache.druid.timeline.VersionedIntervalTimeline in project druid by druid-io.

From class ServerManager, method getQueryRunnerForIntervals.

@Override
public <T> QueryRunner<T> getQueryRunnerForIntervals(Query<T> query, Iterable<Interval> intervals) {
    final DataSourceAnalysis analysis = DataSourceAnalysis.forDataSource(query.getDataSource());
    final VersionedIntervalTimeline<String, ReferenceCountingSegment> timeline;
    final Optional<VersionedIntervalTimeline<String, ReferenceCountingSegment>> maybeTimeline = segmentManager.getTimeline(analysis);
    if (maybeTimeline.isPresent()) {
        timeline = maybeTimeline.get();
    } else {
        // No timeline for this datasource, so there are no segments to query.
        return new NoopQueryRunner<>();
    }
    FunctionalIterable<SegmentDescriptor> segmentDescriptors = FunctionalIterable
        .create(intervals)
        .transformCat(timeline::lookup)
        .transformCat(holder -> {
            if (holder == null) {
                return null;
            }
            return FunctionalIterable
                .create(holder.getObject())
                .transform(partitionChunk -> new SegmentDescriptor(
                    holder.getInterval(),
                    holder.getVersion(),
                    partitionChunk.getChunkNumber()
                ));
        });
    return getQueryRunnerForSegments(query, segmentDescriptors);
}
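
For readers unfamiliar with FunctionalIterable, the same interval-to-descriptor expansion can be written with java.util.stream. This is an illustrative equivalent (assuming lookup never yields null holders), not the production code:

List<SegmentDescriptor> descriptors = StreamSupport.stream(intervals.spliterator(), false)
        .flatMap(interval -> timeline.lookup(interval).stream())
        .flatMap(holder -> StreamSupport.stream(holder.getObject().spliterator(), false)
                .map(chunk -> new SegmentDescriptor(holder.getInterval(), holder.getVersion(), chunk.getChunkNumber())))
        .collect(Collectors.toList());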

Example 9 with VersionedIntervalTimeline

Use of org.apache.druid.timeline.VersionedIntervalTimeline in project druid by druid-io.

From class EmitClusterStatsAndMetrics, method run.

@Override
public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
    DruidCluster cluster = params.getDruidCluster();
    CoordinatorStats stats = params.getCoordinatorStats();
    ServiceEmitter emitter = params.getEmitter();
    stats.forEachTieredStat("assignedCount", (final String tier, final long count) -> {
        log.info("[%s] : Assigned %s segments among %,d servers", tier, count, cluster.getHistoricalsByTier(tier).size());
        emitTieredStat(emitter, "segment/assigned/count", tier, count);
    });
    stats.forEachTieredStat("droppedCount", (final String tier, final long count) -> {
        log.info("[%s] : Dropped %s segments among %,d servers", tier, count, cluster.getHistoricalsByTier(tier).size());
        emitTieredStat(emitter, "segment/dropped/count", tier, count);
    });
    emitTieredStats(emitter, "segment/cost/raw", stats, "initialCost");
    emitTieredStats(emitter, "segment/cost/normalization", stats, "normalization");
    emitTieredStats(emitter, "segment/moved/count", stats, "movedCount");
    emitTieredStats(emitter, "segment/deleted/count", stats, "deletedCount");
    stats.forEachTieredStat("normalizedInitialCostTimesOneThousand", (final String tier, final long count) -> {
        emitTieredStat(emitter, "segment/cost/normalized", tier, count / 1000d);
    });
    stats.forEachTieredStat("unneededCount", (final String tier, final long count) -> {
        log.info("[%s] : Removed %s unneeded segments among %,d servers", tier, count, cluster.getHistoricalsByTier(tier).size());
        emitTieredStat(emitter, "segment/unneeded/count", tier, count);
    });
    emitter.emit(new ServiceMetricEvent.Builder().build("segment/overShadowed/count", stats.getGlobalStat("overShadowedCount")));
    stats.forEachTieredStat("movedCount", (final String tier, final long count) -> {
        log.info("[%s] : Moved %,d segment(s)", tier, count);
    });
    stats.forEachTieredStat("unmovedCount", (final String tier, final long count) -> {
        log.info("[%s] : Let alone %,d segment(s)", tier, count);
    });
    log.info("Load Queues:");
    for (Iterable<ServerHolder> serverHolders : cluster.getSortedHistoricalsByTier()) {
        for (ServerHolder serverHolder : serverHolders) {
            ImmutableDruidServer server = serverHolder.getServer();
            LoadQueuePeon queuePeon = serverHolder.getPeon();
            log.info("Server[%s, %s, %s] has %,d left to load, %,d left to drop, %,d bytes queued, %,d bytes served.", server.getName(), server.getType().toString(), server.getTier(), queuePeon.getSegmentsToLoad().size(), queuePeon.getSegmentsToDrop().size(), queuePeon.getLoadQueueSize(), server.getCurrSize());
            if (log.isDebugEnabled()) {
                for (DataSegment segment : queuePeon.getSegmentsToLoad()) {
                    log.debug("Segment to load[%s]", segment);
                }
                for (DataSegment segment : queuePeon.getSegmentsToDrop()) {
                    log.debug("Segment to drop[%s]", segment);
                }
            }
            stats.addToTieredStat(TOTAL_CAPACITY, server.getTier(), server.getMaxSize());
            stats.addToTieredStat(TOTAL_HISTORICAL_COUNT, server.getTier(), 1);
        }
    }
    params.getDatabaseRuleManager().getAllRules().values().forEach(rules -> rules.forEach(rule -> {
        if (rule instanceof LoadRule) {
            ((LoadRule) rule).getTieredReplicants().forEach((tier, replica) -> stats.accumulateMaxTieredStat(MAX_REPLICATION_FACTOR, tier, replica));
        }
    }));
    emitTieredStats(emitter, "tier/required/capacity", stats, LoadRule.REQUIRED_CAPACITY);
    emitTieredStats(emitter, "tier/total/capacity", stats, TOTAL_CAPACITY);
    emitTieredStats(emitter, "tier/replication/factor", stats, MAX_REPLICATION_FACTOR);
    emitTieredStats(emitter, "tier/historical/count", stats, TOTAL_HISTORICAL_COUNT);
    // Emit coordinator metrics
    params.getLoadManagementPeons().forEach((final String serverName, final LoadQueuePeon queuePeon) -> {
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.SERVER, serverName).build("segment/loadQueue/size", queuePeon.getLoadQueueSize()));
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.SERVER, serverName).build("segment/loadQueue/failed", queuePeon.getAndResetFailedAssignCount()));
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.SERVER, serverName).build("segment/loadQueue/count", queuePeon.getSegmentsToLoad().size()));
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.SERVER, serverName).build("segment/dropQueue/count", queuePeon.getSegmentsToDrop().size()));
    });
    coordinator.computeNumsUnavailableUsedSegmentsPerDataSource().object2IntEntrySet().forEach((final Object2IntMap.Entry<String> entry) -> {
        final String dataSource = entry.getKey();
        final int numUnavailableUsedSegmentsInDataSource = entry.getIntValue();
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("segment/unavailable/count", numUnavailableUsedSegmentsInDataSource));
    });
    coordinator.computeUnderReplicationCountsPerDataSourcePerTier().forEach((final String tier, final Object2LongMap<String> underReplicationCountsPerDataSource) -> {
        for (final Object2LongMap.Entry<String> entry : underReplicationCountsPerDataSource.object2LongEntrySet()) {
            final String dataSource = entry.getKey();
            final long underReplicationCount = entry.getLongValue();
            emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.TIER, tier).setDimension(DruidMetrics.DATASOURCE, dataSource).build("segment/underReplicated/count", underReplicationCount));
        }
    });
    emitter.emit(new ServiceMetricEvent.Builder().build("compact/task/count", stats.getGlobalStat(CompactSegments.COMPACTION_TASK_COUNT)));
    emitter.emit(new ServiceMetricEvent.Builder().build("compactTask/maxSlot/count", stats.getGlobalStat(CompactSegments.MAX_COMPACTION_TASK_SLOT)));
    emitter.emit(new ServiceMetricEvent.Builder().build("compactTask/availableSlot/count", stats.getGlobalStat(CompactSegments.AVAILABLE_COMPACTION_TASK_SLOT)));
    stats.forEachDataSourceStat(CompactSegments.TOTAL_SIZE_OF_SEGMENTS_AWAITING, (final String dataSource, final long count) -> {
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("segment/waitCompact/bytes", count));
    });
    stats.forEachDataSourceStat(CompactSegments.TOTAL_COUNT_OF_SEGMENTS_AWAITING, (final String dataSource, final long count) -> {
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("segment/waitCompact/count", count));
    });
    stats.forEachDataSourceStat(CompactSegments.TOTAL_INTERVAL_OF_SEGMENTS_AWAITING, (final String dataSource, final long count) -> {
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("interval/waitCompact/count", count));
    });
    stats.forEachDataSourceStat(CompactSegments.TOTAL_SIZE_OF_SEGMENTS_SKIPPED, (final String dataSource, final long count) -> {
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("segment/skipCompact/bytes", count));
    });
    stats.forEachDataSourceStat(CompactSegments.TOTAL_COUNT_OF_SEGMENTS_SKIPPED, (final String dataSource, final long count) -> {
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("segment/skipCompact/count", count));
    });
    stats.forEachDataSourceStat(CompactSegments.TOTAL_INTERVAL_OF_SEGMENTS_SKIPPED, (final String dataSource, final long count) -> {
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("interval/skipCompact/count", count));
    });
    stats.forEachDataSourceStat(CompactSegments.TOTAL_SIZE_OF_SEGMENTS_COMPACTED, (final String dataSource, final long count) -> {
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("segment/compacted/bytes", count));
    });
    stats.forEachDataSourceStat(CompactSegments.TOTAL_COUNT_OF_SEGMENTS_COMPACTED, (final String dataSource, final long count) -> {
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("segment/compacted/count", count));
    });
    stats.forEachDataSourceStat(CompactSegments.TOTAL_INTERVAL_OF_SEGMENTS_COMPACTED, (final String dataSource, final long count) -> {
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("interval/compacted/count", count));
    });
    // Emit segment metrics
    params.getUsedSegmentsTimelinesPerDataSource().forEach((String dataSource, VersionedIntervalTimeline<String, DataSegment> dataSourceWithUsedSegments) -> {
        long totalSizeOfUsedSegments = dataSourceWithUsedSegments.iterateAllObjects().stream().mapToLong(DataSegment::getSize).sum();
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("segment/size", totalSizeOfUsedSegments));
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("segment/count", dataSourceWithUsedSegments.getNumObjects()));
    });
    // Emit coordinator runtime stats
    emitDutyStats(emitter, "coordinator/time", stats, "runtime");
    return params;
}
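
The emitTieredStat and emitTieredStats helpers are not shown in this excerpt. Presumably they attach the tier dimension and emit one metric per tier; a minimal sketch of the single-value variant, under that assumption:

private static void emitTieredStat(
        final ServiceEmitter emitter,
        final String metricName,
        final String tier,
        final double value) {
    // Tag the metric with its tier so downstream consumers can group by it.
    emitter.emit(
            new ServiceMetricEvent.Builder()
                    .setDimension(DruidMetrics.TIER, tier)
                    .build(metricName, value)
    );
}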

Example 10 with VersionedIntervalTimeline

Use of org.apache.druid.timeline.VersionedIntervalTimeline in project druid by druid-io.

From class NumberedShardSpecTest, method testVersionedIntervalTimelineBehaviorForNumberedShardSpec.

private void testVersionedIntervalTimelineBehaviorForNumberedShardSpec(List<PartitionChunk<OvershadowableString>> chunks, Set<OvershadowableString> expectedObjects) {
    VersionedIntervalTimeline<String, OvershadowableString> timeline = new VersionedIntervalTimeline<>(Ordering.natural());
    Interval interval = Intervals.of("2000/3000");
    String version = "v1";
    for (PartitionChunk<OvershadowableString> chunk : chunks) {
        timeline.add(interval, version, chunk);
    }
    Set<OvershadowableString> actualObjects = new HashSet<>();
    List<TimelineObjectHolder<String, OvershadowableString>> entries = timeline.lookup(interval);
    for (TimelineObjectHolder<String, OvershadowableString> entry : entries) {
        for (PartitionChunk<OvershadowableString> chunk : entry.getObject()) {
            actualObjects.add(chunk.getObject());
        }
    }
    Assert.assertEquals(expectedObjects, actualObjects);
}
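
The behavior this harness exercises is version overshadowing: when the same interval is registered under two versions, lookup returns only chunks of the most recent version. A hedged sketch of that property (OvershadowableString's constructor is assumed to take an id and a partition number, which may differ from the real test helper):

VersionedIntervalTimeline<String, OvershadowableString> timeline = new VersionedIntervalTimeline<>(Ordering.natural());
Interval interval = Intervals.of("2000/3000");
timeline.add(interval, "v1", new NumberedShardSpec(0, 1).createChunk(new OvershadowableString("old", 0)));
timeline.add(interval, "v2", new NumberedShardSpec(0, 1).createChunk(new OvershadowableString("new", 0)));
for (TimelineObjectHolder<String, OvershadowableString> holder : timeline.lookup(interval)) {
    // Only the "v2" holder is visible; the fully covered "v1" entry is overshadowed.
    Assert.assertEquals("v2", holder.getVersion());
}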
