
Example 6 with ServiceEmitter

Use of org.apache.druid.java.util.emitter.service.ServiceEmitter in project druid by druid-io.

The class CachingQueryRunnerTest, method testCloseAndPopulate.

private void testCloseAndPopulate(List<Result> expectedRes, List<Result> expectedCacheRes, Query query, QueryToolChest toolchest) throws Exception {
    final AssertingClosable closable = new AssertingClosable();
    final Sequence resultSeq = Sequences.wrap(Sequences.simple(expectedRes), new SequenceWrapper() {

        @Override
        public void before() {
            Assert.assertFalse(closable.isClosed());
        }

        @Override
        public void after(boolean isDone, Throwable thrown) {
            closable.close();
        }
    });
    final CountDownLatch cacheMustBePutOnce = new CountDownLatch(1);
    Cache cache = new Cache() {

        private final ConcurrentMap<NamedKey, byte[]> baseMap = new ConcurrentHashMap<>();

        @Override
        public byte[] get(NamedKey key) {
            return baseMap.get(key);
        }

        @Override
        public void put(NamedKey key, byte[] value) {
            baseMap.put(key, value);
            cacheMustBePutOnce.countDown();
        }

        @Override
        public Map<NamedKey, byte[]> getBulk(Iterable<NamedKey> keys) {
            return null;
        }

        @Override
        public void close(String namespace) {
        }

        @Override
        public void close() {
        }

        @Override
        public CacheStats getStats() {
            return null;
        }

        @Override
        public boolean isLocal() {
            return true;
        }

        @Override
        public void doMonitor(ServiceEmitter emitter) {
        }
    };
    byte[] keyPrefix = RandomUtils.nextBytes(10);
    CachingQueryRunner runner = makeCachingQueryRunner(keyPrefix, cache, toolchest, resultSeq);
    CacheStrategy cacheStrategy = toolchest.getCacheStrategy(query);
    Cache.NamedKey cacheKey = CacheUtil.computeSegmentCacheKey(CACHE_ID, SEGMENT_DESCRIPTOR, Bytes.concat(keyPrefix, cacheStrategy.computeCacheKey(query)));
    Assert.assertTrue(runner.canPopulateCache(query, cacheStrategy));
    Sequence res = runner.run(QueryPlus.wrap(query));
    // base sequence is not closed yet
    Assert.assertFalse("sequence must not be closed", closable.isClosed());
    Assert.assertNull("cache must be empty", cache.get(cacheKey));
    List results = res.toList();
    Assert.assertTrue(closable.isClosed());
    Assert.assertEquals(expectedRes.toString(), results.toString());
    // wait for background caching to finish;
    // wait at most 10 seconds before failing, so a hung populate does not block the overall test run
    Assert.assertTrue("cache must be populated", cacheMustBePutOnce.await(10, TimeUnit.SECONDS));
    byte[] cacheValue = cache.get(cacheKey);
    Assert.assertNotNull(cacheValue);
    Function<Object, Result> fn = cacheStrategy.pullFromSegmentLevelCache();
    List<Result> cacheResults = Lists.newArrayList(Iterators.transform(objectMapper.readValues(objectMapper.getFactory().createParser(cacheValue), cacheStrategy.getCacheObjectClazz()), fn));
    Assert.assertEquals(expectedCacheRes.toString(), cacheResults.toString());
}
Also used: ServiceEmitter(org.apache.druid.java.util.emitter.service.ServiceEmitter) SequenceWrapper(org.apache.druid.java.util.common.guava.SequenceWrapper) ConcurrentMap(java.util.concurrent.ConcurrentMap) Sequence(org.apache.druid.java.util.common.guava.Sequence) CountDownLatch(java.util.concurrent.CountDownLatch) Result(org.apache.druid.query.Result) List(java.util.List) ArrayList(java.util.ArrayList) CacheStrategy(org.apache.druid.query.CacheStrategy) MapCache(org.apache.druid.client.cache.MapCache) Cache(org.apache.druid.client.cache.Cache)
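
The latch-based handshake in this test is the part worth copying: the cache counts down from a background thread, and the test thread awaits with a timeout instead of sleeping. Below is a minimal, self-contained sketch of that pattern in plain Java with hypothetical names; it is not Druid's CachingQueryRunner API.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class LatchHandshakeSketch {
    public static void main(String[] args) throws InterruptedException {
        final CountDownLatch populated = new CountDownLatch(1);
        ExecutorService background = Executors.newSingleThreadExecutor();
        try {
            // Stands in for CachingQueryRunner populating the cache off-thread.
            background.submit(() -> {
                // ... write the computed value into the cache here ...
                populated.countDown(); // signal the waiting test thread
            });
            // Bounded wait: fail fast instead of hanging the whole suite.
            if (!populated.await(10, TimeUnit.SECONDS)) {
                throw new AssertionError("cache was never populated");
            }
        } finally {
            background.shutdownNow();
        }
    }
}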

Example 7 with ServiceEmitter

Use of org.apache.druid.java.util.emitter.service.ServiceEmitter in project druid by druid-io.

The class SqlResourceTest, method setUp.

@Before
public void setUp() throws Exception {
    final QueryScheduler scheduler = new QueryScheduler(5, ManualQueryPrioritizationStrategy.INSTANCE, new HiLoQueryLaningStrategy(40), new ServerConfig()) {

        @Override
        public <T> Sequence<T> run(Query<?> query, Sequence<T> resultSequence) {
            return super.run(query, new LazySequence<T>(() -> {
                if (sleep) {
                    try {
                        // pretend to be a query that is waiting on results
                        Thread.sleep(500);
                    } catch (InterruptedException ignored) {
                    }
                }
                return resultSequence;
            }));
        }
    };
    executorService = MoreExecutors.listeningDecorator(Execs.multiThreaded(8, "test_sql_resource_%s"));
    walker = CalciteTests.createMockWalker(conglomerate, temporaryFolder.newFolder(), scheduler);
    final PlannerConfig plannerConfig = new PlannerConfig() {

        @Override
        public boolean shouldSerializeComplexValues() {
            return false;
        }
    };
    final DruidSchemaCatalog rootSchema = CalciteTests.createMockRootSchema(conglomerate, walker, plannerConfig, CalciteTests.TEST_AUTHORIZER_MAPPER);
    final DruidOperatorTable operatorTable = CalciteTests.createOperatorTable();
    final ExprMacroTable macroTable = CalciteTests.createExprMacroTable();
    req = EasyMock.createStrictMock(HttpServletRequest.class);
    EasyMock.expect(req.getRemoteAddr()).andReturn(null).once();
    EasyMock.expect(req.getAttribute(AuthConfig.DRUID_AUTHENTICATION_RESULT)).andReturn(CalciteTests.REGULAR_USER_AUTH_RESULT).anyTimes();
    EasyMock.expect(req.getAttribute(AuthConfig.DRUID_ALLOW_UNSECURED_PATH)).andReturn(null).anyTimes();
    EasyMock.expect(req.getAttribute(AuthConfig.DRUID_AUTHORIZATION_CHECKED)).andReturn(null).anyTimes();
    EasyMock.expect(req.getAttribute(AuthConfig.DRUID_AUTHENTICATION_RESULT)).andReturn(CalciteTests.REGULAR_USER_AUTH_RESULT).anyTimes();
    req.setAttribute(AuthConfig.DRUID_AUTHORIZATION_CHECKED, true);
    EasyMock.expectLastCall().anyTimes();
    EasyMock.expect(req.getAttribute(AuthConfig.DRUID_AUTHENTICATION_RESULT)).andReturn(CalciteTests.REGULAR_USER_AUTH_RESULT).anyTimes();
    EasyMock.replay(req);
    testRequestLogger = new TestRequestLogger();
    final PlannerFactory plannerFactory = new PlannerFactory(rootSchema, CalciteTests.createMockQueryMakerFactory(walker, conglomerate), operatorTable, macroTable, plannerConfig, CalciteTests.TEST_AUTHORIZER_MAPPER, CalciteTests.getJsonMapper(), CalciteTests.DRUID_SCHEMA_NAME);
    lifecycleManager = new SqlLifecycleManager() {

        @Override
        public void add(String sqlQueryId, SqlLifecycle lifecycle) {
            super.add(sqlQueryId, lifecycle);
            if (lifecycleAddLatch != null) {
                lifecycleAddLatch.countDown();
            }
        }
    };
    final ServiceEmitter emitter = new NoopServiceEmitter();
    sqlLifecycleFactory = new SqlLifecycleFactory(plannerFactory, emitter, testRequestLogger, scheduler) {

        @Override
        public SqlLifecycle factorize() {
            return new TestSqlLifecycle(plannerFactory, emitter, testRequestLogger, scheduler, System.currentTimeMillis(), System.nanoTime(), validateAndAuthorizeLatchSupplier, planLatchSupplier, executeLatchSupplier, sequenceMapFnSupplier);
        }
    };
    resource = new SqlResource(JSON_MAPPER, CalciteTests.TEST_AUTHORIZER_MAPPER, sqlLifecycleFactory, lifecycleManager, new ServerConfig());
}
Also used: SqlLifecycleManager(org.apache.druid.sql.SqlLifecycleManager) ServiceEmitter(org.apache.druid.java.util.emitter.service.ServiceEmitter) NoopServiceEmitter(org.apache.druid.server.metrics.NoopServiceEmitter) QueryScheduler(org.apache.druid.server.QueryScheduler) BaseQuery(org.apache.druid.query.BaseQuery) Query(org.apache.druid.query.Query) HiLoQueryLaningStrategy(org.apache.druid.server.scheduling.HiLoQueryLaningStrategy) SqlLifecycle(org.apache.druid.sql.SqlLifecycle) Sequence(org.apache.druid.java.util.common.guava.Sequence) LazySequence(org.apache.druid.java.util.common.guava.LazySequence) QueryInterruptedException(org.apache.druid.query.QueryInterruptedException) DruidOperatorTable(org.apache.druid.sql.calcite.planner.DruidOperatorTable) ExprMacroTable(org.apache.druid.math.expr.ExprMacroTable) TestRequestLogger(org.apache.druid.server.log.TestRequestLogger) HttpServletRequest(javax.servlet.http.HttpServletRequest) ServerConfig(org.apache.druid.server.initialization.ServerConfig) PlannerConfig(org.apache.druid.sql.calcite.planner.PlannerConfig) DruidSchemaCatalog(org.apache.druid.sql.calcite.schema.DruidSchemaCatalog) PlannerFactory(org.apache.druid.sql.calcite.planner.PlannerFactory) SqlLifecycleFactory(org.apache.druid.sql.SqlLifecycleFactory) Before(org.junit.Before)
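
The request mock above follows EasyMock's record/replay lifecycle: expectations are recorded on a strict mock, replay() arms it, and verify() can later assert that every recorded expectation was met. Here is a minimal sketch against a hypothetical interface; the interface and values are illustrative, not part of the Druid test.

import org.easymock.EasyMock;

public class EasyMockLifecycleSketch {
    // Hypothetical collaborator; HttpServletRequest plays this role above.
    interface Greeter {
        String greet(String name);
    }

    public static void main(String[] args) {
        Greeter mock = EasyMock.createStrictMock(Greeter.class);
        // Record phase: this call may happen any number of times.
        EasyMock.expect(mock.greet("druid")).andReturn("hello druid").anyTimes();
        // Replay phase: switch the mock from recording to playback.
        EasyMock.replay(mock);
        System.out.println(mock.greet("druid"));
        // Verification: assert the recorded expectations were satisfied.
        EasyMock.verify(mock);
    }
}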

Example 8 with ServiceEmitter

Use of org.apache.druid.java.util.emitter.service.ServiceEmitter in project druid by druid-io.

The class DruidCoordinatorTest, method testInitializeCompactSegmentsDutyWhenCustomDutyGroupDoesNotContainsCompactSegments.

@Test
public void testInitializeCompactSegmentsDutyWhenCustomDutyGroupDoesNotContainsCompactSegments() {
    CoordinatorCustomDutyGroup group = new CoordinatorCustomDutyGroup("group1", Duration.standardSeconds(1), ImmutableList.of(new KillSupervisorsCustomDuty(new Duration("PT1S"), null)));
    CoordinatorCustomDutyGroups customDutyGroups = new CoordinatorCustomDutyGroups(ImmutableSet.of(group));
    coordinator = new DruidCoordinator(druidCoordinatorConfig, new ZkPathsConfig() {

        @Override
        public String getBase() {
            return "druid";
        }
    }, null, segmentsMetadataManager, serverInventoryView, metadataRuleManager, () -> curator, serviceEmitter, scheduledExecutorFactory, null, null, new NoopServiceAnnouncer() {

        @Override
        public void announce(DruidNode node) {
            // count down when this coordinator becomes the leader
            leaderAnnouncerLatch.countDown();
        }

        @Override
        public void unannounce(DruidNode node) {
            leaderUnannouncerLatch.countDown();
        }
    }, druidNode, loadManagementPeons, ImmutableSet.of(), new HashSet<>(), customDutyGroups, new CostBalancerStrategyFactory(), EasyMock.createNiceMock(LookupCoordinatorManager.class), new TestDruidLeaderSelector(), null, ZkEnablementConfig.ENABLED);
    // Since CompactSegments is not enabled in the custom duty group, it must be created in IndexingServiceDuties
    List<CoordinatorDuty> indexingDuties = coordinator.makeIndexingServiceDuties();
    Assert.assertTrue(indexingDuties.stream().anyMatch(coordinatorDuty -> coordinatorDuty instanceof CompactSegments));
    // CompactSegments should not exist in the custom duty group
    List<CompactSegments> compactSegmentsDutyFromCustomGroups = coordinator.getCompactSegmentsDutyFromCustomGroups();
    Assert.assertTrue(compactSegmentsDutyFromCustomGroups.isEmpty());
    // CompactSegments returned by this method should be created using the DruidCoordinatorConfig in the DruidCoordinator
    CompactSegments duty = coordinator.initializeCompactSegmentsDuty();
    Assert.assertNotNull(duty);
    Assert.assertEquals(druidCoordinatorConfig.getCompactionSkipLockedIntervals(), duty.isSkipLockedIntervals());
}
Also used: LookupCoordinatorManager(org.apache.druid.server.lookup.cache.LookupCoordinatorManager) IntervalLoadRule(org.apache.druid.server.coordinator.rules.IntervalLoadRule) DruidServer(org.apache.druid.client.DruidServer) JacksonConfigManager(org.apache.druid.common.config.JacksonConfigManager) ForeverBroadcastDistributionRule(org.apache.druid.server.coordinator.rules.ForeverBroadcastDistributionRule) Event(org.apache.druid.java.util.emitter.core.Event) After(org.junit.After) Map(java.util.Map) NoopServiceAnnouncer(org.apache.druid.curator.discovery.NoopServiceAnnouncer) ServerType(org.apache.druid.server.coordination.ServerType) ForeverLoadRule(org.apache.druid.server.coordinator.rules.ForeverLoadRule) CoordinatorCustomDutyGroups(org.apache.druid.server.coordinator.duty.CoordinatorCustomDutyGroups) ImmutableSet(com.google.common.collect.ImmutableSet) Execs(org.apache.druid.java.util.common.concurrent.Execs) ImmutableMap(com.google.common.collect.ImmutableMap) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) CoordinatorCustomDutyGroup(org.apache.druid.server.coordinator.duty.CoordinatorCustomDutyGroup) Rule(org.apache.druid.server.coordinator.rules.Rule) CuratorUtils(org.apache.druid.curator.CuratorUtils) Executors(java.util.concurrent.Executors) CountDownLatch(java.util.concurrent.CountDownLatch) List(java.util.List) CuratorFramework(org.apache.curator.framework.CuratorFramework) KillSupervisorsCustomDuty(org.apache.druid.server.coordinator.duty.KillSupervisorsCustomDuty) ServiceEmitter(org.apache.druid.java.util.emitter.service.ServiceEmitter) PathChildrenCacheEvent(org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent) DruidLeaderSelector(org.apache.druid.discovery.DruidLeaderSelector) DataSegment(org.apache.druid.timeline.DataSegment) CoordinatorDuty(org.apache.druid.server.coordinator.duty.CoordinatorDuty) SegmentId(org.apache.druid.timeline.SegmentId) ListeningExecutorService(com.google.common.util.concurrent.ListeningExecutorService) DruidDataSource(org.apache.druid.client.DruidDataSource) MetadataRuleManager(org.apache.druid.metadata.MetadataRuleManager) Intervals(org.apache.druid.java.util.common.Intervals) ZkPathsConfig(org.apache.druid.server.initialization.ZkPathsConfig) Duration(org.joda.time.Duration) ZkEnablementConfig(org.apache.druid.curator.ZkEnablementConfig) DataSourcesSnapshot(org.apache.druid.client.DataSourcesSnapshot) AtomicReference(java.util.concurrent.atomic.AtomicReference) ScheduledExecutorFactory(org.apache.druid.java.util.common.concurrent.ScheduledExecutorFactory) ConcurrentMap(java.util.concurrent.ConcurrentMap) HashSet(java.util.HashSet) Object2LongMap(it.unimi.dsi.fastutil.objects.Object2LongMap) ImmutableList(com.google.common.collect.ImmutableList) DruidServerMetadata(org.apache.druid.server.coordination.DruidServerMetadata) CompactSegments(org.apache.druid.server.coordinator.duty.CompactSegments) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) ImmutableDruidDataSource(org.apache.druid.client.ImmutableDruidDataSource) ImmutableDruidServer(org.apache.druid.client.ImmutableDruidServer) Nullable(javax.annotation.Nullable) Before(org.junit.Before) Capture(org.easymock.Capture) SegmentsMetadataManager(org.apache.druid.metadata.SegmentsMetadataManager) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) Test(org.junit.Test) EasyMock(org.easymock.EasyMock) DefaultObjectMapper(org.apache.druid.jackson.DefaultObjectMapper) CuratorTestBase(org.apache.druid.curator.CuratorTestBase) Object2IntMap(it.unimi.dsi.fastutil.objects.Object2IntMap) CoordinatorCustomDuty(org.apache.druid.server.coordinator.duty.CoordinatorCustomDuty) DruidNode(org.apache.druid.server.DruidNode) PathChildrenCache(org.apache.curator.framework.recipes.cache.PathChildrenCache) Assert(org.junit.Assert) Collections(java.util.Collections) BatchServerInventoryView(org.apache.druid.client.BatchServerInventoryView)
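
Overriding a single getter through an anonymous subclass, as done with ZkPathsConfig above, is a lightweight alternative to a full mock when the config class has usable defaults. A generic sketch of the pattern with a hypothetical config class (not Druid's):

public class ConfigOverrideSketch {
    // Hypothetical config with a default; ZkPathsConfig plays this role above.
    static class PathsConfig {
        public String getBase() {
            return "default-base";
        }
    }

    public static void main(String[] args) {
        // Override exactly the one method the test cares about.
        PathsConfig testConfig = new PathsConfig() {
            @Override
            public String getBase() {
                return "druid";
            }
        };
        System.out.println(testConfig.getBase()); // prints "druid"
    }
}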

Example 9 with ServiceEmitter

Use of org.apache.druid.java.util.emitter.service.ServiceEmitter in project druid by druid-io.

The class EmitClusterStatsAndMetrics, method run.

@Override
public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
    DruidCluster cluster = params.getDruidCluster();
    CoordinatorStats stats = params.getCoordinatorStats();
    ServiceEmitter emitter = params.getEmitter();
    stats.forEachTieredStat("assignedCount", (final String tier, final long count) -> {
        log.info("[%s] : Assigned %s segments among %,d servers", tier, count, cluster.getHistoricalsByTier(tier).size());
        emitTieredStat(emitter, "segment/assigned/count", tier, count);
    });
    stats.forEachTieredStat("droppedCount", (final String tier, final long count) -> {
        log.info("[%s] : Dropped %s segments among %,d servers", tier, count, cluster.getHistoricalsByTier(tier).size());
        emitTieredStat(emitter, "segment/dropped/count", tier, count);
    });
    emitTieredStats(emitter, "segment/cost/raw", stats, "initialCost");
    emitTieredStats(emitter, "segment/cost/normalization", stats, "normalization");
    emitTieredStats(emitter, "segment/moved/count", stats, "movedCount");
    emitTieredStats(emitter, "segment/deleted/count", stats, "deletedCount");
    stats.forEachTieredStat("normalizedInitialCostTimesOneThousand", (final String tier, final long count) -> {
        emitTieredStat(emitter, "segment/cost/normalized", tier, count / 1000d);
    });
    stats.forEachTieredStat("unneededCount", (final String tier, final long count) -> {
        log.info("[%s] : Removed %s unneeded segments among %,d servers", tier, count, cluster.getHistoricalsByTier(tier).size());
        emitTieredStat(emitter, "segment/unneeded/count", tier, count);
    });
    emitter.emit(new ServiceMetricEvent.Builder().build("segment/overShadowed/count", stats.getGlobalStat("overShadowedCount")));
    stats.forEachTieredStat("movedCount", (final String tier, final long count) -> {
        log.info("[%s] : Moved %,d segment(s)", tier, count);
    });
    stats.forEachTieredStat("unmovedCount", (final String tier, final long count) -> {
        log.info("[%s] : Let alone %,d segment(s)", tier, count);
    });
    log.info("Load Queues:");
    for (Iterable<ServerHolder> serverHolders : cluster.getSortedHistoricalsByTier()) {
        for (ServerHolder serverHolder : serverHolders) {
            ImmutableDruidServer server = serverHolder.getServer();
            LoadQueuePeon queuePeon = serverHolder.getPeon();
            log.info("Server[%s, %s, %s] has %,d left to load, %,d left to drop, %,d bytes queued, %,d bytes served.", server.getName(), server.getType().toString(), server.getTier(), queuePeon.getSegmentsToLoad().size(), queuePeon.getSegmentsToDrop().size(), queuePeon.getLoadQueueSize(), server.getCurrSize());
            if (log.isDebugEnabled()) {
                for (DataSegment segment : queuePeon.getSegmentsToLoad()) {
                    log.debug("Segment to load[%s]", segment);
                }
                for (DataSegment segment : queuePeon.getSegmentsToDrop()) {
                    log.debug("Segment to drop[%s]", segment);
                }
            }
            stats.addToTieredStat(TOTAL_CAPACITY, server.getTier(), server.getMaxSize());
            stats.addToTieredStat(TOTAL_HISTORICAL_COUNT, server.getTier(), 1);
        }
    }
    params.getDatabaseRuleManager().getAllRules().values().forEach(rules -> rules.forEach(rule -> {
        if (rule instanceof LoadRule) {
            ((LoadRule) rule).getTieredReplicants().forEach((tier, replica) -> stats.accumulateMaxTieredStat(MAX_REPLICATION_FACTOR, tier, replica));
        }
    }));
    emitTieredStats(emitter, "tier/required/capacity", stats, LoadRule.REQUIRED_CAPACITY);
    emitTieredStats(emitter, "tier/total/capacity", stats, TOTAL_CAPACITY);
    emitTieredStats(emitter, "tier/replication/factor", stats, MAX_REPLICATION_FACTOR);
    emitTieredStats(emitter, "tier/historical/count", stats, TOTAL_HISTORICAL_COUNT);
    // Emit coordinator metrics
    params.getLoadManagementPeons().forEach((final String serverName, final LoadQueuePeon queuePeon) -> {
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.SERVER, serverName).build("segment/loadQueue/size", queuePeon.getLoadQueueSize()));
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.SERVER, serverName).build("segment/loadQueue/failed", queuePeon.getAndResetFailedAssignCount()));
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.SERVER, serverName).build("segment/loadQueue/count", queuePeon.getSegmentsToLoad().size()));
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.SERVER, serverName).build("segment/dropQueue/count", queuePeon.getSegmentsToDrop().size()));
    });
    coordinator.computeNumsUnavailableUsedSegmentsPerDataSource().object2IntEntrySet().forEach((final Object2IntMap.Entry<String> entry) -> {
        final String dataSource = entry.getKey();
        final int numUnavailableUsedSegmentsInDataSource = entry.getIntValue();
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("segment/unavailable/count", numUnavailableUsedSegmentsInDataSource));
    });
    coordinator.computeUnderReplicationCountsPerDataSourcePerTier().forEach((final String tier, final Object2LongMap<String> underReplicationCountsPerDataSource) -> {
        for (final Object2LongMap.Entry<String> entry : underReplicationCountsPerDataSource.object2LongEntrySet()) {
            final String dataSource = entry.getKey();
            final long underReplicationCount = entry.getLongValue();
            emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.TIER, tier).setDimension(DruidMetrics.DATASOURCE, dataSource).build("segment/underReplicated/count", underReplicationCount));
        }
    });
    emitter.emit(new ServiceMetricEvent.Builder().build("compact/task/count", stats.getGlobalStat(CompactSegments.COMPACTION_TASK_COUNT)));
    emitter.emit(new ServiceMetricEvent.Builder().build("compactTask/maxSlot/count", stats.getGlobalStat(CompactSegments.MAX_COMPACTION_TASK_SLOT)));
    emitter.emit(new ServiceMetricEvent.Builder().build("compactTask/availableSlot/count", stats.getGlobalStat(CompactSegments.AVAILABLE_COMPACTION_TASK_SLOT)));
    stats.forEachDataSourceStat(CompactSegments.TOTAL_SIZE_OF_SEGMENTS_AWAITING, (final String dataSource, final long count) -> {
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("segment/waitCompact/bytes", count));
    });
    stats.forEachDataSourceStat(CompactSegments.TOTAL_COUNT_OF_SEGMENTS_AWAITING, (final String dataSource, final long count) -> {
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("segment/waitCompact/count", count));
    });
    stats.forEachDataSourceStat(CompactSegments.TOTAL_INTERVAL_OF_SEGMENTS_AWAITING, (final String dataSource, final long count) -> {
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("interval/waitCompact/count", count));
    });
    stats.forEachDataSourceStat(CompactSegments.TOTAL_SIZE_OF_SEGMENTS_SKIPPED, (final String dataSource, final long count) -> {
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("segment/skipCompact/bytes", count));
    });
    stats.forEachDataSourceStat(CompactSegments.TOTAL_COUNT_OF_SEGMENTS_SKIPPED, (final String dataSource, final long count) -> {
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("segment/skipCompact/count", count));
    });
    stats.forEachDataSourceStat(CompactSegments.TOTAL_INTERVAL_OF_SEGMENTS_SKIPPED, (final String dataSource, final long count) -> {
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("interval/skipCompact/count", count));
    });
    stats.forEachDataSourceStat(CompactSegments.TOTAL_SIZE_OF_SEGMENTS_COMPACTED, (final String dataSource, final long count) -> {
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("segment/compacted/bytes", count));
    });
    stats.forEachDataSourceStat(CompactSegments.TOTAL_COUNT_OF_SEGMENTS_COMPACTED, (final String dataSource, final long count) -> {
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("segment/compacted/count", count));
    });
    stats.forEachDataSourceStat(CompactSegments.TOTAL_INTERVAL_OF_SEGMENTS_COMPACTED, (final String dataSource, final long count) -> {
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("interval/compacted/count", count));
    });
    // Emit segment metrics
    params.getUsedSegmentsTimelinesPerDataSource().forEach((String dataSource, VersionedIntervalTimeline<String, DataSegment> dataSourceWithUsedSegments) -> {
        long totalSizeOfUsedSegments = dataSourceWithUsedSegments.iterateAllObjects().stream().mapToLong(DataSegment::getSize).sum();
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("segment/size", totalSizeOfUsedSegments));
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("segment/count", dataSourceWithUsedSegments.getNumObjects()));
    });
    // Emit coordinator runtime stats
    emitDutyStats(emitter, "coordinator/time", stats, "runtime");
    return params;
}
Also used: ServiceEmitter(org.apache.druid.java.util.emitter.service.ServiceEmitter) Logger(org.apache.druid.java.util.common.logger.Logger) DruidMetrics(org.apache.druid.query.DruidMetrics) VersionedIntervalTimeline(org.apache.druid.timeline.VersionedIntervalTimeline) ServiceMetricEvent(org.apache.druid.java.util.emitter.service.ServiceMetricEvent) DruidCoordinatorRuntimeParams(org.apache.druid.server.coordinator.DruidCoordinatorRuntimeParams) DruidCoordinator(org.apache.druid.server.coordinator.DruidCoordinator) CoordinatorStats(org.apache.druid.server.coordinator.CoordinatorStats) Object2LongMap(it.unimi.dsi.fastutil.objects.Object2LongMap) Object2IntMap(it.unimi.dsi.fastutil.objects.Object2IntMap) DataSegment(org.apache.druid.timeline.DataSegment) DruidCluster(org.apache.druid.server.coordinator.DruidCluster) LoadQueuePeon(org.apache.druid.server.coordinator.LoadQueuePeon) LoadRule(org.apache.druid.server.coordinator.rules.LoadRule) ServerHolder(org.apache.druid.server.coordinator.ServerHolder) ImmutableDruidServer(org.apache.druid.client.ImmutableDruidServer)
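
A duty like this is typically tested by swapping in an emitter that records events instead of shipping them. The sketch below shows a capturing emitter with hypothetical Event and Emitter types; Druid's own tests use classes such as NoopServiceEmitter, so treat these names as illustrative only.

import java.util.ArrayList;
import java.util.List;

public class CapturingEmitterSketch {
    // Hypothetical stand-ins for ServiceMetricEvent and ServiceEmitter.
    record MetricEvent(String metric, Number value) {}

    interface Emitter {
        void emit(MetricEvent event);
    }

    static class CapturingEmitter implements Emitter {
        final List<MetricEvent> events = new ArrayList<>();

        @Override
        public void emit(MetricEvent event) {
            events.add(event); // record instead of sending over the wire
        }
    }

    public static void main(String[] args) {
        CapturingEmitter emitter = new CapturingEmitter();
        emitter.emit(new MetricEvent("segment/assigned/count", 3));
        // The test can now assert on exactly what was emitted.
        if (emitter.events.size() != 1) {
            throw new AssertionError("expected one emitted event");
        }
        System.out.println(emitter.events.get(0).metric());
    }
}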

Example 10 with ServiceEmitter

Use of org.apache.druid.java.util.emitter.service.ServiceEmitter in project druid by druid-io.

The class KillRules, method run.

@Override
public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
    long currentTimeMillis = System.currentTimeMillis();
    if ((lastKillTime + period) < currentTimeMillis) {
        lastKillTime = currentTimeMillis;
        long timestamp = currentTimeMillis - retainDuration;
        try {
            int ruleRemoved = params.getDatabaseRuleManager().removeRulesForEmptyDatasourcesOlderThan(timestamp);
            ServiceEmitter emitter = params.getEmitter();
            emitter.emit(new ServiceMetricEvent.Builder().build("metadata/kill/rule/count", ruleRemoved));
            log.info("Finished running KillRules duty. Removed %,d rule", ruleRemoved);
        } catch (Exception e) {
            log.error(e, "Failed to kill rules metadata");
        }
    }
    return params;
}
Also used: ServiceEmitter(org.apache.druid.java.util.emitter.service.ServiceEmitter)
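
The time check in run() is a simple elapsed-period guard: the duty does real work at most once per period and silently skips otherwise. The same guard in isolation, as a sketch rather than Druid code:

public class PeriodGuardSketch {
    private final long periodMillis;
    private long lastRunMillis = 0L;

    PeriodGuardSketch(long periodMillis) {
        this.periodMillis = periodMillis;
    }

    // Returns true (and records the run) only when a full period has elapsed.
    synchronized boolean shouldRun(long nowMillis) {
        if (lastRunMillis + periodMillis < nowMillis) {
            lastRunMillis = nowMillis;
            return true;
        }
        return false;
    }

    public static void main(String[] args) {
        PeriodGuardSketch guard = new PeriodGuardSketch(1_000L);
        long t0 = System.currentTimeMillis();
        System.out.println(guard.shouldRun(t0));         // true: first run
        System.out.println(guard.shouldRun(t0 + 500));   // false: period not yet elapsed
        System.out.println(guard.shouldRun(t0 + 1_500)); // true: period elapsed
    }
}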

Aggregations

ServiceEmitter (org.apache.druid.java.util.emitter.service.ServiceEmitter): 45
Test (org.junit.Test): 24
Before (org.junit.Before): 9
Event (org.apache.druid.java.util.emitter.core.Event): 8
CachingEmitter (org.apache.druid.query.CachingEmitter): 8
DefaultQueryMetricsTest (org.apache.druid.query.DefaultQueryMetricsTest): 8
ArrayList (java.util.ArrayList): 7
NoopServiceEmitter (org.apache.druid.server.metrics.NoopServiceEmitter): 7
Map (java.util.Map): 6
ConcurrentMap (java.util.concurrent.ConcurrentMap): 6
ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper): 5
ImmutableMap (com.google.common.collect.ImmutableMap): 5
List (java.util.List): 5
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 5
CountDownLatch (java.util.concurrent.CountDownLatch): 5
ServiceMetricEvent (org.apache.druid.java.util.emitter.service.ServiceMetricEvent): 5
DruidNode (org.apache.druid.server.DruidNode): 5
DataSegment (org.apache.druid.timeline.DataSegment): 5
Interval (org.joda.time.Interval): 5
ImmutableSet (com.google.common.collect.ImmutableSet): 4