
Example 1 with CountingMap

Use of io.druid.collections.CountingMap in project druid by druid-io.

From class DruidCoordinatorTest, method testCoordinatorRun:

@Test(timeout = 60_000L)
public void testCoordinatorRun() throws Exception {
    String dataSource = "dataSource1";
    String tier = "hot";
    // Setup MetadataRuleManager
    Rule foreverLoadRule = new ForeverLoadRule(ImmutableMap.of(tier, 2));
    EasyMock.expect(metadataRuleManager.getRulesWithDefault(EasyMock.anyString())).andReturn(ImmutableList.of(foreverLoadRule)).atLeastOnce();
    metadataRuleManager.stop();
    EasyMock.expectLastCall().once();
    EasyMock.replay(metadataRuleManager);
    // Setup MetadataSegmentManager
    DruidDataSource[] druidDataSources = { new DruidDataSource(dataSource, Collections.<String, String>emptyMap()) };
    final DataSegment dataSegment = new DataSegment(dataSource, new Interval("2010-01-01/P1D"), "v1", null, null, null, null, 0x9, 0);
    druidDataSources[0].addSegment("0", dataSegment);
    EasyMock.expect(databaseSegmentManager.isStarted()).andReturn(true).anyTimes();
    EasyMock.expect(databaseSegmentManager.getInventory()).andReturn(ImmutableList.of(druidDataSources[0])).atLeastOnce();
    EasyMock.replay(databaseSegmentManager);
    ImmutableDruidDataSource immutableDruidDataSource = EasyMock.createNiceMock(ImmutableDruidDataSource.class);
    EasyMock.expect(immutableDruidDataSource.getSegments()).andReturn(ImmutableSet.of(dataSegment)).atLeastOnce();
    EasyMock.replay(immutableDruidDataSource);
    // Setup ServerInventoryView
    druidServer = new DruidServer("server1", "localhost", 5L, "historical", tier, 0);
    loadManagementPeons.put("server1", loadQueuePeon);
    EasyMock.expect(serverInventoryView.getInventory()).andReturn(ImmutableList.of(druidServer)).atLeastOnce();
    EasyMock.expect(serverInventoryView.isStarted()).andReturn(true).anyTimes();
    EasyMock.replay(serverInventoryView);
    coordinator.start();
    // Wait for this coordinator to become leader
    leaderAnnouncerLatch.await();
    // This coordinator should be leader by now
    Assert.assertTrue(coordinator.isLeader());
    Assert.assertEquals(druidNode.getHostAndPort(), coordinator.getCurrentLeader());
    final CountDownLatch assignSegmentLatch = new CountDownLatch(1);
    pathChildrenCache.getListenable().addListener(new PathChildrenCacheListener() {

        @Override
        public void childEvent(CuratorFramework curatorFramework, PathChildrenCacheEvent pathChildrenCacheEvent) throws Exception {
            if (pathChildrenCacheEvent.getType().equals(PathChildrenCacheEvent.Type.CHILD_ADDED)) {
                // The coordinator should try to assign the segment to the druidServer historical
                // Simulate the historical loading the segment
                druidServer.addDataSegment(dataSegment.getIdentifier(), dataSegment);
                assignSegmentLatch.countDown();
            }
        }
    });
    pathChildrenCache.start();
    assignSegmentLatch.await();
    Assert.assertEquals(ImmutableMap.of(dataSource, 100.0), coordinator.getLoadStatus());
    curator.delete().guaranteed().forPath(ZKPaths.makePath(LOADPATH, dataSegment.getIdentifier()));
    // Wait for coordinator thread to run so that replication status is updated
    while (coordinator.getSegmentAvailability().snapshot().get(dataSource) != 0) {
        Thread.sleep(50);
    }
    Map<String, Long> segmentAvailability = coordinator.getSegmentAvailability().snapshot();
    Assert.assertEquals(1, segmentAvailability.size());
    Assert.assertEquals(0L, segmentAvailability.get(dataSource));
    while (coordinator.getLoadPendingDatasources().get(dataSource).get() > 0) {
        Thread.sleep(50);
    }
    // Wait for the historical's data to be updated
    long startMillis = System.currentTimeMillis();
    long coordinatorRunPeriodMillis = druidCoordinatorConfig.getCoordinatorPeriod().getMillis();
    while (System.currentTimeMillis() - startMillis < coordinatorRunPeriodMillis) {
        Thread.sleep(100);
    }
    Map<String, CountingMap<String>> replicationStatus = coordinator.getReplicationStatus();
    Assert.assertNotNull(replicationStatus);
    Assert.assertEquals(1, replicationStatus.entrySet().size());
    CountingMap<String> dataSourceMap = replicationStatus.get(tier);
    Assert.assertNotNull(dataSourceMap);
    Assert.assertEquals(1, dataSourceMap.size());
    Assert.assertNotNull(dataSourceMap.get(dataSource));
    // The load rule asks for 2 replicas, therefore 1 replica should still be pending
    while (dataSourceMap.get(dataSource).get() != 1L) {
        Thread.sleep(50);
    }
    coordinator.stop();
    leaderUnannouncerLatch.await();
    Assert.assertFalse(coordinator.isLeader());
    Assert.assertNull(coordinator.getCurrentLeader());
    EasyMock.verify(serverInventoryView);
    EasyMock.verify(metadataRuleManager);
}
Also used: ImmutableDruidDataSource (io.druid.client.ImmutableDruidDataSource), PathChildrenCacheListener (org.apache.curator.framework.recipes.cache.PathChildrenCacheListener), PathChildrenCacheEvent (org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent), ImmutableDruidServer (io.druid.client.ImmutableDruidServer), DruidServer (io.druid.client.DruidServer), CountDownLatch (java.util.concurrent.CountDownLatch), DruidDataSource (io.druid.client.DruidDataSource), DataSegment (io.druid.timeline.DataSegment), CountingMap (io.druid.collections.CountingMap), CuratorFramework (org.apache.curator.framework.CuratorFramework), ForeverLoadRule (io.druid.server.coordinator.rules.ForeverLoadRule), Rule (io.druid.server.coordinator.rules.Rule), Map (java.util.Map), ImmutableMap (com.google.common.collect.ImmutableMap), HashMap (java.util.HashMap), ConcurrentMap (java.util.concurrent.ConcurrentMap), Interval (org.joda.time.Interval), Test (org.junit.Test)
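
The waits in this test poll coordinator.getSegmentAvailability().snapshot() until the counters settle, which relies on CountingMap's basic contract: add() accumulates a per-key counter and snapshot() copies the counters into a plain Map<String, Long>. A minimal sketch of that contract, assuming only the add()/snapshot() signatures seen in these examples (the stand-in class below is hypothetical, not the io.druid.collections implementation):

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;

// Hypothetical stand-in illustrating the add()/snapshot() contract used above.
class CountingMapSketch<K> {

    private final Map<K, AtomicLong> counts = new HashMap<>();

    // Accumulate a delta for the key, creating the counter on first use.
    public void add(K key, long delta) {
        AtomicLong counter = counts.get(key);
        if (counter == null) {
            counter = new AtomicLong();
            counts.put(key, counter);
        }
        counter.addAndGet(delta);
    }

    // Copy the live counters into a plain Map<K, Long>.
    public Map<K, Long> snapshot() {
        final Map<K, Long> out = new HashMap<>();
        for (Map.Entry<K, AtomicLong> e : counts.entrySet()) {
            out.put(e.getKey(), e.getValue().get());
        }
        return out;
    }

    public static void main(String[] args) {
        CountingMapSketch<String> m = new CountingMapSketch<>();
        m.add("dataSource1", 1L);
        m.add("dataSource1", 2L);
        System.out.println(m.snapshot()); // prints {dataSource1=3}
    }
}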

Example 2 with CountingMap

Use of io.druid.collections.CountingMap in project druid by druid-io.

From class DruidCoordinator, method getReplicationStatus:

public Map<String, CountingMap<String>> getReplicationStatus() {
    final Map<String, CountingMap<String>> retVal = Maps.newHashMap();
    if (segmentReplicantLookup == null) {
        return retVal;
    }
    final DateTime now = new DateTime();
    for (DataSegment segment : getAvailableDataSegments()) {
        List<Rule> rules = metadataRuleManager.getRulesWithDefault(segment.getDataSource());
        for (Rule rule : rules) {
            if (rule instanceof LoadRule && rule.appliesTo(segment, now)) {
                for (Map.Entry<String, Integer> entry : ((LoadRule) rule).getTieredReplicants().entrySet()) {
                    CountingMap<String> dataSourceMap = retVal.get(entry.getKey());
                    if (dataSourceMap == null) {
                        dataSourceMap = new CountingMap<>();
                        retVal.put(entry.getKey(), dataSourceMap);
                    }
                    int diff = Math.max(entry.getValue() - segmentReplicantLookup.getTotalReplicants(segment.getIdentifier(), entry.getKey()), 0);
                    dataSourceMap.add(segment.getDataSource(), diff);
                }
                break;
            }
        }
    }
    return retVal;
}
Also used: DataSegment (io.druid.timeline.DataSegment), DateTime (org.joda.time.DateTime), CountingMap (io.druid.collections.CountingMap), LoadRule (io.druid.server.coordinator.rules.LoadRule), Rule (io.druid.server.coordinator.rules.Rule), Map (java.util.Map), ConcurrentMap (java.util.concurrent.ConcurrentMap)
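
The inner loop computes, per tier, how many replicas a segment is still missing: entry.getValue() is the replica count the LoadRule asks for, getTotalReplicants() is what the cluster currently holds, and Math.max(..., 0) clamps over-replication so it never subtracts from the total. A minimal sketch of that arithmetic with hypothetical literal values (the helper below is not part of DruidCoordinator):

// Hypothetical helper extracted only to make the clamping explicit.
final class ReplicationMath {

    // Contribution of one segment to the tier's under-replicated count.
    static int underReplicated(int desiredReplicas, int heldReplicas) {
        // An over-replicated segment must not subtract from the tier total,
        // hence the clamp at zero.
        return Math.max(desiredReplicas - heldReplicas, 0);
    }

    public static void main(String[] args) {
        System.out.println(underReplicated(2, 1)); // 1: one replica pending, as asserted in Example 1
        System.out.println(underReplicated(2, 3)); // 0: over-replication is ignored
    }
}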

Example 3 with CountingMap

Use of io.druid.collections.CountingMap in project druid by druid-io.

From class DruidCoordinatorLogger, method run:

@Override
public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
    DruidCluster cluster = params.getDruidCluster();
    CoordinatorStats stats = params.getCoordinatorStats();
    ServiceEmitter emitter = params.getEmitter();
    Map<String, AtomicLong> assigned = stats.getPerTierStats().get("assignedCount");
    if (assigned != null) {
        for (Map.Entry<String, AtomicLong> entry : assigned.entrySet()) {
            log.info("[%s] : Assigned %s segments among %,d servers", entry.getKey(), entry.getValue().get(), cluster.get(entry.getKey()).size());
        }
    }
    emitTieredStats(emitter, "segment/assigned/count", assigned);
    Map<String, AtomicLong> dropped = stats.getPerTierStats().get("droppedCount");
    if (dropped != null) {
        for (Map.Entry<String, AtomicLong> entry : dropped.entrySet()) {
            log.info("[%s] : Dropped %s segments among %,d servers", entry.getKey(), entry.getValue().get(), cluster.get(entry.getKey()).size());
        }
    }
    emitTieredStats(emitter, "segment/dropped/count", dropped);
    emitTieredStats(emitter, "segment/cost/raw", stats.getPerTierStats().get("initialCost"));
    emitTieredStats(emitter, "segment/cost/normalization", stats.getPerTierStats().get("normalization"));
    emitTieredStats(emitter, "segment/moved/count", stats.getPerTierStats().get("movedCount"));
    emitTieredStats(emitter, "segment/deleted/count", stats.getPerTierStats().get("deletedCount"));
    Map<String, AtomicLong> normalized = stats.getPerTierStats().get("normalizedInitialCostTimesOneThousand");
    if (normalized != null) {
        emitTieredStats(emitter, "segment/cost/normalized", Maps.transformEntries(normalized, new Maps.EntryTransformer<String, AtomicLong, Number>() {

            @Override
            public Number transformEntry(String key, AtomicLong value) {
                return value.doubleValue() / 1000d;
            }
        }));
    }
    Map<String, AtomicLong> unneeded = stats.getPerTierStats().get("unneededCount");
    if (unneeded != null) {
        for (Map.Entry<String, AtomicLong> entry : unneeded.entrySet()) {
            log.info("[%s] : Removed %s unneeded segments among %,d servers", entry.getKey(), entry.getValue().get(), cluster.get(entry.getKey()).size());
        }
    }
    emitTieredStats(emitter, "segment/unneeded/count", stats.getPerTierStats().get("unneededCount"));
    emitter.emit(new ServiceMetricEvent.Builder().build("segment/overShadowed/count", stats.getGlobalStats().get("overShadowedCount")));
    Map<String, AtomicLong> moved = stats.getPerTierStats().get("movedCount");
    if (moved != null) {
        for (Map.Entry<String, AtomicLong> entry : moved.entrySet()) {
            log.info("[%s] : Moved %,d segment(s)", entry.getKey(), entry.getValue().get());
        }
    }
    final Map<String, AtomicLong> unmoved = stats.getPerTierStats().get("unmovedCount");
    if (unmoved != null) {
        for (Map.Entry<String, AtomicLong> entry : unmoved.entrySet()) {
            log.info("[%s] : Let alone %,d segment(s)", entry.getKey(), entry.getValue().get());
        }
    }
    log.info("Load Queues:");
    for (MinMaxPriorityQueue<ServerHolder> serverHolders : cluster.getSortedServersByTier()) {
        for (ServerHolder serverHolder : serverHolders) {
            ImmutableDruidServer server = serverHolder.getServer();
            LoadQueuePeon queuePeon = serverHolder.getPeon();
            log.info("Server[%s, %s, %s] has %,d left to load, %,d left to drop, %,d bytes queued, %,d bytes served.", server.getName(), server.getType(), server.getTier(), queuePeon.getSegmentsToLoad().size(), queuePeon.getSegmentsToDrop().size(), queuePeon.getLoadQueueSize(), server.getCurrSize());
            if (log.isDebugEnabled()) {
                for (DataSegment segment : queuePeon.getSegmentsToLoad()) {
                    log.debug("Segment to load[%s]", segment);
                }
                for (DataSegment segment : queuePeon.getSegmentsToDrop()) {
                    log.debug("Segment to drop[%s]", segment);
                }
            }
        }
    }
    // Emit coordinator metrics
    final Set<Map.Entry<String, LoadQueuePeon>> peonEntries = params.getLoadManagementPeons().entrySet();
    for (Map.Entry<String, LoadQueuePeon> entry : peonEntries) {
        String serverName = entry.getKey();
        LoadQueuePeon queuePeon = entry.getValue();
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.SERVER, serverName).build("segment/loadQueue/size", queuePeon.getLoadQueueSize()));
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.SERVER, serverName).build("segment/loadQueue/failed", queuePeon.getAndResetFailedAssignCount()));
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.SERVER, serverName).build("segment/loadQueue/count", queuePeon.getSegmentsToLoad().size()));
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.SERVER, serverName).build("segment/dropQueue/count", queuePeon.getSegmentsToDrop().size()));
    }
    for (Map.Entry<String, AtomicLong> entry : coordinator.getSegmentAvailability().entrySet()) {
        String datasource = entry.getKey();
        Long count = entry.getValue().get();
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, datasource).build("segment/unavailable/count", count));
    }
    for (Map.Entry<String, CountingMap<String>> entry : coordinator.getReplicationStatus().entrySet()) {
        String tier = entry.getKey();
        CountingMap<String> datasourceAvailabilities = entry.getValue();
        for (Map.Entry<String, AtomicLong> datasourceAvailability : datasourceAvailabilities.entrySet()) {
            String datasource = datasourceAvailability.getKey();
            Long count = datasourceAvailability.getValue().get();
            emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.TIER, tier).setDimension(DruidMetrics.DATASOURCE, datasource).build("segment/underReplicated/count", count));
        }
    }
    // Emit segment metrics
    CountingMap<String> segmentSizes = new CountingMap<String>();
    CountingMap<String> segmentCounts = new CountingMap<String>();
    for (DruidDataSource dataSource : params.getDataSources()) {
        for (DataSegment segment : dataSource.getSegments()) {
            segmentSizes.add(dataSource.getName(), segment.getSize());
            segmentCounts.add(dataSource.getName(), 1L);
        }
    }
    for (Map.Entry<String, Long> entry : segmentSizes.snapshot().entrySet()) {
        String dataSource = entry.getKey();
        Long size = entry.getValue();
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("segment/size", size));
    }
    for (Map.Entry<String, Long> entry : segmentCounts.snapshot().entrySet()) {
        String dataSource = entry.getKey();
        Long count = entry.getValue();
        emitter.emit(new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, dataSource).build("segment/count", count));
    }
    return params;
}
Also used: ServiceEmitter (com.metamx.emitter.service.ServiceEmitter), ServiceMetricEvent (com.metamx.emitter.service.ServiceMetricEvent), CoordinatorStats (io.druid.server.coordinator.CoordinatorStats), DruidCluster (io.druid.server.coordinator.DruidCluster), LoadQueuePeon (io.druid.server.coordinator.LoadQueuePeon), ServerHolder (io.druid.server.coordinator.ServerHolder), DataSegment (io.druid.timeline.DataSegment), DruidDataSource (io.druid.client.DruidDataSource), ImmutableDruidServer (io.druid.client.ImmutableDruidServer), CountingMap (io.druid.collections.CountingMap), AtomicLong (java.util.concurrent.atomic.AtomicLong), Map (java.util.Map)
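
One detail worth noting above is the use of Guava's Maps.transformEntries to rescale the normalizedInitialCostTimesOneThousand counters back to their real values before emission: it returns a lazy, live view, so each value is divided by 1000 at read time rather than copied eagerly. A minimal, self-contained sketch of that call (the map contents here are hypothetical):

import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;

class TransformEntriesDemo {

    public static void main(String[] args) {
        // Costs stored as thousandths, mirroring "normalizedInitialCostTimesOneThousand".
        Map<String, AtomicLong> scaled = ImmutableMap.of(
            "hot", new AtomicLong(1234),
            "_default_tier", new AtomicLong(500));

        // Lazy view: each value is recomputed on every read.
        Map<String, Number> normalized = Maps.transformEntries(scaled,
            new Maps.EntryTransformer<String, AtomicLong, Number>() {

                @Override
                public Number transformEntry(String key, AtomicLong value) {
                    return value.doubleValue() / 1000d;
                }
            });

        System.out.println(normalized); // {hot=1.234, _default_tier=0.5}
    }
}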

Example 4 with CountingMap

Use of io.druid.collections.CountingMap in project druid by druid-io.

From class DruidCoordinator, method getSegmentAvailability:

public CountingMap<String> getSegmentAvailability() {
    final CountingMap<String> retVal = new CountingMap<>();
    if (segmentReplicantLookup == null) {
        return retVal;
    }
    for (DataSegment segment : getAvailableDataSegments()) {
        int available = (segmentReplicantLookup.getTotalReplicants(segment.getIdentifier()) == 0) ? 0 : 1;
        retVal.add(segment.getDataSource(), 1 - available);
    }
    return retVal;
}
Also used: CountingMap (io.druid.collections.CountingMap), DataSegment (io.druid.timeline.DataSegment)
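
The expression (getTotalReplicants(...) == 0) ? 0 : 1 treats a segment as available as soon as any replica is loaded anywhere, so adding 1 - available counts exactly the segments with no loaded replica, per datasource; this is the counter the test in Example 1 polls down to zero. A minimal sketch of that arithmetic (the helper is hypothetical, not part of DruidCoordinator):

// Hypothetical helper making the availability arithmetic explicit.
final class AvailabilityMath {

    // Contribution of one segment to its datasource's unavailable count.
    static int unavailableContribution(int totalReplicants) {
        int available = (totalReplicants == 0) ? 0 : 1;
        return 1 - available; // 1 if no replica is loaded anywhere, else 0
    }

    public static void main(String[] args) {
        System.out.println(unavailableContribution(0)); // 1: counted as unavailable
        System.out.println(unavailableContribution(3)); // 0: at least one loaded replica
    }
}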

Aggregations

CountingMap (io.druid.collections.CountingMap): 4
DataSegment (io.druid.timeline.DataSegment): 4
Map (java.util.Map): 3
DruidDataSource (io.druid.client.DruidDataSource): 2
ImmutableDruidServer (io.druid.client.ImmutableDruidServer): 2
Rule (io.druid.server.coordinator.rules.Rule): 2
ConcurrentMap (java.util.concurrent.ConcurrentMap): 2
ImmutableMap (com.google.common.collect.ImmutableMap): 1
ServiceEmitter (com.metamx.emitter.service.ServiceEmitter): 1
ServiceMetricEvent (com.metamx.emitter.service.ServiceMetricEvent): 1
DruidServer (io.druid.client.DruidServer): 1
ImmutableDruidDataSource (io.druid.client.ImmutableDruidDataSource): 1
CoordinatorStats (io.druid.server.coordinator.CoordinatorStats): 1
DruidCluster (io.druid.server.coordinator.DruidCluster): 1
LoadQueuePeon (io.druid.server.coordinator.LoadQueuePeon): 1
ServerHolder (io.druid.server.coordinator.ServerHolder): 1
ForeverLoadRule (io.druid.server.coordinator.rules.ForeverLoadRule): 1
LoadRule (io.druid.server.coordinator.rules.LoadRule): 1
HashMap (java.util.HashMap): 1
CountDownLatch (java.util.concurrent.CountDownLatch): 1