
Example 1 with ImmutableDruidDataSource

Use of io.druid.client.ImmutableDruidDataSource in project druid by druid-io.

From the class DruidCoordinatorTest, the method testCoordinatorRun:

@Test(timeout = 60_000L)
public void testCoordinatorRun() throws Exception {
    String dataSource = "dataSource1";
    String tier = "hot";
    // Setup MetadataRuleManager
    Rule foreverLoadRule = new ForeverLoadRule(ImmutableMap.of(tier, 2));
    EasyMock.expect(metadataRuleManager.getRulesWithDefault(EasyMock.anyString())).andReturn(ImmutableList.of(foreverLoadRule)).atLeastOnce();
    metadataRuleManager.stop();
    EasyMock.expectLastCall().once();
    EasyMock.replay(metadataRuleManager);
    // Setup MetadataSegmentManager
    DruidDataSource[] druidDataSources = { new DruidDataSource(dataSource, Collections.<String, String>emptyMap()) };
    final DataSegment dataSegment = new DataSegment(dataSource, new Interval("2010-01-01/P1D"), "v1", null, null, null, null, 0x9, 0);
    druidDataSources[0].addSegment("0", dataSegment);
    EasyMock.expect(databaseSegmentManager.isStarted()).andReturn(true).anyTimes();
    EasyMock.expect(databaseSegmentManager.getInventory()).andReturn(ImmutableList.of(druidDataSources[0])).atLeastOnce();
    EasyMock.replay(databaseSegmentManager);
    ImmutableDruidDataSource immutableDruidDataSource = EasyMock.createNiceMock(ImmutableDruidDataSource.class);
    EasyMock.expect(immutableDruidDataSource.getSegments()).andReturn(ImmutableSet.of(dataSegment)).atLeastOnce();
    EasyMock.replay(immutableDruidDataSource);
    // Setup ServerInventoryView
    druidServer = new DruidServer("server1", "localhost", 5L, "historical", tier, 0);
    loadManagementPeons.put("server1", loadQueuePeon);
    EasyMock.expect(serverInventoryView.getInventory()).andReturn(ImmutableList.of(druidServer)).atLeastOnce();
    EasyMock.expect(serverInventoryView.isStarted()).andReturn(true).anyTimes();
    EasyMock.replay(serverInventoryView);
    coordinator.start();
    // Wait for this coordinator to become leader
    leaderAnnouncerLatch.await();
    // This coordinator should be leader by now
    Assert.assertTrue(coordinator.isLeader());
    Assert.assertEquals(druidNode.getHostAndPort(), coordinator.getCurrentLeader());
    final CountDownLatch assignSegmentLatch = new CountDownLatch(1);
    pathChildrenCache.getListenable().addListener(new PathChildrenCacheListener() {

        @Override
        public void childEvent(CuratorFramework curatorFramework, PathChildrenCacheEvent pathChildrenCacheEvent) throws Exception {
            if (pathChildrenCacheEvent.getType().equals(PathChildrenCacheEvent.Type.CHILD_ADDED)) {
                // The coordinator should try to assign the segment to the druidServer historical.
                // Simulate the historical loading the segment.
                druidServer.addDataSegment(dataSegment.getIdentifier(), dataSegment);
                assignSegmentLatch.countDown();
            }
        }
    });
    pathChildrenCache.start();
    assignSegmentLatch.await();
    Assert.assertEquals(ImmutableMap.of(dataSource, 100.0), coordinator.getLoadStatus());
    curator.delete().guaranteed().forPath(ZKPaths.makePath(LOADPATH, dataSegment.getIdentifier()));
    // Wait for coordinator thread to run so that replication status is updated
    while (coordinator.getSegmentAvailability().snapshot().get(dataSource) != 0) {
        Thread.sleep(50);
    }
    Map<String, Long> segmentAvailability = coordinator.getSegmentAvailability().snapshot();
    Assert.assertEquals(1, segmentAvailability.size());
    Assert.assertEquals(0L, segmentAvailability.get(dataSource));
    while (coordinator.getLoadPendingDatasources().get(dataSource).get() > 0) {
        Thread.sleep(50);
    }
    // Wait one full coordinator run period so that the historical data is updated
    long startMillis = System.currentTimeMillis();
    long coordinatorRunPeriodMillis = druidCoordinatorConfig.getCoordinatorPeriod().getMillis();
    while (System.currentTimeMillis() - startMillis < coordinatorRunPeriodMillis) {
        Thread.sleep(100);
    }
    Map<String, CountingMap<String>> replicationStatus = coordinator.getReplicationStatus();
    Assert.assertNotNull(replicationStatus);
    Assert.assertEquals(1, replicationStatus.entrySet().size());
    CountingMap<String> dataSourceMap = replicationStatus.get(tier);
    Assert.assertNotNull(dataSourceMap);
    Assert.assertEquals(1, dataSourceMap.size());
    Assert.assertNotNull(dataSourceMap.get(dataSource));
    // The load rule asks for 2 replicas, so 1 replica should still be pending
    while (dataSourceMap.get(dataSource).get() != 1L) {
        Thread.sleep(50);
    }
    coordinator.stop();
    leaderUnannouncerLatch.await();
    Assert.assertFalse(coordinator.isLeader());
    Assert.assertNull(coordinator.getCurrentLeader());
    EasyMock.verify(serverInventoryView);
    EasyMock.verify(metadataRuleManager);
}
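
A note on the waiting pattern above: the unbounded assignSegmentLatch.await() and the Thread.sleep polling loops rely on the @Test(timeout = 60_000L) annotation to abort a hung run. A minimal sketch of a bounded wait, using only the standard java.util.concurrent.CountDownLatch API (the 30-second budget and the failure message are illustrative choices, not from the original test):

// Bounded wait: fail with a descriptive message instead of letting the whole
// test hit its 60-second timeout if the segment is never assigned.
// Requires java.util.concurrent.TimeUnit in addition to the imports above.
Assert.assertTrue(
    "Coordinator did not assign the segment within 30 seconds",
    assignSegmentLatch.await(30, TimeUnit.SECONDS));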
Also used : ImmutableDruidDataSource(io.druid.client.ImmutableDruidDataSource) PathChildrenCacheListener(org.apache.curator.framework.recipes.cache.PathChildrenCacheListener) PathChildrenCacheEvent(org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent) ImmutableDruidServer(io.druid.client.ImmutableDruidServer) DruidServer(io.druid.client.DruidServer) CountDownLatch(java.util.concurrent.CountDownLatch) DruidDataSource(io.druid.client.DruidDataSource) DataSegment(io.druid.timeline.DataSegment) CountingMap(io.druid.collections.CountingMap) CuratorFramework(org.apache.curator.framework.CuratorFramework) ForeverLoadRule(io.druid.server.coordinator.rules.ForeverLoadRule) Rule(io.druid.server.coordinator.rules.Rule) Map(java.util.Map) ImmutableMap(com.google.common.collect.ImmutableMap) HashMap(java.util.HashMap) ConcurrentMap(java.util.concurrent.ConcurrentMap) Interval(org.joda.time.Interval) Test(org.junit.Test)

Example 2 with ImmutableDruidDataSource

Use of io.druid.client.ImmutableDruidDataSource in project druid by druid-io.

From the class DruidCoordinatorCleanupUnneeded, the method run:

@Override
public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
    CoordinatorStats stats = new CoordinatorStats();
    Set<DataSegment> availableSegments = params.getAvailableSegments();
    DruidCluster cluster = params.getDruidCluster();
    // Skip the cleanup until the coordinator has finished its first poll of the metadata storage
    // for available segments; otherwise it could drop every segment (see the log message below).
    if (!availableSegments.isEmpty()) {
        for (MinMaxPriorityQueue<ServerHolder> serverHolders : cluster.getSortedServersByTier()) {
            for (ServerHolder serverHolder : serverHolders) {
                ImmutableDruidServer server = serverHolder.getServer();
                for (ImmutableDruidDataSource dataSource : server.getDataSources()) {
                    for (DataSegment segment : dataSource.getSegments()) {
                        if (!availableSegments.contains(segment)) {
                            LoadQueuePeon queuePeon = params.getLoadManagementPeons().get(server.getName());
                            if (!queuePeon.getSegmentsToDrop().contains(segment)) {
                                queuePeon.dropSegment(segment, new LoadPeonCallback() {

                                    @Override
                                    public void execute() {
                                    }
                                });
                                stats.addToTieredStat("unneededCount", server.getTier(), 1);
                            }
                        }
                    }
                }
            }
        }
    } else {
        log.info("Found 0 availableSegments, skipping the cleanup of segments from historicals. This is done to prevent a race condition in which the coordinator would drop all segments if it started running cleanup before it finished polling the metadata storage for available segments for the first time.");
    }
    return params.buildFromExisting().withCoordinatorStats(stats).build();
}
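
LoadPeonCallback appears to be a functional interface here (the anonymous class above overrides only execute()), so on Java 8+ the drop call can be written more compactly. A sketch under that assumption; the behavior is identical:

// Drop the segment with a no-op lambda callback (Java 8+), assuming
// LoadPeonCallback declares exactly one abstract method, execute().
queuePeon.dropSegment(segment, () -> {
});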
Also used : CoordinatorStats(io.druid.server.coordinator.CoordinatorStats) LoadPeonCallback(io.druid.server.coordinator.LoadPeonCallback) ImmutableDruidDataSource(io.druid.client.ImmutableDruidDataSource) ServerHolder(io.druid.server.coordinator.ServerHolder) LoadQueuePeon(io.druid.server.coordinator.LoadQueuePeon) DruidCluster(io.druid.server.coordinator.DruidCluster) DataSegment(io.druid.timeline.DataSegment) ImmutableDruidServer(io.druid.client.ImmutableDruidServer)

Example 3 with ImmutableDruidDataSource

Use of io.druid.client.ImmutableDruidDataSource in project druid by druid-io.

From the class DruidCoordinatorCleanupOvershadowed, the method run:

@Override
public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
    CoordinatorStats stats = new CoordinatorStats();
    // Stop serving old partitions once enough time has elapsed to be sure we aren't flapping with old data
    if (params.hasDeletionWaitTimeElapsed()) {
        DruidCluster cluster = params.getDruidCluster();
        Map<String, VersionedIntervalTimeline<String, DataSegment>> timelines = Maps.newHashMap();
        for (MinMaxPriorityQueue<ServerHolder> serverHolders : cluster.getSortedServersByTier()) {
            for (ServerHolder serverHolder : serverHolders) {
                ImmutableDruidServer server = serverHolder.getServer();
                for (ImmutableDruidDataSource dataSource : server.getDataSources()) {
                    VersionedIntervalTimeline<String, DataSegment> timeline = timelines.get(dataSource.getName());
                    if (timeline == null) {
                        timeline = new VersionedIntervalTimeline<>(Comparators.comparable());
                        timelines.put(dataSource.getName(), timeline);
                    }
                    for (DataSegment segment : dataSource.getSegments()) {
                        timeline.add(segment.getInterval(), segment.getVersion(), segment.getShardSpec().createChunk(segment));
                    }
                }
            }
        }
        // Remove from the metadata store all segments that are overshadowed by served segments
        for (DataSegment dataSegment : params.getAvailableSegments()) {
            VersionedIntervalTimeline<String, DataSegment> timeline = timelines.get(dataSegment.getDataSource());
            if (timeline != null && timeline.isOvershadowed(dataSegment.getInterval(), dataSegment.getVersion())) {
                coordinator.removeSegment(dataSegment);
                stats.addToGlobalStat("overShadowedCount", 1);
            }
        }
    }
    return params.buildFromExisting().withCoordinatorStats(stats).build();
}
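
The get-then-put idiom that populates timelines can be condensed with Map.computeIfAbsent on Java 8+. A sketch under that assumption; the behavior is unchanged:

// Look up, or lazily create, the per-datasource timeline in one step (Java 8+).
VersionedIntervalTimeline<String, DataSegment> timeline = timelines.computeIfAbsent(
    dataSource.getName(),
    k -> new VersionedIntervalTimeline<>(Comparators.comparable()));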
Also used : CoordinatorStats(io.druid.server.coordinator.CoordinatorStats) ImmutableDruidDataSource(io.druid.client.ImmutableDruidDataSource) ServerHolder(io.druid.server.coordinator.ServerHolder) VersionedIntervalTimeline(io.druid.timeline.VersionedIntervalTimeline) DruidCluster(io.druid.server.coordinator.DruidCluster) DataSegment(io.druid.timeline.DataSegment) ImmutableDruidServer(io.druid.client.ImmutableDruidServer)

Aggregations

ImmutableDruidDataSource (io.druid.client.ImmutableDruidDataSource) 3
ImmutableDruidServer (io.druid.client.ImmutableDruidServer) 3
DataSegment (io.druid.timeline.DataSegment) 3
CoordinatorStats (io.druid.server.coordinator.CoordinatorStats) 2
DruidCluster (io.druid.server.coordinator.DruidCluster) 2
ServerHolder (io.druid.server.coordinator.ServerHolder) 2
ImmutableMap (com.google.common.collect.ImmutableMap) 1
DruidDataSource (io.druid.client.DruidDataSource) 1
DruidServer (io.druid.client.DruidServer) 1
CountingMap (io.druid.collections.CountingMap) 1
LoadPeonCallback (io.druid.server.coordinator.LoadPeonCallback) 1
LoadQueuePeon (io.druid.server.coordinator.LoadQueuePeon) 1
ForeverLoadRule (io.druid.server.coordinator.rules.ForeverLoadRule) 1
Rule (io.druid.server.coordinator.rules.Rule) 1
VersionedIntervalTimeline (io.druid.timeline.VersionedIntervalTimeline) 1
HashMap (java.util.HashMap) 1
Map (java.util.Map) 1
ConcurrentMap (java.util.concurrent.ConcurrentMap) 1
CountDownLatch (java.util.concurrent.CountDownLatch) 1
CuratorFramework (org.apache.curator.framework.CuratorFramework) 1