Example 1 with DruidDataSource

Use of io.druid.client.DruidDataSource in project druid by druid-io.

The class DruidCoordinatorTest, method testCoordinatorRun:

@Test(timeout = 60_000L)
public void testCoordinatorRun() throws Exception {
    String dataSource = "dataSource1";
    String tier = "hot";
    // Setup MetadataRuleManager
    Rule foreverLoadRule = new ForeverLoadRule(ImmutableMap.of(tier, 2));
    EasyMock.expect(metadataRuleManager.getRulesWithDefault(EasyMock.anyString())).andReturn(ImmutableList.of(foreverLoadRule)).atLeastOnce();
    metadataRuleManager.stop();
    EasyMock.expectLastCall().once();
    EasyMock.replay(metadataRuleManager);
    // Setup MetadataSegmentManager
    DruidDataSource[] druidDataSources = { new DruidDataSource(dataSource, Collections.<String, String>emptyMap()) };
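    // DataSegment args: dataSource, interval, version, loadSpec, dimensions,
    // metrics, shardSpec, binaryVersion, size in bytes.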
    final DataSegment dataSegment = new DataSegment(dataSource, new Interval("2010-01-01/P1D"), "v1", null, null, null, null, 0x9, 0);
    druidDataSources[0].addSegment("0", dataSegment);
    EasyMock.expect(databaseSegmentManager.isStarted()).andReturn(true).anyTimes();
    EasyMock.expect(databaseSegmentManager.getInventory()).andReturn(ImmutableList.of(druidDataSources[0])).atLeastOnce();
    EasyMock.replay(databaseSegmentManager);
    ImmutableDruidDataSource immutableDruidDataSource = EasyMock.createNiceMock(ImmutableDruidDataSource.class);
    EasyMock.expect(immutableDruidDataSource.getSegments()).andReturn(ImmutableSet.of(dataSegment)).atLeastOnce();
    EasyMock.replay(immutableDruidDataSource);
    // Setup ServerInventoryView
    druidServer = new DruidServer("server1", "localhost", 5L, "historical", tier, 0);
    loadManagementPeons.put("server1", loadQueuePeon);
    EasyMock.expect(serverInventoryView.getInventory()).andReturn(ImmutableList.of(druidServer)).atLeastOnce();
    EasyMock.expect(serverInventoryView.isStarted()).andReturn(true).anyTimes();
    EasyMock.replay(serverInventoryView);
    coordinator.start();
    // Wait for this coordinator to become leader
    leaderAnnouncerLatch.await();
    // This coordinator should be leader by now
    Assert.assertTrue(coordinator.isLeader());
    Assert.assertEquals(druidNode.getHostAndPort(), coordinator.getCurrentLeader());
    final CountDownLatch assignSegmentLatch = new CountDownLatch(1);
    pathChildrenCache.getListenable().addListener(new PathChildrenCacheListener() {

        @Override
        public void childEvent(CuratorFramework curatorFramework, PathChildrenCacheEvent pathChildrenCacheEvent) throws Exception {
            if (pathChildrenCacheEvent.getType().equals(PathChildrenCacheEvent.Type.CHILD_ADDED)) {
                // The coordinator should try to assign the segment to the historical druidServer.
                // Simulate the historical loading the segment.
                druidServer.addDataSegment(dataSegment.getIdentifier(), dataSegment);
                assignSegmentLatch.countDown();
            }
        }
    });
    pathChildrenCache.start();
    assignSegmentLatch.await();
    Assert.assertEquals(ImmutableMap.of(dataSource, 100.0), coordinator.getLoadStatus());
    curator.delete().guaranteed().forPath(ZKPaths.makePath(LOADPATH, dataSegment.getIdentifier()));
    // Wait for coordinator thread to run so that replication status is updated
    while (coordinator.getSegmentAvailability().snapshot().get(dataSource) != 0) {
        Thread.sleep(50);
    }
    Map segmentAvailability = coordinator.getSegmentAvailability().snapshot();
    Assert.assertEquals(1, segmentAvailability.size());
    Assert.assertEquals(0L, segmentAvailability.get(dataSource));
    while (coordinator.getLoadPendingDatasources().get(dataSource).get() > 0) {
        Thread.sleep(50);
    }
    // Wait for the historical's data to be updated: let one full coordinator run period elapse.
    long startMillis = System.currentTimeMillis();
    long coordinatorRunPeriodMillis = druidCoordinatorConfig.getCoordinatorPeriod().getMillis();
    while (System.currentTimeMillis() - startMillis < coordinatorRunPeriodMillis) {
        Thread.sleep(100);
    }
    Map<String, CountingMap<String>> replicationStatus = coordinator.getReplicationStatus();
    Assert.assertNotNull(replicationStatus);
    Assert.assertEquals(1, replicationStatus.entrySet().size());
    CountingMap<String> dataSourceMap = replicationStatus.get(tier);
    Assert.assertNotNull(dataSourceMap);
    Assert.assertEquals(1, dataSourceMap.size());
    Assert.assertNotNull(dataSourceMap.get(dataSource));
    // The load rule asks for 2 replicas, so 1 replica should still be pending.
    while (dataSourceMap.get(dataSource).get() != 1L) {
        Thread.sleep(50);
    }
    coordinator.stop();
    leaderUnannouncerLatch.await();
    Assert.assertFalse(coordinator.isLeader());
    Assert.assertNull(coordinator.getCurrentLeader());
    EasyMock.verify(serverInventoryView);
    EasyMock.verify(metadataRuleManager);
}
Also used : ImmutableDruidDataSource(io.druid.client.ImmutableDruidDataSource) PathChildrenCacheListener(org.apache.curator.framework.recipes.cache.PathChildrenCacheListener) PathChildrenCacheEvent(org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent) ImmutableDruidServer(io.druid.client.ImmutableDruidServer) DruidServer(io.druid.client.DruidServer) CountDownLatch(java.util.concurrent.CountDownLatch) DruidDataSource(io.druid.client.DruidDataSource) DataSegment(io.druid.timeline.DataSegment) CountingMap(io.druid.collections.CountingMap) CuratorFramework(org.apache.curator.framework.CuratorFramework) ForeverLoadRule(io.druid.server.coordinator.rules.ForeverLoadRule) Rule(io.druid.server.coordinator.rules.Rule) Map(java.util.Map) ImmutableMap(com.google.common.collect.ImmutableMap) HashMap(java.util.HashMap) ConcurrentMap(java.util.concurrent.ConcurrentMap) Interval(org.joda.time.Interval) Test(org.junit.Test)
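
Each collaborator in the test above follows EasyMock's record-replay-verify lifecycle: expectations are recorded with expect(...), the mock is switched to playback with replay(...), and verify(...) asserts at the end that every expected call happened. A minimal standalone sketch of that pattern (the Inventory interface here is hypothetical, invented only for illustration):

import org.easymock.EasyMock;

public class MockLifecycleSketch {

    // Hypothetical collaborator, not a Druid type.
    interface Inventory {
        boolean isStarted();
    }

    public static void main(String[] args) {
        // 1. Record: declare the calls the test expects.
        Inventory inventory = EasyMock.createMock(Inventory.class);
        EasyMock.expect(inventory.isStarted()).andReturn(true).anyTimes();
        // 2. Replay: switch the mock from recording to playback.
        EasyMock.replay(inventory);
        // Exercise the code under test.
        System.out.println(inventory.isStarted());
        // 3. Verify: assert that the recorded expectations were met.
        EasyMock.verify(inventory);
    }
}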

Example 2 with DruidDataSource

Use of io.druid.client.DruidDataSource in project druid by druid-io.

The class DatasourcesResourceTest, method setUp:

@Before
public void setUp() {
    request = EasyMock.createStrictMock(HttpServletRequest.class);
    inventoryView = EasyMock.createStrictMock(CoordinatorServerView.class);
    server = EasyMock.createStrictMock(DruidServer.class);
    dataSegmentList = new ArrayList<>();
    dataSegmentList.add(new DataSegment("datasource1", new Interval("2010-01-01/P1D"), null, null, null, null, null, 0x9, 10));
    dataSegmentList.add(new DataSegment("datasource1", new Interval("2010-01-22/P1D"), null, null, null, null, null, 0x9, 20));
    dataSegmentList.add(new DataSegment("datasource2", new Interval("2010-01-01/P1D"), null, null, null, null, null, 0x9, 30));
    listDataSources = new ArrayList<>();
    listDataSources.add(new DruidDataSource("datasource1", new HashMap()).addSegment("part1", dataSegmentList.get(0)));
    listDataSources.add(new DruidDataSource("datasource2", new HashMap()).addSegment("part1", dataSegmentList.get(1)));
}
Also used : HttpServletRequest(javax.servlet.http.HttpServletRequest) HashMap(java.util.HashMap) DruidServer(io.druid.client.DruidServer) CoordinatorServerView(io.druid.client.CoordinatorServerView) DataSegment(io.druid.timeline.DataSegment) DruidDataSource(io.druid.client.DruidDataSource) Interval(org.joda.time.Interval) Before(org.junit.Before)
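
Note that DruidDataSource.addSegment returns the datasource itself, which is what makes the inline chaining above possible. The same fixture written without chaining, for clarity:

    DruidDataSource ds1 = new DruidDataSource("datasource1", new HashMap());
    ds1.addSegment("part1", dataSegmentList.get(0));
    listDataSources.add(ds1);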

Example 3 with DruidDataSource

Use of io.druid.client.DruidDataSource in project druid by druid-io.

The class DatasourcesResourceTest, method testGetFullQueryableDataSources:

@Test
public void testGetFullQueryableDataSources() throws Exception {
    EasyMock.expect(server.getDataSources()).andReturn(ImmutableList.of(listDataSources.get(0), listDataSources.get(1))).atLeastOnce();
    EasyMock.expect(inventoryView.getInventory()).andReturn(ImmutableList.of(server)).atLeastOnce();
    EasyMock.replay(inventoryView, server);
    DatasourcesResource datasourcesResource = new DatasourcesResource(inventoryView, null, null, new AuthConfig());
    Response response = datasourcesResource.getQueryableDataSources("full", null, request);
    Set<DruidDataSource> result = (Set<DruidDataSource>) response.getEntity();
    DruidDataSource[] resultantDruidDataSources = new DruidDataSource[result.size()];
    result.toArray(resultantDruidDataSources);
    Assert.assertEquals(200, response.getStatus());
    Assert.assertEquals(2, resultantDruidDataSources.length);
    Assert.assertArrayEquals(listDataSources.toArray(), resultantDruidDataSources);
    response = datasourcesResource.getQueryableDataSources(null, null, request);
    List<String> result1 = (List<String>) response.getEntity();
    Assert.assertEquals(200, response.getStatus());
    Assert.assertEquals(2, result1.size());
    Assert.assertTrue(result1.contains("datasource1"));
    Assert.assertTrue(result1.contains("datasource2"));
    EasyMock.verify(inventoryView, server);
}
Also used : Response(javax.ws.rs.core.Response) TreeSet(java.util.TreeSet) ImmutableSet(com.google.common.collect.ImmutableSet) Set(java.util.Set) ArrayList(java.util.ArrayList) ImmutableList(com.google.common.collect.ImmutableList) List(java.util.List) AuthConfig(io.druid.server.security.AuthConfig) DruidDataSource(io.druid.client.DruidDataSource) Test(org.junit.Test)
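
In the assertions above, passing "full" yields the complete DruidDataSource objects, while passing null yields only the datasource names. For orientation, this resource backs the coordinator's HTTP API, so the two calls correspond roughly to requests like the following (paths assumed from the coordinator API of that era):

    GET /druid/coordinator/v1/datasources        -> ["datasource1", "datasource2"]
    GET /druid/coordinator/v1/datasources?full   -> full DruidDataSource metadata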

Example 4 with DruidDataSource

Use of io.druid.client.DruidDataSource in project druid by druid-io.

The class DruidSchema, method start:

@LifecycleStart
public void start() {
    cacheExec.submit(new Runnable() {

        @Override
        public void run() {
            try {
                while (!Thread.currentThread().isInterrupted()) {
                    final Set<String> dataSources = Sets.newHashSet();
                    try {
                        synchronized (lock) {
                            final long nextRefresh = new DateTime(lastRefresh).plus(config.getMetadataRefreshPeriod()).getMillis();
                            while (!(isServerViewInitialized && !dataSourcesNeedingRefresh.isEmpty() && (refreshImmediately || nextRefresh < System.currentTimeMillis()))) {
                                lock.wait(Math.max(1, nextRefresh - System.currentTimeMillis()));
                            }
                            dataSources.addAll(dataSourcesNeedingRefresh);
                            dataSourcesNeedingRefresh.clear();
                            lastRefresh = System.currentTimeMillis();
                            refreshImmediately = false;
                        }
                        // Refresh dataSources.
                        for (final String dataSource : dataSources) {
                            log.debug("Refreshing metadata for dataSource[%s].", dataSource);
                            final long startTime = System.currentTimeMillis();
                            final DruidTable druidTable = computeTable(dataSource);
                            if (druidTable == null) {
                                if (tables.remove(dataSource) != null) {
                                    log.info("Removed dataSource[%s] from the list of active dataSources.", dataSource);
                                }
                            } else {
                                tables.put(dataSource, druidTable);
                                log.info("Refreshed metadata for dataSource[%s] in %,dms.", dataSource, System.currentTimeMillis() - startTime);
                            }
                        }
                        initializationLatch.countDown();
                    } catch (InterruptedException e) {
                        // Fall through.
                        throw e;
                    } catch (Exception e) {
                        log.warn(e, "Metadata refresh failed for dataSources[%s], trying again soon.", Joiner.on(", ").join(dataSources));
                        synchronized (lock) {
                            // Add dataSources back to the refresh list.
                            dataSourcesNeedingRefresh.addAll(dataSources);
                            lock.notifyAll();
                        }
                    }
                }
            } catch (InterruptedException e) {
                // Just exit.
            } catch (Throwable e) {
                // Throwables that fall out to here (not caught by an inner try/catch) are potentially gnarly, like
                // OOMEs. Anyway, let's just emit an alert and stop refreshing metadata.
                log.makeAlert(e, "Metadata refresh failed permanently").emit();
                throw e;
            } finally {
                log.info("Metadata refresh stopped.");
            }
        }
    });
    serverView.registerSegmentCallback(MoreExecutors.sameThreadExecutor(), new ServerView.SegmentCallback() {

        @Override
        public ServerView.CallbackAction segmentViewInitialized() {
            synchronized (lock) {
                isServerViewInitialized = true;
                lock.notifyAll();
            }
            return ServerView.CallbackAction.CONTINUE;
        }

        @Override
        public ServerView.CallbackAction segmentAdded(DruidServerMetadata server, DataSegment segment) {
            synchronized (lock) {
                dataSourcesNeedingRefresh.add(segment.getDataSource());
                if (!tables.containsKey(segment.getDataSource())) {
                    refreshImmediately = true;
                }
                lock.notifyAll();
            }
            return ServerView.CallbackAction.CONTINUE;
        }

        @Override
        public ServerView.CallbackAction segmentRemoved(DruidServerMetadata server, DataSegment segment) {
            synchronized (lock) {
                dataSourcesNeedingRefresh.add(segment.getDataSource());
                lock.notifyAll();
            }
            return ServerView.CallbackAction.CONTINUE;
        }
    });
    serverView.registerServerCallback(MoreExecutors.sameThreadExecutor(), new ServerView.ServerCallback() {

        @Override
        public ServerView.CallbackAction serverRemoved(DruidServer server) {
            final List<String> dataSourceNames = Lists.newArrayList();
            for (DruidDataSource druidDataSource : server.getDataSources()) {
                dataSourceNames.add(druidDataSource.getName());
            }
            synchronized (lock) {
                dataSourcesNeedingRefresh.addAll(dataSourceNames);
                lock.notifyAll();
            }
            return ServerView.CallbackAction.CONTINUE;
        }
    });
}
Also used : EnumSet(java.util.EnumSet) Set(java.util.Set) DruidTable(io.druid.sql.calcite.table.DruidTable) DruidServer(io.druid.client.DruidServer) DruidServerMetadata(io.druid.server.coordination.DruidServerMetadata) DataSegment(io.druid.timeline.DataSegment) DruidDataSource(io.druid.client.DruidDataSource) DateTime(org.joda.time.DateTime) ServerView(io.druid.client.ServerView) TimelineServerView(io.druid.client.TimelineServerView) List(java.util.List) LifecycleStart(io.druid.java.util.common.lifecycle.LifecycleStart)
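
The refresh loop above uses the classic guarded-wait idiom: the segment and server callbacks mutate shared state while holding lock and then call notifyAll(), and the background thread re-checks its wake-up condition in a while loop around lock.wait(...), so spurious wakeups and stale notifications are harmless. A minimal standalone sketch of the idiom (the class and method names are illustrative, not Druid's):

import java.util.HashSet;
import java.util.Set;

public class GuardedWaitSketch {

    private final Object lock = new Object();
    private final Set<String> pending = new HashSet<>();

    // Producer side: record work and wake any waiting consumer.
    public void markDirty(String dataSource) {
        synchronized (lock) {
            pending.add(dataSource);
            lock.notifyAll();
        }
    }

    // Consumer side: block until there is work, then drain it as a batch.
    public Set<String> takeBatch() throws InterruptedException {
        synchronized (lock) {
            // A while loop, not an if: re-check the condition after every wakeup.
            while (pending.isEmpty()) {
                lock.wait();
            }
            Set<String> batch = new HashSet<>(pending);
            pending.clear();
            return batch;
        }
    }

    public static void main(String[] args) throws InterruptedException {
        GuardedWaitSketch sketch = new GuardedWaitSketch();
        new Thread(() -> sketch.markDirty("datasource1")).start();
        // Prints [datasource1] once the producer thread has run.
        System.out.println(sketch.takeBatch());
    }
}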

Example 5 with DruidDataSource

Use of io.druid.client.DruidDataSource in project druid by druid-io.

The class SQLMetadataSegmentManager, method removeSegment:

@Override
public boolean removeSegment(String ds, final String segmentID) {
    try {
        connector.getDBI().withHandle(new HandleCallback<Void>() {

            @Override
            public Void withHandle(Handle handle) throws Exception {
                handle.createStatement(String.format("UPDATE %s SET used=false WHERE id = :segmentID", getSegmentsTable())).bind("segmentID", segmentID).execute();
                return null;
            }
        });
        ConcurrentHashMap<String, DruidDataSource> dataSourceMap = dataSources.get();
        if (!dataSourceMap.containsKey(ds)) {
            log.warn("Cannot find datasource %s", ds);
            return false;
        }
        DruidDataSource dataSource = dataSourceMap.get(ds);
        dataSource.removePartition(segmentID);
        if (dataSource.isEmpty()) {
            dataSourceMap.remove(ds);
        }
    } catch (Exception e) {
        log.error(e, e.toString());
        return false;
    }
    return true;
}
Also used : DruidDataSource(io.druid.client.DruidDataSource) SQLException(java.sql.SQLException) IOException(java.io.IOException) Handle(org.skife.jdbi.v2.Handle)
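
Note that removeSegment is a soft delete: the segment's row stays in the metadata table with used=false, and only the in-memory snapshot is pruned (the datasource entry itself is dropped once its last segment is removed). A hedged sketch of a call site; the manager variable and the segment-identifier literal are illustrative only, since real identifiers come from DataSegment.getIdentifier():

    // Hypothetical call site: 'manager' would be an injected
    // SQLMetadataSegmentManager in real coordinator code.
    String segmentId = "dataSource1_2010-01-01T00:00:00.000Z_2010-01-02T00:00:00.000Z_v1";
    if (!manager.removeSegment("dataSource1", segmentId)) {
        // Either the datasource was unknown or the DB update failed.
        log.warn("Could not disable segment [%s]", segmentId);
    }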

Aggregations

DruidDataSource (io.druid.client.DruidDataSource): 26 uses
DataSegment (io.druid.timeline.DataSegment): 19 uses
Map (java.util.Map): 9 uses
GET (javax.ws.rs.GET): 9 uses
Produces (javax.ws.rs.Produces): 9 uses
Interval (org.joda.time.Interval): 9 uses
DruidServer (io.druid.client.DruidServer): 8 uses
Path (javax.ws.rs.Path): 8 uses
HashMap (java.util.HashMap): 7 uses
Set (java.util.Set): 6 uses
Response (javax.ws.rs.core.Response): 6 uses
Test (org.junit.Test): 6 uses
ResourceFilters (com.sun.jersey.spi.container.ResourceFilters): 5 uses
List (java.util.List): 5 uses
ImmutableMap (com.google.common.collect.ImmutableMap): 4 uses
AuthConfig (io.druid.server.security.AuthConfig): 4 uses
AuthorizationInfo (io.druid.server.security.AuthorizationInfo): 4 uses
ImmutableDruidDataSource (io.druid.client.ImmutableDruidDataSource): 3 uses
ImmutableDruidServer (io.druid.client.ImmutableDruidServer): 3 uses
Access (io.druid.server.security.Access): 3 uses