
Example 1 with DruidServerMetadata

Use of io.druid.server.coordination.DruidServerMetadata in project druid by druid-io.

The class BatchServerInventoryViewTest, method setUp.

@Before
public void setUp() throws Exception {
    testingCluster = new TestingCluster(1);
    testingCluster.start();
    cf = CuratorFrameworkFactory.builder()
            .connectString(testingCluster.getConnectString())
            .retryPolicy(new ExponentialBackoffRetry(1, 10))
            .compressionProvider(new PotentiallyGzippedCompressionProvider(true))
            .build();
    cf.start();
    cf.blockUntilConnected();
    cf.create().creatingParentsIfNeeded().forPath(testBasePath);
    jsonMapper = new DefaultObjectMapper();
    announcer = new Announcer(cf, MoreExecutors.sameThreadExecutor());
    announcer.start();
    segmentAnnouncer = new BatchDataSegmentAnnouncer(new DruidServerMetadata("id", "host", Long.MAX_VALUE, "type", "tier", 0), new BatchDataSegmentAnnouncerConfig() {

        @Override
        public int getSegmentsPerNode() {
            return 50;
        }
    }, new ZkPathsConfig() {

        @Override
        public String getBase() {
            return testBasePath;
        }
    }, announcer, jsonMapper);
    segmentAnnouncer.start();
    testSegments = Sets.newConcurrentHashSet();
    for (int i = 0; i < INITIAL_SEGMENTS; i++) {
        testSegments.add(makeSegment(i));
    }
    batchServerInventoryView = new BatchServerInventoryView(new ZkPathsConfig() {

        @Override
        public String getBase() {
            return testBasePath;
        }
    }, cf, jsonMapper, Predicates.<Pair<DruidServerMetadata, DataSegment>>alwaysTrue());
    batchServerInventoryView.start();
    inventoryUpdateCounter.set(0);
    filteredBatchServerInventoryView = new BatchServerInventoryView(new ZkPathsConfig() {

        @Override
        public String getBase() {
            return testBasePath;
        }
    }, cf, jsonMapper, new Predicate<Pair<DruidServerMetadata, DataSegment>>() {

        @Override
        public boolean apply(@Nullable Pair<DruidServerMetadata, DataSegment> input) {
            return input.rhs.getInterval().getStart().isBefore(SEGMENT_INTERVAL_START.plusDays(INITIAL_SEGMENTS));
        }
    }) {

        @Override
        protected DruidServer addInnerInventory(DruidServer container, String inventoryKey, Set<DataSegment> inventory) {
            DruidServer server = super.addInnerInventory(container, inventoryKey, inventory);
            inventoryUpdateCounter.incrementAndGet();
            return server;
        }
    };
    filteredBatchServerInventoryView.start();
}
Also used : BatchServerInventoryView(io.druid.client.BatchServerInventoryView) ExponentialBackoffRetry(org.apache.curator.retry.ExponentialBackoffRetry) BatchDataSegmentAnnouncerConfig(io.druid.server.initialization.BatchDataSegmentAnnouncerConfig) DruidServer(io.druid.client.DruidServer) DruidServerMetadata(io.druid.server.coordination.DruidServerMetadata) PotentiallyGzippedCompressionProvider(io.druid.curator.PotentiallyGzippedCompressionProvider) DataSegment(io.druid.timeline.DataSegment) Predicate(com.google.common.base.Predicate) TestingCluster(org.apache.curator.test.TestingCluster) BatchDataSegmentAnnouncer(io.druid.server.coordination.BatchDataSegmentAnnouncer) Announcer(io.druid.curator.announcement.Announcer) ZkPathsConfig(io.druid.server.initialization.ZkPathsConfig) DefaultObjectMapper(io.druid.jackson.DefaultObjectMapper) Nullable(javax.annotation.Nullable) Pair(io.druid.java.util.common.Pair) Before(org.junit.Before)
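
Every example on this page constructs DruidServerMetadata positionally, which makes the arguments easy to misread. As a reading aid, here is the construction from the setUp above with each argument annotated; the meanings are inferred from the field names in io.druid 0.9.x and should be verified against your source tree:

// Annotated sketch; argument meanings assumed from io.druid 0.9.x field names.
DruidServerMetadata metadata = new DruidServerMetadata(
    "id",            // name: unique identifier for the server
    "host",          // host: host (and port) the server announces itself under
    Long.MAX_VALUE,  // maxSize: maximum total segment bytes the server may hold
    "type",          // type: server type, e.g. "historical" or "realtime"
    "tier",          // tier: tier consulted by load/drop rules
    0                // priority: query priority relative to others in the tier
);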

Example 2 with DruidServerMetadata

Use of io.druid.server.coordination.DruidServerMetadata in project druid by druid-io.

The class DruidCoordinatorTest, method testMoveSegment.

@Test
public void testMoveSegment() throws Exception {
    loadQueuePeon = EasyMock.createNiceMock(LoadQueuePeon.class);
    EasyMock.expect(loadQueuePeon.getLoadQueueSize()).andReturn(1L);
    EasyMock.replay(loadQueuePeon);
    segment = EasyMock.createNiceMock(DataSegment.class);
    EasyMock.replay(segment);
    scheduledExecutorFactory = EasyMock.createNiceMock(ScheduledExecutorFactory.class);
    EasyMock.replay(scheduledExecutorFactory);
    EasyMock.replay(metadataRuleManager);
    EasyMock.expect(druidServer.toImmutableDruidServer()).andReturn(
            new ImmutableDruidServer(
                    new DruidServerMetadata("from", null, 5L, null, null, 0),
                    1L,
                    null,
                    ImmutableMap.of("dummySegment", segment))).atLeastOnce();
    EasyMock.replay(druidServer);
    druidServer2 = EasyMock.createMock(DruidServer.class);
    EasyMock.expect(druidServer2.toImmutableDruidServer()).andReturn(
            new ImmutableDruidServer(
                    new DruidServerMetadata("to", null, 5L, null, null, 0),
                    1L,
                    null,
                    ImmutableMap.of("dummySegment2", segment))).atLeastOnce();
    EasyMock.replay(druidServer2);
    loadManagementPeons.put("from", loadQueuePeon);
    loadManagementPeons.put("to", loadQueuePeon);
    EasyMock.expect(serverInventoryView.getInventoryManagerConfig()).andReturn(new InventoryManagerConfig() {

        @Override
        public String getContainerPath() {
            return "";
        }

        @Override
        public String getInventoryPath() {
            return "";
        }
    });
    EasyMock.replay(serverInventoryView);
    coordinator.moveSegment(druidServer.toImmutableDruidServer(), druidServer2.toImmutableDruidServer(), "dummySegment", null);
    EasyMock.verify(druidServer);
    EasyMock.verify(druidServer2);
    EasyMock.verify(loadQueuePeon);
    EasyMock.verify(serverInventoryView);
    EasyMock.verify(metadataRuleManager);
}
Also used : ScheduledExecutorFactory(io.druid.java.util.common.concurrent.ScheduledExecutorFactory) ImmutableDruidServer(io.druid.client.ImmutableDruidServer) DruidServer(io.druid.client.DruidServer) InventoryManagerConfig(io.druid.curator.inventory.InventoryManagerConfig) DruidServerMetadata(io.druid.server.coordination.DruidServerMetadata) DataSegment(io.druid.timeline.DataSegment) Test(org.junit.Test)
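
Because loadQueuePeon is a single nice mock shared by both the "from" and "to" entries, the test mainly verifies that moveSegment completes and consumes getLoadQueueSize. A stricter variant could give the destination its own mock and assert that a load is actually enqueued. This is a sketch only: the loadSegment(DataSegment, LoadPeonCallback) signature is assumed from the LoadQueuePeon API of this era, and the coordinator internals may require further expectations:

// Hedged sketch: loadSegment(DataSegment, LoadPeonCallback) is assumed from
// the LoadQueuePeon API of this Druid version; verify before relying on it.
LoadQueuePeon toPeon = EasyMock.createMock(LoadQueuePeon.class);
EasyMock.expect(toPeon.getLoadQueueSize()).andReturn(0L).anyTimes();
toPeon.loadSegment(EasyMock.<DataSegment>anyObject(), EasyMock.<LoadPeonCallback>anyObject());
EasyMock.expectLastCall().once();
EasyMock.replay(toPeon);
loadManagementPeons.put("to", toPeon);
// After coordinator.moveSegment(...), EasyMock.verify(toPeon) fails the test
// if no load was enqueued on the destination peon.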

Example 3 with DruidServerMetadata

Use of io.druid.server.coordination.DruidServerMetadata in project druid by druid-io.

The class CostBalancerStrategyTest, method setupDummyCluster.

/**
   * Create a Druid cluster with serverCount servers holding maxSegments segments each, plus one
   * server holding two fewer segments (98 when maxSegments is 100). The cost balancer strategy
   * should assign the next segment to the server with the fewest segments.
   */
public static List<ServerHolder> setupDummyCluster(int serverCount, int maxSegments) {
    List<ServerHolder> serverHolderList = Lists.newArrayList();
    // Each server holds maxSegments segments
    for (int i = 0; i < serverCount; i++) {
        LoadQueuePeonTester fromPeon = new LoadQueuePeonTester();
        Map<String, DataSegment> segments = Maps.newHashMap();
        for (int j = 0; j < maxSegments; j++) {
            DataSegment segment = getSegment(j);
            segments.put(segment.getIdentifier(), segment);
        }
        serverHolderList.add(new ServerHolder(
                new ImmutableDruidServer(
                        new DruidServerMetadata("DruidServer_Name_" + i, "localhost", 10000000L, "hot", "hot", 1),
                        3000L,
                        ImmutableMap.of("DUMMY", EasyMock.createMock(ImmutableDruidDataSource.class)),
                        ImmutableMap.copyOf(segments)),
                fromPeon));
    }
    // The best server for the next segment assignment holds maxSegments - 2 segments
    LoadQueuePeonTester fromPeon = new LoadQueuePeonTester();
    ImmutableDruidServer druidServer = EasyMock.createMock(ImmutableDruidServer.class);
    EasyMock.expect(druidServer.getName()).andReturn("BEST_SERVER").anyTimes();
    EasyMock.expect(druidServer.getCurrSize()).andReturn(3000L).anyTimes();
    EasyMock.expect(druidServer.getMaxSize()).andReturn(10000000L).anyTimes();
    EasyMock.expect(druidServer.getSegment(EasyMock.<String>anyObject())).andReturn(null).anyTimes();
    Map<String, DataSegment> segments = Maps.newHashMap();
    for (int j = 0; j < (maxSegments - 2); j++) {
        DataSegment segment = getSegment(j);
        segments.put(segment.getIdentifier(), segment);
        EasyMock.expect(druidServer.getSegment(segment.getIdentifier())).andReturn(segment).anyTimes();
    }
    EasyMock.expect(druidServer.getSegments()).andReturn(segments).anyTimes();
    EasyMock.replay(druidServer);
    serverHolderList.add(new ServerHolder(druidServer, fromPeon));
    return serverHolderList;
}
Also used : DruidServerMetadata(io.druid.server.coordination.DruidServerMetadata) DataSegment(io.druid.timeline.DataSegment) ImmutableDruidServer(io.druid.client.ImmutableDruidServer)
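
For context, a sketch of how the surrounding test typically drives this helper; the CostBalancerStrategy constructor and the findNewSegmentHomeReplicator signature are assumptions for this Druid era and should be checked against the actual test class:

// Hedged usage sketch; constructor and method signatures are assumptions.
List<ServerHolder> holders = setupDummyCluster(10, 100);
DataSegment proposed = getSegment(1000);  // a segment not yet on any server
BalancerStrategy strategy = new CostBalancerStrategy(
    MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(1)));
ServerHolder best = strategy.findNewSegmentHomeReplicator(proposed, holders);
// BEST_SERVER holds two fewer segments than every other server, so the cost
// function should rank it cheapest.
Assert.assertEquals("BEST_SERVER", best.getServer().getName());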

Example 4 with DruidServerMetadata

Use of io.druid.server.coordination.DruidServerMetadata in project druid by druid-io.

The class DiskNormalizedCostBalancerStrategyTest, method setupDummyCluster.

/**
   * Create a Druid cluster with serverCount servers holding maxSegments segments each, plus one
   * server with the same number of segments but ten times the max size. The disk-normalized
   * cost balancer strategy should assign the next segment to the server with the most free space.
   */
public static List<ServerHolder> setupDummyCluster(int serverCount, int maxSegments) {
    List<ServerHolder> serverHolderList = Lists.newArrayList();
    // Each server holds maxSegments segments
    for (int i = 0; i < serverCount; i++) {
        LoadQueuePeonTester fromPeon = new LoadQueuePeonTester();
        Map<String, DataSegment> segments = Maps.newHashMap();
        for (int j = 0; j < maxSegments; j++) {
            DataSegment segment = getSegment(j);
            segments.put(segment.getIdentifier(), segment);
        }
        serverHolderList.add(new ServerHolder(
                new ImmutableDruidServer(
                        new DruidServerMetadata("DruidServer_Name_" + i, "localhost", 10000000L, "hot", "hot", 1),
                        3000L,
                        ImmutableMap.of("DUMMY", EasyMock.createMock(ImmutableDruidDataSource.class)),
                        ImmutableMap.copyOf(segments)),
                fromPeon));
    }
    // The best server for the next segment assignment has a greater max size
    LoadQueuePeonTester fromPeon = new LoadQueuePeonTester();
    ImmutableDruidServer druidServer = EasyMock.createMock(ImmutableDruidServer.class);
    EasyMock.expect(druidServer.getName()).andReturn("BEST_SERVER").anyTimes();
    EasyMock.expect(druidServer.getCurrSize()).andReturn(3000L).anyTimes();
    EasyMock.expect(druidServer.getMaxSize()).andReturn(100000000L).anyTimes();
    EasyMock.expect(druidServer.getSegment(EasyMock.<String>anyObject())).andReturn(null).anyTimes();
    Map<String, DataSegment> segments = Maps.newHashMap();
    for (int j = 0; j < maxSegments; j++) {
        DataSegment segment = getSegment(j);
        segments.put(segment.getIdentifier(), segment);
        EasyMock.expect(druidServer.getSegment(segment.getIdentifier())).andReturn(segment).anyTimes();
    }
    EasyMock.expect(druidServer.getSegments()).andReturn(segments).anyTimes();
    EasyMock.replay(druidServer);
    serverHolderList.add(new ServerHolder(druidServer, fromPeon));
    return serverHolderList;
}
Also used : DruidServerMetadata(io.druid.server.coordination.DruidServerMetadata) DataSegment(io.druid.timeline.DataSegment) ImmutableDruidServer(io.druid.client.ImmutableDruidServer)
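
The disk-normalized variant weights the cost by each server's disk utilization, so a server with far more headroom wins even when segment counts are equal (the exact normalization is version-specific). A usage sketch mirroring the one above; constructor details are again assumptions:

// Hedged usage sketch; constructor details are assumptions for this era.
List<ServerHolder> holders = setupDummyCluster(10, 100);
DataSegment proposed = getSegment(1000);
BalancerStrategy strategy = new DiskNormalizedCostBalancerStrategy(
    MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(1)));
ServerHolder best = strategy.findNewSegmentHomeReplicator(proposed, holders);
// BEST_SERVER holds as many segments as the others but has 10x the max size,
// so the utilization-weighted cost should favor it.
Assert.assertEquals("BEST_SERVER", best.getServer().getName());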

Example 5 with DruidServerMetadata

Use of io.druid.server.coordination.DruidServerMetadata in project druid by druid-io.

The class DruidSchema, method start.

@LifecycleStart
public void start() {
    cacheExec.submit(new Runnable() {

        @Override
        public void run() {
            try {
                while (!Thread.currentThread().isInterrupted()) {
                    final Set<String> dataSources = Sets.newHashSet();
                    try {
                        synchronized (lock) {
                            final long nextRefresh = new DateTime(lastRefresh).plus(config.getMetadataRefreshPeriod()).getMillis();
                            while (!(isServerViewInitialized && !dataSourcesNeedingRefresh.isEmpty() && (refreshImmediately || nextRefresh < System.currentTimeMillis()))) {
                                lock.wait(Math.max(1, nextRefresh - System.currentTimeMillis()));
                            }
                            dataSources.addAll(dataSourcesNeedingRefresh);
                            dataSourcesNeedingRefresh.clear();
                            lastRefresh = System.currentTimeMillis();
                            refreshImmediately = false;
                        }
                        // Refresh dataSources.
                        for (final String dataSource : dataSources) {
                            log.debug("Refreshing metadata for dataSource[%s].", dataSource);
                            final long startTime = System.currentTimeMillis();
                            final DruidTable druidTable = computeTable(dataSource);
                            if (druidTable == null) {
                                if (tables.remove(dataSource) != null) {
                                    log.info("Removed dataSource[%s] from the list of active dataSources.", dataSource);
                                }
                            } else {
                                tables.put(dataSource, druidTable);
                                log.info("Refreshed metadata for dataSource[%s] in %,dms.", dataSource, System.currentTimeMillis() - startTime);
                            }
                        }
                        initializationLatch.countDown();
                    } catch (InterruptedException e) {
                        // Rethrow so the outer catch can exit the loop.
                        throw e;
                    } catch (Exception e) {
                        log.warn(e, "Metadata refresh failed for dataSources[%s], trying again soon.", Joiner.on(", ").join(dataSources));
                        synchronized (lock) {
                            // Add dataSources back to the refresh list.
                            dataSourcesNeedingRefresh.addAll(dataSources);
                            lock.notifyAll();
                        }
                    }
                }
            } catch (InterruptedException e) {
            // Just exit.
            } catch (Throwable e) {
                // Throwables that fall out to here (not caught by an inner try/catch) are potentially gnarly, like
                // OOMEs. Anyway, let's just emit an alert and stop refreshing metadata.
                log.makeAlert(e, "Metadata refresh failed permanently").emit();
                throw e;
            } finally {
                log.info("Metadata refresh stopped.");
            }
        }
    });
    serverView.registerSegmentCallback(MoreExecutors.sameThreadExecutor(), new ServerView.SegmentCallback() {

        @Override
        public ServerView.CallbackAction segmentViewInitialized() {
            synchronized (lock) {
                isServerViewInitialized = true;
                lock.notifyAll();
            }
            return ServerView.CallbackAction.CONTINUE;
        }

        @Override
        public ServerView.CallbackAction segmentAdded(DruidServerMetadata server, DataSegment segment) {
            synchronized (lock) {
                dataSourcesNeedingRefresh.add(segment.getDataSource());
                if (!tables.containsKey(segment.getDataSource())) {
                    refreshImmediately = true;
                }
                lock.notifyAll();
            }
            return ServerView.CallbackAction.CONTINUE;
        }

        @Override
        public ServerView.CallbackAction segmentRemoved(DruidServerMetadata server, DataSegment segment) {
            synchronized (lock) {
                dataSourcesNeedingRefresh.add(segment.getDataSource());
                lock.notifyAll();
            }
            return ServerView.CallbackAction.CONTINUE;
        }
    });
    serverView.registerServerCallback(MoreExecutors.sameThreadExecutor(), new ServerView.ServerCallback() {

        @Override
        public ServerView.CallbackAction serverRemoved(DruidServer server) {
            final List<String> dataSourceNames = Lists.newArrayList();
            for (DruidDataSource druidDataSource : server.getDataSources()) {
                dataSourceNames.add(druidDataSource.getName());
            }
            synchronized (lock) {
                dataSourcesNeedingRefresh.addAll(dataSourceNames);
                lock.notifyAll();
            }
            return ServerView.CallbackAction.CONTINUE;
        }
    });
}
Also used : EnumSet(java.util.EnumSet) Set(java.util.Set) DruidTable(io.druid.sql.calcite.table.DruidTable) DruidServer(io.druid.client.DruidServer) DruidServerMetadata(io.druid.server.coordination.DruidServerMetadata) DataSegment(io.druid.timeline.DataSegment) DruidDataSource(io.druid.client.DruidDataSource) DateTime(org.joda.time.DateTime) ServerView(io.druid.client.ServerView) TimelineServerView(io.druid.client.TimelineServerView) List(java.util.List) LifecycleStart(io.druid.java.util.common.lifecycle.LifecycleStart)
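
Stripped of the Druid types, the refresh thread above is a standard condition-variable pattern: producers (the segment and server callbacks) add work under the lock and notifyAll, while the consumer waits until work exists and the refresh period has elapsed. A minimal plain-Java distillation of that handshake (fields and methods of a hypothetical holder class; the refreshImmediately shortcut is omitted for brevity):

// Plain-Java distillation of the coordination pattern above; no Druid types.
private final Object lock = new Object();
private final Set<String> pending = new HashSet<>();
private final long refreshPeriodMillis = 60_000L;

// Consumer: block until there is work AND the refresh period has elapsed.
void refreshLoop() throws InterruptedException {
    long lastRefresh = 0L;
    while (!Thread.currentThread().isInterrupted()) {
        Set<String> toRefresh;
        synchronized (lock) {
            while (pending.isEmpty()
                    || System.currentTimeMillis() < lastRefresh + refreshPeriodMillis) {
                long remaining = lastRefresh + refreshPeriodMillis - System.currentTimeMillis();
                lock.wait(Math.max(1, remaining));
            }
            toRefresh = new HashSet<>(pending);
            pending.clear();
            lastRefresh = System.currentTimeMillis();
        }
        // ... do the slow refresh of toRefresh outside the lock, re-adding
        // entries to pending on failure, exactly as DruidSchema does ...
    }
}

// Producer (e.g. a segment callback): enqueue work and wake the consumer.
void markNeedsRefresh(String dataSource) {
    synchronized (lock) {
        pending.add(dataSource);
        lock.notifyAll();
    }
}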

Aggregations

DruidServerMetadata (io.druid.server.coordination.DruidServerMetadata): 14 usages
DataSegment (io.druid.timeline.DataSegment): 12 usages
DruidServer (io.druid.client.DruidServer): 5 usages
Test (org.junit.Test): 4 usages
ImmutableDruidServer (io.druid.client.ImmutableDruidServer): 3 usages
Pair (io.druid.java.util.common.Pair): 3 usages
BatchDataSegmentAnnouncer (io.druid.server.coordination.BatchDataSegmentAnnouncer): 3 usages
BatchDataSegmentAnnouncerConfig (io.druid.server.initialization.BatchDataSegmentAnnouncerConfig): 3 usages
ZkPathsConfig (io.druid.server.initialization.ZkPathsConfig): 3 usages
Interval (org.joda.time.Interval): 3 usages
ServerView (io.druid.client.ServerView): 2 usages
PotentiallyGzippedCompressionProvider (io.druid.curator.PotentiallyGzippedCompressionProvider): 2 usages
Announcer (io.druid.curator.announcement.Announcer): 2 usages
DefaultObjectMapper (io.druid.jackson.DefaultObjectMapper): 2 usages
Set (java.util.Set): 2 usages
CountDownLatch (java.util.concurrent.CountDownLatch): 2 usages
ExponentialBackoffRetry (org.apache.curator.retry.ExponentialBackoffRetry): 2 usages
TestingCluster (org.apache.curator.test.TestingCluster): 2 usages
DateTime (org.joda.time.DateTime): 2 usages
Before (org.junit.Before): 2 usages