
Example 11 with DruidServerMetadata

Use of io.druid.server.coordination.DruidServerMetadata in project druid by druid-io.

From the class BatchServerInventoryViewTest, method testRunWithFilterCallback.

@Test
public void testRunWithFilterCallback() throws Exception {
    final CountDownLatch removeCallbackLatch = new CountDownLatch(1);
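    // Announce the initial segments and wait until the filtered inventory view has synced them.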
    segmentAnnouncer.announceSegments(testSegments);
    waitForSync(filteredBatchServerInventoryView, testSegments);
    DruidServer server = Iterables.get(filteredBatchServerInventoryView.getInventory(), 0);
    Set<DataSegment> segments = Sets.newHashSet(server.getSegments().values());
    Assert.assertEquals(testSegments, segments);
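    // Strict mock: the callback must see exactly one add and one remove for the matching segment;
    // the comparator below treats two segments as equal when their intervals match.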
    ServerView.SegmentCallback callback = EasyMock.createStrictMock(ServerView.SegmentCallback.class);
    Comparator<DataSegment> dataSegmentComparator = new Comparator<DataSegment>() {

        @Override
        public int compare(DataSegment o1, DataSegment o2) {
            return o1.getInterval().equals(o2.getInterval()) ? 0 : -1;
        }
    };
    EasyMock.expect(
        callback.segmentAdded(
            EasyMock.<DruidServerMetadata>anyObject(),
            EasyMock.cmp(makeSegment(INITIAL_SEGMENTS + 2), dataSegmentComparator, LogicalOperator.EQUAL)
        )
    ).andReturn(ServerView.CallbackAction.CONTINUE).times(1);
    EasyMock.expect(
        callback.segmentRemoved(
            EasyMock.<DruidServerMetadata>anyObject(),
            EasyMock.cmp(makeSegment(INITIAL_SEGMENTS + 2), dataSegmentComparator, LogicalOperator.EQUAL)
        )
    ).andAnswer(new IAnswer<ServerView.CallbackAction>() {

        @Override
        public ServerView.CallbackAction answer() throws Throwable {
            removeCallbackLatch.countDown();
            return ServerView.CallbackAction.CONTINUE;
        }
    }).times(1);
    EasyMock.replay(callback);
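    // Register the callback with a filter that only passes the segment whose interval starts
    // INITIAL_SEGMENTS + 2 days after SEGMENT_INTERVAL_START.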
    filteredBatchServerInventoryView.registerSegmentCallback(MoreExecutors.sameThreadExecutor(), callback, new Predicate<Pair<DruidServerMetadata, DataSegment>>() {

        @Override
        public boolean apply(@Nullable Pair<DruidServerMetadata, DataSegment> input) {
            return input.rhs.getInterval().getStart().equals(SEGMENT_INTERVAL_START.plusDays(INITIAL_SEGMENTS + 2));
        }
    });
    DataSegment segment2 = makeSegment(INITIAL_SEGMENTS + 2);
    segmentAnnouncer.announceSegment(segment2);
    testSegments.add(segment2);
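    // oldSegment does not match the filter predicate, so announcing and unannouncing it
    // must not trigger the mocked callback (the strict mock would fail the test if it did).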
    DataSegment oldSegment = makeSegment(-1);
    segmentAnnouncer.announceSegment(oldSegment);
    testSegments.add(oldSegment);
    segmentAnnouncer.unannounceSegment(oldSegment);
    testSegments.remove(oldSegment);
    waitForSync(filteredBatchServerInventoryView, testSegments);
    segmentAnnouncer.unannounceSegment(segment2);
    testSegments.remove(segment2);
    waitForSync(filteredBatchServerInventoryView, testSegments);
    timing.forWaiting().awaitLatch(removeCallbackLatch);
    EasyMock.verify(callback);
}
Also used: DruidServer (io.druid.client.DruidServer), DruidServerMetadata (io.druid.server.coordination.DruidServerMetadata), CountDownLatch (java.util.concurrent.CountDownLatch), DataSegment (io.druid.timeline.DataSegment), Comparator (java.util.Comparator), IAnswer (org.easymock.IAnswer), ServerView (io.druid.client.ServerView), Pair (io.druid.java.util.common.Pair), Test (org.junit.Test)

Example 12 with DruidServerMetadata

Use of io.druid.server.coordination.DruidServerMetadata in project druid by druid-io.

From the class BatchServerInventoryViewTest, method testSameTimeZnode.

@Test
public void testSameTimeZnode() throws Exception {
    final int numThreads = INITIAL_SEGMENTS / 10;
    final ListeningExecutorService executor = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(numThreads));
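    // Announce the initial segments and wait for the batch inventory view to sync before the concurrent announcers start.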
    segmentAnnouncer.announceSegments(testSegments);
    waitForSync(batchServerInventoryView, testSegments);
    DruidServer server = Iterables.get(batchServerInventoryView.getInventory(), 0);
    final Set<DataSegment> segments = Sets.newHashSet(server.getSegments().values());
    Assert.assertEquals(testSegments, segments);
    final CountDownLatch latch = new CountDownLatch(numThreads);
    final List<ListenableFuture<BatchDataSegmentAnnouncer>> futures = new ArrayList<>();
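    // Each worker thread creates its own BatchDataSegmentAnnouncer and announces a disjoint slice of new
    // segments; the latch below makes all threads announce at roughly the same time so their batches land
    // in znodes created with the same timestamp.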
    for (int i = 0; i < numThreads; ++i) {
        final int ii = i;
        futures.add(executor.submit(new Callable<BatchDataSegmentAnnouncer>() {

            @Override
            public BatchDataSegmentAnnouncer call() {
                BatchDataSegmentAnnouncer segmentAnnouncer = new BatchDataSegmentAnnouncer(new DruidServerMetadata("id", "host", Long.MAX_VALUE, "type", "tier", 0), new BatchDataSegmentAnnouncerConfig() {

                    @Override
                    public int getSegmentsPerNode() {
                        return 50;
                    }
                }, new ZkPathsConfig() {

                    @Override
                    public String getBase() {
                        return testBasePath;
                    }
                }, announcer, jsonMapper);
                segmentAnnouncer.start();
                List<DataSegment> segments = new ArrayList<DataSegment>();
                try {
                    for (int j = 0; j < INITIAL_SEGMENTS / numThreads; ++j) {
                        segments.add(makeSegment(INITIAL_SEGMENTS + ii + numThreads * j));
                    }
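                    // Wait until every thread has built its segment list so all announcements happen together.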
                    latch.countDown();
                    latch.await();
                    segmentAnnouncer.announceSegments(segments);
                    testSegments.addAll(segments);
                } catch (Exception e) {
                    throw Throwables.propagate(e);
                }
                return segmentAnnouncer;
            }
        }));
    }
    final List<BatchDataSegmentAnnouncer> announcers = Futures.<BatchDataSegmentAnnouncer>allAsList(futures).get();
    Assert.assertEquals(INITIAL_SEGMENTS * 2, testSegments.size());
    waitForSync(batchServerInventoryView, testSegments);
    Assert.assertEquals(testSegments, Sets.newHashSet(server.getSegments().values()));
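    // Unannounce the segments at offsets 100..(100 + INITIAL_SEGMENTS - 1) through the outer announcer
    // and verify the inventory view syncs back down.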
    for (int i = 0; i < INITIAL_SEGMENTS; ++i) {
        final DataSegment segment = makeSegment(100 + i);
        segmentAnnouncer.unannounceSegment(segment);
        testSegments.remove(segment);
    }
    waitForSync(batchServerInventoryView, testSegments);
    Assert.assertEquals(testSegments, Sets.newHashSet(server.getSegments().values()));
}
Also used: ArrayList (java.util.ArrayList), BatchDataSegmentAnnouncerConfig (io.druid.server.initialization.BatchDataSegmentAnnouncerConfig), DruidServer (io.druid.client.DruidServer), DruidServerMetadata (io.druid.server.coordination.DruidServerMetadata), CountDownLatch (java.util.concurrent.CountDownLatch), DataSegment (io.druid.timeline.DataSegment), Callable (java.util.concurrent.Callable), ExpectedException (org.junit.rules.ExpectedException), ZkPathsConfig (io.druid.server.initialization.ZkPathsConfig), ListenableFuture (com.google.common.util.concurrent.ListenableFuture), ListeningExecutorService (com.google.common.util.concurrent.ListeningExecutorService), BatchDataSegmentAnnouncer (io.druid.server.coordination.BatchDataSegmentAnnouncer), Test (org.junit.Test)

Example 13 with DruidServerMetadata

Use of io.druid.server.coordination.DruidServerMetadata in project druid by druid-io.

From the class ImmutableSegmentLoadInfoTest, method testSerde.

@Test
public void testSerde() throws IOException {
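    // Round-trip an ImmutableSegmentLoadInfo (a segment plus the DruidServerMetadata of the servers holding it)
    // through Jackson and check that the deserialized value equals the original.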
    ImmutableSegmentLoadInfo segmentLoadInfo = new ImmutableSegmentLoadInfo(
        new DataSegment("test_ds", new Interval("2011-04-01/2011-04-02"), "v1", null, null, null, NoneShardSpec.instance(), 0, 0),
        Sets.newHashSet(new DruidServerMetadata("a", "host", 10, "type", "tier", 1))
    );
    ImmutableSegmentLoadInfo serde = mapper.readValue(mapper.writeValueAsBytes(segmentLoadInfo), ImmutableSegmentLoadInfo.class);
    Assert.assertEquals(segmentLoadInfo, serde);
}
Also used: ImmutableSegmentLoadInfo (io.druid.client.ImmutableSegmentLoadInfo), DruidServerMetadata (io.druid.server.coordination.DruidServerMetadata), DataSegment (io.druid.timeline.DataSegment), Interval (org.joda.time.Interval), Test (org.junit.Test)
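
A minimal standalone sketch (not part of the Druid test suite; the method name is hypothetical): DruidServerMetadata itself can be round-tripped the same way, assuming it is directly Jackson-serializable with Druid's DefaultObjectMapper (io.druid.jackson.DefaultObjectMapper), which extends com.fasterxml.jackson.databind.ObjectMapper, and that it defines equals() as the set-based comparison in testSerde suggests.

@Test
public void testDruidServerMetadataSerde() throws IOException {
    // Hypothetical round-trip mirroring testSerde above: serialize and deserialize the metadata alone.
    ObjectMapper mapper = new DefaultObjectMapper();
    DruidServerMetadata metadata = new DruidServerMetadata("a", "host", 10, "type", "tier", 1);
    DruidServerMetadata roundTripped = mapper.readValue(mapper.writeValueAsBytes(metadata), DruidServerMetadata.class);
    Assert.assertEquals(metadata, roundTripped);
}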

Example 14 with DruidServerMetadata

Use of io.druid.server.coordination.DruidServerMetadata in project druid by druid-io.

From the class TierSelectorStrategyTest, method testTierSelectorStrategy.

private void testTierSelectorStrategy(TierSelectorStrategy tierSelectorStrategy, QueryableDruidServer... expectedSelection) {
    final ServerSelector serverSelector = new ServerSelector(
        new DataSegment("test", new Interval("2013-01-01/2013-01-02"), new DateTime("2013-01-01").toString(),
            com.google.common.collect.Maps.<String, Object>newHashMap(), Lists.<String>newArrayList(),
            Lists.<String>newArrayList(), NoneShardSpec.instance(), 0, 0L),
        tierSelectorStrategy
    );
    List<QueryableDruidServer> servers = Lists.newArrayList(expectedSelection);
    List<DruidServerMetadata> expectedCandidates = Lists.newArrayList();
    for (QueryableDruidServer server : servers) {
        expectedCandidates.add(server.getServer().getMetadata());
    }
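    // Shuffle before adding the servers so the result does not depend on insertion order.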
    Collections.shuffle(servers);
    for (QueryableDruidServer server : servers) {
        serverSelector.addServerAndUpdateSegment(server, serverSelector.getSegment());
    }
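    // The strategy must pick the first expected server and return candidates in the expected order
    // (all of them for -1, only the top two for 2).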
    Assert.assertEquals(expectedSelection[0], serverSelector.pick());
    Assert.assertEquals(expectedCandidates, serverSelector.getCandidates(-1));
    Assert.assertEquals(expectedCandidates.subList(0, 2), serverSelector.getCandidates(2));
}
Also used: DruidServerMetadata (io.druid.server.coordination.DruidServerMetadata), DataSegment (io.druid.timeline.DataSegment), DateTime (org.joda.time.DateTime), Interval (org.joda.time.Interval)

Aggregations

DruidServerMetadata (io.druid.server.coordination.DruidServerMetadata): 14 usages
DataSegment (io.druid.timeline.DataSegment): 12 usages
DruidServer (io.druid.client.DruidServer): 5 usages
Test (org.junit.Test): 4 usages
ImmutableDruidServer (io.druid.client.ImmutableDruidServer): 3 usages
Pair (io.druid.java.util.common.Pair): 3 usages
BatchDataSegmentAnnouncer (io.druid.server.coordination.BatchDataSegmentAnnouncer): 3 usages
BatchDataSegmentAnnouncerConfig (io.druid.server.initialization.BatchDataSegmentAnnouncerConfig): 3 usages
ZkPathsConfig (io.druid.server.initialization.ZkPathsConfig): 3 usages
Interval (org.joda.time.Interval): 3 usages
ServerView (io.druid.client.ServerView): 2 usages
PotentiallyGzippedCompressionProvider (io.druid.curator.PotentiallyGzippedCompressionProvider): 2 usages
Announcer (io.druid.curator.announcement.Announcer): 2 usages
DefaultObjectMapper (io.druid.jackson.DefaultObjectMapper): 2 usages
Set (java.util.Set): 2 usages
CountDownLatch (java.util.concurrent.CountDownLatch): 2 usages
ExponentialBackoffRetry (org.apache.curator.retry.ExponentialBackoffRetry): 2 usages
TestingCluster (org.apache.curator.test.TestingCluster): 2 usages
DateTime (org.joda.time.DateTime): 2 usages
Before (org.junit.Before): 2 usages