
Example 21 with DruidServerMetadata

Use of org.apache.druid.server.coordination.DruidServerMetadata in project druid by druid-io.

The class BatchServerInventoryViewTest, method testRunWithFilterCallback.

@Test
public void testRunWithFilterCallback() throws Exception {
    final CountDownLatch removeCallbackLatch = new CountDownLatch(1);
    segmentAnnouncer.announceSegments(testSegments);
    waitForSync(filteredBatchServerInventoryView, testSegments);
    DruidServer server = Iterables.get(filteredBatchServerInventoryView.getInventory(), 0);
    Set<DataSegment> segments = Sets.newHashSet(server.iterateAllSegments());
    Assert.assertEquals(testSegments, segments);
    ServerView.SegmentCallback callback = EasyMock.createStrictMock(ServerView.SegmentCallback.class);
    Comparator<DataSegment> dataSegmentComparator = Comparator.comparing(DataSegment::getInterval, Comparators.intervalsByStartThenEnd());
    // Expect exactly one add and one remove, both for the single segment
    // that passes the filter below (the INITIAL_SEGMENTS + 2 segment).
    EasyMock.expect(
        callback.segmentAdded(
            EasyMock.anyObject(),
            EasyMock.cmp(makeSegment(INITIAL_SEGMENTS + 2), dataSegmentComparator, LogicalOperator.EQUAL)
        )
    ).andReturn(ServerView.CallbackAction.CONTINUE).times(1);
    EasyMock.expect(
        callback.segmentRemoved(
            EasyMock.anyObject(),
            EasyMock.cmp(makeSegment(INITIAL_SEGMENTS + 2), dataSegmentComparator, LogicalOperator.EQUAL)
        )
    ).andAnswer(new IAnswer<ServerView.CallbackAction>() {

        @Override
        public ServerView.CallbackAction answer() {
            removeCallbackLatch.countDown();
            return ServerView.CallbackAction.CONTINUE;
        }
    }).times(1);
    EasyMock.replay(callback);
    filteredBatchServerInventoryView.registerSegmentCallback(Execs.directExecutor(), callback, new Predicate<Pair<DruidServerMetadata, DataSegment>>() {

        @Override
        public boolean apply(@Nullable Pair<DruidServerMetadata, DataSegment> input) {
            return input.rhs.getInterval().getStart().equals(SEGMENT_INTERVAL_START.plusDays(INITIAL_SEGMENTS + 2));
        }
    });
    DataSegment segment2 = makeSegment(INITIAL_SEGMENTS + 2);
    segmentAnnouncer.announceSegment(segment2);
    testSegments.add(segment2);
    DataSegment oldSegment = makeSegment(-1);
    segmentAnnouncer.announceSegment(oldSegment);
    testSegments.add(oldSegment);
    segmentAnnouncer.unannounceSegment(oldSegment);
    testSegments.remove(oldSegment);
    waitForSync(filteredBatchServerInventoryView, testSegments);
    segmentAnnouncer.unannounceSegment(segment2);
    testSegments.remove(segment2);
    waitForSync(filteredBatchServerInventoryView, testSegments);
    TIMING.forWaiting().awaitLatch(removeCallbackLatch);
    EasyMock.verify(callback);
}
Also used: DruidServer (org.apache.druid.client.DruidServer), DruidServerMetadata (org.apache.druid.server.coordination.DruidServerMetadata), CountDownLatch (java.util.concurrent.CountDownLatch), DataSegment (org.apache.druid.timeline.DataSegment), IAnswer (org.easymock.IAnswer), ServerView (org.apache.druid.client.ServerView), Pair (org.apache.druid.java.util.common.Pair), Test (org.junit.Test)
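
Since Guava's Predicate and EasyMock's IAnswer each have a single abstract method, the same registration can be written without the anonymous classes. A minimal sketch reusing the names from the test above (this is just the lambda form of the same call, not a different API):

filteredBatchServerInventoryView.registerSegmentCallback(
    Execs.directExecutor(),
    callback,
    // Pass through only the segment whose interval starts
    // INITIAL_SEGMENTS + 2 days after SEGMENT_INTERVAL_START.
    input -> input.rhs.getInterval().getStart()
                  .equals(SEGMENT_INTERVAL_START.plusDays(INITIAL_SEGMENTS + 2))
);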

Example 22 with DruidServerMetadata

Use of org.apache.druid.server.coordination.DruidServerMetadata in project druid by druid-io.

The class BatchServerInventoryViewTest, method testSameTimeZnode.

@Test
public void testSameTimeZnode() throws Exception {
    final int numThreads = INITIAL_SEGMENTS / 10;
    final ListeningExecutorService executor = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(numThreads));
    segmentAnnouncer.announceSegments(testSegments);
    waitForSync(batchServerInventoryView, testSegments);
    DruidServer server = Iterables.get(batchServerInventoryView.getInventory(), 0);
    final Set<DataSegment> segments = Sets.newHashSet(server.iterateAllSegments());
    Assert.assertEquals(testSegments, segments);
    final CountDownLatch latch = new CountDownLatch(numThreads);
    final List<ListenableFuture<BatchDataSegmentAnnouncer>> futures = new ArrayList<>();
    for (int i = 0; i < numThreads; ++i) {
        final int ii = i;
        futures.add(executor.submit(new Callable<BatchDataSegmentAnnouncer>() {

            @Override
            public BatchDataSegmentAnnouncer call() {
                BatchDataSegmentAnnouncer segmentAnnouncer = new BatchDataSegmentAnnouncer(new DruidServerMetadata("id", "host", null, Long.MAX_VALUE, ServerType.HISTORICAL, "tier", 0), new BatchDataSegmentAnnouncerConfig() {

                    @Override
                    public int getSegmentsPerNode() {
                        return 50;
                    }
                }, new ZkPathsConfig() {

                    @Override
                    public String getBase() {
                        return TEST_BASE_PATH;
                    }
                }, announcer, jsonMapper);
                List<DataSegment> segments = new ArrayList<>();
                try {
                    for (int j = 0; j < INITIAL_SEGMENTS / numThreads; ++j) {
                        segments.add(makeSegment(INITIAL_SEGMENTS + ii + numThreads * j));
                    }
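                    // Start barrier: wait for every worker thread, then announce
                    // from all threads at once to create same-time znodes.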
                    latch.countDown();
                    latch.await();
                    segmentAnnouncer.announceSegments(segments);
                    testSegments.addAll(segments);
                } catch (Exception e) {
                    throw new RuntimeException(e);
                }
                return segmentAnnouncer;
            }
        }));
    }
    final List<BatchDataSegmentAnnouncer> announcers = Futures.allAsList(futures).get();
    Assert.assertEquals(INITIAL_SEGMENTS * 2, testSegments.size());
    waitForSync(batchServerInventoryView, testSegments);
    Assert.assertEquals(testSegments, Sets.newHashSet(server.iterateAllSegments()));
    for (int i = 0; i < INITIAL_SEGMENTS; ++i) {
        final DataSegment segment = makeSegment(100 + i);
        segmentAnnouncer.unannounceSegment(segment);
        testSegments.remove(segment);
    }
    waitForSync(batchServerInventoryView, testSegments);
    Assert.assertEquals(testSegments, Sets.newHashSet(server.iterateAllSegments()));
}
Also used: ArrayList (java.util.ArrayList), BatchDataSegmentAnnouncerConfig (org.apache.druid.server.initialization.BatchDataSegmentAnnouncerConfig), DruidServer (org.apache.druid.client.DruidServer), DruidServerMetadata (org.apache.druid.server.coordination.DruidServerMetadata), CountDownLatch (java.util.concurrent.CountDownLatch), DataSegment (org.apache.druid.timeline.DataSegment), Callable (java.util.concurrent.Callable), ExpectedException (org.junit.rules.ExpectedException), ZkPathsConfig (org.apache.druid.server.initialization.ZkPathsConfig), ListenableFuture (com.google.common.util.concurrent.ListenableFuture), ListeningExecutorService (com.google.common.util.concurrent.ListeningExecutorService), BatchDataSegmentAnnouncer (org.apache.druid.server.coordination.BatchDataSegmentAnnouncer), Test (org.junit.Test)
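
The CountDownLatch inside call() is a start barrier: each worker counts down and then awaits, so every announceSegments call fires at roughly the same instant and the announcers contend for same-time znodes. The pattern in isolation, as a minimal sketch (doWork() is a hypothetical placeholder for the contended call):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class StartBarrierSketch {

    // Hypothetical stand-in for the contended work (announceSegments above).
    static void doWork() {
    }

    public static void main(String[] args) {
        final int numThreads = 10;
        final CountDownLatch startBarrier = new CountDownLatch(numThreads);
        ExecutorService pool = Executors.newFixedThreadPool(numThreads);
        for (int i = 0; i < numThreads; i++) {
            pool.submit(() -> {
                startBarrier.countDown(); // signal arrival
                startBarrier.await();     // block until the last thread arrives
                doWork();                 // all threads now race together
                return null;
            });
        }
        pool.shutdown();
    }
}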

Example 23 with DruidServerMetadata

Use of org.apache.druid.server.coordination.DruidServerMetadata in project druid by druid-io.

The class ServersResourceTest, method testDruidServerMetadataSerde.

@Test
public void testDruidServerMetadataSerde() throws Exception {
    DruidServerMetadata metadata = new DruidServerMetadata("dummy", "host", null, 1234, ServerType.HISTORICAL, "tier", 1);
    String metadataJson = objectMapper.writeValueAsString(metadata);
    String expected = "{\"name\":\"dummy\",\"host\":\"host\",\"hostAndTlsPort\":null,\"maxSize\":1234,\"type\":\"historical\",\"tier\":\"tier\",\"priority\":1}";
    Assert.assertEquals(expected, metadataJson);
    DruidServerMetadata deserializedMetadata = objectMapper.readValue(metadataJson, DruidServerMetadata.class);
    Assert.assertEquals(metadata, deserializedMetadata);
    metadata = new DruidServerMetadata("host:123", "host:123", null, 0, ServerType.HISTORICAL, "t1", 0);
    Assert.assertEquals(metadata, objectMapper.readValue("{\"name\":\"host:123\",\"maxSize\":0,\"type\":\"HISTORICAL\",\"tier\":\"t1\",\"priority\":0,\"host\":\"host:123\"}", DruidServerMetadata.class));
    metadata = new DruidServerMetadata("host:123", "host:123", "host:214", 0, ServerType.HISTORICAL, "t1", 0);
    Assert.assertEquals(metadata, objectMapper.readValue("{\"name\":\"host:123\",\"maxSize\":0,\"type\":\"HISTORICAL\",\"tier\":\"t1\",\"priority\":0,\"host\":\"host:123\",\"hostAndTlsPort\":\"host:214\"}", DruidServerMetadata.class));
    Assert.assertEquals(metadata, objectMapper.readValue(objectMapper.writeValueAsString(metadata), DruidServerMetadata.class));
}
Also used: DruidServerMetadata (org.apache.druid.server.coordination.DruidServerMetadata), Test (org.junit.Test)
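
The assertions above pin both directions of the mapping: the exact JSON field layout on write, and tolerance for an omitted hostAndTlsPort on read. A minimal round-trip sketch, assuming a plain Jackson ObjectMapper suffices for the class's standard annotations (the test's objectMapper may be Druid's preconfigured mapper), with purely illustrative values:

@Test
public void druidServerMetadataRoundTripSketch() throws Exception {
    ObjectMapper mapper = new ObjectMapper();
    DruidServerMetadata original = new DruidServerMetadata(
        "historical-1",        // name
        "host:8083",           // host (plaintext)
        null,                  // hostAndTlsPort is optional
        1234,                  // maxSize
        ServerType.HISTORICAL, // type
        "tier",                // tier
        1                      // priority
    );
    // Serialize, deserialize, and compare by equals().
    String json = mapper.writeValueAsString(original);
    DruidServerMetadata copy = mapper.readValue(json, DruidServerMetadata.class);
    Assert.assertEquals(original, copy);
}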

Example 24 with DruidServerMetadata

Use of org.apache.druid.server.coordination.DruidServerMetadata in project druid by druid-io.

The class StorageNodeModuleTest, method testDruidServerMetadataIsInjectedAsSingleton.

@Test
public void testDruidServerMetadataIsInjectedAsSingleton() {
    DruidServerMetadata druidServerMetadata = injector.getInstance(DruidServerMetadata.class);
    Assert.assertNotNull(druidServerMetadata);
    DruidServerMetadata other = injector.getInstance(DruidServerMetadata.class);
    Assert.assertSame(druidServerMetadata, other);
}
Also used: DruidServerMetadata (org.apache.druid.server.coordination.DruidServerMetadata), Test (org.junit.Test)
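
Assert.assertSame compares object references rather than equals(), which is precisely what singleton scope guarantees. The real StorageNodeModule derives the metadata from node configuration; purely as an illustration, a hand-rolled Guice module with an equivalent singleton binding might look like this (toInstance bindings are singletons by definition):

@Test
public void singletonBindingSketch() {
    Injector sketchInjector = Guice.createInjector(
        binder -> binder.bind(DruidServerMetadata.class).toInstance(
            new DruidServerMetadata("id", "host", null, Long.MAX_VALUE, ServerType.HISTORICAL, "tier", 0)
        )
    );
    // Both lookups must return the same reference.
    Assert.assertSame(
        sketchInjector.getInstance(DruidServerMetadata.class),
        sketchInjector.getInstance(DruidServerMetadata.class)
    );
}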

Example 25 with DruidServerMetadata

Use of org.apache.druid.server.coordination.DruidServerMetadata in project druid by druid-io.

The class DruidSchemaConcurrencyTest, method testDruidSchemaRefreshAndInventoryViewAddSegmentAndBrokerServerViewGetTimeline.

/**
 * This tests contention among three components: DruidSchema, InventoryView, and BrokerServerView.
 * It first triggers refreshing DruidSchema. To mimic some heavy work done with {@link DruidSchema#lock},
 * {@link DruidSchema#buildDruidTable} is overridden to sleep before doing real work. While refreshing DruidSchema,
 * more new segments are added to InventoryView, which triggers updates of BrokerServerView. Finally, while
 * BrokerServerView is updated, {@link BrokerServerView#getTimeline} is continuously called to mimic user query
 * processing. All these calls must return without heavy contention.
 */
@Test(timeout = 30000L)
public void testDruidSchemaRefreshAndInventoryViewAddSegmentAndBrokerServerViewGetTimeline() throws InterruptedException, ExecutionException, TimeoutException {
    schema = new DruidSchema(
        CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate),
        serverView,
        segmentManager,
        new MapJoinableFactory(ImmutableSet.of(), ImmutableMap.of()),
        PLANNER_CONFIG_DEFAULT,
        new NoopEscalator(),
        new BrokerInternalQueryConfig(),
        null
    ) {

        @Override
        DruidTable buildDruidTable(final String dataSource) {
            doInLock(() -> {
                try {
                    // Mimic some heavy work done in lock in DruidSchema
                    Thread.sleep(5000);
                } catch (InterruptedException e) {
                    throw new RuntimeException(e);
                }
            });
            return super.buildDruidTable(dataSource);
        }
    };
    int numExistingSegments = 100;
    int numServers = 19;
    CountDownLatch segmentLoadLatch = new CountDownLatch(numExistingSegments);
    serverView.registerTimelineCallback(Execs.directExecutor(), new TimelineCallback() {

        @Override
        public CallbackAction timelineInitialized() {
            return CallbackAction.CONTINUE;
        }

        @Override
        public CallbackAction segmentAdded(DruidServerMetadata server, DataSegment segment) {
            segmentLoadLatch.countDown();
            return CallbackAction.CONTINUE;
        }

        @Override
        public CallbackAction segmentRemoved(DataSegment segment) {
            return CallbackAction.CONTINUE;
        }

        @Override
        public CallbackAction serverSegmentRemoved(DruidServerMetadata server, DataSegment segment) {
            return CallbackAction.CONTINUE;
        }
    });
    addSegmentsToCluster(0, numServers, numExistingSegments);
    // Wait for all segments to be loaded in BrokerServerView
    Assert.assertTrue(segmentLoadLatch.await(5, TimeUnit.SECONDS));
    // Trigger refresh of DruidSchema. This will internally run the heavy work mimicked by the overridden buildDruidTable.
    Future refreshFuture = exec.submit(() -> {
        schema.refresh(walker.getSegments().stream().map(DataSegment::getId).collect(Collectors.toSet()), Sets.newHashSet(DATASOURCE));
        return null;
    });
    // Trigger updates of BrokerServerView. This should be done asynchronously.
    // add completely new segments
    addSegmentsToCluster(numExistingSegments, numServers, 50);
    // add replicas of the first 30 segments.
    addReplicasToCluster(1, numServers, 30);
    // for the first 30 segments, we will still have replicas.
    // for the other 20 segments, they will be completely removed from the cluster.
    removeSegmentsFromCluster(numServers, 50);
    Assert.assertFalse(refreshFuture.isDone());
    for (int i = 0; i < 1000; i++) {
        boolean hasTimeline = exec.submit(
            () -> serverView.getTimeline(DataSourceAnalysis.forDataSource(new TableDataSource(DATASOURCE))).isPresent()
        ).get(100, TimeUnit.MILLISECONDS);
        Assert.assertTrue(hasTimeline);
        // We want to call getTimeline while BrokerServerView is being updated. Sleep might help with timing.
        Thread.sleep(2);
    }
    refreshFuture.get(10, TimeUnit.SECONDS);
}
Also used: DruidTable (org.apache.druid.sql.calcite.table.DruidTable), DruidServerMetadata (org.apache.druid.server.coordination.DruidServerMetadata), CountDownLatch (java.util.concurrent.CountDownLatch), DataSegment (org.apache.druid.timeline.DataSegment), NoopEscalator (org.apache.druid.server.security.NoopEscalator), TableDataSource (org.apache.druid.query.TableDataSource), CallbackAction (org.apache.druid.client.ServerView.CallbackAction), BrokerInternalQueryConfig (org.apache.druid.client.BrokerInternalQueryConfig), TimelineCallback (org.apache.druid.client.TimelineServerView.TimelineCallback), Future (java.util.concurrent.Future), MapJoinableFactory (org.apache.druid.segment.join.MapJoinableFactory), Test (org.junit.Test)
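
The polling loop at the end is the actual contention probe: every getTimeline call must finish within 100 ms even while the refresh thread sleeps for seconds inside DruidSchema's lock; otherwise get(...) throws TimeoutException and the test fails instead of hanging. The pattern in isolation, as a minimal sketch (probe() is a hypothetical stand-in for the call under test):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class ContentionProbeSketch {

    // Hypothetical stand-in for the call under test (getTimeline above).
    static boolean probe() {
        return true;
    }

    public static void main(String[] args) throws Exception {
        ExecutorService exec = Executors.newSingleThreadExecutor();
        try {
            for (int i = 0; i < 1000; i++) {
                // get(timeout) turns "blocked on a contended lock" into a
                // TimeoutException rather than an indefinite hang.
                boolean ok = exec.submit(ContentionProbeSketch::probe)
                                 .get(100, TimeUnit.MILLISECONDS);
                if (!ok) {
                    throw new AssertionError("probe returned false");
                }
                Thread.sleep(2); // give concurrent updates a chance to interleave
            }
        } catch (TimeoutException e) {
            throw new AssertionError("probe blocked longer than 100 ms", e);
        } finally {
            exec.shutdownNow();
        }
    }
}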

Aggregations

DruidServerMetadata (org.apache.druid.server.coordination.DruidServerMetadata): 37 usages
DataSegment (org.apache.druid.timeline.DataSegment): 28 usages
Test (org.junit.Test): 25 usages
CountDownLatch (java.util.concurrent.CountDownLatch): 16 usages
ImmutableDruidServer (org.apache.druid.client.ImmutableDruidServer): 16 usages
List (java.util.List): 14 usages
BrokerInternalQueryConfig (org.apache.druid.client.BrokerInternalQueryConfig): 13 usages
MapJoinableFactory (org.apache.druid.segment.join.MapJoinableFactory): 13 usages
NoopEscalator (org.apache.druid.server.security.NoopEscalator): 13 usages
SegmentId (org.apache.druid.timeline.SegmentId): 13 usages
Pair (org.apache.druid.java.util.common.Pair): 12 usages
ImmutableList (com.google.common.collect.ImmutableList): 11 usages
ImmutableMap (com.google.common.collect.ImmutableMap): 11 usages
Collectors (java.util.stream.Collectors): 11 usages
Intervals (org.apache.druid.java.util.common.Intervals): 11 usages
Before (org.junit.Before): 11 usages
ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper): 10 usages
Map (java.util.Map): 10 usages
TableDataSource (org.apache.druid.query.TableDataSource): 10 usages
ImmutableSet (com.google.common.collect.ImmutableSet): 9 usages