
Example 1 with CallbackAction

Use of org.apache.druid.client.ServerView.CallbackAction in project druid by druid-io.

From class DruidSchemaConcurrencyTest, method testDruidSchemaRefreshAndInventoryViewAddSegmentAndBrokerServerViewGetTimeline:

/**
 * This tests the contention between 3 components, DruidSchema, InventoryView, and BrokerServerView.
 * It first triggers refreshing DruidSchema. To mimic some heavy work done with {@link DruidSchema#lock},
 * {@link DruidSchema#buildDruidTable} is overridden to sleep before doing real work. While refreshing DruidSchema,
 * more new segments are added to InventoryView, which triggers updates of BrokerServerView. Finally, while
 * BrokerServerView is updated, {@link BrokerServerView#getTimeline} is continuously called to mimic user query
 * processing. All these calls must return without heavy contention.
 */
@Test(timeout = 30000L)
public void testDruidSchemaRefreshAndInventoryViewAddSegmentAndBrokerServerViewGetTimeline() throws InterruptedException, ExecutionException, TimeoutException {
    schema = new DruidSchema(CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate), serverView, segmentManager, new MapJoinableFactory(ImmutableSet.of(), ImmutableMap.of()), PLANNER_CONFIG_DEFAULT, new NoopEscalator(), new BrokerInternalQueryConfig(), null) {

        @Override
        DruidTable buildDruidTable(final String dataSource) {
            doInLock(() -> {
                try {
                    // Mimic some heavy work done in lock in DruidSchema
                    Thread.sleep(5000);
                } catch (InterruptedException e) {
                    throw new RuntimeException(e);
                }
            });
            return super.buildDruidTable(dataSource);
        }
    };
    int numExistingSegments = 100;
    int numServers = 19;
    CountDownLatch segmentLoadLatch = new CountDownLatch(numExistingSegments);
    serverView.registerTimelineCallback(Execs.directExecutor(), new TimelineCallback() {

        @Override
        public CallbackAction timelineInitialized() {
            return CallbackAction.CONTINUE;
        }

        @Override
        public CallbackAction segmentAdded(DruidServerMetadata server, DataSegment segment) {
            segmentLoadLatch.countDown();
            return CallbackAction.CONTINUE;
        }

        @Override
        public CallbackAction segmentRemoved(DataSegment segment) {
            return CallbackAction.CONTINUE;
        }

        @Override
        public CallbackAction serverSegmentRemoved(DruidServerMetadata server, DataSegment segment) {
            return CallbackAction.CONTINUE;
        }
    });
    addSegmentsToCluster(0, numServers, numExistingSegments);
    // Wait for all segments to be loaded in BrokerServerView
    Assert.assertTrue(segmentLoadLatch.await(5, TimeUnit.SECONDS));
    // Trigger refresh of DruidSchema. This will internally run the heavy work mimicked by the overridden buildDruidTable.
    Future<?> refreshFuture = exec.submit(() -> {
        schema.refresh(walker.getSegments().stream().map(DataSegment::getId).collect(Collectors.toSet()), Sets.newHashSet(DATASOURCE));
        return null;
    });
    // Trigger updates of BrokerServerView. This should be done asynchronously.
    // add completely new segments
    addSegmentsToCluster(numExistingSegments, numServers, 50);
    // add replicas of the first 30 segments.
    addReplicasToCluster(1, numServers, 30);
    // after removal, the first 30 segments will still have replicas in the cluster,
    // while the other 20 segments will be gone from it completely.
    removeSegmentsFromCluster(numServers, 50);
    Assert.assertFalse(refreshFuture.isDone());
    for (int i = 0; i < 1000; i++) {
        boolean hasTimeline = exec.submit(() -> serverView.getTimeline(DataSourceAnalysis.forDataSource(new TableDataSource(DATASOURCE))).isPresent()).get(100, TimeUnit.MILLISECONDS);
        Assert.assertTrue(hasTimeline);
        // We want to call getTimeline while BrokerServerView is being updated. Sleep might help with timing.
        Thread.sleep(2);
    }
    refreshFuture.get(10, TimeUnit.SECONDS);
}
Also used: DruidTable (org.apache.druid.sql.calcite.table.DruidTable), DruidServerMetadata (org.apache.druid.server.coordination.DruidServerMetadata), CountDownLatch (java.util.concurrent.CountDownLatch), DataSegment (org.apache.druid.timeline.DataSegment), NoopEscalator (org.apache.druid.server.security.NoopEscalator), TableDataSource (org.apache.druid.query.TableDataSource), CallbackAction (org.apache.druid.client.ServerView.CallbackAction), BrokerInternalQueryConfig (org.apache.druid.client.BrokerInternalQueryConfig), TimelineCallback (org.apache.druid.client.TimelineServerView.TimelineCallback), Future (java.util.concurrent.Future), MapJoinableFactory (org.apache.druid.segment.join.MapJoinableFactory), Test (org.junit.Test)
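
The example returns CallbackAction.CONTINUE from every callback, which keeps the listener registered for the lifetime of the test. The enum's other value, CallbackAction.UNSUBSCRIBE, asks the ServerView to drop the callback after it runs. As a minimal sketch of that contract, the latch callback above could unsubscribe itself once all expected segments have been counted down. This is illustrative only, assuming the CONTINUE/UNSUBSCRIBE semantics of org.apache.druid.client.ServerView.CallbackAction; serverView, Execs, and numExistingSegments are the fixtures from the example.

CountDownLatch segmentLoadLatch = new CountDownLatch(numExistingSegments);
serverView.registerTimelineCallback(Execs.directExecutor(), new TimelineCallback() {

    @Override
    public CallbackAction timelineInitialized() {
        return CallbackAction.CONTINUE;
    }

    @Override
    public CallbackAction segmentAdded(DruidServerMetadata server, DataSegment segment) {
        segmentLoadLatch.countDown();
        // Once every expected segment has been observed, stop listening:
        // returning UNSUBSCRIBE removes this callback from the server view,
        // so the later segment churn in the test no longer invokes it.
        return segmentLoadLatch.getCount() == 0 ? CallbackAction.UNSUBSCRIBE : CallbackAction.CONTINUE;
    }

    @Override
    public CallbackAction segmentRemoved(DataSegment segment) {
        return CallbackAction.CONTINUE;
    }

    @Override
    public CallbackAction serverSegmentRemoved(DruidServerMetadata server, DataSegment segment) {
        return CallbackAction.CONTINUE;
    }
});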

Example 2 with CallbackAction

Use of org.apache.druid.client.ServerView.CallbackAction in project druid by druid-io.

From class DruidSchemaConcurrencyTest, method testDruidSchemaRefreshAndDruidSchemaGetSegmentMetadata:

/**
 * This tests the contention between 2 methods of DruidSchema, {@link DruidSchema#refresh} and
 * {@link DruidSchema#getSegmentMetadataSnapshot()}. It first triggers refreshing DruidSchema.
 * To mimic some heavy work done with {@link DruidSchema#lock}, {@link DruidSchema#buildDruidTable} is overridden
 * to sleep before doing real work. While refreshing DruidSchema, getSegmentMetadataSnapshot() is continuously
 * called to mimic reading the segments table of SystemSchema. All these calls must return without heavy contention.
 */
@Test(timeout = 30000L)
public void testDruidSchemaRefreshAndDruidSchemaGetSegmentMetadata() throws InterruptedException, ExecutionException, TimeoutException {
    schema = new DruidSchema(CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate), serverView, segmentManager, new MapJoinableFactory(ImmutableSet.of(), ImmutableMap.of()), PLANNER_CONFIG_DEFAULT, new NoopEscalator(), new BrokerInternalQueryConfig(), null) {

        @Override
        DruidTable buildDruidTable(final String dataSource) {
            doInLock(() -> {
                try {
                    // Mimic some heavy work done in lock in DruidSchema
                    Thread.sleep(5000);
                } catch (InterruptedException e) {
                    throw new RuntimeException(e);
                }
            });
            return super.buildDruidTable(dataSource);
        }
    };
    int numExistingSegments = 100;
    int numServers = 19;
    CountDownLatch segmentLoadLatch = new CountDownLatch(numExistingSegments);
    serverView.registerTimelineCallback(Execs.directExecutor(), new TimelineCallback() {

        @Override
        public CallbackAction timelineInitialized() {
            return CallbackAction.CONTINUE;
        }

        @Override
        public CallbackAction segmentAdded(DruidServerMetadata server, DataSegment segment) {
            segmentLoadLatch.countDown();
            return CallbackAction.CONTINUE;
        }

        @Override
        public CallbackAction segmentRemoved(DataSegment segment) {
            return CallbackAction.CONTINUE;
        }

        @Override
        public CallbackAction serverSegmentRemoved(DruidServerMetadata server, DataSegment segment) {
            return CallbackAction.CONTINUE;
        }
    });
    addSegmentsToCluster(0, numServers, numExistingSegments);
    // Wait for all segments to be loaded in BrokerServerView
    Assert.assertTrue(segmentLoadLatch.await(5, TimeUnit.SECONDS));
    // Trigger refresh of DruidSchema. This will internally run the heavy work mimicked by the overridden buildDruidTable.
    Future<?> refreshFuture = exec.submit(() -> {
        schema.refresh(walker.getSegments().stream().map(DataSegment::getId).collect(Collectors.toSet()), Sets.newHashSet(DATASOURCE));
        return null;
    });
    Assert.assertFalse(refreshFuture.isDone());
    for (int i = 0; i < 1000; i++) {
        Map<SegmentId, AvailableSegmentMetadata> segmentsMetadata = exec.submit(() -> schema.getSegmentMetadataSnapshot()).get(100, TimeUnit.MILLISECONDS);
        Assert.assertFalse(segmentsMetadata.isEmpty());
        // We want to call getSegmentMetadataSnapshot while DruidSchema is refreshing. Sleep might help with timing.
        Thread.sleep(2);
    }
    refreshFuture.get(10, TimeUnit.SECONDS);
}
Also used: SegmentId (org.apache.druid.timeline.SegmentId), DruidTable (org.apache.druid.sql.calcite.table.DruidTable), DruidServerMetadata (org.apache.druid.server.coordination.DruidServerMetadata), CountDownLatch (java.util.concurrent.CountDownLatch), DataSegment (org.apache.druid.timeline.DataSegment), NoopEscalator (org.apache.druid.server.security.NoopEscalator), CallbackAction (org.apache.druid.client.ServerView.CallbackAction), BrokerInternalQueryConfig (org.apache.druid.client.BrokerInternalQueryConfig), TimelineCallback (org.apache.druid.client.TimelineServerView.TimelineCallback), Future (java.util.concurrent.Future), MapJoinableFactory (org.apache.druid.segment.join.MapJoinableFactory), Test (org.junit.Test)
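
Both tests share one structural idea: a writer thread holds the schema lock for a long time (the overridden buildDruidTable sleeping inside doInLock), while reader calls (getTimeline in example 1, getSegmentMetadataSnapshot in example 2) must each finish within a 100 ms deadline. The sketch below is a minimal standalone model of that property, not a reproduction of DruidSchema's actual internals: the rebuild happens under a lock, but readers serve a volatile snapshot reference that is published atomically and never read under the lock. All names here (SnapshotContentionSketch, refresh, getSnapshot) are hypothetical.

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class SnapshotContentionSketch {

    private final Object lock = new Object();

    // Readers see whatever map was last published here; the volatile write in
    // refresh() safely publishes the fully built replacement map.
    private volatile Map<String, String> snapshot = new HashMap<>();

    void refresh() throws InterruptedException {
        Map<String, String> rebuilt = new HashMap<>();
        synchronized (lock) {
            // Mimic the heavy work done in the lock (cf. buildDruidTable above)
            Thread.sleep(5000);
            rebuilt.put("dataSource", "metadata");
        }
        // Swap the snapshot in one volatile write; readers never block on `lock`.
        snapshot = rebuilt;
    }

    Map<String, String> getSnapshot() {
        // No lock acquired: returns immediately even while refresh() sleeps.
        return snapshot;
    }

    public static void main(String[] args) throws Exception {
        SnapshotContentionSketch schema = new SnapshotContentionSketch();
        ExecutorService exec = Executors.newFixedThreadPool(2);
        exec.submit(() -> {
            schema.refresh();
            return null;
        });
        for (int i = 0; i < 1000; i++) {
            // As in the tests: each read must complete within 100 ms even
            // while the refresh thread is holding the lock.
            exec.submit(schema::getSnapshot).get(100, TimeUnit.MILLISECONDS);
            Thread.sleep(2);
        }
        exec.shutdownNow();
    }
}

If getSnapshot() instead had to acquire the lock, every get(100, TimeUnit.MILLISECONDS) in the loop would time out during the five-second sleep, which is exactly the regression these two tests guard against.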

Aggregations

CountDownLatch (java.util.concurrent.CountDownLatch): 2 uses
Future (java.util.concurrent.Future): 2 uses
BrokerInternalQueryConfig (org.apache.druid.client.BrokerInternalQueryConfig): 2 uses
CallbackAction (org.apache.druid.client.ServerView.CallbackAction): 2 uses
TimelineCallback (org.apache.druid.client.TimelineServerView.TimelineCallback): 2 uses
MapJoinableFactory (org.apache.druid.segment.join.MapJoinableFactory): 2 uses
DruidServerMetadata (org.apache.druid.server.coordination.DruidServerMetadata): 2 uses
NoopEscalator (org.apache.druid.server.security.NoopEscalator): 2 uses
DruidTable (org.apache.druid.sql.calcite.table.DruidTable): 2 uses
DataSegment (org.apache.druid.timeline.DataSegment): 2 uses
Test (org.junit.Test): 2 uses
TableDataSource (org.apache.druid.query.TableDataSource): 1 use
SegmentId (org.apache.druid.timeline.SegmentId): 1 use