Example 31 with DruidServerMetadata

use of org.apache.druid.server.coordination.DruidServerMetadata in project druid by druid-io.

the class DruidSchemaTest method testServerSegmentRemovedCallbackRemoveHistoricalSegment.

@Test
public void testServerSegmentRemovedCallbackRemoveHistoricalSegment() throws InterruptedException {
    String datasource = "serverSegmentRemoveTest";
    CountDownLatch addSegmentLatch = new CountDownLatch(1);
    CountDownLatch removeServerSegmentLatch = new CountDownLatch(1);
    DruidSchema schema = new DruidSchema(CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate), serverView, segmentManager, new MapJoinableFactory(ImmutableSet.of(), ImmutableMap.of()), PLANNER_CONFIG_DEFAULT, new NoopEscalator(), new BrokerInternalQueryConfig(), null) {

        @Override
        protected void addSegment(final DruidServerMetadata server, final DataSegment segment) {
            super.addSegment(server, segment);
            if (datasource.equals(segment.getDataSource())) {
                addSegmentLatch.countDown();
            }
        }

        @Override
        void removeServerSegment(final DruidServerMetadata server, final DataSegment segment) {
            super.removeServerSegment(server, segment);
            if (datasource.equals(segment.getDataSource())) {
                removeServerSegmentLatch.countDown();
            }
        }
    };
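    // Announce the segment on both a historical and a broker, then drop only the historical copy;
    // the schema should still track the segment but report zero replicas, since brokers do not count.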
    DataSegment segment = newSegment(datasource, 1);
    serverView.addSegment(segment, ServerType.HISTORICAL);
    serverView.addSegment(segment, ServerType.BROKER);
    Assert.assertTrue(addSegmentLatch.await(1, TimeUnit.SECONDS));
    serverView.removeSegment(segment, ServerType.HISTORICAL);
    Assert.assertTrue(removeServerSegmentLatch.await(1, TimeUnit.SECONDS));
    Assert.assertEquals(5, schema.getTotalSegments());
    List<AvailableSegmentMetadata> metadatas = schema.getSegmentMetadataSnapshot().values().stream().filter(metadata -> datasource.equals(metadata.getSegment().getDataSource())).collect(Collectors.toList());
    Assert.assertEquals(1, metadatas.size());
    AvailableSegmentMetadata metadata = metadatas.get(0);
    Assert.assertEquals(0, metadata.isRealtime());
    Assert.assertEquals(0, metadata.getNumRows());
    // brokers are not counted as replicas yet
    Assert.assertEquals(0, metadata.getNumReplicas());
}
Also used : Pair(org.apache.druid.java.util.common.Pair) AllColumnIncluderator(org.apache.druid.query.metadata.metadata.AllColumnIncluderator) LongSumAggregatorFactory(org.apache.druid.query.aggregation.LongSumAggregatorFactory) After(org.junit.After) Map(java.util.Map) ServerType(org.apache.druid.server.coordination.ServerType) OffHeapMemorySegmentWriteOutMediumFactory(org.apache.druid.segment.writeout.OffHeapMemorySegmentWriteOutMediumFactory) EnumSet(java.util.EnumSet) HyperUniquesAggregatorFactory(org.apache.druid.query.aggregation.hyperloglog.HyperUniquesAggregatorFactory) ImmutableSet(com.google.common.collect.ImmutableSet) ImmutableMap(com.google.common.collect.ImmutableMap) NumberedShardSpec(org.apache.druid.timeline.partition.NumberedShardSpec) QueryableIndex(org.apache.druid.segment.QueryableIndex) Table(org.apache.calcite.schema.Table) Set(java.util.Set) IndexBuilder(org.apache.druid.segment.IndexBuilder) JavaTypeFactoryImpl(org.apache.calcite.jdbc.JavaTypeFactoryImpl) MapJoinableFactory(org.apache.druid.segment.join.MapJoinableFactory) Collectors(java.util.stream.Collectors) Sets(com.google.common.collect.Sets) IncrementalIndexSchema(org.apache.druid.segment.incremental.IncrementalIndexSchema) CountDownLatch(java.util.concurrent.CountDownLatch) SegmentMetadataQuery(org.apache.druid.query.metadata.metadata.SegmentMetadataQuery) List(java.util.List) BrokerInternalQueryConfig(org.apache.druid.client.BrokerInternalQueryConfig) LinearShardSpec(org.apache.druid.timeline.partition.LinearShardSpec) RelDataTypeField(org.apache.calcite.rel.type.RelDataTypeField) DataSegment(org.apache.druid.timeline.DataSegment) SegmentId(org.apache.druid.timeline.SegmentId) QueryLifecycleFactory(org.apache.druid.server.QueryLifecycleFactory) DoubleSumAggregatorFactory(org.apache.druid.query.aggregation.DoubleSumAggregatorFactory) Intervals(org.apache.druid.java.util.common.Intervals) GlobalTableDataSource(org.apache.druid.query.GlobalTableDataSource) QueryLifecycle(org.apache.druid.server.QueryLifecycle) CalciteTests(org.apache.druid.sql.calcite.util.CalciteTests) MultipleSpecificSegmentSpec(org.apache.druid.query.spec.MultipleSpecificSegmentSpec) ImmutableList(com.google.common.collect.ImmutableList) DruidServerMetadata(org.apache.druid.server.coordination.DruidServerMetadata) TestServerInventoryView(org.apache.druid.sql.calcite.util.TestServerInventoryView) SegmentAnalysis(org.apache.druid.query.metadata.metadata.SegmentAnalysis) NoopEscalator(org.apache.druid.server.security.NoopEscalator) PruneSpecsHolder(org.apache.druid.timeline.DataSegment.PruneSpecsHolder) ImmutableDruidServer(org.apache.druid.client.ImmutableDruidServer) CountAggregatorFactory(org.apache.druid.query.aggregation.CountAggregatorFactory) Before(org.junit.Before) RelDataType(org.apache.calcite.rel.type.RelDataType) Access(org.apache.druid.server.security.Access) SqlTypeName(org.apache.calcite.sql.type.SqlTypeName) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) Test(org.junit.Test) IOException(java.io.IOException) SpecificSegmentsQuerySegmentWalker(org.apache.druid.sql.calcite.util.SpecificSegmentsQuerySegmentWalker) EasyMock(org.easymock.EasyMock) AllowAllAuthenticator(org.apache.druid.server.security.AllowAllAuthenticator) TableDataSource(org.apache.druid.query.TableDataSource) File(java.io.File) ColumnAnalysis(org.apache.druid.query.metadata.metadata.ColumnAnalysis) TimeUnit(java.util.concurrent.TimeUnit) TestHelper(org.apache.druid.segment.TestHelper) DruidTable(org.apache.druid.sql.calcite.table.DruidTable) RowSignature(org.apache.druid.segment.column.RowSignature) ColumnType(org.apache.druid.segment.column.ColumnType) Assert(org.junit.Assert)
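
The test above calls a newSegment(datasource, partitionNum) helper that is not shown in this snippet. A minimal sketch of what such a helper might look like, built with DataSegment.builder() and a LinearShardSpec; the interval, version, and size values are illustrative assumptions, not the actual DruidSchemaTest implementation:

private static DataSegment newSegment(String datasource, int partitionNum) {
    // Illustrative values only; the real helper in DruidSchemaTest may differ.
    return DataSegment.builder()
                      .dataSource(datasource)
                      .interval(Intervals.of("2012/2013"))
                      .version("1")
                      .shardSpec(new LinearShardSpec(partitionNum))
                      .size(100)
                      .build();
}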

Example 32 with DruidServerMetadata

use of org.apache.druid.server.coordination.DruidServerMetadata in project druid by druid-io.

the class BatchDataSegmentAnnouncerTest method setUp.

@Before
public void setUp() throws Exception {
    testingCluster = new TestingCluster(1);
    testingCluster.start();
    cf = CuratorFrameworkFactory.builder().connectString(testingCluster.getConnectString()).retryPolicy(new ExponentialBackoffRetry(1, 10)).compressionProvider(new PotentiallyGzippedCompressionProvider(false)).build();
    cf.start();
    cf.blockUntilConnected();
    cf.create().creatingParentsIfNeeded().forPath(TEST_BASE_PATH);
    jsonMapper = TestHelper.makeJsonMapper();
    announcer = new TestAnnouncer(cf, Execs.directExecutor());
    announcer.start();
    segmentReader = new SegmentReader(cf, jsonMapper);
    skipDimensionsAndMetrics = false;
    skipLoadSpec = false;
    segmentAnnouncer = new BatchDataSegmentAnnouncer(new DruidServerMetadata("id", "host", null, Long.MAX_VALUE, ServerType.HISTORICAL, "tier", 0), new BatchDataSegmentAnnouncerConfig() {

        @Override
        public int getSegmentsPerNode() {
            return 50;
        }

        @Override
        public long getMaxBytesPerNode() {
            return maxBytesPerNode.get();
        }

        @Override
        public boolean isSkipDimensionsAndMetrics() {
            return skipDimensionsAndMetrics;
        }

        @Override
        public boolean isSkipLoadSpec() {
            return skipLoadSpec;
        }
    }, new ZkPathsConfig() {

        @Override
        public String getBase() {
            return TEST_BASE_PATH;
        }
    }, announcer, jsonMapper);
    testSegments = new HashSet<>();
    for (int i = 0; i < 100; i++) {
        testSegments.add(makeSegment(i));
    }
    exec = Execs.multiThreaded(NUM_THREADS, "BatchDataSegmentAnnouncerTest-%d");
}
Also used : TestingCluster(org.apache.curator.test.TestingCluster) ExponentialBackoffRetry(org.apache.curator.retry.ExponentialBackoffRetry) ZkPathsConfig(org.apache.druid.server.initialization.ZkPathsConfig) BatchDataSegmentAnnouncerConfig(org.apache.druid.server.initialization.BatchDataSegmentAnnouncerConfig) DruidServerMetadata(org.apache.druid.server.coordination.DruidServerMetadata) PotentiallyGzippedCompressionProvider(org.apache.druid.curator.PotentiallyGzippedCompressionProvider) BatchDataSegmentAnnouncer(org.apache.druid.server.coordination.BatchDataSegmentAnnouncer) Before(org.junit.Before)
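
The positional arguments to the DruidServerMetadata constructor are easy to misread in these snippets. The same call from the setUp above, annotated argument by argument; the parameter meanings reflect my reading of the constructor and should be treated as an assumption rather than authoritative documentation:

DruidServerMetadata serverMetadata = new DruidServerMetadata(
    "id",                   // server name
    "host",                 // host and plaintext port
    null,                   // host and TLS port (none in this test)
    Long.MAX_VALUE,         // maximum segment storage size in bytes
    ServerType.HISTORICAL,  // server type
    "tier",                 // tier name
    0                       // priority
);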

Example 33 with DruidServerMetadata

use of org.apache.druid.server.coordination.DruidServerMetadata in project druid by druid-io.

the class ServerHolderTest method testEquals.

@Test
public void testEquals() {
    final ServerHolder h1 = new ServerHolder(new ImmutableDruidServer(new DruidServerMetadata("name1", "host1", null, 100L, ServerType.HISTORICAL, "tier1", 0), 0L, ImmutableMap.of("src1", DATA_SOURCES.get("src1")), 1), new LoadQueuePeonTester());
    final ServerHolder h2 = new ServerHolder(new ImmutableDruidServer(new DruidServerMetadata("name2", "host1", null, 200L, ServerType.HISTORICAL, "tier1", 0), 100L, ImmutableMap.of("src1", DATA_SOURCES.get("src1")), 1), new LoadQueuePeonTester());
    final ServerHolder h3 = new ServerHolder(new ImmutableDruidServer(new DruidServerMetadata("name1", "host2", null, 200L, ServerType.HISTORICAL, "tier1", 0), 100L, ImmutableMap.of("src1", DATA_SOURCES.get("src1")), 1), new LoadQueuePeonTester());
    final ServerHolder h4 = new ServerHolder(new ImmutableDruidServer(new DruidServerMetadata("name1", "host1", null, 200L, ServerType.HISTORICAL, "tier2", 0), 100L, ImmutableMap.of("src1", DATA_SOURCES.get("src1")), 1), new LoadQueuePeonTester());
    final ServerHolder h5 = new ServerHolder(new ImmutableDruidServer(new DruidServerMetadata("name1", "host1", null, 100L, ServerType.REALTIME, "tier1", 0), 0L, ImmutableMap.of("src1", DATA_SOURCES.get("src1")), 1), new LoadQueuePeonTester());
    Assert.assertEquals(h1, h2);
    Assert.assertNotEquals(h1, h3);
    Assert.assertNotEquals(h1, h4);
    Assert.assertNotEquals(h1, h5);
}
Also used : DruidServerMetadata(org.apache.druid.server.coordination.DruidServerMetadata) ImmutableDruidServer(org.apache.druid.client.ImmutableDruidServer) Test(org.junit.Test)
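
Read together, the assertions imply that ServerHolder equality ignores the server name and the size fields (h1 and h2 differ in both yet compare equal) and is broken by a different host, tier, or server type. A hedged sketch of an identity check consistent with those assertions, not the actual ServerHolder.equals implementation:

// Consistent with the assertions in testEquals; the real ServerHolder.equals may differ.
static boolean sameServerIdentity(ImmutableDruidServer a, ImmutableDruidServer b) {
    return a.getHost().equals(b.getHost())
        && a.getTier().equals(b.getTier())
        && a.getType() == b.getType();
}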

Example 34 with DruidServerMetadata

use of org.apache.druid.server.coordination.DruidServerMetadata in project druid by druid-io.

the class ServerHolderTest method testIsServingSegment.

@Test
public void testIsServingSegment() {
    final ServerHolder h1 = new ServerHolder(new ImmutableDruidServer(new DruidServerMetadata("name1", "host1", null, 100L, ServerType.HISTORICAL, "tier1", 0), 0L, ImmutableMap.of("src1", DATA_SOURCES.get("src1")), 1), new LoadQueuePeonTester());
    Assert.assertTrue(h1.isServingSegment(SEGMENTS.get(0)));
    Assert.assertFalse(h1.isServingSegment(SEGMENTS.get(1)));
    Assert.assertTrue(h1.isServingSegment(SEGMENTS.get(0).getId()));
    Assert.assertFalse(h1.isServingSegment(SEGMENTS.get(1).getId()));
}
Also used : DruidServerMetadata(org.apache.druid.server.coordination.DruidServerMetadata) ImmutableDruidServer(org.apache.druid.client.ImmutableDruidServer) Test(org.junit.Test)

Example 35 with DruidServerMetadata

use of org.apache.druid.server.coordination.DruidServerMetadata in project druid by druid-io.

the class ServerHolderTest method testCompareTo.

@Test
public void testCompareTo() {
    // available size of 100
    final ServerHolder h1 = new ServerHolder(new ImmutableDruidServer(new DruidServerMetadata("name1", "host1", null, 100L, ServerType.HISTORICAL, "tier1", 0), 0L, ImmutableMap.of("src1", DATA_SOURCES.get("src1")), 1), new LoadQueuePeonTester());
    // available size of 100
    final ServerHolder h2 = new ServerHolder(new ImmutableDruidServer(new DruidServerMetadata("name1", "host1", null, 200L, ServerType.HISTORICAL, "tier1", 0), 100L, ImmutableMap.of("src1", DATA_SOURCES.get("src1")), 1), new LoadQueuePeonTester());
    // available size of 10
    final ServerHolder h3 = new ServerHolder(new ImmutableDruidServer(new DruidServerMetadata("name1", "host1", null, 1000L, ServerType.HISTORICAL, "tier1", 0), 990L, ImmutableMap.of("src1", DATA_SOURCES.get("src1")), 1), new LoadQueuePeonTester());
    // available size of 50
    final ServerHolder h4 = new ServerHolder(new ImmutableDruidServer(new DruidServerMetadata("name1", "host1", null, 50L, ServerType.HISTORICAL, "tier1", 0), 0L, ImmutableMap.of("src1", DATA_SOURCES.get("src1")), 1), new LoadQueuePeonTester());
    Assert.assertEquals(0, h1.compareTo(h2));
    Assert.assertEquals(-1, h3.compareTo(h1));
    Assert.assertEquals(-1, h3.compareTo(h4));
}
Also used : DruidServerMetadata(org.apache.druid.server.coordination.DruidServerMetadata) ImmutableDruidServer(org.apache.druid.client.ImmutableDruidServer) Test(org.junit.Test)
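
The comments in testCompareTo refer to available size, i.e. the server's maximum size minus the bytes it already uses. A short worked sketch of that arithmetic for the four holders above, using the values from the test:

// Available size = max size - currently used size.
long h1Available = 100L  - 0L;    // 100
long h2Available = 200L  - 100L;  // 100
long h3Available = 1000L - 990L;  // 10
long h4Available = 50L   - 0L;    // 50
// Hence h1.compareTo(h2) == 0 (same space left), and h3 sorts before both h1 and h4
// because it has the least space available.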

Aggregations

DruidServerMetadata (org.apache.druid.server.coordination.DruidServerMetadata): 37
DataSegment (org.apache.druid.timeline.DataSegment): 28
Test (org.junit.Test): 25
CountDownLatch (java.util.concurrent.CountDownLatch): 16
ImmutableDruidServer (org.apache.druid.client.ImmutableDruidServer): 16
List (java.util.List): 14
BrokerInternalQueryConfig (org.apache.druid.client.BrokerInternalQueryConfig): 13
MapJoinableFactory (org.apache.druid.segment.join.MapJoinableFactory): 13
NoopEscalator (org.apache.druid.server.security.NoopEscalator): 13
SegmentId (org.apache.druid.timeline.SegmentId): 13
Pair (org.apache.druid.java.util.common.Pair): 12
ImmutableList (com.google.common.collect.ImmutableList): 11
ImmutableMap (com.google.common.collect.ImmutableMap): 11
Collectors (java.util.stream.Collectors): 11
Intervals (org.apache.druid.java.util.common.Intervals): 11
Before (org.junit.Before): 11
ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper): 10
Map (java.util.Map): 10
TableDataSource (org.apache.druid.query.TableDataSource): 10
ImmutableSet (com.google.common.collect.ImmutableSet): 9