Search in sources:

Example 1 with DruidServerMetadata

Use of org.apache.druid.server.coordination.DruidServerMetadata in the project druid (druid-io).

From the class ImmutableSegmentLoadInfoTest, method testSerde.

/**
 * Verifies that {@link ImmutableSegmentLoadInfo} survives a Jackson round trip:
 * serializing an instance to bytes and reading it back yields an equal object.
 *
 * @throws IOException if Jackson serialization or deserialization fails
 */
@Test
public void testSerde() throws IOException {
    final DataSegment segment = new DataSegment("test_ds", Intervals.of("2011-04-01/2011-04-02"), "v1", null, null, null, NoneShardSpec.instance(), 0, 0);
    final DruidServerMetadata server = new DruidServerMetadata("a", "host", null, 10, ServerType.HISTORICAL, "tier", 1);
    final ImmutableSegmentLoadInfo expected = new ImmutableSegmentLoadInfo(segment, Sets.newHashSet(server));
    // Round-trip through the shared ObjectMapper and compare with the original.
    final byte[] json = mapper.writeValueAsBytes(expected);
    final ImmutableSegmentLoadInfo actual = mapper.readValue(json, ImmutableSegmentLoadInfo.class);
    Assert.assertEquals(expected, actual);
}
Also used : ImmutableSegmentLoadInfo(org.apache.druid.client.ImmutableSegmentLoadInfo) DruidServerMetadata(org.apache.druid.server.coordination.DruidServerMetadata) DataSegment(org.apache.druid.timeline.DataSegment) Test(org.junit.Test)

Example 2 with DruidServerMetadata

Use of org.apache.druid.server.coordination.DruidServerMetadata in the project druid (druid-io).

From the class TestServerInventoryView, method addSegment.

/**
 * Registers {@code segment} with the dummy server corresponding to {@code serverType}
 * and notifies every registered segment and timeline callback. Each notification is
 * dispatched asynchronously on the callback's own executor.
 *
 * @param segment    the segment to add to the dummy server's segment list
 * @param serverType selects which dummy server (broker / realtime / default) receives it
 */
public void addSegment(DataSegment segment, ServerType serverType) {
    final Pair<DruidServerMetadata, List<DataSegment>> serverAndSegments = getDummyServerAndSegmentsForType(serverType);
    final DruidServerMetadata server = serverAndSegments.lhs;
    serverAndSegments.rhs.add(segment);
    // Fire segmentAdded on both callback families; lhs is the executor, rhs the callback.
    segmentCallbackExecs.forEach(pair -> pair.lhs.execute(() -> pair.rhs.segmentAdded(server, segment)));
    timelineCallbackExecs.forEach(pair -> pair.lhs.execute(() -> pair.rhs.segmentAdded(server, segment)));
}
Also used : ArrayList(java.util.ArrayList) List(java.util.List) ImmutableList(com.google.common.collect.ImmutableList) DruidServerMetadata(org.apache.druid.server.coordination.DruidServerMetadata)

Example 3 with DruidServerMetadata

Use of org.apache.druid.server.coordination.DruidServerMetadata in the project druid (druid-io).

From the class TestServerInventoryView, method removeSegment.

/**
 * Removes {@code segment} from the dummy server for {@code serverType} and notifies
 * callbacks. Segment callbacks get segmentRemoved; timeline callbacks get
 * serverSegmentRemoved, plus segmentRemoved once no dummy server holds the segment
 * any longer (i.e. the last replica is gone).
 *
 * @param segment    the segment to remove
 * @param serverType selects which dummy server (broker / realtime / default) loses it
 */
public void removeSegment(DataSegment segment, ServerType serverType) {
    final Pair<DruidServerMetadata, List<DataSegment>> serverAndSegments = getDummyServerAndSegmentsForType(serverType);
    final DruidServerMetadata server = serverAndSegments.lhs;
    serverAndSegments.rhs.remove(segment);
    segmentCallbackExecs.forEach(pair -> pair.lhs.execute(() -> pair.rhs.segmentRemoved(server, segment)));
    timelineCallbackExecs.forEach(pair -> pair.lhs.execute(() -> {
        pair.rhs.serverSegmentRemoved(server, segment);
        // Only announce full removal once no dummy server list still contains the segment.
        final boolean lastReplicaGone = !segments.contains(segment)
                && !brokerSegments.contains(segment)
                && !realtimeSegments.contains(segment);
        if (lastReplicaGone) {
            pair.rhs.segmentRemoved(segment);
        }
    }));
}
Also used : ArrayList(java.util.ArrayList) List(java.util.List) ImmutableList(com.google.common.collect.ImmutableList) DruidServerMetadata(org.apache.druid.server.coordination.DruidServerMetadata)

Example 4 with DruidServerMetadata

Use of org.apache.druid.server.coordination.DruidServerMetadata in the project druid (druid-io).

From the class TestServerInventoryView, method getDummyServerAndSegmentsForType.

/**
 * Maps a {@link ServerType} to its dummy server metadata and the (mutable) segment
 * list backing that server. BROKER and REALTIME have dedicated dummies; every other
 * type falls through to the default historical-style dummy server.
 *
 * @param serverType the server type to look up
 * @return a pair of (server metadata, live segment list) for that type
 */
private Pair<DruidServerMetadata, List<DataSegment>> getDummyServerAndSegmentsForType(ServerType serverType) {
    if (serverType == ServerType.BROKER) {
        return new Pair<>(DUMMY_BROKER, brokerSegments);
    }
    if (serverType == ServerType.REALTIME) {
        return new Pair<>(DUMMY_SERVER_REALTIME, realtimeSegments);
    }
    return new Pair<>(DUMMY_SERVER, segments);
}
Also used : DruidServerMetadata(org.apache.druid.server.coordination.DruidServerMetadata) DataSegment(org.apache.druid.timeline.DataSegment) Pair(org.apache.druid.java.util.common.Pair)

Example 5 with DruidServerMetadata

Use of org.apache.druid.server.coordination.DruidServerMetadata in the project druid (druid-io).

From the class DruidSchemaTest, method testSegmentRemovedCallbackNonEmptyDataSourceAfterRemove.

/**
 * Verifies DruidSchema's segment-removed handling when the datasource still has
 * segments left after the removal: the removed segment disappears from the
 * metadata snapshot and the refresh/mutable sets, while the datasource itself
 * remains a known table and is marked as needing a rebuild.
 */
@Test
public void testSegmentRemovedCallbackNonEmptyDataSourceAfterRemove() throws InterruptedException, IOException {
    String datasource = "segmentRemoveTest";
    // Latches let the test wait until the schema has actually processed the
    // add/remove callbacks (which arrive asynchronously via the server view).
    CountDownLatch addSegmentLatch = new CountDownLatch(2);
    CountDownLatch removeSegmentLatch = new CountDownLatch(1);
    // Anonymous subclass hooks addSegment/removeSegment to count down the latches
    // for segments belonging to this test's datasource only.
    DruidSchema schema = new DruidSchema(CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate), serverView, segmentManager, new MapJoinableFactory(ImmutableSet.of(), ImmutableMap.of()), PLANNER_CONFIG_DEFAULT, new NoopEscalator(), new BrokerInternalQueryConfig(), null) {

        @Override
        protected void addSegment(final DruidServerMetadata server, final DataSegment segment) {
            super.addSegment(server, segment);
            if (datasource.equals(segment.getDataSource())) {
                addSegmentLatch.countDown();
            }
        }

        @Override
        void removeSegment(final DataSegment segment) {
            super.removeSegment(segment);
            if (datasource.equals(segment.getDataSource())) {
                removeSegmentLatch.countDown();
            }
        }
    };
    // Two segments for the same datasource, served by different server types.
    List<DataSegment> segments = ImmutableList.of(newSegment(datasource, 1), newSegment(datasource, 2));
    serverView.addSegment(segments.get(0), ServerType.REALTIME);
    serverView.addSegment(segments.get(1), ServerType.HISTORICAL);
    Assert.assertTrue(addSegmentLatch.await(1, TimeUnit.SECONDS));
    schema.refresh(segments.stream().map(DataSegment::getId).collect(Collectors.toSet()), Sets.newHashSet(datasource));
    // Remove only the realtime segment; the historical one keeps the datasource alive.
    serverView.removeSegment(segments.get(0), ServerType.REALTIME);
    Assert.assertTrue(removeSegmentLatch.await(1, TimeUnit.SECONDS));
    // NOTE(review): 5 presumably counts this test's remaining segment plus segments
    // from shared fixtures set up elsewhere in the class — confirm against setUp.
    Assert.assertEquals(5, schema.getTotalSegments());
    // Exactly one segment for this datasource should remain in the snapshot.
    List<AvailableSegmentMetadata> metadatas = schema.getSegmentMetadataSnapshot().values().stream().filter(metadata -> datasource.equals(metadata.getSegment().getDataSource())).collect(Collectors.toList());
    Assert.assertEquals(1, metadatas.size());
    // The removed segment must be gone from both pending-refresh and mutable sets.
    Assert.assertFalse(schema.getSegmentsNeedingRefresh().contains(segments.get(0).getId()));
    Assert.assertFalse(schema.getMutableSegments().contains(segments.get(0).getId()));
    // Datasource survives the removal but is flagged for a table rebuild.
    Assert.assertTrue(schema.getDataSourcesNeedingRebuild().contains(datasource));
    Assert.assertTrue(schema.getTableNames().contains(datasource));
}
Also used : Pair(org.apache.druid.java.util.common.Pair) AllColumnIncluderator(org.apache.druid.query.metadata.metadata.AllColumnIncluderator) LongSumAggregatorFactory(org.apache.druid.query.aggregation.LongSumAggregatorFactory) After(org.junit.After) Map(java.util.Map) ServerType(org.apache.druid.server.coordination.ServerType) OffHeapMemorySegmentWriteOutMediumFactory(org.apache.druid.segment.writeout.OffHeapMemorySegmentWriteOutMediumFactory) EnumSet(java.util.EnumSet) HyperUniquesAggregatorFactory(org.apache.druid.query.aggregation.hyperloglog.HyperUniquesAggregatorFactory) ImmutableSet(com.google.common.collect.ImmutableSet) ImmutableMap(com.google.common.collect.ImmutableMap) NumberedShardSpec(org.apache.druid.timeline.partition.NumberedShardSpec) QueryableIndex(org.apache.druid.segment.QueryableIndex) Table(org.apache.calcite.schema.Table) Set(java.util.Set) IndexBuilder(org.apache.druid.segment.IndexBuilder) JavaTypeFactoryImpl(org.apache.calcite.jdbc.JavaTypeFactoryImpl) MapJoinableFactory(org.apache.druid.segment.join.MapJoinableFactory) Collectors(java.util.stream.Collectors) Sets(com.google.common.collect.Sets) IncrementalIndexSchema(org.apache.druid.segment.incremental.IncrementalIndexSchema) CountDownLatch(java.util.concurrent.CountDownLatch) SegmentMetadataQuery(org.apache.druid.query.metadata.metadata.SegmentMetadataQuery) List(java.util.List) BrokerInternalQueryConfig(org.apache.druid.client.BrokerInternalQueryConfig) LinearShardSpec(org.apache.druid.timeline.partition.LinearShardSpec) RelDataTypeField(org.apache.calcite.rel.type.RelDataTypeField) DataSegment(org.apache.druid.timeline.DataSegment) SegmentId(org.apache.druid.timeline.SegmentId) QueryLifecycleFactory(org.apache.druid.server.QueryLifecycleFactory) DoubleSumAggregatorFactory(org.apache.druid.query.aggregation.DoubleSumAggregatorFactory) Intervals(org.apache.druid.java.util.common.Intervals) GlobalTableDataSource(org.apache.druid.query.GlobalTableDataSource) 
QueryLifecycle(org.apache.druid.server.QueryLifecycle) CalciteTests(org.apache.druid.sql.calcite.util.CalciteTests) MultipleSpecificSegmentSpec(org.apache.druid.query.spec.MultipleSpecificSegmentSpec) ImmutableList(com.google.common.collect.ImmutableList) DruidServerMetadata(org.apache.druid.server.coordination.DruidServerMetadata) TestServerInventoryView(org.apache.druid.sql.calcite.util.TestServerInventoryView) SegmentAnalysis(org.apache.druid.query.metadata.metadata.SegmentAnalysis) NoopEscalator(org.apache.druid.server.security.NoopEscalator) PruneSpecsHolder(org.apache.druid.timeline.DataSegment.PruneSpecsHolder) ImmutableDruidServer(org.apache.druid.client.ImmutableDruidServer) CountAggregatorFactory(org.apache.druid.query.aggregation.CountAggregatorFactory) Before(org.junit.Before) RelDataType(org.apache.calcite.rel.type.RelDataType) Access(org.apache.druid.server.security.Access) SqlTypeName(org.apache.calcite.sql.type.SqlTypeName) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) Test(org.junit.Test) IOException(java.io.IOException) SpecificSegmentsQuerySegmentWalker(org.apache.druid.sql.calcite.util.SpecificSegmentsQuerySegmentWalker) EasyMock(org.easymock.EasyMock) AllowAllAuthenticator(org.apache.druid.server.security.AllowAllAuthenticator) TableDataSource(org.apache.druid.query.TableDataSource) File(java.io.File) ColumnAnalysis(org.apache.druid.query.metadata.metadata.ColumnAnalysis) TimeUnit(java.util.concurrent.TimeUnit) TestHelper(org.apache.druid.segment.TestHelper) DruidTable(org.apache.druid.sql.calcite.table.DruidTable) RowSignature(org.apache.druid.segment.column.RowSignature) ColumnType(org.apache.druid.segment.column.ColumnType) Assert(org.junit.Assert) NoopEscalator(org.apache.druid.server.security.NoopEscalator) BrokerInternalQueryConfig(org.apache.druid.client.BrokerInternalQueryConfig) DruidServerMetadata(org.apache.druid.server.coordination.DruidServerMetadata) CountDownLatch(java.util.concurrent.CountDownLatch) 
MapJoinableFactory(org.apache.druid.segment.join.MapJoinableFactory) DataSegment(org.apache.druid.timeline.DataSegment) Test(org.junit.Test)

Aggregations

DruidServerMetadata (org.apache.druid.server.coordination.DruidServerMetadata)37 DataSegment (org.apache.druid.timeline.DataSegment)28 Test (org.junit.Test)25 CountDownLatch (java.util.concurrent.CountDownLatch)16 ImmutableDruidServer (org.apache.druid.client.ImmutableDruidServer)16 List (java.util.List)14 BrokerInternalQueryConfig (org.apache.druid.client.BrokerInternalQueryConfig)13 MapJoinableFactory (org.apache.druid.segment.join.MapJoinableFactory)13 NoopEscalator (org.apache.druid.server.security.NoopEscalator)13 SegmentId (org.apache.druid.timeline.SegmentId)13 Pair (org.apache.druid.java.util.common.Pair)12 ImmutableList (com.google.common.collect.ImmutableList)11 ImmutableMap (com.google.common.collect.ImmutableMap)11 Collectors (java.util.stream.Collectors)11 Intervals (org.apache.druid.java.util.common.Intervals)11 Before (org.junit.Before)11 ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper)10 Map (java.util.Map)10 TableDataSource (org.apache.druid.query.TableDataSource)10 ImmutableSet (com.google.common.collect.ImmutableSet)9