
Example 16 with SegmentId

use of org.apache.druid.timeline.SegmentId in project druid by druid-io.

From the class HashJoinSegmentTest, method setUp:

@Before
public void setUp() throws IOException {
    allReferencesAcquireCount = 0;
    allReferencesCloseCount = 0;
    referencedSegmentAcquireCount = 0;
    referencedSegmentClosedCount = 0;
    indexedTableJoinableReferenceAcquireCount = 0;
    indexedTableJoinableReferenceCloseCount = 0;
    j0Closed = false;
    j1Closed = false;
    baseSegment = new QueryableIndexSegment(JoinTestHelper.createFactIndexBuilder(temporaryFolder.newFolder()).buildMMappedIndex(), SegmentId.dummy("facts"));
    List<JoinableClause> joinableClauses = ImmutableList.of(new JoinableClause("j0.", new IndexedTableJoinable(JoinTestHelper.createCountriesIndexedTable()) {

        @Override
        public Optional<Closeable> acquireReferences() {
            if (!j0Closed) {
                indexedTableJoinableReferenceAcquireCount++;
                Closer closer = Closer.create();
                closer.register(() -> indexedTableJoinableReferenceCloseCount++);
                return Optional.of(closer);
            }
            return Optional.empty();
        }
    }, JoinType.LEFT, JoinConditionAnalysis.forExpression("1", "j0.", ExprMacroTable.nil())), new JoinableClause("j1.", new IndexedTableJoinable(JoinTestHelper.createRegionsIndexedTable()) {

        @Override
        public Optional<Closeable> acquireReferences() {
            if (!j1Closed) {
                indexedTableJoinableReferenceAcquireCount++;
                Closer closer = Closer.create();
                closer.register(() -> indexedTableJoinableReferenceCloseCount++);
                return Optional.of(closer);
            }
            return Optional.empty();
        }
    }, JoinType.LEFT, JoinConditionAnalysis.forExpression("1", "j1.", ExprMacroTable.nil())));
    referencedSegment = ReferenceCountingSegment.wrapRootGenerationSegment(baseSegment);
    SegmentReference testWrapper = new SegmentReference() {

        @Override
        public Optional<Closeable> acquireReferences() {
            Closer closer = Closer.create();
            return referencedSegment.acquireReferences().map(closeable -> {
                referencedSegmentAcquireCount++;
                closer.register(closeable);
                closer.register(() -> referencedSegmentClosedCount++);
                return closer;
            });
        }

        @Override
        public SegmentId getId() {
            return referencedSegment.getId();
        }

        @Override
        public Interval getDataInterval() {
            return referencedSegment.getDataInterval();
        }

        @Nullable
        @Override
        public QueryableIndex asQueryableIndex() {
            return referencedSegment.asQueryableIndex();
        }

        @Override
        public StorageAdapter asStorageAdapter() {
            return referencedSegment.asStorageAdapter();
        }

        @Override
        public void close() {
            referencedSegment.close();
        }
    };
    hashJoinSegment = new HashJoinSegment(testWrapper, null, joinableClauses, null) {

        @Override
        public Optional<Closeable> acquireReferences() {
            Closer closer = Closer.create();
            return super.acquireReferences().map(closeable -> {
                allReferencesAcquireCount++;
                closer.register(closeable);
                closer.register(() -> allReferencesCloseCount++);
                return closer;
            });
        }
    };
}
Also used : QueryableIndexSegment(org.apache.druid.segment.QueryableIndexSegment) Closer(org.apache.druid.java.util.common.io.Closer) CoreMatchers(org.hamcrest.CoreMatchers) IndexedTableJoinable(org.apache.druid.segment.join.table.IndexedTableJoinable) QueryableIndex(org.apache.druid.segment.QueryableIndex) InitializedNullHandlingTest(org.apache.druid.testing.InitializedNullHandlingTest) Test(org.junit.Test) IOException(java.io.IOException) ReferenceCountingSegment(org.apache.druid.segment.ReferenceCountingSegment) StorageAdapter(org.apache.druid.segment.StorageAdapter) ExprMacroTable(org.apache.druid.math.expr.ExprMacroTable) SegmentReference(org.apache.druid.segment.SegmentReference) Interval(org.joda.time.Interval) List(java.util.List) Rule(org.junit.Rule) ImmutableList(com.google.common.collect.ImmutableList) Closeable(java.io.Closeable) Optional(java.util.Optional) SegmentId(org.apache.druid.timeline.SegmentId) Assert(org.junit.Assert) ExpectedException(org.junit.rules.ExpectedException) TemporaryFolder(org.junit.rules.TemporaryFolder) Nullable(javax.annotation.Nullable) Before(org.junit.Before)
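
The heart of this setUp is the counting wrapper around acquireReferences(): each successful acquire increments a counter and registers a Closeable that increments a matching close counter, so later tests can assert the two balance. Below is a minimal, framework-free sketch of that pattern; the class name and AtomicInteger counters are illustrative (the test itself uses plain int fields on the test class).

import java.io.Closeable;
import java.util.Optional;
import java.util.concurrent.atomic.AtomicInteger;

// Sketch of the acquire/close bookkeeping used in setUp(): a successful
// acquire bumps one counter and hands back a Closeable that bumps the
// other, so a test can assert the two counters end up equal.
public class ReferenceCountingSketch {
    private final AtomicInteger acquireCount = new AtomicInteger();
    private final AtomicInteger closeCount = new AtomicInteger();
    private volatile boolean closed = false;

    public Optional<Closeable> acquireReferences() {
        if (closed) {
            // mirrors the j0Closed / j1Closed guard: no reference once closed
            return Optional.empty();
        }
        acquireCount.incrementAndGet();
        return Optional.of(() -> closeCount.incrementAndGet());
    }

    public void close() {
        closed = true;
    }

    public boolean balanced() {
        return acquireCount.get() == closeCount.get();
    }
}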

Example 17 with SegmentId

use of org.apache.druid.timeline.SegmentId in project druid by druid-io.

From the class DruidSchemaTest, method testRunSegmentMetadataQueryWithContext:

/**
 * Ensure that the BrokerInternalQueryConfig context is honored for this internally generated SegmentMetadata Query
 */
@Test
public void testRunSegmentMetadataQueryWithContext() throws Exception {
    Map<String, Object> queryContext = ImmutableMap.of("priority", 5);
    String brokerInternalQueryConfigJson = "{\"context\": { \"priority\": 5} }";
    TestHelper.makeJsonMapper();
    BrokerInternalQueryConfig brokerInternalQueryConfig = MAPPER.readValue(MAPPER.writeValueAsString(MAPPER.readValue(brokerInternalQueryConfigJson, BrokerInternalQueryConfig.class)), BrokerInternalQueryConfig.class);
    DataSegment segment = newSegment("test", 0);
    List<SegmentId> segmentIterable = ImmutableList.of(segment.getId());
    // The query we expect runSegmentMetadataQuery to create; the test verifies that the query generated by the method under test matches it.
    SegmentMetadataQuery expectedMetadataQuery = new SegmentMetadataQuery(new TableDataSource(segment.getDataSource()), new MultipleSpecificSegmentSpec(segmentIterable.stream().map(SegmentId::toDescriptor).collect(Collectors.toList())), new AllColumnIncluderator(), false, queryContext, EnumSet.noneOf(SegmentMetadataQuery.AnalysisType.class), false, false);
    QueryLifecycleFactory factoryMock = EasyMock.createMock(QueryLifecycleFactory.class);
    QueryLifecycle lifecycleMock = EasyMock.createMock(QueryLifecycle.class);
    // Create a fresh schema for this test because the shared schemas don't mock the QueryLifecycleFactory, which this test needs.
    DruidSchema mySchema = new DruidSchema(factoryMock, serverView, segmentManager, new MapJoinableFactory(ImmutableSet.of(globalTableJoinable), ImmutableMap.of(globalTableJoinable.getClass(), GlobalTableDataSource.class)), PLANNER_CONFIG_DEFAULT, new NoopEscalator(), brokerInternalQueryConfig, null);
    EasyMock.expect(factoryMock.factorize()).andReturn(lifecycleMock).once();
    // This is the meat of the test: make sure the query created by the method under test matches the expected query, specifically the operator-configured context.
    EasyMock.expect(lifecycleMock.runSimple(expectedMetadataQuery, AllowAllAuthenticator.ALLOW_ALL_RESULT, Access.OK)).andReturn(null);
    EasyMock.replay(factoryMock, lifecycleMock);
    mySchema.runSegmentMetadataQuery(segmentIterable);
    EasyMock.verify(factoryMock, lifecycleMock);
}
Also used : MultipleSpecificSegmentSpec(org.apache.druid.query.spec.MultipleSpecificSegmentSpec) QueryLifecycle(org.apache.druid.server.QueryLifecycle) SegmentId(org.apache.druid.timeline.SegmentId) DataSegment(org.apache.druid.timeline.DataSegment) AllColumnIncluderator(org.apache.druid.query.metadata.metadata.AllColumnIncluderator) NoopEscalator(org.apache.druid.server.security.NoopEscalator) GlobalTableDataSource(org.apache.druid.query.GlobalTableDataSource) TableDataSource(org.apache.druid.query.TableDataSource) QueryLifecycleFactory(org.apache.druid.server.QueryLifecycleFactory) SegmentMetadataQuery(org.apache.druid.query.metadata.metadata.SegmentMetadataQuery) BrokerInternalQueryConfig(org.apache.druid.client.BrokerInternalQueryConfig) MapJoinableFactory(org.apache.druid.segment.join.MapJoinableFactory) Test(org.junit.Test)
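
The assertion in this test leans on the SegmentId-to-SegmentDescriptor translation done with SegmentId::toDescriptor before building the MultipleSpecificSegmentSpec. A short sketch isolating just that step, reusing the same constructor call as the test; the helper class and method names are hypothetical:

import java.util.List;
import java.util.stream.Collectors;

import com.google.common.collect.ImmutableList;
import org.apache.druid.query.spec.MultipleSpecificSegmentSpec;
import org.apache.druid.timeline.SegmentId;

public class SegmentSpecSketch {
    // Same SegmentId::toDescriptor stream the test uses to build the expected query's spec.
    static MultipleSpecificSegmentSpec specFor(List<SegmentId> ids) {
        return new MultipleSpecificSegmentSpec(ids.stream().map(SegmentId::toDescriptor).collect(Collectors.toList()));
    }

    public static void main(String[] args) {
        // SegmentId.dummy() is the same shortcut Example 16's setUp() uses.
        System.out.println(specFor(ImmutableList.of(SegmentId.dummy("test"))));
    }
}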

Example 18 with SegmentId

use of org.apache.druid.timeline.SegmentId in project druid by druid-io.

From the class DruidSchemaTest, method testAvailableSegmentMetadataIsRealtime:

@Test
public void testAvailableSegmentMetadataIsRealtime() {
    Map<SegmentId, AvailableSegmentMetadata> segmentsMetadata = schema.getSegmentMetadataSnapshot();
    final List<DataSegment> segments = segmentsMetadata.values().stream().map(AvailableSegmentMetadata::getSegment).collect(Collectors.toList());
    // find the only realtime segment with datasource "foo3"
    final DataSegment existingSegment = segments.stream().filter(segment -> segment.getDataSource().equals("foo3")).findFirst().orElse(null);
    Assert.assertNotNull(existingSegment);
    final AvailableSegmentMetadata metadata = segmentsMetadata.get(existingSegment.getId());
    Assert.assertEquals(1L, metadata.isRealtime());
    // get the historical server
    final ImmutableDruidServer historicalServer = druidServers.stream().filter(s -> s.getType().equals(ServerType.HISTORICAL)).findAny().orElse(null);
    Assert.assertNotNull(historicalServer);
    final DruidServerMetadata historicalServerMetadata = historicalServer.getMetadata();
    // add existingSegment to historical
    schema.addSegment(historicalServerMetadata, existingSegment);
    segmentsMetadata = schema.getSegmentMetadataSnapshot();
    // get the segment with datasource "foo3"
    DataSegment currentSegment = segments.stream().filter(segment -> segment.getDataSource().equals("foo3")).findFirst().orElse(null);
    Assert.assertNotNull(currentSegment);
    AvailableSegmentMetadata currentMetadata = segmentsMetadata.get(currentSegment.getId());
    Assert.assertEquals(0L, currentMetadata.isRealtime());
    ImmutableDruidServer realtimeServer = druidServers.stream().filter(s -> s.getType().equals(ServerType.REALTIME)).findAny().orElse(null);
    Assert.assertNotNull(realtimeServer);
    // drop existingSegment from realtime task
    schema.removeServerSegment(realtimeServer.getMetadata(), existingSegment);
    segmentsMetadata = schema.getSegmentMetadataSnapshot();
    currentSegment = segments.stream().filter(segment -> segment.getDataSource().equals("foo3")).findFirst().orElse(null);
    Assert.assertNotNull(currentSegment);
    currentMetadata = segmentsMetadata.get(currentSegment.getId());
    Assert.assertEquals(0L, currentMetadata.isRealtime());
}
Also used : Pair(org.apache.druid.java.util.common.Pair) AllColumnIncluderator(org.apache.druid.query.metadata.metadata.AllColumnIncluderator) LongSumAggregatorFactory(org.apache.druid.query.aggregation.LongSumAggregatorFactory) After(org.junit.After) Map(java.util.Map) ServerType(org.apache.druid.server.coordination.ServerType) OffHeapMemorySegmentWriteOutMediumFactory(org.apache.druid.segment.writeout.OffHeapMemorySegmentWriteOutMediumFactory) EnumSet(java.util.EnumSet) HyperUniquesAggregatorFactory(org.apache.druid.query.aggregation.hyperloglog.HyperUniquesAggregatorFactory) ImmutableSet(com.google.common.collect.ImmutableSet) ImmutableMap(com.google.common.collect.ImmutableMap) NumberedShardSpec(org.apache.druid.timeline.partition.NumberedShardSpec) QueryableIndex(org.apache.druid.segment.QueryableIndex) Table(org.apache.calcite.schema.Table) Set(java.util.Set) IndexBuilder(org.apache.druid.segment.IndexBuilder) JavaTypeFactoryImpl(org.apache.calcite.jdbc.JavaTypeFactoryImpl) MapJoinableFactory(org.apache.druid.segment.join.MapJoinableFactory) Collectors(java.util.stream.Collectors) Sets(com.google.common.collect.Sets) IncrementalIndexSchema(org.apache.druid.segment.incremental.IncrementalIndexSchema) CountDownLatch(java.util.concurrent.CountDownLatch) SegmentMetadataQuery(org.apache.druid.query.metadata.metadata.SegmentMetadataQuery) List(java.util.List) BrokerInternalQueryConfig(org.apache.druid.client.BrokerInternalQueryConfig) LinearShardSpec(org.apache.druid.timeline.partition.LinearShardSpec) RelDataTypeField(org.apache.calcite.rel.type.RelDataTypeField) DataSegment(org.apache.druid.timeline.DataSegment) SegmentId(org.apache.druid.timeline.SegmentId) QueryLifecycleFactory(org.apache.druid.server.QueryLifecycleFactory) DoubleSumAggregatorFactory(org.apache.druid.query.aggregation.DoubleSumAggregatorFactory) Intervals(org.apache.druid.java.util.common.Intervals) GlobalTableDataSource(org.apache.druid.query.GlobalTableDataSource) QueryLifecycle(org.apache.druid.server.QueryLifecycle) CalciteTests(org.apache.druid.sql.calcite.util.CalciteTests) MultipleSpecificSegmentSpec(org.apache.druid.query.spec.MultipleSpecificSegmentSpec) ImmutableList(com.google.common.collect.ImmutableList) DruidServerMetadata(org.apache.druid.server.coordination.DruidServerMetadata) TestServerInventoryView(org.apache.druid.sql.calcite.util.TestServerInventoryView) SegmentAnalysis(org.apache.druid.query.metadata.metadata.SegmentAnalysis) NoopEscalator(org.apache.druid.server.security.NoopEscalator) PruneSpecsHolder(org.apache.druid.timeline.DataSegment.PruneSpecsHolder) ImmutableDruidServer(org.apache.druid.client.ImmutableDruidServer) CountAggregatorFactory(org.apache.druid.query.aggregation.CountAggregatorFactory) Before(org.junit.Before) RelDataType(org.apache.calcite.rel.type.RelDataType) Access(org.apache.druid.server.security.Access) SqlTypeName(org.apache.calcite.sql.type.SqlTypeName) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) Test(org.junit.Test) IOException(java.io.IOException) SpecificSegmentsQuerySegmentWalker(org.apache.druid.sql.calcite.util.SpecificSegmentsQuerySegmentWalker) EasyMock(org.easymock.EasyMock) AllowAllAuthenticator(org.apache.druid.server.security.AllowAllAuthenticator) TableDataSource(org.apache.druid.query.TableDataSource) File(java.io.File) ColumnAnalysis(org.apache.druid.query.metadata.metadata.ColumnAnalysis) TimeUnit(java.util.concurrent.TimeUnit) TestHelper(org.apache.druid.segment.TestHelper) DruidTable(org.apache.druid.sql.calcite.table.DruidTable) RowSignature(org.apache.druid.segment.column.RowSignature) ColumnType(org.apache.druid.segment.column.ColumnType) Assert(org.junit.Assert)
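
The rule this test pins down: AvailableSegmentMetadata.isRealtime() reports 1 only while a segment is served exclusively by realtime tasks, drops to 0 once a historical announces it, and stays 0 after the realtime task drops it. A toy, pure-JDK model of that rule follows; it is not Druid's implementation, just the invariant the assertions encode:

import java.util.EnumSet;
import java.util.Set;

public class RealtimeFlagSketch {
    enum ServerType { REALTIME, HISTORICAL }

    private final Set<ServerType> servingTypes = EnumSet.noneOf(ServerType.class);

    void addServer(ServerType type) { servingTypes.add(type); }

    void removeServer(ServerType type) { servingTypes.remove(type); }

    // 1 only while the segment is served exclusively by realtime tasks.
    long isRealtime() {
        return !servingTypes.isEmpty() && servingTypes.stream().allMatch(t -> t == ServerType.REALTIME) ? 1L : 0L;
    }

    public static void main(String[] args) {
        RealtimeFlagSketch segment = new RealtimeFlagSketch();
        segment.addServer(ServerType.REALTIME);
        System.out.println(segment.isRealtime()); // 1: realtime only
        segment.addServer(ServerType.HISTORICAL); // like schema.addSegment(historicalServerMetadata, ...)
        System.out.println(segment.isRealtime()); // 0: a historical now serves it
        segment.removeServer(ServerType.REALTIME); // like schema.removeServerSegment(realtime metadata, ...)
        System.out.println(segment.isRealtime()); // 0: still 0 after the realtime drop
    }
}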

Example 19 with SegmentId

use of org.apache.druid.timeline.SegmentId in project druid by druid-io.

From the class DruidSchemaTest, method setUp:

@Before
public void setUp() throws Exception {
    final File tmpDir = temporaryFolder.newFolder();
    final QueryableIndex index1 = IndexBuilder.create().tmpDir(new File(tmpDir, "1")).segmentWriteOutMediumFactory(OffHeapMemorySegmentWriteOutMediumFactory.instance()).schema(new IncrementalIndexSchema.Builder().withMetrics(new CountAggregatorFactory("cnt"), new DoubleSumAggregatorFactory("m1", "m1"), new HyperUniquesAggregatorFactory("unique_dim1", "dim1")).withRollup(false).build()).rows(ROWS1).buildMMappedIndex();
    final QueryableIndex index2 = IndexBuilder.create().tmpDir(new File(tmpDir, "2")).segmentWriteOutMediumFactory(OffHeapMemorySegmentWriteOutMediumFactory.instance()).schema(new IncrementalIndexSchema.Builder().withMetrics(new LongSumAggregatorFactory("m1", "m1")).withRollup(false).build()).rows(ROWS2).buildMMappedIndex();
    walker = new SpecificSegmentsQuerySegmentWalker(conglomerate).add(DataSegment.builder().dataSource(CalciteTests.DATASOURCE1).interval(Intervals.of("2000/P1Y")).version("1").shardSpec(new LinearShardSpec(0)).size(0).build(), index1).add(DataSegment.builder().dataSource(CalciteTests.DATASOURCE1).interval(Intervals.of("2001/P1Y")).version("1").shardSpec(new LinearShardSpec(0)).size(0).build(), index2).add(DataSegment.builder().dataSource(CalciteTests.DATASOURCE2).interval(index2.getDataInterval()).version("1").shardSpec(new LinearShardSpec(0)).size(0).build(), index2);
    final DataSegment segment1 = new DataSegment("foo3", Intervals.of("2012/2013"), "version3", null, ImmutableList.of("dim1", "dim2"), ImmutableList.of("met1", "met2"), new NumberedShardSpec(2, 3), null, 1, 100L, PruneSpecsHolder.DEFAULT);
    final List<DataSegment> realtimeSegments = ImmutableList.of(segment1);
    serverView = new TestServerInventoryView(walker.getSegments(), realtimeSegments);
    druidServers = serverView.getDruidServers();
    schema = new DruidSchema(CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate), serverView, segmentManager, new MapJoinableFactory(ImmutableSet.of(globalTableJoinable), ImmutableMap.of(globalTableJoinable.getClass(), GlobalTableDataSource.class)), PLANNER_CONFIG_DEFAULT, new NoopEscalator(), new BrokerInternalQueryConfig(), null) {

        @Override
        protected DruidTable buildDruidTable(String dataSource) {
            DruidTable table = super.buildDruidTable(dataSource);
            buildTableLatch.countDown();
            return table;
        }

        @Override
        void markDataSourceAsNeedRebuild(String datasource) {
            super.markDataSourceAsNeedRebuild(datasource);
            markDataSourceLatch.countDown();
        }
    };
    schema2 = new DruidSchema(CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate), serverView, segmentManager, new MapJoinableFactory(ImmutableSet.of(globalTableJoinable), ImmutableMap.of(globalTableJoinable.getClass(), GlobalTableDataSource.class)), PLANNER_CONFIG_DEFAULT, new NoopEscalator(), new BrokerInternalQueryConfig(), null) {

        boolean throwException = true;

        @Override
        protected DruidTable buildDruidTable(String dataSource) {
            DruidTable table = super.buildDruidTable(dataSource);
            buildTableLatch.countDown();
            return table;
        }

        @Override
        protected Set<SegmentId> refreshSegments(final Set<SegmentId> segments) throws IOException {
            if (throwException) {
                throwException = false;
                throw new RuntimeException("Query[xxxx] url[http://xxxx:8083/druid/v2/] timed out.");
            } else {
                return super.refreshSegments(segments);
            }
        }

        @Override
        void markDataSourceAsNeedRebuild(String datasource) {
            super.markDataSourceAsNeedRebuild(datasource);
            markDataSourceLatch.countDown();
        }
    };
    schema.start();
    schema.awaitInitialization();
}
Also used : EnumSet(java.util.EnumSet) ImmutableSet(com.google.common.collect.ImmutableSet) Set(java.util.Set) TestServerInventoryView(org.apache.druid.sql.calcite.util.TestServerInventoryView) LinearShardSpec(org.apache.druid.timeline.partition.LinearShardSpec) IndexBuilder(org.apache.druid.segment.IndexBuilder) LongSumAggregatorFactory(org.apache.druid.query.aggregation.LongSumAggregatorFactory) GlobalTableDataSource(org.apache.druid.query.GlobalTableDataSource) DruidTable(org.apache.druid.sql.calcite.table.DruidTable) DataSegment(org.apache.druid.timeline.DataSegment) NoopEscalator(org.apache.druid.server.security.NoopEscalator) MapJoinableFactory(org.apache.druid.segment.join.MapJoinableFactory) NumberedShardSpec(org.apache.druid.timeline.partition.NumberedShardSpec) DoubleSumAggregatorFactory(org.apache.druid.query.aggregation.DoubleSumAggregatorFactory) SegmentId(org.apache.druid.timeline.SegmentId) IOException(java.io.IOException) CountAggregatorFactory(org.apache.druid.query.aggregation.CountAggregatorFactory) SpecificSegmentsQuerySegmentWalker(org.apache.druid.sql.calcite.util.SpecificSegmentsQuerySegmentWalker) QueryableIndex(org.apache.druid.segment.QueryableIndex) BrokerInternalQueryConfig(org.apache.druid.client.BrokerInternalQueryConfig) HyperUniquesAggregatorFactory(org.apache.druid.query.aggregation.hyperloglog.HyperUniquesAggregatorFactory) File(java.io.File) Before(org.junit.Before)
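
schema2 exists to simulate a transient failure: its refreshSegments() override throws on the first call and delegates afterwards, letting the suite verify that a refresh retries past a one-off timeout. That fail-once shape is easy to isolate; here is a generic, pure-JDK sketch (the names are illustrative, not Druid API):

import java.util.function.Supplier;

public class FailOnceSketch {
    // Wraps a delegate so the first call fails and every later call succeeds,
    // mirroring schema2's refreshSegments() override.
    static <T> Supplier<T> failOnce(Supplier<T> delegate, RuntimeException failure) {
        return new Supplier<T>() {
            private boolean thrown = false;

            @Override
            public T get() {
                if (!thrown) {
                    thrown = true;
                    throw failure;
                }
                return delegate.get();
            }
        };
    }

    public static void main(String[] args) {
        Supplier<String> refresh = failOnce(() -> "refreshed", new RuntimeException("timed out"));
        try {
            refresh.get();
        } catch (RuntimeException e) {
            System.out.println("first call failed: " + e.getMessage());
        }
        System.out.println("second call: " + refresh.get());
    }
}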

Example 20 with SegmentId

use of org.apache.druid.timeline.SegmentId in project druid by druid-io.

From the class LoadQueuePeonTest, method testMultipleLoadDropSegments:

@Test
public void testMultipleLoadDropSegments() throws Exception {
    loadQueuePeon = new CuratorLoadQueuePeon(curator, LOAD_QUEUE_PATH, jsonMapper, Execs.scheduledSingleThreaded("test_load_queue_peon_scheduled-%d"), Execs.singleThreaded("test_load_queue_peon-%d"), new TestDruidCoordinatorConfig(null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 10, Duration.millis(0)));
    loadQueuePeon.start();
    ConcurrentMap<SegmentId, CountDownLatch> loadRequestSignals = new ConcurrentHashMap<>(5);
    ConcurrentMap<SegmentId, CountDownLatch> dropRequestSignals = new ConcurrentHashMap<>(5);
    ConcurrentMap<SegmentId, CountDownLatch> segmentLoadedSignals = new ConcurrentHashMap<>(5);
    ConcurrentMap<SegmentId, CountDownLatch> segmentDroppedSignals = new ConcurrentHashMap<>(5);
    // Lists.transform is lazy: the mapping function runs again on every traversal, so it is kept side-effect free.
    final List<DataSegment> segmentToDrop = Lists.transform(ImmutableList.of("2014-10-26T00:00:00Z/P1D", "2014-10-25T00:00:00Z/P1D", "2014-10-24T00:00:00Z/P1D", "2014-10-23T00:00:00Z/P1D", "2014-10-22T00:00:00Z/P1D"), intervalStr -> dataSegmentWithInterval(intervalStr));
    final CountDownLatch[] dropRequestLatches = new CountDownLatch[5];
    final CountDownLatch[] dropSegmentLatches = new CountDownLatch[5];
    for (int i = 0; i < 5; i++) {
        dropRequestLatches[i] = new CountDownLatch(1);
        dropSegmentLatches[i] = new CountDownLatch(1);
    }
    int i = 0;
    for (DataSegment s : segmentToDrop) {
        dropRequestSignals.put(s.getId(), dropRequestLatches[i]);
        segmentDroppedSignals.put(s.getId(), dropSegmentLatches[i++]);
    }
    // As above, keep the lazy transform side-effect free; the load latches are registered once in the
    // loop below rather than inside the mapping function, where they would be re-created on every traversal.
    final List<DataSegment> segmentToLoad = Lists.transform(ImmutableList.of("2014-10-27T00:00:00Z/P1D", "2014-10-29T00:00:00Z/P1M", "2014-10-31T00:00:00Z/P1D", "2014-10-30T00:00:00Z/P1D", "2014-10-28T00:00:00Z/P1D"), intervalStr -> dataSegmentWithInterval(intervalStr));
    final CountDownLatch[] loadRequestLatches = new CountDownLatch[5];
    final CountDownLatch[] segmentLoadedLatches = new CountDownLatch[5];
    for (i = 0; i < 5; i++) {
        loadRequestLatches[i] = new CountDownLatch(1);
        segmentLoadedLatches[i] = new CountDownLatch(1);
    }
    i = 0;
    for (DataSegment s : segmentToLoad) {
        loadRequestSignals.put(s.getId(), loadRequestLatches[i]);
        segmentLoadedSignals.put(s.getId(), segmentLoadedLatches[i++]);
    }
    // segment with latest interval should be loaded first
    final List<DataSegment> expectedLoadOrder = Lists.transform(ImmutableList.of("2014-10-29T00:00:00Z/P1M", "2014-10-31T00:00:00Z/P1D", "2014-10-30T00:00:00Z/P1D", "2014-10-28T00:00:00Z/P1D", "2014-10-27T00:00:00Z/P1D"), intervalStr -> dataSegmentWithInterval(intervalStr));
    final DataSegmentChangeHandler handler = new DataSegmentChangeHandler() {

        @Override
        public void addSegment(DataSegment segment, DataSegmentChangeCallback callback) {
            loadRequestSignals.get(segment.getId()).countDown();
        }

        @Override
        public void removeSegment(DataSegment segment, DataSegmentChangeCallback callback) {
            dropRequestSignals.get(segment.getId()).countDown();
        }
    };
    loadQueueCache.getListenable().addListener((client, event) -> {
        if (event.getType() == PathChildrenCacheEvent.Type.CHILD_ADDED) {
            DataSegmentChangeRequest request = jsonMapper.readValue(event.getData().getData(), DataSegmentChangeRequest.class);
            request.go(handler, null);
        }
    });
    loadQueueCache.start();
    for (final DataSegment segment : segmentToDrop) {
        loadQueuePeon.dropSegment(segment, () -> segmentDroppedSignals.get(segment.getId()).countDown());
    }
    for (final DataSegment segment : segmentToLoad) {
        loadQueuePeon.loadSegment(segment, () -> segmentLoadedSignals.get(segment.getId()).countDown());
    }
    Assert.assertEquals(6000, loadQueuePeon.getLoadQueueSize());
    Assert.assertEquals(5, loadQueuePeon.getSegmentsToLoad().size());
    Assert.assertEquals(5, loadQueuePeon.getSegmentsToDrop().size());
    Assert.assertEquals(0, loadQueuePeon.getTimedOutSegments().size());
    for (DataSegment segment : segmentToDrop) {
        String dropRequestPath = ZKPaths.makePath(LOAD_QUEUE_PATH, segment.getId().toString());
        Assert.assertTrue("Latch not counted down for " + dropRequestSignals.get(segment.getId()), dropRequestSignals.get(segment.getId()).await(10, TimeUnit.SECONDS));
        Assert.assertNotNull("Path " + dropRequestPath + " doesn't exist", curator.checkExists().forPath(dropRequestPath));
        Assert.assertEquals(segment, ((SegmentChangeRequestDrop) jsonMapper.readValue(curator.getData().decompressed().forPath(dropRequestPath), DataSegmentChangeRequest.class)).getSegment());
        // simulate completion of drop request by historical
        curator.delete().guaranteed().forPath(dropRequestPath);
        Assert.assertTrue(timing.forWaiting().awaitLatch(segmentDroppedSignals.get(segment.getId())));
    }
    for (DataSegment segment : expectedLoadOrder) {
        String loadRequestPath = ZKPaths.makePath(LOAD_QUEUE_PATH, segment.getId().toString());
        Assert.assertTrue(timing.forWaiting().awaitLatch(loadRequestSignals.get(segment.getId())));
        Assert.assertNotNull(curator.checkExists().forPath(loadRequestPath));
        Assert.assertEquals(segment, ((SegmentChangeRequestLoad) jsonMapper.readValue(curator.getData().decompressed().forPath(loadRequestPath), DataSegmentChangeRequest.class)).getSegment());
        // simulate completion of load request by historical
        curator.delete().guaranteed().forPath(loadRequestPath);
        Assert.assertTrue(timing.forWaiting().awaitLatch(segmentLoadedSignals.get(segment.getId())));
    }
}
Also used : SegmentId(org.apache.druid.timeline.SegmentId) DataSegmentChangeRequest(org.apache.druid.server.coordination.DataSegmentChangeRequest) CountDownLatch(java.util.concurrent.CountDownLatch) DataSegment(org.apache.druid.timeline.DataSegment) DataSegmentChangeHandler(org.apache.druid.server.coordination.DataSegmentChangeHandler) DataSegmentChangeCallback(org.apache.druid.server.coordination.DataSegmentChangeCallback) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) Test(org.junit.Test)
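
Stripped of the curator wiring, the test is a latch handshake: one CountDownLatch per segment id registered up front, counted down from the asynchronous change handler, and awaited with a bounded timeout on the assertion side instead of sleeping. A self-contained sketch of that handshake, using a String key as a stand-in for SegmentId:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

public class LatchHandshakeSketch {
    public static void main(String[] args) throws InterruptedException {
        ConcurrentMap<String, CountDownLatch> requestSignals = new ConcurrentHashMap<>();
        requestSignals.put("segment-1", new CountDownLatch(1));

        // Stand-in for the DataSegmentChangeHandler callback firing on another thread.
        new Thread(() -> requestSignals.get("segment-1").countDown()).start();

        // Assertion side: bounded wait, like the test's await(10, TimeUnit.SECONDS).
        boolean seen = requestSignals.get("segment-1").await(10, TimeUnit.SECONDS);
        System.out.println("request observed: " + seen);
    }
}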

Aggregations

SegmentId (org.apache.druid.timeline.SegmentId)63 DataSegment (org.apache.druid.timeline.DataSegment)32 Test (org.junit.Test)21 Interval (org.joda.time.Interval)14 ISE (org.apache.druid.java.util.common.ISE)13 ArrayList (java.util.ArrayList)12 Map (java.util.Map)12 Set (java.util.Set)12 ImmutableDruidDataSource (org.apache.druid.client.ImmutableDruidDataSource)12 List (java.util.List)11 ImmutableMap (com.google.common.collect.ImmutableMap)10 IOException (java.io.IOException)9 TreeMap (java.util.TreeMap)9 CountDownLatch (java.util.concurrent.CountDownLatch)9 VisibleForTesting (com.google.common.annotations.VisibleForTesting)8 Collectors (java.util.stream.Collectors)8 Optional (java.util.Optional)7 Sets (com.google.common.collect.Sets)6 Nullable (javax.annotation.Nullable)6 Response (javax.ws.rs.core.Response)6