
Example 16 with CoordinatorStats

use of org.apache.druid.server.coordinator.CoordinatorStats in project druid by druid-io.

the class MarkAsUnusedOvershadowedSegmentsTest method testRun.

@Test
@Parameters({ "historical", "broker" })
public void testRun(String serverTypeString) {
    ServerType serverType = ServerType.fromString(serverTypeString);
    markAsUnusedOvershadowedSegments = new MarkAsUnusedOvershadowedSegments(coordinator);
    usedSegments = ImmutableList.of(segmentV1, segmentV0, segmentV2);
    // Dummy values for comparisons in TreeSet
    EasyMock.expect(mockPeon.getLoadQueueSize()).andReturn(0L).anyTimes();
    EasyMock.expect(druidServer.getMaxSize()).andReturn(0L).anyTimes();
    EasyMock.expect(druidServer.getCurrSize()).andReturn(0L).anyTimes();
    EasyMock.expect(druidServer.getName()).andReturn("").anyTimes();
    EasyMock.expect(druidServer.getHost()).andReturn("").anyTimes();
    EasyMock.expect(druidServer.getTier()).andReturn("").anyTimes();
    EasyMock.expect(druidServer.getType()).andReturn(serverType).anyTimes();
    EasyMock.expect(druidServer.getDataSources()).andReturn(ImmutableList.of(druidDataSource)).anyTimes();
    EasyMock.expect(druidDataSource.getSegments()).andReturn(ImmutableSet.of(segmentV1, segmentV2)).anyTimes();
    EasyMock.expect(druidDataSource.getName()).andReturn("test").anyTimes();
    coordinator.markSegmentAsUnused(segmentV1);
    coordinator.markSegmentAsUnused(segmentV0);
    EasyMock.expectLastCall();
    EasyMock.replay(mockPeon, coordinator, druidServer, druidDataSource);
    druidCluster = DruidClusterBuilder.newBuilder().addTier("normal", new ServerHolder(druidServer, mockPeon)).build();
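    // The dynamic config (per the constant's name) uses a zero leading time before overshadowed
    // segments may be marked unused, so the duty is expected to mark segmentV0 and segmentV1 immediately.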
    DruidCoordinatorRuntimeParams params = CoordinatorRuntimeParamsTestHelpers
        .newBuilder()
        .withUsedSegmentsInTest(usedSegments)
        .withCoordinatorStats(new CoordinatorStats())
        .withDruidCluster(druidCluster)
        .withDynamicConfigs(RunRulesTest.COORDINATOR_CONFIG_WITH_ZERO_LEADING_TIME_BEFORE_CAN_MARK_AS_UNUSED_OVERSHADOWED_SEGMENTS)
        .build();
    markAsUnusedOvershadowedSegments.run(params);
    EasyMock.verify(coordinator, druidDataSource, druidServer);
}
Also used : DruidCoordinatorRuntimeParams(org.apache.druid.server.coordinator.DruidCoordinatorRuntimeParams) ServerType(org.apache.druid.server.coordination.ServerType) CoordinatorStats(org.apache.druid.server.coordinator.CoordinatorStats) ServerHolder(org.apache.druid.server.coordinator.ServerHolder) Parameters(junitparams.Parameters) Test(org.junit.Test) RunRulesTest(org.apache.druid.server.coordinator.RunRulesTest)
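
The duty under test writes its results into the CoordinatorStats instance passed in through DruidCoordinatorRuntimeParams, and the later examples read those results back with getGlobalStat. As orientation, here is a minimal sketch of that accumulator API. Only new CoordinatorStats() and getGlobalStat appear verbatim in these examples; the mutators and the per-tier variants below are assumptions about the same class.

// Minimal sketch, not part of the test above; mutator names are assumed.
CoordinatorStats stats = new CoordinatorStats();
// Global counters are keyed by stat name; Examples 17-20 read them via getGlobalStat.
stats.addToGlobalStat("compactTaskCount", 1);
long taskCount = stats.getGlobalStat("compactTaskCount");
// Per-tier counters (assumed per-tier variants of the same accessors).
stats.addToTieredStat("assignedCount", "normal", 5);
long assignedInNormalTier = stats.getTieredStat("assignedCount", "normal");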

Example 17 with CoordinatorStats

use of org.apache.druid.server.coordinator.CoordinatorStats in project druid by druid-io.

the class CompactSegmentsTest method assertLastSegmentNotCompacted.

private void assertLastSegmentNotCompacted(CompactSegments compactSegments) {
    // Segments of the latest interval should not be compacted
    for (int i = 0; i < 3; i++) {
        final String dataSource = DATA_SOURCE_PREFIX + i;
        final Interval interval = Intervals.of(StringUtils.format("2017-01-09T12:00:00/2017-01-10"));
        List<TimelineObjectHolder<String, DataSegment>> holders = dataSources.get(dataSource).lookup(interval);
        Assert.assertEquals(1, holders.size());
        for (TimelineObjectHolder<String, DataSegment> holder : holders) {
            List<PartitionChunk<DataSegment>> chunks = Lists.newArrayList(holder.getObject());
            Assert.assertEquals(4, chunks.size());
            for (PartitionChunk<DataSegment> chunk : chunks) {
                DataSegment segment = chunk.getObject();
                Assert.assertEquals(interval, segment.getInterval());
                Assert.assertEquals("version", segment.getVersion());
            }
        }
    }
    // Emulating realtime dataSource
    final String dataSource = DATA_SOURCE_PREFIX + 0;
    addMoreData(dataSource, 9);
    CoordinatorStats stats = doCompactSegments(compactSegments);
    Assert.assertEquals(1, stats.getGlobalStat(CompactSegments.COMPACTION_TASK_COUNT));
    addMoreData(dataSource, 10);
    stats = doCompactSegments(compactSegments);
    Assert.assertEquals(1, stats.getGlobalStat(CompactSegments.COMPACTION_TASK_COUNT));
}
Also used : CoordinatorStats(org.apache.druid.server.coordinator.CoordinatorStats) TimelineObjectHolder(org.apache.druid.timeline.TimelineObjectHolder) PartitionChunk(org.apache.druid.timeline.partition.PartitionChunk) DataSegment(org.apache.druid.timeline.DataSegment) Interval(org.joda.time.Interval)
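
doCompactSegments is a private helper of CompactSegmentsTest whose body is not part of this snippet. Assuming it does nothing more than run the CompactSegments duty against runtime params built the way Example 16 builds them and hand back the accumulated stats, an equivalent helper could look like the sketch below; the name runCompactDuty and the getCoordinatorStats accessor are assumptions for illustration.

// Hedged sketch of a doCompactSegments-style helper; not the actual test code.
private CoordinatorStats runCompactDuty(CompactSegments compactSegments, DruidCoordinatorRuntimeParams params)
{
    // A coordinator duty's run(...) returns runtime params; the stats carried inside them
    // are what the assertions above query via getGlobalStat(...).
    DruidCoordinatorRuntimeParams result = compactSegments.run(params);
    // Assumed accessor for the stats object set in Example 16 via withCoordinatorStats(...).
    return result.getCoordinatorStats();
}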

Example 18 with CoordinatorStats

use of org.apache.druid.server.coordinator.CoordinatorStats in project druid by druid-io.

the class CompactSegmentsTest method testMakeStatsWithDeactivatedDatasource.

@Test
public void testMakeStatsWithDeactivatedDatasource() {
    final TestDruidLeaderClient leaderClient = new TestDruidLeaderClient(JSON_MAPPER);
    leaderClient.start();
    final HttpIndexingServiceClient indexingServiceClient = new HttpIndexingServiceClient(JSON_MAPPER, leaderClient);
    final CompactSegments compactSegments = new CompactSegments(COORDINATOR_CONFIG, JSON_MAPPER, indexingServiceClient);
    // Before any compaction, we do not have any snapshot of compactions
    Map<String, AutoCompactionSnapshot> autoCompactionSnapshots = compactSegments.getAutoCompactionSnapshot();
    Assert.assertEquals(0, autoCompactionSnapshots.size());
    for (int compactionRunCount = 0; compactionRunCount < 11; compactionRunCount++) {
        doCompactionAndAssertCompactSegmentStatistics(compactSegments, compactionRunCount);
    }
    // Test that the stats do not change (and are still correct) when auto compaction runs after everything is fully compacted
    final CoordinatorStats stats = doCompactSegments(compactSegments);
    Assert.assertEquals(0, stats.getGlobalStat(CompactSegments.COMPACTION_TASK_COUNT));
    for (int i = 0; i < 3; i++) {
        verifySnapshot(compactSegments, AutoCompactionSnapshot.AutoCompactionScheduleStatus.RUNNING, DATA_SOURCE_PREFIX + i, 0, TOTAL_BYTE_PER_DATASOURCE, 0, 0, TOTAL_INTERVAL_PER_DATASOURCE, 0, 0, TOTAL_SEGMENT_PER_DATASOURCE / 2, 0);
    }
    // Deactivate one datasource (datasource 0 no longer exists in the timeline)
    dataSources.remove(DATA_SOURCE_PREFIX + 0);
    // Test run auto compaction with one datasource deactivated
    // Snapshot should not contain deactivated datasource
    doCompactSegments(compactSegments);
    for (int i = 1; i < 3; i++) {
        verifySnapshot(compactSegments, AutoCompactionSnapshot.AutoCompactionScheduleStatus.RUNNING, DATA_SOURCE_PREFIX + i, 0, TOTAL_BYTE_PER_DATASOURCE, 0, 0, TOTAL_INTERVAL_PER_DATASOURCE, 0, 0, TOTAL_SEGMENT_PER_DATASOURCE / 2, 0);
    }
    Assert.assertEquals(2, compactSegments.getAutoCompactionSnapshot().size());
    Assert.assertTrue(compactSegments.getAutoCompactionSnapshot().containsKey(DATA_SOURCE_PREFIX + 1));
    Assert.assertTrue(compactSegments.getAutoCompactionSnapshot().containsKey(DATA_SOURCE_PREFIX + 2));
    Assert.assertFalse(compactSegments.getAutoCompactionSnapshot().containsKey(DATA_SOURCE_PREFIX + 0));
}
Also used : HttpIndexingServiceClient(org.apache.druid.client.indexing.HttpIndexingServiceClient) CoordinatorStats(org.apache.druid.server.coordinator.CoordinatorStats) AutoCompactionSnapshot(org.apache.druid.server.coordinator.AutoCompactionSnapshot) Test(org.junit.Test)
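
verifySnapshot is another private CompactSegmentsTest helper; judging from the constants passed above, its nine trailing numbers are byte, interval, and segment counts split into awaiting/compacted/skipped buckets. A hedged sketch of what such a check plausibly does against AutoCompactionSnapshot's getters follows; the helper name, the reduced parameter list, and the exact argument order are assumptions, and only getAutoCompactionSnapshot() is taken verbatim from the code above.

// Hedged sketch of a verifySnapshot-style check; not the actual test helper.
private void checkSnapshot(
    CompactSegments compactSegments,
    String dataSource,
    long expectedBytesAwaitingCompaction,
    long expectedBytesCompacted,
    long expectedBytesSkipped)
{
    AutoCompactionSnapshot snapshot = compactSegments.getAutoCompactionSnapshot().get(dataSource);
    Assert.assertEquals(AutoCompactionSnapshot.AutoCompactionScheduleStatus.RUNNING, snapshot.getScheduleStatus());
    Assert.assertEquals(expectedBytesAwaitingCompaction, snapshot.getBytesAwaitingCompaction());
    Assert.assertEquals(expectedBytesCompacted, snapshot.getBytesCompacted());
    Assert.assertEquals(expectedBytesSkipped, snapshot.getBytesSkipped());
    // Interval and segment counts have analogous getters on AutoCompactionSnapshot:
    // getIntervalCountAwaitingCompaction / getIntervalCountCompacted / getIntervalCountSkipped and
    // getSegmentCountAwaitingCompaction / getSegmentCountCompacted / getSegmentCountSkipped.
}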

Example 19 with CoordinatorStats

use of org.apache.druid.server.coordinator.CoordinatorStats in project druid by druid-io.

the class CompactSegmentsTest method doCompactionAndAssertCompactSegmentStatistics.

private void doCompactionAndAssertCompactSegmentStatistics(CompactSegments compactSegments, int compactionRunCount) {
    for (int dataSourceIndex = 0; dataSourceIndex < 3; dataSourceIndex++) {
        // One compaction task triggered
        final CoordinatorStats stats = doCompactSegments(compactSegments);
        Assert.assertEquals(1, stats.getGlobalStat(CompactSegments.COMPACTION_TASK_COUNT));
        // The expected snapshot values below assume the 4 segments of each compacted interval were compacted into 2 segments.
        for (int i = 0; i <= dataSourceIndex; i++) {
            // This verifies that a dataSource which got a slot to compact has correct statistics
            if (i != dataSourceIndex) {
                verifySnapshot(
                    compactSegments,
                    AutoCompactionSnapshot.AutoCompactionScheduleStatus.RUNNING,
                    DATA_SOURCE_PREFIX + i,
                    TOTAL_BYTE_PER_DATASOURCE - 40 * (compactionRunCount + 1),
                    40 * (compactionRunCount + 1),
                    0,
                    TOTAL_INTERVAL_PER_DATASOURCE - (compactionRunCount + 1),
                    (compactionRunCount + 1),
                    0,
                    TOTAL_SEGMENT_PER_DATASOURCE - 4 * (compactionRunCount + 1),
                    2 * (compactionRunCount + 1),
                    0);
            } else {
                verifySnapshot(
                    compactSegments,
                    AutoCompactionSnapshot.AutoCompactionScheduleStatus.RUNNING,
                    DATA_SOURCE_PREFIX + i,
                    TOTAL_BYTE_PER_DATASOURCE - 40 * (compactionRunCount + 1),
                    40 * (compactionRunCount + 1),
                    0,
                    TOTAL_INTERVAL_PER_DATASOURCE - (compactionRunCount + 1),
                    (compactionRunCount + 1),
                    0,
                    TOTAL_SEGMENT_PER_DATASOURCE - 4 * (compactionRunCount + 1),
                    2 * compactionRunCount + 4,
                    0);
            }
        }
        for (int i = dataSourceIndex + 1; i < 3; i++) {
            // dataSources after dataSourceIndex have not been compacted in this run yet; their stats
            // should still match the values from before this run. This verifies that a dataSource that
            // ran out of compaction task slots has correct statistics.
            verifySnapshot(
                compactSegments,
                AutoCompactionSnapshot.AutoCompactionScheduleStatus.RUNNING,
                DATA_SOURCE_PREFIX + i,
                TOTAL_BYTE_PER_DATASOURCE - 40 * compactionRunCount,
                40 * compactionRunCount,
                0,
                TOTAL_INTERVAL_PER_DATASOURCE - compactionRunCount,
                compactionRunCount,
                0,
                TOTAL_SEGMENT_PER_DATASOURCE - 4 * compactionRunCount,
                2 * compactionRunCount,
                0);
        }
    }
}
Also used : CoordinatorStats(org.apache.druid.server.coordinator.CoordinatorStats)

Example 20 with CoordinatorStats

use of org.apache.druid.server.coordinator.CoordinatorStats in project druid by druid-io.

the class CompactSegmentsTest method testRunMultipleCompactionTaskSlots.

@Test
public void testRunMultipleCompactionTaskSlots() {
    final TestDruidLeaderClient leaderClient = new TestDruidLeaderClient(JSON_MAPPER);
    leaderClient.start();
    final HttpIndexingServiceClient indexingServiceClient = new HttpIndexingServiceClient(JSON_MAPPER, leaderClient);
    final CompactSegments compactSegments = new CompactSegments(COORDINATOR_CONFIG, JSON_MAPPER, indexingServiceClient);
    final CoordinatorStats stats = doCompactSegments(compactSegments, 3);
    Assert.assertEquals(3, stats.getGlobalStat(CompactSegments.AVAILABLE_COMPACTION_TASK_SLOT));
    Assert.assertEquals(3, stats.getGlobalStat(CompactSegments.MAX_COMPACTION_TASK_SLOT));
    Assert.assertEquals(3, stats.getGlobalStat(CompactSegments.COMPACTION_TASK_COUNT));
}
Also used : HttpIndexingServiceClient(org.apache.druid.client.indexing.HttpIndexingServiceClient) CoordinatorStats(org.apache.druid.server.coordinator.CoordinatorStats) Test(org.junit.Test)
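
The three keys asserted here are public String constants on CompactSegments and are ordinary global stats written into the same CoordinatorStats accumulator discussed after Example 16. A minimal producer-side illustration, using arbitrary values rather than CompactSegments' real slot arithmetic, and the assumed addToGlobalStat mutator:

// Illustration only; values are arbitrary and this is not how CompactSegments computes them.
CoordinatorStats stats = new CoordinatorStats();
stats.addToGlobalStat(CompactSegments.MAX_COMPACTION_TASK_SLOT, 3);
stats.addToGlobalStat(CompactSegments.AVAILABLE_COMPACTION_TASK_SLOT, 3);
stats.addToGlobalStat(CompactSegments.COMPACTION_TASK_COUNT, 3);
Assert.assertEquals(3, stats.getGlobalStat(CompactSegments.COMPACTION_TASK_COUNT));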

Aggregations

CoordinatorStats (org.apache.druid.server.coordinator.CoordinatorStats) 45
DataSegment (org.apache.druid.timeline.DataSegment) 31
Test (org.junit.Test) 31
ServerHolder (org.apache.druid.server.coordinator.ServerHolder) 24
DruidCluster (org.apache.druid.server.coordinator.DruidCluster) 22
DruidServer (org.apache.druid.client.DruidServer) 15
DruidCoordinatorRuntimeParams (org.apache.druid.server.coordinator.DruidCoordinatorRuntimeParams) 14
LoadQueuePeon (org.apache.druid.server.coordinator.LoadQueuePeon) 14
ImmutableDruidServer (org.apache.druid.client.ImmutableDruidServer) 13
HttpIndexingServiceClient (org.apache.druid.client.indexing.HttpIndexingServiceClient) 11
HashMap (java.util.HashMap) 10
ArrayList (java.util.ArrayList) 9
AutoCompactionSnapshot (org.apache.druid.server.coordinator.AutoCompactionSnapshot) 9
List (java.util.List) 7
Map (java.util.Map) 6
DateTimes (org.apache.druid.java.util.common.DateTimes) 6
Intervals (org.apache.druid.java.util.common.Intervals) 6
CoordinatorRuntimeParamsTestHelpers (org.apache.druid.server.coordinator.CoordinatorRuntimeParamsTestHelpers) 6
Assert (org.junit.Assert) 6
Before (org.junit.Before) 6