Use of org.apache.druid.server.coordinator.AutoCompactionSnapshot in project druid by druid-io.
From the class CompactSegments, method makeStats:
private CoordinatorStats makeStats(
    Map<String, AutoCompactionSnapshot.Builder> currentRunAutoCompactionSnapshotBuilders,
    int numCompactionTasks,
    CompactionSegmentIterator iterator
)
{
  final Map<String, AutoCompactionSnapshot> currentAutoCompactionSnapshotPerDataSource = new HashMap<>();
  final CoordinatorStats stats = new CoordinatorStats();
  stats.addToGlobalStat(COMPACTION_TASK_COUNT, numCompactionTasks);
  // Segments remaining in the iterator were eligible but did not get compacted in this run
  // (e.g. for lack of task slots), so aggregate them into the AwaitingCompaction statistics.
  while (iterator.hasNext()) {
    final List<DataSegment> segmentsToCompact = iterator.next();
    if (!segmentsToCompact.isEmpty()) {
      final String dataSourceName = segmentsToCompact.get(0).getDataSource();
      AutoCompactionSnapshot.Builder snapshotBuilder = currentRunAutoCompactionSnapshotBuilders.computeIfAbsent(
          dataSourceName,
          k -> new AutoCompactionSnapshot.Builder(k, AutoCompactionSnapshot.AutoCompactionScheduleStatus.RUNNING)
      );
      snapshotBuilder.incrementBytesAwaitingCompaction(
          segmentsToCompact.stream().mapToLong(DataSegment::getSize).sum()
      );
      snapshotBuilder.incrementIntervalCountAwaitingCompaction(
          segmentsToCompact.stream().map(DataSegment::getInterval).distinct().count()
      );
      snapshotBuilder.incrementSegmentCountAwaitingCompaction(segmentsToCompact.size());
    }
  }
  // Statistics of all segments considered compacted after this run
  Map<String, CompactionStatistics> allCompactedStatistics = iterator.totalCompactedStatistics();
  for (Map.Entry<String, CompactionStatistics> compactionStatisticsEntry : allCompactedStatistics.entrySet()) {
    final String dataSource = compactionStatisticsEntry.getKey();
    final CompactionStatistics dataSourceCompactedStatistics = compactionStatisticsEntry.getValue();
    AutoCompactionSnapshot.Builder builder = currentRunAutoCompactionSnapshotBuilders.computeIfAbsent(
        dataSource,
        k -> new AutoCompactionSnapshot.Builder(k, AutoCompactionSnapshot.AutoCompactionScheduleStatus.RUNNING)
    );
    builder.incrementBytesCompacted(dataSourceCompactedStatistics.getByteSum());
    builder.incrementSegmentCountCompacted(dataSourceCompactedStatistics.getSegmentNumberCountSum());
    builder.incrementIntervalCountCompacted(dataSourceCompactedStatistics.getSegmentIntervalCountSum());
  }

  // Statistics of all segments considered skipped after this run
  Map<String, CompactionStatistics> allSkippedStatistics = iterator.totalSkippedStatistics();
  for (Map.Entry<String, CompactionStatistics> compactionStatisticsEntry : allSkippedStatistics.entrySet()) {
    final String dataSource = compactionStatisticsEntry.getKey();
    final CompactionStatistics dataSourceSkippedStatistics = compactionStatisticsEntry.getValue();
    AutoCompactionSnapshot.Builder builder = currentRunAutoCompactionSnapshotBuilders.computeIfAbsent(
        dataSource,
        k -> new AutoCompactionSnapshot.Builder(k, AutoCompactionSnapshot.AutoCompactionScheduleStatus.RUNNING)
    );
    builder.incrementBytesSkipped(dataSourceSkippedStatistics.getByteSum());
    builder.incrementSegmentCountSkipped(dataSourceSkippedStatistics.getSegmentNumberCountSum());
    builder.incrementIntervalCountSkipped(dataSourceSkippedStatistics.getSegmentIntervalCountSum());
  }

  for (Map.Entry<String, AutoCompactionSnapshot.Builder> autoCompactionSnapshotBuilderEntry :
      currentRunAutoCompactionSnapshotBuilders.entrySet()) {
    final String dataSource = autoCompactionSnapshotBuilderEntry.getKey();
    final AutoCompactionSnapshot.Builder builder = autoCompactionSnapshotBuilderEntry.getValue();
    // Build the complete snapshot for the datasource
    AutoCompactionSnapshot autoCompactionSnapshot = builder.build();
    currentAutoCompactionSnapshotPerDataSource.put(dataSource, autoCompactionSnapshot);
    // Use the complete snapshot to emit metrics
    stats.addToDataSourceStat(TOTAL_SIZE_OF_SEGMENTS_AWAITING, dataSource, autoCompactionSnapshot.getBytesAwaitingCompaction());
    stats.addToDataSourceStat(TOTAL_COUNT_OF_SEGMENTS_AWAITING, dataSource, autoCompactionSnapshot.getSegmentCountAwaitingCompaction());
    stats.addToDataSourceStat(TOTAL_INTERVAL_OF_SEGMENTS_AWAITING, dataSource, autoCompactionSnapshot.getIntervalCountAwaitingCompaction());
    stats.addToDataSourceStat(TOTAL_SIZE_OF_SEGMENTS_COMPACTED, dataSource, autoCompactionSnapshot.getBytesCompacted());
    stats.addToDataSourceStat(TOTAL_COUNT_OF_SEGMENTS_COMPACTED, dataSource, autoCompactionSnapshot.getSegmentCountCompacted());
    stats.addToDataSourceStat(TOTAL_INTERVAL_OF_SEGMENTS_COMPACTED, dataSource, autoCompactionSnapshot.getIntervalCountCompacted());
    stats.addToDataSourceStat(TOTAL_SIZE_OF_SEGMENTS_SKIPPED, dataSource, autoCompactionSnapshot.getBytesSkipped());
    stats.addToDataSourceStat(TOTAL_COUNT_OF_SEGMENTS_SKIPPED, dataSource, autoCompactionSnapshot.getSegmentCountSkipped());
    stats.addToDataSourceStat(TOTAL_INTERVAL_OF_SEGMENTS_SKIPPED, dataSource, autoCompactionSnapshot.getIntervalCountSkipped());
  }

  // Atomic update of autoCompactionSnapshotPerDataSource with the latest from this coordinator run
  autoCompactionSnapshotPerDataSource.set(currentAutoCompactionSnapshotPerDataSource);
  return stats;
}
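Taken together, makeStats funnels three kinds of per-datasource counters (awaiting, compacted, skipped) through one AutoCompactionSnapshot.Builder per datasource. Below is a minimal sketch of that builder-to-snapshot flow, using only the Builder methods and snapshot getters that appear above; the datasource name and every numeric value are made up for illustration.

// Minimal sketch: accumulate counters for one datasource, then freeze them into a snapshot.
// "wikipedia" and all counter values here are illustrative, not taken from Druid test data.
AutoCompactionSnapshot.Builder builder = new AutoCompactionSnapshot.Builder(
    "wikipedia",
    AutoCompactionSnapshot.AutoCompactionScheduleStatus.RUNNING
);
builder.incrementBytesAwaitingCompaction(40L);       // bytes in segments still waiting for a task slot
builder.incrementIntervalCountAwaitingCompaction(1); // distinct intervals among those segments
builder.incrementSegmentCountAwaitingCompaction(4);  // number of waiting segments
builder.incrementBytesCompacted(120L);
builder.incrementSegmentCountCompacted(12);
builder.incrementIntervalCountCompacted(3);

AutoCompactionSnapshot snapshot = builder.build();
// The snapshot getters mirror the increments accumulated above, e.g.
// snapshot.getBytesAwaitingCompaction() == 40 and snapshot.getSegmentCountCompacted() == 12.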
Use of org.apache.druid.server.coordinator.AutoCompactionSnapshot in project druid by druid-io.
From the class CompactionResourceTest, method testGetCompactionSnapshotForDataSourceWithEmptyQueryParameter:
@Test
public void testGetCompactionSnapshotForDataSourceWithEmptyQueryParameter()
{
  Map<String, AutoCompactionSnapshot> expected = ImmutableMap.of(dataSourceName, expectedSnapshot);
  EasyMock.expect(mock.getAutoCompactionSnapshot()).andReturn(expected).once();
  EasyMock.replay(mock);

  final Response response = new CompactionResource(mock).getCompactionSnapshotForDataSource("");
  Assert.assertEquals(ImmutableMap.of("latestStatus", expected.values()), response.getEntity());
  Assert.assertEquals(200, response.getStatus());
}
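For contrast, here is a companion sketch of the non-empty-parameter path. It assumes the coordinator mock exposes a per-datasource accessor named getAutoCompactionSnapshotForDataSource and that a named datasource comes back as a single-element "latestStatus" collection; neither is confirmed by the snippet above, so treat both as assumptions.

@Test
public void testGetCompactionSnapshotForDataSourceWithValidQueryParameter() {
  // Assumed accessor name: the empty-parameter test above only exercises getAutoCompactionSnapshot().
  EasyMock.expect(mock.getAutoCompactionSnapshotForDataSource(dataSourceName))
          .andReturn(expectedSnapshot)
          .once();
  EasyMock.replay(mock);

  final Response response = new CompactionResource(mock).getCompactionSnapshotForDataSource(dataSourceName);
  // Assumed response shape: one snapshot wrapped in the same "latestStatus" envelope.
  Assert.assertEquals(ImmutableMap.of("latestStatus", ImmutableList.of(expectedSnapshot)), response.getEntity());
  Assert.assertEquals(200, response.getStatus());
}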
Use of org.apache.druid.server.coordinator.AutoCompactionSnapshot in project druid by druid-io.
From the class CompactSegmentsTest, method testMakeStats:
@Test
public void testMakeStats()
{
  final TestDruidLeaderClient leaderClient = new TestDruidLeaderClient(JSON_MAPPER);
  leaderClient.start();
  final HttpIndexingServiceClient indexingServiceClient = new HttpIndexingServiceClient(JSON_MAPPER, leaderClient);
  final CompactSegments compactSegments = new CompactSegments(COORDINATOR_CONFIG, JSON_MAPPER, indexingServiceClient);

  // Before any compaction, we do not have any snapshot of compactions
  Map<String, AutoCompactionSnapshot> autoCompactionSnapshots = compactSegments.getAutoCompactionSnapshot();
  Assert.assertEquals(0, autoCompactionSnapshots.size());

  for (int compactionRunCount = 0; compactionRunCount < 11; compactionRunCount++) {
    doCompactionAndAssertCompactSegmentStatistics(compactSegments, compactionRunCount);
  }
  // Test that the stats do not change (and are still correct) when auto compaction runs after everything has been fully compacted
  final CoordinatorStats stats = doCompactSegments(compactSegments);
  Assert.assertEquals(0, stats.getGlobalStat(CompactSegments.COMPACTION_TASK_COUNT));
  for (int i = 0; i < 3; i++) {
    verifySnapshot(
        compactSegments,
        AutoCompactionSnapshot.AutoCompactionScheduleStatus.RUNNING,
        DATA_SOURCE_PREFIX + i,
        0,
        TOTAL_BYTE_PER_DATASOURCE,
        0,
        0,
        TOTAL_INTERVAL_PER_DATASOURCE,
        0,
        0,
        TOTAL_SEGMENT_PER_DATASOURCE / 2,
        0
    );
  }

  // Run auto compaction with auto compaction disabled for one datasource;
  // the snapshot should not contain the datasource whose auto compaction is disabled
  List<DataSourceCompactionConfig> removedOneConfig = createCompactionConfigs();
  removedOneConfig.remove(0);
  doCompactSegments(compactSegments, removedOneConfig);
  for (int i = 1; i < 3; i++) {
    verifySnapshot(
        compactSegments,
        AutoCompactionSnapshot.AutoCompactionScheduleStatus.RUNNING,
        DATA_SOURCE_PREFIX + i,
        0,
        TOTAL_BYTE_PER_DATASOURCE,
        0,
        0,
        TOTAL_INTERVAL_PER_DATASOURCE,
        0,
        0,
        TOTAL_SEGMENT_PER_DATASOURCE / 2,
        0
    );
  }

  // Run auto compaction without any dataSource in the compaction config;
  // the snapshot should be empty
  doCompactSegments(compactSegments, new ArrayList<>());
  Assert.assertEquals(0, stats.getGlobalStat(CompactSegments.COMPACTION_TASK_COUNT));
  Assert.assertTrue(compactSegments.getAutoCompactionSnapshot().isEmpty());
  assertLastSegmentNotCompacted(compactSegments);
}
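The nine numeric arguments to verifySnapshot are hard to read positionally. Matching them against the builder increments in makeStats suggests the ordering sketched below: awaiting/compacted/skipped, first for bytes, then intervals, then segments. The parameter names (and the getScheduleStatus accessor) are inferred for illustration, not copied from the test source.

// Inferred shape of the verifySnapshot helper; all names are illustrative.
private void verifySnapshot(
    CompactSegments compactSegments,
    AutoCompactionSnapshot.AutoCompactionScheduleStatus expectedStatus,
    String dataSourceName,
    long expectedBytesAwaiting,
    long expectedBytesCompacted,
    long expectedBytesSkipped,
    long expectedIntervalsAwaiting,
    long expectedIntervalsCompacted,
    long expectedIntervalsSkipped,
    long expectedSegmentsAwaiting,
    long expectedSegmentsCompacted,
    long expectedSegmentsSkipped
)
{
  AutoCompactionSnapshot snapshot = compactSegments.getAutoCompactionSnapshot().get(dataSourceName);
  Assert.assertEquals(expectedStatus, snapshot.getScheduleStatus()); // accessor name assumed
  Assert.assertEquals(expectedBytesAwaiting, snapshot.getBytesAwaitingCompaction());
  Assert.assertEquals(expectedBytesCompacted, snapshot.getBytesCompacted());
  Assert.assertEquals(expectedBytesSkipped, snapshot.getBytesSkipped());
  Assert.assertEquals(expectedIntervalsAwaiting, snapshot.getIntervalCountAwaitingCompaction());
  Assert.assertEquals(expectedIntervalsCompacted, snapshot.getIntervalCountCompacted());
  Assert.assertEquals(expectedIntervalsSkipped, snapshot.getIntervalCountSkipped());
  Assert.assertEquals(expectedSegmentsAwaiting, snapshot.getSegmentCountAwaitingCompaction());
  Assert.assertEquals(expectedSegmentsCompacted, snapshot.getSegmentCountCompacted());
  Assert.assertEquals(expectedSegmentsSkipped, snapshot.getSegmentCountSkipped());
}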
Use of org.apache.druid.server.coordinator.AutoCompactionSnapshot in project druid by druid-io.
From the class CompactSegmentsTest, method testMakeStatsForDataSourceWithCompactedIntervalBetweenNonCompactedIntervals:
@Test
public void testMakeStatsForDataSourceWithCompactedIntervalBetweenNonCompactedIntervals()
{
  // Only test and validate one datasource, for simplicity.
  // This datasource has three intervals already compacted (3 intervals, 120 bytes, 12 segments already compacted)
  String dataSourceName = DATA_SOURCE_PREFIX + 1;
  List<DataSegment> segments = new ArrayList<>();
  for (int j : new int[]{0, 1, 2, 3, 7, 8}) {
    for (int k = 0; k < PARTITION_PER_TIME_INTERVAL; k++) {
      DataSegment beforeNoon = createSegment(dataSourceName, j, true, k);
      DataSegment afterNoon = createSegment(dataSourceName, j, false, k);
      if (j == 3) {
        // Make two intervals on this day compacted (two compacted intervals back-to-back)
        beforeNoon = beforeNoon.withLastCompactionState(
            new CompactionState(partitionsSpec, null, null, null, ImmutableMap.of(), ImmutableMap.of())
        );
        afterNoon = afterNoon.withLastCompactionState(
            new CompactionState(partitionsSpec, null, null, null, ImmutableMap.of(), ImmutableMap.of())
        );
      }
      if (j == 1) {
        // Make one interval on this day compacted
        afterNoon = afterNoon.withLastCompactionState(
            new CompactionState(partitionsSpec, null, null, null, ImmutableMap.of(), ImmutableMap.of())
        );
      }
      segments.add(beforeNoon);
      segments.add(afterNoon);
    }
  }
  dataSources = DataSourcesSnapshot.fromUsedSegments(segments, ImmutableMap.of())
                                   .getUsedSegmentsTimelinesPerDataSource();
  final TestDruidLeaderClient leaderClient = new TestDruidLeaderClient(JSON_MAPPER);
  leaderClient.start();
  final HttpIndexingServiceClient indexingServiceClient = new HttpIndexingServiceClient(JSON_MAPPER, leaderClient);
  final CompactSegments compactSegments = new CompactSegments(COORDINATOR_CONFIG, JSON_MAPPER, indexingServiceClient);

  // Before any compaction, we do not have any snapshot of compactions
  Map<String, AutoCompactionSnapshot> autoCompactionSnapshots = compactSegments.getAutoCompactionSnapshot();
  Assert.assertEquals(0, autoCompactionSnapshots.size());
  // 3 intervals, 120 bytes, 12 segments already compacted before the run
  for (int compactionRunCount = 0; compactionRunCount < 8; compactionRunCount++) {
    // Do a cycle of auto compaction which creates one compaction task
    final CoordinatorStats stats = doCompactSegments(compactSegments);
    Assert.assertEquals(1, stats.getGlobalStat(CompactSegments.COMPACTION_TASK_COUNT));
    verifySnapshot(
        compactSegments,
        AutoCompactionSnapshot.AutoCompactionScheduleStatus.RUNNING,
        dataSourceName,
        TOTAL_BYTE_PER_DATASOURCE - 120 - 40 * (compactionRunCount + 1),
        120 + 40 * (compactionRunCount + 1),
        0,
        TOTAL_INTERVAL_PER_DATASOURCE - 3 - (compactionRunCount + 1),
        3 + (compactionRunCount + 1),
        0,
        TOTAL_SEGMENT_PER_DATASOURCE - 12 - 4 * (compactionRunCount + 1),
        // Each previous auto compaction run resulted in 2 compacted segments (4 segments compacted into 2 segments)
        12 + 4 + 2 * (compactionRunCount),
        0
    );
  }
  // Test that the stats do not change (and are still correct) when auto compaction runs after everything has been fully compacted
  final CoordinatorStats stats = doCompactSegments(compactSegments);
  Assert.assertEquals(0, stats.getGlobalStat(CompactSegments.COMPACTION_TASK_COUNT));
  verifySnapshot(
      compactSegments,
      AutoCompactionSnapshot.AutoCompactionScheduleStatus.RUNNING,
      dataSourceName,
      0,
      TOTAL_BYTE_PER_DATASOURCE,
      0,
      0,
      TOTAL_INTERVAL_PER_DATASOURCE,
      0,
      0,
      // The 32 segments needing compaction have now been compacted into 16 segments (4 segments compacted into 2 segments each run)
      12 + 16,
      0
  );
}
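To make the arithmetic in those assertions concrete, here is the first loop iteration worked out. It assumes each half-day interval holds 4 segments of 10 bytes (so 40 bytes per interval), which is what makes "3 intervals, 120 bytes, 12 segments already compacted" internally consistent.

// Worked example for compactionRunCount = 0, under the assumption of
// 4 segments x 10 bytes per half-day interval (40 bytes per interval).
int run = 0;
long bytesCompacted     = 120 + 40 * (run + 1); // = 160: 3 pre-compacted intervals plus 1 newly compacted
long intervalsCompacted = 3 + (run + 1);        // = 4
// The interval compacted in this run still counts its 4 source segments; every
// previous run's interval has since been published as 2 compacted segments.
long segmentsCompacted  = 12 + 4 + 2 * run;     // = 16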