Use of org.apache.druid.server.coordinator.AutoCompactionSnapshot in project druid by druid-io.
The class CompactionResource, method getCompactionSnapshotForDataSource.
@GET
@Path("/status")
@Produces(MediaType.APPLICATION_JSON)
@ResourceFilters(StateResourceFilter.class)
public Response getCompactionSnapshotForDataSource(@QueryParam("dataSource") String dataSource) {
  final Collection<AutoCompactionSnapshot> snapshots;
  if (dataSource == null || dataSource.isEmpty()) {
    snapshots = coordinator.getAutoCompactionSnapshot().values();
  } else {
    AutoCompactionSnapshot autoCompactionSnapshot = coordinator.getAutoCompactionSnapshotForDataSource(dataSource);
    if (autoCompactionSnapshot == null) {
      return Response.status(Response.Status.NOT_FOUND)
                     .entity(ImmutableMap.of("error", "unknown dataSource"))
                     .build();
    }
    snapshots = ImmutableList.of(autoCompactionSnapshot);
  }
  return Response.ok(ImmutableMap.of("latestStatus", snapshots)).build();
}
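The NOT_FOUND branch above can be exercised in the same style as the CompactionResourceTest example further down this page. A minimal sketch, assuming the same EasyMock coordinator mock and that an unknown datasource yields a null snapshot (the test name is hypothetical):

@Test
public void testGetCompactionSnapshotForUnknownDataSource() {
  // The coordinator has no snapshot for this name, so the resource should answer 404
  // with the "unknown dataSource" error body built in the method above.
  EasyMock.expect(mock.getAutoCompactionSnapshotForDataSource("does_not_exist")).andReturn(null).once();
  EasyMock.replay(mock);
  final Response response = new CompactionResource(mock).getCompactionSnapshotForDataSource("does_not_exist");
  Assert.assertEquals(404, response.getStatus());
  Assert.assertEquals(ImmutableMap.of("error", "unknown dataSource"), response.getEntity());
  EasyMock.verify(mock);
}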
Use of org.apache.druid.server.coordinator.AutoCompactionSnapshot in project druid by druid-io.
The class CompactSegmentsTest, method testMakeStatsForDataSourceWithSkipped.
@Test
public void testMakeStatsForDataSourceWithSkipped() {
  // Only test and validate one datasource for simplicity.
  // This datasource has three intervals skipped by auto compaction (3 intervals, 1,200 bytes, 12 segments).
  // Note that these segments are 10 bytes each in the other tests; they are bumped to 100 bytes each here
  // so that auto compaction skips them.
  String dataSourceName = DATA_SOURCE_PREFIX + 1;
  List<DataSegment> segments = new ArrayList<>();
  for (int j : new int[]{0, 1, 2, 3, 7, 8}) {
    for (int k = 0; k < 4; k++) {
      DataSegment beforeNoon = createSegment(dataSourceName, j, true, k);
      DataSegment afterNoon = createSegment(dataSourceName, j, false, k);
      if (j == 3) {
        // Make both intervals on this day skipped (two skipped intervals back-to-back)
        beforeNoon = beforeNoon.withSize(100);
        afterNoon = afterNoon.withSize(100);
      }
      if (j == 1) {
        // Make one interval on this day skipped
        afterNoon = afterNoon.withSize(100);
      }
      segments.add(beforeNoon);
      segments.add(afterNoon);
    }
  }
  dataSources = DataSourcesSnapshot.fromUsedSegments(segments, ImmutableMap.of()).getUsedSegmentsTimelinesPerDataSource();
  final TestDruidLeaderClient leaderClient = new TestDruidLeaderClient(JSON_MAPPER);
  leaderClient.start();
  final HttpIndexingServiceClient indexingServiceClient = new HttpIndexingServiceClient(JSON_MAPPER, leaderClient);
  final CompactSegments compactSegments = new CompactSegments(COORDINATOR_CONFIG, JSON_MAPPER, indexingServiceClient);
  // Before any compaction, we do not have any snapshot of compactions
  Map<String, AutoCompactionSnapshot> autoCompactionSnapshots = compactSegments.getAutoCompactionSnapshot();
  Assert.assertEquals(0, autoCompactionSnapshots.size());
  // 3 intervals, 1,200 bytes (each skipped segment is 100 bytes), 12 segments will be skipped by auto compaction
  for (int compactionRunCount = 0; compactionRunCount < 8; compactionRunCount++) {
    // Do a cycle of auto compaction which creates one compaction task
    final CoordinatorStats stats = doCompactSegments(compactSegments);
    Assert.assertEquals(1, stats.getGlobalStat(CompactSegments.COMPACTION_TASK_COUNT));
    verifySnapshot(
        compactSegments,
        AutoCompactionSnapshot.AutoCompactionScheduleStatus.RUNNING,
        dataSourceName,
        // bytes awaiting / compacted / skipped; minus 120 bytes is the skipped segments' original size (12 segments * 10 bytes)
        TOTAL_BYTE_PER_DATASOURCE - 120 - 40 * (compactionRunCount + 1), 40 * (compactionRunCount + 1), 1200,
        // intervals awaiting / compacted / skipped
        TOTAL_INTERVAL_PER_DATASOURCE - 3 - (compactionRunCount + 1), (compactionRunCount + 1), 3,
        // segments awaiting / compacted / skipped
        TOTAL_SEGMENT_PER_DATASOURCE - 12 - 4 * (compactionRunCount + 1), 4 + 2 * (compactionRunCount), 12
    );
  }
  // Stats should not change (and should still be correct) when auto compaction runs again after everything is fully compacted
  final CoordinatorStats stats = doCompactSegments(compactSegments);
  Assert.assertEquals(0, stats.getGlobalStat(CompactSegments.COMPACTION_TASK_COUNT));
  verifySnapshot(
      compactSegments,
      AutoCompactionSnapshot.AutoCompactionScheduleStatus.RUNNING,
      dataSourceName,
      // bytes awaiting / compacted / skipped; minus 120 bytes is the skipped segments' original size (12 segments * 10 bytes)
      0, TOTAL_BYTE_PER_DATASOURCE - 120, 1200,
      // intervals awaiting / compacted / skipped
      0, TOTAL_INTERVAL_PER_DATASOURCE - 3, 3,
      // segments awaiting / compacted / skipped
      0, 16, 12
  );
}
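For reference, the skipped totals asserted above follow directly from the setup: day 3 contributes two skipped intervals (8 segments) and day 1 contributes one (4 segments), i.e. 3 intervals, 12 segments, and 12 * 100 = 1,200 bytes skipped. A minimal sketch, under the same fixtures, reading those metrics straight off the snapshot instead of going through verifySnapshot:

// Same fixtures as the test above; the getters are the ones verifySnapshot already relies on.
AutoCompactionSnapshot snapshot = compactSegments.getAutoCompactionSnapshot().get(dataSourceName);
Assert.assertEquals(3, snapshot.getIntervalCountSkipped());
Assert.assertEquals(12, snapshot.getSegmentCountSkipped());
Assert.assertEquals(1200, snapshot.getBytesSkipped());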
Use of org.apache.druid.server.coordinator.AutoCompactionSnapshot in project druid by druid-io.
The class CompactSegmentsTest, method verifySnapshot.
private void verifySnapshot(
    CompactSegments compactSegments,
    AutoCompactionSnapshot.AutoCompactionScheduleStatus scheduleStatus,
    String dataSourceName,
    long expectedByteCountAwaitingCompaction,
    long expectedByteCountCompressed,
    long expectedByteCountSkipped,
    long expectedIntervalCountAwaitingCompaction,
    long expectedIntervalCountCompressed,
    long expectedIntervalCountSkipped,
    long expectedSegmentCountAwaitingCompaction,
    long expectedSegmentCountCompressed,
    long expectedSegmentCountSkipped
) {
  Map<String, AutoCompactionSnapshot> autoCompactionSnapshots = compactSegments.getAutoCompactionSnapshot();
  AutoCompactionSnapshot snapshot = autoCompactionSnapshots.get(dataSourceName);
  Assert.assertEquals(dataSourceName, snapshot.getDataSource());
  Assert.assertEquals(scheduleStatus, snapshot.getScheduleStatus());
  Assert.assertEquals(expectedByteCountAwaitingCompaction, snapshot.getBytesAwaitingCompaction());
  Assert.assertEquals(expectedByteCountCompressed, snapshot.getBytesCompacted());
  Assert.assertEquals(expectedByteCountSkipped, snapshot.getBytesSkipped());
  Assert.assertEquals(expectedIntervalCountAwaitingCompaction, snapshot.getIntervalCountAwaitingCompaction());
  Assert.assertEquals(expectedIntervalCountCompressed, snapshot.getIntervalCountCompacted());
  Assert.assertEquals(expectedIntervalCountSkipped, snapshot.getIntervalCountSkipped());
  Assert.assertEquals(expectedSegmentCountAwaitingCompaction, snapshot.getSegmentCountAwaitingCompaction());
  Assert.assertEquals(expectedSegmentCountCompressed, snapshot.getSegmentCountCompacted());
  Assert.assertEquals(expectedSegmentCountSkipped, snapshot.getSegmentCountSkipped());
}
Use of org.apache.druid.server.coordinator.AutoCompactionSnapshot in project druid by druid-io.
The class CompactSegmentsTest, method testMakeStatsWithDeactivatedDatasource.
@Test
public void testMakeStatsWithDeactivatedDatasource() {
  final TestDruidLeaderClient leaderClient = new TestDruidLeaderClient(JSON_MAPPER);
  leaderClient.start();
  final HttpIndexingServiceClient indexingServiceClient = new HttpIndexingServiceClient(JSON_MAPPER, leaderClient);
  final CompactSegments compactSegments = new CompactSegments(COORDINATOR_CONFIG, JSON_MAPPER, indexingServiceClient);
  // Before any compaction, we do not have any snapshot of compactions
  Map<String, AutoCompactionSnapshot> autoCompactionSnapshots = compactSegments.getAutoCompactionSnapshot();
  Assert.assertEquals(0, autoCompactionSnapshots.size());
  for (int compactionRunCount = 0; compactionRunCount < 11; compactionRunCount++) {
    doCompactionAndAssertCompactSegmentStatistics(compactSegments, compactionRunCount);
  }
  // Stats should not change (and should still be correct) when auto compaction runs again after everything is fully compacted
  final CoordinatorStats stats = doCompactSegments(compactSegments);
  Assert.assertEquals(0, stats.getGlobalStat(CompactSegments.COMPACTION_TASK_COUNT));
  for (int i = 0; i < 3; i++) {
    verifySnapshot(
        compactSegments,
        AutoCompactionSnapshot.AutoCompactionScheduleStatus.RUNNING,
        DATA_SOURCE_PREFIX + i,
        // bytes awaiting / compacted / skipped
        0, TOTAL_BYTE_PER_DATASOURCE, 0,
        // intervals awaiting / compacted / skipped
        0, TOTAL_INTERVAL_PER_DATASOURCE, 0,
        // segments awaiting / compacted / skipped
        0, TOTAL_SEGMENT_PER_DATASOURCE / 2, 0
    );
  }
  // Deactivate one datasource (datasource 0 no longer exists in the timeline)
  dataSources.remove(DATA_SOURCE_PREFIX + 0);
  // Run auto compaction with one datasource deactivated;
  // the snapshot should no longer contain the deactivated datasource
  doCompactSegments(compactSegments);
  for (int i = 1; i < 3; i++) {
    verifySnapshot(
        compactSegments,
        AutoCompactionSnapshot.AutoCompactionScheduleStatus.RUNNING,
        DATA_SOURCE_PREFIX + i,
        // bytes awaiting / compacted / skipped
        0, TOTAL_BYTE_PER_DATASOURCE, 0,
        // intervals awaiting / compacted / skipped
        0, TOTAL_INTERVAL_PER_DATASOURCE, 0,
        // segments awaiting / compacted / skipped
        0, TOTAL_SEGMENT_PER_DATASOURCE / 2, 0
    );
  }
  Assert.assertEquals(2, compactSegments.getAutoCompactionSnapshot().size());
  Assert.assertTrue(compactSegments.getAutoCompactionSnapshot().containsKey(DATA_SOURCE_PREFIX + 1));
  Assert.assertTrue(compactSegments.getAutoCompactionSnapshot().containsKey(DATA_SOURCE_PREFIX + 2));
  Assert.assertFalse(compactSegments.getAutoCompactionSnapshot().containsKey(DATA_SOURCE_PREFIX + 0));
}
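A small follow-on sketch, under the same fixtures, making the same point by iterating the snapshot map: only datasources still present in the timeline appear, and each of them still reports a RUNNING schedule status:

for (Map.Entry<String, AutoCompactionSnapshot> entry : compactSegments.getAutoCompactionSnapshot().entrySet()) {
  // The deactivated datasource must not show up at all.
  Assert.assertNotEquals(DATA_SOURCE_PREFIX + 0, entry.getKey());
  Assert.assertEquals(AutoCompactionSnapshot.AutoCompactionScheduleStatus.RUNNING, entry.getValue().getScheduleStatus());
}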
Use of org.apache.druid.server.coordinator.AutoCompactionSnapshot in project druid by druid-io.
The class CompactionResourceTest, method testGetCompactionSnapshotForDataSourceWithNullQueryParameter.
@Test
public void testGetCompactionSnapshotForDataSourceWithNullQueryParameter() {
  String dataSourceName = "datasource_1";
  Map<String, AutoCompactionSnapshot> expected = ImmutableMap.of(dataSourceName, expectedSnapshot);
  EasyMock.expect(mock.getAutoCompactionSnapshot()).andReturn(expected).once();
  EasyMock.replay(mock);
  final Response response = new CompactionResource(mock).getCompactionSnapshotForDataSource(null);
  Assert.assertEquals(ImmutableMap.of("latestStatus", expected.values()), response.getEntity());
  Assert.assertEquals(200, response.getStatus());
}
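The non-null path can be covered the same way. A minimal sketch, assuming the same mock and expectedSnapshot fixture; per the resource method at the top of this page, the single snapshot comes back wrapped in a list under "latestStatus" (the test name is hypothetical):

@Test
public void testGetCompactionSnapshotForDataSourceWithValidQueryParameter() {
  String dataSourceName = "datasource_1";
  EasyMock.expect(mock.getAutoCompactionSnapshotForDataSource(dataSourceName)).andReturn(expectedSnapshot).once();
  EasyMock.replay(mock);
  final Response response = new CompactionResource(mock).getCompactionSnapshotForDataSource(dataSourceName);
  Assert.assertEquals(200, response.getStatus());
  Assert.assertEquals(ImmutableMap.of("latestStatus", ImmutableList.of(expectedSnapshot)), response.getEntity());
  EasyMock.verify(mock);
}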