Use of org.apache.druid.server.coordinator.UserCompactionTaskGranularityConfig in project druid by druid-io.
From class KillCompactionConfigTest, method testRunRemoveInactiveDatasourceCompactionConfig.
@Test
public void testRunRemoveInactiveDatasourceCompactionConfig() {
String inactiveDatasourceName = "inactive_datasource";
String activeDatasourceName = "active_datasource";
DataSourceCompactionConfig inactiveDatasourceConfig = new DataSourceCompactionConfig(
    inactiveDatasourceName, null, 500L, null, new Period(3600), null,
    new UserCompactionTaskGranularityConfig(Granularities.HOUR, null, null),
    null, null, null, null, ImmutableMap.of("key", "val"));
DataSourceCompactionConfig activeDatasourceConfig = new DataSourceCompactionConfig(
    activeDatasourceName, null, 500L, null, new Period(3600), null,
    new UserCompactionTaskGranularityConfig(Granularities.HOUR, null, null),
    null, null, null, null, ImmutableMap.of("key", "val"));
CoordinatorCompactionConfig originalCurrentConfig = CoordinatorCompactionConfig.from(ImmutableList.of(inactiveDatasourceConfig, activeDatasourceConfig));
byte[] originalCurrentConfigBytes = { 1, 2, 3 };
Mockito.when(mockConnector.lookup(
    ArgumentMatchers.anyString(),
    ArgumentMatchers.eq("name"),
    ArgumentMatchers.eq("payload"),
    ArgumentMatchers.eq(CoordinatorCompactionConfig.CONFIG_KEY)
)).thenReturn(originalCurrentConfigBytes);
Mockito.when(mockJacksonConfigManager.convertByteToConfig(
    ArgumentMatchers.eq(originalCurrentConfigBytes),
    ArgumentMatchers.eq(CoordinatorCompactionConfig.class),
    ArgumentMatchers.eq(CoordinatorCompactionConfig.empty())
)).thenReturn(originalCurrentConfig);
Mockito.when(mockDruidCoordinatorRuntimeParams.getEmitter()).thenReturn(mockServiceEmitter);
Mockito.when(mockSqlSegmentsMetadataManager.retrieveAllDataSourceNames()).thenReturn(ImmutableSet.of(activeDatasourceName));
final ArgumentCaptor<byte[]> oldConfigCaptor = ArgumentCaptor.forClass(byte[].class);
final ArgumentCaptor<CoordinatorCompactionConfig> newConfigCaptor = ArgumentCaptor.forClass(CoordinatorCompactionConfig.class);
Mockito.when(mockJacksonConfigManager.set(
    ArgumentMatchers.eq(CoordinatorCompactionConfig.CONFIG_KEY),
    oldConfigCaptor.capture(),
    newConfigCaptor.capture(),
    ArgumentMatchers.any()
)).thenReturn(ConfigManager.SetResult.ok());
TestDruidCoordinatorConfig druidCoordinatorConfig = new TestDruidCoordinatorConfig(
    null, null, null, new Duration("PT5S"), null, null, null, null, null, null, null,
    new Duration("PT6S"), null, null, null, null, 10, null);
killCompactionConfig = new KillCompactionConfig(druidCoordinatorConfig, mockSqlSegmentsMetadataManager, mockJacksonConfigManager, mockConnector, mockConnectorConfig);
killCompactionConfig.run(mockDruidCoordinatorRuntimeParams);
// Verify and Assert
Assert.assertNotNull(oldConfigCaptor.getValue());
Assert.assertEquals(oldConfigCaptor.getValue(), originalCurrentConfigBytes);
Assert.assertNotNull(newConfigCaptor.getValue());
// The updated config should contain only one compaction config, for the active datasource
Assert.assertEquals(1, newConfigCaptor.getValue().getCompactionConfigs().size());
Assert.assertEquals(activeDatasourceConfig, newConfigCaptor.getValue().getCompactionConfigs().get(0));
final ArgumentCaptor<ServiceEventBuilder> emittedEventCaptor = ArgumentCaptor.forClass(ServiceEventBuilder.class);
Mockito.verify(mockServiceEmitter).emit(emittedEventCaptor.capture());
Assert.assertEquals(KillCompactionConfig.COUNT_METRIC, emittedEventCaptor.getValue().build(ImmutableMap.of()).toMap().get("metric"));
// Should delete 1 config
Assert.assertEquals(1, emittedEventCaptor.getValue().build(ImmutableMap.of()).toMap().get("value"));
Mockito.verify(mockJacksonConfigManager).convertByteToConfig(ArgumentMatchers.eq(originalCurrentConfigBytes), ArgumentMatchers.eq(CoordinatorCompactionConfig.class), ArgumentMatchers.eq(CoordinatorCompactionConfig.empty()));
Mockito.verify(mockConnector).lookup(ArgumentMatchers.anyString(), ArgumentMatchers.eq("name"), ArgumentMatchers.eq("payload"), ArgumentMatchers.eq(CoordinatorCompactionConfig.CONFIG_KEY));
Mockito.verify(mockJacksonConfigManager).set(ArgumentMatchers.eq(CoordinatorCompactionConfig.CONFIG_KEY), ArgumentMatchers.any(byte[].class), ArgumentMatchers.any(CoordinatorCompactionConfig.class), ArgumentMatchers.any());
Mockito.verifyNoMoreInteractions(mockJacksonConfigManager);
Mockito.verify(mockSqlSegmentsMetadataManager).retrieveAllDataSourceNames();
Mockito.verifyNoMoreInteractions(mockSqlSegmentsMetadataManager);
}
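For orientation, the pruning this test verifies amounts to filtering the stored compaction configs against the live datasource names and writing the result back through the config manager. A minimal sketch under those assumptions (variable names here are illustrative, not the duty's actual implementation):
// Sketch only: keep configs whose datasource still exists, drop the rest.
Set<String> activeDatasources = sqlSegmentsMetadataManager.retrieveAllDataSourceNames();
List<DataSourceCompactionConfig> remaining = current.getCompactionConfigs()
    .stream()
    .filter(config -> activeDatasources.contains(config.getDataSource()))
    .collect(Collectors.toList());
// Writing back via JacksonConfigManager.set(key, oldBytes, newConfig, audit) with the
// originally fetched bytes makes the update a compare-and-swap against the stored value,
// which is why the test captures and asserts on the old-config byte array.
CoordinatorCompactionConfig updated = CoordinatorCompactionConfig.from(ImmutableList.copyOf(remaining));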
Use of org.apache.druid.server.coordinator.UserCompactionTaskGranularityConfig in project druid by druid-io.
From class ITAutoCompactionTest, method testAutoCompactionDutyWithSegmentGranularityAndExistingCompactedSegmentsHaveDifferentSegmentGranularity.
@Test
public void testAutoCompactionDutyWithSegmentGranularityAndExistingCompactedSegmentsHaveDifferentSegmentGranularity() throws Exception {
loadData(INDEX_TASK);
try (final Closeable ignored = unloader(fullDatasourceName)) {
final List<String> intervalsBeforeCompaction = coordinator.getSegmentIntervals(fullDatasourceName);
intervalsBeforeCompaction.sort(null);
// 4 segments across 2 days
verifySegmentsCount(4);
verifyQuery(INDEX_QUERIES_RESOURCE);
// Compacted without SegmentGranularity in auto compaction config
submitCompactionConfig(MAX_ROWS_PER_SEGMENT_COMPACTED, NO_SKIP_OFFSET);
forceTriggerAutoCompaction(2);
verifyQuery(INDEX_QUERIES_RESOURCE);
verifySegmentsCompacted(2, MAX_ROWS_PER_SEGMENT_COMPACTED);
List<TaskResponseObject> compactTasksBefore = indexer.getCompleteTasksForDataSource(fullDatasourceName);
// Segments were compacted and already have DAY granularity, since the data was initially ingested with DAY granularity.
// Now set auto compaction with YEAR granularity in the granularitySpec
Granularity newGranularity = Granularities.YEAR;
submitCompactionConfig(MAX_ROWS_PER_SEGMENT_COMPACTED, NO_SKIP_OFFSET, new UserCompactionTaskGranularityConfig(newGranularity, null, null));
forceTriggerAutoCompaction(1);
verifyQuery(INDEX_QUERIES_RESOURCE);
verifySegmentsCompacted(1, MAX_ROWS_PER_SEGMENT_COMPACTED);
// There should be new compaction tasks since SegmentGranularity changed from DAY to YEAR
List<TaskResponseObject> compactTasksAfter = indexer.getCompleteTasksForDataSource(fullDatasourceName);
Assert.assertTrue(compactTasksAfter.size() > compactTasksBefore.size());
}
}
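A note on the constructor seen throughout these tests: assuming the signature in this Druid version, the three arguments of UserCompactionTaskGranularityConfig are (segmentGranularity, queryGranularity, rollup), and a null leaves that setting unspecified. So the configs above enforce only a segment granularity:
// Assumed parameter order: (segmentGranularity, queryGranularity, rollup).
// Only the YEAR segment granularity is enforced here; query granularity and
// rollup are left unspecified via null.
UserCompactionTaskGranularityConfig yearSegmentGranularity =
    new UserCompactionTaskGranularityConfig(Granularities.YEAR, null, null);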
Use of org.apache.druid.server.coordinator.UserCompactionTaskGranularityConfig in project druid by druid-io.
From class ITAutoCompactionTest, method testAutoCompactionDutyWithQueryGranularity.
@Test
public void testAutoCompactionDutyWithQueryGranularity() throws Exception {
final ISOChronology chrono = ISOChronology.getInstance(DateTimes.inferTzFromString("America/Los_Angeles"));
Map<String, Object> specs = ImmutableMap.of(
    "%%GRANULARITYSPEC%%",
    new UniformGranularitySpec(Granularities.DAY, Granularities.NONE, true, ImmutableList.of(new Interval("2013-08-31/2013-09-02", chrono))));
loadData(INDEX_TASK_WITH_GRANULARITY_SPEC, specs);
try (final Closeable ignored = unloader(fullDatasourceName)) {
Map<String, Object> expectedResult = ImmutableMap.of(
    "%%FIELD_TO_QUERY%%", "added",
    "%%EXPECTED_COUNT_RESULT%%", 2,
    "%%EXPECTED_SCAN_RESULT%%", ImmutableList.of(ImmutableMap.of("events", ImmutableList.of(ImmutableList.of(57.0), ImmutableList.of(459.0)))));
verifyQuery(INDEX_ROLLUP_QUERIES_RESOURCE, expectedResult);
submitCompactionConfig(MAX_ROWS_PER_SEGMENT_COMPACTED, NO_SKIP_OFFSET, new UserCompactionTaskGranularityConfig(null, Granularities.DAY, null), false);
forceTriggerAutoCompaction(2);
expectedResult = ImmutableMap.of(
    "%%FIELD_TO_QUERY%%", "added",
    "%%EXPECTED_COUNT_RESULT%%", 1,
    "%%EXPECTED_SCAN_RESULT%%", ImmutableList.of(ImmutableMap.of("events", ImmutableList.of(ImmutableList.of(516.0)))));
verifyQuery(INDEX_ROLLUP_QUERIES_RESOURCE, expectedResult);
verifySegmentsCompacted(2, MAX_ROWS_PER_SEGMENT_COMPACTED);
List<TaskResponseObject> compactTasksBefore = indexer.getCompleteTasksForDataSource(fullDatasourceName);
// Verify that the rolled-up segments do not get compacted again
forceTriggerAutoCompaction(2);
List<TaskResponseObject> compactTasksAfter = indexer.getCompleteTasksForDataSource(fullDatasourceName);
Assert.assertEquals(compactTasksAfter.size(), compactTasksBefore.size());
}
}
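The changing expected results follow from rollup arithmetic: with query granularity NONE the two input rows stay distinct, but once auto compaction applies DAY query granularity they fall into the same day bucket and are summed. A small sketch of that reasoning, using the parameter order noted earlier:
// Before: two rows in the same day, added = 57.0 and 459.0 (count result 2).
// After DAY query granularity: one rolled-up row, added = 57.0 + 459.0 = 516.0
// (count result 1), matching the expectedResult maps above.
UserCompactionTaskGranularityConfig dayQueryGranularity =
    new UserCompactionTaskGranularityConfig(null, Granularities.DAY, null);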
Use of org.apache.druid.server.coordinator.UserCompactionTaskGranularityConfig in project druid by druid-io.
From class ITAutoCompactionTest, method testAutoCompactionDutyWithSegmentGranularityAndSmallerSegmentGranularityCoveringMultipleSegmentsInTimelineAndDropExistingFalse.
@Test
public void testAutoCompactionDutyWithSegmentGranularityAndSmallerSegmentGranularityCoveringMultipleSegmentsInTimelineAndDropExistingFalse() throws Exception {
loadData(INDEX_TASK);
try (final Closeable ignored = unloader(fullDatasourceName)) {
final List<String> intervalsBeforeCompaction = coordinator.getSegmentIntervals(fullDatasourceName);
intervalsBeforeCompaction.sort(null);
// 4 segments across 2 days
verifySegmentsCount(4);
verifyQuery(INDEX_QUERIES_RESOURCE);
Granularity newGranularity = Granularities.YEAR;
// Set dropExisting to false
submitCompactionConfig(MAX_ROWS_PER_SEGMENT_COMPACTED, NO_SKIP_OFFSET, new UserCompactionTaskGranularityConfig(newGranularity, null, null), false);
List<String> expectedIntervalAfterCompaction = new ArrayList<>();
// We will have one segment with an interval of 2013-01-01/2014-01-01 (compacted with YEAR)
for (String interval : intervalsBeforeCompaction) {
for (Interval newinterval : newGranularity.getIterable(new Interval(interval, ISOChronology.getInstanceUTC()))) {
expectedIntervalAfterCompaction.add(newinterval.toString());
}
}
forceTriggerAutoCompaction(1);
verifyQuery(INDEX_QUERIES_RESOURCE);
verifySegmentsCompacted(1, MAX_ROWS_PER_SEGMENT_COMPACTED);
checkCompactionIntervals(expectedIntervalAfterCompaction);
loadData(INDEX_TASK);
verifySegmentsCount(5);
verifyQuery(INDEX_QUERIES_RESOURCE);
// 5 segments: 1 compacted YEAR segment and 4 newly ingested DAY segments across 2 days.
// We will have one segment with an interval of 2013-01-01/2014-01-01 (compacted with YEAR) from the earlier compaction,
// two segments with an interval of 2013-08-31/2013-09-01 (newly ingested with DAY),
// and two segments with an interval of 2013-09-01/2013-09-02 (newly ingested with DAY)
expectedIntervalAfterCompaction.addAll(intervalsBeforeCompaction);
checkCompactionIntervals(expectedIntervalAfterCompaction);
newGranularity = Granularities.MONTH;
// Set dropExisting to false
submitCompactionConfig(MAX_ROWS_PER_SEGMENT_COMPACTED, NO_SKIP_OFFSET, new UserCompactionTaskGranularityConfig(newGranularity, null, null), false);
// Since dropExisting is set to false...
// This will submit a single compaction task for interval of 2013-01-01/2014-01-01 with MONTH granularity
expectedIntervalAfterCompaction = new ArrayList<>();
// We will have one segment with an interval of 2013-01-01/2014-01-01 (compacted with YEAR) from the earlier compaction
for (String interval : intervalsBeforeCompaction) {
for (Interval newinterval : Granularities.YEAR.getIterable(new Interval(interval, ISOChronology.getInstanceUTC()))) {
expectedIntervalAfterCompaction.add(newinterval.toString());
}
}
// and segments with intervals of 2013-08-01/2013-09-01 and 2013-09-01/2013-10-01 (compacted with MONTH)
for (String interval : intervalsBeforeCompaction) {
for (Interval newinterval : Granularities.MONTH.getIterable(new Interval(interval, ISOChronology.getInstanceUTC()))) {
expectedIntervalAfterCompaction.add(newinterval.toString());
}
}
forceTriggerAutoCompaction(3);
verifyQuery(INDEX_QUERIES_RESOURCE);
verifySegmentsCompacted(3, MAX_ROWS_PER_SEGMENT_COMPACTED);
checkCompactionIntervals(expectedIntervalAfterCompaction);
}
}
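The expected-interval bookkeeping above leans on Granularity.getIterable, which buckets an arbitrary interval into granularity-aligned intervals. A self-contained illustration of what the MONTH loop contributes for this test's two DAY intervals (the printed values are derived from that bucketing, not taken from the test output):
// MONTH buckets covering 2013-08-31/2013-09-02:
Interval raw = new Interval("2013-08-31/2013-09-02", ISOChronology.getInstanceUTC());
for (Interval bucket : Granularities.MONTH.getIterable(raw)) {
    System.out.println(bucket); // 2013-08-01/2013-09-01, then 2013-09-01/2013-10-01
}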
Use of org.apache.druid.server.coordinator.UserCompactionTaskGranularityConfig in project druid by druid-io.
From class ITAutoCompactionUpgradeTest, method testUpgradeAutoCompactionConfigurationWhenConfigurationFromOlderVersionAlreadyExist.
@Test
public void testUpgradeAutoCompactionConfigurationWhenConfigurationFromOlderVersionAlreadyExist() throws Exception {
// Verify that a compaction config already exists. This config was inserted manually into the database using a SQL script.
// This auto compaction configuration payload is from Druid 0.21.0
CoordinatorCompactionConfig coordinatorCompactionConfig = compactionResource.getCoordinatorCompactionConfigs();
DataSourceCompactionConfig foundDataSourceCompactionConfig = null;
for (DataSourceCompactionConfig dataSourceCompactionConfig : coordinatorCompactionConfig.getCompactionConfigs()) {
if (dataSourceCompactionConfig.getDataSource().equals(UPGRADE_DATASOURCE_NAME)) {
foundDataSourceCompactionConfig = dataSourceCompactionConfig;
}
}
Assert.assertNotNull(foundDataSourceCompactionConfig);
// Now submit a new auto compaction configuration
PartitionsSpec newPartitionsSpec = new DynamicPartitionsSpec(4000, null);
Period newSkipOffset = Period.seconds(0);
DataSourceCompactionConfig compactionConfig = new DataSourceCompactionConfig(
    UPGRADE_DATASOURCE_NAME, null, null, null, newSkipOffset,
    new UserCompactionTaskQueryTuningConfig(null, null, null, new MaxSizeSplitHintSpec(null, 1), newPartitionsSpec, null, null, null, null, null, 1, null, null, null, null, null, 1),
    new UserCompactionTaskGranularityConfig(Granularities.YEAR, null, null),
    null, null, null, new UserCompactionTaskIOConfig(true), null);
compactionResource.submitCompactionConfig(compactionConfig);
// Wait for compaction config to persist
Thread.sleep(2000);
// Verify that compaction was successfully updated
coordinatorCompactionConfig = compactionResource.getCoordinatorCompactionConfigs();
foundDataSourceCompactionConfig = null;
for (DataSourceCompactionConfig dataSourceCompactionConfig : coordinatorCompactionConfig.getCompactionConfigs()) {
if (dataSourceCompactionConfig.getDataSource().equals(UPGRADE_DATASOURCE_NAME)) {
foundDataSourceCompactionConfig = dataSourceCompactionConfig;
}
}
Assert.assertNotNull(foundDataSourceCompactionConfig);
Assert.assertNotNull(foundDataSourceCompactionConfig.getTuningConfig());
Assert.assertEquals(foundDataSourceCompactionConfig.getTuningConfig().getPartitionsSpec(), newPartitionsSpec);
Assert.assertEquals(foundDataSourceCompactionConfig.getSkipOffsetFromLatest(), newSkipOffset);
}
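The assertions stop at the tuning config and skip offset. If one also wanted to confirm the granularity spec round-trips, a natural extra check would look like the following. This is a hypothetical addition, assuming DataSourceCompactionConfig exposes getGranularitySpec() and that the granularity config implements value equality:
// Hypothetical follow-up assertion, not part of the original test:
Assert.assertEquals(
    new UserCompactionTaskGranularityConfig(Granularities.YEAR, null, null),
    foundDataSourceCompactionConfig.getGranularitySpec());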