Example 11 with UserCompactionTaskGranularityConfig

Use of org.apache.druid.server.coordinator.UserCompactionTaskGranularityConfig in project druid by druid-io.

From the class KillCompactionConfigTest, method testRunRemoveInactiveDatasourceCompactionConfig.

@Test
public void testRunRemoveInactiveDatasourceCompactionConfig() {
    String inactiveDatasourceName = "inactive_datasource";
    String activeDatasourceName = "active_datasource";
    DataSourceCompactionConfig inactiveDatasourceConfig = new DataSourceCompactionConfig(inactiveDatasourceName, null, 500L, null, new Period(3600), null, new UserCompactionTaskGranularityConfig(Granularities.HOUR, null, null), null, null, null, null, ImmutableMap.of("key", "val"));
    DataSourceCompactionConfig activeDatasourceConfig = new DataSourceCompactionConfig(activeDatasourceName, null, 500L, null, new Period(3600), null, new UserCompactionTaskGranularityConfig(Granularities.HOUR, null, null), null, null, null, null, ImmutableMap.of("key", "val"));
    CoordinatorCompactionConfig originalCurrentConfig = CoordinatorCompactionConfig.from(ImmutableList.of(inactiveDatasourceConfig, activeDatasourceConfig));
    byte[] originalCurrentConfigBytes = { 1, 2, 3 };
    Mockito.when(mockConnector.lookup(ArgumentMatchers.anyString(), ArgumentMatchers.eq("name"), ArgumentMatchers.eq("payload"), ArgumentMatchers.eq(CoordinatorCompactionConfig.CONFIG_KEY))).thenReturn(originalCurrentConfigBytes);
    Mockito.when(mockJacksonConfigManager.convertByteToConfig(ArgumentMatchers.eq(originalCurrentConfigBytes), ArgumentMatchers.eq(CoordinatorCompactionConfig.class), ArgumentMatchers.eq(CoordinatorCompactionConfig.empty()))).thenReturn(originalCurrentConfig);
    Mockito.when(mockDruidCoordinatorRuntimeParams.getEmitter()).thenReturn(mockServiceEmitter);
    Mockito.when(mockSqlSegmentsMetadataManager.retrieveAllDataSourceNames()).thenReturn(ImmutableSet.of(activeDatasourceName));
    final ArgumentCaptor<byte[]> oldConfigCaptor = ArgumentCaptor.forClass(byte[].class);
    final ArgumentCaptor<CoordinatorCompactionConfig> newConfigCaptor = ArgumentCaptor.forClass(CoordinatorCompactionConfig.class);
    Mockito.when(mockJacksonConfigManager.set(ArgumentMatchers.eq(CoordinatorCompactionConfig.CONFIG_KEY), oldConfigCaptor.capture(), newConfigCaptor.capture(), ArgumentMatchers.any())).thenReturn(ConfigManager.SetResult.ok());
    TestDruidCoordinatorConfig druidCoordinatorConfig = new TestDruidCoordinatorConfig(null, null, null, new Duration("PT5S"), null, null, null, null, null, null, null, new Duration("PT6S"), null, null, null, null, 10, null);
    killCompactionConfig = new KillCompactionConfig(druidCoordinatorConfig, mockSqlSegmentsMetadataManager, mockJacksonConfigManager, mockConnector, mockConnectorConfig);
    killCompactionConfig.run(mockDruidCoordinatorRuntimeParams);
    // Verify and Assert
    Assert.assertNotNull(oldConfigCaptor.getValue());
    Assert.assertEquals(oldConfigCaptor.getValue(), originalCurrentConfigBytes);
    Assert.assertNotNull(newConfigCaptor.getValue());
    // The updated config should contain only one compaction config, for the active datasource
    Assert.assertEquals(1, newConfigCaptor.getValue().getCompactionConfigs().size());
    Assert.assertEquals(activeDatasourceConfig, newConfigCaptor.getValue().getCompactionConfigs().get(0));
    final ArgumentCaptor<ServiceEventBuilder> emittedEventCaptor = ArgumentCaptor.forClass(ServiceEventBuilder.class);
    Mockito.verify(mockServiceEmitter).emit(emittedEventCaptor.capture());
    Assert.assertEquals(KillCompactionConfig.COUNT_METRIC, emittedEventCaptor.getValue().build(ImmutableMap.of()).toMap().get("metric"));
    // Should delete 1 config
    Assert.assertEquals(1, emittedEventCaptor.getValue().build(ImmutableMap.of()).toMap().get("value"));
    Mockito.verify(mockJacksonConfigManager).convertByteToConfig(ArgumentMatchers.eq(originalCurrentConfigBytes), ArgumentMatchers.eq(CoordinatorCompactionConfig.class), ArgumentMatchers.eq(CoordinatorCompactionConfig.empty()));
    Mockito.verify(mockConnector).lookup(ArgumentMatchers.anyString(), ArgumentMatchers.eq("name"), ArgumentMatchers.eq("payload"), ArgumentMatchers.eq(CoordinatorCompactionConfig.CONFIG_KEY));
    Mockito.verify(mockJacksonConfigManager).set(ArgumentMatchers.eq(CoordinatorCompactionConfig.CONFIG_KEY), ArgumentMatchers.any(byte[].class), ArgumentMatchers.any(CoordinatorCompactionConfig.class), ArgumentMatchers.any());
    Mockito.verifyNoMoreInteractions(mockJacksonConfigManager);
    Mockito.verify(mockSqlSegmentsMetadataManager).retrieveAllDataSourceNames();
    Mockito.verifyNoMoreInteractions(mockSqlSegmentsMetadataManager);
}
Also used : ServiceEventBuilder(org.apache.druid.java.util.emitter.service.ServiceEventBuilder) CoordinatorCompactionConfig(org.apache.druid.server.coordinator.CoordinatorCompactionConfig) Period(org.joda.time.Period) Duration(org.joda.time.Duration) TestDruidCoordinatorConfig(org.apache.druid.server.coordinator.TestDruidCoordinatorConfig) DataSourceCompactionConfig(org.apache.druid.server.coordinator.DataSourceCompactionConfig) UserCompactionTaskGranularityConfig(org.apache.druid.server.coordinator.UserCompactionTaskGranularityConfig) Test(org.junit.Test)
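For reference, the three constructor arguments passed to UserCompactionTaskGranularityConfig throughout these examples appear to correspond to a segment granularity, a query granularity, and a rollup flag; that reading is inferred from how the tests use them (Example 11 sets only the first, Example 13 only the second), not quoted from the class itself. A minimal sketch under that assumption:

import org.apache.druid.java.util.common.granularity.Granularities;
import org.apache.druid.server.coordinator.UserCompactionTaskGranularityConfig;

// Assumed parameter roles: (segmentGranularity, queryGranularity, rollup)
UserCompactionTaskGranularityConfig segmentGranOnly =
        new UserCompactionTaskGranularityConfig(Granularities.HOUR, null, null); // change segment granularity only
UserCompactionTaskGranularityConfig queryGranOnly =
        new UserCompactionTaskGranularityConfig(null, Granularities.DAY, null); // change query granularity only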

Example 12 with UserCompactionTaskGranularityConfig

Use of org.apache.druid.server.coordinator.UserCompactionTaskGranularityConfig in project druid by druid-io.

From the class ITAutoCompactionTest, method testAutoCompactionDutyWithSegmentGranularityAndExistingCompactedSegmentsHaveDifferentSegmentGranularity.

@Test
public void testAutoCompactionDutyWithSegmentGranularityAndExistingCompactedSegmentsHaveDifferentSegmentGranularity() throws Exception {
    loadData(INDEX_TASK);
    try (final Closeable ignored = unloader(fullDatasourceName)) {
        final List<String> intervalsBeforeCompaction = coordinator.getSegmentIntervals(fullDatasourceName);
        intervalsBeforeCompaction.sort(null);
        // 4 segments across 2 days (4 total)...
        verifySegmentsCount(4);
        verifyQuery(INDEX_QUERIES_RESOURCE);
        // Compacted without SegmentGranularity in auto compaction config
        submitCompactionConfig(MAX_ROWS_PER_SEGMENT_COMPACTED, NO_SKIP_OFFSET);
        forceTriggerAutoCompaction(2);
        verifyQuery(INDEX_QUERIES_RESOURCE);
        verifySegmentsCompacted(2, MAX_ROWS_PER_SEGMENT_COMPACTED);
        List<TaskResponseObject> compactTasksBefore = indexer.getCompleteTasksForDataSource(fullDatasourceName);
        // Segments were compacted and already have DAY granularity since the data was initially ingested with DAY granularity.
        // Now set auto compaction with YEAR granularity in the granularitySpec
        Granularity newGranularity = Granularities.YEAR;
        submitCompactionConfig(MAX_ROWS_PER_SEGMENT_COMPACTED, NO_SKIP_OFFSET, new UserCompactionTaskGranularityConfig(newGranularity, null, null));
        forceTriggerAutoCompaction(1);
        verifyQuery(INDEX_QUERIES_RESOURCE);
        verifySegmentsCompacted(1, MAX_ROWS_PER_SEGMENT_COMPACTED);
        // There should be new compaction tasks since SegmentGranularity changed from DAY to YEAR
        List<TaskResponseObject> compactTasksAfter = indexer.getCompleteTasksForDataSource(fullDatasourceName);
        Assert.assertTrue(compactTasksAfter.size() > compactTasksBefore.size());
    }
}
Also used : TaskResponseObject(org.apache.druid.testing.clients.TaskResponseObject) Closeable(java.io.Closeable) UserCompactionTaskGranularityConfig(org.apache.druid.server.coordinator.UserCompactionTaskGranularityConfig) Granularity(org.apache.druid.java.util.common.granularity.Granularity) Test(org.testng.annotations.Test) AbstractIndexerTest(org.apache.druid.tests.indexer.AbstractIndexerTest) AbstractITBatchIndexTest(org.apache.druid.tests.indexer.AbstractITBatchIndexTest)
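Why forceTriggerAutoCompaction(1) expects a single compacted segment here: both day-level intervals of this datasource (the same INDEX_TASK data whose intervals Example 14 spells out) fall into the same 2013 year bucket. A small sketch of that bucketing, assuming Granularity.getIterable yields the granularity-aligned intervals covering its input, which is how the loops in Example 14 use it:

import org.apache.druid.java.util.common.granularity.Granularities;
import org.joda.time.Interval;
import org.joda.time.chrono.ISOChronology;

Interval day1 = new Interval("2013-08-31/2013-09-01", ISOChronology.getInstanceUTC());
Interval day2 = new Interval("2013-09-01/2013-09-02", ISOChronology.getInstanceUTC());
// Both are expected to print the same YEAR bucket, 2013-01-01/2014-01-01
System.out.println(Granularities.YEAR.getIterable(day1).iterator().next());
System.out.println(Granularities.YEAR.getIterable(day2).iterator().next());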

Example 13 with UserCompactionTaskGranularityConfig

Use of org.apache.druid.server.coordinator.UserCompactionTaskGranularityConfig in project druid by druid-io.

From the class ITAutoCompactionTest, method testAutoCompactionDutyWithQueryGranularity.

@Test
public void testAutoCompactionDutyWithQueryGranularity() throws Exception {
    final ISOChronology chrono = ISOChronology.getInstance(DateTimes.inferTzFromString("America/Los_Angeles"));
    Map<String, Object> specs = ImmutableMap.of("%%GRANULARITYSPEC%%", new UniformGranularitySpec(Granularities.DAY, Granularities.NONE, true, ImmutableList.of(new Interval("2013-08-31/2013-09-02", chrono))));
    loadData(INDEX_TASK_WITH_GRANULARITY_SPEC, specs);
    try (final Closeable ignored = unloader(fullDatasourceName)) {
        Map<String, Object> expectedResult = ImmutableMap.of("%%FIELD_TO_QUERY%%", "added", "%%EXPECTED_COUNT_RESULT%%", 2, "%%EXPECTED_SCAN_RESULT%%", ImmutableList.of(ImmutableMap.of("events", ImmutableList.of(ImmutableList.of(57.0), ImmutableList.of(459.0)))));
        verifyQuery(INDEX_ROLLUP_QUERIES_RESOURCE, expectedResult);
        submitCompactionConfig(MAX_ROWS_PER_SEGMENT_COMPACTED, NO_SKIP_OFFSET, new UserCompactionTaskGranularityConfig(null, Granularities.DAY, null), false);
        forceTriggerAutoCompaction(2);
        expectedResult = ImmutableMap.of("%%FIELD_TO_QUERY%%", "added", "%%EXPECTED_COUNT_RESULT%%", 1, "%%EXPECTED_SCAN_RESULT%%", ImmutableList.of(ImmutableMap.of("events", ImmutableList.of(ImmutableList.of(516.0)))));
        verifyQuery(INDEX_ROLLUP_QUERIES_RESOURCE, expectedResult);
        verifySegmentsCompacted(2, MAX_ROWS_PER_SEGMENT_COMPACTED);
        List<TaskResponseObject> compactTasksBefore = indexer.getCompleteTasksForDataSource(fullDatasourceName);
        // Verify that rolled-up segments do not get compacted again
        forceTriggerAutoCompaction(2);
        List<TaskResponseObject> compactTasksAfter = indexer.getCompleteTasksForDataSource(fullDatasourceName);
        Assert.assertEquals(compactTasksAfter.size(), compactTasksBefore.size());
    }
}
Also used : ISOChronology(org.joda.time.chrono.ISOChronology) UniformGranularitySpec(org.apache.druid.segment.indexing.granularity.UniformGranularitySpec) TaskResponseObject(org.apache.druid.testing.clients.TaskResponseObject) Closeable(java.io.Closeable) UserCompactionTaskGranularityConfig(org.apache.druid.server.coordinator.UserCompactionTaskGranularityConfig) Interval(org.joda.time.Interval) Test(org.testng.annotations.Test) AbstractIndexerTest(org.apache.druid.tests.indexer.AbstractIndexerTest) AbstractITBatchIndexTest(org.apache.druid.tests.indexer.AbstractITBatchIndexTest)
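The expected results above make the rollup arithmetic concrete: before compaction the scan returns two rows with added values 57.0 and 459.0 (count 2); after compacting with DAY query granularity they roll up into a single row of 57.0 + 459.0 = 516.0 (count 1).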

Example 14 with UserCompactionTaskGranularityConfig

Use of org.apache.druid.server.coordinator.UserCompactionTaskGranularityConfig in project druid by druid-io.

From the class ITAutoCompactionTest, method testAutoCompactionDutyWithSegmentGranularityAndSmallerSegmentGranularityCoveringMultipleSegmentsInTimelineAndDropExistingFalse.

@Test
public void testAutoCompactionDutyWithSegmentGranularityAndSmallerSegmentGranularityCoveringMultipleSegmentsInTimelineAndDropExistingFalse() throws Exception {
    loadData(INDEX_TASK);
    try (final Closeable ignored = unloader(fullDatasourceName)) {
        final List<String> intervalsBeforeCompaction = coordinator.getSegmentIntervals(fullDatasourceName);
        intervalsBeforeCompaction.sort(null);
        // 4 segments across 2 days (4 total)...
        verifySegmentsCount(4);
        verifyQuery(INDEX_QUERIES_RESOURCE);
        Granularity newGranularity = Granularities.YEAR;
        // Set dropExisting to false
        submitCompactionConfig(MAX_ROWS_PER_SEGMENT_COMPACTED, NO_SKIP_OFFSET, new UserCompactionTaskGranularityConfig(newGranularity, null, null), false);
        List<String> expectedIntervalAfterCompaction = new ArrayList<>();
        // We will have one segment with the interval 2013-01-01/2014-01-01 (compacted with YEAR)
        for (String interval : intervalsBeforeCompaction) {
            for (Interval newInterval : newGranularity.getIterable(new Interval(interval, ISOChronology.getInstanceUTC()))) {
                expectedIntervalAfterCompaction.add(newInterval.toString());
            }
        }
        forceTriggerAutoCompaction(1);
        verifyQuery(INDEX_QUERIES_RESOURCE);
        verifySegmentsCompacted(1, MAX_ROWS_PER_SEGMENT_COMPACTED);
        checkCompactionIntervals(expectedIntervalAfterCompaction);
        loadData(INDEX_TASK);
        verifySegmentsCount(5);
        verifyQuery(INDEX_QUERIES_RESOURCE);
        // 5 segments: 1 compacted YEAR segment and 4 newly ingested DAY segments across 2 days.
        // We will have one segment with the interval 2013-01-01/2014-01-01 (compacted with YEAR) from the earlier compaction,
        // two segments with the interval 2013-08-31/2013-09-01 (newly ingested with DAY),
        // and two segments with the interval 2013-09-01/2013-09-02 (newly ingested with DAY)
        expectedIntervalAfterCompaction.addAll(intervalsBeforeCompaction);
        checkCompactionIntervals(expectedIntervalAfterCompaction);
        newGranularity = Granularities.MONTH;
        // Set dropExisting to false
        submitCompactionConfig(MAX_ROWS_PER_SEGMENT_COMPACTED, NO_SKIP_OFFSET, new UserCompactionTaskGranularityConfig(newGranularity, null, null), false);
        // Since dropExisting is set to false, this will submit a single compaction task
        // for the interval 2013-01-01/2014-01-01 with MONTH granularity
        expectedIntervalAfterCompaction = new ArrayList<>();
        // We will have one segment with the interval 2013-01-01/2014-01-01 (compacted with YEAR) from before this compaction
        for (String interval : intervalsBeforeCompaction) {
            for (Interval newInterval : Granularities.YEAR.getIterable(new Interval(interval, ISOChronology.getInstanceUTC()))) {
                expectedIntervalAfterCompaction.add(newInterval.toString());
            }
        }
        // plus the MONTH-granularity intervals covering the ingested days (compacted with MONTH)
        for (String interval : intervalsBeforeCompaction) {
            for (Interval newInterval : Granularities.MONTH.getIterable(new Interval(interval, ISOChronology.getInstanceUTC()))) {
                expectedIntervalAfterCompaction.add(newInterval.toString());
            }
        }
        forceTriggerAutoCompaction(3);
        verifyQuery(INDEX_QUERIES_RESOURCE);
        verifySegmentsCompacted(3, MAX_ROWS_PER_SEGMENT_COMPACTED);
        checkCompactionIntervals(expectedIntervalAfterCompaction);
    }
}
Also used : Closeable(java.io.Closeable) ArrayList(java.util.ArrayList) UserCompactionTaskGranularityConfig(org.apache.druid.server.coordinator.UserCompactionTaskGranularityConfig) Granularity(org.apache.druid.java.util.common.granularity.Granularity) Interval(org.joda.time.Interval) Test(org.testng.annotations.Test) AbstractIndexerTest(org.apache.druid.tests.indexer.AbstractIndexerTest) AbstractITBatchIndexTest(org.apache.druid.tests.indexer.AbstractITBatchIndexTest)
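A sketch of what the MONTH expansion loop above contributes, under the same assumption about Granularity.getIterable as before; since each day interval lies inside exactly one month bucket, each pre-compaction day adds one MONTH interval to the expected list:

import org.apache.druid.java.util.common.granularity.Granularities;
import org.joda.time.Interval;
import org.joda.time.chrono.ISOChronology;

// Stand-ins for intervalsBeforeCompaction (the two DAY intervals named above)
for (String interval : new String[]{"2013-08-31/2013-09-01", "2013-09-01/2013-09-02"}) {
    for (Interval monthBucket : Granularities.MONTH.getIterable(new Interval(interval, ISOChronology.getInstanceUTC()))) {
        // Expected: 2013-08-01/2013-09-01 for the first day, 2013-09-01/2013-10-01 for the second
        System.out.println(monthBucket);
    }
}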

Example 15 with UserCompactionTaskGranularityConfig

Use of org.apache.druid.server.coordinator.UserCompactionTaskGranularityConfig in project druid by druid-io.

From the class ITAutoCompactionUpgradeTest, method testUpgradeAutoCompactionConfigurationWhenConfigurationFromOlderVersionAlreadyExist.

@Test
public void testUpgradeAutoCompactionConfigurationWhenConfigurationFromOlderVersionAlreadyExist() throws Exception {
    // Verify that a compaction config already exists. This config was inserted manually into the database using a SQL script.
    // This auto compaction configuration payload is from Druid 0.21.0
    CoordinatorCompactionConfig coordinatorCompactionConfig = compactionResource.getCoordinatorCompactionConfigs();
    DataSourceCompactionConfig foundDataSourceCompactionConfig = null;
    for (DataSourceCompactionConfig dataSourceCompactionConfig : coordinatorCompactionConfig.getCompactionConfigs()) {
        if (dataSourceCompactionConfig.getDataSource().equals(UPGRADE_DATASOURCE_NAME)) {
            foundDataSourceCompactionConfig = dataSourceCompactionConfig;
        }
    }
    Assert.assertNotNull(foundDataSourceCompactionConfig);
    // Now submit a new auto compaction configuration
    PartitionsSpec newPartitionsSpec = new DynamicPartitionsSpec(4000, null);
    Period newSkipOffset = Period.seconds(0);
    DataSourceCompactionConfig compactionConfig = new DataSourceCompactionConfig(UPGRADE_DATASOURCE_NAME, null, null, null, newSkipOffset, new UserCompactionTaskQueryTuningConfig(null, null, null, new MaxSizeSplitHintSpec(null, 1), newPartitionsSpec, null, null, null, null, null, 1, null, null, null, null, null, 1), new UserCompactionTaskGranularityConfig(Granularities.YEAR, null, null), null, null, null, new UserCompactionTaskIOConfig(true), null);
    compactionResource.submitCompactionConfig(compactionConfig);
    // Wait for compaction config to persist
    Thread.sleep(2000);
    // Verify that the compaction config was successfully updated
    coordinatorCompactionConfig = compactionResource.getCoordinatorCompactionConfigs();
    foundDataSourceCompactionConfig = null;
    for (DataSourceCompactionConfig dataSourceCompactionConfig : coordinatorCompactionConfig.getCompactionConfigs()) {
        if (dataSourceCompactionConfig.getDataSource().equals(UPGRADE_DATASOURCE_NAME)) {
            foundDataSourceCompactionConfig = dataSourceCompactionConfig;
        }
    }
    Assert.assertNotNull(foundDataSourceCompactionConfig);
    Assert.assertNotNull(foundDataSourceCompactionConfig.getTuningConfig());
    Assert.assertEquals(foundDataSourceCompactionConfig.getTuningConfig().getPartitionsSpec(), newPartitionsSpec);
    Assert.assertEquals(foundDataSourceCompactionConfig.getSkipOffsetFromLatest(), newSkipOffset);
}
Also used : CoordinatorCompactionConfig(org.apache.druid.server.coordinator.CoordinatorCompactionConfig) DynamicPartitionsSpec(org.apache.druid.indexer.partitions.DynamicPartitionsSpec) DataSourceCompactionConfig(org.apache.druid.server.coordinator.DataSourceCompactionConfig) PartitionsSpec(org.apache.druid.indexer.partitions.PartitionsSpec) UserCompactionTaskIOConfig(org.apache.druid.server.coordinator.UserCompactionTaskIOConfig) Period(org.joda.time.Period) UserCompactionTaskQueryTuningConfig(org.apache.druid.server.coordinator.UserCompactionTaskQueryTuningConfig) UserCompactionTaskGranularityConfig(org.apache.druid.server.coordinator.UserCompactionTaskGranularityConfig) MaxSizeSplitHintSpec(org.apache.druid.data.input.MaxSizeSplitHintSpec) Test(org.testng.annotations.Test) AbstractIndexerTest(org.apache.druid.tests.indexer.AbstractIndexerTest)
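As a usage note, the config lookup loop appears twice in this test; a hypothetical helper (not part of the Druid test code) could express the same search using only the getters the test already calls:

import org.apache.druid.server.coordinator.CoordinatorCompactionConfig;
import org.apache.druid.server.coordinator.DataSourceCompactionConfig;

// Hypothetical helper; returns null when the datasource has no compaction config
static DataSourceCompactionConfig findConfig(CoordinatorCompactionConfig configs, String dataSource) {
    return configs.getCompactionConfigs()
                  .stream()
                  .filter(c -> dataSource.equals(c.getDataSource()))
                  .findFirst()
                  .orElse(null);
}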

Aggregations

UserCompactionTaskGranularityConfig (org.apache.druid.server.coordinator.UserCompactionTaskGranularityConfig): 36 uses
Period (org.joda.time.Period): 27 uses
Test (org.junit.Test): 26 uses
ArrayList (java.util.ArrayList): 24 uses
DataSegment (org.apache.druid.timeline.DataSegment): 18 uses
PartitionsSpec (org.apache.druid.indexer.partitions.PartitionsSpec): 10 uses
DataSourceCompactionConfig (org.apache.druid.server.coordinator.DataSourceCompactionConfig): 10 uses
AbstractIndexerTest (org.apache.druid.tests.indexer.AbstractIndexerTest): 10 uses
Test (org.testng.annotations.Test): 10 uses
ImmutableMap (com.google.common.collect.ImmutableMap): 9 uses
Closeable (java.io.Closeable): 9 uses
Map (java.util.Map): 9 uses
IndexSpec (org.apache.druid.segment.IndexSpec): 9 uses
AbstractITBatchIndexTest (org.apache.druid.tests.indexer.AbstractITBatchIndexTest): 9 uses
CompactionState (org.apache.druid.timeline.CompactionState): 9 uses
Granularity (org.apache.druid.java.util.common.granularity.Granularity): 7 uses
Interval (org.joda.time.Interval): 7 uses
CoordinatorCompactionConfig (org.apache.druid.server.coordinator.CoordinatorCompactionConfig): 6 uses
UserCompactionTaskQueryTuningConfig (org.apache.druid.server.coordinator.UserCompactionTaskQueryTuningConfig): 5 uses
ImmutableList (com.google.common.collect.ImmutableList): 4 uses