use of org.apache.druid.server.coordinator.CoordinatorCompactionConfig in project druid by druid-io.
the class KillCompactionConfig method run.
@Override
public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
  long currentTimeMillis = System.currentTimeMillis();
  if ((lastKillTime + period) < currentTimeMillis) {
    lastKillTime = currentTimeMillis;
    try {
      RetryUtils.retry(
          () -> {
            final byte[] currentBytes = CoordinatorCompactionConfig.getConfigInByteFromDb(connector, connectorConfig);
            final CoordinatorCompactionConfig current = CoordinatorCompactionConfig.convertByteToConfig(jacksonConfigManager, currentBytes);
            // If the current compaction config is empty, there is nothing to do.
            if (CoordinatorCompactionConfig.empty().equals(current)) {
              log.info("Finished running KillCompactionConfig duty. Nothing to do as compaction config is already empty.");
              emitMetric(params.getEmitter(), 0);
              return ConfigManager.SetResult.ok();
            }
            // Get all active datasources. Note that we fetch them only after reading the compaction config,
            // to prevent a race condition when a new datasource and its config are added concurrently.
            Set<String> activeDatasources = sqlSegmentsMetadataManager.retrieveAllDataSourceNames();
            final Map<String, DataSourceCompactionConfig> updated = current
                .getCompactionConfigs()
                .stream()
                .filter(dataSourceCompactionConfig -> activeDatasources.contains(dataSourceCompactionConfig.getDataSource()))
                .collect(Collectors.toMap(DataSourceCompactionConfig::getDataSource, Function.identity()));
            // Number of compaction configs to be removed, for logging
            int compactionConfigRemoved = current.getCompactionConfigs().size() - updated.size();
            ConfigManager.SetResult result = jacksonConfigManager.set(
                CoordinatorCompactionConfig.CONFIG_KEY,
                currentBytes,
                CoordinatorCompactionConfig.from(current, ImmutableList.copyOf(updated.values())),
                new AuditInfo("KillCompactionConfig", "CoordinatorDuty for automatic deletion of compaction config", "")
            );
            if (result.isOk()) {
              log.info("Finished running KillCompactionConfig duty. Removed %,d compaction configs", compactionConfigRemoved);
              emitMetric(params.getEmitter(), compactionConfigRemoved);
            } else if (result.isRetryable()) {
              // Failed, but the update can be retried
              log.debug("Retrying KillCompactionConfig duty");
              throw new RetryableException(result.getException());
            } else {
              // Failed and not retryable
              log.error(result.getException(), "Failed to kill compaction configurations");
              emitMetric(params.getEmitter(), 0);
            }
            return result;
          },
          e -> e instanceof RetryableException,
          UPDATE_NUM_RETRY
      );
    } catch (Exception e) {
      log.error(e, "Failed to kill compaction configurations");
      emitMetric(params.getEmitter(), 0);
    }
  }
  return params;
}
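The compare-and-swap update above only retries when the config manager reports a retryable failure. A minimal, self-contained sketch of that retry shape (plain Java, not Druid's RetryUtils; the helper name, predicate, and attempt count are placeholders):

import java.util.concurrent.Callable;
import java.util.function.Predicate;

final class RetrySketch {
  // Runs the task up to maxTries times, retrying only when shouldRetry matches the thrown exception.
  static <T> T retry(Callable<T> task, Predicate<Throwable> shouldRetry, int maxTries) throws Exception {
    for (int attempt = 1; attempt <= maxTries; attempt++) {
      try {
        return task.call();
      } catch (Exception e) {
        if (!shouldRetry.test(e) || attempt == maxTries) {
          throw e;
        }
        // Otherwise fall through and try again, mirroring e -> e instanceof RetryableException above.
      }
    }
    throw new IllegalStateException("maxTries must be positive");
  }
}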
use of org.apache.druid.server.coordinator.CoordinatorCompactionConfig in project druid by druid-io.
the class CompactionResourceTestClient method getCoordinatorCompactionConfigs.
public CoordinatorCompactionConfig getCoordinatorCompactionConfigs() throws Exception {
  String url = StringUtils.format("%sconfig/compaction", getCoordinatorURL());
  StatusResponseHolder response = httpClient.go(new Request(HttpMethod.GET, new URL(url)), responseHandler).get();
  if (!response.getStatus().equals(HttpResponseStatus.OK)) {
    throw new ISE("Error while getting compaction config status[%s] content[%s]", response.getStatus(), response.getContent());
  }
  return jsonMapper.readValue(response.getContent(), new TypeReference<CoordinatorCompactionConfig>() {});
}
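The helper above reads the cluster-wide compaction config through the Coordinator's config/compaction endpoint. A hedged sketch of the same read with plain java.net.http (the Coordinator address below is an assumption; the response body is the JSON form of CoordinatorCompactionConfig):

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class FetchCompactionConfig {
  public static void main(String[] args) throws Exception {
    String coordinator = "http://localhost:8081"; // assumed Coordinator address
    HttpRequest request = HttpRequest.newBuilder()
        .uri(URI.create(coordinator + "/druid/coordinator/v1/config/compaction"))
        .GET()
        .build();
    HttpResponse<String> response = HttpClient.newHttpClient()
        .send(request, HttpResponse.BodyHandlers.ofString());
    if (response.statusCode() != 200) {
      throw new IllegalStateException("Unexpected status " + response.statusCode() + ": " + response.body());
    }
    System.out.println(response.body());
  }
}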
use of org.apache.druid.server.coordinator.CoordinatorCompactionConfig in project druid by druid-io.
the class ITAutoCompactionTest method submitCompactionConfig.
private void submitCompactionConfig(PartitionsSpec partitionsSpec, Period skipOffsetFromLatest, int maxNumConcurrentSubTasks, UserCompactionTaskGranularityConfig granularitySpec, UserCompactionTaskDimensionsConfig dimensionsSpec, UserCompactionTaskTransformConfig transformSpec, AggregatorFactory[] metricsSpec, boolean dropExisting) throws Exception {
  DataSourceCompactionConfig compactionConfig = new DataSourceCompactionConfig(
      fullDatasourceName,
      null, null, null,
      skipOffsetFromLatest,
      new UserCompactionTaskQueryTuningConfig(
          null, null, null,
          new MaxSizeSplitHintSpec(null, 1),
          partitionsSpec,
          null, null, null, null, null,
          maxNumConcurrentSubTasks,
          null, null, null, null, null,
          1
      ),
      granularitySpec,
      dimensionsSpec,
      metricsSpec,
      transformSpec,
      !dropExisting ? null : new UserCompactionTaskIOConfig(true),
      null
  );
  compactionResource.submitCompactionConfig(compactionConfig);
  // Wait for compaction config to persist
  Thread.sleep(2000);
  // Verify that the compaction config is updated correctly.
  CoordinatorCompactionConfig coordinatorCompactionConfig = compactionResource.getCoordinatorCompactionConfigs();
  DataSourceCompactionConfig foundDataSourceCompactionConfig = null;
  for (DataSourceCompactionConfig dataSourceCompactionConfig : coordinatorCompactionConfig.getCompactionConfigs()) {
    if (dataSourceCompactionConfig.getDataSource().equals(fullDatasourceName)) {
      foundDataSourceCompactionConfig = dataSourceCompactionConfig;
    }
  }
  Assert.assertNotNull(foundDataSourceCompactionConfig);
  Assert.assertNotNull(foundDataSourceCompactionConfig.getTuningConfig());
  Assert.assertEquals(foundDataSourceCompactionConfig.getTuningConfig().getPartitionsSpec(), partitionsSpec);
  Assert.assertEquals(foundDataSourceCompactionConfig.getSkipOffsetFromLatest(), skipOffsetFromLatest);
  foundDataSourceCompactionConfig = compactionResource.getDataSourceCompactionConfig(fullDatasourceName);
  Assert.assertNotNull(foundDataSourceCompactionConfig);
  Assert.assertNotNull(foundDataSourceCompactionConfig.getTuningConfig());
  Assert.assertEquals(foundDataSourceCompactionConfig.getTuningConfig().getPartitionsSpec(), partitionsSpec);
  Assert.assertEquals(foundDataSourceCompactionConfig.getSkipOffsetFromLatest(), skipOffsetFromLatest);
}
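The linear scan for the datasource's entry recurs in several of these tests. A small stream-based helper (hypothetical, not part of the Druid test code) expresses the same lookup:

import java.util.Optional;
import org.apache.druid.server.coordinator.CoordinatorCompactionConfig;
import org.apache.druid.server.coordinator.DataSourceCompactionConfig;

final class CompactionConfigLookup {
  // Returns the per-datasource config if present, otherwise Optional.empty().
  static Optional<DataSourceCompactionConfig> findFor(CoordinatorCompactionConfig config, String dataSource) {
    return config.getCompactionConfigs()
                 .stream()
                 .filter(c -> c.getDataSource().equals(dataSource))
                 .findFirst();
  }
}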
use of org.apache.druid.server.coordinator.CoordinatorCompactionConfig in project druid by druid-io.
the class ITAutoCompactionTest method deleteCompactionConfig.
private void deleteCompactionConfig() throws Exception {
  compactionResource.deleteCompactionConfig(fullDatasourceName);
  // Verify that the compaction config for this datasource has been removed.
  CoordinatorCompactionConfig coordinatorCompactionConfig = compactionResource.getCoordinatorCompactionConfigs();
  DataSourceCompactionConfig foundDataSourceCompactionConfig = null;
  for (DataSourceCompactionConfig dataSourceCompactionConfig : coordinatorCompactionConfig.getCompactionConfigs()) {
    if (dataSourceCompactionConfig.getDataSource().equals(fullDatasourceName)) {
      foundDataSourceCompactionConfig = dataSourceCompactionConfig;
    }
  }
  Assert.assertNull(foundDataSourceCompactionConfig);
}
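Behind compactionResource.deleteCompactionConfig, the Coordinator exposes a per-datasource DELETE endpoint. A hedged sketch of that call with java.net.http (Coordinator address and datasource name are placeholders):

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class DeleteCompactionConfig {
  public static void main(String[] args) throws Exception {
    String coordinator = "http://localhost:8081"; // assumed Coordinator address
    String dataSource = "wikipedia_test";         // placeholder datasource name
    HttpRequest request = HttpRequest.newBuilder()
        .uri(URI.create(coordinator + "/druid/coordinator/v1/config/compaction/" + dataSource))
        .DELETE()
        .build();
    HttpResponse<String> response = HttpClient.newHttpClient()
        .send(request, HttpResponse.BodyHandlers.ofString());
    System.out.println("Status: " + response.statusCode());
  }
}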
use of org.apache.druid.server.coordinator.CoordinatorCompactionConfig in project druid by druid-io.
the class ITAutoCompactionLockContentionTest method submitAndVerifyCompactionConfig.
/**
 * Submits a compaction config for the current datasource and verifies that it has been applied.
 */
private void submitAndVerifyCompactionConfig() throws Exception {
  final DataSourceCompactionConfig compactionConfig = CompactionUtil.createCompactionConfig(fullDatasourceName, Specs.MAX_ROWS_PER_SEGMENT, Period.ZERO);
  compactionResource.updateCompactionTaskSlot(0.5, 10, null);
  compactionResource.submitCompactionConfig(compactionConfig);
  // Wait for compaction config to persist
  Thread.sleep(2000);
  // Verify that the compaction config is updated correctly.
  CoordinatorCompactionConfig coordinatorCompactionConfig = compactionResource.getCoordinatorCompactionConfigs();
  DataSourceCompactionConfig observedCompactionConfig = null;
  for (DataSourceCompactionConfig dataSourceCompactionConfig : coordinatorCompactionConfig.getCompactionConfigs()) {
    if (dataSourceCompactionConfig.getDataSource().equals(fullDatasourceName)) {
      observedCompactionConfig = dataSourceCompactionConfig;
    }
  }
  Assert.assertEquals(observedCompactionConfig, compactionConfig);
  observedCompactionConfig = compactionResource.getDataSourceCompactionConfig(fullDatasourceName);
  Assert.assertEquals(observedCompactionConfig, compactionConfig);
}
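The fixed Thread.sleep(2000) in these tests assumes the submitted config becomes visible within two seconds. One alternative is to poll until it appears; a sketch of such a helper (the name and polling interval are arbitrary):

import java.util.concurrent.Callable;

final class PollUntil {
  // Polls the condition every 200 ms until it returns true or the timeout elapses.
  static void await(Callable<Boolean> condition, long timeoutMillis) throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMillis;
    while (System.currentTimeMillis() < deadline) {
      if (Boolean.TRUE.equals(condition.call())) {
        return;
      }
      Thread.sleep(200);
    }
    throw new AssertionError("Condition not satisfied within " + timeoutMillis + " ms");
  }
}

For example, PollUntil.await(() -> compactionResource.getCoordinatorCompactionConfigs().getCompactionConfigs().stream().anyMatch(c -> c.getDataSource().equals(fullDatasourceName)), 10_000) could stand in for the sleep in submitAndVerifyCompactionConfig.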