Use of org.apache.druid.java.util.common.granularity.Granularity in project druid by druid-io.
From the class QueryGranularityTest, method testStandardGranularitiesSerde.
@Test
public void testStandardGranularitiesSerde() throws Exception {
  ObjectMapper mapper = new DefaultObjectMapper();
  for (GranularityType granularityType : GranularityType.values()) {
    final Granularity granularity = granularityType.getDefaultGranularity();
    Assert.assertEquals(granularity, mapper.readValue("\"" + StringUtils.toUpperCase(granularityType.name()) + "\"", Granularity.class));
    Assert.assertEquals(granularity, mapper.readValue("\"" + StringUtils.toLowerCase(granularityType.name()) + "\"", Granularity.class));
    Assert.assertEquals(granularity, mapper.readValue(mapper.writeValueAsString(granularity), Granularity.class));
    if (granularityType == GranularityType.ALL || granularityType == GranularityType.NONE) {
      Assert.assertEquals("{\"type\":\"" + StringUtils.toLowerCase(granularityType.name()) + "\"}", mapper.writeValueAsString(granularity));
    } else {
      Assert.assertEquals("\"" + StringUtils.toUpperCase(granularityType.name()) + "\"", mapper.writeValueAsString(granularity));
    }
  }
}
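The round-trip behavior asserted above can be condensed into a short sketch. This snippet is illustrative, not part of the original test, and assumes the same Druid classes and imports used above (DefaultObjectMapper, Granularity, Granularities):

// Standard granularities deserialize from their upper- or lower-case name and
// serialize back to the upper-case name; ALL and NONE serialize to an object form instead.
ObjectMapper mapper = new DefaultObjectMapper();
Granularity day = mapper.readValue("\"day\"", Granularity.class);      // same result as "DAY"
String dayJson = mapper.writeValueAsString(day);                        // "DAY"
String allJson = mapper.writeValueAsString(Granularities.ALL);          // {"type":"all"}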
Use of org.apache.druid.java.util.common.granularity.Granularity in project druid by druid-io.
From the class QueryGranularityTest, method testSerializePeriod.
@Test
public void testSerializePeriod() throws Exception {
  final ObjectMapper mapper = new DefaultObjectMapper();
  String json = "{ \"type\": \"period\", \"period\": \"P1D\" }";
  Granularity gran = mapper.readValue(json, Granularity.class);
  Assert.assertEquals(new PeriodGranularity(new Period("P1D"), null, null), gran);
  // Nonstandard period
  json = "{ \"type\": \"period\", \"period\": \"P2D\" }";
  gran = mapper.readValue(json, Granularity.class);
  Assert.assertEquals(new PeriodGranularity(new Period("P2D"), null, null), gran);
  // Set timeZone, origin
  json = "{ \"type\": \"period\", \"period\": \"P1D\"," + "\"timeZone\": \"America/Los_Angeles\", \"origin\": \"1970-01-01T00:00:00Z\"}";
  gran = mapper.readValue(json, Granularity.class);
  Assert.assertEquals(new PeriodGranularity(new Period("P1D"), DateTimes.EPOCH, DateTimes.inferTzFromString("America/Los_Angeles")), gran);
  PeriodGranularity expected = new PeriodGranularity(new Period("P1D"), DateTimes.of("2012-01-01"), DateTimes.inferTzFromString("America/Los_Angeles"));
  String jsonOut = mapper.writeValueAsString(expected);
  Assert.assertEquals(expected, mapper.readValue(jsonOut, Granularity.class));
  String illegalJson = "{ \"type\": \"period\", \"period\": \"P0D\" }";
  try {
    mapper.readValue(illegalJson, Granularity.class);
    Assert.fail();
  } catch (JsonMappingException e) {
    // expected: a zero-length period is rejected during deserialization
  }
}
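As a companion to the JSON strings exercised above, the same period granularity can be built programmatically and then serialized. A minimal sketch, assuming the classes already imported by the test (PeriodGranularity, Period, DateTimes, DefaultObjectMapper):

// A daily granularity anchored at a custom origin in the Los Angeles time zone;
// writeValueAsString produces the {"type":"period", ...} form parsed above.
Granularity daily = new PeriodGranularity(
    new Period("P1D"),
    DateTimes.of("2012-01-01"),
    DateTimes.inferTzFromString("America/Los_Angeles")
);
String periodJson = new DefaultObjectMapper().writeValueAsString(daily);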
Use of org.apache.druid.java.util.common.granularity.Granularity in project druid by druid-io.
From the class CompactSegments, method run.
@Override
public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
  LOG.info("Compact segments");
  final CoordinatorCompactionConfig dynamicConfig = params.getCoordinatorCompactionConfig();
  final CoordinatorStats stats = new CoordinatorStats();
  List<DataSourceCompactionConfig> compactionConfigList = dynamicConfig.getCompactionConfigs();
  if (dynamicConfig.getMaxCompactionTaskSlots() > 0) {
    Map<String, VersionedIntervalTimeline<String, DataSegment>> dataSources = params.getUsedSegmentsTimelinesPerDataSource();
    if (compactionConfigList != null && !compactionConfigList.isEmpty()) {
      Map<String, DataSourceCompactionConfig> compactionConfigs = compactionConfigList
          .stream()
          .collect(Collectors.toMap(DataSourceCompactionConfig::getDataSource, Function.identity()));
      final List<TaskStatusPlus> compactionTasks = filterNonCompactionTasks(indexingServiceClient.getActiveTasks());
      // dataSource -> list of intervals for which compaction will be skipped in this run
      final Map<String, List<Interval>> intervalsToSkipCompaction = new HashMap<>();
      int numEstimatedNonCompleteCompactionTasks = 0;
      for (TaskStatusPlus status : compactionTasks) {
        final TaskPayloadResponse response = indexingServiceClient.getTaskPayload(status.getId());
        if (response == null) {
          throw new ISE("Got a null payload from overlord for task[%s]", status.getId());
        }
        if (COMPACTION_TASK_TYPE.equals(response.getPayload().getType())) {
          final ClientCompactionTaskQuery compactionTaskQuery = (ClientCompactionTaskQuery) response.getPayload();
          DataSourceCompactionConfig dataSourceCompactionConfig = compactionConfigs.get(status.getDataSource());
          if (dataSourceCompactionConfig != null && dataSourceCompactionConfig.getGranularitySpec() != null) {
            Granularity configuredSegmentGranularity = dataSourceCompactionConfig.getGranularitySpec().getSegmentGranularity();
            if (configuredSegmentGranularity != null
                && compactionTaskQuery.getGranularitySpec() != null
                && !configuredSegmentGranularity.equals(compactionTaskQuery.getGranularitySpec().getSegmentGranularity())) {
              // Cancel the active compaction task if segmentGranularity has changed;
              // the interval will need to be re-compacted.
              LOG.info(
                  "Canceled task[%s] as task segmentGranularity is [%s] but compaction config segmentGranularity is [%s]",
                  status.getId(),
                  compactionTaskQuery.getGranularitySpec().getSegmentGranularity(),
                  configuredSegmentGranularity
              );
              indexingServiceClient.cancelTask(status.getId());
              continue;
            }
          }
          // Skip this interval as the current active compaction task is good
          final Interval interval = compactionTaskQuery.getIoConfig().getInputSpec().getInterval();
          intervalsToSkipCompaction.computeIfAbsent(status.getDataSource(), k -> new ArrayList<>()).add(interval);
          // Since we keep the current active compaction task running, we count its task slots
          numEstimatedNonCompleteCompactionTasks += findMaxNumTaskSlotsUsedByOneCompactionTask(compactionTaskQuery.getTuningConfig());
        } else {
          throw new ISE("task[%s] is not a compactionTask", status.getId());
        }
      }
      // Skip all the intervals locked by higher-priority tasks for each datasource.
      // This must be done after the invalid compaction tasks are cancelled
      // in the loop above so that their intervals are not considered locked.
      getLockedIntervalsToSkip(compactionConfigList).forEach(
          (dataSource, intervals) ->
              intervalsToSkipCompaction.computeIfAbsent(dataSource, ds -> new ArrayList<>()).addAll(intervals)
      );
      final CompactionSegmentIterator iterator = policy.reset(compactionConfigs, dataSources, intervalsToSkipCompaction);
      int totalCapacity;
      if (dynamicConfig.isUseAutoScaleSlots()) {
        try {
          totalCapacity = indexingServiceClient.getTotalWorkerCapacityWithAutoScale();
        } catch (Exception e) {
          LOG.warn("Failed to get total worker capacity with auto scale slots. Falling back to current capacity count");
          totalCapacity = indexingServiceClient.getTotalWorkerCapacity();
        }
      } else {
        totalCapacity = indexingServiceClient.getTotalWorkerCapacity();
      }
      final int compactionTaskCapacity = (int) Math.min(
          totalCapacity * dynamicConfig.getCompactionTaskSlotRatio(),
          dynamicConfig.getMaxCompactionTaskSlots()
      );
      final int numAvailableCompactionTaskSlots;
      if (numEstimatedNonCompleteCompactionTasks > 0) {
        numAvailableCompactionTaskSlots = Math.max(0, compactionTaskCapacity - numEstimatedNonCompleteCompactionTasks);
      } else {
        // compactionTaskCapacity might be 0 if totalWorkerCapacity is low.
        // This guarantees that at least one slot is available if
        // compaction is enabled and numEstimatedNonCompleteCompactionTasks is 0.
        numAvailableCompactionTaskSlots = Math.max(1, compactionTaskCapacity);
      }
      LOG.info(
          "Found [%d] available task slots for compaction out of [%d] max compaction task capacity",
          numAvailableCompactionTaskSlots,
          compactionTaskCapacity
      );
      stats.addToGlobalStat(AVAILABLE_COMPACTION_TASK_SLOT, numAvailableCompactionTaskSlots);
      stats.addToGlobalStat(MAX_COMPACTION_TASK_SLOT, compactionTaskCapacity);
      final Map<String, AutoCompactionSnapshot.Builder> currentRunAutoCompactionSnapshotBuilders = new HashMap<>();
      if (numAvailableCompactionTaskSlots > 0) {
        stats.accumulate(doRun(compactionConfigs, currentRunAutoCompactionSnapshotBuilders, numAvailableCompactionTaskSlots, iterator));
      } else {
        stats.accumulate(makeStats(currentRunAutoCompactionSnapshotBuilders, 0, iterator));
      }
    } else {
      LOG.info("compactionConfig is empty. Skip.");
      autoCompactionSnapshotPerDataSource.set(new HashMap<>());
    }
  } else {
    LOG.info("maxCompactionTaskSlots was set to 0. Skip compaction");
    autoCompactionSnapshotPerDataSource.set(new HashMap<>());
  }
  return params.buildFromExisting().withCoordinatorStats(stats).build();
}
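The slot accounting in the middle of run() is easy to miss in the long method, so here is the same arithmetic pulled out as an illustrative, self-contained sketch. The variable names mirror the code above, but the input values are hypothetical:

// Hypothetical inputs
int totalCapacity = 10;      // worker capacity reported by the indexing service
double slotRatio = 0.1;      // dynamicConfig.getCompactionTaskSlotRatio()
int maxSlots = 5;            // dynamicConfig.getMaxCompactionTaskSlots()
int busySlots = 0;           // estimated slots held by still-running compaction tasks

// Capacity is capped both by the ratio and by the absolute maximum.
int compactionTaskCapacity = (int) Math.min(totalCapacity * slotRatio, maxSlots);   // 1

// When nothing is running, at least one slot is granted even if the capped capacity is 0.
int numAvailableCompactionTaskSlots = busySlots > 0
    ? Math.max(0, compactionTaskCapacity - busySlots)
    : Math.max(1, compactionTaskCapacity);                                           // 1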
Use of org.apache.druid.java.util.common.granularity.Granularity in project druid by druid-io.
From the class CalciteInsertDmlTest, method testPartitionedBySupportedClauses.
@Test
public void testPartitionedBySupportedClauses() {
  RowSignature targetRowSignature = RowSignature.builder()
      .add("__time", ColumnType.LONG)
      .add("dim1", ColumnType.STRING)
      .build();
  Map<String, Granularity> partitionedByArgumentToGranularityMap =
      ImmutableMap.<String, Granularity>builder()
          .put("HOUR", Granularities.HOUR)
          .put("DAY", Granularities.DAY)
          .put("MONTH", Granularities.MONTH)
          .put("YEAR", Granularities.YEAR)
          .put("ALL", Granularities.ALL)
          .put("ALL TIME", Granularities.ALL)
          .put("FLOOR(__time TO QUARTER)", Granularities.QUARTER)
          .put("TIME_FLOOR(__time, 'PT1H')", Granularities.HOUR)
          .build();
  partitionedByArgumentToGranularityMap.forEach((partitionedByArgument, expectedGranularity) -> {
    Map<String, Object> queryContext = null;
    try {
      queryContext = ImmutableMap.of(
          DruidSqlInsert.SQL_INSERT_SEGMENT_GRANULARITY,
          queryJsonMapper.writeValueAsString(expectedGranularity)
      );
    } catch (JsonProcessingException e) {
      // Won't reach here
      Assert.fail(e.getMessage());
    }
    testInsertQuery()
        .sql(StringUtils.format("INSERT INTO druid.dst SELECT __time, dim1 FROM foo PARTITIONED BY %s", partitionedByArgument))
        .expectTarget("dst", targetRowSignature)
        .expectResources(dataSourceRead("foo"), dataSourceWrite("dst"))
        .expectQuery(
            newScanQueryBuilder()
                .dataSource("foo")
                .intervals(querySegmentSpec(Filtration.eternity()))
                .columns("__time", "dim1")
                .context(queryContext)
                .build()
        )
        .verify();
    didTest = false;
  });
  didTest = true;
}
Use of org.apache.druid.java.util.common.granularity.Granularity in project druid by druid-io.
From the class TimeseriesQueryRunnerTest, method testTimeseriesWithTimestampResultFieldContextForMapResponse.
@Test
public void testTimeseriesWithTimestampResultFieldContextForMapResponse() {
  Granularity gran = Granularities.DAY;
  TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
      .dataSource(QueryRunnerTestHelper.DATA_SOURCE)
      .granularity(gran)
      .intervals(QueryRunnerTestHelper.FULL_ON_INTERVAL_SPEC)
      .aggregators(QueryRunnerTestHelper.ROWS_COUNT, QueryRunnerTestHelper.INDEX_DOUBLE_SUM, QueryRunnerTestHelper.QUALITY_UNIQUES)
      .postAggregators(QueryRunnerTestHelper.ADD_ROWS_INDEX_CONSTANT)
      .descending(descending)
      .context(makeContext(ImmutableMap.of(
          TimeseriesQuery.CTX_TIMESTAMP_RESULT_FIELD, TIMESTAMP_RESULT_FIELD_NAME,
          TimeseriesQuery.SKIP_EMPTY_BUCKETS, true
      )))
      .build();
  Assert.assertEquals(TIMESTAMP_RESULT_FIELD_NAME, query.getTimestampResultField());
  Iterable<Result<TimeseriesResultValue>> results = runner.run(QueryPlus.wrap(query)).toList();
  final String[] expectedIndex = descending
      ? QueryRunnerTestHelper.EXPECTED_FULL_ON_INDEX_VALUES_DESC
      : QueryRunnerTestHelper.EXPECTED_FULL_ON_INDEX_VALUES;
  final String[] expectedIndexToUse = Arrays.stream(expectedIndex)
      .filter(eachIndex -> !"0.0".equals(eachIndex))
      .toArray(String[]::new);
  final DateTime expectedLast = descending ? QueryRunnerTestHelper.EARLIEST : QueryRunnerTestHelper.LAST;
  int count = 0;
  Result lastResult = null;
  for (Result<TimeseriesResultValue> result : results) {
    DateTime current = result.getTimestamp();
    Assert.assertFalse(
        StringUtils.format("Timestamp[%s] > expectedLast[%s]", current, expectedLast),
        descending ? current.isBefore(expectedLast) : current.isAfter(expectedLast)
    );
    final TimeseriesResultValue value = result.getValue();
    Assert.assertEquals(value.getLongMetric(TIMESTAMP_RESULT_FIELD_NAME), current.getMillis(), 0);
    Assert.assertEquals(
        result.toString(),
        QueryRunnerTestHelper.SKIPPED_DAY.equals(current) ? 0L : 13L,
        value.getLongMetric("rows").longValue()
    );
    if (!QueryRunnerTestHelper.SKIPPED_DAY.equals(current)) {
      Assert.assertEquals(
          result.toString(),
          Doubles.tryParse(expectedIndexToUse[count]).doubleValue(),
          value.getDoubleMetric("index").doubleValue(),
          value.getDoubleMetric("index").doubleValue() * 1e-6
      );
      Assert.assertEquals(
          result.toString(),
          new Double(expectedIndexToUse[count]) + 13L + 1L,
          value.getDoubleMetric("addRowsIndexConstant"),
          value.getDoubleMetric("addRowsIndexConstant") * 1e-6
      );
      Assert.assertEquals(value.getDoubleMetric("uniques"), 9.0d, 0.02);
    } else {
      if (NullHandling.replaceWithDefault()) {
        Assert.assertEquals(
            result.toString(),
            0.0D,
            value.getDoubleMetric("index").doubleValue(),
            value.getDoubleMetric("index").doubleValue() * 1e-6
        );
        Assert.assertEquals(
            result.toString(),
            new Double(expectedIndexToUse[count]) + 1L,
            value.getDoubleMetric("addRowsIndexConstant"),
            value.getDoubleMetric("addRowsIndexConstant") * 1e-6
        );
        Assert.assertEquals(0.0D, value.getDoubleMetric("uniques"), 0.02);
      } else {
        Assert.assertNull(result.toString(), value.getDoubleMetric("index"));
        Assert.assertNull(result.toString(), value.getDoubleMetric("addRowsIndexConstant"));
        Assert.assertEquals(value.getDoubleMetric("uniques"), 0.0d, 0.02);
      }
    }
    lastResult = result;
    ++count;
  }
  Assert.assertEquals(lastResult.toString(), expectedLast, lastResult.getTimestamp());
}
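The day-level bucketing that drives the per-result loop above comes from the granularity itself. A small sketch, assuming Granularity's bucketStart(DateTime) helper and the DateTimes utility used elsewhere in these tests:

// DAY granularity truncates a timestamp to the start of its UTC day bucket.
DateTime ts = DateTimes.of("2011-04-15T13:45:00Z");
DateTime dayStart = Granularities.DAY.bucketStart(ts);   // 2011-04-15T00:00:00.000Z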