Example 1 with Granularity

Use of org.apache.druid.java.util.common.granularity.Granularity in project druid by druid-io.

From class QueryGranularityTest, method testStandardGranularitiesSerde.

@Test
public void testStandardGranularitiesSerde() throws Exception {
    ObjectMapper mapper = new DefaultObjectMapper();
    for (GranularityType granularityType : GranularityType.values()) {
        final Granularity granularity = granularityType.getDefaultGranularity();
        Assert.assertEquals(granularity, mapper.readValue("\"" + StringUtils.toUpperCase(granularityType.name()) + "\"", Granularity.class));
        Assert.assertEquals(granularity, mapper.readValue("\"" + StringUtils.toLowerCase(granularityType.name()) + "\"", Granularity.class));
        Assert.assertEquals(granularity, mapper.readValue(mapper.writeValueAsString(granularity), Granularity.class));
        if (granularityType == GranularityType.ALL || granularityType == GranularityType.NONE) {
            Assert.assertEquals("{\"type\":\"" + StringUtils.toLowerCase(granularityType.name()) + "\"}", mapper.writeValueAsString(granularity));
        } else {
            Assert.assertEquals("\"" + StringUtils.toUpperCase(granularityType.name()) + "\"", mapper.writeValueAsString(granularity));
        }
    }
}
Also used : GranularityType(org.apache.druid.java.util.common.granularity.GranularityType) DefaultObjectMapper(org.apache.druid.jackson.DefaultObjectMapper) Granularity(org.apache.druid.java.util.common.granularity.Granularity) PeriodGranularity(org.apache.druid.java.util.common.granularity.PeriodGranularity) DurationGranularity(org.apache.druid.java.util.common.granularity.DurationGranularity) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) Test(org.junit.Test)
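
The round-trip behavior asserted above is easy to confirm outside the test harness. A minimal sketch, assuming druid-core (for DefaultObjectMapper and Granularity) and Jackson are on the classpath:

import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.druid.jackson.DefaultObjectMapper;
import org.apache.druid.java.util.common.granularity.Granularity;

public class GranularitySerdeSketch {
    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new DefaultObjectMapper();
        // Both case forms deserialize to the same standard granularity...
        Granularity fromLower = mapper.readValue("\"day\"", Granularity.class);
        Granularity fromUpper = mapper.readValue("\"DAY\"", Granularity.class);
        System.out.println(fromLower.equals(fromUpper)); // true
        // ...and standard granularities other than ALL and NONE serialize back
        // to the upper-case string form, as the test asserts.
        System.out.println(mapper.writeValueAsString(fromLower)); // "DAY"
    }
}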

Example 2 with Granularity

Use of org.apache.druid.java.util.common.granularity.Granularity in project druid by druid-io.

From class QueryGranularityTest, method testSerializePeriod.

@Test
public void testSerializePeriod() throws Exception {
    final ObjectMapper mapper = new DefaultObjectMapper();
    String json = "{ \"type\": \"period\", \"period\": \"P1D\" }";
    Granularity gran = mapper.readValue(json, Granularity.class);
    Assert.assertEquals(new PeriodGranularity(new Period("P1D"), null, null), gran);
    // Nonstandard period
    json = "{ \"type\": \"period\", \"period\": \"P2D\" }";
    gran = mapper.readValue(json, Granularity.class);
    Assert.assertEquals(new PeriodGranularity(new Period("P2D"), null, null), gran);
    // Set timeZone, origin
    json = "{ \"type\": \"period\", \"period\": \"P1D\"," + "\"timeZone\": \"America/Los_Angeles\", \"origin\": \"1970-01-01T00:00:00Z\"}";
    gran = mapper.readValue(json, Granularity.class);
    Assert.assertEquals(new PeriodGranularity(new Period("P1D"), DateTimes.EPOCH, DateTimes.inferTzFromString("America/Los_Angeles")), gran);
    PeriodGranularity expected = new PeriodGranularity(new Period("P1D"), DateTimes.of("2012-01-01"), DateTimes.inferTzFromString("America/Los_Angeles"));
    String jsonOut = mapper.writeValueAsString(expected);
    Assert.assertEquals(expected, mapper.readValue(jsonOut, Granularity.class));
    String illegalJson = "{ \"type\": \"period\", \"period\": \"P0D\" }";
    try {
        mapper.readValue(illegalJson, Granularity.class);
        Assert.fail();
    } catch (JsonMappingException e) {
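        // expected: a zero-length period ("P0D") is rejected during deserialization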
    }
}
Also used : JsonMappingException(com.fasterxml.jackson.databind.JsonMappingException) PeriodGranularity(org.apache.druid.java.util.common.granularity.PeriodGranularity) Period(org.joda.time.Period) DefaultObjectMapper(org.apache.druid.jackson.DefaultObjectMapper) Granularity(org.apache.druid.java.util.common.granularity.Granularity) DurationGranularity(org.apache.druid.java.util.common.granularity.DurationGranularity) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) Test(org.junit.Test)
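
The constructor arguments mirror the JSON fields: period, then origin, then time zone, with nulls meaning defaults. A minimal sketch of building the granularity from the test's last assertion programmatically:

import org.apache.druid.java.util.common.DateTimes;
import org.apache.druid.java.util.common.granularity.PeriodGranularity;
import org.joda.time.Period;

public class PeriodGranularitySketch {
    public static void main(String[] args) {
        // Daily buckets aligned to an origin of 2012-01-01, evaluated in Los Angeles time.
        PeriodGranularity daily = new PeriodGranularity(
            new Period("P1D"),
            DateTimes.of("2012-01-01"),
            DateTimes.inferTzFromString("America/Los_Angeles"));
        System.out.println(daily);
    }
}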

Example 3 with Granularity

Use of org.apache.druid.java.util.common.granularity.Granularity in project druid by druid-io.

From class CompactSegments, method run.

@Override
public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
    LOG.info("Compact segments");
    final CoordinatorCompactionConfig dynamicConfig = params.getCoordinatorCompactionConfig();
    final CoordinatorStats stats = new CoordinatorStats();
    List<DataSourceCompactionConfig> compactionConfigList = dynamicConfig.getCompactionConfigs();
    if (dynamicConfig.getMaxCompactionTaskSlots() > 0) {
        Map<String, VersionedIntervalTimeline<String, DataSegment>> dataSources = params.getUsedSegmentsTimelinesPerDataSource();
        if (compactionConfigList != null && !compactionConfigList.isEmpty()) {
            Map<String, DataSourceCompactionConfig> compactionConfigs = compactionConfigList.stream().collect(Collectors.toMap(DataSourceCompactionConfig::getDataSource, Function.identity()));
            final List<TaskStatusPlus> compactionTasks = filterNonCompactionTasks(indexingServiceClient.getActiveTasks());
            // dataSource -> list of intervals for which compaction will be skipped in this run
            final Map<String, List<Interval>> intervalsToSkipCompaction = new HashMap<>();
            int numEstimatedNonCompleteCompactionTasks = 0;
            for (TaskStatusPlus status : compactionTasks) {
                final TaskPayloadResponse response = indexingServiceClient.getTaskPayload(status.getId());
                if (response == null) {
                    throw new ISE("Got a null paylord from overlord for task[%s]", status.getId());
                }
                if (COMPACTION_TASK_TYPE.equals(response.getPayload().getType())) {
                    final ClientCompactionTaskQuery compactionTaskQuery = (ClientCompactionTaskQuery) response.getPayload();
                    DataSourceCompactionConfig dataSourceCompactionConfig = compactionConfigs.get(status.getDataSource());
                    if (dataSourceCompactionConfig != null && dataSourceCompactionConfig.getGranularitySpec() != null) {
                        Granularity configuredSegmentGranularity = dataSourceCompactionConfig.getGranularitySpec().getSegmentGranularity();
                        if (configuredSegmentGranularity != null && compactionTaskQuery.getGranularitySpec() != null && !configuredSegmentGranularity.equals(compactionTaskQuery.getGranularitySpec().getSegmentGranularity())) {
                            // We will cancel active compaction task if segmentGranularity changes and we will need to
                            // re-compact the interval
                            LOG.info("Canceled task[%s] as task segmentGranularity is [%s] but compaction config " + "segmentGranularity is [%s]", status.getId(), compactionTaskQuery.getGranularitySpec().getSegmentGranularity(), configuredSegmentGranularity);
                            indexingServiceClient.cancelTask(status.getId());
                            continue;
                        }
                    }
                    // Skip interval as the current active compaction task is good
                    final Interval interval = compactionTaskQuery.getIoConfig().getInputSpec().getInterval();
                    intervalsToSkipCompaction.computeIfAbsent(status.getDataSource(), k -> new ArrayList<>()).add(interval);
                    // Since we keep the current active compaction task running, we count the active task slots
                    numEstimatedNonCompleteCompactionTasks += findMaxNumTaskSlotsUsedByOneCompactionTask(compactionTaskQuery.getTuningConfig());
                } else {
                    throw new ISE("task[%s] is not a compactionTask", status.getId());
                }
            }
            // Skip all the intervals locked by higher priority tasks for each datasource
            // This must be done after the invalid compaction tasks are cancelled
            // in the loop above so that their intervals are not considered locked
            getLockedIntervalsToSkip(compactionConfigList).forEach((dataSource, intervals) -> intervalsToSkipCompaction.computeIfAbsent(dataSource, ds -> new ArrayList<>()).addAll(intervals));
            final CompactionSegmentIterator iterator = policy.reset(compactionConfigs, dataSources, intervalsToSkipCompaction);
            int totalCapacity;
            if (dynamicConfig.isUseAutoScaleSlots()) {
                try {
                    totalCapacity = indexingServiceClient.getTotalWorkerCapacityWithAutoScale();
                } catch (Exception e) {
                    LOG.warn("Failed to get total worker capacity with auto scale slots. Falling back to current capacity count");
                    totalCapacity = indexingServiceClient.getTotalWorkerCapacity();
                }
            } else {
                totalCapacity = indexingServiceClient.getTotalWorkerCapacity();
            }
            final int compactionTaskCapacity = (int) Math.min(totalCapacity * dynamicConfig.getCompactionTaskSlotRatio(), dynamicConfig.getMaxCompactionTaskSlots());
            final int numAvailableCompactionTaskSlots;
            if (numEstimatedNonCompleteCompactionTasks > 0) {
                numAvailableCompactionTaskSlots = Math.max(0, compactionTaskCapacity - numEstimatedNonCompleteCompactionTasks);
            } else {
                // compactionTaskCapacity might be 0 if totalWorkerCapacity is low.
                // This guarantees that at least one slot is available if
                // compaction is enabled and numEstimatedNonCompleteCompactionTasks is 0.
                numAvailableCompactionTaskSlots = Math.max(1, compactionTaskCapacity);
            }
            LOG.info("Found [%d] available task slots for compaction out of [%d] max compaction task capacity", numAvailableCompactionTaskSlots, compactionTaskCapacity);
            stats.addToGlobalStat(AVAILABLE_COMPACTION_TASK_SLOT, numAvailableCompactionTaskSlots);
            stats.addToGlobalStat(MAX_COMPACTION_TASK_SLOT, compactionTaskCapacity);
            final Map<String, AutoCompactionSnapshot.Builder> currentRunAutoCompactionSnapshotBuilders = new HashMap<>();
            if (numAvailableCompactionTaskSlots > 0) {
                stats.accumulate(doRun(compactionConfigs, currentRunAutoCompactionSnapshotBuilders, numAvailableCompactionTaskSlots, iterator));
            } else {
                stats.accumulate(makeStats(currentRunAutoCompactionSnapshotBuilders, 0, iterator));
            }
        } else {
            LOG.info("compactionConfig is empty. Skip.");
            autoCompactionSnapshotPerDataSource.set(new HashMap<>());
        }
    } else {
        LOG.info("maxCompactionTaskSlots was set to 0. Skip compaction");
        autoCompactionSnapshotPerDataSource.set(new HashMap<>());
    }
    return params.buildFromExisting().withCoordinatorStats(stats).build();
}
Also used : Logger(org.apache.druid.java.util.common.logger.Logger) Granularity(org.apache.druid.java.util.common.granularity.Granularity) Inject(com.google.inject.Inject) ClientCompactionTaskDimensionsSpec(org.apache.druid.client.indexing.ClientCompactionTaskDimensionsSpec) DruidCoordinatorRuntimeParams(org.apache.druid.server.coordinator.DruidCoordinatorRuntimeParams) IndexingServiceClient(org.apache.druid.client.indexing.IndexingServiceClient) HashMap(java.util.HashMap) CoordinatorStats(org.apache.druid.server.coordinator.CoordinatorStats) AtomicReference(java.util.concurrent.atomic.AtomicReference) Function(java.util.function.Function) AutoCompactionSnapshot(org.apache.druid.server.coordinator.AutoCompactionSnapshot) DataSourceCompactionConfig(org.apache.druid.server.coordinator.DataSourceCompactionConfig) ArrayList(java.util.ArrayList) TaskPayloadResponse(org.apache.druid.client.indexing.TaskPayloadResponse) Interval(org.joda.time.Interval) DruidCoordinatorConfig(org.apache.druid.server.coordinator.DruidCoordinatorConfig) Map(java.util.Map) IAE(org.apache.druid.java.util.common.IAE) DimensionRangePartitionsSpec(org.apache.druid.indexer.partitions.DimensionRangePartitionsSpec) Nullable(javax.annotation.Nullable) ClientCompactionTaskTransformSpec(org.apache.druid.client.indexing.ClientCompactionTaskTransformSpec) JacksonInject(com.fasterxml.jackson.annotation.JacksonInject) VersionedIntervalTimeline(org.apache.druid.timeline.VersionedIntervalTimeline) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) ClientCompactionTaskQueryTuningConfig(org.apache.druid.client.indexing.ClientCompactionTaskQueryTuningConfig) CompactionStatistics(org.apache.druid.server.coordinator.CompactionStatistics) CoordinatorCompactionConfig(org.apache.druid.server.coordinator.CoordinatorCompactionConfig) ISE(org.apache.druid.java.util.common.ISE) Collectors(java.util.stream.Collectors) ClientCompactionTaskQuery(org.apache.druid.client.indexing.ClientCompactionTaskQuery) TaskStatusPlus(org.apache.druid.indexer.TaskStatusPlus) List(java.util.List) ClientCompactionTaskGranularitySpec(org.apache.druid.client.indexing.ClientCompactionTaskGranularitySpec) GranularityType(org.apache.druid.java.util.common.granularity.GranularityType) JsonCreator(com.fasterxml.jackson.annotation.JsonCreator) DataSegment(org.apache.druid.timeline.DataSegment) VisibleForTesting(com.google.common.annotations.VisibleForTesting)
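
The task-slot arithmetic in the middle of run() is worth tracing with concrete numbers. A worked sketch with made-up inputs (the values are illustrative, not from the source):

public class CompactionSlotMath {
    public static void main(String[] args) {
        int totalCapacity = 10;                 // worker capacity reported by the indexing service
        double compactionTaskSlotRatio = 0.1;   // from the coordinator compaction config
        int maxCompactionTaskSlots = 5;         // from the coordinator compaction config
        int numEstimatedNonCompleteCompactionTasks = 0;

        int compactionTaskCapacity = (int) Math.min(
            totalCapacity * compactionTaskSlotRatio, maxCompactionTaskSlots); // min(1.0, 5) -> 1
        int numAvailableCompactionTaskSlots;
        if (numEstimatedNonCompleteCompactionTasks > 0) {
            numAvailableCompactionTaskSlots =
                Math.max(0, compactionTaskCapacity - numEstimatedNonCompleteCompactionTasks);
        } else {
            // The Math.max(1, ...) branch guarantees one slot even when the
            // ratio rounds the capacity down to 0, as the source comment notes.
            numAvailableCompactionTaskSlots = Math.max(1, compactionTaskCapacity);
        }
        System.out.println(numAvailableCompactionTaskSlots); // 1
    }
}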

Example 4 with Granularity

Use of org.apache.druid.java.util.common.granularity.Granularity in project druid by druid-io.

From class CalciteInsertDmlTest, method testPartitionedBySupportedClauses.

@Test
public void testPartitionedBySupportedClauses() {
    RowSignature targetRowSignature = RowSignature.builder().add("__time", ColumnType.LONG).add("dim1", ColumnType.STRING).build();
    Map<String, Granularity> partitionedByArgumentToGranularityMap =
        ImmutableMap.<String, Granularity>builder()
            .put("HOUR", Granularities.HOUR)
            .put("DAY", Granularities.DAY)
            .put("MONTH", Granularities.MONTH)
            .put("YEAR", Granularities.YEAR)
            .put("ALL", Granularities.ALL)
            .put("ALL TIME", Granularities.ALL)
            .put("FLOOR(__time TO QUARTER)", Granularities.QUARTER)
            .put("TIME_FLOOR(__time, 'PT1H')", Granularities.HOUR)
            .build();
    partitionedByArgumentToGranularityMap.forEach((partitionedByArgument, expectedGranularity) -> {
        Map<String, Object> queryContext = null;
        try {
            queryContext = ImmutableMap.of(DruidSqlInsert.SQL_INSERT_SEGMENT_GRANULARITY, queryJsonMapper.writeValueAsString(expectedGranularity));
        } catch (JsonProcessingException e) {
            // Won't reach here
            Assert.fail(e.getMessage());
        }
        testInsertQuery()
            .sql(StringUtils.format("INSERT INTO druid.dst SELECT __time, dim1 FROM foo PARTITIONED BY %s", partitionedByArgument))
            .expectTarget("dst", targetRowSignature)
            .expectResources(dataSourceRead("foo"), dataSourceWrite("dst"))
            .expectQuery(
                newScanQueryBuilder()
                    .dataSource("foo")
                    .intervals(querySegmentSpec(Filtration.eternity()))
                    .columns("__time", "dim1")
                    .context(queryContext)
                    .build())
            .verify();
        didTest = false;
    });
    didTest = true;
}
Also used : Granularity(org.apache.druid.java.util.common.granularity.Granularity) RowSignature(org.apache.druid.segment.column.RowSignature) JsonProcessingException(com.fasterxml.jackson.core.JsonProcessingException) Test(org.junit.Test)
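
The interesting part is how the PARTITIONED BY argument becomes a context entry: the resolved Granularity is serialized to JSON and stored under DruidSqlInsert.SQL_INSERT_SEGMENT_GRANULARITY. A minimal sketch of producing that entry; the literal key below is a placeholder for the constant, and the DefaultObjectMapper stands in for the test's queryJsonMapper:

import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.ImmutableMap;
import java.util.Map;
import org.apache.druid.jackson.DefaultObjectMapper;
import org.apache.druid.java.util.common.granularity.Granularities;

public class PartitionedByContextSketch {
    public static void main(String[] args) throws JsonProcessingException {
        ObjectMapper mapper = new DefaultObjectMapper();
        // "sqlInsertSegmentGranularity" stands in for DruidSqlInsert.SQL_INSERT_SEGMENT_GRANULARITY.
        Map<String, Object> queryContext = ImmutableMap.of(
            "sqlInsertSegmentGranularity",
            mapper.writeValueAsString(Granularities.DAY)); // "DAY", matching PARTITIONED BY DAY
        System.out.println(queryContext);
    }
}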

Example 5 with Granularity

Use of org.apache.druid.java.util.common.granularity.Granularity in project druid by druid-io.

From class TimeseriesQueryRunnerTest, method testTimeseriesWithTimestampResultFieldContextForMapResponse.

@Test
public void testTimeseriesWithTimestampResultFieldContextForMapResponse() {
    Granularity gran = Granularities.DAY;
    TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
        .dataSource(QueryRunnerTestHelper.DATA_SOURCE)
        .granularity(gran)
        .intervals(QueryRunnerTestHelper.FULL_ON_INTERVAL_SPEC)
        .aggregators(QueryRunnerTestHelper.ROWS_COUNT, QueryRunnerTestHelper.INDEX_DOUBLE_SUM, QueryRunnerTestHelper.QUALITY_UNIQUES)
        .postAggregators(QueryRunnerTestHelper.ADD_ROWS_INDEX_CONSTANT)
        .descending(descending)
        .context(makeContext(ImmutableMap.of(
            TimeseriesQuery.CTX_TIMESTAMP_RESULT_FIELD, TIMESTAMP_RESULT_FIELD_NAME,
            TimeseriesQuery.SKIP_EMPTY_BUCKETS, true)))
        .build();
    Assert.assertEquals(TIMESTAMP_RESULT_FIELD_NAME, query.getTimestampResultField());
    Iterable<Result<TimeseriesResultValue>> results = runner.run(QueryPlus.wrap(query)).toList();
    final String[] expectedIndex = descending ? QueryRunnerTestHelper.EXPECTED_FULL_ON_INDEX_VALUES_DESC : QueryRunnerTestHelper.EXPECTED_FULL_ON_INDEX_VALUES;
    final String[] expectedIndexToUse = Arrays.stream(expectedIndex).filter(eachIndex -> !"0.0".equals(eachIndex)).toArray(String[]::new);
    final DateTime expectedLast = descending ? QueryRunnerTestHelper.EARLIEST : QueryRunnerTestHelper.LAST;
    int count = 0;
    Result lastResult = null;
    for (Result<TimeseriesResultValue> result : results) {
        DateTime current = result.getTimestamp();
        Assert.assertFalse(StringUtils.format("Timestamp[%s] > expectedLast[%s]", current, expectedLast), descending ? current.isBefore(expectedLast) : current.isAfter(expectedLast));
        final TimeseriesResultValue value = result.getValue();
        Assert.assertEquals(value.getLongMetric(TIMESTAMP_RESULT_FIELD_NAME), current.getMillis(), 0);
        Assert.assertEquals(result.toString(), QueryRunnerTestHelper.SKIPPED_DAY.equals(current) ? 0L : 13L, value.getLongMetric("rows").longValue());
        if (!QueryRunnerTestHelper.SKIPPED_DAY.equals(current)) {
            Assert.assertEquals(result.toString(), Doubles.tryParse(expectedIndexToUse[count]).doubleValue(), value.getDoubleMetric("index").doubleValue(), value.getDoubleMetric("index").doubleValue() * 1e-6);
            Assert.assertEquals(result.toString(), new Double(expectedIndexToUse[count]) + 13L + 1L, value.getDoubleMetric("addRowsIndexConstant"), value.getDoubleMetric("addRowsIndexConstant") * 1e-6);
            Assert.assertEquals(value.getDoubleMetric("uniques"), 9.0d, 0.02);
        } else {
            if (NullHandling.replaceWithDefault()) {
                Assert.assertEquals(result.toString(), 0.0D, value.getDoubleMetric("index").doubleValue(), value.getDoubleMetric("index").doubleValue() * 1e-6);
                Assert.assertEquals(result.toString(), new Double(expectedIndexToUse[count]) + 1L, value.getDoubleMetric("addRowsIndexConstant"), value.getDoubleMetric("addRowsIndexConstant") * 1e-6);
                Assert.assertEquals(0.0D, value.getDoubleMetric("uniques"), 0.02);
            } else {
                Assert.assertNull(result.toString(), value.getDoubleMetric("index"));
                Assert.assertNull(result.toString(), value.getDoubleMetric("addRowsIndexConstant"));
                Assert.assertEquals(value.getDoubleMetric("uniques"), 0.0d, 0.02);
            }
        }
        lastResult = result;
        ++count;
    }
    Assert.assertEquals(lastResult.toString(), expectedLast, lastResult.getTimestamp());
}
Also used : QueryPlus(org.apache.druid.query.QueryPlus) DateTimeZone(org.joda.time.DateTimeZone) Arrays(java.util.Arrays) DefaultDimensionSpec(org.apache.druid.query.dimension.DefaultDimensionSpec) Druids(org.apache.druid.query.Druids) LongSumAggregatorFactory(org.apache.druid.query.aggregation.LongSumAggregatorFactory) SelectorDimFilter(org.apache.druid.query.filter.SelectorDimFilter) PeriodGranularity(org.apache.druid.java.util.common.granularity.PeriodGranularity) Map(java.util.Map) QueryRunner(org.apache.druid.query.QueryRunner) Parameterized(org.junit.runners.Parameterized) AndDimFilter(org.apache.druid.query.filter.AndDimFilter) DateTimes(org.apache.druid.java.util.common.DateTimes) Sequence(org.apache.druid.java.util.common.guava.Sequence) FinalizeResultsQueryRunner(org.apache.druid.query.FinalizeResultsQueryRunner) RegexDimFilter(org.apache.druid.query.filter.RegexDimFilter) HyperUniquesAggregatorFactory(org.apache.druid.query.aggregation.hyperloglog.HyperUniquesAggregatorFactory) ImmutableSet(com.google.common.collect.ImmutableSet) NotDimFilter(org.apache.druid.query.filter.NotDimFilter) ImmutableMap(com.google.common.collect.ImmutableMap) AggregatorFactory(org.apache.druid.query.aggregation.AggregatorFactory) StringUtils(org.apache.druid.java.util.common.StringUtils) TestExprMacroTable(org.apache.druid.query.expression.TestExprMacroTable) Collectors(java.util.stream.Collectors) QueryContexts(org.apache.druid.query.QueryContexts) List(java.util.List) QueryRunnerTestHelper(org.apache.druid.query.QueryRunnerTestHelper) Doubles(com.google.common.primitives.Doubles) DoubleFirstAggregatorFactory(org.apache.druid.query.aggregation.first.DoubleFirstAggregatorFactory) DoubleLastAggregatorFactory(org.apache.druid.query.aggregation.last.DoubleLastAggregatorFactory) Iterables(com.google.common.collect.Iterables) DoubleSumAggregatorFactory(org.apache.druid.query.aggregation.DoubleSumAggregatorFactory) Granularity(org.apache.druid.java.util.common.granularity.Granularity) Intervals(org.apache.druid.java.util.common.Intervals) FilteredAggregatorFactory(org.apache.druid.query.aggregation.FilteredAggregatorFactory) RunWith(org.junit.runner.RunWith) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) MapLookupExtractor(org.apache.druid.query.extraction.MapLookupExtractor) HumanReadableBytes(org.apache.druid.java.util.common.HumanReadableBytes) Interval(org.joda.time.Interval) Lists(com.google.common.collect.Lists) ImmutableList(com.google.common.collect.ImmutableList) LookupExtractionFn(org.apache.druid.query.lookup.LookupExtractionFn) DoubleMinAggregatorFactory(org.apache.druid.query.aggregation.DoubleMinAggregatorFactory) StringComparators(org.apache.druid.query.ordering.StringComparators) MultipleIntervalSegmentSpec(org.apache.druid.query.spec.MultipleIntervalSegmentSpec) StreamSupport(java.util.stream.StreamSupport) BoundDimFilter(org.apache.druid.query.filter.BoundDimFilter) ExpressionVirtualColumn(org.apache.druid.segment.virtual.ExpressionVirtualColumn) ExpectedException(org.junit.rules.ExpectedException) CountAggregatorFactory(org.apache.druid.query.aggregation.CountAggregatorFactory) DoubleMaxAggregatorFactory(org.apache.druid.query.aggregation.DoubleMaxAggregatorFactory) CardinalityAggregatorFactory(org.apache.druid.query.aggregation.cardinality.CardinalityAggregatorFactory) Period(org.joda.time.Period) ExpressionLambdaAggregatorFactory(org.apache.druid.query.aggregation.ExpressionLambdaAggregatorFactory) DateTime(org.joda.time.DateTime) QueryToolChest(org.apache.druid.query.QueryToolChest) InitializedNullHandlingTest(org.apache.druid.testing.InitializedNullHandlingTest) Test(org.junit.Test) Granularities(org.apache.druid.java.util.common.granularity.Granularities) Result(org.apache.druid.query.Result) TestHelper(org.apache.druid.segment.TestHelper) Rule(org.junit.Rule) NullHandling(org.apache.druid.common.config.NullHandling) RowSignature(org.apache.druid.segment.column.RowSignature) FieldAccessPostAggregator(org.apache.druid.query.aggregation.post.FieldAccessPostAggregator) ColumnType(org.apache.druid.segment.column.ColumnType) FloatSumAggregatorFactory(org.apache.druid.query.aggregation.FloatSumAggregatorFactory) InDimFilter(org.apache.druid.query.filter.InDimFilter) OrDimFilter(org.apache.druid.query.filter.OrDimFilter) Assert(org.junit.Assert) Collections(java.util.Collections)
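
The timestamp assertion above (the long metric under TIMESTAMP_RESULT_FIELD_NAME equals the result's bucket timestamp) comes down to what a Granularity does: map a time to the start of its bucket. A minimal sketch, assuming the bucketStart accessor on Granularity in this Druid version:

import org.apache.druid.java.util.common.DateTimes;
import org.apache.druid.java.util.common.granularity.Granularities;
import org.apache.druid.java.util.common.granularity.Granularity;
import org.joda.time.DateTime;

public class BucketStartSketch {
    public static void main(String[] args) {
        Granularity day = Granularities.DAY;
        // Any instant within the day maps to that day's bucket start.
        DateTime bucket = day.bucketStart(DateTimes.of("2012-01-01T13:45:00Z"));
        System.out.println(bucket); // 2012-01-01T00:00:00.000Z
    }
}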

Aggregations

Usage counts in project druid:

Granularity (org.apache.druid.java.util.common.granularity.Granularity): 58
Interval (org.joda.time.Interval): 27
ArrayList (java.util.ArrayList): 22
DateTime (org.joda.time.DateTime): 19
Test (org.junit.Test): 16
List (java.util.List): 14
Map (java.util.Map): 14
HashMap (java.util.HashMap): 13
Nullable (javax.annotation.Nullable): 12
PeriodGranularity (org.apache.druid.java.util.common.granularity.PeriodGranularity): 12
AggregatorFactory (org.apache.druid.query.aggregation.AggregatorFactory): 12
Period (org.joda.time.Period): 11
ISE (org.apache.druid.java.util.common.ISE): 8
Result (org.apache.druid.query.Result): 8
ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper): 7
ImmutableList (com.google.common.collect.ImmutableList): 7
VisibleForTesting (com.google.common.annotations.VisibleForTesting): 6
ClientCompactionTaskGranularitySpec (org.apache.druid.client.indexing.ClientCompactionTaskGranularitySpec): 6
LockGranularity (org.apache.druid.indexing.common.LockGranularity): 6
Sequence (org.apache.druid.java.util.common.guava.Sequence): 6