Example 6 with HllSketch

Use of org.apache.datasketches.hll.HllSketch in project druid by druid-io.

From the class HllSketchMergeVectorAggregator, method aggregate:

@Override
public void aggregate(final ByteBuffer buf, final int numRows, final int[] positions, @Nullable final int[] rows, final int positionOffset) {
    final Object[] vector = objectSupplier.get();
    for (int i = 0; i < numRows; i++) {
        // "rows" is an optional selection vector; when null, rows are read in order.
        final HllSketch o = (HllSketch) vector[rows != null ? rows[i] : i];
        if (o != null) {
            final int position = positions[i] + positionOffset;
            // Wrap the aggregation slot in place (no copy) and merge the incoming sketch into it.
            final WritableMemory mem = WritableMemory.writableWrap(buf, ByteOrder.LITTLE_ENDIAN).writableRegion(position, helper.getSize());
            final Union union = Union.writableWrap(mem);
            union.update(o);
        }
    }
}
Also used : HllSketch(org.apache.datasketches.hll.HllSketch) WritableMemory(org.apache.datasketches.memory.WritableMemory) Union(org.apache.datasketches.hll.Union)
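The aggregator above never copies sketch state onto the heap: it wraps a slice of the shared aggregation buffer and updates the union in place. Below is a minimal standalone sketch of the same off-heap pattern, assuming a heap-backed WritableMemory region sized with HllSketch.getMaxUpdatableSerializationBytes (the role Druid's helper.getSize() plays above); lgK = 12 is an illustrative value.

import org.apache.datasketches.hll.HllSketch;
import org.apache.datasketches.hll.TgtHllType;
import org.apache.datasketches.hll.Union;
import org.apache.datasketches.memory.WritableMemory;

public class OffHeapUnionSketch {
    public static void main(String[] args) {
        final int lgK = 12;
        // Unions are maintained internally as HLL_8, so size the region for that type.
        final int size = HllSketch.getMaxUpdatableSerializationBytes(lgK, TgtHllType.HLL_8);
        final WritableMemory mem = WritableMemory.allocate(size);

        // Initialize the union directly inside the memory region, as the aggregator's init step would.
        final Union union = new Union(lgK, mem);

        // Merge an incoming sketch, as aggregate() does per row.
        final HllSketch incoming = new HllSketch(lgK);
        incoming.update("some-dimension-value");
        union.update(incoming);

        // A later pass can re-wrap the same region without copying, exactly like the loop body above.
        final Union reWrapped = Union.writableWrap(mem);
        System.out.println(reWrapped.getEstimate());
    }
}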

Example 7 with HllSketch

Use of org.apache.datasketches.hll.HllSketch in project druid by druid-io.

From the class HllSketchUnionPostAggregator, method compute:

@Override
public HllSketch compute(final Map<String, Object> combinedAggregators) {
    final Union union = new Union(lgK);
    // Each field is itself a post-aggregator that yields an HllSketch; union them all.
    for (final PostAggregator field : fields) {
        final HllSketch sketch = (HllSketch) field.compute(combinedAggregators);
        union.update(sketch);
    }
    return union.getResult(tgtHllType);
}
Also used : HllSketch(org.apache.datasketches.hll.HllSketch) PostAggregator(org.apache.druid.query.aggregation.PostAggregator) Union(org.apache.datasketches.hll.Union)
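A self-contained sketch of the same union-then-materialize pattern, with two hand-built sketches standing in for the post-aggregator fields (the lgK and the value ranges are illustrative, not Druid defaults):

import org.apache.datasketches.hll.HllSketch;
import org.apache.datasketches.hll.TgtHllType;
import org.apache.datasketches.hll.Union;

public class UnionPostAggSketch {
    public static void main(String[] args) {
        final int lgK = 12;
        final HllSketch a = new HllSketch(lgK);
        final HllSketch b = new HllSketch(lgK);
        // Overlapping id ranges: 0..999 and 500..1499, so the true union is 1500.
        for (long i = 0; i < 1000; i++) {
            a.update(i);
        }
        for (long i = 500; i < 1500; i++) {
            b.update(i);
        }

        // Union the per-field sketches, as compute() does over its fields.
        final Union union = new Union(lgK);
        union.update(a);
        union.update(b);

        // getResult materializes the union as a sketch of the requested target type.
        final HllSketch merged = union.getResult(TgtHllType.HLL_4);
        System.out.printf("estimate ~ %.0f (true 1500)%n", merged.getEstimate());
    }
}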

Example 8 with HllSketch

Use of org.apache.datasketches.hll.HllSketch in project druid by druid-io.

From the class ParallelIndexSupervisorTask, method mergeCardinalityReports:

private static Map<Interval, Union> mergeCardinalityReports(Collection<DimensionCardinalityReport> reports) {
    Map<Interval, Union> finalCollectors = new HashMap<>();
    reports.forEach(report -> {
        Map<Interval, byte[]> intervalToCardinality = report.getIntervalToCardinalities();
        for (Map.Entry<Interval, byte[]> entry : intervalToCardinality.entrySet()) {
            // Each subtask reports its per-interval sketch as a compact byte array;
            // wrap it read-only and merge it into that interval's union.
            HllSketch entryHll = HllSketch.wrap(Memory.wrap(entry.getValue()));
            finalCollectors.computeIfAbsent(entry.getKey(), k -> new Union(DimensionCardinalityReport.HLL_SKETCH_LOG_K)).update(entryHll);
        }
    });
    return finalCollectors;
}
Also used : HllSketch(org.apache.datasketches.hll.HllSketch) Union(org.apache.datasketches.hll.Union) Memory(org.apache.datasketches.memory.Memory) Interval(org.joda.time.Interval) Collection(java.util.Collection) Map(java.util.Map) HashMap(java.util.HashMap) Entry(java.util.Map.Entry)
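A minimal sketch of the serialize/wrap round trip this method depends on: each subtask ships its per-interval sketch as a compact byte array, and the supervisor wraps the bytes read-only via Memory.wrap before merging, with no deserialization copy. The lgK of 11 is a stand-in for DimensionCardinalityReport.HLL_SKETCH_LOG_K; the real constant lives in Druid.

import org.apache.datasketches.hll.HllSketch;
import org.apache.datasketches.hll.Union;
import org.apache.datasketches.memory.Memory;

public class MergeSerializedSketches {
    public static void main(String[] args) {
        final int lgK = 11; // stand-in for DimensionCardinalityReport.HLL_SKETCH_LOG_K
        final HllSketch reported = new HllSketch(lgK);
        reported.update("row-key-1");
        reported.update("row-key-2");

        // The subtask side: compact serialization for the report payload.
        final byte[] bytes = reported.toCompactByteArray();

        // The supervisor side: wrap read-only and merge into the interval's union.
        final HllSketch wrapped = HllSketch.wrap(Memory.wrap(bytes));
        final Union union = new Union(lgK);
        union.update(wrapped);
        System.out.println(union.getEstimate());
    }
}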

Example 9 with HllSketch

Use of org.apache.datasketches.hll.HllSketch in project druid by druid-io.

From the class PartialDimensionCardinalityTask, method determineCardinalities:

private Map<Interval, byte[]> determineCardinalities(CloseableIterator<InputRow> inputRowIterator, GranularitySpec granularitySpec) {
    Map<Interval, HllSketch> intervalToCardinalities = new HashMap<>();
    while (inputRowIterator.hasNext()) {
        InputRow inputRow = inputRowIterator.next();
        // null rows are filtered out by FilteringCloseableInputRowIterator
        DateTime timestamp = inputRow.getTimestamp();
        final Interval interval;
        if (granularitySpec.inputIntervals().isEmpty()) {
            interval = granularitySpec.getSegmentGranularity().bucket(timestamp);
        } else {
            final Optional<Interval> optInterval = granularitySpec.bucketInterval(timestamp);
            // this interval must exist since it passed the rowFilter
            assert optInterval.isPresent();
            interval = optInterval.get();
        }
        Granularity queryGranularity = granularitySpec.getQueryGranularity();
        HllSketch hllSketch = intervalToCardinalities.computeIfAbsent(interval, (intervalKey) -> DimensionCardinalityReport.createHllSketchForReport());
        // For cardinality estimation, we want to consider unique rows instead of unique hash buckets and therefore
        // we do not use partition dimensions in computing the group key
        List<Object> groupKey = HashPartitioner.extractKeys(Collections.emptyList(), queryGranularity.bucketStart(timestamp).getMillis(), inputRow);
        try {
            hllSketch.update(jsonMapper.writeValueAsBytes(groupKey));
        } catch (JsonProcessingException jpe) {
            throw new RuntimeException(jpe);
        }
    }
    // Serialize the collectors for sending to the supervisor task
    Map<Interval, byte[]> newMap = new HashMap<>();
    for (Map.Entry<Interval, HllSketch> entry : intervalToCardinalities.entrySet()) {
        newMap.put(entry.getKey(), entry.getValue().toCompactByteArray());
    }
    return newMap;
}
Also used : HllSketch(org.apache.datasketches.hll.HllSketch) HashMap(java.util.HashMap) Granularity(org.apache.druid.java.util.common.granularity.Granularity) DateTime(org.joda.time.DateTime) InputRow(org.apache.druid.data.input.InputRow) JsonProcessingException(com.fasterxml.jackson.core.JsonProcessingException) HashMap(java.util.HashMap) Map(java.util.Map) Interval(org.joda.time.Interval)
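The essential move is that each distinct (time bucket, dimension values) group key updates the sketch once, so duplicate rows collapse into a single count. A simplified sketch of that flow, using a plain Jackson ObjectMapper and hard-coded group keys in place of Druid's input rows:

import java.util.Arrays;
import java.util.List;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.datasketches.hll.HllSketch;

public class GroupKeyCardinality {
    public static void main(String[] args) throws Exception {
        final ObjectMapper jsonMapper = new ObjectMapper();
        final HllSketch sketch = new HllSketch(11);

        // Group keys: (bucketed timestamp millis, dimension values...).
        // The duplicate key should count only once toward the cardinality estimate.
        final List<List<Object>> groupKeys = Arrays.asList(
            Arrays.asList(0L, "us", "chrome"),
            Arrays.asList(0L, "us", "chrome"),
            Arrays.asList(0L, "de", "firefox")
        );
        for (List<Object> key : groupKeys) {
            sketch.update(jsonMapper.writeValueAsBytes(key));
        }
        System.out.printf("distinct keys ~ %.0f (true 2)%n", sketch.getEstimate());

        // The compact form is what gets attached to the DimensionCardinalityReport.
        System.out.println("report payload bytes: " + sketch.toCompactByteArray().length);
    }
}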

Example 10 with HllSketch

Use of org.apache.datasketches.hll.HllSketch in project druid by druid-io.

From the class DimensionCardinalityReportTest, method testSupervisorDetermineNumShardsFromCardinalityReport:

@Test
public void testSupervisorDetermineNumShardsFromCardinalityReport() {
    List<DimensionCardinalityReport> reports = new ArrayList<>();
    HllSketch collector1 = DimensionCardinalityReport.createHllSketchForReport();
    collector1.update(IndexTask.HASH_FUNCTION.hashLong(1L).asBytes());
    collector1.update(IndexTask.HASH_FUNCTION.hashLong(200L).asBytes());
    DimensionCardinalityReport report1 = new DimensionCardinalityReport("taskA", ImmutableMap.of(Intervals.of("1970-01-01T00:00:00.000Z/1970-01-02T00:00:00.000Z"), collector1.toCompactByteArray()));
    reports.add(report1);
    HllSketch collector2 = DimensionCardinalityReport.createHllSketchForReport();
    collector2.update(IndexTask.HASH_FUNCTION.hashLong(1000L).asBytes());
    collector2.update(IndexTask.HASH_FUNCTION.hashLong(30000L).asBytes());
    DimensionCardinalityReport report2 = new DimensionCardinalityReport("taskB", ImmutableMap.of(Intervals.of("1970-01-01T00:00:00.000Z/1970-01-02T00:00:00.000Z"), collector2.toCompactByteArray()));
    reports.add(report2);
    // Separate interval with only 1 value
    HllSketch collector3 = DimensionCardinalityReport.createHllSketchForReport();
    collector3.update(IndexTask.HASH_FUNCTION.hashLong(99000L).asBytes());
    DimensionCardinalityReport report3 = new DimensionCardinalityReport("taskC", ImmutableMap.of(Intervals.of("1970-01-02T00:00:00.000Z/1970-01-03T00:00:00.000Z"), collector3.toCompactByteArray()));
    reports.add(report3);
    // first interval in test has cardinality 4
    Map<Interval, Integer> intervalToNumShards = ParallelIndexSupervisorTask.determineNumShardsFromCardinalityReport(reports, 1);
    Assert.assertEquals(ImmutableMap.of(Intervals.of("1970-01-01/P1D"), 4, Intervals.of("1970-01-02/P1D"), 1), intervalToNumShards);
    intervalToNumShards = ParallelIndexSupervisorTask.determineNumShardsFromCardinalityReport(reports, 2);
    Assert.assertEquals(ImmutableMap.of(Intervals.of("1970-01-01/P1D"), 2, Intervals.of("1970-01-02/P1D"), 1), intervalToNumShards);
    intervalToNumShards = ParallelIndexSupervisorTask.determineNumShardsFromCardinalityReport(reports, 3);
    Assert.assertEquals(ImmutableMap.of(Intervals.of("1970-01-01/P1D"), 1, Intervals.of("1970-01-02/P1D"), 1), intervalToNumShards);
    intervalToNumShards = ParallelIndexSupervisorTask.determineNumShardsFromCardinalityReport(reports, 4);
    Assert.assertEquals(ImmutableMap.of(Intervals.of("1970-01-01/P1D"), 1, Intervals.of("1970-01-02/P1D"), 1), intervalToNumShards);
    intervalToNumShards = ParallelIndexSupervisorTask.determineNumShardsFromCardinalityReport(reports, 5);
    Assert.assertEquals(ImmutableMap.of(Intervals.of("1970-01-01/P1D"), 1, Intervals.of("1970-01-02/P1D"), 1), intervalToNumShards);
}
Also used : HllSketch(org.apache.datasketches.hll.HllSketch) ArrayList(java.util.ArrayList) Interval(org.joda.time.Interval) Test(org.junit.Test)
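The assertions suggest how shard counts fall out of the merged estimates: roughly the interval's cardinality divided by maxRowsPerSegment, floored, with a minimum of one shard. Here is a hypothetical reimplementation that reproduces the asserted values; the numShards rule is inferred from this test, not copied from ParallelIndexSupervisorTask.

import org.apache.datasketches.hll.HllSketch;
import org.apache.datasketches.hll.Union;

public class ShardCountSketch {
    // Inferred rule: floor(cardinality / maxRowsPerSegment), but never fewer than one shard.
    static int numShards(Union mergedPerInterval, long maxRowsPerSegment) {
        return Math.max(1, (int) (mergedPerInterval.getEstimate() / maxRowsPerSegment));
    }

    public static void main(String[] args) {
        final int lgK = 11; // stand-in for the report's sketch size
        final Union merged = new Union(lgK);
        final HllSketch taskA = new HllSketch(lgK);
        taskA.update(1L);
        taskA.update(200L);
        final HllSketch taskB = new HllSketch(lgK);
        taskB.update(1000L);
        taskB.update(30000L);
        merged.update(taskA);
        merged.update(taskB);

        // Merged cardinality 4: expect 4 shards at maxRows 1, 2 at 2, then 1 from 3 upward.
        for (long maxRows : new long[]{1, 2, 3, 4, 5}) {
            System.out.println(maxRows + " -> " + numShards(merged, maxRows));
        }
    }
}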

Aggregations

HllSketch (org.apache.datasketches.hll.HllSketch): 11
Union (org.apache.datasketches.hll.Union): 5
WritableMemory (org.apache.datasketches.memory.WritableMemory): 3
Interval (org.joda.time.Interval): 3
ArrayList (java.util.ArrayList): 2
HashMap (java.util.HashMap): 2
Map (java.util.Map): 2
ThreadLocalRandom (java.util.concurrent.ThreadLocalRandom): 2
Nullable (javax.annotation.Nullable): 2
Granularity (org.apache.druid.java.util.common.granularity.Granularity): 2
DateTime (org.joda.time.DateTime): 2
JsonCreator (com.fasterxml.jackson.annotation.JsonCreator): 1
JsonProperty (com.fasterxml.jackson.annotation.JsonProperty): 1
JsonProcessingException (com.fasterxml.jackson.core.JsonProcessingException): 1
SmileMediaTypes (com.fasterxml.jackson.jaxrs.smile.SmileMediaTypes): 1
VisibleForTesting (com.google.common.annotations.VisibleForTesting): 1
Preconditions (com.google.common.base.Preconditions): 1
Throwables (com.google.common.base.Throwables): 1
ArrayListMultimap (com.google.common.collect.ArrayListMultimap): 1
ImmutableMap (com.google.common.collect.ImmutableMap): 1