Use of org.apache.druid.segment.data.Offset in project druid by druid-io.
The class Historical1SimpleDoubleAggPooledTopNScannerPrototype, method scanAndAggregate.
/**
 * Any changes to this method should be coordinated with {@link TopNUtils}, {@link
 * PooledTopNAlgorithm#computeSpecializedScanAndAggregateImplementations} and downstream methods.
 *
 * It should be checked with a tool like https://github.com/AdoptOpenJDK/jitwatch that the C2 compiler output for
 * this method doesn't have any method calls in the while loop, i.e. that all method calls are inlined. To be able
 * to see the assembly of this method in JITWatch and similar tools, {@link
 * PooledTopNAlgorithm#SPECIALIZE_HISTORICAL_ONE_SIMPLE_DOUBLE_AGG_POOLED_TOPN} should be turned off. Note that in
 * this case the benchmark should be "naturally monomorphic", i.e. it should always execute this method with the
 * same runtime shape.
 *
 * If the while loop contains method calls that are not inlined, it should be considered a performance bug.
 */
@Override
public long scanAndAggregate(
    HistoricalDimensionSelector dimensionSelector,
    HistoricalColumnSelector metricSelector,
    SimpleDoubleBufferAggregator aggregator,
    int aggregatorSize,
    HistoricalCursor cursor,
    int[] positions,
    ByteBuffer resultsBuffer
)
{
  // See TopNUtils.copyOffset() for explanation
  Offset offset = (Offset) TopNUtils.copyOffset(cursor);
  long processedRows = 0;
  int positionToAllocate = 0;
  while (offset.withinBounds() && !Thread.currentThread().isInterrupted()) {
    int rowNum = offset.getOffset();
    double metric = metricSelector.getDouble(rowNum);
    final IndexedInts dimValues = dimensionSelector.getRow(rowNum);
    final int dimSize = dimValues.size();
    for (int i = 0; i < dimSize; i++) {
      int dimIndex = dimValues.get(i);
      int position = positions[dimIndex];
      if (position >= 0) {
        // A results-buffer slot is already allocated for this dimension value.
        aggregator.aggregate(resultsBuffer, position, metric);
      } else if (position == TopNAlgorithm.INIT_POSITION_VALUE) {
        // First time this dimension value is seen: allocate the next slot and initialize it.
        positions[dimIndex] = positionToAllocate;
        aggregator.putFirst(resultsBuffer, positionToAllocate, metric);
        positionToAllocate += aggregatorSize;
      }
    }
    processedRows++;
    offset.increment();
  }
  return processedRows;
}
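
The hot loop above follows the general Offset traversal contract: check withinBounds(), read the row number with getOffset(), then increment(). A minimal sketch of that pattern in isolation (OffsetScan and scanAllRows are illustrative names, not Druid API):

import java.util.function.IntConsumer;

import org.apache.druid.segment.data.Offset;

// Illustrative helper, not part of Druid: the canonical Offset traversal
// pattern that the specialized scanner above writes out inline.
final class OffsetScan
{
  static long scanAllRows(Offset offset, IntConsumer visitRow)
  {
    long processedRows = 0;
    while (offset.withinBounds() && !Thread.currentThread().isInterrupted()) {
      visitRow.accept(offset.getOffset()); // visit the current row number
      processedRows++;
      offset.increment();                  // advance to the next matching row
    }
    return processedRows;
  }
}

Note that the prototype deliberately writes the per-row work inline rather than taking a callback: a virtual call such as visitRow.accept inside the loop is exactly the kind of non-inlined call the Javadoc above warns about.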
Use of org.apache.druid.segment.data.Offset in project druid by druid-io.
The class BitmapOffsetTest, method testSanity.
@Test
public void testSanity()
{
  MutableBitmap mutable = factory.makeEmptyMutableBitmap();
  for (int val : TEST_VALS) {
    mutable.add(val);
  }
  ImmutableBitmap bitmap = factory.makeImmutableBitmap(mutable);
  final BitmapOffset offset = BitmapOffset.of(bitmap, descending, bitmap.size());
  final int[] expected = descending ? TEST_VALS_FLIP : TEST_VALS;

  int count = 0;
  while (offset.withinBounds()) {
    Assert.assertEquals(expected[count], offset.getOffset());

    // A cloned offset must iterate independently, starting from the same row.
    int cloneCount = count;
    Offset clonedOffset = offset.clone();
    while (clonedOffset.withinBounds()) {
      Assert.assertEquals(expected[cloneCount], clonedOffset.getOffset());
      ++cloneCount;
      clonedOffset.increment();
    }

    ++count;
    offset.increment();
  }
  Assert.assertEquals(count, expected.length);
}
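
The nested loop works because Offset.clone() returns an independent iterator positioned at the same row: advancing the clone never moves the original. A minimal sketch of that property (OffsetProbe and countRemaining are illustrative names, not Druid API):

import org.apache.druid.segment.data.Offset;

// Illustrative, not Druid API: counts the rows left from the current
// position by iterating a clone, leaving the caller's offset untouched.
final class OffsetProbe
{
  static int countRemaining(Offset offset)
  {
    Offset probe = offset.clone(); // independent cursor, same position
    int remaining = 0;
    while (probe.withinBounds()) {
      remaining++;
      probe.increment();
    }
    return remaining; // the original offset has not moved
  }
}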
Use of org.apache.druid.segment.data.Offset in project druid by druid-io.
The class HistoricalSingleValueDimSelector1SimpleDoubleAggPooledTopNScannerPrototype, method scanAndAggregate.
/**
 * Any changes to this method should be coordinated with {@link TopNUtils}, {@link
 * PooledTopNAlgorithm#computeSpecializedScanAndAggregateImplementations} and downstream methods.
 *
 * It should be checked with a tool like https://github.com/AdoptOpenJDK/jitwatch that the C2 compiler output for
 * this method doesn't have any method calls in the while loop, i.e. that all method calls are inlined. To be able
 * to see the assembly of this method in JITWatch and similar tools, {@link
 * PooledTopNAlgorithm#SPECIALIZE_HISTORICAL_SINGLE_VALUE_DIM_SELECTOR_ONE_SIMPLE_DOUBLE_AGG_POOLED_TOPN} should be
 * turned off. Note that in this case the benchmark should be "naturally monomorphic", i.e. it should always execute
 * this method with the same runtime shape.
 *
 * If the while loop contains method calls that are not inlined, it should be considered a performance bug.
 */
@Override
public long scanAndAggregate(
    SingleValueHistoricalDimensionSelector dimensionSelector,
    HistoricalColumnSelector metricSelector,
    SimpleDoubleBufferAggregator aggregator,
    int aggregatorSize,
    HistoricalCursor cursor,
    int[] positions,
    ByteBuffer resultsBuffer
)
{
  // See TopNUtils.copyOffset() for explanation
  Offset offset = (Offset) TopNUtils.copyOffset(cursor);
  long processedRows = 0;
  int positionToAllocate = 0;
  while (offset.withinBounds() && !Thread.currentThread().isInterrupted()) {
    int rowNum = offset.getOffset();
    // Single-value selector: exactly one dimension value per row, so no inner
    // loop over IndexedInts is needed, unlike the multi-value prototype above.
    int dimIndex = dimensionSelector.getRowValue(rowNum);
    int position = positions[dimIndex];
    if (position >= 0) {
      aggregator.aggregate(resultsBuffer, position, metricSelector.getDouble(rowNum));
    } else if (position == TopNAlgorithm.INIT_POSITION_VALUE) {
      positions[dimIndex] = positionToAllocate;
      aggregator.putFirst(resultsBuffer, positionToAllocate, metricSelector.getDouble(rowNum));
      positionToAllocate += aggregatorSize;
    }
    processedRows++;
    offset.increment();
  }
  return processedRows;
}
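
Both prototypes share the same lazy slot-allocation scheme: positions[] maps each dimension id to a byte offset into resultsBuffer, negative values are sentinels, and slots are handed out in first-seen order. (Checking specifically for INIT_POSITION_VALUE rather than any negative value leaves room for other negative sentinels, e.g. values excluded from the current pass.) A standalone sketch of the scheme, assuming a sentinel of -1 (SlotAllocator is an illustrative name, not Druid API):

import java.util.Arrays;

// Illustrative sketch, not Druid API: encapsulates the slot-allocation
// scheme used by both scanAndAggregate prototypes above.
final class SlotAllocator
{
  static final int UNALLOCATED = -1; // plays the role of TopNAlgorithm.INIT_POSITION_VALUE

  private final int[] positions;
  private final int slotSize;
  private int nextPosition = 0;

  SlotAllocator(int cardinality, int slotSize)
  {
    this.positions = new int[cardinality];
    Arrays.fill(positions, UNALLOCATED);
    this.slotSize = slotSize;
  }

  // Returns the buffer position for dimIndex, allocating a slot on first use.
  int positionOf(int dimIndex)
  {
    int position = positions[dimIndex];
    if (position == UNALLOCATED) {
      position = nextPosition;
      positions[dimIndex] = position;
      nextPosition += slotSize;
    }
    return position;
  }
}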
Use of org.apache.druid.segment.data.Offset in project druid by druid-io.
The class QueryableIndexCursorSequenceBuilder, method build.
public Sequence<Cursor> build(final Granularity gran)
{
  final Offset baseOffset;
  if (filterBitmap == null) {
    baseOffset = descending
                 ? new SimpleDescendingOffset(index.getNumRows())
                 : new SimpleAscendingOffset(index.getNumRows());
  } else {
    baseOffset = BitmapOffset.of(filterBitmap, descending, index.getNumRows());
  }

  // Column caches shared amongst all cursors in this sequence.
  final Map<String, BaseColumn> columnCache = new HashMap<>();

  final NumericColumn timestamps =
      (NumericColumn) index.getColumnHolder(ColumnHolder.TIME_COLUMN_NAME).getColumn();

  final Closer closer = Closer.create();
  closer.register(timestamps);

  Iterable<Interval> iterable = gran.getIterable(interval);
  if (descending) {
    iterable = Lists.reverse(ImmutableList.copyOf(iterable));
  }

  return Sequences.withBaggage(
      Sequences.map(
          Sequences.simple(iterable),
          new Function<Interval, Cursor>()
          {
            @Override
            public Cursor apply(final Interval inputInterval)
            {
              final long timeStart = Math.max(interval.getStartMillis(), inputInterval.getStartMillis());
              final long timeEnd = Math.min(interval.getEndMillis(), gran.increment(inputInterval.getStartMillis()));

              // Skip rows that fall before this bucket's time range. The base
              // offset is shared across buckets, so it picks up where the
              // previous bucket left off.
              if (descending) {
                for (; baseOffset.withinBounds(); baseOffset.increment()) {
                  if (timestamps.getLongSingleValueRow(baseOffset.getOffset()) < timeEnd) {
                    break;
                  }
                }
              } else {
                for (; baseOffset.withinBounds(); baseOffset.increment()) {
                  if (timestamps.getLongSingleValueRow(baseOffset.getOffset()) >= timeStart) {
                    break;
                  }
                }
              }

              // Wrap the base offset so iteration stops once timestamps leave
              // the bucket. The final boolean short-circuits the per-row check
              // when the whole segment is known to fit within the bound.
              final Offset offset = descending
                                    ? new DescendingTimestampCheckingOffset(baseOffset, timestamps, timeStart, minDataTimestamp >= timeStart)
                                    : new AscendingTimestampCheckingOffset(baseOffset, timestamps, timeEnd, maxDataTimestamp < timeEnd);

              final Offset baseCursorOffset = offset.clone();
              final ColumnSelectorFactory columnSelectorFactory = new QueryableIndexColumnSelectorFactory(
                  index,
                  virtualColumns,
                  descending,
                  closer,
                  baseCursorOffset.getBaseReadableOffset(),
                  columnCache
              );
              final DateTime myBucket = gran.toDateTime(inputInterval.getStartMillis());

              if (postFilter == null) {
                return new QueryableIndexCursor(baseCursorOffset, columnSelectorFactory, myBucket);
              } else {
                FilteredOffset filteredOffset = new FilteredOffset(
                    baseCursorOffset,
                    columnSelectorFactory,
                    descending,
                    postFilter,
                    bitmapIndexSelector
                );
                return new QueryableIndexCursor(filteredOffset, columnSelectorFactory, myBucket);
              }
            }
          }
      ),
      closer
  );
}
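
The timestamp-checking wrappers bound the cursor's iteration by time without scanning ahead. A simplified sketch of the ascending case (modeled on AscendingTimestampCheckingOffset but not the actual Druid class; the long[] stands in for NumericColumn.getLongSingleValueRow, and allWithinThreshold mirrors the maxDataTimestamp < timeEnd argument, which skips the per-row comparison when the whole segment is known to fit):

import org.apache.druid.segment.data.Offset;

// Simplified sketch, not the actual Druid class: an ascending offset that
// stops as soon as the current row's timestamp reaches timeEnd.
final class AscendingTimeCheckedOffset
{
  private final Offset baseOffset;
  private final long[] timestamps;           // stand-in for the __time column
  private final long timeEnd;
  private final boolean allWithinThreshold;  // i.e. maxDataTimestamp < timeEnd

  AscendingTimeCheckedOffset(Offset baseOffset, long[] timestamps, long timeEnd, boolean allWithinThreshold)
  {
    this.baseOffset = baseOffset;
    this.timestamps = timestamps;
    this.timeEnd = timeEnd;
    this.allWithinThreshold = allWithinThreshold;
  }

  boolean withinBounds()
  {
    // Skip the per-row timestamp load when the whole segment already fits.
    return baseOffset.withinBounds()
           && (allWithinThreshold || timestamps[baseOffset.getOffset()] < timeEnd);
  }

  int getOffset()
  {
    return baseOffset.getOffset();
  }

  void increment()
  {
    baseOffset.increment();
  }
}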