Use of io.questdb.cairo.vm.api.MemoryR in project questdb by bluestreak01.
The class TableReader, method minDouble.
public double minDouble(int columnIndex) {
    double min = Double.POSITIVE_INFINITY;
    for (int i = 0; i < partitionCount; i++) {
        openPartition(i);
        final int base = getColumnBase(i);
        final int index = getPrimaryColumnIndex(base, columnIndex);
        final MemoryR column = columns.getQuick(index);
        if (column != null) {
            // number of doubles held by each mapped page
            final long count = column.getPageSize() / Double.BYTES;
            for (int pageIndex = 0, pageCount = column.getPageCount(); pageIndex < pageCount; pageIndex++) {
                long a = column.getPageAddress(pageIndex);
                // vectorized minimum over one page
                double x = Vect.minDouble(a, count);
                if (x < min) {
                    min = x;
                }
            }
        }
    }
    return min;
}
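MemoryR also exposes offset-addressed reads alongside the page-level access used above. Below is a minimal scalar sketch of the same aggregation, reading one value at a time via getDouble instead of handing whole pages to Vect; minDoubleScalar is a hypothetical name, and the sketch assumes (as computeSize below shows) that openPartition returns the partition's row count and that the column stores rowCount contiguous 8-byte doubles:

public double minDoubleScalar(int columnIndex) {
    double min = Double.POSITIVE_INFINITY;
    for (int i = 0; i < partitionCount; i++) {
        // openPartition returns the partition's row count (see computeSize below)
        final long rowCount = openPartition(i);
        final MemoryR column = columns.getQuick(getPrimaryColumnIndex(getColumnBase(i), columnIndex));
        if (column != null) {
            for (long r = 0; r < rowCount; r++) {
                // offset-addressed read: row r lives at byte offset r * 8
                final double x = column.getDouble(r * Double.BYTES);
                if (x < min) {
                    min = x;
                }
            }
        }
    }
    return min;
}

The vectorized Vect.minDouble path in the original amortizes this loop over whole pages, which is why it iterates pages rather than rows.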
Use of io.questdb.cairo.vm.api.MemoryR in project questdb by bluestreak01.
The class AbstractIntervalDataFrameCursor, method computeSize.
private long computeSize() {
    int intervalsLo = this.intervalsLo;
    int intervalsHi = this.intervalsHi;
    int partitionLo = this.partitionLo;
    int partitionHi = this.partitionHi;
    long partitionLimit = this.partitionLimit;
    long size = this.sizeSoFar;
    while (intervalsLo < intervalsHi && partitionLo < partitionHi) {
        // We don't need to worry about column tops and null columns because we
        // are working with the timestamp column, which cannot be added to an existing table.
        long rowCount = reader.openPartition(partitionLo);
        if (rowCount > 0) {
            final MemoryR column = reader.getColumn(TableReader.getPrimaryColumnIndex(reader.getColumnBase(partitionLo), timestampIndex));
            final long intervalLo = intervals.getQuick(intervalsLo * 2);
            final long intervalHi = intervals.getQuick(intervalsLo * 2 + 1);
            final long partitionTimestampLo = column.getLong(0);
            // interval is wholly above partition, skip interval
            if (partitionTimestampLo > intervalHi) {
                intervalsLo++;
                continue;
            }
            final long partitionTimestampHi = column.getLong((rowCount - 1) * Long.BYTES);
            // interval is wholly below partition, skip partition
            if (partitionTimestampHi < intervalLo) {
                partitionLimit = 0;
                partitionLo++;
                continue;
            }
            // calculate intersection
            long lo;
            if (partitionTimestampLo == intervalLo) {
                lo = 0;
            } else {
                lo = search(column, intervalLo, partitionLimit, rowCount, AbstractIntervalDataFrameCursor.SCAN_UP);
                if (lo < 0) {
                    lo = -lo - 1;
                }
            }
            long hi = search(column, intervalHi, lo, rowCount, AbstractIntervalDataFrameCursor.SCAN_DOWN);
            if (hi < 0) {
                hi = -hi - 1;
            } else {
                // We have a direct hit. The interval is inclusive of its edges, so we
                // have to bump the high bound because it is exclusive.
                hi++;
            }
            if (lo < hi) {
                size += (hi - lo);
                // do we have a whole partition or only a fragment?
                if (hi == rowCount) {
                    // whole partition, will need to skip to the next one
                    partitionLimit = 0;
                    partitionLo++;
                } else {
                    // only a fragment, need to skip to the next interval
                    partitionLimit = hi;
                    intervalsLo++;
                }
                continue;
            }
            // interval yielded an empty data frame
            partitionLimit = hi;
            intervalsLo++;
        } else {
            // partition was empty, just skip to the next one
            partitionLo++;
        }
    }
    return this.size = size;
}
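The search helper above appears to follow the familiar java.util.Arrays.binarySearch convention: a non-negative result is a direct hit, and a negative result encodes the insertion point as -(insertionPoint) - 1, which the `-lo - 1` step decodes. A self-contained illustration of that decoding, using a plain array in place of the memory-mapped timestamp column:

import java.util.Arrays;

public class InsertionPointDemo {
    public static void main(String[] args) {
        final long[] timestamps = {10, 20, 30, 40};
        // 25 is absent, so binarySearch returns -(insertionPoint) - 1 = -3
        final int hit = Arrays.binarySearch(timestamps, 25L);
        // decode: index of the first element >= 25
        final int lo = hit < 0 ? -hit - 1 : hit;
        System.out.println(lo); // prints 2
    }
}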
Use of io.questdb.cairo.vm.api.MemoryR in project questdb by bluestreak01.
The class IntervalBwdDataFrameCursor, method next.
@Override
public DataFrame next() {
    // we do not calculate partition ranges when intervals are empty
    while (intervalsLo < intervalsHi && partitionLo < partitionHi) {
        // We don't need to worry about column tops and null columns because we
        // are working with the timestamp column, which cannot be added to an existing table.
        final int currentInterval = intervalsHi - 1;
        final int currentPartition = partitionHi - 1;
        long rowCount = reader.openPartition(currentPartition);
        if (rowCount > 0) {
            final MemoryR column = reader.getColumn(TableReader.getPrimaryColumnIndex(reader.getColumnBase(currentPartition), timestampIndex));
            final long intervalLo = intervals.getQuick(currentInterval * 2);
            final long intervalHi = intervals.getQuick(currentInterval * 2 + 1);
            final long limitHi;
            if (partitionLimit == -1) {
                limitHi = rowCount - 1;
            } else {
                limitHi = partitionLimit - 1;
            }
            final long partitionTimestampLo = column.getLong(0);
            LOG.debug().$("next [partition=").$(currentPartition).$(", intervalLo=").microTime(intervalLo).$(", intervalHi=").microTime(intervalHi).$(", partitionLo=").microTime(partitionTimestampLo).$(", limitHi=").$(limitHi).$(", rowCount=").$(rowCount).$(", currentInterval=").$(currentInterval).$(']').$();
            // interval is wholly above partition, skip partition
            if (partitionTimestampLo > intervalHi) {
                skipPartition(currentPartition);
                continue;
            }
            // interval is wholly below partition, skip interval
            final long partitionTimestampHi = column.getLong(limitHi * Long.BYTES);
            if (partitionTimestampHi < intervalLo) {
                skipInterval(currentInterval, limitHi + 1);
                continue;
            }
            // calculate intersection for the inclusive interval ["intervalLo", "intervalHi"]
            final long lo;
            if (partitionTimestampLo < intervalLo) {
                lo = BinarySearch.find(column, intervalLo - 1, 0, limitHi, BinarySearch.SCAN_DOWN) + 1;
            } else {
                lo = 0;
            }
            final long hi;
            if (partitionTimestampHi > intervalHi) {
                hi = BinarySearch.find(column, intervalHi, lo, limitHi, BinarySearch.SCAN_DOWN) + 1;
            } else {
                hi = limitHi + 1;
            }
            if (lo == 0) {
                // the frame reaches row 0 (or is empty); the partition is exhausted, skip it
                skipPartition(currentPartition);
            } else {
                // only a fragment, need to skip to the next interval
                skipInterval(currentInterval, lo);
            }
            if (lo < hi) {
                dataFrame.partitionIndex = currentPartition;
                dataFrame.rowLo = lo;
                dataFrame.rowHi = hi;
                sizeSoFar += hi - lo;
                return dataFrame;
            }
        } else {
            // partition was empty, just skip to the next one
            partitionLimit = -1;
            partitionHi = currentPartition;
        }
    }
    return null;
}
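A consumer of either interval cursor simply drains it until next() returns null; each frame describes a half-open [rowLo, rowHi) slice of a single partition. A minimal sketch of that loop, assuming a hypothetical cursor variable holding an instance of this class:

DataFrame frame;
long totalRows = 0;
while ((frame = cursor.next()) != null) {
    // each frame covers rows [rowLo, rowHi) of one partition
    totalRows += frame.getRowHi() - frame.getRowLo();
}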
Use of io.questdb.cairo.vm.api.MemoryR in project questdb by bluestreak01.
The class IntervalFwdDataFrameCursor, method next.
@Override
public DataFrame next() {
    // we do not calculate partition ranges when intervals are empty
    while (intervalsLo < intervalsHi && partitionLo < partitionHi) {
        // We don't need to worry about column tops and null columns because we
        // are working with the timestamp column, which cannot be added to an existing table.
        long rowCount = reader.openPartition(partitionLo);
        if (rowCount > 0) {
            final MemoryR column = reader.getColumn(TableReader.getPrimaryColumnIndex(reader.getColumnBase(partitionLo), timestampIndex));
            final long intervalLo = intervals.getQuick(intervalsLo * 2);
            final long intervalHi = intervals.getQuick(intervalsLo * 2 + 1);
            final long partitionTimestampLo = column.getLong(0);
            // interval is wholly above partition, skip interval
            if (partitionTimestampLo > intervalHi) {
                intervalsLo++;
                continue;
            }
            final long partitionTimestampHi = column.getLong((rowCount - 1) * Long.BYTES);
            // interval is wholly below partition, skip partition
            if (partitionTimestampHi < intervalLo) {
                partitionLimit = 0;
                partitionLo++;
                continue;
            }
            // calculate intersection
            long lo;
            if (partitionTimestampLo < intervalLo) {
                // intervalLo is inclusive. We look for the bottom index of intervalLo - 1
                // and then add 1 to land on the first row we need. We do not scan for the
                // exact value of intervalLo because it may not exist, in which case the
                // search would stop at the top of the next lower value.
                lo = BinarySearch.find(column, intervalLo - 1, partitionLimit, rowCount - 1, BinarySearch.SCAN_DOWN) + 1;
            } else {
                lo = 0;
            }
            final long hi;
            if (partitionTimestampHi > intervalHi) {
                hi = BinarySearch.find(column, intervalHi, lo, rowCount - 1, BinarySearch.SCAN_DOWN) + 1;
            } else {
                hi = rowCount;
            }
            if (lo < hi) {
                dataFrame.partitionIndex = partitionLo;
                dataFrame.rowLo = lo;
                dataFrame.rowHi = hi;
                sizeSoFar += (hi - lo);
                // do we have a whole partition or only a fragment?
                if (hi == rowCount) {
                    // whole partition, will need to skip to the next one
                    partitionLimit = 0;
                    partitionLo++;
                } else {
                    // only a fragment, need to skip to the next interval
                    partitionLimit = hi;
                    intervalsLo++;
                }
                return dataFrame;
            }
            // interval yielded an empty data frame
            partitionLimit = hi;
            intervalsLo++;
        } else {
            // partition was empty, just skip to the next one
            partitionLo++;
        }
    }
    return null;
}
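To see why the code searches for intervalLo - 1 with SCAN_DOWN and then adds 1, consider duplicated timestamps. Below is a small standalone model of that bound computation; the plain array stands in for the memory-mapped column, and scanDown is a hypothetical helper that mirrors the contract the code above assumes for BinarySearch.find with SCAN_DOWN (the last index in range whose value is <= the key, or lo - 1 when there is none):

public class LowerBoundDemo {
    // last index in [lo, hi] with ts[i] <= key, or lo - 1 if none
    // (mirrors the assumed SCAN_DOWN contract of BinarySearch.find)
    static long scanDown(long[] ts, long key, long lo, long hi) {
        long result = lo - 1;
        while (lo <= hi) {
            final long mid = (lo + hi) >>> 1;
            if (ts[(int) mid] <= key) {
                result = mid;
                lo = mid + 1;
            } else {
                hi = mid - 1;
            }
        }
        return result;
    }

    public static void main(String[] args) {
        final long[] ts = {10, 20, 20, 20, 30};
        final long intervalLo = 20;
        // searching intervalLo itself with SCAN_DOWN would land on index 3, the
        // LAST duplicate, skipping two matching rows; searching intervalLo - 1
        // and adding 1 lands on index 1, the first row inside the interval
        final long lo = scanDown(ts, intervalLo - 1, 0, ts.length - 1) + 1;
        System.out.println(lo); // prints 1
    }
}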
Use of io.questdb.cairo.vm.api.MemoryR in project questdb by bluestreak01.
The class LatestByAllIndexedRecordCursor, method buildTreeMap.
@Override
protected void buildTreeMap(SqlExecutionContext executionContext) throws SqlException {
    final MessageBus bus = executionContext.getMessageBus();
    final RingQueue<LatestByTask> queue = bus.getLatestByQueue();
    final Sequence pubSeq = bus.getLatestByPubSeq();
    final Sequence subSeq = bus.getLatestBySubSeq();
    int keyCount = getSymbolTable(columnIndex).size() + 1;
    rows.extend(keyCount);
    GeoHashNative.iota(rows.getAddress(), rows.getCapacity(), 0);
    final int workerCount = executionContext.getWorkerCount();
    final long chunkSize = (keyCount + workerCount - 1) / workerCount;
    final int taskCount = (int) ((keyCount + chunkSize - 1) / chunkSize);
    final long argumentsAddress = LatestByArguments.allocateMemoryArray(taskCount);
    for (long i = 0; i < taskCount; ++i) {
        final long klo = i * chunkSize;
        final long khi = Long.min(klo + chunkSize, keyCount);
        final long argsAddress = argumentsAddress + i * LatestByArguments.MEMORY_SIZE;
        LatestByArguments.setRowsAddress(argsAddress, rows.getAddress());
        LatestByArguments.setRowsCapacity(argsAddress, rows.getCapacity());
        LatestByArguments.setKeyLo(argsAddress, klo);
        LatestByArguments.setKeyHi(argsAddress, khi);
        LatestByArguments.setRowsSize(argsAddress, 0);
    }
    int hashColumnIndex = -1;
    int hashColumnType = ColumnType.UNDEFINED;
    long prefixesAddress = 0;
    long prefixesCount = 0;
    if (this.prefixes.size() > 2) {
        hashColumnIndex = (int) prefixes.get(0);
        hashColumnType = (int) prefixes.get(1);
        prefixesAddress = prefixes.getAddress() + 2 * Long.BYTES;
        prefixesCount = prefixes.size() - 2;
    }
    DataFrame frame;
    // frame metadata is based on TableReader, which is "full" metadata;
    // this cursor works with a subset of columns, which warrants a column index remap
    int frameColumnIndex = columnIndexes.getQuick(columnIndex);
    final TableReader reader = this.dataFrameCursor.getTableReader();
    long foundRowCount = 0;
    while ((frame = this.dataFrameCursor.next()) != null && foundRowCount < keyCount) {
        doneLatch.reset();
        final BitmapIndexReader indexReader = frame.getBitmapIndexReader(frameColumnIndex, BitmapIndexReader.DIR_BACKWARD);
        final long rowLo = frame.getRowLo();
        final long rowHi = frame.getRowHi() - 1;
        final long keyBaseAddress = indexReader.getKeyBaseAddress();
        final long keysMemorySize = indexReader.getKeyMemorySize();
        final long valueBaseAddress = indexReader.getValueBaseAddress();
        final long valuesMemorySize = indexReader.getValueMemorySize();
        final int valueBlockCapacity = indexReader.getValueBlockCapacity();
        final long unIndexedNullCount = indexReader.getUnIndexedNullCount();
        final int partitionIndex = frame.getPartitionIndex();
        long hashColumnAddress = 0;
        // hashColumnIndex can be -1 for the latest-by part only (no prefixes to match)
        if (hashColumnIndex > -1) {
            final int columnBase = reader.getColumnBase(partitionIndex);
            final int primaryColumnIndex = TableReader.getPrimaryColumnIndex(columnBase, hashColumnIndex);
            final MemoryR column = reader.getColumn(primaryColumnIndex);
            hashColumnAddress = column.getPageAddress(0);
        }
        // -1 must be a dead case here
        final int hashesColumnSize = ColumnType.isGeoHash(hashColumnType) ? getPow2SizeOfGeoHashType(hashColumnType) : -1;
        int queuedCount = 0;
        for (long i = 0; i < taskCount; ++i) {
            final long argsAddress = argumentsAddress + i * LatestByArguments.MEMORY_SIZE;
            final long found = LatestByArguments.getRowsSize(argsAddress);
            final long keyHi = LatestByArguments.getKeyHi(argsAddress);
            final long keyLo = LatestByArguments.getKeyLo(argsAddress);
            // skip the range if all keys have been found
            if (found >= keyHi - keyLo) {
                continue;
            }
            // update the hash column address with the current frame value
            LatestByArguments.setHashesAddress(argsAddress, hashColumnAddress);
            final long seq = pubSeq.next();
            if (seq < 0) {
                // no free queue slot, run the task on this thread
                GeoHashNative.latestByAndFilterPrefix(keyBaseAddress, keysMemorySize, valueBaseAddress, valuesMemorySize, argsAddress, unIndexedNullCount, rowHi, rowLo, partitionIndex, valueBlockCapacity, hashColumnAddress, hashesColumnSize, prefixesAddress, prefixesCount);
            } else {
                queue.get(seq).of(keyBaseAddress, keysMemorySize, valueBaseAddress, valuesMemorySize, argsAddress, unIndexedNullCount, rowHi, rowLo, partitionIndex, valueBlockCapacity, hashColumnAddress, hashesColumnSize, prefixesAddress, prefixesCount, doneLatch);
                pubSeq.done(seq);
                queuedCount++;
            }
        }
        // this should prevent a deadlock in a single-worker configuration
        while (doneLatch.getCount() > -queuedCount) {
            long seq = subSeq.next();
            if (seq > -1) {
                queue.get(seq).run();
                subSeq.done(seq);
            }
        }
        doneLatch.await(queuedCount);
        // reset the found counter
        foundRowCount = 0;
        for (int i = 0; i < taskCount; i++) {
            final long address = argumentsAddress + i * LatestByArguments.MEMORY_SIZE;
            foundRowCount += LatestByArguments.getRowsSize(address);
        }
    }
    final long rowCount = GeoHashNative.slideFoundBlocks(argumentsAddress, taskCount);
    LatestByArguments.releaseMemoryArray(argumentsAddress, taskCount);
    aLimit = rowCount;
    aIndex = indexShift;
    postProcessRows();
}
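The task partitioning at the top of buildTreeMap is plain ceiling division: chunkSize = ceil(keyCount / workerCount) and taskCount = ceil(keyCount / chunkSize). For keyCount = 10 and workerCount = 4 this yields chunkSize = 3 and taskCount = 4, covering key ranges [0, 3), [3, 6), [6, 9), [9, 10). A compact standalone check (ChunkingDemo is a hypothetical demo class, not part of QuestDB):

public class ChunkingDemo {
    public static void main(String[] args) {
        final int keyCount = 10;
        final int workerCount = 4;
        // ceiling division, exactly as in buildTreeMap
        final long chunkSize = (keyCount + workerCount - 1) / workerCount;    // 3
        final int taskCount = (int) ((keyCount + chunkSize - 1) / chunkSize); // 4
        for (long i = 0; i < taskCount; i++) {
            final long klo = i * chunkSize;
            final long khi = Long.min(klo + chunkSize, keyCount);
            System.out.printf("task %d -> keys [%d, %d)%n", i, klo, khi);
        }
    }
}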