Use of herddb.utils.DataAccessor in project herddb by diennea.
The class TableManager, method scanWithStream:
private DataScanner scanWithStream(ScanStatement statement, StatementEvaluationContext context,
        Transaction transaction, boolean lockRequired, boolean forWrite) throws StatementExecutionException {
    if (transaction != null) {
        transaction.increaseRefcount();
    }
    try {
        final TupleComparator comparator = statement.getComparator();
        boolean sorted = comparator != null;
        boolean sortedByClusteredIndex = comparator != null
                && comparator.isOnlyPrimaryKeyAndAscending()
                && keyToPageSortedAscending;
        final Projection projection = statement.getProjection();
        final boolean applyProjectionDuringScan = projection != null && !sorted;
        ScanLimits limits = statement.getLimits();
        int maxRows = limits == null ? 0 : limits.computeMaxRows(context);
        int offset = limits == null ? 0 : limits.computeOffset(context);
        Stream<DataAccessor> result;
        Function<Record, DataAccessor> mapper = (Record record) -> {
            DataAccessor tuple;
            if (applyProjectionDuringScan) {
                tuple = projection.map(record.getDataAccessor(table), context);
            } else {
                tuple = record.getDataAccessor(table);
            }
            return tuple;
        };
        Stream<Record> recordsFromTransactionSorted = streamTransactionData(transaction, statement.getPredicate(), context);
        Stream<DataAccessor> fromTransactionSorted =
                recordsFromTransactionSorted != null ? recordsFromTransactionSorted.map(mapper) : null;
        if (fromTransactionSorted != null && comparator != null) {
            fromTransactionSorted = fromTransactionSorted.sorted(comparator);
        }
        Stream<DataAccessor> tableData = streamTableData(statement, context, transaction, lockRequired, forWrite).map(mapper);
        if (maxRows > 0) {
            if (sortedByClusteredIndex) {
                // already sorted if needed
                if (fromTransactionSorted != null) {
                    // already sorted from index
                    tableData = tableData.limit(maxRows + offset);
                    fromTransactionSorted = fromTransactionSorted.limit(maxRows + offset);
                    // we need to re-sort after merging the data
                    result = Stream.concat(fromTransactionSorted, tableData).sorted(comparator);
                } else {
                    // already sorted from index
                    tableData = tableData.limit(maxRows + offset);
                    // no need to re-sort
                    result = tableData;
                }
            } else if (sorted) {
                // need to sort
                tableData = tableData.sorted(comparator);
                // already sorted if needed
                if (fromTransactionSorted != null) {
                    tableData = tableData.limit(maxRows + offset);
                    fromTransactionSorted = fromTransactionSorted.limit(maxRows + offset);
                    // we need to re-sort after merging the data
                    result = Stream.concat(fromTransactionSorted, tableData).sorted(comparator);
                } else {
                    tableData = tableData.limit(maxRows + offset);
                    // no need to sort again
                    result = tableData;
                }
            } else if (fromTransactionSorted == null) {
                result = tableData;
            } else {
                result = Stream.concat(fromTransactionSorted, tableData);
            }
        } else {
            if (sortedByClusteredIndex) {
                // already sorted from index
                if (fromTransactionSorted != null) {
                    tableData = tableData.sorted(comparator);
                    // fromTransactionSorted is already sorted
                    // we need to re-sort
                    result = Stream.concat(fromTransactionSorted, tableData).sorted(comparator);
                } else {
                    result = tableData;
                }
            } else if (sorted) {
                // we need to re-sort
                if (fromTransactionSorted != null) {
                    result = Stream.concat(fromTransactionSorted, tableData).sorted(comparator);
                } else {
                    result = tableData.sorted(comparator);
                }
            } else if (fromTransactionSorted != null) {
                // no need to sort
                result = Stream.concat(fromTransactionSorted, tableData);
            } else {
                result = tableData;
            }
        }
        if (offset > 0) {
            result = result.skip(offset);
        }
        if (maxRows > 0) {
            result = result.limit(maxRows);
        }
        if (!applyProjectionDuringScan && projection != null) {
            result = result.map(r -> projection.map(r, context));
        }
        String[] fieldNames;
        Column[] columns;
        if (projection != null) {
            fieldNames = projection.getFieldNames();
            columns = projection.getColumns();
        } else {
            fieldNames = table.columnNames;
            columns = table.columns;
        }
        return new StreamDataScanner(transaction, fieldNames, columns, result);
    } finally {
        if (transaction != null) {
            transaction.decreaseRefCount();
        }
    }
}
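Note the limit optimization above: when a row limit is present, each of the two individually sorted sources is truncated to maxRows + offset elements before the merge, because no element ranked beyond that point on either side can appear in the final result window. A minimal, self-contained sketch of the same pattern with plain java.util.stream (the integer data and the class name are illustrative, not HerdDB code):

import java.util.Comparator;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class MergeLimitDemo {
    public static void main(String[] args) {
        // Hypothetical stand-ins for the two sources in scanWithStream:
        // uncommitted rows from the transaction, and rows from the table.
        Stream<Integer> fromTransaction = Stream.of(7, 1, 9).sorted();
        Stream<Integer> fromTable = Stream.of(4, 8, 2, 6).sorted();

        int maxRows = 2;
        int offset = 1;

        // Each side only needs its first maxRows + offset elements;
        // anything beyond that rank cannot land in the requested page.
        List<Integer> page = Stream.concat(
                        fromTransaction.limit(maxRows + offset),
                        fromTable.limit(maxRows + offset))
                .sorted(Comparator.naturalOrder()) // re-sort after merging
                .skip(offset)
                .limit(maxRows)
                .collect(Collectors.toList());

        System.out.println(page); // [2, 4]
    }
}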
Use of herddb.utils.DataAccessor in project herddb by diennea.
The class FileRecordSet, method sort:
@Override
public void sort(TupleComparator comparator) {
    if (!writeFinished) {
        throw new IllegalStateException("RecordSet is still in write mode");
    }
    if (comparator != null) {
        if (!buffer.isSwapped()) {
            buffer.sortBuffer(comparator);
        } else {
            List<DataAccessor> copyInMemory = new ArrayList<>();
            for (DataAccessor tuple : buffer) {
                copyInMemory.add(tuple);
            }
            copyInMemory.sort(comparator);
            buffer.close();
            DiskArrayList<DataAccessor> newBuffer = new DiskArrayList<>(
                    buffer.isSwapped() ? -1 : Integer.MAX_VALUE,
                    tmpDirectory, new TupleSerializer(columns, fieldNames));
            newBuffer.enableCompression();
            for (DataAccessor t : copyInMemory) {
                newBuffer.add(t);
            }
            newBuffer.finish();
            buffer = newBuffer;
        }
    }
}
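Once a DiskArrayList has swapped to disk it cannot be sorted in place, so the method above reads all tuples back into memory, sorts them, and writes a fresh buffer to replace the old one. A minimal sketch of that read-sort-rewrite cycle, using a temp file of text lines as a stand-in for the serialized DataAccessor tuples:

import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

public class SpilledSortDemo {
    public static void main(String[] args) throws Exception {
        // A "spilled" buffer: data lives in a file, not on the heap.
        Path buffer = Files.createTempFile("recordset", ".tmp");
        Files.write(buffer, List.of("banana", "apple", "cherry"));

        // Materialize everything in memory and sort, as FileRecordSet.sort does.
        List<String> copyInMemory = new ArrayList<>(Files.readAllLines(buffer));
        copyInMemory.sort(Comparator.naturalOrder());

        // Write a replacement buffer and discard the old one.
        Path newBuffer = Files.createTempFile("recordset-sorted", ".tmp");
        Files.write(newBuffer, copyInMemory);
        Files.delete(buffer);

        System.out.println(Files.readAllLines(newBuffer)); // [apple, banana, cherry]
        Files.delete(newBuffer);
    }
}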
Use of herddb.utils.DataAccessor in project herddb by diennea.
The class FileRecordSet, method applyProjection:
@Override
public void applyProjection(Projection projection, StatementEvaluationContext context) throws StatementExecutionException {
    this.columns = projection.getColumns();
    this.fieldNames = projection.getFieldNames();
    DiskArrayList<DataAccessor> projected = new DiskArrayList<>(
            buffer.isSwapped() ? -1 : Integer.MAX_VALUE,
            tmpDirectory, new TupleSerializer(columns, fieldNames));
    projected.enableCompression();
    for (DataAccessor record : buffer) {
        projected.add(projection.map(record, context));
    }
    projected.finish();
    this.buffer.close();
    this.buffer = projected;
}
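A projection rewrites every record so that only the requested columns survive, in the requested order. A minimal sketch of the per-record mapping, with LinkedHashMap rows standing in for DataAccessor (the project helper is illustrative, not a HerdDB API):

import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class ProjectionDemo {
    // Keep only the requested fields, preserving the requested order.
    static Map<String, Object> project(Map<String, Object> row, List<String> fieldNames) {
        Map<String, Object> out = new LinkedHashMap<>();
        for (String name : fieldNames) {
            out.put(name, row.get(name));
        }
        return out;
    }

    public static void main(String[] args) {
        Map<String, Object> row = Map.of("id", 1, "name", "alice", "age", 30);
        System.out.println(project(row, List.of("name", "id"))); // {name=alice, id=1}
    }
}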
Use of herddb.utils.DataAccessor in project herddb by diennea.
The class BRINIndexManager, method rebuild:
@Override
public void rebuild() throws DataStorageManagerException {
    long _start = System.currentTimeMillis();
    LOGGER.log(Level.FINE, "building index {0}", index.name);
    dataStorageManager.initIndex(tableSpaceUUID, index.uuid);
    data.reset();
    Table table = tableManager.getTable();
    AtomicLong count = new AtomicLong();
    tableManager.scanForIndexRebuild(r -> {
        DataAccessor values = r.getDataAccessor(table);
        Bytes key = RecordSerializer.serializeIndexKey(values, table, table.primaryKey);
        Bytes indexKey = RecordSerializer.serializeIndexKey(values, index, index.columnNames);
        // LOGGER.log(Level.SEVERE, "adding " + key + " -> " + values);
        recordInserted(key, indexKey);
        count.incrementAndGet();
    });
    long _stop = System.currentTimeMillis();
    if (count.intValue() > 0) {
        LOGGER.log(Level.INFO, "building index {0} took {1}, scanned {2} records",
                new Object[]{index.name, (_stop - _start) + " ms", count});
    }
}
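The rebuild is a single full scan: for each row it derives the primary-key bytes and the secondary-index key, then links the two. A minimal sketch of the same loop, with an in-memory TreeMap standing in for the BRIN structure and String keys standing in for the serialized Bytes (all names here are illustrative):

import java.util.List;
import java.util.Map;
import java.util.NavigableMap;
import java.util.Set;
import java.util.TreeMap;
import java.util.TreeSet;

public class SecondaryIndexRebuildDemo {
    public static void main(String[] args) {
        List<Map<String, String>> rows = List.of(
                Map.of("id", "k1", "city", "Rome"),
                Map.of("id", "k2", "city", "Milan"),
                Map.of("id", "k3", "city", "Rome"));

        // indexKey -> primary keys, like recordInserted(key, indexKey) above
        NavigableMap<String, Set<String>> index = new TreeMap<>();
        long count = 0;
        for (Map<String, String> row : rows) {
            String key = row.get("id");        // stands in for serializeIndexKey(values, table, table.primaryKey)
            String indexKey = row.get("city"); // stands in for serializeIndexKey(values, index, index.columnNames)
            index.computeIfAbsent(indexKey, k -> new TreeSet<>()).add(key);
            count++;
        }
        System.out.println(index);             // {Milan=[k2], Rome=[k1, k3]}
        System.out.println("scanned " + count + " records");
    }
}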
Use of herddb.utils.DataAccessor in project herddb by diennea.
The class StreamDataScanner, method next:
@Override
public DataAccessor next() throws DataScannerException {
    DataAccessor current = next;
    fetchNext();
    return current;
}
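StreamDataScanner keeps a one-element lookahead: the next field always holds the row that the following call to next() will return, and fetchNext() advances it. A minimal, self-contained sketch of the pattern over a plain Iterator (LookaheadIterator is illustrative, not a HerdDB class):

import java.util.Iterator;

class LookaheadIterator<T> implements Iterator<T> {
    private final Iterator<T> source;
    private T next; // null once the source is exhausted

    LookaheadIterator(Iterator<T> source) {
        this.source = source;
        fetchNext(); // prime the lookahead before the first next() call
    }

    private void fetchNext() {
        next = source.hasNext() ? source.next() : null;
    }

    @Override
    public boolean hasNext() {
        return next != null;
    }

    @Override
    public T next() {
        T current = next; // hand out the buffered element...
        fetchNext();      // ...and buffer the one after it
        return current;
    }
}

The buffered element is what lets hasNext() answer truthfully without consuming anything from the underlying source.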