Use of herddb.model.StatementExecutionException in project herddb by diennea.
From class TableManager, method scanForIndexRebuild.
@Override
public void scanForIndexRebuild(Consumer<Record> records) throws DataStorageManagerException {
    LocalScanPageCache localPageCache = new LocalScanPageCache();
    Consumer<Map.Entry<Bytes, Long>> scanExecutor = (Map.Entry<Bytes, Long> entry) -> {
        Bytes key = entry.getKey();
        LockHandle lock = lockForRead(key, null);
        try {
            Long pageId = entry.getValue();
            if (pageId != null) {
                Record record = fetchRecord(key, pageId, localPageCache);
                if (record != null) {
                    records.accept(record);
                }
            }
        } catch (DataStorageManagerException | StatementExecutionException error) {
            throw new RuntimeException(error);
        } finally {
            locksManager.releaseReadLock(lock);
        }
    };
    try {
        Stream<Map.Entry<Bytes, Long>> scanner = keyToPage.scanner(null, StatementEvaluationContext.DEFAULT_EVALUATION_CONTEXT(), tableContext, null);
        scanner.forEach(scanExecutor);
    } catch (StatementExecutionException impossible) {
        throw new DataStorageManagerException(impossible);
    }
}
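The wrap-and-unwrap dance above (the lambda rethrows the checked StatementExecutionException as a RuntimeException, and the caller restores the original type) is needed because lambdas passed to Stream.forEach cannot throw checked exceptions. A self-contained sketch of the same round trip, with ScanException standing in for StatementExecutionException:

import java.util.stream.Stream;

public class WrapUnwrapDemo {

    // illustrative stand-in for StatementExecutionException
    static class ScanException extends Exception {
        ScanException(String msg) {
            super(msg);
        }
    }

    public static void main(String[] args) throws ScanException {
        try {
            Stream.of("a", "b", "boom", "c").forEach(item -> {
                try {
                    process(item);
                } catch (ScanException checked) {
                    // lambdas passed to forEach cannot throw checked exceptions,
                    // so smuggle the failure out as the cause of an unchecked one
                    throw new RuntimeException(checked);
                }
            });
        } catch (RuntimeException wrapped) {
            if (wrapped.getCause() instanceof ScanException) {
                // restore the original checked type at the call site
                throw (ScanException) wrapped.getCause();
            }
            throw wrapped;
        }
    }

    static void process(String item) throws ScanException {
        if ("boom".equals(item)) {
            throw new ScanException("failed on " + item);
        }
        System.out.println("processed " + item);
    }
}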
Use of herddb.model.StatementExecutionException in project herddb by diennea.
From class TableManager, method scanWithStream.
private DataScanner scanWithStream(ScanStatement statement, StatementEvaluationContext context, Transaction transaction, boolean lockRequired, boolean forWrite) throws StatementExecutionException {
    if (transaction != null) {
        transaction.increaseRefcount();
    }
    try {
        final TupleComparator comparator = statement.getComparator();
        boolean sorted = comparator != null;
        boolean sortedByClusteredIndex = comparator != null && comparator.isOnlyPrimaryKeyAndAscending() && keyToPageSortedAscending;
        final Projection projection = statement.getProjection();
        final boolean applyProjectionDuringScan = projection != null && !sorted;
        ScanLimits limits = statement.getLimits();
        int maxRows = limits == null ? 0 : limits.computeMaxRows(context);
        int offset = limits == null ? 0 : limits.computeOffset(context);
        Stream<DataAccessor> result;
        Function<Record, DataAccessor> mapper = (Record record) -> {
            DataAccessor tuple;
            if (applyProjectionDuringScan) {
                tuple = projection.map(record.getDataAccessor(table), context);
            } else {
                tuple = record.getDataAccessor(table);
            }
            return tuple;
        };
        Stream<Record> recordsFromTransactionSorted = streamTransactionData(transaction, statement.getPredicate(), context);
        Stream<DataAccessor> fromTransactionSorted = recordsFromTransactionSorted != null ? recordsFromTransactionSorted.map(mapper) : null;
        if (fromTransactionSorted != null && comparator != null) {
            fromTransactionSorted = fromTransactionSorted.sorted(comparator);
        }
        Stream<DataAccessor> tableData = streamTableData(statement, context, transaction, lockRequired, forWrite).map(mapper);
        if (maxRows > 0) {
            if (sortedByClusteredIndex) {
                // table data is already sorted by the clustered index
                if (fromTransactionSorted != null) {
                    tableData = tableData.limit(maxRows + offset);
                    fromTransactionSorted = fromTransactionSorted.limit(maxRows + offset);
                    // we need to re-sort after merging the two streams
                    result = Stream.concat(fromTransactionSorted, tableData).sorted(comparator);
                } else {
                    // already sorted from the index, no need to re-sort
                    tableData = tableData.limit(maxRows + offset);
                    result = tableData;
                }
            } else if (sorted) {
                // we need an explicit sort
                tableData = tableData.sorted(comparator);
                if (fromTransactionSorted != null) {
                    tableData = tableData.limit(maxRows + offset);
                    fromTransactionSorted = fromTransactionSorted.limit(maxRows + offset);
                    // we need to re-sort after merging the two streams
                    result = Stream.concat(fromTransactionSorted, tableData).sorted(comparator);
                } else {
                    // no need to sort again
                    tableData = tableData.limit(maxRows + offset);
                    result = tableData;
                }
            } else if (fromTransactionSorted == null) {
                result = tableData;
            } else {
                result = Stream.concat(fromTransactionSorted, tableData);
            }
        } else {
            if (sortedByClusteredIndex) {
                // table data is already sorted by the clustered index
                if (fromTransactionSorted != null) {
                    tableData = tableData.sorted(comparator);
                    // fromTransactionSorted is already sorted,
                    // but we need to re-sort after merging
                    result = Stream.concat(fromTransactionSorted, tableData).sorted(comparator);
                } else {
                    result = tableData;
                }
            } else if (sorted) {
                // we need an explicit sort
                if (fromTransactionSorted != null) {
                    result = Stream.concat(fromTransactionSorted, tableData).sorted(comparator);
                } else {
                    result = tableData.sorted(comparator);
                }
            } else if (fromTransactionSorted != null) {
                // no need to sort
                result = Stream.concat(fromTransactionSorted, tableData);
            } else {
                result = tableData;
            }
        }
        if (offset > 0) {
            result = result.skip(offset);
        }
        if (maxRows > 0) {
            result = result.limit(maxRows);
        }
        if (!applyProjectionDuringScan && projection != null) {
            result = result.map(r -> projection.map(r, context));
        }
        String[] fieldNames;
        Column[] columns;
        if (projection != null) {
            fieldNames = projection.getFieldNames();
            columns = projection.getColumns();
        } else {
            fieldNames = table.columnNames;
            columns = table.columns;
        }
        return new StreamDataScanner(transaction, fieldNames, columns, result);
    } finally {
        if (transaction != null) {
            transaction.decreaseRefCount();
        }
    }
}
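The maxRows + offset arithmetic in scanWithStream is subtle: each pre-sorted source can be truncated to maxRows + offset elements before merging, because no later element of either source can land in the final window once skip(offset).limit(maxRows) is applied. A self-contained sketch over plain integers (no HerdDB types) illustrating why the pre-truncation is safe:

import java.util.Comparator;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class MergeLimitDemo {
    public static void main(String[] args) {
        int maxRows = 3, offset = 2;
        // two independently sorted sources, playing the roles of
        // fromTransactionSorted and tableData
        Stream<Integer> txData = Stream.of(1, 4, 7, 10, 13).limit(maxRows + offset);
        Stream<Integer> tableData = Stream.of(2, 3, 5, 6, 8, 9).limit(maxRows + offset);
        // any element beyond the first maxRows + offset of either sorted source
        // can never appear in the final page, so pre-truncation loses nothing
        List<Integer> page = Stream.concat(txData, tableData)
                .sorted(Comparator.naturalOrder())
                .skip(offset)
                .limit(maxRows)
                .collect(Collectors.toList());
        System.out.println(page); // prints [3, 4, 5]
    }
}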
Use of herddb.model.StatementExecutionException in project herddb by diennea.
From class TableManager, method lockForRead.
private static LockHandle lockForRead(Bytes key, Transaction transaction, String lockKey, ILocalLockManager locksManager) {
    try {
        if (transaction != null) {
            LockHandle lock = transaction.lookupLock(lockKey, key);
            if (lock != null) {
                // the transaction has already locked this key
                return lock;
            } else {
                lock = locksManager.acquireReadLockForKey(key);
                transaction.registerLockOnTable(lockKey, lock);
                return lock;
            }
        } else {
            return locksManager.acquireReadLockForKey(key);
        }
    } catch (RuntimeException err) {
        // lock timeout or other internal lock manager error
        throw new StatementExecutionException(err);
    }
}
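The lookup-before-acquire pattern keeps a transaction from blocking on a key it already holds, and registers every newly acquired lock so it can be released when the transaction ends. A minimal generic sketch of the same idea; KeyLockRegistry and all names in it are illustrative stand-ins, not HerdDB API:

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

// Illustrative only: per-key read locks with reuse inside one "transaction".
class KeyLockRegistry {
    private final Map<String, ReadWriteLock> locks = new HashMap<>();
    // locks this "transaction" already holds, keyed by record key
    private final Map<String, ReadWriteLock> held = new HashMap<>();

    synchronized ReadWriteLock lockForRead(String key) {
        ReadWriteLock existing = held.get(key);
        if (existing != null) {
            return existing; // already locked by this transaction: reuse, do not block
        }
        ReadWriteLock lock = locks.computeIfAbsent(key, k -> new ReentrantReadWriteLock());
        lock.readLock().lock();
        held.put(key, lock); // register so the lock is released exactly once at the end
        return lock;
    }

    // called when the "transaction" finishes: release everything it acquired
    synchronized void releaseAll() {
        held.values().forEach(l -> l.readLock().unlock());
        held.clear();
    }
}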
Use of herddb.model.StatementExecutionException in project herddb by diennea.
From class TableManager, method accessTableData.
private void accessTableData(ScanStatement statement, StatementEvaluationContext context, ScanResultOperation consumer, Transaction transaction, boolean lockRequired, boolean forWrite) throws StatementExecutionException {
    statement.validateContext(context);
    Predicate predicate = statement.getPredicate();
    long _start = System.currentTimeMillis();
    boolean acquireLock = transaction != null || forWrite || lockRequired;
    LocalScanPageCache lastPageRead = acquireLock ? null : new LocalScanPageCache();
    AtomicInteger count = new AtomicInteger();
    try {
        IndexOperation indexOperation = predicate != null ? predicate.getIndexOperation() : null;
        boolean primaryIndexSeek = indexOperation instanceof PrimaryIndexSeek;
        AbstractIndexManager useIndex = getIndexForTbleAccess(indexOperation);

        class RecordProcessor implements BatchOrderedExecutor.Executor<Entry<Bytes, Long>>, Consumer<Map.Entry<Bytes, Long>> {

            @Override
            public void execute(List<Map.Entry<Bytes, Long>> batch) throws HerdDBInternalException {
                batch.forEach((entry) -> {
                    accept(entry);
                });
            }

            @Override
            public void accept(Entry<Bytes, Long> entry) throws DataStorageManagerException, StatementExecutionException, LogNotAvailableException {
                if (transaction != null && count.incrementAndGet() % 1000 == 0) {
                    transaction.touch();
                }
                Bytes key = entry.getKey();
                boolean already_locked = transaction != null && transaction.lookupLock(table.name, key) != null;
                boolean record_discarded = !already_locked;
                LockHandle lock = acquireLock ? (forWrite ? lockForWrite(key, transaction) : lockForRead(key, transaction)) : null;
                // LOGGER.log(Level.SEVERE, "CREATED LOCK " + lock + " for " + key);
                try {
                    if (transaction != null) {
                        if (transaction.recordDeleted(table.name, key)) {
                            // skip this record: it has been deleted inside the current transaction
                            return;
                        }
                        Record record = transaction.recordUpdated(table.name, key);
                        if (record != null) {
                            // use the current transaction's version of the record
                            if (predicate == null || predicate.evaluate(record, context)) {
                                // now the consumer is the owner of the lock on the record
                                record_discarded = false;
                                consumer.accept(record, null);
                            }
                            return;
                        }
                    }
                    Long pageId = entry.getValue();
                    if (pageId != null) {
                        boolean pkFilterCompleteMatch = false;
                        if (!primaryIndexSeek && predicate != null) {
                            Predicate.PrimaryKeyMatchOutcome outcome = predicate.matchesRawPrimaryKey(key, context);
                            if (outcome == Predicate.PrimaryKeyMatchOutcome.FAILED) {
                                return;
                            } else if (outcome == Predicate.PrimaryKeyMatchOutcome.FULL_CONDITION_VERIFIED) {
                                pkFilterCompleteMatch = true;
                            }
                        }
                        Record record = fetchRecord(key, pageId, lastPageRead);
                        if (record != null && (pkFilterCompleteMatch || predicate == null || predicate.evaluate(record, context))) {
                            // now the consumer is the owner of the lock on the record
                            record_discarded = false;
                            consumer.accept(record, transaction == null ? lock : null);
                        }
                    }
                } finally {
                    // release the lock on the key if it did not match the scan criteria
                    if (record_discarded) {
                        if (transaction == null) {
                            locksManager.releaseLock(lock);
                        } else if (!already_locked) {
                            transaction.releaseLockOnKey(table.name, key, locksManager);
                        }
                    }
                }
            }
        }

        RecordProcessor scanExecutor = new RecordProcessor();
        boolean exit = false;
        try {
            if (primaryIndexSeek) {
                // we are expecting at most one record, so there is no need for a
                // BatchOrderedExecutor: this is the most common case for
                // UPDATE-BY-PK and SELECT-BY-PK, and no need to create and use Streams
                PrimaryIndexSeek seek = (PrimaryIndexSeek) indexOperation;
                Bytes value = Bytes.from_array(seek.value.computeNewValue(null, context, tableContext));
                Long page = keyToPage.get(value);
                if (page != null) {
                    Map.Entry<Bytes, Long> singleEntry = new AbstractMap.SimpleImmutableEntry<>(value, page);
                    scanExecutor.accept(singleEntry);
                }
            } else {
                Stream<Map.Entry<Bytes, Long>> scanner = keyToPage.scanner(indexOperation, context, tableContext, useIndex);
                BatchOrderedExecutor<Map.Entry<Bytes, Long>> executor = new BatchOrderedExecutor<>(SORTED_PAGE_ACCESS_WINDOW_SIZE, scanExecutor, SORTED_PAGE_ACCESS_COMPARATOR);
                scanner.forEach(executor);
                executor.finish();
            }
        } catch (ExitLoop exitLoop) {
            exit = !exitLoop.continueWithTransactionData;
            if (LOGGER.isLoggable(Level.FINEST)) {
                LOGGER.log(Level.FINEST, "exit loop during scan {0}, started at {1}: {2}", new Object[]{statement, new java.sql.Timestamp(_start), exitLoop.toString()});
            }
        } catch (final HerdDBInternalException error) {
            LOGGER.log(Level.SEVERE, "error during scan", error);
            if (error.getCause() instanceof StatementExecutionException) {
                throw (StatementExecutionException) error.getCause();
            } else if (error.getCause() instanceof DataStorageManagerException) {
                throw (DataStorageManagerException) error.getCause();
            } else if (error instanceof StatementExecutionException) {
                throw error;
            } else if (error instanceof DataStorageManagerException) {
                throw error;
            } else {
                throw new StatementExecutionException(error);
            }
        }
        if (!exit && transaction != null) {
            consumer.beginNewRecordsInTransactionBlock();
            Collection<Record> newRecordsForTable = transaction.getNewRecordsForTable(table.name);
            if (newRecordsForTable != null) {
                newRecordsForTable.forEach(record -> {
                    if (!transaction.recordDeleted(table.name, record.key) && (predicate == null || predicate.evaluate(record, context))) {
                        consumer.accept(record, null);
                    }
                });
            }
        }
    } catch (ExitLoop exitLoop) {
        if (LOGGER.isLoggable(Level.FINEST)) {
            LOGGER.log(Level.FINEST, "exit loop during scan {0}, started at {1}: {2}", new Object[]{statement, new java.sql.Timestamp(_start), exitLoop.toString()});
        }
    } catch (StatementExecutionException err) {
        LOGGER.log(Level.SEVERE, "error during scan {0}, started at {1}: {2}", new Object[]{statement, new java.sql.Timestamp(_start), err.toString()});
        throw err;
    } catch (HerdDBInternalException err) {
        LOGGER.log(Level.SEVERE, "error during scan {0}, started at {1}: {2}", new Object[]{statement, new java.sql.Timestamp(_start), err.toString()});
        throw new StatementExecutionException(err);
    }
}
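accessTableData lets the consumer abort the scan early by throwing ExitLoop, which the method catches and treats as normal completion rather than failure (useful when a query has collected enough rows). A stripped-down, runnable sketch of that control-flow pattern; ExitScan here is an illustrative stand-in for HerdDB's ExitLoop:

import java.util.stream.IntStream;

// Illustrative: stop a forEach-driven scan after enough matches by throwing
// a dedicated unchecked exception, mirroring HerdDB's ExitLoop.
class ExitScan extends RuntimeException {
}

public class EarlyExitDemo {
    public static void main(String[] args) {
        int[] seen = {0};
        try {
            IntStream.range(0, 1_000_000).forEach(i -> {
                seen[0]++;
                if (seen[0] == 10) {
                    throw new ExitScan(); // the consumer decides it has enough rows
                }
            });
        } catch (ExitScan stop) {
            // expected: the scan was abandoned, not failed
        }
        System.out.println("visited " + seen[0] + " elements"); // visited 10 elements
    }
}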
Use of herddb.model.StatementExecutionException in project herddb by diennea.
From class TableManager, method streamTableData.
private Stream<Record> streamTableData(ScanStatement statement, StatementEvaluationContext context, Transaction transaction, boolean lockRequired, boolean forWrite) throws StatementExecutionException {
    statement.validateContext(context);
    Predicate predicate = statement.getPredicate();
    boolean acquireLock = transaction != null || forWrite || lockRequired;
    LocalScanPageCache lastPageRead = acquireLock ? null : new LocalScanPageCache();
    IndexOperation indexOperation = predicate != null ? predicate.getIndexOperation() : null;
    boolean primaryIndexSeek = indexOperation instanceof PrimaryIndexSeek;
    AbstractIndexManager useIndex = getIndexForTbleAccess(indexOperation);
    Stream<Map.Entry<Bytes, Long>> scanner = keyToPage.scanner(indexOperation, context, tableContext, useIndex);
    Stream<Record> resultFromTable = scanner.map(entry -> {
        return accessRecord(entry, predicate, context, transaction, lastPageRead, primaryIndexSeek, forWrite, acquireLock);
    }).filter(r -> r != null);
    return resultFromTable;
}
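streamTableData relies on a simple idiom: the mapper returns null for entries whose record does not survive fetching or the predicate, and the trailing filter drops those nulls so the caller only ever sees live records. A tiny self-contained illustration of the map-then-filter idiom:

import java.util.List;
import java.util.Objects;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class MapFilterDemo {
    public static void main(String[] args) {
        // the mapper returns null for entries that do not survive the predicate,
        // mirroring accessRecord(...) returning null for non-matching records
        List<String> survivors = Stream.of(1, 2, 3, 4, 5, 6)
                .map(n -> n % 2 == 0 ? "record-" + n : null)
                .filter(Objects::nonNull)
                .collect(Collectors.toList());
        System.out.println(survivors); // prints [record-2, record-4, record-6]
    }
}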