Use of herddb.model.commands.ScanStatement in project herddb by diennea.
The class DBManager, method executePlan.
public StatementExecutionResult executePlan(ExecutionPlan plan, StatementEvaluationContext context, TransactionContext transactionContext) throws StatementExecutionException {
    context.setManager(this);
    plan.validateContext(context);
    if (plan.mainStatement instanceof ScanStatement) {
        DataScanner result = scan((ScanStatement) plan.mainStatement, context, transactionContext);
        // the transaction can be auto-generated during the scan
        transactionContext = new TransactionContext(result.transactionId);
        return executeDataScannerPlan(plan, result, context, transactionContext);
    } else if (plan.dataSource != null) {
        // INSERT from SELECT
        try {
            ScanResult data = (ScanResult) executePlan(plan.dataSource, context, transactionContext);
            int insertCount = 0;
            try {
                // the transaction can be auto-generated during the scan
                transactionContext = new TransactionContext(data.transactionId);
                while (data.dataScanner.hasNext()) {
                    DataAccessor tuple = data.dataScanner.next();
                    SQLStatementEvaluationContext tmp_context = new SQLStatementEvaluationContext("--", Arrays.asList(tuple.getValues()));
                    DMLStatementExecutionResult res = (DMLStatementExecutionResult) executeStatement(plan.mainStatement, tmp_context, transactionContext);
                    insertCount += res.getUpdateCount();
                }
            } finally {
                data.dataScanner.close();
            }
            return new DMLStatementExecutionResult(transactionContext.transactionId, insertCount);
        } catch (DataScannerException err) {
            throw new StatementExecutionException(err);
        }
    } else if (plan.joinStatements != null) {
        List<DataScanner> scanResults = new ArrayList<>();
        for (ScanStatement statement : plan.joinStatements) {
            DataScanner result = scan(statement, context, transactionContext);
            // the transaction can be auto-generated during the scan
            transactionContext = new TransactionContext(result.transactionId);
            scanResults.add(result);
        }
        return executeJoinedScansPlan(scanResults, context, transactionContext, plan);
    } else if (plan.insertStatements != null) {
        int insertCount = 0;
        for (InsertStatement insert : plan.insertStatements) {
            DMLStatementExecutionResult res = (DMLStatementExecutionResult) executeStatement(insert, context, transactionContext);
            // the transaction can be auto-generated during the loop
            transactionContext = new TransactionContext(res.transactionId);
            insertCount += res.getUpdateCount();
        }
        return new DMLStatementExecutionResult(transactionContext.transactionId, insertCount);
    } else {
        return executeStatement(plan.mainStatement, context, transactionContext);
    }
}
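Every branch above follows the same convention: the result of a statement carries the id of a transaction that may have been auto-created during execution, and the caller rebuilds its TransactionContext from that id before issuing the next statement, so that all statements of the plan join the same transaction. Below is a minimal, JDK-only sketch of this propagation pattern; the Result record and runStatement method are hypothetical stand-ins for DMLStatementExecutionResult and executeStatement, not HerdDB API.

import java.util.List;

public class TransactionPropagationSketch {

    // hypothetical stand-in for DMLStatementExecutionResult: carries the
    // (possibly auto-generated) transaction id back to the caller
    record Result(long transactionId, int updateCount) {}

    // hypothetical executor: returns a fresh transaction id when called with 0
    static Result runStatement(String statement, long transactionId) {
        long effectiveTx = transactionId != 0 ? transactionId : System.nanoTime();
        return new Result(effectiveTx, 1);
    }

    public static void main(String[] args) {
        long transactionId = 0; // 0 = no transaction yet
        int updateCount = 0;
        for (String stmt : List.of("INSERT a", "INSERT b", "INSERT c")) {
            Result res = runStatement(stmt, transactionId);
            // rebuild the context from the result: the first statement may have
            // auto-created the transaction, and all later ones must join it
            transactionId = res.transactionId();
            updateCount += res.updateCount();
        }
        System.out.println("tx=" + transactionId + " updates=" + updateCount);
    }
}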
Use of herddb.model.commands.ScanStatement in project herddb by diennea.
The class TableManager, method scanWithStream.
private DataScanner scanWithStream(ScanStatement statement, StatementEvaluationContext context, Transaction transaction, boolean lockRequired, boolean forWrite) throws StatementExecutionException {
    if (transaction != null) {
        transaction.increaseRefcount();
    }
    try {
        final TupleComparator comparator = statement.getComparator();
        boolean sorted = comparator != null;
        boolean sortedByClusteredIndex = comparator != null && comparator.isOnlyPrimaryKeyAndAscending() && keyToPageSortedAscending;
        final Projection projection = statement.getProjection();
        final boolean applyProjectionDuringScan = projection != null && !sorted;
        ScanLimits limits = statement.getLimits();
        int maxRows = limits == null ? 0 : limits.computeMaxRows(context);
        int offset = limits == null ? 0 : limits.computeOffset(context);
        Stream<DataAccessor> result;
        Function<Record, DataAccessor> mapper = (Record record) -> {
            DataAccessor tuple;
            if (applyProjectionDuringScan) {
                tuple = projection.map(record.getDataAccessor(table), context);
            } else {
                tuple = record.getDataAccessor(table);
            }
            return tuple;
        };
        Stream<Record> recordsFromTransactionSorted = streamTransactionData(transaction, statement.getPredicate(), context);
        Stream<DataAccessor> fromTransactionSorted = recordsFromTransactionSorted != null ? recordsFromTransactionSorted.map(mapper) : null;
        if (fromTransactionSorted != null && comparator != null) {
            fromTransactionSorted = fromTransactionSorted.sorted(comparator);
        }
        Stream<DataAccessor> tableData = streamTableData(statement, context, transaction, lockRequired, forWrite).map(mapper);
        if (maxRows > 0) {
            if (sortedByClusteredIndex) {
                // already sorted if needed
                if (fromTransactionSorted != null) {
                    // already sorted from index
                    tableData = tableData.limit(maxRows + offset);
                    fromTransactionSorted = fromTransactionSorted.limit(maxRows + offset);
                    // we need to re-sort after merging the data
                    result = Stream.concat(fromTransactionSorted, tableData).sorted(comparator);
                } else {
                    // already sorted from index
                    tableData = tableData.limit(maxRows + offset);
                    // no need to re-sort
                    result = tableData;
                }
            } else if (sorted) {
                // need to sort
                tableData = tableData.sorted(comparator);
                // already sorted if needed
                if (fromTransactionSorted != null) {
                    tableData = tableData.limit(maxRows + offset);
                    fromTransactionSorted = fromTransactionSorted.limit(maxRows + offset);
                    // we need to re-sort after merging the data
                    result = Stream.concat(fromTransactionSorted, tableData).sorted(comparator);
                } else {
                    tableData = tableData.limit(maxRows + offset);
                    // no need to sort again
                    result = tableData;
                }
            } else if (fromTransactionSorted == null) {
                result = tableData;
            } else {
                result = Stream.concat(fromTransactionSorted, tableData);
            }
        } else {
            if (sortedByClusteredIndex) {
                // already sorted from index
                if (fromTransactionSorted != null) {
                    tableData = tableData.sorted(comparator);
                    // fromTransactionSorted is already sorted
                    // we need to re-sort
                    result = Stream.concat(fromTransactionSorted, tableData).sorted(comparator);
                } else {
                    result = tableData;
                }
            } else if (sorted) {
                // we need to re-sort
                if (fromTransactionSorted != null) {
                    result = Stream.concat(fromTransactionSorted, tableData).sorted(comparator);
                } else {
                    result = tableData.sorted(comparator);
                }
            } else if (fromTransactionSorted != null) {
                // no need to sort
                result = Stream.concat(fromTransactionSorted, tableData);
            } else {
                result = tableData;
            }
        }
        if (offset > 0) {
            result = result.skip(offset);
        }
        if (maxRows > 0) {
            result = result.limit(maxRows);
        }
        if (!applyProjectionDuringScan && projection != null) {
            result = result.map(r -> projection.map(r, context));
        }
        String[] fieldNames;
        Column[] columns;
        if (projection != null) {
            fieldNames = projection.getFieldNames();
            columns = projection.getColumns();
        } else {
            fieldNames = table.columnNames;
            columns = table.columns;
        }
        return new StreamDataScanner(transaction, fieldNames, columns, result);
    } finally {
        if (transaction != null) {
            transaction.decreaseRefCount();
        }
    }
}
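The interesting part of scanWithStream is how a sorted, paginated result is assembled from two sources: once both the transaction-local data and the table data are sorted, each source can safely be truncated to maxRows + offset elements before merging, because no element past that position in either source can appear in the final window; the merged stream is then re-sorted and offset/limit are applied at the end. A self-contained sketch of that stream composition, with made-up integer values and a natural-order comparator:

import java.util.Comparator;
import java.util.List;
import java.util.stream.Stream;

public class SortedMergeSketch {
    public static void main(String[] args) {
        Comparator<Integer> comparator = Comparator.naturalOrder();
        int maxRows = 3;
        int offset = 1;

        // two independently sorted sources, like fromTransactionSorted and tableData
        Stream<Integer> fromTransaction = Stream.of(2, 5, 9).sorted(comparator);
        Stream<Integer> tableData = Stream.of(1, 4, 6, 8).sorted(comparator);

        // each source can be truncated to maxRows + offset: rows beyond that
        // position can never appear in the final window, even after merging
        List<Integer> page = Stream.concat(
                        fromTransaction.limit(maxRows + offset),
                        tableData.limit(maxRows + offset))
                .sorted(comparator) // re-sort after merging the two sorted runs
                .skip(offset)       // then apply OFFSET ...
                .limit(maxRows)     // ... and LIMIT on the merged result
                .toList();

        System.out.println(page); // [2, 4, 5]
    }
}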
Use of herddb.model.commands.ScanStatement in project herddb by diennea.
The class TableManager, method streamTableData.
private Stream<Record> streamTableData(ScanStatement statement, StatementEvaluationContext context, Transaction transaction, boolean lockRequired, boolean forWrite) throws StatementExecutionException {
    statement.validateContext(context);
    Predicate predicate = statement.getPredicate();
    boolean acquireLock = transaction != null || forWrite || lockRequired;
    LocalScanPageCache lastPageRead = acquireLock ? null : new LocalScanPageCache();
    IndexOperation indexOperation = predicate != null ? predicate.getIndexOperation() : null;
    boolean primaryIndexSeek = indexOperation instanceof PrimaryIndexSeek;
    AbstractIndexManager useIndex = getIndexForTbleAccess(indexOperation);
    Stream<Map.Entry<Bytes, Long>> scanner = keyToPage.scanner(indexOperation, context, tableContext, useIndex);
    Stream<Record> resultFromTable = scanner.map(entry -> {
        return accessRecord(entry, predicate, context, transaction, lastPageRead, primaryIndexSeek, forWrite, acquireLock);
    }).filter(r -> r != null);
    return resultFromTable;
}
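streamTableData builds a fully lazy pipeline: the primary index scanner yields key-to-page entries, each entry is resolved to a Record on demand, and entries whose record does not survive predicate evaluation come back null and are filtered out. A simplified JDK-only model of that shape, where loadRecord and the entry values are invented for illustration:

import java.util.Map;
import java.util.Objects;
import java.util.stream.Stream;

public class LazyScanSketch {

    // hypothetical record resolution: returns null when the entry
    // does not survive predicate evaluation (like accessRecord)
    static String loadRecord(Map.Entry<String, Long> entry) {
        return entry.getValue() % 2 == 0 ? entry.getKey() + "@" + entry.getValue() : null;
    }

    public static void main(String[] args) {
        // stand-in for keyToPage.scanner(...): a stream of key -> page-id entries
        Stream<Map.Entry<String, Long>> scanner = Stream.of(
                Map.entry("k1", 10L), Map.entry("k2", 11L), Map.entry("k3", 12L));

        // nothing is resolved until the stream is consumed:
        // map resolves each entry, filter drops rejected records
        Stream<String> records = scanner
                .map(LazyScanSketch::loadRecord)
                .filter(Objects::nonNull);

        records.forEach(System.out::println); // k1@10, k3@12
    }
}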
Use of herddb.model.commands.ScanStatement in project herddb by diennea.
The class TableManager, method executeUpdateAsync.
private CompletableFuture<StatementExecutionResult> executeUpdateAsync(UpdateStatement update, Transaction transaction, StatementEvaluationContext context) throws StatementExecutionException, DataStorageManagerException {
    // LOGGER.log(Level.SEVERE, "executeUpdateAsync, " + update + ", transaction " + transaction);
    AtomicInteger updateCount = new AtomicInteger();
    Holder<Bytes> lastKey = new Holder<>();
    Holder<byte[]> lastValue = new Holder<>();
    /*
     An update can succeed only if the row is valid and the key is contained in the "keys" structure.
     The update simply overrides the value of the row, assigning a null page to the row.
     The update can have a 'where' predicate which is evaluated against the decoded row; the update is executed only if the predicate returns boolean 'true' (CAS operation).
     Locks: the update takes a lock on the key.
     */
    RecordFunction function = update.getFunction();
    long transactionId = transaction != null ? transaction.transactionId : 0;
    Predicate predicate = update.getPredicate();
    Map<String, AbstractIndexManager> indexes = tableSpaceManager.getIndexesOnTable(table.name);
    ScanStatement scan = new ScanStatement(table.tablespace, table, predicate);
    List<CompletableFuture<PendingLogEntryWork>> writes = new ArrayList<>();
    try {
        accessTableData(scan, context, new ScanResultOperation() {
            @Override
            public void accept(Record current, LockHandle lockHandle) throws StatementExecutionException, LogNotAvailableException, DataStorageManagerException {
                List<UniqueIndexLockReference> uniqueIndexes = null;
                byte[] newValue;
                try {
                    if (childrenTables != null) {
                        DataAccessor currentValues = current.getDataAccessor(table);
                        for (Table childTable : childrenTables) {
                            executeForeignKeyConstraintsAsParentTable(childTable, currentValues, context, transaction, false);
                        }
                    }
                    newValue = function.computeNewValue(current, context, tableContext);
                    if (indexes != null || table.foreignKeys != null) {
                        DataAccessor values = new Record(current.key, Bytes.from_array(newValue)).getDataAccessor(table);
                        if (table.foreignKeys != null) {
                            for (ForeignKeyDef fk : table.foreignKeys) {
                                checkForeignKeyConstraintsAsChildTable(fk, values, context, transaction);
                            }
                        }
                        if (indexes != null) {
                            for (AbstractIndexManager index : indexes.values()) {
                                if (index.isUnique()) {
                                    Bytes indexKey = RecordSerializer.serializeIndexKey(values, index.getIndex(), index.getColumnNames());
                                    if (uniqueIndexes == null) {
                                        uniqueIndexes = new ArrayList<>(1);
                                    }
                                    UniqueIndexLockReference uniqueIndexLock = new UniqueIndexLockReference(index, indexKey);
                                    uniqueIndexes.add(uniqueIndexLock);
                                    LockHandle lockForIndex = lockForWrite(uniqueIndexLock.key, transaction, index.getIndexName(), index.getLockManager());
                                    if (transaction == null) {
                                        uniqueIndexLock.lockHandle = lockForIndex;
                                    }
                                    if (index.valueAlreadyMapped(indexKey, current.key)) {
                                        throw new UniqueIndexContraintViolationException(index.getIndexName(), indexKey, "Value " + indexKey + " already present in index " + index.getIndexName());
                                    }
                                } else {
                                    RecordSerializer.validateIndexableValue(values, index.getIndex(), index.getColumnNames());
                                }
                            }
                        }
                    }
                } catch (IllegalArgumentException | herddb.utils.IllegalDataAccessException | StatementExecutionException err) {
                    locksManager.releaseLock(lockHandle);
                    StatementExecutionException finalError;
                    if (!(err instanceof StatementExecutionException)) {
                        finalError = new StatementExecutionException(err.getMessage(), err);
                    } else {
                        finalError = (StatementExecutionException) err;
                    }
                    CompletableFuture<PendingLogEntryWork> res = Futures.exception(finalError);
                    if (uniqueIndexes != null) {
                        for (UniqueIndexLockReference lock : uniqueIndexes) {
                            res = releaseWriteLock(res, lock.lockHandle, lock.indexManager.getLockManager());
                        }
                    }
                    writes.add(res);
                    return;
                }
                final long size = DataPage.estimateEntrySize(current.key, newValue);
                if (size > maxLogicalPageSize) {
                    locksManager.releaseLock(lockHandle);
                    writes.add(Futures.exception(new RecordTooBigException("New version of record " + current.key + " is too big to be updated: new size " + size + ", actual size " + DataPage.estimateEntrySize(current) + ", max size " + maxLogicalPageSize)));
                    return;
                }
                LogEntry entry = LogEntryFactory.update(table, current.key, Bytes.from_array(newValue), transaction);
                CommitLogResult pos = log.log(entry, entry.transactionId <= 0);
                final List<UniqueIndexLockReference> _uniqueIndexes = uniqueIndexes;
                writes.add(pos.logSequenceNumber.thenApply(lsn -> new PendingLogEntryWork(entry, pos, lockHandle, _uniqueIndexes)));
                lastKey.value = current.key;
                lastValue.value = newValue;
                updateCount.incrementAndGet();
            }
        }, transaction, true, true);
    } catch (HerdDBInternalException err) {
        LOGGER.log(Level.SEVERE, "bad error during an update", err);
        return Futures.exception(err);
    }
    if (writes.isEmpty()) {
        return CompletableFuture.completedFuture(new DMLStatementExecutionResult(transactionId, 0, null, null));
    }
    if (writes.size() == 1) {
        return writes.get(0).whenCompleteAsync((pending, error) -> {
            try {
                // apply the DML operation
                if (error == null) {
                    apply(pending.pos, pending.entry, false);
                }
            } finally {
                releaseMultiplePendingLogEntryWorks(writes);
            }
        }, tableSpaceManager.getCallbacksExecutor()).thenApply((pending) -> {
            return new DMLStatementExecutionResult(transactionId, updateCount.get(), lastKey.value, update.isReturnValues() ? (lastValue.value != null ? Bytes.from_array(lastValue.value) : null) : null);
        });
    } else {
        return Futures.collect(writes).whenCompleteAsync((pendings, error) -> {
            try {
                // apply all of the DML operations
                if (error == null) {
                    for (PendingLogEntryWork pending : pendings) {
                        apply(pending.pos, pending.entry, false);
                    }
                }
            } finally {
                releaseMultiplePendingLogEntryWorks(writes);
            }
        }, tableSpaceManager.getCallbacksExecutor()).thenApply((pendings) -> {
            return new DMLStatementExecutionResult(transactionId, updateCount.get(), lastKey.value, update.isReturnValues() ? (lastValue.value != null ? Bytes.from_array(lastValue.value) : null) : null);
        });
    }
}
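The overall shape of executeUpdateAsync is: log every mutation first, collecting one pending CompletableFuture per affected row; apply the log entries only once every write has completed; and release the acquired locks in a finally step whether or not the writes succeeded. Below is a reduced, JDK-only sketch of that completion pattern; the collect helper approximates what herddb.utils.Futures.collect is assumed to do (complete when all inputs complete, then gather the results).

import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.stream.Collectors;

public class CollectAndApplySketch {

    // JDK-only equivalent of a "collect" helper: complete when all inputs
    // complete, then materialize the individual results into a list
    static <T> CompletableFuture<List<T>> collect(List<CompletableFuture<T>> futures) {
        return CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]))
                .thenApply(v -> futures.stream().map(CompletableFuture::join).collect(Collectors.toList()));
    }

    public static void main(String[] args) {
        List<CompletableFuture<String>> writes = List.of(
                CompletableFuture.completedFuture("entry-1"),
                CompletableFuture.completedFuture("entry-2"));

        CompletableFuture<Integer> result = collect(writes)
                .whenComplete((entries, error) -> {
                    try {
                        if (error == null) {
                            // apply the logged entries only if every write succeeded
                            entries.forEach(e -> System.out.println("apply " + e));
                        }
                    } finally {
                        // always release locks, on success or failure
                        System.out.println("release locks");
                    }
                })
                .thenApply(entries -> entries.size());

        System.out.println("updateCount=" + result.join());
    }
}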
Use of herddb.model.commands.ScanStatement in project herddb by diennea.
The class AbstractSystemTableManager, method scan.
@Override
public DataScanner scan(ScanStatement statement, StatementEvaluationContext context, Transaction transaction, boolean lockRequired, boolean forWrite) throws StatementExecutionException {
Predicate predicate = statement.getPredicate();
MaterializedRecordSet recordSet = tableSpaceManager.getDbmanager().getRecordSetFactory().createRecordSet(table.columnNames, table.columns);
Iterable<Record> data = buildVirtualRecordList(transaction);
StreamSupport.stream(data.spliterator(), false).filter(record -> {
return (predicate == null || predicate.evaluate(record, context));
}).sorted(// enforce sort by PK
sortByPk).map(r -> r.getDataAccessor(table)).forEach(recordSet::add);
recordSet.writeFinished();
recordSet.sort(statement.getComparator());
recordSet.applyLimits(statement.getLimits(), context);
recordSet.applyProjection(statement.getProjection(), context);
return new SimpleDataScanner(transaction, recordSet);
}
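This system-table scan is a plain in-memory pipeline: generate the virtual rows, filter them with the statement predicate, force primary-key order, project to the output shape, and hand the rest (comparator, limits, projection of the statement) to the record set. A compact JDK-only model of the stream part, with the Row record, predicate and projection invented for the example:

import java.util.Comparator;
import java.util.List;
import java.util.function.Predicate;
import java.util.stream.StreamSupport;

public class SystemTableScanSketch {
    record Row(String name, long size) {}

    public static void main(String[] args) {
        // stand-in for buildVirtualRecordList: rows generated on the fly
        Iterable<Row> data = List.of(new Row("b", 20), new Row("a", 10), new Row("c", 5));

        Predicate<Row> predicate = r -> r.size() >= 10; // WHERE size >= 10
        Comparator<Row> sortByPk = Comparator.comparing(Row::name);

        // filter first, then enforce PK order, then project
        List<String> result = StreamSupport.stream(data.spliterator(), false)
                .filter(predicate)
                .sorted(sortByPk)
                .map(Row::name) // projection
                .toList();

        System.out.println(result); // [a, b]
    }
}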