Use of herddb.model.Transaction in project herddb by diennea.
Class TableManager, method scanWithStream:
private DataScanner scanWithStream(ScanStatement statement, StatementEvaluationContext context, Transaction transaction, boolean lockRequired, boolean forWrite) throws StatementExecutionException {
    if (transaction != null) {
        transaction.increaseRefcount();
    }
    try {
        final TupleComparator comparator = statement.getComparator();
        boolean sorted = comparator != null;
        boolean sortedByClusteredIndex = comparator != null && comparator.isOnlyPrimaryKeyAndAscending() && keyToPageSortedAscending;
        final Projection projection = statement.getProjection();
        final boolean applyProjectionDuringScan = projection != null && !sorted;
        ScanLimits limits = statement.getLimits();
        int maxRows = limits == null ? 0 : limits.computeMaxRows(context);
        int offset = limits == null ? 0 : limits.computeOffset(context);
        Stream<DataAccessor> result;
        Function<Record, DataAccessor> mapper = (Record record) -> {
            DataAccessor tuple;
            if (applyProjectionDuringScan) {
                tuple = projection.map(record.getDataAccessor(table), context);
            } else {
                tuple = record.getDataAccessor(table);
            }
            return tuple;
        };
        Stream<Record> recordsFromTransactionSorted = streamTransactionData(transaction, statement.getPredicate(), context);
        Stream<DataAccessor> fromTransactionSorted = recordsFromTransactionSorted != null ? recordsFromTransactionSorted.map(mapper) : null;
        if (fromTransactionSorted != null && comparator != null) {
            fromTransactionSorted = fromTransactionSorted.sorted(comparator);
        }
        Stream<DataAccessor> tableData = streamTableData(statement, context, transaction, lockRequired, forWrite).map(mapper);
        if (maxRows > 0) {
            if (sortedByClusteredIndex) {
                // already sorted if needed
                if (fromTransactionSorted != null) {
                    // already sorted from index
                    tableData = tableData.limit(maxRows + offset);
                    fromTransactionSorted = fromTransactionSorted.limit(maxRows + offset);
                    // we need to re-sort after merging the data
                    result = Stream.concat(fromTransactionSorted, tableData).sorted(comparator);
                } else {
                    // already sorted from index
                    tableData = tableData.limit(maxRows + offset);
                    // no need to re-sort
                    result = tableData;
                }
            } else if (sorted) {
                // need to sort
                tableData = tableData.sorted(comparator);
                // already sorted if needed
                if (fromTransactionSorted != null) {
                    tableData = tableData.limit(maxRows + offset);
                    fromTransactionSorted = fromTransactionSorted.limit(maxRows + offset);
                    // we need to re-sort after merging the data
                    result = Stream.concat(fromTransactionSorted, tableData).sorted(comparator);
                } else {
                    tableData = tableData.limit(maxRows + offset);
                    // no need to sort again
                    result = tableData;
                }
            } else if (fromTransactionSorted == null) {
                result = tableData;
            } else {
                result = Stream.concat(fromTransactionSorted, tableData);
            }
        } else {
            if (sortedByClusteredIndex) {
                // already sorted from index
                if (fromTransactionSorted != null) {
                    tableData = tableData.sorted(comparator);
                    // fromTransactionSorted is already sorted
                    // we need to re-sort
                    result = Stream.concat(fromTransactionSorted, tableData).sorted(comparator);
                } else {
                    result = tableData;
                }
            } else if (sorted) {
                // we need to re-sort
                if (fromTransactionSorted != null) {
                    result = Stream.concat(fromTransactionSorted, tableData).sorted(comparator);
                } else {
                    result = tableData.sorted(comparator);
                }
            } else if (fromTransactionSorted != null) {
                // no need to sort
                result = Stream.concat(fromTransactionSorted, tableData);
            } else {
                result = tableData;
            }
        }
        if (offset > 0) {
            result = result.skip(offset);
        }
        if (maxRows > 0) {
            result = result.limit(maxRows);
        }
        if (!applyProjectionDuringScan && projection != null) {
            result = result.map(r -> projection.map(r, context));
        }
        String[] fieldNames;
        Column[] columns;
        if (projection != null) {
            fieldNames = projection.getFieldNames();
            columns = projection.getColumns();
        } else {
            fieldNames = table.columnNames;
            columns = table.columns;
        }
        return new StreamDataScanner(transaction, fieldNames, columns, result);
    } finally {
        if (transaction != null) {
            transaction.decreaseRefCount();
        }
    }
}
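The core trick in scanWithStream is that a LIMIT can be pushed below the final merge: when both inputs are already sorted, each side can be truncated to maxRows + offset before the concat, because no row beyond that point in either source can appear in the final window. A minimal, self-contained sketch of that pushdown, using only java.util.stream and integers standing in for DataAccessor rows:

import java.util.Comparator;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class LimitPushdownSketch {

    public static void main(String[] args) {
        Comparator<Integer> comparator = Comparator.naturalOrder();
        int maxRows = 3;
        int offset = 1;

        // Two independently sorted sources, standing in for the
        // transaction-local records and the on-disk table data.
        Stream<Integer> fromTransactionSorted = Stream.of(2, 5, 9);
        Stream<Integer> tableData = Stream.of(1, 4, 7, 8);

        // Because each source is already sorted, truncating each one to
        // maxRows + offset cannot drop a row that belongs in the final
        // window; the merge then only has to re-sort the survivors.
        List<Integer> window = Stream.concat(
                fromTransactionSorted.limit(maxRows + offset),
                tableData.limit(maxRows + offset))
                .sorted(comparator)
                .skip(offset)
                .limit(maxRows)
                .collect(Collectors.toList());

        System.out.println(window); // prints [2, 4, 5]
    }
}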
Use of herddb.model.Transaction in project herddb by diennea.
Class TableManager, method streamTableData:
private Stream<Record> streamTableData(ScanStatement statement, StatementEvaluationContext context, Transaction transaction, boolean lockRequired, boolean forWrite) throws StatementExecutionException {
    statement.validateContext(context);
    Predicate predicate = statement.getPredicate();
    boolean acquireLock = transaction != null || forWrite || lockRequired;
    LocalScanPageCache lastPageRead = acquireLock ? null : new LocalScanPageCache();
    IndexOperation indexOperation = predicate != null ? predicate.getIndexOperation() : null;
    boolean primaryIndexSeek = indexOperation instanceof PrimaryIndexSeek;
    AbstractIndexManager useIndex = getIndexForTbleAccess(indexOperation);
    Stream<Map.Entry<Bytes, Long>> scanner = keyToPage.scanner(indexOperation, context, tableContext, useIndex);
    Stream<Record> resultFromTable = scanner.map(entry -> {
        return accessRecord(entry, predicate, context, transaction, lastPageRead, primaryIndexSeek, forWrite, acquireLock);
    }).filter(r -> r != null);
    return resultFromTable;
}
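streamTableData never materializes the scan: the keyToPage scanner yields (key, page) entries lazily, accessRecord resolves each entry to a row, and rows rejected by the predicate come back as null and are filtered out. A small sketch of the same map-then-filter-null shape, with a hypothetical accessRecord standing in for the real record resolution:

import java.util.AbstractMap;
import java.util.Map;
import java.util.Objects;
import java.util.stream.Stream;

public class LazyScanSketch {

    // Hypothetical stand-in for accessRecord: resolves a primary-key
    // entry to a row, returning null when the predicate rejects it.
    static String accessRecord(Map.Entry<String, Long> entry) {
        return entry.getValue() % 2 == 0 ? entry.getKey() + "@page" + entry.getValue() : null;
    }

    public static void main(String[] args) {
        // Stand-in for keyToPage.scanner(...): a lazy stream of
        // (primary key, page id) entries.
        Stream<Map.Entry<String, Long>> scanner = Stream.of(
                new AbstractMap.SimpleEntry<>("a", 1L),
                new AbstractMap.SimpleEntry<>("b", 2L),
                new AbstractMap.SimpleEntry<>("c", 4L));

        // Same shape as streamTableData: resolve lazily, then drop the
        // rows that did not match the predicate.
        scanner.map(LazyScanSketch::accessRecord)
               .filter(Objects::nonNull)
               .forEach(System.out::println); // prints b@page2, c@page4
    }
}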
Use of herddb.model.Transaction in project herddb by diennea.
Class TableManager, method executeUpdateAsync:
private CompletableFuture<StatementExecutionResult> executeUpdateAsync(UpdateStatement update, Transaction transaction, StatementEvaluationContext context) throws StatementExecutionException, DataStorageManagerException {
    // LOGGER.log(Level.SEVERE, "executeUpdateAsync, " + update + ", transaction " + transaction);
    AtomicInteger updateCount = new AtomicInteger();
    Holder<Bytes> lastKey = new Holder<>();
    Holder<byte[]> lastValue = new Holder<>();
    /*
     * An update can succeed only if the row is valid and the key is contained in the "keys" structure.
     * The update simply overrides the value of the row, assigning a null page to the row.
     * The update can carry a 'where' predicate, evaluated against the decoded row; the update is
     * executed only if the predicate returns boolean 'true' (CAS operation).
     * Locks: the update takes a lock on the key.
     */
    RecordFunction function = update.getFunction();
    long transactionId = transaction != null ? transaction.transactionId : 0;
    Predicate predicate = update.getPredicate();
    Map<String, AbstractIndexManager> indexes = tableSpaceManager.getIndexesOnTable(table.name);
    ScanStatement scan = new ScanStatement(table.tablespace, table, predicate);
    List<CompletableFuture<PendingLogEntryWork>> writes = new ArrayList<>();
    try {
        accessTableData(scan, context, new ScanResultOperation() {
            @Override
            public void accept(Record current, LockHandle lockHandle) throws StatementExecutionException, LogNotAvailableException, DataStorageManagerException {
                List<UniqueIndexLockReference> uniqueIndexes = null;
                byte[] newValue;
                try {
                    if (childrenTables != null) {
                        DataAccessor currentValues = current.getDataAccessor(table);
                        for (Table childTable : childrenTables) {
                            executeForeignKeyConstraintsAsParentTable(childTable, currentValues, context, transaction, false);
                        }
                    }
                    newValue = function.computeNewValue(current, context, tableContext);
                    if (indexes != null || table.foreignKeys != null) {
                        DataAccessor values = new Record(current.key, Bytes.from_array(newValue)).getDataAccessor(table);
                        if (table.foreignKeys != null) {
                            for (ForeignKeyDef fk : table.foreignKeys) {
                                checkForeignKeyConstraintsAsChildTable(fk, values, context, transaction);
                            }
                        }
                        if (indexes != null) {
                            for (AbstractIndexManager index : indexes.values()) {
                                if (index.isUnique()) {
                                    Bytes indexKey = RecordSerializer.serializeIndexKey(values, index.getIndex(), index.getColumnNames());
                                    if (uniqueIndexes == null) {
                                        uniqueIndexes = new ArrayList<>(1);
                                    }
                                    UniqueIndexLockReference uniqueIndexLock = new UniqueIndexLockReference(index, indexKey);
                                    uniqueIndexes.add(uniqueIndexLock);
                                    LockHandle lockForIndex = lockForWrite(uniqueIndexLock.key, transaction, index.getIndexName(), index.getLockManager());
                                    if (transaction == null) {
                                        uniqueIndexLock.lockHandle = lockForIndex;
                                    }
                                    if (index.valueAlreadyMapped(indexKey, current.key)) {
                                        throw new UniqueIndexContraintViolationException(index.getIndexName(), indexKey, "Value " + indexKey + " already present in index " + index.getIndexName());
                                    }
                                } else {
                                    RecordSerializer.validateIndexableValue(values, index.getIndex(), index.getColumnNames());
                                }
                            }
                        }
                    }
                } catch (IllegalArgumentException | herddb.utils.IllegalDataAccessException | StatementExecutionException err) {
                    locksManager.releaseLock(lockHandle);
                    StatementExecutionException finalError;
                    if (!(err instanceof StatementExecutionException)) {
                        finalError = new StatementExecutionException(err.getMessage(), err);
                    } else {
                        finalError = (StatementExecutionException) err;
                    }
                    CompletableFuture<PendingLogEntryWork> res = Futures.exception(finalError);
                    if (uniqueIndexes != null) {
                        for (UniqueIndexLockReference lock : uniqueIndexes) {
                            res = releaseWriteLock(res, lock.lockHandle, lock.indexManager.getLockManager());
                        }
                    }
                    writes.add(res);
                    return;
                }
                final long size = DataPage.estimateEntrySize(current.key, newValue);
                if (size > maxLogicalPageSize) {
                    locksManager.releaseLock(lockHandle);
                    writes.add(Futures.exception(new RecordTooBigException("New version of record " + current.key + " is too big to be updated: new size " + size + ", actual size " + DataPage.estimateEntrySize(current) + ", max size " + maxLogicalPageSize)));
                    return;
                }
                LogEntry entry = LogEntryFactory.update(table, current.key, Bytes.from_array(newValue), transaction);
                CommitLogResult pos = log.log(entry, entry.transactionId <= 0);
                final List<UniqueIndexLockReference> _uniqueIndexes = uniqueIndexes;
                writes.add(pos.logSequenceNumber.thenApply(lsn -> new PendingLogEntryWork(entry, pos, lockHandle, _uniqueIndexes)));
                lastKey.value = current.key;
                lastValue.value = newValue;
                updateCount.incrementAndGet();
            }
        }, transaction, true, true);
    } catch (HerdDBInternalException err) {
        LOGGER.log(Level.SEVERE, "bad error during an update", err);
        return Futures.exception(err);
    }
    if (writes.isEmpty()) {
        return CompletableFuture.completedFuture(new DMLStatementExecutionResult(transactionId, 0, null, null));
    }
    if (writes.size() == 1) {
        return writes.get(0).whenCompleteAsync((pending, error) -> {
            try {
                // apply the DML operation
                if (error == null) {
                    apply(pending.pos, pending.entry, false);
                }
            } finally {
                releaseMultiplePendingLogEntryWorks(writes);
            }
        }, tableSpaceManager.getCallbacksExecutor()).thenApply((pending) -> {
            return new DMLStatementExecutionResult(transactionId, updateCount.get(), lastKey.value, update.isReturnValues() ? (lastValue.value != null ? Bytes.from_array(lastValue.value) : null) : null);
        });
    } else {
        return Futures.collect(writes).whenCompleteAsync((pendings, error) -> {
            try {
                // apply all of the DML operations
                if (error == null) {
                    for (PendingLogEntryWork pending : pendings) {
                        apply(pending.pos, pending.entry, false);
                    }
                }
            } finally {
                releaseMultiplePendingLogEntryWorks(writes);
            }
        }, tableSpaceManager.getCallbacksExecutor()).thenApply((pendings) -> {
            return new DMLStatementExecutionResult(transactionId, updateCount.get(), lastKey.value, update.isReturnValues() ? (lastValue.value != null ? Bytes.from_array(lastValue.value) : null) : null);
        });
    }
}
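executeUpdateAsync accumulates one CompletableFuture per touched row and, when there is more than one, joins them with herddb.utils.Futures.collect before applying the log entries. That utility is internal to HerdDB; a minimal stand-in with the same contract (all results on success, fail-fast on any error) can be sketched on top of CompletableFuture.allOf:

import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.stream.Collectors;

public class CollectFuturesSketch {

    // Minimal stand-in for herddb.utils.Futures.collect: completes with
    // every result once all pending writes are done, or completes
    // exceptionally if any write fails.
    static <T> CompletableFuture<List<T>> collect(List<CompletableFuture<T>> futures) {
        return CompletableFuture
                .allOf(futures.toArray(new CompletableFuture[0]))
                .thenApply(unused -> futures.stream()
                        .map(CompletableFuture::join) // safe: all completed here
                        .collect(Collectors.toList()));
    }

    public static void main(String[] args) {
        List<CompletableFuture<Integer>> writes = List.of(
                CompletableFuture.completedFuture(1),
                CompletableFuture.completedFuture(2));
        System.out.println(collect(writes).join()); // prints [1, 2]
    }
}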
Use of herddb.model.Transaction in project herddb by diennea.
Class TableSpaceManager, method scan:
public DataScanner scan(ScanStatement statement, StatementEvaluationContext context, TransactionContext transactionContext, boolean lockRequired, boolean forWrite) throws StatementExecutionException {
    boolean rollbackOnError = false;
    if (transactionContext.transactionId == TransactionContext.AUTOTRANSACTION_ID && (lockRequired || forWrite || context.isForceAcquireWriteLock() || context.isForceRetainReadLock())) {
        try {
            // sync on beginTransaction
            StatementExecutionResult newTransaction = Futures.result(beginTransactionAsync(context, true));
            transactionContext = new TransactionContext(newTransaction.transactionId);
            rollbackOnError = true;
        } catch (Exception err) {
            if (err.getCause() instanceof HerdDBInternalException) {
                throw (HerdDBInternalException) err.getCause();
            } else {
                throw new StatementExecutionException(err.getCause());
            }
        }
    }
    Transaction transaction = transactions.get(transactionContext.transactionId);
    if (transactionContext.transactionId > 0 && transaction == null) {
        throw new StatementExecutionException("transaction " + transactionContext.transactionId + " does not exist on tablespace " + tableSpaceName);
    }
    if (transaction != null && !transaction.tableSpace.equals(tableSpaceName)) {
        throw new StatementExecutionException("transaction " + transaction.transactionId + " is for tablespace " + transaction.tableSpace + ", not for " + tableSpaceName);
    }
    if (transaction != null) {
        transaction.touch();
    }
    try {
        String table = statement.getTable();
        AbstractTableManager tableManager = tables.get(table);
        if (tableManager == null) {
            throw new TableDoesNotExistException("no table " + table + " in tablespace " + tableSpaceName);
        }
        if (tableManager.getCreatedInTransaction() > 0) {
            if (transaction == null || transaction.transactionId != tableManager.getCreatedInTransaction()) {
                throw new TableDoesNotExistException("no table " + table + " in tablespace " + tableSpaceName + ", created temporarily in transaction " + tableManager.getCreatedInTransaction());
            }
        }
        return tableManager.scan(statement, context, transaction, lockRequired, forWrite);
    } catch (StatementExecutionException error) {
        if (rollbackOnError) {
            LOGGER.log(Level.FINE, tableSpaceName + " forcing rollback of implicit tx " + transactionContext.transactionId, error);
            try {
                rollbackTransaction(new RollbackTransactionStatement(tableSpaceName, transactionContext.transactionId), context).get();
            } catch (ExecutionException err) {
                throw new StatementExecutionException(err.getCause());
            } catch (InterruptedException ex) {
                Thread.currentThread().interrupt();
                error.addSuppressed(ex);
            }
        }
        throw error;
    }
}
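Note the rollbackOnError flag: when the caller passed AUTOTRANSACTION_ID but the scan needs to retain locks, the method silently begins a real transaction and takes responsibility for rolling it back if the scan fails. A compact sketch of that shape, with hypothetical doScan/rollback helpers in place of the real statement machinery:

import java.util.concurrent.atomic.AtomicLong;

public class AutoTransactionSketch {

    // Assumption: a sentinel id marks "no transaction chosen yet",
    // as TransactionContext.AUTOTRANSACTION_ID does above.
    static final long AUTOTRANSACTION_ID = -1;
    static final AtomicLong nextTxId = new AtomicLong();

    // Hypothetical wrapper mirroring TableSpaceManager.scan: upgrade to
    // a real transaction only when locks must be retained, and roll it
    // back if the scan itself fails.
    static String scan(long transactionId, boolean lockRequired) {
        boolean rollbackOnError = false;
        if (transactionId == AUTOTRANSACTION_ID && lockRequired) {
            transactionId = nextTxId.incrementAndGet(); // implicit begin
            rollbackOnError = true;
        }
        try {
            return doScan(transactionId);
        } catch (RuntimeException error) {
            if (rollbackOnError) {
                rollback(transactionId); // undo the implicit transaction
            }
            throw error;
        }
    }

    static String doScan(long txId) { return "scanned in tx " + txId; }

    static void rollback(long txId) { System.out.println("rollback tx " + txId); }

    public static void main(String[] args) {
        System.out.println(scan(AUTOTRANSACTION_ID, true)); // scanned in tx 1
    }
}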
Use of herddb.model.Transaction in project herddb by diennea.
Class TableSpaceManager, method sendTransactionsDump:
private void sendTransactionsDump(List<Transaction> batch, Channel channel, String dumpId, final int timeout) throws TimeoutException, InterruptedException {
    if (batch.isEmpty()) {
        return;
    }
    List<KeyValue> encodedTransactions = batch.stream().map(tr -> {
        return new KeyValue(Bytes.from_long(tr.transactionId), Bytes.from_array(tr.serialize()));
    }).collect(Collectors.toList());
    long id = channel.generateRequestId();
    try (Pdu response_to_transactionsData = channel.sendMessageWithPduReply(id, PduCodec.TablespaceDumpData.write(id, tableSpaceName, dumpId, "transactions", null, 0, 0, 0, null, encodedTransactions), timeout)) {
        if (response_to_transactionsData.type != Pdu.TYPE_ACK) {
            LOGGER.log(Level.SEVERE, "error response at transactionsData command");
        }
    }
    batch.clear();
}
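The batch protocol here is deliberately simple: encode each Transaction as a (transactionId, serialized state) KeyValue pair, ship the chunk, then clear the list so the caller can refill it. A toy version of the same shape, with hypothetical Tx/KeyValue records and a stub transport in place of the HerdDB Channel/Pdu API:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;

public class DumpBatchSketch {

    // Hypothetical stand-ins for herddb.utils.Bytes/KeyValue.
    record KeyValue(byte[] key, byte[] value) {}

    record Tx(long transactionId, String state) {
        byte[] serialize() {
            return state.getBytes(StandardCharsets.UTF_8);
        }
    }

    // Stub transport standing in for channel.sendMessageWithPduReply.
    static void send(List<KeyValue> encoded) {
        System.out.println("shipped " + encoded.size() + " transactions");
    }

    // Same shape as sendTransactionsDump: encode, ship, then clear the
    // batch so the caller can reuse the same list for the next chunk.
    static void sendTransactionsDump(List<Tx> batch) {
        if (batch.isEmpty()) {
            return;
        }
        List<KeyValue> encoded = batch.stream()
                .map(tr -> new KeyValue(
                        ByteBuffer.allocate(Long.BYTES).putLong(tr.transactionId()).array(),
                        tr.serialize()))
                .collect(Collectors.toList());
        send(encoded);
        batch.clear();
    }

    public static void main(String[] args) {
        List<Tx> batch = new ArrayList<>(List.of(new Tx(1, "prepared"), new Tx(2, "running")));
        sendTransactionsDump(batch); // shipped 2 transactions
        sendTransactionsDump(batch); // no-op: the batch was cleared
    }
}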