Use of herddb.utils.LockHandle in project herddb by diennea.
Class TableManager, method lockForWrite.
private LockHandle lockForWrite(Bytes key, Transaction transaction) {
    if (transaction != null) {
        LockHandle lock = transaction.lookupLock(table.name, key);
        if (lock != null) {
            if (lock.write) {
                // transaction already locked the key for writes
                return lock;
            } else {
                // transaction already locked the key, but we need to upgrade the lock
                locksManager.releaseLock(lock);
                transaction.unregisterUpgradedLocksOnTable(table.name, lock);
                lock = locksManager.acquireWriteLockForKey(key);
                transaction.registerLockOnTable(this.table.name, lock);
                return lock;
            }
        } else {
            lock = locksManager.acquireWriteLockForKey(key);
            transaction.registerLockOnTable(this.table.name, lock);
            return lock;
        }
    } else {
        return locksManager.acquireWriteLockForKey(key);
    }
}
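A minimal caller sketch (not herddb source) illustrating how the returned LockHandle is meant to be handled, assuming the same TableManager fields (locksManager, table) are in scope; writeRecord is a hypothetical placeholder for the actual mutation:

private void writeWithLock(Bytes key, Transaction transaction) {
    LockHandle lock = lockForWrite(key, transaction);
    try {
        // writeRecord(key) is a hypothetical placeholder for the real mutation
        writeRecord(key);
    } finally {
        if (transaction == null) {
            // auto-commit path: the caller owns the handle and must return it to the lock manager
            locksManager.releaseLock(lock);
        }
        // transactional path: the handle was registered on the transaction above,
        // so it is presumably released when the transaction completes
    }
}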
Use of herddb.utils.LockHandle in project herddb by diennea.
Class TableManager, method lockForWrite (static overload).
private static LockHandle lockForWrite(Bytes key, Transaction transaction, String lockKey, ILocalLockManager locksManager) {
    // LOGGER.log(Level.SEVERE, "lockForWrite for " + key + " tx " + transaction);
    try {
        if (transaction != null) {
            LockHandle lock = transaction.lookupLock(lockKey, key);
            if (lock != null) {
                if (lock.write) {
                    // transaction already locked the key for writes
                    return lock;
                } else {
                    // transaction already locked the key, but we need to upgrade the lock
                    locksManager.releaseLock(lock);
                    transaction.unregisterUpgradedLocksOnTable(lockKey, lock);
                    lock = locksManager.acquireWriteLockForKey(key);
                    transaction.registerLockOnTable(lockKey, lock);
                    return lock;
                }
            } else {
                lock = locksManager.acquireWriteLockForKey(key);
                transaction.registerLockOnTable(lockKey, lock);
                return lock;
            }
        } else {
            return locksManager.acquireWriteLockForKey(key);
        }
    } catch (HerdDBInternalException err) {
        // lock timeout or other internal lock manager error
        throw err;
    } catch (RuntimeException err) {
        // lock timeout or other internal lock manager error
        throw new StatementExecutionException(err);
    }
}
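The static overload takes an explicit lock name and lock manager, so the same upgrade logic can be reused for unique secondary indexes; the call below is condensed from the executeDeleteAsync snippet further down (index, dataAccessor, transaction and uniqueIndexLock are assumed to be in scope):

// condensed from executeDeleteAsync below: lock the unique index entry before deleting the row
Bytes indexKey = RecordSerializer.serializeIndexKey(dataAccessor, index.getIndex(), index.getColumnNames());
LockHandle lockForIndex = lockForWrite(indexKey, transaction, index.getIndexName(), index.getLockManager());
if (transaction == null) {
    // without a transaction, the handle must be kept so it can be released
    // on the index's own lock manager once the operation is finished
    uniqueIndexLock.lockHandle = lockForIndex;
}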
Use of herddb.utils.LockHandle in project herddb by diennea.
Class TableManager, method scanNoStream.
private DataScanner scanNoStream(ScanStatement statement, StatementEvaluationContext context, Transaction transaction, boolean lockRequired, boolean forWrite) throws StatementExecutionException {
    if (transaction != null) {
        transaction.increaseRefcount();
    }
    try {
        boolean sorted = statement.getComparator() != null;
        boolean sortedByClusteredIndex = statement.getComparator() != null && statement.getComparator().isOnlyPrimaryKeyAndAscending() && keyToPageSortedAscending;
        final Projection projection = statement.getProjection();
        boolean applyProjectionDuringScan = !sorted && projection != null;
        MaterializedRecordSet recordSet;
        if (applyProjectionDuringScan) {
            recordSet = tableSpaceManager.getDbmanager().getRecordSetFactory().createRecordSet(projection.getFieldNames(), projection.getColumns());
        } else {
            recordSet = tableSpaceManager.getDbmanager().getRecordSetFactory().createRecordSet(table.columnNames, table.columns);
        }
        ScanLimits limits = statement.getLimits();
        int maxRows = limits == null ? 0 : limits.computeMaxRows(context);
        int offset = limits == null ? 0 : limits.computeOffset(context);
        boolean sortDone = false;
        if (maxRows > 0) {
            if (sortedByClusteredIndex) {
                // leverage the sorted nature of the clustered primary key index
                AtomicInteger remaining = new AtomicInteger(maxRows);
                if (offset > 0) {
                    remaining.getAndAdd(offset);
                }
                accessTableData(statement, context, new ScanResultOperation() {
                    private boolean inTransactionData;

                    @Override
                    public void beginNewRecordsInTransactionBlock() {
                        inTransactionData = true;
                    }

                    @Override
                    public void accept(Record record, LockHandle lockHandle) throws StatementExecutionException {
                        try {
                            if (applyProjectionDuringScan) {
                                DataAccessor tuple = projection.map(record.getDataAccessor(table), context);
                                recordSet.add(tuple);
                            } else {
                                recordSet.add(record.getDataAccessor(table));
                            }
                            if (!inTransactionData) {
                                // count only records coming in the same order as the clustered index
                                if (remaining.decrementAndGet() == 0) {
                                    // we want to receive uncommitted transaction records too
                                    throw new ExitLoop(true);
                                }
                            }
                        } finally {
                            locksManager.releaseLock(lockHandle);
                        }
                    }
                }, transaction, lockRequired, forWrite);
                // we have to sort the data anyway, because accessTableData will return partially sorted data
                sortDone = transaction == null;
            } else if (sorted) {
                InStreamTupleSorter sorter = new InStreamTupleSorter(offset + maxRows, statement.getComparator());
                accessTableData(statement, context, new ScanResultOperation() {
                    @Override
                    public void accept(Record record, LockHandle lockHandle) throws StatementExecutionException {
                        try {
                            if (applyProjectionDuringScan) {
                                DataAccessor tuple = projection.map(record.getDataAccessor(table), context);
                                sorter.collect(tuple);
                            } else {
                                sorter.collect(record.getDataAccessor(table));
                            }
                        } finally {
                            locksManager.releaseLock(lockHandle);
                        }
                    }
                }, transaction, lockRequired, forWrite);
                sorter.flushToRecordSet(recordSet);
                sortDone = true;
            } else {
                // if no sort is present the limits can be applied during the scan, performing an early exit
                AtomicInteger remaining = new AtomicInteger(maxRows);
                if (offset > 0) {
                    remaining.getAndAdd(offset);
                }
                accessTableData(statement, context, new ScanResultOperation() {
                    @Override
                    public void accept(Record record, LockHandle lockHandle) throws StatementExecutionException {
                        try {
                            if (applyProjectionDuringScan) {
                                DataAccessor tuple = projection.map(record.getDataAccessor(table), context);
                                recordSet.add(tuple);
                            } else {
                                recordSet.add(record.getDataAccessor(table));
                            }
                            if (remaining.decrementAndGet() == 0) {
                                throw new ExitLoop(false);
                            }
                        } finally {
                            locksManager.releaseLock(lockHandle);
                        }
                    }
                }, transaction, lockRequired, forWrite);
            }
        } else {
            accessTableData(statement, context, new ScanResultOperation() {
                @Override
                public void accept(Record record, LockHandle lockHandle) throws StatementExecutionException {
                    try {
                        if (applyProjectionDuringScan) {
                            DataAccessor tuple = projection.map(record.getDataAccessor(table), context);
                            recordSet.add(tuple);
                        } else {
                            recordSet.add(record.getDataAccessor(table));
                        }
                    } finally {
                        locksManager.releaseLock(lockHandle);
                    }
                }
            }, transaction, lockRequired, forWrite);
        }
        recordSet.writeFinished();
        if (!sortDone) {
            recordSet.sort(statement.getComparator());
        }
        recordSet.applyLimits(statement.getLimits(), context);
        if (!applyProjectionDuringScan) {
            recordSet.applyProjection(statement.getProjection(), context);
        }
        return new SimpleDataScanner(transaction, recordSet);
    } finally {
        if (transaction != null) {
            transaction.decreaseRefCount();
        }
    }
}
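All four branches above repeat the same per-record discipline for the LockHandle passed to the callback: whatever the consumer does with the record, the handle is returned to the lock manager in a finally block. A stripped-down sketch of that callback, assuming recordSet, table and locksManager are in scope as in the method above:

ScanResultOperation collector = new ScanResultOperation() {
    @Override
    public void accept(Record record, LockHandle lockHandle) throws StatementExecutionException {
        try {
            recordSet.add(record.getDataAccessor(table));
        } finally {
            // always hand the scan lock back, even if adding the record failed
            locksManager.releaseLock(lockHandle);
        }
    }
};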
Use of herddb.utils.LockHandle in project herddb by diennea.
Class TableManager, method accessRecord.
public Record accessRecord(Map.Entry<Bytes, Long> entry, Predicate predicate, StatementEvaluationContext context, Transaction transaction, LocalScanPageCache lastPageRead, boolean primaryIndexSeek, boolean forWrite, boolean acquireLock) {
    Bytes key = entry.getKey();
    boolean keep_lock = false;
    boolean already_locked = transaction != null && transaction.lookupLock(table.name, key) != null;
    LockHandle lock = acquireLock ? (forWrite ? lockForWrite(key, transaction) : lockForRead(key, transaction)) : null;
    try {
        if (transaction != null) {
            transaction.touch();
            if (transaction.recordDeleted(table.name, key)) {
                // skip this record, it has been deleted inside the current transaction
                return null;
            }
            Record record = transaction.recordUpdated(table.name, key);
            if (record != null) {
                // use the current transaction's version of the record
                if (predicate == null || predicate.evaluate(record, context)) {
                    keep_lock = context.isForceRetainReadLock() || (lock != null && lock.write);
                    return record;
                }
                return null;
            }
        }
        Long pageId = entry.getValue();
        if (pageId != null) {
            boolean pkFilterCompleteMatch = false;
            if (!primaryIndexSeek && predicate != null) {
                Predicate.PrimaryKeyMatchOutcome outcome = predicate.matchesRawPrimaryKey(key, context);
                if (outcome == Predicate.PrimaryKeyMatchOutcome.FAILED) {
                    return null;
                } else if (outcome == Predicate.PrimaryKeyMatchOutcome.FULL_CONDITION_VERIFIED) {
                    pkFilterCompleteMatch = true;
                }
            }
            Record record = fetchRecord(key, pageId, lastPageRead);
            if (record != null && (pkFilterCompleteMatch || predicate == null || predicate.evaluate(record, context))) {
                keep_lock = context.isForceRetainReadLock() || (lock != null && lock.write);
                return record;
            }
        }
        return null;
    } finally {
        // release the lock on the key if it did not match the scan criteria
        if (transaction == null) {
            if (lock != null) {
                locksManager.releaseLock(lock);
            }
        } else if (!keep_lock && !already_locked) {
            transaction.releaseLockOnKey(table.name, key, locksManager);
        }
    }
}
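The retention rule applied at the two return points above can be read as a single condition; a condensed sketch (not herddb source), where recordMatched is a hypothetical flag standing for "a record is about to be returned":

// keep the lock only for a matching record, and only when the context forces
// read-lock retention or the handle is a write lock; otherwise the finally
// block releases it (directly, or through the transaction)
boolean keepLock = recordMatched
        && (context.isForceRetainReadLock() || (lock != null && lock.write));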
Use of herddb.utils.LockHandle in project herddb by diennea.
Class TableManager, method executeDeleteAsync.
private CompletableFuture<StatementExecutionResult> executeDeleteAsync(DeleteStatement delete, Transaction transaction, StatementEvaluationContext context) {
    AtomicInteger updateCount = new AtomicInteger();
    Holder<Bytes> lastKey = new Holder<>();
    Holder<Bytes> lastValue = new Holder<>();
    long transactionId = transaction != null ? transaction.transactionId : 0;
    Predicate predicate = delete.getPredicate();
    List<CompletableFuture<PendingLogEntryWork>> writes = new ArrayList<>();
    Map<String, AbstractIndexManager> indexes = tableSpaceManager.getIndexesOnTable(table.name);
    ScanStatement scan = new ScanStatement(table.tablespace, table, predicate);
    try {
        accessTableData(scan, context, new ScanResultOperation() {
            @Override
            public void accept(Record current, LockHandle lockHandle) throws StatementExecutionException, LogNotAvailableException, DataStorageManagerException {
                // ensure we are holding the write locks on every unique index
                List<UniqueIndexLockReference> uniqueIndexes = null;
                try {
                    if (indexes != null || childrenTables != null) {
                        DataAccessor dataAccessor = current.getDataAccessor(table);
                        if (childrenTables != null) {
                            for (Table childTable : childrenTables) {
                                executeForeignKeyConstraintsAsParentTable(childTable, dataAccessor, context, transaction, true);
                            }
                        }
                        if (indexes != null) {
                            for (AbstractIndexManager index : indexes.values()) {
                                if (index.isUnique()) {
                                    Bytes indexKey = RecordSerializer.serializeIndexKey(dataAccessor, index.getIndex(), index.getColumnNames());
                                    if (uniqueIndexes == null) {
                                        uniqueIndexes = new ArrayList<>(1);
                                    }
                                    UniqueIndexLockReference uniqueIndexLock = new UniqueIndexLockReference(index, indexKey);
                                    uniqueIndexes.add(uniqueIndexLock);
                                    LockHandle lockForIndex = lockForWrite(uniqueIndexLock.key, transaction, index.getIndexName(), index.getLockManager());
                                    if (transaction == null) {
                                        uniqueIndexLock.lockHandle = lockForIndex;
                                    }
                                }
                            }
                        }
                    }
                } catch (IllegalArgumentException | herddb.utils.IllegalDataAccessException | StatementExecutionException err) {
                    locksManager.releaseLock(lockHandle);
                    StatementExecutionException finalError;
                    if (!(err instanceof StatementExecutionException)) {
                        finalError = new StatementExecutionException(err.getMessage(), err);
                    } else {
                        finalError = (StatementExecutionException) err;
                    }
                    CompletableFuture<PendingLogEntryWork> res = Futures.exception(finalError);
                    if (uniqueIndexes != null) {
                        for (UniqueIndexLockReference lock : uniqueIndexes) {
                            // release each unique-index handle on its own lock manager
                            res = releaseWriteLock(res, lock.lockHandle, lock.indexManager.getLockManager());
                        }
                    }
                    writes.add(res);
                    return;
                }
                LogEntry entry = LogEntryFactory.delete(table, current.key, transaction);
                CommitLogResult pos = log.log(entry, entry.transactionId <= 0);
                final List<UniqueIndexLockReference> _uniqueIndexes = uniqueIndexes;
                writes.add(pos.logSequenceNumber.thenApply(lsn -> new PendingLogEntryWork(entry, pos, lockHandle, _uniqueIndexes)));
                lastKey.value = current.key;
                lastValue.value = current.value;
                updateCount.incrementAndGet();
            }
        }, transaction, true, true);
    } catch (HerdDBInternalException err) {
        LOGGER.log(Level.SEVERE, "bad error during a delete", err);
        return Futures.exception(err);
    }
    if (writes.isEmpty()) {
        return CompletableFuture.completedFuture(new DMLStatementExecutionResult(transactionId, 0, null, null));
    }
    if (writes.size() == 1) {
        return writes.get(0).whenCompleteAsync((pending, error) -> {
            try {
                // apply any of the DML operations
                if (error == null) {
                    apply(pending.pos, pending.entry, false);
                }
            } finally {
                releaseMultiplePendingLogEntryWorks(writes);
            }
        }, tableSpaceManager.getCallbacksExecutor()).thenApply((pending) -> {
            return new DMLStatementExecutionResult(transactionId, updateCount.get(), lastKey.value, delete.isReturnValues() ? lastValue.value : null);
        });
    } else {
        return Futures.collect(writes).whenCompleteAsync((pendings, error) -> {
            try {
                // apply any of the DML operations
                if (error == null) {
                    for (PendingLogEntryWork pending : pendings) {
                        apply(pending.pos, pending.entry, false);
                    }
                }
            } finally {
                releaseMultiplePendingLogEntryWorks(writes);
            }
        }, tableSpaceManager.getCallbacksExecutor()).thenApply((pendings) -> {
            return new DMLStatementExecutionResult(transactionId, updateCount.get(), lastKey.value, delete.isReturnValues() ? lastValue.value : null);
        });
    }
}
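The record LockHandle and any unique-index handles travel inside PendingLogEntryWork and are released by releaseMultiplePendingLogEntryWorks only after the log entries have been applied. A hypothetical sketch of what that release step amounts to, based only on the fields visible above (the PendingLogEntryWork accessors lockHandle and uniqueIndexes are assumed to mirror its constructor arguments):

for (PendingLogEntryWork pending : pendings) {
    if (transaction == null) {
        // auto-commit: the primary-key handle goes back to the table lock manager
        locksManager.releaseLock(pending.lockHandle);
        if (pending.uniqueIndexes != null) {
            for (UniqueIndexLockReference ref : pending.uniqueIndexes) {
                // each unique-index handle goes back to its own index lock manager
                ref.indexManager.getLockManager().releaseLock(ref.lockHandle);
            }
        }
    }
    // with a transaction, all handles stay registered on the transaction
    // and are released when it commits or rolls back
}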