use of herddb.log.CommitLogResult in project herddb by diennea.
the class TableManager method executeUpdate.
private StatementExecutionResult executeUpdate(UpdateStatement update, Transaction transaction, StatementEvaluationContext context) throws StatementExecutionException, DataStorageManagerException {
    AtomicInteger updateCount = new AtomicInteger();
    Holder<Bytes> lastKey = new Holder<>();
    Holder<byte[]> lastValue = new Holder<>();
    /*
     * An update can succeed only if the row is valid and the key is contained in the "keys" structure.
     * The update simply overrides the value of the row, assigning a null page to the row.
     * The update can have a 'where' predicate, evaluated against the decoded row; the update is
     * executed only if the predicate returns boolean 'true' (CAS operation).
     * Locks: the update takes a lock on the key.
     */
    RecordFunction function = update.getFunction();
    long transactionId = transaction != null ? transaction.transactionId : 0;
    Predicate predicate = update.getPredicate();
    ScanStatement scan = new ScanStatement(table.tablespace, table, predicate);
    accessTableData(scan, context, new ScanResultOperation() {
        @Override
        public void accept(Record actual) throws StatementExecutionException, LogNotAvailableException, DataStorageManagerException {
            byte[] newValue = function.computeNewValue(actual, context, tableContext);
            final long size = DataPage.estimateEntrySize(actual.key, newValue);
            if (size > maxLogicalPageSize) {
                throw new RecordTooBigException("New version of record " + actual.key + " is to big to be update: new size " + size + ", actual size " + DataPage.estimateEntrySize(actual) + ", max size " + maxLogicalPageSize);
            }
            LogEntry entry = LogEntryFactory.update(table, actual.key.data, newValue, transaction);
            CommitLogResult pos = log.log(entry, entry.transactionId <= 0);
            apply(pos, entry, false);
            lastKey.value = actual.key;
            lastValue.value = newValue;
            updateCount.incrementAndGet();
        }
    }, transaction, true, true);
    return new DMLStatementExecutionResult(transactionId, updateCount.get(), lastKey.value, update.isReturnValues() ? (lastValue.value != null ? Bytes.from_array(lastValue.value) : null) : null);
}
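In this DML path the second argument of log.log(...) selects the write mode: it is true only for auto-commit operations (entry.transactionId <= 0), so only those are forced to the log synchronously, while entries belonging to an open transaction can be deferred and are presumably made durable later by the commit record. A minimal sketch of that pattern in isolation, assuming the log and apply(...) members of the surrounding TableManager; the helper name logAndApply is hypothetical and not part of HerdDB:
private void logAndApply(LogEntry entry) throws LogNotAvailableException, DataStorageManagerException {
    // request a synchronous write only for auto-commit entries;
    // transactional entries are assumed to become durable with the later commit record
    boolean requireSync = entry.transactionId <= 0;
    CommitLogResult pos = log.log(entry, requireSync);
    // apply the mutation to the in-memory state; the last flag mirrors the calls above
    apply(pos, entry, false);
}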
use of herddb.log.CommitLogResult in project herddb by diennea.
the class TableManager method executeDelete.
private StatementExecutionResult executeDelete(DeleteStatement delete, Transaction transaction, StatementEvaluationContext context) throws StatementExecutionException, DataStorageManagerException {
    AtomicInteger updateCount = new AtomicInteger();
    Holder<Bytes> lastKey = new Holder<>();
    Holder<byte[]> lastValue = new Holder<>();
    long transactionId = transaction != null ? transaction.transactionId : 0;
    Predicate predicate = delete.getPredicate();
    ScanStatement scan = new ScanStatement(table.tablespace, table, predicate);
    accessTableData(scan, context, new ScanResultOperation() {
        @Override
        public void accept(Record actual) throws StatementExecutionException, LogNotAvailableException, DataStorageManagerException {
            LogEntry entry = LogEntryFactory.delete(table, actual.key.data, transaction);
            CommitLogResult pos = log.log(entry, entry.transactionId <= 0);
            apply(pos, entry, false);
            lastKey.value = actual.key;
            lastValue.value = actual.value.data;
            updateCount.incrementAndGet();
        }
    }, transaction, true, true);
    return new DMLStatementExecutionResult(transactionId, updateCount.get(), lastKey.value, delete.isReturnValues() ? (lastValue.value != null ? Bytes.from_array(lastValue.value) : null) : null);
}
use of herddb.log.CommitLogResult in project herddb by diennea.
the class TableSpaceManager method commitTransaction.
private CompletableFuture<StatementExecutionResult> commitTransaction(CommitTransactionStatement statement, StatementEvaluationContext context) throws StatementExecutionException {
    long txId = statement.getTransactionId();
    validateTransactionBeforeTxCommand(txId);
    LogEntry entry = LogEntryFactory.commitTransaction(txId);
    long lockStamp = context.getTableSpaceLock();
    boolean lockAcquired = false;
    if (lockStamp == 0) {
        lockStamp = acquireReadLock(statement);
        context.setTableSpaceLock(lockStamp);
        lockAcquired = true;
    }
    CommitLogResult pos = log.log(entry, true);
    CompletableFuture<StatementExecutionResult> res = pos.logSequenceNumber.handleAsync((lsn, error) -> {
        if (error == null) {
            apply(pos, entry, false);
            return new TransactionResult(txId, TransactionResult.OutcomeType.COMMIT);
        } else {
            // if the log is not able to write the commit record,
            // apply a dummy "rollback": we are not going to accept any more commands
            // in the scope of this transaction
            LogEntry rollback = LogEntryFactory.rollbackTransaction(txId);
            apply(new CommitLogResult(LogSequenceNumber.START_OF_TIME, false, false), rollback, false);
            throw new CompletionException(error);
        }
    }, callbacksExecutor);
    if (lockAcquired) {
        res = releaseReadLock(res, lockStamp, statement).thenApply(s -> {
            context.setTableSpaceLock(0);
            return s;
        });
    }
    return res;
}
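Unlike the DML paths, the commit record is always logged synchronously (log.log(entry, true)), and the returned CommitLogResult is consumed through its logSequenceNumber future instead of being applied inline. A short sketch of how a caller could observe that outcome, assuming logSequenceNumber is a CompletableFuture<LogSequenceNumber> as the handleAsync call above implies; the log messages are illustrative:
CommitLogResult pos = log.log(entry, true);
pos.logSequenceNumber.whenComplete((lsn, error) -> {
    if (error != null) {
        // the commit record never reached the log: the transaction cannot be considered committed
        LOGGER.log(Level.SEVERE, "commit write failed for tx " + txId, error);
    } else {
        LOGGER.log(Level.FINE, "transaction {0} durable at {1}", new Object[] { txId, lsn });
    }
});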
use of herddb.log.CommitLogResult in project herddb by diennea.
the class TableSpaceManager method checkpoint.
// visible for testing
public TableSpaceCheckpoint checkpoint(boolean full, boolean pin, boolean alreadLocked) throws DataStorageManagerException, LogNotAvailableException {
    if (virtual) {
        return null;
    }
    if (recoveryInProgress) {
        LOGGER.log(Level.INFO, "Checkpoint for tablespace {0} skipped. Recovery is still in progress", tableSpaceName);
        return null;
    }
    long _start = System.currentTimeMillis();
    LogSequenceNumber logSequenceNumber = null;
    LogSequenceNumber _logSequenceNumber = null;
    Map<String, LogSequenceNumber> checkpointsTableNameSequenceNumber = new HashMap<>();
    try {
        List<PostCheckpointAction> actions = new ArrayList<>();
        long lockStamp = 0;
        if (!alreadLocked) {
            lockStamp = acquireWriteLock("checkpoint");
        }
        try {
            logSequenceNumber = log.getLastSequenceNumber();
            if (logSequenceNumber.isStartOfTime()) {
                LOGGER.log(Level.INFO, "{0} checkpoint {1} at {2}. skipped (no write ever issued to log)", new Object[] { nodeId, tableSpaceName, logSequenceNumber });
                return new TableSpaceCheckpoint(logSequenceNumber, checkpointsTableNameSequenceNumber);
            }
            LOGGER.log(Level.INFO, "{0} checkpoint start {1} at {2}", new Object[] { nodeId, tableSpaceName, logSequenceNumber });
            if (actualLogSequenceNumber == null) {
                throw new DataStorageManagerException("actualLogSequenceNumber cannot be null");
            }
            // TODO: transactions checkpoint is not atomic
            Collection<Transaction> currentTransactions = new ArrayList<>(transactions.values());
            for (Transaction t : currentTransactions) {
                LogSequenceNumber txLsn = t.lastSequenceNumber;
                if (txLsn != null && txLsn.after(logSequenceNumber)) {
                    LOGGER.log(Level.SEVERE, "Found transaction {0} with LSN {1} in the future", new Object[] { t.transactionId, txLsn });
                }
            }
            actions.addAll(dataStorageManager.writeTransactionsAtCheckpoint(tableSpaceUUID, logSequenceNumber, currentTransactions));
            actions.addAll(writeTablesOnDataStorageManager(new CommitLogResult(logSequenceNumber, false, true), true));
            // we checkpoint all data to disk and save the actual log sequence number
            for (AbstractTableManager tableManager : tables.values()) {
                if (!tableManager.isSystemTable()) {
                    TableCheckpoint checkpoint = full ? tableManager.fullCheckpoint(pin) : tableManager.checkpoint(pin);
                    if (checkpoint != null) {
                        LOGGER.log(Level.INFO, "checkpoint done for table {0}.{1} (pin: {2})", new Object[] { tableSpaceName, tableManager.getTable().name, pin });
                        actions.addAll(checkpoint.actions);
                        checkpointsTableNameSequenceNumber.put(checkpoint.tableName, checkpoint.sequenceNumber);
                        if (afterTableCheckPointAction != null) {
                            afterTableCheckPointAction.run();
                        }
                    }
                }
            }
            // we are sure that all data has been flushed; upon recovery we will replay the log starting from this position
            actions.addAll(dataStorageManager.writeCheckpointSequenceNumber(tableSpaceUUID, logSequenceNumber));
            /* Indexes checkpoint is handled by TableManagers */
            if (leader) {
                log.dropOldLedgers(logSequenceNumber);
            }
            _logSequenceNumber = log.getLastSequenceNumber();
        } finally {
            if (!alreadLocked) {
                releaseWriteLock(lockStamp, "checkpoint");
            }
        }
        for (PostCheckpointAction action : actions) {
            try {
                action.run();
            } catch (Exception error) {
                LOGGER.log(Level.SEVERE, "postcheckpoint error:" + error, error);
            }
        }
        return new TableSpaceCheckpoint(logSequenceNumber, checkpointsTableNameSequenceNumber);
    } finally {
        long _stop = System.currentTimeMillis();
        LOGGER.log(Level.INFO, "{0} checkpoint finish {1} started ad {2}, finished at {3}, total time {4} ms", new Object[] { nodeId, tableSpaceName, logSequenceNumber, _logSequenceNumber, Long.toString(_stop - _start) });
        checkpointTimeStats.registerSuccessfulEvent(_stop, TimeUnit.MILLISECONDS);
    }
}
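Note the CommitLogResult built by hand near the top of the method: new CommitLogResult(logSequenceNumber, false, true). During a checkpoint no new entry is appended to the log; the already-known last sequence number is wrapped in a CommitLogResult and handed to writeTablesOnDataStorageManager(...) as if it came from a log write. A reduced sketch of that step, assuming the surrounding TableSpaceManager members; the meaning of the two boolean flags is an assumption, inferred by comparison with the (START_OF_TIME, false, false) result used for the dummy rollback in commitTransaction above:
LogSequenceNumber lastWritten = log.getLastSequenceNumber();
// wrap a position that already exists in the log instead of appending a new entry
// (flags copied from the checkpoint code above; assumed to mean: not deferred, already synced)
CommitLogResult atCheckpoint = new CommitLogResult(lastWritten, false, true);
actions.addAll(writeTablesOnDataStorageManager(atCheckpoint, true));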
use of herddb.log.CommitLogResult in project herddb by diennea.
the class TableSpaceManager method dropTable.
private StatementExecutionResult dropTable(DropTableStatement statement, Transaction transaction, StatementEvaluationContext context) throws StatementExecutionException {
    boolean lockAcquired = false;
    if (context.getTableSpaceLock() == 0) {
        long lockStamp = acquireWriteLock(statement);
        context.setTableSpaceLock(lockStamp);
        lockAcquired = true;
    }
    try {
        String tableNameUpperCase = statement.getTable().toUpperCase();
        String tableNameNormalized = tables.keySet().stream().filter(t -> t.toUpperCase().equals(tableNameUpperCase)).findFirst().orElse(statement.getTable());
        AbstractTableManager tableManager = tables.get(tableNameNormalized);
        if (tableManager == null) {
            if (statement.isIfExists()) {
                return new DDLStatementExecutionResult(transaction != null ? transaction.transactionId : 0);
            }
            throw new TableDoesNotExistException("table does not exist " + tableNameNormalized + " on tableSpace " + statement.getTableSpace());
        }
        if (transaction != null && transaction.isTableDropped(tableNameNormalized)) {
            if (statement.isIfExists()) {
                return new DDLStatementExecutionResult(transaction.transactionId);
            }
            throw new TableDoesNotExistException("table does not exist " + tableNameNormalized + " on tableSpace " + statement.getTableSpace());
        }
        Table table = tableManager.getTable();
        Table[] childrenTables = collectChildrenTables(table);
        if (childrenTables != null) {
            String errorMsg = "Cannot drop table " + table.tablespace + "." + table.name + " because it has children tables: " + Stream.of(childrenTables).map(t -> t.name).collect(Collectors.joining(","));
            throw new StatementExecutionException(errorMsg);
        }
        Map<String, AbstractIndexManager> indexesOnTable = indexesByTable.get(tableNameNormalized);
        if (indexesOnTable != null) {
            for (String index : new ArrayList<>(indexesOnTable.keySet())) {
                LogEntry entry = LogEntryFactory.dropIndex(index, transaction);
                CommitLogResult pos = log.log(entry, entry.transactionId <= 0);
                apply(pos, entry, false);
            }
        }
        LogEntry entry = LogEntryFactory.dropTable(tableNameNormalized, transaction);
        CommitLogResult pos = log.log(entry, entry.transactionId <= 0);
        apply(pos, entry, false);
        return new DDLStatementExecutionResult(entry.transactionId);
    } catch (DataStorageManagerException | LogNotAvailableException err) {
        throw new StatementExecutionException(err);
    } finally {
        if (lockAcquired) {
            releaseWriteLock(context.getTableSpaceLock(), statement);
            context.setTableSpaceLock(0);
        }
    }
}
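All of the methods above are internal engine code; from a client's point of view they are reached through ordinary SQL statements. A minimal usage sketch through HerdDB's JDBC driver, assuming a local server with default credentials and the jdbc:herddb:server:host:port URL form; the table name, credentials and port are illustrative and may need adjusting for a real deployment:
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class CommitLogResultPathsExample {
    public static void main(String[] args) throws Exception {
        // connection string and credentials are assumed defaults, not taken from the snippets above
        try (Connection con = DriverManager.getConnection("jdbc:herddb:server:localhost:7000", "sa", "hdb");
             Statement s = con.createStatement()) {
            s.executeUpdate("UPDATE customers SET name = 'foo' WHERE id = 1"); // TableManager.executeUpdate
            s.executeUpdate("DELETE FROM customers WHERE id = 2");             // TableManager.executeDelete
            s.executeUpdate("DROP TABLE customers");                           // TableSpaceManager.dropTable
        }
    }
}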