Use of herddb.log.LogNotAvailableException in project herddb by diennea.
The class BookkeeperCommitLog, method followTheLeader.
@Override
public void followTheLeader(LogSequenceNumber skipPast, BiConsumer<LogSequenceNumber, LogEntry> consumer) throws LogNotAvailableException {
    List<Long> actualList = metadataManager.getActualLedgersList(tableSpaceUUID).getActiveLedgers();
    List<Long> toRead = actualList;
    if (skipPast.ledgerId != -1) {
        toRead = toRead.stream().filter(l -> l >= skipPast.ledgerId).collect(Collectors.toList());
    }
    try {
        long nextEntry = skipPast.offset + 1;
        // LOGGER.log(Level.SEVERE, "followTheLeader "+tableSpace+" skipPast:{0} toRead: {1} actualList:{2}, nextEntry:{3}", new Object[]{skipPast, toRead, actualList, nextEntry});
        for (Long previous : toRead) {
            // LOGGER.log(Level.SEVERE, "followTheLeader openLedger " + previous + " nextEntry:" + nextEntry);
            LedgerHandle lh;
            try {
                lh = bookKeeper.openLedgerNoRecovery(previous, BookKeeper.DigestType.CRC32, sharedSecret.getBytes(StandardCharsets.UTF_8));
            } catch (BKException.BKLedgerRecoveryException e) {
                LOGGER.log(Level.SEVERE, "error", e);
                return;
            }
            try {
                long lastAddConfirmed = lh.getLastAddConfirmed();
                LOGGER.log(Level.FINE, "followTheLeader " + tableSpaceUUID + " openLedger {0} -> lastAddConfirmed:{1}, nextEntry:{2}", new Object[] { previous, lastAddConfirmed, nextEntry });
                if (nextEntry > lastAddConfirmed) {
                    nextEntry = 0;
                    continue;
                }
                Enumeration<LedgerEntry> entries = lh.readEntries(nextEntry, lh.getLastAddConfirmed());
                while (entries.hasMoreElements()) {
                    LedgerEntry e = entries.nextElement();
                    long entryId = e.getEntryId();
                    byte[] entryData = e.getEntry();
                    LogEntry statusEdit = LogEntry.deserialize(entryData);
                    // LOGGER.log(Level.SEVERE, "" + tableSpaceUUID + " followentry {0},{1} -> {2}", new Object[]{previous, entryId, statusEdit});
                    LogSequenceNumber number = new LogSequenceNumber(previous, entryId);
                    lastSequenceNumber.accumulateAndGet(number.offset, EnsureLongIncrementAccumulator.INSTANCE);
                    lastLedgerId = number.ledgerId;
                    currentLedgerId = number.ledgerId;
                    consumer.accept(number, statusEdit);
                }
            } finally {
                try {
                    lh.close();
                } catch (BKException err) {
                    LOGGER.log(Level.SEVERE, "error while closing ledger", err);
                } catch (InterruptedException err) {
                    LOGGER.log(Level.SEVERE, "error while closing ledger", err);
                    Thread.currentThread().interrupt();
                }
            }
        }
    } catch (InterruptedException | EOFException | BKException err) {
        LOGGER.log(Level.SEVERE, "internal error", err);
        throw new LogNotAvailableException(err);
    }
}
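A minimal caller-side sketch of followTheLeader, assuming a BookkeeperCommitLog reference named commitLog (the name is hypothetical); the follower passes the last position it has already applied and receives every newer entry through the BiConsumer:

// hypothetical follower loop; LogSequenceNumber(-1, -1) means "nothing applied yet",
// matching the skipPast.ledgerId != -1 check above
LogSequenceNumber lastApplied = new LogSequenceNumber(-1, -1);
try {
    commitLog.followTheLeader(lastApplied, (number, entry) -> {
        // apply the LogEntry to local state and remember 'number' for the next call
    });
} catch (LogNotAvailableException err) {
    // BookKeeper could not be read; the follower should back off and retry later
}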
Use of herddb.log.LogNotAvailableException in project herddb by diennea.
The class TableManager, method executeUpdate.
private StatementExecutionResult executeUpdate(UpdateStatement update, Transaction transaction, StatementEvaluationContext context) throws StatementExecutionException, DataStorageManagerException {
    AtomicInteger updateCount = new AtomicInteger();
    Holder<Bytes> lastKey = new Holder<>();
    Holder<byte[]> lastValue = new Holder<>();
    /*
     * An update can succeed only if the row is valid and the key is contained in the "keys" structure.
     * The update will simply override the value of the row, assigning a null page to the row.
     * The update can have a 'where' predicate which is evaluated against the decoded row; the update is executed only if the predicate returns boolean 'true' (CAS operation).
     * Locks: the update uses a lock on the key.
     */
    RecordFunction function = update.getFunction();
    long transactionId = transaction != null ? transaction.transactionId : 0;
    Predicate predicate = update.getPredicate();
    ScanStatement scan = new ScanStatement(table.tablespace, table, predicate);
    accessTableData(scan, context, new ScanResultOperation() {

        @Override
        public void accept(Record actual) throws StatementExecutionException, LogNotAvailableException, DataStorageManagerException {
            byte[] newValue = function.computeNewValue(actual, context, tableContext);
            final long size = DataPage.estimateEntrySize(actual.key, newValue);
            if (size > maxLogicalPageSize) {
                throw new RecordTooBigException("New version of record " + actual.key + " is to big to be update: new size " + size + ", actual size " + DataPage.estimateEntrySize(actual) + ", max size " + maxLogicalPageSize);
            }
            LogEntry entry = LogEntryFactory.update(table, actual.key.data, newValue, transaction);
            CommitLogResult pos = log.log(entry, entry.transactionId <= 0);
            apply(pos, entry, false);
            lastKey.value = actual.key;
            lastValue.value = newValue;
            updateCount.incrementAndGet();
        }
    }, transaction, true, true);
    return new DMLStatementExecutionResult(transactionId, updateCount.get(), lastKey.value, update.isReturnValues() ? (lastValue.value != null ? Bytes.from_array(lastValue.value) : null) : null);
}
Use of herddb.log.LogNotAvailableException in project herddb by diennea.
The class TableManager, method executeDelete.
private StatementExecutionResult executeDelete(DeleteStatement delete, Transaction transaction, StatementEvaluationContext context) throws StatementExecutionException, DataStorageManagerException {
    AtomicInteger updateCount = new AtomicInteger();
    Holder<Bytes> lastKey = new Holder<>();
    Holder<byte[]> lastValue = new Holder<>();
    long transactionId = transaction != null ? transaction.transactionId : 0;
    Predicate predicate = delete.getPredicate();
    ScanStatement scan = new ScanStatement(table.tablespace, table, predicate);
    accessTableData(scan, context, new ScanResultOperation() {

        @Override
        public void accept(Record actual) throws StatementExecutionException, LogNotAvailableException, DataStorageManagerException {
            LogEntry entry = LogEntryFactory.delete(table, actual.key.data, transaction);
            CommitLogResult pos = log.log(entry, entry.transactionId <= 0);
            apply(pos, entry, false);
            lastKey.value = actual.key;
            lastValue.value = actual.value.data;
            updateCount.incrementAndGet();
        }
    }, transaction, true, true);
    return new DMLStatementExecutionResult(transactionId, updateCount.get(), lastKey.value, delete.isReturnValues() ? (lastValue.value != null ? Bytes.from_array(lastValue.value) : null) : null);
}
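Both executeUpdate and executeDelete funnel their per-record work through the same ScanResultOperation callback, and the throws clause on accept is what lets LogNotAvailableException surface from inside the scan. A stripped-down sketch of that shared log-then-apply pattern, with the surrounding TableManager state (table, log, transaction, apply) assumed from the excerpts above:

ScanResultOperation op = new ScanResultOperation() {

    @Override
    public void accept(Record actual) throws StatementExecutionException, LogNotAvailableException, DataStorageManagerException {
        // build the mutation for this record (an update or, as here, a delete)
        LogEntry entry = LogEntryFactory.delete(table, actual.key.data, transaction);
        // same call as in the excerpts; this is where LogNotAvailableException can originate
        CommitLogResult pos = log.log(entry, entry.transactionId <= 0);
        // mutate the in-memory state once the log position is known
        apply(pos, entry, false);
    }
};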
Use of herddb.log.LogNotAvailableException in project herddb by diennea.
The class TableSpaceManager, method checkpoint.
// visible for testing
public TableSpaceCheckpoint checkpoint(boolean full, boolean pin, boolean alreadLocked) throws DataStorageManagerException, LogNotAvailableException {
    if (virtual) {
        return null;
    }
    if (recoveryInProgress) {
        LOGGER.log(Level.INFO, "Checkpoint for tablespace {0} skipped. Recovery is still in progress", tableSpaceName);
        return null;
    }
    long _start = System.currentTimeMillis();
    LogSequenceNumber logSequenceNumber = null;
    LogSequenceNumber _logSequenceNumber = null;
    Map<String, LogSequenceNumber> checkpointsTableNameSequenceNumber = new HashMap<>();
    try {
        List<PostCheckpointAction> actions = new ArrayList<>();
        long lockStamp = 0;
        if (!alreadLocked) {
            lockStamp = acquireWriteLock("checkpoint");
        }
        try {
            logSequenceNumber = log.getLastSequenceNumber();
            if (logSequenceNumber.isStartOfTime()) {
                LOGGER.log(Level.INFO, "{0} checkpoint {1} at {2}. skipped (no write ever issued to log)", new Object[] { nodeId, tableSpaceName, logSequenceNumber });
                return new TableSpaceCheckpoint(logSequenceNumber, checkpointsTableNameSequenceNumber);
            }
            LOGGER.log(Level.INFO, "{0} checkpoint start {1} at {2}", new Object[] { nodeId, tableSpaceName, logSequenceNumber });
            if (actualLogSequenceNumber == null) {
                throw new DataStorageManagerException("actualLogSequenceNumber cannot be null");
            }
            // TODO: transactions checkpoint is not atomic
            Collection<Transaction> currentTransactions = new ArrayList<>(transactions.values());
            for (Transaction t : currentTransactions) {
                LogSequenceNumber txLsn = t.lastSequenceNumber;
                if (txLsn != null && txLsn.after(logSequenceNumber)) {
                    LOGGER.log(Level.SEVERE, "Found transaction {0} with LSN {1} in the future", new Object[] { t.transactionId, txLsn });
                }
            }
            actions.addAll(dataStorageManager.writeTransactionsAtCheckpoint(tableSpaceUUID, logSequenceNumber, currentTransactions));
            actions.addAll(writeTablesOnDataStorageManager(new CommitLogResult(logSequenceNumber, false, true), true));
            // we checkpoint all data to disk and save the actual log sequence number
            for (AbstractTableManager tableManager : tables.values()) {
                if (!tableManager.isSystemTable()) {
                    TableCheckpoint checkpoint = full ? tableManager.fullCheckpoint(pin) : tableManager.checkpoint(pin);
                    if (checkpoint != null) {
                        LOGGER.log(Level.INFO, "checkpoint done for table {0}.{1} (pin: {2})", new Object[] { tableSpaceName, tableManager.getTable().name, pin });
                        actions.addAll(checkpoint.actions);
                        checkpointsTableNameSequenceNumber.put(checkpoint.tableName, checkpoint.sequenceNumber);
                        if (afterTableCheckPointAction != null) {
                            afterTableCheckPointAction.run();
                        }
                    }
                }
            }
            // we are sure that all data has been flushed; upon recovery we will replay the log starting from this position
            actions.addAll(dataStorageManager.writeCheckpointSequenceNumber(tableSpaceUUID, logSequenceNumber));
            /* Indexes checkpoint is handled by TableManagers */
            if (leader) {
                log.dropOldLedgers(logSequenceNumber);
            }
            _logSequenceNumber = log.getLastSequenceNumber();
        } finally {
            if (!alreadLocked) {
                releaseWriteLock(lockStamp, "checkpoint");
            }
        }
        for (PostCheckpointAction action : actions) {
            try {
                action.run();
            } catch (Exception error) {
                LOGGER.log(Level.SEVERE, "postcheckpoint error:" + error, error);
            }
        }
        return new TableSpaceCheckpoint(logSequenceNumber, checkpointsTableNameSequenceNumber);
    } finally {
        long _stop = System.currentTimeMillis();
        LOGGER.log(Level.INFO, "{0} checkpoint finish {1} started ad {2}, finished at {3}, total time {4} ms", new Object[] { nodeId, tableSpaceName, logSequenceNumber, _logSequenceNumber, Long.toString(_stop - _start) });
        checkpointTimeStats.registerSuccessfulEvent(_stop, TimeUnit.MILLISECONDS);
    }
}
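A minimal caller-side sketch of checkpoint, assuming a TableSpaceManager reference named manager (hypothetical); the argument order follows the signature above (full, pin, alreadLocked), and both declared exceptions are handled explicitly:

try {
    // non-full, non-pinning checkpoint, tablespace write lock not already held
    TableSpaceCheckpoint cp = manager.checkpoint(false, false, false);
    if (cp == null) {
        // virtual tablespace, or recovery still in progress: nothing was flushed
    }
} catch (LogNotAvailableException err) {
    // the commit log could not provide or record the checkpoint position
} catch (DataStorageManagerException err) {
    // flushing table data or the checkpoint sequence number failed
}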
Use of herddb.log.LogNotAvailableException in project herddb by diennea.
The class TableSpaceManager, method dropTable.
private StatementExecutionResult dropTable(DropTableStatement statement, Transaction transaction, StatementEvaluationContext context) throws StatementExecutionException {
    boolean lockAcquired = false;
    if (context.getTableSpaceLock() == 0) {
        long lockStamp = acquireWriteLock(statement);
        context.setTableSpaceLock(lockStamp);
        lockAcquired = true;
    }
    try {
        String tableNameUpperCase = statement.getTable().toUpperCase();
        String tableNameNormalized = tables.keySet().stream().filter(t -> t.toUpperCase().equals(tableNameUpperCase)).findFirst().orElse(statement.getTable());
        AbstractTableManager tableManager = tables.get(tableNameNormalized);
        if (tableManager == null) {
            if (statement.isIfExists()) {
                return new DDLStatementExecutionResult(transaction != null ? transaction.transactionId : 0);
            }
            throw new TableDoesNotExistException("table does not exist " + tableNameNormalized + " on tableSpace " + statement.getTableSpace());
        }
        if (transaction != null && transaction.isTableDropped(tableNameNormalized)) {
            if (statement.isIfExists()) {
                return new DDLStatementExecutionResult(transaction.transactionId);
            }
            throw new TableDoesNotExistException("table does not exist " + tableNameNormalized + " on tableSpace " + statement.getTableSpace());
        }
        Table table = tableManager.getTable();
        Table[] childrenTables = collectChildrenTables(table);
        if (childrenTables != null) {
            String errorMsg = "Cannot drop table " + table.tablespace + "." + table.name + " because it has children tables: " + Stream.of(childrenTables).map(t -> t.name).collect(Collectors.joining(","));
            throw new StatementExecutionException(errorMsg);
        }
        Map<String, AbstractIndexManager> indexesOnTable = indexesByTable.get(tableNameNormalized);
        if (indexesOnTable != null) {
            for (String index : new ArrayList<>(indexesOnTable.keySet())) {
                LogEntry entry = LogEntryFactory.dropIndex(index, transaction);
                CommitLogResult pos = log.log(entry, entry.transactionId <= 0);
                apply(pos, entry, false);
            }
        }
        LogEntry entry = LogEntryFactory.dropTable(tableNameNormalized, transaction);
        CommitLogResult pos = log.log(entry, entry.transactionId <= 0);
        apply(pos, entry, false);
        return new DDLStatementExecutionResult(entry.transactionId);
    } catch (DataStorageManagerException | LogNotAvailableException err) {
        throw new StatementExecutionException(err);
    } finally {
        if (lockAcquired) {
            releaseWriteLock(context.getTableSpaceLock(), statement);
            context.setTableSpaceLock(0);
        }
    }
}
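Note how dropTable converts both DataStorageManagerException and LogNotAvailableException into a StatementExecutionException, so DDL callers only see a statement-level failure. A minimal sketch of that wrapping pattern, with a hypothetical doDropWork() standing in for the body above:

try {
    // logs the DROP INDEX / DROP TABLE entries and applies them;
    // this is where LogNotAvailableException can be raised
    doDropWork();
} catch (DataStorageManagerException | LogNotAvailableException err) {
    // surface a single statement-level failure to the SQL layer
    throw new StatementExecutionException(err);
}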