Use of herddb.log.LogEntry in the herddb project by diennea:
the executeInsert method of the TableManager class.
/**
 * Executes an INSERT statement against this table.
 * <p>
 * The new key/value pair is computed from the statement's key and value functions,
 * validated for size, checked for primary-key uniqueness (both in the transaction's
 * local view and in the persisted keyToPage index), logged to the commit log and
 * finally applied in memory.
 *
 * @param insert      the INSERT statement to execute
 * @param transaction the enclosing transaction, or {@code null} for auto-commit
 * @param context     evaluation context for computing key/value expressions
 * @return a DML result carrying the transaction id, the update count (1), the key
 *         and, if requested, the inserted value
 * @throws StatementExecutionException  if the key already exists, the record is too
 *                                      big, or the log is not available
 * @throws DataStorageManagerException  on storage-layer failures
 */
private StatementExecutionResult executeInsert(InsertStatement insert, Transaction transaction, StatementEvaluationContext context) throws StatementExecutionException, DataStorageManagerException {
    /*
    an insert can succeed only if the row is valid and the "keys" structure does not contain the requested key
    the insert will add the row in the 'buffer' without assigning a page to it
    locks: the insert uses global 'insert' lock on the table
    the insert will update the 'maxKey' for auto_increment primary keys
    */
    Bytes key = new Bytes(insert.getKeyFunction().computeNewValue(null, context, tableContext));
    byte[] value = insert.getValuesFunction().computeNewValue(new Record(key, null), context, tableContext);
    // Reject records that could never fit on a single logical page.
    final long size = DataPage.estimateEntrySize(key, value);
    if (size > maxLogicalPageSize) {
        throw new RecordTooBigException("New record " + key + " is too big to be inserted: size " + size + ", max size " + maxLogicalPageSize);
    }
    LockHandle lock = lockForWrite(key, transaction);
    try {
        if (transaction != null) {
            if (transaction.recordDeleted(table.name, key)) {
                // OK, INSERT on a DELETED record inside this transaction
            } else if (transaction.recordInserted(table.name, key) != null) {
                // ERROR, INSERT on a INSERTED record inside this transaction
                throw new DuplicatePrimaryKeyException(key, "key " + key + ", decoded as " + RecordSerializer.deserializePrimaryKey(key.data, table) + ", already exists in table " + table.name + " inside transaction " + transaction.transactionId);
            } else if (keyToPage.containsKey(key)) {
                // Key already persisted outside this transaction.
                throw new DuplicatePrimaryKeyException(key, "key " + key + ", decoded as " + RecordSerializer.deserializePrimaryKey(key.data, table) + ", already exists in table " + table.name + " during transaction " + transaction.transactionId);
            }
        } else if (keyToPage.containsKey(key)) {
            throw new DuplicatePrimaryKeyException(key, "key " + key + ", decoded as " + RecordSerializer.deserializePrimaryKey(key.data, table) + ", already exists in table " + table.name);
        }
        LogEntry entry = LogEntryFactory.insert(table, key.data, value, transaction);
        // Outside a transaction (transactionId <= 0) the log write must be synchronous.
        CommitLogResult pos = log.log(entry, entry.transactionId <= 0);
        apply(pos, entry, false);
        return new DMLStatementExecutionResult(entry.transactionId, 1, key, insert.isReturnValues() ? Bytes.from_array(value) : null);
    } catch (LogNotAvailableException err) {
        throw new StatementExecutionException(err);
    } finally {
        // In auto-commit mode the lock is released immediately; inside a
        // transaction it is held until commit/rollback.
        if (transaction == null) {
            locksManager.releaseWriteLockForKey(key, lock);
        }
    }
}
Use of herddb.log.LogEntry in the herddb project by diennea:
the dropTable method of the TableSpaceManager class.
/**
 * Executes a DROP TABLE statement.
 * <p>
 * Under the tablespace-wide write lock, this first drops every index defined on
 * the table (each drop is logged and applied individually) and then logs and
 * applies the drop of the table itself.
 *
 * @param statement   the DROP TABLE statement (may carry IF EXISTS semantics)
 * @param transaction the enclosing transaction, or {@code null} for auto-commit
 * @return a DDL result carrying the transaction id (0 in auto-commit)
 * @throws StatementExecutionException if the table does not exist (and IF EXISTS
 *                                     was not requested), or on storage/log errors
 */
private StatementExecutionResult dropTable(DropTableStatement statement, Transaction transaction) throws StatementExecutionException {
    generalLock.writeLock().lock();
    try {
        if (!tables.containsKey(statement.getTable())) {
            if (statement.isIfExists()) {
                return new DDLStatementExecutionResult(transaction != null ? transaction.transactionId : 0);
            }
            throw new TableDoesNotExistException("table does not exist " + statement.getTable() + " on tableSpace " + statement.getTableSpace());
        }
        // The table may have already been dropped inside this same transaction.
        if (transaction != null && transaction.isTableDropped(statement.getTable())) {
            if (statement.isIfExists()) {
                return new DDLStatementExecutionResult(transaction.transactionId);
            }
            throw new TableDoesNotExistException("table does not exist " + statement.getTable() + " on tableSpace " + statement.getTableSpace());
        }
        Map<String, AbstractIndexManager> indexesOnTable = indexesByTable.get(statement.getTable());
        if (indexesOnTable != null) {
            // Snapshot the key set before iterating: apply() for a dropIndex entry
            // may mutate the index registry backing this map, and iterating the
            // live keySet() while that happens risks ConcurrentModificationException.
            for (String index : indexesOnTable.keySet().toArray(new String[0])) {
                LogEntry entry = LogEntryFactory.dropIndex(index, transaction);
                CommitLogResult pos = log.log(entry, entry.transactionId <= 0);
                apply(pos, entry, false);
            }
        }
        LogEntry entry = LogEntryFactory.dropTable(statement.getTable(), transaction);
        // Outside a transaction (transactionId <= 0) the log write must be synchronous.
        CommitLogResult pos = log.log(entry, entry.transactionId <= 0);
        apply(pos, entry, false);
        return new DDLStatementExecutionResult(entry.transactionId);
    } catch (DataStorageManagerException | LogNotAvailableException err) {
        throw new StatementExecutionException(err);
    } finally {
        generalLock.writeLock().unlock();
    }
}
Use of herddb.log.LogEntry in the herddb project by diennea:
the createTable method of the TableSpaceManager class.
/**
 * Executes a CREATE TABLE statement.
 * <p>
 * Under the tablespace-wide write lock, validates that neither the table nor any
 * of its additional indexes already exist, then logs and applies the table
 * creation followed by one log entry per additional index.
 *
 * @param statement   the CREATE TABLE statement, including any additional indexes
 * @param transaction the enclosing transaction, or {@code null} for auto-commit
 * @return a DDL result carrying the transaction id
 * @throws StatementExecutionException if the table or an index already exists,
 *                                     or on storage/log errors
 */
private StatementExecutionResult createTable(CreateTableStatement statement, Transaction transaction) throws StatementExecutionException {
    generalLock.writeLock().lock();
    try {
        if (tables.containsKey(statement.getTableDefinition().name)) {
            throw new TableAlreadyExistsException(statement.getTableDefinition().name);
        }
        // Validate ALL index names up front so we fail before logging anything.
        for (Index additionalIndex : statement.getAdditionalIndexes()) {
            if (indexes.containsKey(additionalIndex.name)) {
                throw new IndexAlreadyExistsException(additionalIndex.name);
            }
        }
        LogEntry entry = LogEntryFactory.createTable(statement.getTableDefinition(), transaction);
        // Outside a transaction (transactionId <= 0) the log write must be synchronous.
        CommitLogResult pos = log.log(entry, entry.transactionId <= 0);
        apply(pos, entry, false);
        for (Index additionalIndex : statement.getAdditionalIndexes()) {
            LogEntry indexEntry = LogEntryFactory.createIndex(additionalIndex, transaction);
            CommitLogResult indexPos = log.log(indexEntry, indexEntry.transactionId <= 0);
            apply(indexPos, indexEntry, false);
        }
        return new DDLStatementExecutionResult(entry.transactionId);
    } catch (DataStorageManagerException | LogNotAvailableException err) {
        throw new StatementExecutionException(err);
    } finally {
        generalLock.writeLock().unlock();
    }
}
Use of herddb.log.LogEntry in the herddb project by diennea:
the followTheLeader method of the BookkeeperCommitLog class.
/**
 * Tails the leader's commit log, replaying every entry written after
 * {@code skipPast} to the given consumer.
 * <p>
 * Reads the active ledger list from metadata, skips ledgers older than
 * {@code skipPast.ledgerId}, and streams each remaining ledger's entries
 * (without recovery, so the leader keeps ownership of the ledgers).
 *
 * @param skipPast the last log position already applied locally; replay starts
 *                 just after it
 * @param consumer receives each (sequence number, entry) pair in log order
 * @throws LogNotAvailableException on BookKeeper/IO failures while reading
 */
@Override
public void followTheLeader(LogSequenceNumber skipPast, BiConsumer<LogSequenceNumber, LogEntry> consumer) throws LogNotAvailableException {
    List<Long> actualList = metadataManager.getActualLedgersList(tableSpaceUUID).getActiveLedgers();
    List<Long> toRead = actualList;
    // ledgerId == -1 means "start of time": read every active ledger.
    if (skipPast.ledgerId != -1) {
        toRead = toRead.stream().filter(l -> l >= skipPast.ledgerId).collect(Collectors.toList());
    }
    try {
        // First entry to read in the FIRST ledger of the list.
        long nextEntry = skipPast.offset + 1;
        // LOGGER.log(Level.SEVERE, "followTheLeader "+tableSpace+" skipPast:{0} toRead: {1} actualList:{2}, nextEntry:{3}", new Object[]{skipPast, toRead, actualList, nextEntry});
        for (Long previous : toRead) {
            // LOGGER.log(Level.SEVERE, "followTheLeader openLedger " + previous + " nextEntry:" + nextEntry);
            LedgerHandle lh;
            try {
                // No-recovery open: we are a follower and must not fence the leader's ledger.
                lh = bookKeeper.openLedgerNoRecovery(previous, BookKeeper.DigestType.CRC32, sharedSecret.getBytes(StandardCharsets.UTF_8));
            } catch (BKException.BKLedgerRecoveryException e) {
                // Ledger is being recovered elsewhere: give up this tailing round;
                // the caller is expected to retry later.
                LOGGER.log(Level.SEVERE, "error", e);
                return;
            }
            try {
                long lastAddConfirmed = lh.getLastAddConfirmed();
                LOGGER.log(Level.FINE, "followTheLeader " + tableSpaceUUID + " openLedger {0} -> lastAddConfirmed:{1}, nextEntry:{2}", new Object[] { previous, lastAddConfirmed, nextEntry });
                // Nothing new (or ledger empty): move to the next ledger, which
                // is read from its first entry.
                if (nextEntry > lastAddConfirmed) {
                    nextEntry = 0;
                    continue;
                }
                // NOTE(review): after a successful read of this ledger, nextEntry is
                // NOT reset to 0 before moving to the next ledger in toRead — it looks
                // like subsequent ledgers would be read starting from a stale offset.
                // TODO confirm whether callers only ever pass lists where this is safe.
                Enumeration<LedgerEntry> entries = lh.readEntries(nextEntry, lh.getLastAddConfirmed());
                while (entries.hasMoreElements()) {
                    LedgerEntry e = entries.nextElement();
                    long entryId = e.getEntryId();
                    byte[] entryData = e.getEntry();
                    LogEntry statusEdit = LogEntry.deserialize(entryData);
                    // LOGGER.log(Level.SEVERE, "" + tableSpaceUUID + " followentry {0},{1} -> {2}", new Object[]{previous, entryId, statusEdit});
                    LogSequenceNumber number = new LogSequenceNumber(previous, entryId);
                    // Track the highest position seen so far; the accumulator only
                    // ever increases the stored offset.
                    lastSequenceNumber.accumulateAndGet(number.offset, EnsureLongIncrementAccumulator.INSTANCE);
                    lastLedgerId = number.ledgerId;
                    currentLedgerId = number.ledgerId;
                    consumer.accept(number, statusEdit);
                }
            } finally {
                try {
                    lh.close();
                } catch (BKException err) {
                    LOGGER.log(Level.SEVERE, "error while closing ledger", err);
                } catch (InterruptedException err) {
                    LOGGER.log(Level.SEVERE, "error while closing ledger", err);
                    // Preserve the interrupt status for callers.
                    Thread.currentThread().interrupt();
                }
            }
        }
    } catch (InterruptedException | EOFException | BKException err) {
        LOGGER.log(Level.SEVERE, "internal error", err);
        throw new LogNotAvailableException(err);
    }
}
Use of herddb.log.LogEntry in the herddb project by diennea:
the recovery method of the BookkeeperCommitLog class.
/**
 * Replays the commit log from the given snapshot position, feeding every entry
 * strictly after {@code snapshotSequenceNumber} to the consumer.
 * <p>
 * Validates that the active ledger list is compatible with the local snapshot
 * (otherwise a full recovery from a remote peer is required), then reads each
 * active ledger in batches of {@code BATCH_SIZE} entries, updating
 * {@code lastLedgerId}/{@code currentLedgerId}/{@code lastSequenceNumber} as it goes.
 *
 * @param snapshotSequenceNumber last position covered by the local snapshot
 * @param consumer               receives each (sequence number, entry) pair in log order
 * @param fencing                if true, open ledgers with recovery (fences other writers);
 *                               if false, open read-only without recovery
 * @throws LogNotAvailableException on any failure during replay; the log is
 *                                  marked failed via signalLogFailed()
 */
@Override
public void recovery(LogSequenceNumber snapshotSequenceNumber, BiConsumer<LogSequenceNumber, LogEntry> consumer, boolean fencing) throws LogNotAvailableException {
    this.actualLedgersList = metadataManager.getActualLedgersList(tableSpaceUUID);
    LOGGER.log(Level.SEVERE, "Actual ledgers list:" + actualLedgersList + " tableSpace " + tableSpaceUUID);
    this.lastLedgerId = snapshotSequenceNumber.ledgerId;
    this.currentLedgerId = snapshotSequenceNumber.ledgerId;
    this.lastSequenceNumber.set(snapshotSequenceNumber.offset);
    LOGGER.log(Level.SEVERE, "recovery from latest snapshotSequenceNumber:" + snapshotSequenceNumber);
    // The ledger holding our snapshot position must still be active, otherwise
    // the local data can no longer be caught up from the log.
    if (currentLedgerId > 0 && !this.actualLedgersList.getActiveLedgers().contains(currentLedgerId) && !this.actualLedgersList.getActiveLedgers().isEmpty()) {
        // TODO: download snapshot from another remote broker
        throw new FullRecoveryNeededException(new Exception("Actual ledgers list does not include latest snapshot ledgerid:" + currentLedgerId));
    }
    // With no local snapshot at all we need the very first ledger ever written;
    // if it was trimmed, only a full recovery can rebuild the state.
    if (snapshotSequenceNumber.isStartOfTime() && !this.actualLedgersList.getActiveLedgers().isEmpty() && !this.actualLedgersList.getActiveLedgers().contains(this.actualLedgersList.getFirstLedger())) {
        throw new FullRecoveryNeededException(new Exception("Local data is absent, and actual ledger list " + this.actualLedgersList.getActiveLedgers() + " does not contain first ledger of ever: " + this.actualLedgersList.getFirstLedger()));
    }
    try {
        for (long ledgerId : actualLedgersList.getActiveLedgers()) {
            // Ledgers fully covered by the snapshot need no replay.
            if (ledgerId < snapshotSequenceNumber.ledgerId) {
                LOGGER.log(Level.SEVERE, "Skipping ledger " + ledgerId);
                continue;
            }
            LedgerHandle handle;
            if (fencing) {
                // Recovery open fences out any other writer on this ledger.
                handle = bookKeeper.openLedger(ledgerId, BookKeeper.DigestType.CRC32, sharedSecret.getBytes(StandardCharsets.UTF_8));
            } else {
                handle = bookKeeper.openLedgerNoRecovery(ledgerId, BookKeeper.DigestType.CRC32, sharedSecret.getBytes(StandardCharsets.UTF_8));
            }
            try {
                long first;
                if (ledgerId == snapshotSequenceNumber.ledgerId) {
                    // Resume inside the snapshot's own ledger; entries at or before
                    // the snapshot offset are filtered out below via number.after().
                    first = snapshotSequenceNumber.offset;
                    LOGGER.log(Level.SEVERE, "Recovering from latest snapshot ledger " + ledgerId + ", starting from entry " + first);
                } else {
                    first = 0;
                    LOGGER.log(Level.SEVERE, "Recovering from ledger " + ledgerId + ", starting from entry " + first);
                }
                long lastAddConfirmed = handle.getLastAddConfirmed();
                LOGGER.log(Level.SEVERE, "Recovering from ledger " + ledgerId + ", first=" + first + " lastAddConfirmed=" + lastAddConfirmed);
                final int BATCH_SIZE = 10000;
                // lastAddConfirmed == -1 means the ledger is empty.
                if (lastAddConfirmed >= 0) {
                    // Read in [start, end] windows of at most BATCH_SIZE entries.
                    for (long b = first; b <= lastAddConfirmed; ) {
                        long start = b;
                        long end = b + BATCH_SIZE;
                        if (end > lastAddConfirmed) {
                            end = lastAddConfirmed;
                        }
                        b = end + 1;
                        // Progress percentage, for the operator logs only.
                        double percent = ((start - first) * 100.0 / (lastAddConfirmed + 1));
                        int entriesToRead = (int) (1 + end - start);
                        LOGGER.log(Level.SEVERE, "From entry {0}, to entry {1} ({2} %)", new Object[] { start, end, percent });
                        long _start = System.currentTimeMillis();
                        Enumeration<LedgerEntry> entries = handle.readEntries(start, end);
                        int localEntryCount = 0;
                        while (entries.hasMoreElements()) {
                            LedgerEntry entry = entries.nextElement();
                            long entryId = entry.getEntryId();
                            LogSequenceNumber number = new LogSequenceNumber(ledgerId, entryId);
                            LogEntry statusEdit = LogEntry.deserialize(entry.getEntry());
                            // Advance the in-memory log position even for skipped
                            // entries, so it always reflects what was scanned.
                            lastLedgerId = ledgerId;
                            currentLedgerId = ledgerId;
                            lastSequenceNumber.set(entryId);
                            // Only entries strictly after the snapshot are replayed.
                            if (number.after(snapshotSequenceNumber)) {
                                LOGGER.log(Level.FINEST, "RECOVER ENTRY #" + localEntryCount + " {0}, {1}", new Object[] { number, statusEdit });
                                consumer.accept(number, statusEdit);
                            } else {
                                LOGGER.log(Level.FINEST, "SKIP ENTRY #" + localEntryCount + " {0}<{1}, {2}", new Object[] { number, snapshotSequenceNumber, statusEdit });
                            }
                            localEntryCount++;
                        }
                        LOGGER.log(Level.SEVERE, "read " + localEntryCount + " entries from ledger " + ledgerId + ", expected " + entriesToRead);
                        LOGGER.log(Level.SEVERE, "finished waiting for " + entriesToRead + " entries to be read from ledger " + ledgerId);
                        // A short read means the log is corrupt/incomplete: abort.
                        if (localEntryCount != entriesToRead) {
                            throw new LogNotAvailableException("Read " + localEntryCount + " entries, expected " + entriesToRead);
                        }
                        lastLedgerId = ledgerId;
                        lastSequenceNumber.set(end);
                        long _stop = System.currentTimeMillis();
                        LOGGER.log(Level.SEVERE, "From entry {0}, to entry {1} ({2} %) read time {3}", new Object[] { start, end, percent, (_stop - _start) + " ms" });
                    }
                }
            } finally {
                handle.close();
            }
        }
        LOGGER.severe("After recovery of " + tableSpaceUUID + " lastSequenceNumber " + getLastSequenceNumber());
    } catch (InterruptedException | EOFException | RuntimeException | BKException err) {
        LOGGER.log(Level.SEVERE, "Fatal error during recovery", err);
        signalLogFailed();
        throw new LogNotAvailableException(err);
    }
}
Aggregations