Use of herddb.log.LogEntry in project herddb by diennea: class TableManager, method executeTruncate.
/**
 * Executes a TRUNCATE TABLE statement: logs a single truncate entry and applies it.
 * TRUNCATE is not transactional, so a non-null transaction is rejected up front.
 *
 * @param truncate    the statement to execute
 * @param transaction must be null; TRUNCATE cannot run inside a transaction
 * @param context     evaluation context (unused here, kept for a uniform signature)
 * @return a DML result whose update count is the estimated number of rows removed,
 *         clamped to Integer.MAX_VALUE
 * @throws StatementExecutionException if a transaction is supplied or the log is unavailable
 */
private StatementExecutionResult executeTruncate(TruncateTableStatement truncate, Transaction transaction, StatementEvaluationContext context) throws StatementExecutionException, DataStorageManagerException {
    if (transaction != null) {
        throw new StatementExecutionException("TRUNCATE TABLE cannot be executed within the context of a Transaction");
    }
    try {
        // snapshot the row count before truncation; it is only an estimate for the result
        final long rowsBefore = keyToPage.size();
        final LogEntry entry = LogEntryFactory.truncate(table, null);
        // synch flag: flush immediately when the entry is not bound to a transaction
        final CommitLogResult pos = log.log(entry, entry.transactionId <= 0);
        apply(pos, entry, false);
        final int reportedCount = rowsBefore > Integer.MAX_VALUE ? Integer.MAX_VALUE : (int) rowsBefore;
        return new DMLStatementExecutionResult(0, reportedCount, null, null);
    } catch (LogNotAvailableException err) {
        throw new StatementExecutionException(err);
    }
}
Use of herddb.log.LogEntry in project herddb by diennea: class TableManager, method executeUpdate.
/**
 * Executes an UPDATE statement by scanning matching rows and logging one update
 * entry per row.
 * <p>
 * An update succeeds only for valid rows whose key is contained in the "keys"
 * structure; it overrides the row value, assigning a null page to the row. An
 * optional 'where' predicate is evaluated against the decoded row and the update
 * runs only when it yields 'true' (CAS semantics). Locking: a per-key lock is
 * taken for each touched row.
 *
 * @param update      the statement (predicate + value-producing function)
 * @param transaction current transaction, or null for auto-commit
 * @param context     statement evaluation context
 * @return count of updated rows, last updated key, and optionally the last new value
 */
private StatementExecutionResult executeUpdate(UpdateStatement update, Transaction transaction, StatementEvaluationContext context) throws StatementExecutionException, DataStorageManagerException {
    final AtomicInteger modifiedRows = new AtomicInteger();
    final Holder<Bytes> latestKey = new Holder<>();
    final Holder<byte[]> latestValue = new Holder<>();
    final RecordFunction updateFunction = update.getFunction();
    final long txId = (transaction == null) ? 0 : transaction.transactionId;
    final Predicate wherePredicate = update.getPredicate();
    final ScanStatement scan = new ScanStatement(table.tablespace, table, wherePredicate);
    accessTableData(scan, context, new ScanResultOperation() {
        @Override
        public void accept(Record current) throws StatementExecutionException, LogNotAvailableException, DataStorageManagerException {
            final byte[] updatedValue = updateFunction.computeNewValue(current, context, tableContext);
            final long newSize = DataPage.estimateEntrySize(current.key, updatedValue);
            // reject records whose new version would not fit a logical page
            if (newSize > maxLogicalPageSize) {
                throw new RecordTooBigException("New version of record " + current.key
                        + " is to big to be update: new size " + newSize
                        + ", actual size " + DataPage.estimateEntrySize(current)
                        + ", max size " + maxLogicalPageSize);
            }
            final LogEntry entry = LogEntryFactory.update(table, current.key.data, updatedValue, transaction);
            // synch flag: flush immediately when outside a transaction
            final CommitLogResult pos = log.log(entry, entry.transactionId <= 0);
            apply(pos, entry, false);
            latestKey.value = current.key;
            latestValue.value = updatedValue;
            modifiedRows.incrementAndGet();
        }
    }, transaction, true, true);
    Bytes returnedValue = null;
    if (update.isReturnValues() && latestValue.value != null) {
        returnedValue = Bytes.from_array(latestValue.value);
    }
    return new DMLStatementExecutionResult(txId, modifiedRows.get(), latestKey.value, returnedValue);
}
Use of herddb.log.LogEntry in project herddb by diennea: class TableManager, method executeDelete.
/**
 * Executes a DELETE statement by scanning rows that match the predicate and
 * logging one delete entry per row.
 *
 * @param delete      the statement (carries the optional 'where' predicate)
 * @param transaction current transaction, or null for auto-commit
 * @param context     statement evaluation context
 * @return count of deleted rows, last deleted key, and optionally the last deleted value
 */
private StatementExecutionResult executeDelete(DeleteStatement delete, Transaction transaction, StatementEvaluationContext context) throws StatementExecutionException, DataStorageManagerException {
    final AtomicInteger removedRows = new AtomicInteger();
    final Holder<Bytes> latestKey = new Holder<>();
    final Holder<byte[]> latestValue = new Holder<>();
    final long txId = (transaction == null) ? 0 : transaction.transactionId;
    final ScanStatement scan = new ScanStatement(table.tablespace, table, delete.getPredicate());
    accessTableData(scan, context, new ScanResultOperation() {
        @Override
        public void accept(Record current) throws StatementExecutionException, LogNotAvailableException, DataStorageManagerException {
            final LogEntry entry = LogEntryFactory.delete(table, current.key.data, transaction);
            // synch flag: flush immediately when outside a transaction
            final CommitLogResult pos = log.log(entry, entry.transactionId <= 0);
            apply(pos, entry, false);
            latestKey.value = current.key;
            latestValue.value = current.value.data;
            removedRows.incrementAndGet();
        }
    }, transaction, true, true);
    Bytes returnedValue = null;
    if (delete.isReturnValues() && latestValue.value != null) {
        returnedValue = Bytes.from_array(latestValue.value);
    }
    return new DMLStatementExecutionResult(txId, removedRows.get(), latestKey.value, returnedValue);
}
Use of herddb.log.LogEntry in project herddb by diennea: class MemoryCommitLogManager, method createCommitLog.
@Override
public CommitLog createCommitLog(String tableSpace) {
return new CommitLog() {
AtomicLong offset = new AtomicLong(-1);
@Override
public CommitLogResult log(LogEntry entry, boolean synch) throws LogNotAvailableException {
if (isHasListeners()) {
synch = true;
}
// NOOP
entry.serialize();
LogSequenceNumber logPos = new LogSequenceNumber(1, offset.incrementAndGet());
notifyListeners(logPos, entry);
return new CommitLogResult(logPos, !synch);
}
@Override
public LogSequenceNumber getLastSequenceNumber() {
return new LogSequenceNumber(1, offset.get());
}
private volatile boolean closed;
@Override
public void close() throws LogNotAvailableException {
closed = true;
}
@Override
public boolean isFailed() {
return false;
}
@Override
public boolean isClosed() {
return closed;
}
@Override
public void recovery(LogSequenceNumber snapshotSequenceNumber, BiConsumer<LogSequenceNumber, LogEntry> consumer, boolean fencing) throws LogNotAvailableException {
}
@Override
public void dropOldLedgers(LogSequenceNumber lastCheckPointSequenceNumber) throws LogNotAvailableException {
}
@Override
public void followTheLeader(LogSequenceNumber skipPast, BiConsumer<LogSequenceNumber, LogEntry> consumer) throws LogNotAvailableException {
}
@Override
public void startWriting() throws LogNotAvailableException {
}
@Override
public void clear() throws LogNotAvailableException {
}
};
}
Use of herddb.log.LogEntry in project herddb by diennea: class FileCommitLogTest, method testDiskFullLogMissingFooter.
@Test
public void testDiskFullLogMissingFooter() throws Exception {
try (FileCommitLogManager manager = new FileCommitLogManager(folder.newFolder().toPath(), 64 * 1024 * 1024)) {
int writeCount = 0;
final long _startWrite = System.currentTimeMillis();
try (CommitLog log = manager.createCommitLog("tt")) {
log.startWriting();
for (int i = 0; i < 100; i++) {
log.log(LogEntryFactory.beginTransaction(0), false);
writeCount++;
}
FileCommitLog fileCommitLog = (FileCommitLog) log;
// simulate end of disk
byte[] dummyEntry = LogEntryFactory.beginTransaction(0).serialize();
// header
fileCommitLog.getWriter().out.write(ENTRY_START);
fileCommitLog.getWriter().out.writeLong(0);
// entry
fileCommitLog.getWriter().out.write(dummyEntry);
// missing entry footer
fileCommitLog.getWriter().out.flush();
}
final long _endWrite = System.currentTimeMillis();
AtomicInteger readCount = new AtomicInteger();
try (CommitLog log = manager.createCommitLog("tt")) {
log.recovery(LogSequenceNumber.START_OF_TIME, new BiConsumer<LogSequenceNumber, LogEntry>() {
@Override
public void accept(LogSequenceNumber t, LogEntry u) {
readCount.incrementAndGet();
}
}, true);
}
final long _endRead = System.currentTimeMillis();
assertEquals(writeCount, readCount.get());
System.out.println("Write time: " + (_endWrite - _startWrite) + " ms");
System.out.println("Read time: " + (_endRead - _endWrite) + " ms");
// must be able to read twice
AtomicInteger readCount2 = new AtomicInteger();
try (CommitLog log = manager.createCommitLog("tt")) {
log.recovery(LogSequenceNumber.START_OF_TIME, new BiConsumer<LogSequenceNumber, LogEntry>() {
@Override
public void accept(LogSequenceNumber t, LogEntry u) {
readCount2.incrementAndGet();
}
}, true);
}
assertEquals(writeCount, readCount.get());
}
}
Aggregations