Usage of org.cojen.tupl.DatabaseException in the Tupl project (by cojen):
the Node class, updateLeafValue method.
/**
 * Updates the value of an existing leaf entry. First attempts a fast in-place
 * update when the new value fits in the old slot; otherwise allocates fresh
 * space within the node, compacting or splitting the node as required.
 *
 * @param tree B-tree which owns this node; supplies the database used for
 * fragment management, and is the target of a split if one is needed
 * @param pos position as provided by binarySearch; must be positive
 * @param vfrag 0 or ENTRY_FRAGMENTED
 * @param value new value to store; may be re-encoded in fragmented form by
 * this method when it doesn't fit directly
 * @throws IOException if fragment deletion/creation or a node split fails
 */
void updateLeafValue(BTree tree, int pos, int vfrag, byte[] value) throws IOException {
var page = mPage;
final int searchVecStart = searchVecStart();
final int start;
final int keyLen;
final int garbage;
// Fast path: overwrite the value in its existing slot. Breaking out of this
// labeled block ("break quick") means the slot is too small and new space
// must be allocated instead; keyLen and garbage are assigned before the break.
quick: {
int loc;
start = loc = p_ushortGetLE(page, searchVecStart + pos);
loc += keyLengthAtLoc(page, loc);
final int valueHeaderLoc = loc;
// Note: Similar to leafEntryLengthAtLoc and retrieveLeafValueAtLoc.
// Decode the existing value length from its variable-size header.
int len = p_byteGet(page, loc++);
if (len < 0)
largeValue: {
int header;
if ((len & 0x20) == 0) {
// Two-byte header format.
header = len;
len = 1 + (((len & 0x1f) << 8) | p_ubyteGet(page, loc++));
} else if (len != -1) {
// Three-byte header format.
header = len;
len = 1 + (((len & 0x0f) << 16) | (p_ubyteGet(page, loc++) << 8) | p_ubyteGet(page, loc++));
} else {
// ghost
len = 0;
break largeValue;
}
if ((header & ENTRY_FRAGMENTED) != 0) {
// Old value was fragmented; free its fragment pages before overwriting.
tree.mDatabase.deleteFragments(page, loc, len);
// Clearing the fragmented bit prevents the update from double-deleting the
// fragments, and it also allows the old entry slot to be re-used.
p_bytePut(page, valueHeaderLoc, header & ~ENTRY_FRAGMENTED);
}
}
final int valueLen = value.length;
if (valueLen > len) {
// Old entry is too small, and so it becomes garbage.
// TODO: Try to extend the length instead of creating garbage.
keyLen = valueHeaderLoc - start;
garbage = garbage() + loc + len - start;
break quick;
}
if (valueLen == len) {
// Quick copy with no garbage created.
if (valueLen == 0) {
// Ensure ghost is replaced.
p_bytePut(page, valueHeaderLoc, 0);
} else {
p_copyFromArray(value, 0, page, loc, valueLen);
if (vfrag != 0) {
// Preserve the fragmented flag on the re-used header byte.
p_bytePut(page, valueHeaderLoc, p_byteGet(page, valueHeaderLoc) | vfrag);
}
}
} else {
// New entry is smaller, so some space is freed.
int valueLoc = copyToLeafValue(page, vfrag, value, valueHeaderLoc);
spaceFreed(valueLoc + valueLen, loc + len);
}
return;
}
// What follows is similar to createLeafEntry method, except the search
// vector doesn't grow.
int searchVecEnd = searchVecEnd();
int leftSpace = searchVecStart - leftSegTail();
int rightSpace = rightSegTail() - searchVecEnd - 1;
// Remember whether the caller passed in an already-fragmented value, so the
// catch block below only cleans up fragments created by this method.
final int vfragOriginal = vfrag;
int encodedLen;
if (vfrag != 0) {
encodedLen = keyLen + calculateFragmentedValueLength(value);
} else {
LocalDatabase db = tree.mDatabase;
encodedLen = keyLen + calculateLeafValueLength(value);
if (encodedLen > db.mMaxEntrySize) {
// Entry is too large to store directly; convert the value to fragmented form.
value = db.fragment(value, value.length, db.mMaxFragmentedEntrySize - keyLen);
if (value == null) {
// NOTE(review): presumably fragment cannot fail at the max fragmented
// size, hence the assertion — confirm against LocalDatabase.fragment.
throw new AssertionError();
}
encodedLen = keyLen + calculateFragmentedValueLength(value);
vfrag = ENTRY_FRAGMENTED;
}
}
int entryLoc;
alloc: try {
if ((entryLoc = allocPageEntry(encodedLen, leftSpace, rightSpace)) >= 0) {
// Free space was available without compaction or moving the search vector.
pos += searchVecStart;
break alloc;
}
// Compute remaining space surrounding search vector after update completes.
int remaining = leftSpace + rightSpace - encodedLen;
if (garbage > remaining) {
// Do full compaction and free up the garbage, or split the node.
var akeyRef = new byte[1][];
boolean isOriginal = retrieveActualKeyAtLoc(page, start, akeyRef);
byte[] akey = akeyRef[0];
if ((garbage + remaining) < 0) {
// Not enough space even after reclaiming all garbage.
if (mSplit == null) {
// TODO: use frame for rebalancing
// Node is full, so split it.
byte[] okey = isOriginal ? akey : retrieveKeyAtLoc(this, page, start);
splitLeafAndCreateEntry(tree, okey, akey, vfrag, value, encodedLen, pos, false);
return;
}
// Node is already split, and so value is too large.
if (vfrag != 0) {
// Not expected.
throw new DatabaseException("Fragmented entry doesn't fit");
}
LocalDatabase db = tree.mDatabase;
// Fragment the value down to the space which can actually be reclaimed.
int max = Math.min(db.mMaxFragmentedEntrySize, garbage + leftSpace + rightSpace);
value = db.fragment(value, value.length, max - keyLen);
if (value == null) {
throw new AssertionError();
}
encodedLen = keyLen + calculateFragmentedValueLength(value);
vfrag = ENTRY_FRAGMENTED;
}
// Compact the node and re-encode the key and value into the new location.
garbage(garbage);
entryLoc = compactLeaf(encodedLen, pos, false);
page = mPage;
entryLoc = isOriginal ? encodeNormalKey(akey, page, entryLoc) : encodeFragmentedKey(akey, page, entryLoc);
copyToLeafValue(page, vfrag, value, entryLoc);
return;
}
int vecLen = searchVecEnd - searchVecStart + 2;
int newSearchVecStart;
if (remaining > 0 || (rightSegTail() & 1) != 0) {
// Re-center search vector, biased to the right, ensuring proper alignment.
newSearchVecStart = (rightSegTail() - vecLen + (1 - 0) - (remaining >> 1)) & ~1;
// Allocate entry from left segment.
entryLoc = leftSegTail();
leftSegTail(entryLoc + encodedLen);
} else if ((leftSegTail() & 1) == 0) {
// Move search vector left, ensuring proper alignment.
newSearchVecStart = leftSegTail() + ((remaining >> 1) & ~1);
// Allocate entry from right segment.
entryLoc = rightSegTail() - encodedLen + 1;
rightSegTail(entryLoc - 1);
} else {
// Search vector is misaligned, so do full compaction.
var akeyRef = new byte[1][];
int loc = p_ushortGetLE(page, searchVecStart + pos);
boolean isOriginal = retrieveActualKeyAtLoc(page, loc, akeyRef);
byte[] akey = akeyRef[0];
garbage(garbage);
entryLoc = compactLeaf(encodedLen, pos, false);
page = mPage;
entryLoc = isOriginal ? encodeNormalKey(akey, page, entryLoc) : encodeFragmentedKey(akey, page, entryLoc);
copyToLeafValue(page, vfrag, value, entryLoc);
return;
}
// Slide the search vector to its new position and adjust its bounds.
p_copy(page, searchVecStart, page, newSearchVecStart, vecLen);
pos += newSearchVecStart;
searchVecStart(newSearchVecStart);
searchVecEnd(newSearchVecStart + vecLen - 2);
} catch (Throwable e) {
// Only clean up fragments which THIS method created; a caller-provided
// fragmented value remains the caller's responsibility.
if (vfrag == ENTRY_FRAGMENTED && vfragOriginal != ENTRY_FRAGMENTED) {
cleanupFragments(e, value);
}
throw e;
}
// Copy existing key, and then copy value.
p_copy(page, start, page, entryLoc, keyLen);
copyToLeafValue(page, vfrag, value, entryLoc + keyLen);
p_shortPutLE(page, pos, entryLoc);
garbage(garbage);
}
Usage of org.cojen.tupl.DatabaseException in the Tupl project (by cojen):
the Checkpointer class, run method.
/**
 * Background checkpointer thread entry point. Performs an initial forced
 * checkpoint, then either blocks until the database becomes unreferenced
 * (when a reference queue is configured), or loops performing periodic
 * checkpoints until the database is reclaimed, closed, or an unrecoverable
 * error occurs.
 */
@Override
public void run() {
try {
if (mState == STATE_INIT) {
// Start with an initial forced checkpoint.
CoreDatabase db = mDatabaseRef.get();
if (db != null) {
db.checkpoint();
}
mState = STATE_RUNNING;
}
if (mRefQueue != null) {
// When the checkpoint rate is negative (infinite delay), this thread is
// suspended until the database isn't referenced anymore, or until the database
// is explicitly closed.
mRefQueue.remove();
close(null);
return;
}
long lastDurationNanos = 0;
// Periodic checkpoint loop: sleep for the configured rate minus the time
// the previous checkpoint took, so checkpoints start on a steady cadence.
while (true) {
long delayMillis = (mRateNanos - lastDurationNanos) / 1000000L;
if (delayMillis > 0) {
Thread.sleep(delayMillis);
}
// The database is held via a weak/soft reference; exit once reclaimed.
CoreDatabase db = mDatabaseRef.get();
if (db == null) {
close(null);
return;
}
if (isSuspended()) {
// Don't actually suspend the thread, allowing for weak reference checks.
lastDurationNanos = 0;
} else
try {
long startNanos = System.nanoTime();
db.checkpoint(mSizeThreshold, mDelayThresholdNanos);
long endNanos = System.nanoTime();
lastDurationNanos = endNanos - startNanos;
} catch (DatabaseException e) {
// Report the failure to the listener, but keep running when recoverable.
EventListener listener = db.eventListener();
if (listener != null) {
listener.notify(EventType.CHECKPOINT_FAILED, "Checkpoint failed: %1$s", e);
}
if (!e.isRecoverable()) {
throw e;
}
lastDurationNanos = 0;
}
}
} catch (Throwable e) {
// Unrecoverable failure (or interrupt): close the database unless it was
// already closed deliberately, then shut down this checkpointer.
if (mState != STATE_CLOSED) {
CoreDatabase db = mDatabaseRef.get();
if (db != null) {
Utils.closeQuietly(db, e);
}
}
close(e);
}
}
Usage of org.cojen.tupl.DatabaseException in the Tupl project (by cojen):
the ReplController class, ready method.
/**
 * Brings replication online: begins receiving redo operations at the given
 * position, registers control message and snapshot request handlers, starts
 * the replicator, and waits until replication has caught up (and, if this
 * member became the leader, until leadership is confirmed or revoked).
 *
 * @param initialPosition replication log position to start receiving from
 * @param initialTxnId transaction id corresponding to the initial position
 * @throws IOException if starting the replicator or the engine fails
 */
public void ready(long initialPosition, long initialTxnId) throws IOException {
acquireExclusive();
try {
// Condition used later to wait for the leaderNotify callback.
mLeaderNotifyCondition = new LatchCondition();
// Init for the shouldCheckpoint method. Without this, an initial checkpoint is
// performed even if it's not necessary.
// NOTE(review): the high bit (1L << 63) appears to flag the position as
// already checkpointed — confirm against shouldCheckpoint.
cCheckpointPosHandle.setOpaque(this, initialPosition | (1L << 63));
} finally {
releaseExclusive();
}
ReplDecoder decoder = mEngine.startReceiving(initialPosition, initialTxnId);
if (decoder == null) {
// Failed to start, and database has been closed with an exception.
return;
}
CoreDatabase db = mEngine.mDatabase;
// Can now send control messages.
mRepl.controlMessageAcceptor(message -> {
try {
db.writeControlMessage(message);
} catch (UnmodifiableReplicaException e) {
// Drop it.
} catch (Throwable e) {
Utils.uncaught(e);
}
});
// Can now accept snapshot requests.
mRepl.snapshotRequestAcceptor(sender -> {
try {
ReplUtils.sendSnapshot(db, sender);
} catch (Throwable e) {
// Always close the sender; only report unexpected (non-I/O) failures.
Utils.closeQuietly(sender);
if (e instanceof DatabaseException || !(e instanceof IOException)) {
Utils.uncaught(e);
}
}
});
// Update the local member role.
mRepl.start();
// Wait until replication has "caught up" before returning.
boolean isLeader = decoder.catchup();
// We're not truly caught up until all outstanding redo operations have been applied.
// Suspend and resume does the trick.
mEngine.suspend();
mEngine.resume();
// Wait for leaderNotify method to be called. The local member might be the leader now,
// or the new leadership might have been immediately revoked. Either case is detected.
acquireExclusive();
try {
if (isLeader && mLeaderNotifyCondition != null) {
mLeaderNotifyCondition.await(this);
}
} finally {
mLeaderNotifyCondition = null;
releaseExclusive();
}
}
Usage of org.cojen.tupl.DatabaseException in the Tupl project (by cojen):
the CryptoPageArray class, readPage method (byte array destination).
@Override
public void readPage(long index, byte[] dst) throws IOException {
    try {
        // Fetch the encrypted page from the underlying array, then decrypt
        // it in place within the destination buffer.
        mSource.readPage(index, dst);
        mCrypto.decryptPage(index, pageSize(), dst, 0);
    } catch (GeneralSecurityException cause) {
        // Surface crypto failures as database exceptions, preserving the cause.
        throw new DatabaseException(cause);
    }
}
Usage of org.cojen.tupl.DatabaseException in the Tupl project (by cojen):
the CryptoPageArray class, readPage method (pointer destination overload).
@Override
public void readPage(long index, long dstPtr) throws IOException {
    try {
        // Fetch the encrypted page from the underlying array, then decrypt
        // it in place at the destination pointer.
        mSource.readPage(index, dstPtr);
        mCrypto.decryptPage(index, pageSize(), dstPtr, 0);
    } catch (GeneralSecurityException cause) {
        // Surface crypto failures as database exceptions, preserving the cause.
        throw new DatabaseException(cause);
    }
}
Aggregations