Example usage of herddb.utils.Holder from the diennea/herddb project: class BLinkTest, method testScanHeadNotExistent.
/**
 * Scans ranges that lie entirely below the smallest inserted key: every scan
 * must return zero entries. Keys are inserted in [headNonExistent, headNonExistent + inserts).
 */
@Test
public void testScanHeadNotExistent() throws Exception {
    BLinkIndexDataStorage<Sized<Long>, Long> storage = new DummyBLinkIndexDataStorage<>();
    try (BLink<Sized<Long>, Long> blink = new BLink<>(2048L, new LongSizeEvaluator(), new RandomPageReplacementPolicy(10), storage)) {
        final long headNonExistent = 100;
        final long inserts = 100;
        for (long l = headNonExistent; l < inserts + headNonExistent; l++) {
            blink.insert(Sized.valueOf(l), l);
        }
        BLinkMetadata<Sized<Long>> metadata = blink.checkpoint();
        /* Require at least two nodes! */
        assertNotEquals(1, metadata.nodes.size());
        long offset = 10;
        /* Each window [l, l + offset) ends below headNonExistent, so it must be empty.
         * (Removed a dead StringBuilder and expected-value holder that were written
         * but never read: any entry at all already violates the zero-count assertion.) */
        for (long l = 0; l < headNonExistent - offset; l++) {
            Stream<Entry<Sized<Long>, Long>> stream = blink.scan(Sized.valueOf(l), Sized.valueOf(l + offset));
            Holder<Long> count = new Holder<>(0L);
            stream.forEach(entry -> count.value++);
            assertEquals(0, (long) count.value);
        }
    }
}
Example usage of herddb.utils.Holder from the diennea/herddb project: class BLinkTest, method testScanDotNotExistent.
/**
 * Scans windows that span a "hole" of missing keys in the middle of the index.
 * Keys [0, headInserts) and [headInserts + nonExistents, headInserts + nonExistents + tailInserts)
 * are present; [headInserts, headInserts + nonExistents) is absent.
 */
@Test
public void testScanDotNotExistent() throws Exception {
    BLinkIndexDataStorage<Sized<Long>, Long> storage = new DummyBLinkIndexDataStorage<>();
    try (BLink<Sized<Long>, Long> blink = new BLink<>(2048L, new LongSizeEvaluator(), new RandomPageReplacementPolicy(10), storage)) {
        final long headInserts = 100;
        final long nonExistents = 10;
        final long tailInserts = 100;
        for (long l = 0; l < headInserts; l++) {
            blink.insert(Sized.valueOf(l), l);
        }
        for (long l = headInserts + nonExistents; l < headInserts + nonExistents + tailInserts; l++) {
            blink.insert(Sized.valueOf(l), l);
        }
        BLinkMetadata<Sized<Long>> metadata = blink.checkpoint();
        /* Require at least two nodes! */
        assertNotEquals(1, metadata.nodes.size());
        long offset = 100;
        /* BUG FIX: the original bound was l < headInserts + nonExistents - offset,
         * i.e. 10 < 10 — the loop body never ran and the test asserted nothing.
         * Windows [l, l + offset) with l in [nonExistents, headInserts] always
         * contain the whole hole, so each must yield exactly offset - nonExistents entries. */
        for (long l = nonExistents; l <= headInserts; l++) {
            Stream<Entry<Sized<Long>, Long>> stream = blink.scan(Sized.valueOf(l), Sized.valueOf(l + offset));
            Holder<Long> expected = new Holder<>(l);
            Holder<Long> count = new Holder<>(0L);
            /* Check each value; expected keys must skip over the missing range. */
            stream.forEach(entry -> {
                if (expected.value == headInserts) {
                    /* Jump over the hole of non-existent keys. */
                    expected.value = headInserts + nonExistents;
                }
                assertEquals(expected.value, entry.getValue());
                expected.value++;
                count.value++;
            });
            assertEquals(offset - nonExistents, (long) count.value);
        }
    }
}
Example usage of herddb.utils.Holder from the diennea/herddb project: class BLinkTest, method testScanTailNotExistent.
/**
 * Scans ranges that lie entirely above the greatest inserted key: every scan
 * must return zero entries. Keys are inserted in [0, inserts).
 */
@Test
public void testScanTailNotExistent() throws Exception {
    BLinkIndexDataStorage<Sized<Long>, Long> storage = new DummyBLinkIndexDataStorage<>();
    try (BLink<Sized<Long>, Long> blink = new BLink<>(2048L, new LongSizeEvaluator(), new RandomPageReplacementPolicy(10), storage)) {
        final long inserts = 100;
        final long tailNonExistent = 100;
        for (long l = 0; l < inserts; l++) {
            blink.insert(Sized.valueOf(l), l);
        }
        BLinkMetadata<Sized<Long>> metadata = blink.checkpoint();
        /* Require at least two nodes! */
        assertNotEquals(1, metadata.nodes.size());
        long offset = 10;
        /* BUG FIX: the original bound was l < tailNonExistent - offset, i.e. 100 < 90 —
         * the loop never ran and the test asserted nothing. With the corrected bound
         * every window [l, l + offset) starts at or above 'inserts' and must be empty.
         * (Also removed a dead StringBuilder and expected-value holder.) */
        for (long l = inserts; l < inserts + tailNonExistent - offset; l++) {
            Stream<Entry<Sized<Long>, Long>> stream = blink.scan(Sized.valueOf(l), Sized.valueOf(l + offset));
            Holder<Long> count = new Holder<>(0L);
            stream.forEach(entry -> count.value++);
            assertEquals(0, (long) count.value);
        }
    }
}
Example usage of herddb.utils.Holder from the diennea/herddb project: class BackupUtils, method dumpTablespace.
/**
 * Dumps a whole tablespace through {@code hdbconnection} into {@code out},
 * blocking until the asynchronous dump completes.
 *
 * @param schema        tablespace name to dump
 * @param fetchSize     number of records fetched per round-trip
 * @param hdbconnection connection used to stream the dump
 * @param out           destination stream for the dump payload
 * @param listener      receives progress notifications
 * @throws Exception if the dump writer reported a failure, or on interruption
 */
private static void dumpTablespace(String schema, int fetchSize, HDBConnection hdbconnection, ExtendedDataOutputStream out, ProgressListener listener) throws Exception {
    Holder<Throwable> errorHolder = new Holder<>();
    CountDownLatch waiter = new CountDownLatch(1);
    hdbconnection.dumpTableSpace(schema, new TableSpaceDumpFileWriter(listener, errorHolder, waiter, schema, out), fetchSize, true);
    /* BUG FIX: wait for the writer to finish BEFORE inspecting the error holder.
     * The original code checked errorHolder.value first, racing with the dump
     * thread: an error set after the check (but before countDown) was silently
     * lost, and on the error path the method returned without ever awaiting. */
    waiter.await();
    if (errorHolder.value != null) {
        throw new Exception(errorHolder.value);
    }
}
Example usage of herddb.utils.Holder from the diennea/herddb project: class TableManager, method executeUpdate.
/**
 * Executes an UPDATE statement by scanning the table with the statement's predicate and,
 * for every matching record, logging and applying a new value computed by the statement's
 * RecordFunction.
 *
 * The Holders capture the key and new value of the LAST record updated (mutated from inside
 * the anonymous ScanResultOperation below); they feed the returned DMLStatementExecutionResult.
 *
 * @param update      the UPDATE to execute (supplies the record function and optional predicate)
 * @param transaction enclosing transaction, or null for auto-commit (transactionId 0)
 * @param context     evaluation context passed to the record function and predicate
 * @return result carrying the update count, the last updated key, and — only when
 *         update.isReturnValues() — the last new value
 * @throws RecordTooBigException if a computed record would exceed maxLogicalPageSize
 */
private StatementExecutionResult executeUpdate(UpdateStatement update, Transaction transaction, StatementEvaluationContext context) throws StatementExecutionException, DataStorageManagerException {
AtomicInteger updateCount = new AtomicInteger();
Holder<Bytes> lastKey = new Holder<>();
Holder<byte[]> lastValue = new Holder<>();
/*
an update can succeed only if the row is valid, the key is contains in the "keys" structure
the update will simply override the value of the row, assigning a null page to the row
the update can have a 'where' predicate which is to be evaluated against the decoded row, the update will be executed only if the predicate returns boolean 'true' value (CAS operation)
locks: the update uses a lock on the the key
*/
RecordFunction function = update.getFunction();
long transactionId = transaction != null ? transaction.transactionId : 0;
Predicate predicate = update.getPredicate();
ScanStatement scan = new ScanStatement(table.tablespace, table, predicate);
accessTableData(scan, context, new ScanResultOperation() {
@Override
public void accept(Record actual) throws StatementExecutionException, LogNotAvailableException, DataStorageManagerException {
// Compute the replacement value and reject it if the resulting entry would not fit in a page.
byte[] newValue = function.computeNewValue(actual, context, tableContext);
final long size = DataPage.estimateEntrySize(actual.key, newValue);
if (size > maxLogicalPageSize) {
throw new RecordTooBigException("New version of record " + actual.key + " is to big to be update: new size " + size + ", actual size " + DataPage.estimateEntrySize(actual) + ", max size " + maxLogicalPageSize);
}
// Write the update to the commit log first, then apply it to the table.
// The second log() argument is true only outside a transaction — presumably a
// synchronous-flush flag for auto-commit; verify against CommitLog.log.
LogEntry entry = LogEntryFactory.update(table, actual.key.data, newValue, transaction);
CommitLogResult pos = log.log(entry, entry.transactionId <= 0);
apply(pos, entry, false);
// Remember the last record touched for the statement result below.
lastKey.value = actual.key;
lastValue.value = newValue;
updateCount.incrementAndGet();
}
}, transaction, true, true); // NOTE(review): the two boolean flags' semantics are not visible here — confirm against accessTableData.
// Return-values mode exposes the last new value; otherwise the value slot is null.
return new DMLStatementExecutionResult(transactionId, updateCount.get(), lastKey.value, update.isReturnValues() ? (lastValue.value != null ? Bytes.from_array(lastValue.value) : null) : null);
}
End of aggregated Holder usage examples.