Use of com.apple.foundationdb.Transaction in project fdb-record-layer by FoundationDB.
From the class BunchedMapTest, the method runWithTwoTrs:
private void runWithTwoTrs(@Nonnull BiConsumer<? super Transaction, ? super Transaction> operation,
                           boolean legal,
                           @Nonnull List<Tuple> boundaryKeys) throws ExecutionException, InterruptedException {
    final String id = "two-trs-" + UUID.randomUUID().toString();
    try (Transaction tr1 = db.createTransaction();
         Transaction tr2 = db.createTransaction()) {
        tr1.options().setDebugTransactionIdentifier(id + "-1");
        tr1.options().setLogTransaction();
        tr2.options().setDebugTransactionIdentifier(id + "-2");
        tr2.options().setLogTransaction();
        CompletableFuture.allOf(tr1.getReadVersion(), tr2.getReadVersion()).get();
        tr1.addWriteConflictKey(new byte[]{ 0x01 });
        tr2.addWriteConflictKey(new byte[]{ 0x02 });
        operation.accept(tr1, tr2);
        tr1.commit().get();
        if (legal) {
            tr2.commit().get();
        } else {
            ExecutionException e = assertThrows(ExecutionException.class, () -> tr2.commit().get());
            assertNotNull(e.getCause());
            assertTrue(e.getCause() instanceof FDBException);
            FDBException fdbE = (FDBException) e.getCause();
            assertEquals(FDBError.NOT_COMMITTED.code(), fdbE.getCode());
        }
    }
    verifyBoundaryKeys(boundaryKeys);
}
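The verifyBoundaryKeys helper called at the end is defined elsewhere in BunchedMapTest and is not part of this excerpt. A minimal sketch of what it might do, assuming the test's db and bmSubspace fields and relying on the fact that each raw database key under the map's subspace is the boundary (first) key of one bunch (the real helper may check more than this):

private void verifyBoundaryKeys(@Nonnull List<Tuple> expectedBoundaryKeys) throws ExecutionException, InterruptedException {
    try (Transaction tr = db.createTransaction()) {
        // Each raw key under the map's subspace is the boundary key of one bunch.
        List<KeyValue> kvs = tr.getRange(bmSubspace.range()).asList().get();
        List<Tuple> boundaryKeys = kvs.stream()
                .map(kv -> bmSubspace.unpack(kv.getKey()))
                .collect(Collectors.toList());
        assertEquals(expectedBoundaryKeys, boundaryKeys);
    }
}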
Use of com.apple.foundationdb.Transaction in project fdb-record-layer by FoundationDB.
From the class BunchedMapTest, the method concurrentIllegalUpdates:
@Test
public void concurrentIllegalUpdates() throws ExecutionException, InterruptedException {
    final Tuple value = Tuple.from(Tuple.from((Object) null));
    runWithTwoTrs((tr1, tr2) -> {
        map.put(tr1, bmSubspace, Tuple.from(0L), value).join();
        map.put(tr1, bmSubspace, Tuple.from(5L), value).join();
        map.put(tr2, bmSubspace, Tuple.from(3L), value).join();
    }, false, Collections.singletonList(Tuple.from(0L)));
    try (Transaction tr = db.createTransaction()) {
        tr.clear(bmSubspace.range());
        tr.commit().get();
    }
    runWithTwoTrs((tr1, tr2) -> {
        map.put(tr1, bmSubspace, Tuple.from(5L), value).join();
        map.put(tr1, bmSubspace, Tuple.from(0L), value).join();
        map.put(tr2, bmSubspace, Tuple.from(3L), value).join();
    }, false, Collections.singletonList(Tuple.from(0L)));
    try (Transaction tr = db.createTransaction()) {
        tr.clear(bmSubspace.range());
        tr.commit().get();
    }
    final List<Tuple> tuples = LongStream.range(100L, 115L).boxed().map(Tuple::from).collect(Collectors.toList());
    db.run(tr -> {
        tr.clear(bmSubspace.range());
        tuples.forEach(t -> map.put(tr, bmSubspace, t, value).join());
        return null;
    });
    // Case 1: A transaction reads the value of a boundary key while
    // that boundary is updated.
    runWithTwoTrs((tr1, tr2) -> {
        map.put(tr1, bmSubspace, Tuple.from(116L), value).join();
        assertEquals(value, map.get(tr2, bmSubspace, Tuple.from(110L)).join().get());
    }, false, Arrays.asList(Tuple.from(100L), Tuple.from(110L)));
    // Case 2: A transaction reads a key while a concurrent put
    // changes the value of that same key.
    runWithTwoTrs((tr1, tr2) -> {
        map.put(tr1, bmSubspace, Tuple.from(105L), value.add(3.14d)).join();
        assertEquals(value, map.get(tr2, bmSubspace, Tuple.from(105L)).join().get());
    }, false, Arrays.asList(Tuple.from(100L), Tuple.from(110L)));
    assertEquals(value.add(3.14d), map.get(db, bmSubspace, Tuple.from(105L)).get().get());
    // Case 3: One put changes a value while another put tries to set it to
    // the same value it already has.
    runWithTwoTrs((tr1, tr2) -> {
        map.put(tr1, bmSubspace, Tuple.from(105L), value).join();
        map.put(tr2, bmSubspace, Tuple.from(105L), value.add(3.14d)).join();
    }, false, Arrays.asList(Tuple.from(100L), Tuple.from(110L)));
    assertEquals(value, map.get(db, bmSubspace, Tuple.from(105L)).get().get());
    // Case 4: Two puts happen on the same key at the same time.
    runWithTwoTrs((tr1, tr2) -> {
        map.put(tr1, bmSubspace, Tuple.from(105L), value.add(3.14d)).join();
        map.put(tr2, bmSubspace, Tuple.from(105L), value.add(2.72d)).join();
    }, false, Arrays.asList(Tuple.from(100L), Tuple.from(110L)));
    assertEquals(value.add(3.14d), map.get(db, bmSubspace, Tuple.from(105L)).get().get());
    // Case 5: One transaction writes a key in the interior of a bunch
    // while another transaction is writing to the same bunch.
    runWithTwoTrs((tr1, tr2) -> {
        map.put(tr1, bmSubspace, Tuple.from(116L), value.add(3.14d)).join();
        map.put(tr2, bmSubspace, Tuple.from(117L), value).join();
    }, false, Arrays.asList(Tuple.from(100L), Tuple.from(110L)));
    assertFalse(map.containsKey(db, bmSubspace, Tuple.from(117L)).get());
    // Case 6: Write a value that would end up being overwritten in a split.
    runWithTwoTrs((tr1, tr2) -> {
        map.put(tr1, bmSubspace, Tuple.from(102L, null), value).join();
        map.put(tr2, bmSubspace, Tuple.from(107L), value.add(3.14d)).join();
    }, false, Arrays.asList(Tuple.from(100L), Tuple.from(104L), Tuple.from(110L)));
    assertEquals(value, map.get(db, bmSubspace, Tuple.from(107L)).get().get());
    // Case 7: Write a key before the current boundary key that absorbs the
    // value from that boundary key while another transaction does the same
    // thing. (Note that this test fails if the read conflict ranges are
    // added after the writes, so this verifies that they are added in the
    // proper order.)
    runWithTwoTrs((tr1, tr2) -> {
        map.put(tr1, bmSubspace, Tuple.from(98L), value).join();
        map.put(tr2, bmSubspace, Tuple.from(99L), value).join();
    }, false, Arrays.asList(Tuple.from(98L), Tuple.from(104L), Tuple.from(110L)));
    // Case 8: The same as case 7, but the greater key wins instead of
    // the smaller key.
    runWithTwoTrs((tr1, tr2) -> {
        map.put(tr1, bmSubspace, Tuple.from(97L), value).join();
        map.put(tr2, bmSubspace, Tuple.from(96L), value).join();
    }, false, Arrays.asList(Tuple.from(97L), Tuple.from(104L), Tuple.from(110L)));
    try (Transaction tr = db.createTransaction()) {
        List<Map.Entry<Tuple, Tuple>> entryList = AsyncUtil.collectRemaining(map.scan(tr, bmSubspace)).get();
        System.out.println(entryList);
    }
}
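These tests depend on db, map, and bmSubspace fixtures initialized elsewhere in BunchedMapTest. A plausible setup, with a hypothetical subspace prefix and bunch size, and assuming the tuple-based serializer that ships with the Record Layer (the real fixture may be configured differently):

// Hypothetical fixture: the actual test class may configure these differently.
private static Database db;  // opened in a @BeforeAll hook via FDB.selectAPIVersion(...).open()
private static final Subspace bmSubspace = new Subspace(Tuple.from("bunched-map-test"));
private static final BunchedMap<Tuple, Tuple> map =
        new BunchedMap<>(BunchedTupleSerializer.instance(), Comparator.naturalOrder(), 10);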
Use of com.apple.foundationdb.Transaction in project fdb-record-layer by FoundationDB.
From the class BunchedMap, the method compact:
/**
 * Compact the values within the map into as few keys as possible. This will scan through and re-write
 * the keys to be optimal. This feature is currently experimental, but it can be used to pack
 * entries more tightly if needed.
 *
 * @param tcx database or transaction to use when compacting data
 * @param subspace subspace within which the map's data are located
 * @param keyLimit maximum number of database keys to read in a single transaction
 * @param continuation the continuation returned from a previous call or <code>null</code>
 *     to start from the beginning of the subspace
 * @return future that will complete with a continuation that can be used to complete
 *     the compaction across multiple transactions (<code>null</code> if finished)
 */
@Nonnull
protected CompletableFuture<byte[]> compact(@Nonnull TransactionContext tcx, @Nonnull Subspace subspace, int keyLimit, @Nullable byte[] continuation) {
    return tcx.runAsync(tr -> {
        byte[] subspaceKey = subspace.getKey();
        byte[] begin = (continuation == null) ? subspaceKey : continuation;
        byte[] end = subspace.range().end;
        final AsyncIterable<KeyValue> iterable = tr.snapshot().getRange(begin, end, keyLimit);
        List<Map.Entry<K, V>> currentEntryList = new ArrayList<>(bunchSize);
        // The estimated size can be off (and will be off for many implementations of BunchedSerializer),
        // but it is just a heuristic to know when to split, so that's fine (I claim).
        AtomicInteger currentEntrySize = new AtomicInteger(0);
        AtomicInteger readKeys = new AtomicInteger(0);
        AtomicReference<byte[]> lastReadKeyBytes = new AtomicReference<>(null);
        AtomicReference<K> lastKey = new AtomicReference<>(null);
        return AsyncUtil.forEach(iterable, kv -> {
            final K boundaryKey = serializer.deserializeKey(kv.getKey(), subspaceKey.length);
            final List<Map.Entry<K, V>> entriesFromKey = serializer.deserializeEntries(boundaryKey, kv.getValue());
            readKeys.incrementAndGet();
            if (entriesFromKey.size() >= bunchSize && currentEntryList.isEmpty()) {
                // Nothing can be done. Just move on.
                lastReadKeyBytes.set(null);
                return;
            }
            if (lastReadKeyBytes.get() == null) {
                lastReadKeyBytes.set(kv.getKey());
            }
            final byte[] endKeyBytes = ByteArrayUtil.join(subspaceKey,
                    serializer.serializeKey(entriesFromKey.get(entriesFromKey.size() - 1).getKey()), ZERO_ARRAY);
            tr.addReadConflictRange(lastReadKeyBytes.get(), endKeyBytes);
            tr.addWriteConflictRange(lastReadKeyBytes.get(), kv.getKey());
            lastReadKeyBytes.set(endKeyBytes);
            tr.clear(kv.getKey());
            instrumentDelete(kv.getKey(), kv.getValue());
            for (Map.Entry<K, V> entry : entriesFromKey) {
                byte[] serializedEntry = serializer.serializeEntry(entry);
                if (currentEntrySize.get() + serializedEntry.length > MAX_VALUE_SIZE && !currentEntryList.isEmpty()) {
                    flushEntryList(tr, subspaceKey, currentEntryList, lastKey);
                    currentEntrySize.set(0);
                }
                currentEntryList.add(entry);
                currentEntrySize.addAndGet(serializedEntry.length);
                if (currentEntryList.size() == bunchSize) {
                    flushEntryList(tr, subspaceKey, currentEntryList, lastKey);
                    currentEntrySize.set(0);
                }
            }
        }, tr.getExecutor()).thenApply(vignore -> {
            if (!currentEntryList.isEmpty()) {
                flushEntryList(tr, subspaceKey, currentEntryList, lastKey);
            }
            // Return a valid continuation if there might be more keys.
            if (lastKey.get() != null && keyLimit != ReadTransaction.ROW_LIMIT_UNLIMITED && readKeys.get() == keyLimit) {
                return ByteArrayUtil.join(subspaceKey, serializer.serializeKey(lastKey.get()), ZERO_ARRAY);
            } else {
                return null;
            }
        });
    });
}
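Because the returned future completes with a continuation, a caller can split a large compaction across several transactions by feeding each continuation back into the next call until it comes back null. A minimal sketch of such a driver loop, assuming access to the protected method (for instance from a subclass or same-package code), plus a Database named db and a Subspace named mapSubspace (both hypothetical names):

// Compact at most 100 database keys per transaction until the whole subspace is processed.
byte[] continuation = null;
do {
    continuation = map.compact(db, mapSubspace, 100, continuation).join();
} while (continuation != null);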
Use of com.apple.foundationdb.Transaction in project fdb-record-layer by FoundationDB.
From the class KeySpaceDirectoryTest, the method testListObeysTimeLimits:
@Test
public void testListObeysTimeLimits() {
    KeySpace root = new KeySpace(
            new KeySpaceDirectory("root", KeyType.STRING, "root-" + random.nextInt(Integer.MAX_VALUE))
                    .addSubdirectory(new KeySpaceDirectory("a", KeyType.LONG)
                            .addSubdirectory(new KeySpaceDirectory("b", KeyType.LONG)
                                    .addSubdirectory(new KeySpaceDirectory("c", KeyType.LONG)))));
    final FDBDatabase database = FDBDatabaseFactory.instance().getDatabase();
    try (FDBRecordContext context = database.openContext()) {
        Transaction tr = context.ensureActive();
        for (int i = 0; i < 10; i++) {
            for (int j = 0; j < 3; j++) {
                for (int k = 0; k < 5; k++) {
                    tr.set(root.path("root").add("a", i).add("b", j).add("c", k).toTuple(context).pack(),
                            Tuple.from(i + j).pack());
                }
            }
        }
        tr.commit().join();
    }
    try (FDBRecordContext context = database.openContext()) {
        // Iteration will inject a 1ms pause for each "a" value we iterate over (there are 10 of them),
        // so we want to make the time limit long enough to make *some* progress, but short enough
        // to make sure we cannot get them all.
        ScanProperties props = new ScanProperties(ExecuteProperties.newBuilder()
                .setFailOnScanLimitReached(false)
                .setTimeLimit(5L)
                .build());
        // The inner and outer iterators are declared here instead of inline with the call to
        // flatMapPipelined because IntelliJ was having issues grokking it as a single expression.
        Function<byte[], RecordCursor<ResolvedKeySpacePath>> aIterator = outerContinuation ->
                root.path("root").listSubdirectoryAsync(context, "a", outerContinuation, props).map(value -> {
                    sleep(1L);
                    return value;
                });
        BiFunction<ResolvedKeySpacePath, byte[], RecordCursor<ResolvedKeySpacePath>> bIterator =
                (aPath, innerContinuation) ->
                        aPath.toPath().add("b", 0).listSubdirectoryAsync(context, "c", innerContinuation, props);
        RecordCursor<ResolvedKeySpacePath> cursor = RecordCursor.flatMapPipelined(aIterator, bIterator, null, 10);
        long count = cursor.getCount().join();
        assertEquals(RecordCursor.NoNextReason.TIME_LIMIT_REACHED, cursor.getNext().getNoNextReason());
        // With a 1ms delay per "a" value, we should visit only some of the 10 "a" values,
        // each contributing 5 "c" values, so we should not have been able to read all 50
        // values; the assertion allows at most 40 to leave some slack.
        assertTrue(count <= 40, "Read too many values, query should have timed out");
    }
}
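The sleep call inside aIterator is a private helper of the test class that is not shown in this excerpt; presumably it is a small wrapper around Thread.sleep that swallows the checked interrupt, along these lines (a sketch, not the actual implementation):

// Hypothetical helper: pause the current thread for the given number of milliseconds,
// restoring the interrupt flag if the sleep is interrupted.
private static void sleep(long millis) {
    try {
        Thread.sleep(millis);
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
    }
}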
Use of com.apple.foundationdb.Transaction in project fdb-record-layer by FoundationDB.
From the class HighContentionAllocatorTest, the method validateAllocation:
private void validateAllocation(FDBRecordContext context, HighContentionAllocator hca, Map<Long, String> allocations) {
    Subspace allocationSubspace = hca.getAllocationSubspace();
    Transaction transaction = context.ensureActive();
    List<KeyValue> keyValueList = transaction.getRange(allocationSubspace.range()).asList().join();
    Map<Long, String> storedAllocations = keyValueList.stream()
            .collect(Collectors.toMap(kv -> extractKey(allocationSubspace, kv), this::extractValue));
    assertThat("we see the allocated keys in the subspace",
            allocations.entrySet(), containsInAnyOrder(storedAllocations.entrySet().toArray()));
}
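The extractKey and extractValue helpers are defined elsewhere in HighContentionAllocatorTest. Plausible sketches, assuming the allocator stores the allocated number tuple-packed in the key under the allocation subspace and the allocated name tuple-packed in the value (the actual helpers may differ):

// Hypothetical: unpack the allocated number from the key within the allocation subspace.
private Long extractKey(Subspace allocationSubspace, KeyValue kv) {
    return allocationSubspace.unpack(kv.getKey()).getLong(0);
}

// Hypothetical: unpack the allocated name from the tuple-encoded value.
private String extractValue(KeyValue kv) {
    return Tuple.fromBytes(kv.getValue()).getString(0);
}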