Use of com.apple.foundationdb.Database in project fdb-record-layer by FoundationDB.
The class ClientLogEventCounterTest, method main: opens a database, scans client log events between two timestamps, and prints a tree of key prefixes weighted by read and/or write counts.
@SuppressWarnings("PMD.SystemPrintln")
public static void main(String[] args) {
    String cluster = null;
    Instant start = null;
    Instant end = null;
    boolean countReads = true;
    boolean countWrites = false;
    final boolean countSingleKeys = true;
    final boolean countRanges = true;
    if (args.length > 0) {
        cluster = args[0];
    }
    if (args.length > 1) {
        start = ZonedDateTime.parse(args[1]).toInstant();
    }
    if (args.length > 2) {
        end = ZonedDateTime.parse(args[2]).toInstant();
    }
    if (args.length > 3) {
        String arg = args[3];
        countReads = "READ".equals(arg) || "BOTH".equals(arg);
        countWrites = "WRITE".equals(arg) || "BOTH".equals(arg);
    }
    FDB fdb = FDB.selectAPIVersion(630);
    Database database = fdb.open(cluster);
    Executor executor = database.getExecutor();
    TupleKeyCountTree root = new TupleKeyCountTree();
    DatabaseClientLogEventCounter counter = new DatabaseClientLogEventCounter(root, countReads, countWrites, countSingleKeys, countRanges, true);
    TupleKeyCountTree.Printer printer = (depth, path) -> {
        for (int i = 0; i < depth; i++) {
            System.out.print(" ");
        }
        System.out.print(path.stream().map(Object::toString).collect(Collectors.joining("/")));
        int percent = (path.get(0).getCount() * 100) / path.get(0).getParent().getCount();
        System.out.println(" " + percent + "%");
    };
    int eventLimit = 10_000;
    long timeLimit = 15_000;
    DatabaseClientLogEvents events = DatabaseClientLogEvents.forEachEventBetweenTimestamps(database, executor, counter, start, end, eventLimit, timeLimit).join();
    while (true) {
        System.out.println(events.getEarliestTimestamp() + " - " + events.getLatestTimestamp());
        root.hideLessThanFraction(0.10);
        root.printTree(printer, "/");
        if (!events.hasMore()) {
            break;
        }
        events.forEachEventContinued(database, executor, counter, eventLimit, timeLimit).join();
    }
}
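For reference, a hedged sketch of how this tool might be invoked; the cluster-file path and timestamps below are hypothetical, and the fourth argument is READ, WRITE, or BOTH, matching the argument parsing above.

// Hypothetical invocation of the main method above (cluster file and dates are made up).
public static void runExample() {
    main(new String[] {
            "/etc/foundationdb/fdb.cluster",   // args[0]: cluster file passed to fdb.open
            "2021-06-01T00:00:00Z",            // args[1]: start, parsed with ZonedDateTime.parse
            "2021-06-02T00:00:00Z",            // args[2]: end
            "BOTH"                             // args[3]: count both reads and writes
    });
}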
Use of com.apple.foundationdb.Database in project fdb-record-layer by FoundationDB.
The class DatabaseClientLogEventsTest, method main: opens a database and prints every client log event between two timestamps.
@SuppressWarnings("PMD.SystemPrintln")
public static void main(String[] args) {
    String cluster = null;
    Instant start = null;
    Instant end = null;
    if (args.length > 0) {
        cluster = args[0];
    }
    if (args.length > 1) {
        start = ZonedDateTime.parse(args[1]).toInstant();
    }
    if (args.length > 2) {
        end = ZonedDateTime.parse(args[2]).toInstant();
    }
    FDB fdb = FDB.selectAPIVersion(630);
    Database database = fdb.open(cluster);
    Executor executor = database.getExecutor();
    DatabaseClientLogEvents.EventConsumer consumer = (tr, event) -> {
        System.out.println(event);
        return AsyncUtil.DONE;
    };
    DatabaseClientLogEvents.forEachEventBetweenTimestamps(database, executor, consumer, start, end, Integer.MAX_VALUE, Long.MAX_VALUE).join();
}
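The consumer above simply prints each event. As a hedged variant that relies only on the EventConsumer shape shown here, the callback could instead count the events it sees:

// A counting consumer (sketch); AtomicLong is java.util.concurrent.atomic.AtomicLong.
AtomicLong eventCount = new AtomicLong();
DatabaseClientLogEvents.EventConsumer countingConsumer = (tr, event) -> {
    eventCount.incrementAndGet();
    return AsyncUtil.DONE;
};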
Use of com.apple.foundationdb.Database in project fdb-record-layer by FoundationDB.
The class BunchedMapTest, method stressTest: runs concurrent worker, verifier, and compaction tasks against several BunchedMap subspaces, logging every operation so a consistency check can replay and verify them.
private void stressTest(final Random r, final int trTotal, final int opTotal, final int keyCount, final int workerCount, boolean addBytesToValue, AtomicLong globalTrCount, int mapCount) throws InterruptedException, ExecutionException {
    final long initialTrCount = globalTrCount.get();
    final Subspace logSubspace = DirectoryLayer.getDefault().createOrOpen(db, PathUtil.from(getClass().getName(), "log")).get();
    db.run(tr -> {
        tr.clear(bmSubspace.range());
        tr.clear(logSubspace.range());
        // If the database is empty, putting these here stops scans from hitting the log subspace within a transaction
        tr.set(logSubspace.getKey(), new byte[0]);
        tr.set(ByteArrayUtil.join(logSubspace.getKey(), new byte[] { (byte) 0xff }), new byte[0]);
        return null;
    });
    final List<CompletableFuture<Void>> workers = Stream.generate(() -> {
        int bunchSize = r.nextInt(15) + 1;
        BunchedMap<Tuple, Tuple> workerMap = new BunchedMap<>(serializer, Comparator.naturalOrder(), bunchSize);
        AtomicInteger trCount = new AtomicInteger(0);
        return AsyncUtil.whileTrue(() -> {
            final Transaction tr = db.createTransaction();
            tr.options().setDebugTransactionIdentifier("stress-tr-" + globalTrCount.getAndIncrement());
            tr.options().setLogTransaction();
            final AtomicInteger opCount = new AtomicInteger(0);
            final AtomicInteger localOrder = new AtomicInteger(0);
            return AsyncUtil.whileTrue(() -> {
                int opCode = r.nextInt(4);
                CompletableFuture<?> op;
                if (opCode == 0) {
                    // Random put
                    CompletableFuture<?>[] futures = new CompletableFuture<?>[mapCount];
                    for (int i = 0; i < mapCount; i++) {
                        if (r.nextBoolean()) {
                            Tuple key = Tuple.from(r.nextInt(keyCount));
                            Tuple value;
                            if (addBytesToValue) {
                                int byteLength = r.nextInt(5000);
                                byte[] bytes = new byte[byteLength];
                                r.nextBytes(bytes);
                                value = Tuple.from(r.nextLong(), bytes);
                            } else {
                                value = Tuple.from(r.nextLong());
                            }
                            tr.mutate(MutationType.SET_VERSIONSTAMPED_KEY, getLogKey(logSubspace, i, localOrder), Tuple.from("PUT", key, value).pack());
                            futures[i] = workerMap.put(tr, bmSubspace.subspace(Tuple.from(i)), key, value);
                        } else {
                            futures[i] = AsyncUtil.DONE;
                        }
                    }
                    op = CompletableFuture.allOf(futures);
                } else if (opCode == 1) {
                    // Read a random key.
                    int mapIndex = r.nextInt(mapCount);
                    Tuple key = Tuple.from(r.nextInt(keyCount));
                    op = workerMap.get(tr, bmSubspace.get(mapIndex), key).thenAccept(optionalValue -> tr.mutate(MutationType.SET_VERSIONSTAMPED_KEY, getLogKey(logSubspace, mapIndex, localOrder), Tuple.from("GET", key, optionalValue.orElse(null)).pack()));
                } else if (opCode == 2) {
                    // Check contains key
                    int mapIndex = r.nextInt(mapCount);
                    Tuple key = Tuple.from(r.nextInt(keyCount));
                    op = workerMap.containsKey(tr, bmSubspace.subspace(Tuple.from(mapIndex)), key).thenAccept(wasPresent -> tr.mutate(MutationType.SET_VERSIONSTAMPED_KEY, getLogKey(logSubspace, mapIndex, localOrder), Tuple.from("CONTAINS_KEY", key, wasPresent).pack()));
                } else {
                    // Remove a random key
                    int mapIndex = r.nextInt(mapCount);
                    Tuple key = Tuple.from(r.nextInt(keyCount));
                    op = workerMap.remove(tr, bmSubspace.subspace(Tuple.from(mapIndex)), key).thenAccept(oldValue -> tr.mutate(MutationType.SET_VERSIONSTAMPED_KEY, getLogKey(logSubspace, mapIndex, localOrder), Tuple.from("REMOVE", key, oldValue.orElse(null)).pack()));
                }
                return op.thenApply(ignore -> opCount.incrementAndGet() < opTotal);
            }).thenCompose(vignore -> tr.commit()).handle((vignore, err) -> {
                tr.close();
                if (err != null) {
                    FDBException fdbE = unwrapException(err);
                    if (fdbE != null) {
                        if (fdbE.getCode() != FDBError.NOT_COMMITTED.code() && fdbE.getCode() != FDBError.TRANSACTION_TOO_OLD.code()) {
                            throw fdbE;
                        }
                    } else {
                        if (err instanceof RuntimeException) {
                            throw (RuntimeException) err;
                        } else {
                            throw new RuntimeException("verification error", err);
                        }
                    }
                }
                return trCount.incrementAndGet() < trTotal;
            });
        });
    }).limit(workerCount).collect(Collectors.toList());
    final AtomicBoolean stillWorking = new AtomicBoolean(true);
    final CompletableFuture<Void> verifierWorker = AsyncUtil.whileTrue(() -> {
        Transaction tr = db.createTransaction();
        AtomicLong versionRef = new AtomicLong(-1L);
        return tr.getReadVersion().thenCompose(version -> {
            versionRef.set(version);
            // Grab the mutation list.
            AtomicInteger mapIndex = new AtomicInteger(0);
            return AsyncUtil.whileTrue(() -> {
                Subspace mapSubspace = bmSubspace.subspace(Tuple.from(mapIndex.get()));
                Subspace mapLogSubspace = logSubspace.subspace(Tuple.from(mapIndex.get()));
                CompletableFuture<List<Tuple>> logFuture = AsyncUtil.mapIterable(tr.getRange(mapLogSubspace.range()), kv -> Tuple.fromBytes(kv.getValue())).asList();
                // Verify integrity and then grab all of the keys and values.
                CompletableFuture<List<Map.Entry<Tuple, Tuple>>> contentFuture = AsyncUtil.collectRemaining(map.scan(tr, mapSubspace));
                CompletableFuture<Void> integrityFuture = map.verifyIntegrity(tr, mapSubspace);
                return integrityFuture.thenCompose(vignore -> contentFuture.thenCombine(logFuture, (mapContents, logEntries) -> {
                    Map<Tuple, Tuple> mapCopy = new TreeMap<>();
                    for (Tuple logEntry : logEntries) {
                        String op = logEntry.getString(0);
                        if (op.equals("PUT")) {
                            mapCopy.put(logEntry.getNestedTuple(1), logEntry.getNestedTuple(2));
                        } else if (op.equals("GET")) {
                            assertEquals(logEntry.getNestedTuple(2), mapCopy.get(logEntry.getNestedTuple(1)));
                        } else if (op.equals("CONTAINS_KEY")) {
                            assertEquals(logEntry.getBoolean(2), mapCopy.containsKey(logEntry.getNestedTuple(1)));
                        } else if (op.equals("REMOVE")) {
                            Tuple oldValue = mapCopy.remove(logEntry.getNestedTuple(1));
                            assertEquals(logEntry.getNestedTuple(2), oldValue);
                        } else {
                            fail("Unexpected operation " + op);
                        }
                    }
                    assertEquals(new ArrayList<>(mapCopy.entrySet()), mapContents);
                    return mapIndex.incrementAndGet() < mapCount;
                })).handle((res, err) -> {
                    // Report error information unless it was just a transaction timeout (in which case we'll retry).
                    FDBException fdbE = unwrapException(err);
                    if (err != null && (fdbE == null || fdbE.getCode() != FDBError.TRANSACTION_TOO_OLD.code())) {
                        System.err.println("Error verifying consistency: " + err);
                        err.printStackTrace();
                        List<Map.Entry<Tuple, Tuple>> contents = contentFuture.join();
                        System.err.println("Map contents:");
                        contents.forEach(entry -> System.err.println(" " + entry.getKey() + " -> " + entry.getValue()));
                        System.err.println("DB contents:");
                        List<KeyValue> rangeKVs = tr.getRange(bmSubspace.range()).asList().join();
                        rangeKVs.forEach(kv -> {
                            Tuple boundaryKey = bmSubspace.unpack(kv.getKey());
                            System.err.println(" " + boundaryKey + " -> " + serializer.deserializeEntries(boundaryKey, kv.getValue()));
                        });
                        List<Tuple> logEntries = logFuture.join();
                        System.err.println("Log contents:");
                        logEntries.forEach(logEntry -> System.err.println(" " + logEntry));
                        if (err instanceof RuntimeException) {
                            throw (RuntimeException) err;
                        } else {
                            throw new LoggableException("unable to complete consistency check", err);
                        }
                    }
                    return res;
                });
            });
        }).whenComplete((v, t) -> tr.close()).thenApply(vignore -> stillWorking.get());
    });
    AtomicInteger mapIndex = new AtomicInteger(0);
    CompletableFuture<Void> compactingWorker = AsyncUtil.whileTrue(() -> {
        AtomicReference<byte[]> continuation = new AtomicReference<>(null);
        return AsyncUtil.whileTrue(() -> map.compact(db, bmSubspace.subspace(Tuple.from(mapIndex.get())), 5, continuation.get()).thenApply(nextContinuation -> {
            continuation.set(nextContinuation);
            return nextContinuation != null;
        })).thenApply(vignore -> {
            mapIndex.getAndUpdate(oldIndex -> (oldIndex + 1) % mapCount);
            return stillWorking.get();
        });
    });
    // Wait for all workers to stop working.
    AsyncUtil.whenAll(workers).whenComplete((vignore, err) -> stillWorking.set(false)).thenAcceptBoth(verifierWorker, (vignore1, vignore2) -> {
    }).thenAcceptBoth(compactingWorker, (vignore1, vignore2) -> {
    }).whenComplete((vignore, err) -> {
        System.out.printf("Completed stress test with %d workers, %d keys, and %d transactions %s (large values=%s).%n", workerCount, keyCount, globalTrCount.get() - initialTrCount, (err == null ? "successfully" : "with an error"), addBytesToValue);
        if (err != null) {
            err.printStackTrace();
        }
        for (int i = 0; i < mapCount; i++) {
            System.out.println(" Map " + i + ":");
            Subspace mapSubspace = bmSubspace.subspace(Tuple.from(i));
            List<KeyValue> rangeKVs = inconsistentScan(db, mapSubspace);
            System.out.println(" Boundary keys: " + rangeKVs.stream().map(kv -> mapSubspace.unpack(kv.getKey())).collect(Collectors.toList()));
            System.out.println(" Boundary info:");
            rangeKVs.forEach(kv -> {
                Tuple boundaryKey = mapSubspace.unpack(kv.getKey());
                System.out.printf(" %s: %d - %s%n", boundaryKey, serializer.deserializeEntries(boundaryKey, kv.getValue()).size(), serializer.deserializeKeys(boundaryKey, kv.getValue()));
            });
        }
        int opsCount = inconsistentScan(db, logSubspace).size();
        System.out.println(" Committed ops: " + opsCount);
    }).get();
}
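The unwrapException helper used in the error handlers above is not part of this excerpt. A plausible sketch, offered as an assumption rather than the project's actual code, simply walks the cause chain looking for an FDBException:

// Hypothetical sketch of unwrapException: CompletableFuture stages usually wrap failures
// in CompletionException, so follow getCause() until an FDBException (or nothing) is found.
private static FDBException unwrapException(Throwable err) {
    Throwable current = err;
    while (current != null && !(current instanceof FDBException)) {
        current = current.getCause();
    }
    return (FDBException) current;   // null if no FDBException was in the chain
}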
Use of com.apple.foundationdb.Database in project fdb-record-layer by FoundationDB.
The class BunchedMapTest, method concurrentLegalUpdates: exercises pairs of concurrent transactions whose BunchedMap updates should not conflict, then compacts the map and verifies its boundary keys.
@Test
public void concurrentLegalUpdates() throws ExecutionException, InterruptedException {
    final Tuple value = Tuple.from((Object) null);
    // From the initial database, essentially any two updates will cause each one
    // to get its own key.
    runWithTwoTrs((tr1, tr2) -> {
        map.put(tr1, bmSubspace, Tuple.from(1066L), value).join();
        map.put(tr2, bmSubspace, Tuple.from(1415L), value).join();
    }, true, Arrays.asList(Tuple.from(1066L), Tuple.from(1415L)));
    try (Transaction tr = db.createTransaction()) {
        tr.clear(bmSubspace.range());
        tr.commit().get();
    }
    final List<Tuple> tuples = LongStream.range(100L, 115L).boxed().map(Tuple::from).collect(Collectors.toList());
    db.run(tr -> {
        tuples.forEach(t -> map.put(tr, bmSubspace, t, value).join());
        return null;
    });
    // Case 1: One transaction reads the same key as another, but it
    // doesn't actually need the part that is different.
    runWithTwoTrs((tr1, tr2) -> {
        map.put(tr1, bmSubspace, Tuple.from(116L), value).join();
        assertEquals(value, map.get(tr2, bmSubspace, Tuple.from(112L)).join().get());
    }, true, Arrays.asList(Tuple.from(100L), Tuple.from(110L)));
    // Case 2: One transaction reads a key while another transaction
    // writes the same value back into that key.
    runWithTwoTrs((tr1, tr2) -> {
        map.put(tr1, bmSubspace, Tuple.from(105L), value).join();
        assertEquals(value, map.get(tr2, bmSubspace, Tuple.from(105L)).join().get());
    }, true, Arrays.asList(Tuple.from(100L), Tuple.from(110L)));
    // Case 3: The transactions' range reads will overlap.
    runWithTwoTrs((tr1, tr2) -> {
        // As the first bunch is full, the logic chooses to put (109L, null)
        // as the first key of the second bunch.
        map.put(tr1, bmSubspace, Tuple.from(109L, null), value).join();
        // As the split is in the middle, it will choose to put
        // (107L, null) in the first bunch.
        map.put(tr2, bmSubspace, Tuple.from(107L, null), value).join();
    }, true, Arrays.asList(Tuple.from(100L), Tuple.from(105L), Tuple.from(109L, null)));
    try (Transaction tr = db.createTransaction()) {
        map.verifyIntegrity(tr, bmSubspace).get();
        // Fill up the (100L,) to (105L,) range.
        LongStream.range(0L, 5L).boxed().map(l -> Tuple.from(104L, l)).forEach(t -> map.put(tr, bmSubspace, t, value).join());
        tr.commit().get();
    }
    // Case 4: Read a value that is rewritten to the same value when appending
    // to the beginning.
    runWithTwoTrs((tr1, tr2) -> {
        map.put(tr1, bmSubspace, Tuple.from(104L, 100L), value).join();
        assertEquals(value, map.get(tr2, bmSubspace, Tuple.from(107L)).join().get());
    }, true, Arrays.asList(Tuple.from(100L), Tuple.from(104L, 100L), Tuple.from(109L, null)));
    try (Transaction tr = db.createTransaction()) {
        // Fill up (104L, 100L) to (109L, null).
        LongStream.range(101L, 104L).boxed().map(l -> Tuple.from(104L, l)).forEach(t -> map.put(tr, bmSubspace, t, value).join());
        tr.commit().get();
    }
    // Case 5: Two keys going into the middle, between two filled ranges.
    runWithTwoTrs((tr1, tr2) -> {
        map.put(tr1, bmSubspace, Tuple.from(104L, 42L), value).join();
        map.put(tr2, bmSubspace, Tuple.from(104L, 43L), value).join();
    }, true, Arrays.asList(Tuple.from(100L), Tuple.from(104L, 42L), Tuple.from(104L, 43L), Tuple.from(104L, 100L), Tuple.from(109L, null)));
    // Case 6: Two keys before all filled ranges.
    runWithTwoTrs((tr1, tr2) -> {
        map.put(tr1, bmSubspace, Tuple.from(42L), value).join();
        map.put(tr2, bmSubspace, Tuple.from(43L), value).join();
    }, true, Arrays.asList(Tuple.from(42L), Tuple.from(43L), Tuple.from(100L), Tuple.from(104L, 42L), Tuple.from(104L, 43L), Tuple.from(104L, 100L), Tuple.from(109L, null)));
    try (Transaction tr = db.createTransaction()) {
        // Fill up the last range.
        LongStream.range(117L, 120L).boxed().map(Tuple::from).forEach(t -> map.put(tr, bmSubspace, t, value).join());
        tr.commit().get();
    }
    // Case 7: Two keys after all filled ranges.
    runWithTwoTrs((tr1, tr2) -> {
        map.put(tr1, bmSubspace, Tuple.from(120L), value).join();
        map.put(tr2, bmSubspace, Tuple.from(121L), value).join();
    }, true, Arrays.asList(Tuple.from(42L), Tuple.from(43L), Tuple.from(100L), Tuple.from(104L, 42L), Tuple.from(104L, 43L), Tuple.from(104L, 100L), Tuple.from(109L, null), Tuple.from(120L), Tuple.from(121L)));
    // Case 8: Adding to a full range while simultaneously adding something after the range.
    runWithTwoTrs((tr1, tr2) -> {
        map.put(tr1, bmSubspace, Tuple.from(102L, 0L), value).join();
        map.put(tr2, bmSubspace, Tuple.from(104L, 41L), value).join();
    }, true, Arrays.asList(Tuple.from(42L), Tuple.from(43L), Tuple.from(100L), Tuple.from(104L), Tuple.from(104L, 41L), Tuple.from(104L, 43L), Tuple.from(104L, 100L), Tuple.from(109L, null), Tuple.from(120L), Tuple.from(121L)));
    // Compact the data to a minimal number of keys.
    try (Transaction tr = db.createTransaction()) {
        assertNull(map.compact(tr, bmSubspace, 0, null).get());
        map.verifyIntegrity(tr, bmSubspace).get();
        tr.commit().get();
    }
    verifyBoundaryKeys(Arrays.asList(Tuple.from(42L), Tuple.from(104L, 2L), Tuple.from(105L), Tuple.from(113L)));
}
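The helpers runWithTwoTrs and verifyBoundaryKeys are not included in this excerpt. As a rough, hypothetical sketch of the runWithTwoTrs pattern: both transactions acquire read versions before either commits, the supplied operation runs against both, and for these "legal" cases both commits are expected to succeed without conflicting.

// Hypothetical sketch only; the real helper also takes a boolean flag and checks the
// expected boundary keys of the BunchedMap after both transactions commit.
private void runWithTwoTrsSketch(BiConsumer<Transaction, Transaction> operation,
                                 List<Tuple> expectedBoundaryKeys) throws ExecutionException, InterruptedException {
    try (Transaction tr1 = db.createTransaction(); Transaction tr2 = db.createTransaction()) {
        // Make sure both transactions have started (acquired read versions) before either commits.
        tr1.getReadVersion().get();
        tr2.getReadVersion().get();
        operation.accept(tr1, tr2);
        tr1.commit().get();
        tr2.commit().get();   // should not hit a conflict for the "legal" update cases above
    }
    // A real implementation would then verify expectedBoundaryKeys, e.g. via verifyBoundaryKeys.
}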
Use of com.apple.foundationdb.Database in project lionrock by panghy.
The class TestFDBVersionstamp, method testActualFDBGetVersionstamp: shows that runAsync(Transaction::getVersionstamp) never completes against the real FDB client.
/**
 * Check that the actual FDB client also hangs when using runAsync with getVersionstamp.
 */
@Test
public void testActualFDBGetVersionstamp() {
    Database fdb = FDB.selectAPIVersion(630).open();
    CompletableFuture<byte[]> cf = fdb.runAsync(Transaction::getVersionstamp);
    assertFalse(cf.isDone());
}
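The hang happens because Transaction.getVersionstamp() returns a future that only completes after the transaction commits, while runAsync waits for the future returned by its body before committing. A commonly used workaround, sketched here rather than taken from the test, captures the future inside the retry loop and joins it only after runAsync has finished:

// Capture the versionstamp future, but do not make runAsync wait on it before committing.
AtomicReference<CompletableFuture<byte[]>> versionstampRef = new AtomicReference<>();
fdb.runAsync(tr -> {
    versionstampRef.set(tr.getVersionstamp());
    return CompletableFuture.completedFuture(null);
}).join();
byte[] versionstamp = versionstampRef.get().join();   // safe now: the commit has completed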