Use of com.apple.foundationdb.subspace.Subspace in project fdb-record-layer by FoundationDB.
From the class ScopedDirectoryLayerTest, method testLocatableDirectoryResolver:
@Test
public void testLocatableDirectoryResolver() {
    KeySpace keySpace = new KeySpace(
            new KeySpaceDirectory("path", KeyType.STRING, "path")
                    .addSubdirectory(new KeySpaceDirectory("to", KeyType.STRING, "to")
                            .addSubdirectory(new KeySpaceDirectory("dirLayer", KeyType.STRING, "dirLayer"))));
    ResolvedKeySpacePath path;
    try (FDBRecordContext context = database.openContext()) {
        path = keySpace.resolveFromKey(context, Tuple.from("path", "to", "dirLayer"));
    }
    // The resolver is scoped to the key space path "path"/"to"/"dirLayer" rather than to the whole cluster.
    LocatableResolver resolver = resolverFactory.create(path);
    Long value = resolver.resolve("foo").join();
    // Reconstruct the directory layer that should back the scoped resolver: its node subspace is the
    // resolved path's packed prefix followed by the default node prefix; its content subspace is the path itself.
    DirectoryLayer directoryLayer = new DirectoryLayer(
            new Subspace(Bytes.concat(path.toTuple().pack(), DirectoryLayer.DEFAULT_NODE_SUBSPACE.getKey())),
            path.toSubspace());
    try (FDBRecordContext context = database.openContext()) {
        validate(context, resolver, directoryLayer, "foo", value);
        // The entry must have been allocated in the scoped layer only, not in the cluster-wide default layer.
        DirectoryLayer defaultDirectoryLayer = DirectoryLayer.getDefault();
        List<String> defaultDirectories = defaultDirectoryLayer.list(context.ensureActive()).join();
        assertThat("entry is not in the default directory layer", defaultDirectories, not(hasItem("foo")));
    }
}
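The test above validates a resolver against a directory layer rebuilt from a resolved key space path. Below is a minimal, self-contained sketch of the same idea using only the plain FDB Java bindings: a DirectoryLayer built over an explicit node and content Subspace allocates entries that never appear in the default layer. The "example-scope" prefix, the class name, and API version 630 are assumptions for illustration, not taken from the repository.

import java.util.List;

import com.apple.foundationdb.Database;
import com.apple.foundationdb.FDB;
import com.apple.foundationdb.directory.DirectoryLayer;
import com.apple.foundationdb.directory.DirectorySubspace;
import com.apple.foundationdb.directory.PathUtil;
import com.apple.foundationdb.subspace.Subspace;
import com.apple.foundationdb.tuple.ByteArrayUtil;
import com.apple.foundationdb.tuple.Tuple;

public class ScopedDirectoryLayerSketch {
    public static void main(String[] args) {
        FDB fdb = FDB.selectAPIVersion(630);
        try (Database db = fdb.open()) {
            // Content keys for the scoped layer live under this tuple prefix.
            Subspace contentSubspace = new Subspace(Tuple.from("example-scope"));
            // Node (metadata) keys live under the content prefix plus the default node prefix,
            // mirroring how the test derives its node subspace from the resolved path.
            Subspace nodeSubspace = new Subspace(
                    ByteArrayUtil.join(contentSubspace.getKey(), DirectoryLayer.DEFAULT_NODE_SUBSPACE.getKey()));
            DirectoryLayer scopedLayer = new DirectoryLayer(nodeSubspace, contentSubspace);

            // Create "foo" in the scoped layer; its prefix is carved out of contentSubspace.
            DirectorySubspace foo = scopedLayer.createOrOpen(db, PathUtil.from("foo")).join();
            System.out.println("scoped prefix: " + ByteArrayUtil.printable(foo.getKey()));

            // The cluster-wide default directory layer never sees the scoped entry.
            List<String> defaultEntries = DirectoryLayer.getDefault().list(db).join();
            System.out.println("default layer contains foo? " + defaultEntries.contains("foo"));
        }
    }
}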
Use of com.apple.foundationdb.subspace.Subspace in project fdb-record-layer by FoundationDB.
From the class BunchedMapTest, method stressTest:
private void stressTest(final Random r, final int trTotal, final int opTotal, final int keyCount,
                        final int workerCount, boolean addBytesToValue, AtomicLong globalTrCount,
                        int mapCount) throws InterruptedException, ExecutionException {
    final long initialTrCount = globalTrCount.get();
    final Subspace logSubspace = DirectoryLayer.getDefault()
            .createOrOpen(db, PathUtil.from(getClass().getName(), "log")).get();
    db.run(tr -> {
        tr.clear(bmSubspace.range());
        tr.clear(logSubspace.range());
        // If the database is empty, putting these here stops scans from hitting the log subspace within a transaction
        tr.set(logSubspace.getKey(), new byte[0]);
        tr.set(ByteArrayUtil.join(logSubspace.getKey(), new byte[]{ (byte) 0xff }), new byte[0]);
        return null;
    });
    final List<CompletableFuture<Void>> workers = Stream.generate(() -> {
        int bunchSize = r.nextInt(15) + 1;
        BunchedMap<Tuple, Tuple> workerMap = new BunchedMap<>(serializer, Comparator.naturalOrder(), bunchSize);
        AtomicInteger trCount = new AtomicInteger(0);
        return AsyncUtil.whileTrue(() -> {
            final Transaction tr = db.createTransaction();
            tr.options().setDebugTransactionIdentifier("stress-tr-" + globalTrCount.getAndIncrement());
            tr.options().setLogTransaction();
            final AtomicInteger opCount = new AtomicInteger(0);
            final AtomicInteger localOrder = new AtomicInteger(0);
            return AsyncUtil.whileTrue(() -> {
                int opCode = r.nextInt(4);
                CompletableFuture<?> op;
                if (opCode == 0) {
                    // Random put
                    CompletableFuture<?>[] futures = new CompletableFuture<?>[mapCount];
                    for (int i = 0; i < mapCount; i++) {
                        if (r.nextBoolean()) {
                            Tuple key = Tuple.from(r.nextInt(keyCount));
                            Tuple value;
                            if (addBytesToValue) {
                                int byteLength = r.nextInt(5000);
                                byte[] bytes = new byte[byteLength];
                                r.nextBytes(bytes);
                                value = Tuple.from(r.nextLong(), bytes);
                            } else {
                                value = Tuple.from(r.nextLong());
                            }
                            tr.mutate(MutationType.SET_VERSIONSTAMPED_KEY, getLogKey(logSubspace, i, localOrder),
                                    Tuple.from("PUT", key, value).pack());
                            futures[i] = workerMap.put(tr, bmSubspace.subspace(Tuple.from(i)), key, value);
                        } else {
                            futures[i] = AsyncUtil.DONE;
                        }
                    }
                    op = CompletableFuture.allOf(futures);
                } else if (opCode == 1) {
                    // Read a random key
                    int mapIndex = r.nextInt(mapCount);
                    Tuple key = Tuple.from(r.nextInt(keyCount));
                    op = workerMap.get(tr, bmSubspace.get(mapIndex), key)
                            .thenAccept(optionalValue -> tr.mutate(MutationType.SET_VERSIONSTAMPED_KEY,
                                    getLogKey(logSubspace, mapIndex, localOrder),
                                    Tuple.from("GET", key, optionalValue.orElse(null)).pack()));
                } else if (opCode == 2) {
                    // Check contains key
                    int mapIndex = r.nextInt(mapCount);
                    Tuple key = Tuple.from(r.nextInt(keyCount));
                    op = workerMap.containsKey(tr, bmSubspace.subspace(Tuple.from(mapIndex)), key)
                            .thenAccept(wasPresent -> tr.mutate(MutationType.SET_VERSIONSTAMPED_KEY,
                                    getLogKey(logSubspace, mapIndex, localOrder),
                                    Tuple.from("CONTAINS_KEY", key, wasPresent).pack()));
                } else {
                    // Remove a random key
                    int mapIndex = r.nextInt(mapCount);
                    Tuple key = Tuple.from(r.nextInt(keyCount));
                    op = workerMap.remove(tr, bmSubspace.subspace(Tuple.from(mapIndex)), key)
                            .thenAccept(oldValue -> tr.mutate(MutationType.SET_VERSIONSTAMPED_KEY,
                                    getLogKey(logSubspace, mapIndex, localOrder),
                                    Tuple.from("REMOVE", key, oldValue.orElse(null)).pack()));
                }
                return op.thenApply(ignore -> opCount.incrementAndGet() < opTotal);
            }).thenCompose(vignore -> tr.commit()).handle((vignore, err) -> {
                tr.close();
                if (err != null) {
                    FDBException fdbE = unwrapException(err);
                    if (fdbE != null) {
                        // Swallow retryable errors (conflicts and transaction timeouts); rethrow anything else
                        if (fdbE.getCode() != FDBError.NOT_COMMITTED.code()
                                && fdbE.getCode() != FDBError.TRANSACTION_TOO_OLD.code()) {
                            throw fdbE;
                        }
                    } else {
                        if (err instanceof RuntimeException) {
                            throw (RuntimeException) err;
                        } else {
                            throw new RuntimeException("verification error", err);
                        }
                    }
                }
                return trCount.incrementAndGet() < trTotal;
            });
        });
    }).limit(workerCount).collect(Collectors.toList());
    final AtomicBoolean stillWorking = new AtomicBoolean(true);
    final CompletableFuture<Void> verifierWorker = AsyncUtil.whileTrue(() -> {
        Transaction tr = db.createTransaction();
        AtomicLong versionRef = new AtomicLong(-1L);
        return tr.getReadVersion().thenCompose(version -> {
            versionRef.set(version);
            // Grab the mutation list.
            AtomicInteger mapIndex = new AtomicInteger(0);
            return AsyncUtil.whileTrue(() -> {
                Subspace mapSubspace = bmSubspace.subspace(Tuple.from(mapIndex.get()));
                Subspace mapLogSubspace = logSubspace.subspace(Tuple.from(mapIndex.get()));
                CompletableFuture<List<Tuple>> logFuture = AsyncUtil.mapIterable(tr.getRange(mapLogSubspace.range()),
                        kv -> Tuple.fromBytes(kv.getValue())).asList();
                // Verify integrity and then grab all of the keys and values.
                CompletableFuture<List<Map.Entry<Tuple, Tuple>>> contentFuture = AsyncUtil.collectRemaining(map.scan(tr, mapSubspace));
                CompletableFuture<Void> integrityFuture = map.verifyIntegrity(tr, mapSubspace);
                return integrityFuture.thenCompose(vignore -> contentFuture.thenCombine(logFuture, (mapContents, logEntries) -> {
                    // Replay the operation log into a fresh map and check each logged read against it.
                    Map<Tuple, Tuple> mapCopy = new TreeMap<>();
                    for (Tuple logEntry : logEntries) {
                        String op = logEntry.getString(0);
                        if (op.equals("PUT")) {
                            mapCopy.put(logEntry.getNestedTuple(1), logEntry.getNestedTuple(2));
                        } else if (op.equals("GET")) {
                            assertEquals(logEntry.getNestedTuple(2), mapCopy.get(logEntry.getNestedTuple(1)));
                        } else if (op.equals("CONTAINS_KEY")) {
                            assertEquals(logEntry.getBoolean(2), mapCopy.containsKey(logEntry.getNestedTuple(1)));
                        } else if (op.equals("REMOVE")) {
                            Tuple oldValue = mapCopy.remove(logEntry.getNestedTuple(1));
                            assertEquals(logEntry.getNestedTuple(2), oldValue);
                        } else {
                            fail("Unexpected operation " + op);
                        }
                    }
                    // The replayed map must match what is actually stored.
                    assertEquals(new ArrayList<>(mapCopy.entrySet()), mapContents);
                    return mapIndex.incrementAndGet() < mapCount;
                })).handle((res, err) -> {
                    // Report error information unless it was just a transaction timeout (in which case we'll retry).
                    FDBException fdbE = unwrapException(err);
                    if (err != null && (fdbE == null || fdbE.getCode() != FDBError.TRANSACTION_TOO_OLD.code())) {
                        System.err.println("Error verifying consistency: " + err);
                        err.printStackTrace();
                        List<Map.Entry<Tuple, Tuple>> contents = contentFuture.join();
                        System.err.println("Map contents:");
                        contents.forEach(entry -> System.err.println("  " + entry.getKey() + " -> " + entry.getValue()));
                        System.err.println("DB contents:");
                        List<KeyValue> rangeKVs = tr.getRange(bmSubspace.range()).asList().join();
                        rangeKVs.forEach(kv -> {
                            Tuple boundaryKey = bmSubspace.unpack(kv.getKey());
                            System.err.println("  " + boundaryKey + " -> " + serializer.deserializeEntries(boundaryKey, kv.getValue()));
                        });
                        List<Tuple> logEntries = logFuture.join();
                        System.err.println("Log contents:");
                        logEntries.forEach(logEntry -> System.err.println("  " + logEntry));
                        if (err instanceof RuntimeException) {
                            throw (RuntimeException) err;
                        } else {
                            throw new LoggableException("unable to complete consistency check", err);
                        }
                    }
                    return res;
                });
            });
        }).whenComplete((v, t) -> tr.close()).thenApply(vignore -> stillWorking.get());
    });
    AtomicInteger mapIndex = new AtomicInteger(0);
    CompletableFuture<Void> compactingWorker = AsyncUtil.whileTrue(() -> {
        AtomicReference<byte[]> continuation = new AtomicReference<>(null);
        return AsyncUtil.whileTrue(() ->
                map.compact(db, bmSubspace.subspace(Tuple.from(mapIndex.get())), 5, continuation.get())
                        .thenApply(nextContinuation -> {
                            continuation.set(nextContinuation);
                            return nextContinuation != null;
                        })
        ).thenApply(vignore -> {
            mapIndex.getAndUpdate(oldIndex -> (oldIndex + 1) % mapCount);
            return stillWorking.get();
        });
    });
    // Wait for all workers to stop working.
    AsyncUtil.whenAll(workers)
            .whenComplete((vignore, err) -> stillWorking.set(false))
            .thenAcceptBoth(verifierWorker, (vignore1, vignore2) -> { })
            .thenAcceptBoth(compactingWorker, (vignore1, vignore2) -> { })
            .whenComplete((vignore, err) -> {
                System.out.printf("Completed stress test with %d workers, %d keys, and %d transactions %s (large values=%s).%n",
                        workerCount, keyCount, globalTrCount.get() - initialTrCount,
                        (err == null ? "successfully" : "with an error"), addBytesToValue);
                if (err != null) {
                    err.printStackTrace();
                }
                for (int i = 0; i < mapCount; i++) {
                    System.out.println("  Map " + i + ":");
                    Subspace mapSubspace = bmSubspace.subspace(Tuple.from(i));
                    List<KeyValue> rangeKVs = inconsistentScan(db, mapSubspace);
                    System.out.println("    Boundary keys: "
                            + rangeKVs.stream().map(kv -> mapSubspace.unpack(kv.getKey())).collect(Collectors.toList()));
                    System.out.println("    Boundary info:");
                    rangeKVs.forEach(kv -> {
                        Tuple boundaryKey = mapSubspace.unpack(kv.getKey());
                        System.out.printf("      %s: %d - %s%n", boundaryKey,
                                serializer.deserializeEntries(boundaryKey, kv.getValue()).size(),
                                serializer.deserializeKeys(boundaryKey, kv.getValue()));
                    });
                }
                int opsCount = inconsistentScan(db, logSubspace).size();
                System.out.println("  Committed ops: " + opsCount);
            }).get();
}
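The workers above log each operation under a key written with MutationType.SET_VERSIONSTAMPED_KEY, so the database fills in the commit version at commit time and the log sorts itself into commit order for the verifier to replay. The getLogKey helper is not shown in this excerpt; the following is a hypothetical sketch of how such a key could be built with an incomplete Versionstamp. The helper name and key layout are assumptions, not the repository's actual implementation.

import java.util.concurrent.atomic.AtomicInteger;

import com.apple.foundationdb.subspace.Subspace;
import com.apple.foundationdb.tuple.Tuple;
import com.apple.foundationdb.tuple.Versionstamp;

public class LogKeySketch {
    // Key layout: logSubspace / (mapIndex, <10-byte commit versionstamp + 2-byte user version>).
    // The per-transaction counter serves as the user version and breaks ties between
    // mutations issued within the same commit.
    static byte[] getLogKey(Subspace logSubspace, int mapIndex, AtomicInteger localOrder) {
        Tuple keyTuple = Tuple.from(mapIndex, Versionstamp.incomplete(localOrder.getAndIncrement()));
        // packWithVersionstamp appends the byte offset of the incomplete stamp to the packed
        // key, which is the format the SET_VERSIONSTAMPED_KEY mutation expects.
        return logSubspace.packWithVersionstamp(keyTuple);
    }
}

Because the commit version is monotonic across transactions, a plain range read of the log subspace yields operations in the order they committed, which is exactly the order the verifier replays them in.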
Use of com.apple.foundationdb.subspace.Subspace in project fdb-record-layer by FoundationDB.
From the class FDBReverseDirectoryCache, method populate:
private void populate(final FDBRecordContext initialContext, Subspace directory) {
    // Range for the directory layer. WARNING: this assumes a lot about the internals of the
    // directory layer and may need to be reworked at some point using the DirectoryLayer APIs.
    final byte[] prefix = { (byte) 0xFE };
    final Subspace subdirs = new Subspace(Tuple.from(prefix, 0L), prefix);
    fdb.asyncToSync(initialContext.getTimer(), FDBStoreTimer.Waits.WAIT_REVERSE_DIRECTORY_SCAN,
            populate(initialContext, subdirs, directory, null));
}
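As the warning notes, this scans the directory layer's internal node subspace (the raw 0xFE prefix) directly. A minimal sketch of the public-API alternative the comment alludes to is below: enumerating entries through DirectoryLayer.list() and open() to build a prefix-to-name mapping. The class name is invented for illustration, and note the trade-off: this issues one open() per directory, whereas the raw scan reads everything in a single range read, which is presumably why the cache populates the way it does.

import java.util.List;

import com.apple.foundationdb.Database;
import com.apple.foundationdb.FDB;
import com.apple.foundationdb.directory.DirectoryLayer;
import com.apple.foundationdb.directory.DirectorySubspace;
import com.apple.foundationdb.directory.PathUtil;
import com.apple.foundationdb.tuple.ByteArrayUtil;

public class DirectoryListingSketch {
    public static void main(String[] args) {
        FDB fdb = FDB.selectAPIVersion(630);
        try (Database db = fdb.open()) {
            DirectoryLayer layer = DirectoryLayer.getDefault();
            // list() returns only the names; open() yields each directory's allocated
            // prefix, which is what a reverse (prefix -> name) cache needs.
            List<String> names = layer.list(db).join();
            for (String name : names) {
                DirectorySubspace dir = layer.open(db, PathUtil.from(name)).join();
                System.out.println(ByteArrayUtil.printable(dir.getKey()) + " -> " + name);
            }
        }
    }
}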
Use of com.apple.foundationdb.subspace.Subspace in project fdb-record-layer by FoundationDB.
From the class FDBRecordStore, method scanTypedRecords:
@Nonnull
public <M extends Message> RecordCursor<FDBStoredRecord<M>> scanTypedRecords(@Nonnull RecordSerializer<M> typedSerializer,
                                                                             @Nullable final Tuple low, @Nullable final Tuple high,
                                                                             @Nonnull final EndpointType lowEndpoint,
                                                                             @Nonnull final EndpointType highEndpoint,
                                                                             @Nullable byte[] continuation,
                                                                             @Nonnull ScanProperties scanProperties) {
    final RecordMetaData metaData = metaDataProvider.getRecordMetaData();
    final Subspace recordsSubspace = recordsSubspace();
    final SplitHelper.SizeInfo sizeInfo = new SplitHelper.SizeInfo();
    final RecordCursor<FDBRawRecord> rawRecords;
    if (metaData.isSplitLongRecords()) {
        RecordCursor<KeyValue> keyValues = KeyValueCursor.Builder.withSubspace(recordsSubspace)
                .setContext(context)
                .setContinuation(continuation)
                .setLow(low, lowEndpoint)
                .setHigh(high, highEndpoint)
                .setScanProperties(scanProperties.with(ExecuteProperties::clearRowAndTimeLimits).with(ExecuteProperties::clearState))
                .build();
        rawRecords = new SplitHelper.KeyValueUnsplitter(context, recordsSubspace, keyValues, useOldVersionFormat(), sizeInfo,
                scanProperties.isReverse(),
                new CursorLimitManager(context, scanProperties.with(ExecuteProperties::clearReturnedRowLimit)))
                .skip(scanProperties.getExecuteProperties().getSkip())
                .limitRowsTo(scanProperties.getExecuteProperties().getReturnedRowLimit());
    } else {
        KeyValueCursor.Builder keyValuesBuilder = KeyValueCursor.Builder.withSubspace(recordsSubspace)
                .setContext(context)
                .setContinuation(continuation)
                .setLow(low, lowEndpoint)
                .setHigh(high, highEndpoint);
        if (omitUnsplitRecordSuffix) {
            rawRecords = keyValuesBuilder.setScanProperties(scanProperties).build().map(kv -> {
                sizeInfo.set(kv);
                Tuple primaryKey = SplitHelper.unpackKey(recordsSubspace, kv);
                return new FDBRawRecord(primaryKey, kv.getValue(), null, sizeInfo);
            });
        } else {
            final ScanProperties finalScanProperties = scanProperties.with(executeProperties -> {
                final ExecuteProperties.Builder builder = executeProperties.toBuilder()
                        .clearTimeLimit()
                        .clearSkipAndAdjustLimit()
                        .clearState();
                int returnedRowLimit = builder.getReturnedRowLimitOrMax();
                if (returnedRowLimit != Integer.MAX_VALUE) {
                    // Adjust limit to twice the supplied limit in case there are versions in the records
                    builder.setReturnedRowLimit(2 * returnedRowLimit);
                }
                return builder.build();
            });
            rawRecords = new SplitHelper.KeyValueUnsplitter(context, recordsSubspace,
                    keyValuesBuilder.setScanProperties(finalScanProperties).build(),
                    useOldVersionFormat(), sizeInfo, scanProperties.isReverse(),
                    new CursorLimitManager(context, scanProperties.with(ExecuteProperties::clearReturnedRowLimit)))
                    .skip(scanProperties.getExecuteProperties().getSkip())
                    .limitRowsTo(scanProperties.getExecuteProperties().getReturnedRowLimit());
        }
    }
    RecordCursor<FDBStoredRecord<M>> result = rawRecords.mapPipelined(rawRecord -> {
        final Optional<CompletableFuture<FDBRecordVersion>> versionFutureOptional;
        if (useOldVersionFormat()) {
            // Older format versions: do a separate read to get the version.
            versionFutureOptional = loadRecordVersionAsync(rawRecord.getPrimaryKey(),
                    scanProperties.getExecuteProperties().getIsolationLevel().isSnapshot());
        } else {
            // Newer format versions: the version is either in the record or it is not -- do not do another read.
            versionFutureOptional = Optional.empty();
        }
        return deserializeRecord(typedSerializer, rawRecord, metaData, versionFutureOptional);
    }, pipelineSizer.getPipelineSize(PipelineOperation.KEY_TO_RECORD));
    return context.instrument(FDBStoreTimer.Events.SCAN_RECORDS, result);
}
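A short sketch of how a caller might drive this scan through the record store's public scanRecords() entry point, which resolves to the typed scan above. The record store setup is elided; the method name printAllPrimaryKeys and the assumption that a configured FDBRecordStore is already in hand are for illustration only.

import com.apple.foundationdb.record.RecordCursor;
import com.apple.foundationdb.record.RecordCursorResult;
import com.apple.foundationdb.record.ScanProperties;
import com.apple.foundationdb.record.TupleRange;
import com.apple.foundationdb.record.provider.foundationdb.FDBRecordStore;
import com.apple.foundationdb.record.provider.foundationdb.FDBStoredRecord;
import com.google.protobuf.Message;

public class ScanRecordsSketch {
    // Scans every record in the store in primary-key order and prints each primary key.
    static void printAllPrimaryKeys(FDBRecordStore recordStore) {
        try (RecordCursor<FDBStoredRecord<Message>> cursor = recordStore.scanRecords(
                TupleRange.ALL,                // whole primary-key range, like null low/high endpoints
                null,                          // no continuation: start from the beginning
                ScanProperties.FORWARD_SCAN)) {
            RecordCursorResult<FDBStoredRecord<Message>> result = cursor.getNext();
            while (result.hasNext()) {
                System.out.println(result.get().getPrimaryKey());
                result = cursor.getNext();
            }
        }
    }
}

If the cursor stops early (row limit, time limit, or transaction end), result.getContinuation() can be fed back in as the continuation argument to resume the scan in a later transaction.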
Use of com.apple.foundationdb.subspace.Subspace in project fdb-record-layer by FoundationDB.
From the class FDBRecordStore, method addConvertRecordVersions:
private void addConvertRecordVersions(@Nonnull List<CompletableFuture<Void>> work) {
    if (useOldVersionFormat()) {
        throw recordCoreException("attempted to convert record versions when still using older format");
    }
    final Subspace legacyVersionSubspace = getLegacyVersionSubspace();
    // Read all of the keys in the old record version location. For each
    // record, copy its version to the new location within the primary record
    // subspace. Then, once they are all copied, delete the old subspace.
    KeyValueCursor kvCursor = KeyValueCursor.Builder.withSubspace(legacyVersionSubspace)
            .setContext(getRecordContext())
            .setScanProperties(ScanProperties.FORWARD_SCAN)
            .build();
    CompletableFuture<Void> workFuture = kvCursor.forEach(kv -> {
        final Tuple primaryKey = legacyVersionSubspace.unpack(kv.getKey());
        final FDBRecordVersion version = FDBRecordVersion.fromBytes(kv.getValue(), false);
        final byte[] newKeyBytes = getSubspace().pack(recordVersionKey(primaryKey));
        final byte[] newValueBytes = SplitHelper.packVersion(version);
        ensureContextActive().set(newKeyBytes, newValueBytes);
    }).thenAccept(ignore -> ensureContextActive().clear(legacyVersionSubspace.range()));
    work.add(workFuture);
}
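The conversion above is an instance of a common copy-then-clear migration between subspaces. Here is a minimal generic sketch of the same pattern with the plain FDB API; the class and method names and the subspace parameters are invented for illustration.

import com.apple.foundationdb.Database;
import com.apple.foundationdb.KeyValue;
import com.apple.foundationdb.subspace.Subspace;
import com.apple.foundationdb.tuple.Tuple;

public class CopyThenClearSketch {
    // Moves every key under oldSubspace to the same tuple position under newSubspace,
    // then clears the old range, all within one transaction so the move is atomic.
    // Only suitable for ranges small enough to fit within a single transaction's limits.
    static void migrate(Database db, Subspace oldSubspace, Subspace newSubspace) {
        db.run(tr -> {
            for (KeyValue kv : tr.getRange(oldSubspace.range())) {
                Tuple suffix = oldSubspace.unpack(kv.getKey());
                tr.set(newSubspace.pack(suffix), kv.getValue());
            }
            tr.clear(oldSubspace.range());
            return null;
        });
    }
}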