Use of com.apple.foundationdb.async.AsyncIterator in project fdb-record-layer by FoundationDB.
The class IndexingScrubMissing, method scrubRecordsRangeOnly.
@Nonnull
private CompletableFuture<Boolean> scrubRecordsRangeOnly(@Nonnull FDBRecordStore store, byte[] startBytes, byte[] endBytes, @Nonnull AtomicLong recordsScanned) {
// return false when done
Index index = common.getIndex();
final RecordMetaData metaData = store.getRecordMetaData();
final RecordMetaDataProvider recordMetaDataProvider = common.getRecordStoreBuilder().getMetaDataProvider();
if (recordMetaDataProvider == null || !metaData.equals(recordMetaDataProvider.getRecordMetaData())) {
throw new MetaDataException("Store does not have the same metadata");
}
final IndexMaintainer maintainer = store.getIndexMaintainer(index);
// scrubbing only readable, VALUE, idempotent indexes (at least for now)
validateOrThrowEx(maintainer.isIdempotent(), "scrubbed index is not idempotent");
validateOrThrowEx(index.getType().equals(IndexTypes.VALUE) || scrubbingPolicy.ignoreIndexTypeCheck(), "scrubbed index is not a VALUE index");
validateOrThrowEx(store.getIndexState(index) == IndexState.READABLE, "scrubbed index is not readable");
RangeSet rangeSet = new RangeSet(indexScrubRecordsRangeSubspace(store, index));
AsyncIterator<Range> ranges = rangeSet.missingRanges(store.ensureContextActive(), startBytes, endBytes).iterator();
// always respect the limit in this path; +1 allows a continuation item
final ExecuteProperties.Builder executeProperties = ExecuteProperties.newBuilder().setIsolationLevel(IsolationLevel.SNAPSHOT).setReturnedRowLimit(getLimit() + 1);
final ScanProperties scanProperties = new ScanProperties(executeProperties.build());
return ranges.onHasNext().thenCompose(hasNext -> {
if (Boolean.FALSE.equals(hasNext)) {
// Here: no more missing ranges - all done
// To avoid stale metadata, we'll keep the scrubbed-ranges indicator empty until the next scrub call.
Transaction tr = store.getContext().ensureActive();
tr.clear(indexScrubRecordsRangeSubspace(store, index).range());
return AsyncUtil.READY_FALSE;
}
final Range range = ranges.next();
final Tuple rangeStart = RangeSet.isFirstKey(range.begin) ? null : Tuple.fromBytes(range.begin);
final Tuple rangeEnd = RangeSet.isFinalKey(range.end) ? null : Tuple.fromBytes(range.end);
final TupleRange tupleRange = TupleRange.between(rangeStart, rangeEnd);
final RecordCursor<FDBStoredRecord<Message>> cursor = store.scanRecords(tupleRange, null, scanProperties);
final AtomicBoolean hasMore = new AtomicBoolean(true);
final AtomicReference<RecordCursorResult<FDBStoredRecord<Message>>> lastResult = new AtomicReference<>(RecordCursorResult.exhausted());
final long scanLimit = scrubbingPolicy.getEntriesScanLimit();
// Note that currently we only scrub idempotent indexes
final boolean isIdempotent = true;
return iterateRangeOnly(store, cursor, this::getRecordIfMissingIndex, lastResult, hasMore, recordsScanned, isIdempotent)
.thenApply(vignore -> hasMore.get() ? lastResult.get().get().getPrimaryKey() : rangeEnd)
.thenCompose(cont -> rangeSet.insertRange(store.ensureContextActive(), packOrNull(rangeStart), packOrNull(cont), true).thenApply(ignore -> {
if (scanLimit > 0) {
scanCounter += recordsScanned.get();
if (scanLimit <= scanCounter) {
return false;
}
}
return !Objects.equals(cont, rangeEnd);
}));
});
}
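The method above leans on a recurring AsyncIterator idiom: RangeSet.missingRanges yields an AsyncIterable<Range>, and onHasNext() returns a CompletableFuture<Boolean> that can drive a fully non-blocking loop. Below is a minimal, self-contained sketch of that idiom using AsyncUtil.whileTrue; the MissingRangeWalker class and the forEachMissingRange name are illustrative, not part of either project.
import com.apple.foundationdb.Range;
import com.apple.foundationdb.Transaction;
import com.apple.foundationdb.async.AsyncIterator;
import com.apple.foundationdb.async.AsyncUtil;
import com.apple.foundationdb.async.RangeSet;
import com.apple.foundationdb.subspace.Subspace;
import java.util.concurrent.CompletableFuture;
import java.util.function.Consumer;

final class MissingRangeWalker {
    // Hypothetical helper, not part of fdb-record-layer: visits every missing range;
    // the returned future completes once the iterator is exhausted.
    static CompletableFuture<Void> forEachMissingRange(Transaction tr, Subspace rangeSubspace, Consumer<Range> visitor) {
        RangeSet rangeSet = new RangeSet(rangeSubspace);
        AsyncIterator<Range> ranges = rangeSet.missingRanges(tr).iterator();
        // onHasNext() never blocks; AsyncUtil.whileTrue re-runs the body until it yields false.
        return AsyncUtil.whileTrue(() -> ranges.onHasNext().thenApply(hasNext -> {
            if (hasNext) {
                visitor.accept(ranges.next());
            }
            return hasNext;
        }));
    }
}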
Use of com.apple.foundationdb.async.AsyncIterator in project fdb-record-layer by FoundationDB.
The class OnlineIndexerSimpleTest, method testMarkReadableClearsBuiltRanges.
@Test
public void testMarkReadableClearsBuiltRanges() {
List<TestRecords1Proto.MySimpleRecord> records = LongStream.range(0, 200).mapToObj(val -> TestRecords1Proto.MySimpleRecord.newBuilder().setRecNo(val).setNumValue2((int) val + 1).build()).collect(Collectors.toList());
Index index = new Index("newIndex", field("num_value_2").ungrouped(), IndexTypes.SUM);
IndexAggregateFunction aggregateFunction = new IndexAggregateFunction(FunctionNames.SUM, index.getRootExpression(), index.getName());
List<String> indexTypes = Collections.singletonList("MySimpleRecord");
FDBRecordStoreTestBase.RecordMetaDataHook hook = metaDataBuilder -> metaDataBuilder.addIndex("MySimpleRecord", index);
openSimpleMetaData();
try (FDBRecordContext context = openContext()) {
records.forEach(recordStore::saveRecord);
context.commit();
}
openSimpleMetaData(hook);
try (OnlineIndexer indexer = OnlineIndexer.newBuilder().setDatabase(fdb).setMetaData(metaData).setIndex(index).setSubspace(subspace).build()) {
indexer.buildIndex(true);
}
openSimpleMetaData(hook);
try (FDBRecordContext context = openContext()) {
// Verify rangeSet is cleared when index is marked readable
final RangeSet rangeSet = new RangeSet(recordStore.indexRangeSubspace(index));
AsyncIterator<Range> ranges = rangeSet.missingRanges(recordStore.ensureContextActive()).iterator();
final Range range = ranges.next();
final boolean range1IsEmpty = RangeSet.isFirstKey(range.begin) && RangeSet.isFinalKey(range.end);
assertTrue(range1IsEmpty);
// fake commit, happy compiler
context.commit();
}
}
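Distilling the assertion above into a reusable check: a hedged helper (class and method names are hypothetical) that reports whether a RangeSet records nothing as built, i.e. whether its single missing range spans from the first sentinel key to the final one.
import com.apple.foundationdb.Range;
import com.apple.foundationdb.Transaction;
import com.apple.foundationdb.async.AsyncIterator;
import com.apple.foundationdb.async.RangeSet;
import java.util.concurrent.CompletableFuture;

final class RangeSetChecks {
    // True iff the RangeSet records nothing as built, meaning its one missing range
    // covers the entire keyspace between the sentinel keys.
    static CompletableFuture<Boolean> isCompletelyUnbuilt(Transaction tr, RangeSet rangeSet) {
        AsyncIterator<Range> ranges = rangeSet.missingRanges(tr).iterator();
        return ranges.onHasNext().thenApply(hasNext -> {
            if (!hasNext) {
                return false; // no missing ranges at all: fully built
            }
            Range first = ranges.next();
            return RangeSet.isFirstKey(first.begin) && RangeSet.isFinalKey(first.end);
        });
    }
}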
Use of com.apple.foundationdb.async.AsyncIterator in project lionrock by panghy.
The class FoundationDbGrpcFacade, method executeTransaction.
@Override
public StreamObserver<StreamingDatabaseRequest> executeTransaction(StreamObserver<StreamingDatabaseResponse> responseObserver) {
Context rpcContext = Context.current();
Span overallSpan = this.tracer.currentSpan();
return new StreamObserver<>() {
private final AtomicReference<StartTransactionRequest> startRequest = new AtomicReference<>();
private final AtomicBoolean commitStarted = new AtomicBoolean();
private final AtomicLong rowsWritten = new AtomicLong();
private final AtomicLong rowsMutated = new AtomicLong();
private final AtomicLong rowsRead = new AtomicLong();
private final AtomicLong keysRead = new AtomicLong();
private final AtomicLong getReadVersion = new AtomicLong();
private final AtomicLong rangeGets = new AtomicLong();
private final AtomicLong rangeGetBatches = new AtomicLong();
private final AtomicLong clears = new AtomicLong();
private final AtomicLong readConflictAdds = new AtomicLong();
private final AtomicLong writeConflictAdds = new AtomicLong();
private final AtomicLong getVersionstamp = new AtomicLong();
private final AtomicLong getApproximateSize = new AtomicLong();
private final AtomicLong getEstimatedRangeSize = new AtomicLong();
private final AtomicLong getBoundaryKeys = new AtomicLong();
private final AtomicLong getAddressesForKey = new AtomicLong();
private final Set<Long> knownSequenceIds = Collections.newSetFromMap(new ConcurrentHashMap<>());
private volatile Transaction tx;
/**
* Long-living futures that might last beyond the open->commit() lifecycle of a transaction (e.g. getVersionstamp
* and watch).
*/
private final List<CompletableFuture<?>> longLivingFutures = new ArrayList<>();
@Override
public void onNext(StreamingDatabaseRequest value) {
if (value.hasStartTransaction()) {
StartTransactionRequest startRequest = this.startRequest.updateAndGet(startTransactionRequest -> {
if (startTransactionRequest != null) {
StatusRuntimeException toThrow = Status.INVALID_ARGUMENT.withDescription("cannot send StartTransactionRequest twice").asRuntimeException();
synchronized (responseObserver) {
responseObserver.onError(toThrow);
if (tx != null) {
tx.close();
}
}
throw toThrow;
}
return value.getStartTransaction();
});
if (logger.isDebugEnabled()) {
String msg = "Starting transaction " + startRequest.getName() + " against db: " + startRequest.getDatabaseName();
logger.debug(msg);
if (overallSpan != null) {
overallSpan.event(msg);
}
}
Database db = databaseMap.get(startRequest.getDatabaseName());
if (db == null) {
StatusRuntimeException toThrow = Status.INVALID_ARGUMENT.withDescription("cannot find database named: " + startRequest.getDatabaseName()).asRuntimeException();
synchronized (responseObserver) {
responseObserver.onError(toThrow);
if (tx != null) {
tx.close();
}
}
throw toThrow;
}
tx = db.createTransaction();
setDeadline(rpcContext, tx);
if (overallSpan != null) {
overallSpan.tag("client", startRequest.getClientIdentifier()).tag("database_name", startRequest.getDatabaseName()).tag("name", startRequest.getName());
}
} else if (value.hasCommitTransaction()) {
hasActiveTransactionOrThrow();
if (logger.isDebugEnabled()) {
if (overallSpan != null) {
overallSpan.event("CommitTransactionRequest");
}
logger.debug("CommitTransactionRequest");
}
if (commitStarted.getAndSet(true)) {
StatusRuntimeException toThrow = Status.INVALID_ARGUMENT.withDescription("transaction already committed").asRuntimeException();
responseObserver.onError(toThrow);
throw toThrow;
}
if (overallSpan != null) {
overallSpan.tag("commit", "true");
}
// start the span and scope for the commit transaction call.
Span opSpan = tracer.nextSpan(overallSpan).name("execute_transaction.commit_transaction");
Tracer.SpanInScope opScope = tracer.withSpan(opSpan.start());
CompletableFuture<byte[]> versionstampF = tx.getVersionstamp();
handleException(tx.commit().thenCompose(x -> versionstampF.exceptionally(ex -> null)), opSpan, responseObserver, "failed to commit transaction").whenComplete((versionstamp, throwable) -> {
try (Tracer.SpanInScope ignored = tracer.withSpan(opSpan)) {
if (throwable == null) {
synchronized (responseObserver) {
CommitTransactionResponse.Builder builder = CommitTransactionResponse.newBuilder().setCommittedVersion(tx.getCommittedVersion());
if (versionstamp != null) {
builder.setVersionstamp(ByteString.copyFrom(versionstamp));
}
responseObserver.onNext(StreamingDatabaseResponse.newBuilder().setCommitTransaction(builder.build()).build());
}
if (logger.isDebugEnabled()) {
String msg = "Committed transaction: " + tx.getCommittedVersion();
opSpan.event(msg);
logger.debug(msg);
}
// terminate the connection to the client when all long living futures are done.
CompletableFuture.allOf(longLivingFutures.toArray(CompletableFuture[]::new)).whenComplete((val, y) -> {
logger.debug("server onCompleted()");
synchronized (responseObserver) {
responseObserver.onCompleted();
}
if (tx != null) {
tx.close();
}
});
} else {
// throwable != null
populateOverallSpanStats();
if (tx != null) {
tx.close();
}
}
}
opScope.close();
opSpan.end();
});
} else if (value.hasGetValue()) {
hasActiveTransactionOrThrow();
GetValueRequest req = value.getGetValue();
throwIfSequenceIdHasBeenSeen(req.getSequenceId());
if (logger.isDebugEnabled()) {
String msg = "GetValueRequest on: " + printable(value.getGetValue().getKey().toByteArray());
if (overallSpan != null) {
overallSpan.event(msg);
}
logger.debug(msg);
}
// start the span/scope for the get_value call.
Span opSpan = tracer.nextSpan(overallSpan).name("execute_transaction.get_value");
Tracer.SpanInScope opScope = tracer.withSpan(opSpan.start());
CompletableFuture<byte[]> getFuture = req.getSnapshot() ? tx.snapshot().get(req.getKey().toByteArray()) : tx.get(req.getKey().toByteArray());
getFuture.whenComplete((val, throwable) -> {
try (Tracer.SpanInScope ignored = tracer.withSpan(opSpan)) {
if (throwable != null) {
handleThrowable(opSpan, throwable, () -> "failed to get value for key: " + printable(req.getKey().toByteArray()));
sendErrorToRemote(throwable, req.getSequenceId(), responseObserver);
} else {
if (logger.isDebugEnabled()) {
String msg = "GetValueRequest on: " + printable(req.getKey().toByteArray()) + " is: " + printable(val) + " seq_id: " + req.getSequenceId();
logger.debug(msg);
opSpan.event(msg);
}
rowsRead.incrementAndGet();
GetValueResponse.Builder build = GetValueResponse.newBuilder().setSequenceId(req.getSequenceId());
if (val != null) {
build.setValue(ByteString.copyFrom(val));
}
synchronized (responseObserver) {
responseObserver.onNext(StreamingDatabaseResponse.newBuilder().setGetValue(build.build()).build());
}
}
} finally {
opScope.close();
opSpan.end();
}
});
} else if (value.hasGetKey()) {
hasActiveTransactionOrThrow();
GetKeyRequest req = value.getGetKey();
throwIfSequenceIdHasBeenSeen(req.getSequenceId());
io.github.panghy.lionrock.proto.KeySelector ks = req.getKeySelector();
KeySelector keySelector = new KeySelector(ks.getKey().toByteArray(), ks.getOrEqual(), ks.getOffset());
if (logger.isDebugEnabled()) {
String msg = "GetKey for: " + keySelector;
logger.debug(msg);
if (overallSpan != null) {
overallSpan.event(msg);
}
}
// start the span/scope for the get_key call.
Span opSpan = tracer.nextSpan(overallSpan).name("execute_transaction.get_key");
Tracer.SpanInScope opScope = tracer.withSpan(opSpan.start());
CompletableFuture<byte[]> getFuture = req.getSnapshot() ? tx.snapshot().getKey(keySelector) : tx.getKey(keySelector);
getFuture.whenComplete((val, throwable) -> {
try (Tracer.SpanInScope ignored = tracer.withSpan(opSpan)) {
if (throwable != null) {
handleThrowable(opSpan, throwable, () -> "failed to get key: " + keySelector);
sendErrorToRemote(throwable, req.getSequenceId(), responseObserver);
} else {
if (logger.isDebugEnabled()) {
String msg = "GetKey on: " + keySelector + " is: " + printable(val);
logger.debug(msg);
opSpan.event(msg);
}
keysRead.incrementAndGet();
GetKeyResponse.Builder build = GetKeyResponse.newBuilder().setSequenceId(req.getSequenceId());
if (val != null) {
build.setKey(ByteString.copyFrom(val));
}
synchronized (responseObserver) {
responseObserver.onNext(StreamingDatabaseResponse.newBuilder().setGetKey(build.build()).build());
}
}
} finally {
opScope.close();
opSpan.end();
}
});
} else if (value.hasSetValue()) {
hasActiveTransactionOrThrow();
SetValueRequest req = value.getSetValue();
setValue(req);
} else if (value.hasClearKey()) {
hasActiveTransactionOrThrow();
ClearKeyRequest req = value.getClearKey();
clearKey(req);
} else if (value.hasClearRange()) {
hasActiveTransactionOrThrow();
ClearKeyRangeRequest req = value.getClearRange();
clearRange(req);
} else if (value.hasGetRange()) {
hasActiveTransactionOrThrow();
GetRangeRequest req = value.getGetRange();
throwIfSequenceIdHasBeenSeen(req.getSequenceId());
rangeGets.incrementAndGet();
// start the span/scope for the get_range call.
Span opSpan = tracer.nextSpan(overallSpan).name("execute_transaction.get_range").start();
try (Tracer.SpanInScope ignored = tracer.withSpan(opSpan)) {
KeySelector start;
if (req.hasStartBytes()) {
start = new KeySelector(req.getStartBytes().toByteArray(), false, 1);
} else {
start = new KeySelector(req.getStartKeySelector().getKey().toByteArray(), req.getStartKeySelector().getOrEqual(), req.getStartKeySelector().getOffset());
}
KeySelector end;
if (req.hasEndBytes()) {
end = new KeySelector(req.getEndBytes().toByteArray(), false, 1);
} else {
end = new KeySelector(req.getEndKeySelector().getKey().toByteArray(), req.getEndKeySelector().getOrEqual(), req.getEndKeySelector().getOffset());
}
if (logger.isDebugEnabled()) {
String msg = "GetRangeRequest from: " + start + " to: " + end + " reverse: " + req.getReverse() + " limit: " + req.getLimit() + " mode: " + req.getStreamingMode();
logger.debug(msg);
if (overallSpan != null) {
overallSpan.event(msg);
}
}
StreamingMode mode = StreamingMode.ITERATOR;
switch(req.getStreamingMode()) {
case WANT_ALL:
mode = StreamingMode.WANT_ALL;
break;
case EXACT:
mode = StreamingMode.EXACT;
break;
}
AsyncIterable<KeyValue> range = req.getSnapshot() ? tx.snapshot().getRange(start, end, req.getLimit(), req.getReverse(), mode) : tx.getRange(start, end, req.getLimit(), req.getReverse(), mode);
if (config.getInternal().isUseAsListForRangeGets()) {
// asList() method.
handleRangeGetWithAsList(req, opSpan, start, end, range);
} else {
// iterator method.
handleRangeGetWithAsyncIterator(value, req, opSpan, start, end, range);
}
}
} else if (value.hasAddConflictKey()) {
hasActiveTransactionOrThrow();
AddConflictKeyRequest req = value.getAddConflictKey();
addConflictKey(req);
} else if (value.hasAddConflictRange()) {
hasActiveTransactionOrThrow();
AddConflictRangeRequest req = value.getAddConflictRange();
addConflictRange(req);
} else if (value.hasGetReadVersion()) {
hasActiveTransactionOrThrow();
GetReadVersionRequest req = value.getGetReadVersion();
throwIfSequenceIdHasBeenSeen(req.getSequenceId());
if (logger.isDebugEnabled()) {
logger.debug("GetReadVersion");
if (overallSpan != null) {
overallSpan.event("GetReadVersion");
}
}
// start the span/scope for the get_read_version call.
Span opSpan = tracer.nextSpan(overallSpan).name("execute_transaction.get_read_version");
Tracer.SpanInScope opScope = tracer.withSpan(opSpan.start());
tx.getReadVersion().whenComplete((val, throwable) -> {
try (Tracer.SpanInScope ignored = tracer.withSpan(opSpan)) {
if (throwable != null) {
handleThrowable(opSpan, throwable, () -> "failed to get read version");
sendErrorToRemote(throwable, req.getSequenceId(), responseObserver);
} else {
if (logger.isDebugEnabled()) {
String msg = "GetReadVersion is: " + val + " seq_id: " + req.getSequenceId();
logger.debug(msg);
opSpan.event(msg);
}
this.getReadVersion.incrementAndGet();
synchronized (responseObserver) {
responseObserver.onNext(StreamingDatabaseResponse.newBuilder().setGetReadVersion(GetReadVersionResponse.newBuilder().setReadVersion(val).setSequenceId(req.getSequenceId()).build()).build());
}
}
} finally {
opScope.close();
opSpan.end();
}
});
} else if (value.hasSetReadVersion()) {
hasActiveTransactionOrThrow();
if (logger.isDebugEnabled()) {
String msg = "SetReadVersion at: " + value.getSetReadVersion().getReadVersion();
logger.debug(msg);
if (overallSpan != null) {
overallSpan.event(msg);
}
}
tx.setReadVersion(value.getSetReadVersion().getReadVersion());
} else if (value.hasSetTransactionOption()) {
hasActiveTransactionOrThrow();
if (logger.isDebugEnabled()) {
String msg = "SetTransactionOption: " + value.getSetTransactionOption().getOption() + " with: " + (value.getSetTransactionOption().hasParam() ? printable(value.getSetTransactionOption().getParam().toByteArray()) : null);
logger.debug(msg);
if (overallSpan != null) {
overallSpan.event(msg);
}
}
if (value.getSetTransactionOption().hasParam()) {
tx.options().getOptionConsumer().setOption(value.getSetTransactionOption().getOption(), value.getSetTransactionOption().getParam().toByteArray());
} else {
tx.options().getOptionConsumer().setOption(value.getSetTransactionOption().getOption(), null);
}
} else if (value.hasMutateValue()) {
hasActiveTransactionOrThrow();
MutateValueRequest req = value.getMutateValue();
mutateValue(req);
} else if (value.hasWatchKey()) {
hasActiveTransactionOrThrow();
WatchKeyRequest req = value.getWatchKey();
throwIfSequenceIdHasBeenSeen(req.getSequenceId());
if (logger.isDebugEnabled()) {
String msg = "WatchKeyRequest for: " + printable(req.getKey().toByteArray());
logger.debug(msg);
if (overallSpan != null) {
overallSpan.event(msg);
}
}
// start the span/scope for the watch_key call.
Span opSpan = tracer.nextSpan(overallSpan).name("execute_transaction.watch_key");
Tracer.SpanInScope opScope = tracer.withSpan(opSpan.start());
getVersionstamp.incrementAndGet();
addLongLivingFuture(tx.watch(req.getKey().toByteArray()).whenComplete((vs, throwable) -> {
try (Tracer.SpanInScope ignored = tracer.withSpan(opSpan)) {
if (throwable != null) {
handleThrowable(opSpan, throwable, () -> "failed to watch key");
sendErrorToRemote(throwable, req.getSequenceId(), responseObserver);
} else {
if (logger.isDebugEnabled()) {
String msg = "WatchKeyRequest Completed for: " + printable(req.getKey().toByteArray());
logger.debug(msg);
opSpan.event(msg);
}
synchronized (responseObserver) {
responseObserver.onNext(StreamingDatabaseResponse.newBuilder().setWatchKey(WatchKeyResponse.newBuilder().setSequenceId(req.getSequenceId()).build()).build());
}
}
} finally {
opScope.close();
opSpan.end();
}
}));
} else if (value.hasGetApproximateSize()) {
hasActiveTransactionOrThrow();
GetApproximateSizeRequest req = value.getGetApproximateSize();
throwIfSequenceIdHasBeenSeen(req.getSequenceId());
if (logger.isDebugEnabled()) {
String msg = "GetApproximateSizeRequest";
if (overallSpan != null) {
overallSpan.event(msg);
}
logger.debug(msg);
}
// start the span/scope for the get_approximate_size call.
Span opSpan = tracer.nextSpan(overallSpan).name("execute_transaction.get_approximate_size");
Tracer.SpanInScope opScope = tracer.withSpan(opSpan.start());
getApproximateSize.incrementAndGet();
tx.getApproximateSize().whenComplete((val, throwable) -> {
try (Tracer.SpanInScope ignored = tracer.withSpan(opSpan)) {
if (throwable != null) {
handleThrowable(opSpan, throwable, () -> "failed to get approximate size");
sendErrorToRemote(throwable, req.getSequenceId(), responseObserver);
} else {
if (logger.isDebugEnabled()) {
String msg = "GetApproximateSize is: " + val;
logger.debug(msg);
opSpan.event(msg);
}
GetApproximateSizeResponse.Builder build = GetApproximateSizeResponse.newBuilder().setSequenceId(req.getSequenceId());
if (val != null) {
build.setSize(val);
}
synchronized (responseObserver) {
responseObserver.onNext(StreamingDatabaseResponse.newBuilder().setGetApproximateSize(build.build()).build());
}
}
} finally {
opScope.close();
opSpan.end();
}
});
} else if (value.hasGetEstimatedRangeSize()) {
hasActiveTransactionOrThrow();
GetEstimatedRangeSizeRequest req = value.getGetEstimatedRangeSize();
throwIfSequenceIdHasBeenSeen(req.getSequenceId());
byte[] startB = req.getStart().toByteArray();
byte[] endB = req.getEnd().toByteArray();
if (logger.isDebugEnabled()) {
String msg = "GetEstimatedRangeSize for start: " + printable(startB) + " end: " + printable(endB);
if (overallSpan != null) {
overallSpan.event(msg);
}
logger.debug(msg);
}
// start the span/scope for the get_estimated_range_size call.
Span opSpan = tracer.nextSpan(overallSpan).name("execute_transaction.get_estimated_range_size");
Tracer.SpanInScope opScope = tracer.withSpan(opSpan.start());
this.getEstimatedRangeSize.incrementAndGet();
tx.getEstimatedRangeSizeBytes(startB, endB).whenComplete((val, throwable) -> {
try (Tracer.SpanInScope ignored = tracer.withSpan(opSpan)) {
if (throwable != null) {
handleThrowable(opSpan, throwable, () -> "failed to get estimated range size");
sendErrorToRemote(throwable, req.getSequenceId(), responseObserver);
} else {
if (logger.isDebugEnabled()) {
String msg = "GetEstimatedRangeSize for start: " + printable(startB) + " end: " + printable(endB) + " is: " + val;
logger.debug(msg);
opSpan.event(msg);
}
GetEstimatedRangeSizeResponse.Builder build = GetEstimatedRangeSizeResponse.newBuilder().setSequenceId(req.getSequenceId());
if (val != null) {
build.setSize(val);
}
synchronized (responseObserver) {
responseObserver.onNext(StreamingDatabaseResponse.newBuilder().setGetEstimatedRangeSize(build.build()).build());
}
}
} finally {
opScope.close();
opSpan.end();
}
});
} else if (value.hasGetBoundaryKeys()) {
hasActiveTransactionOrThrow();
GetBoundaryKeysRequest req = value.getGetBoundaryKeys();
throwIfSequenceIdHasBeenSeen(req.getSequenceId());
getBoundaryKeys.incrementAndGet();
// start the span/scope for the get_boundary_keys call.
Span opSpan = tracer.nextSpan(overallSpan).name("execute_transaction.get_boundary_keys").start();
try (Tracer.SpanInScope ignored = tracer.withSpan(opSpan)) {
byte[] startB = req.getStart().toByteArray();
byte[] endB = req.getEnd().toByteArray();
if (logger.isDebugEnabled()) {
String msg = "GetBoundaryKeysRequest from: " + printable(startB) + " to: " + printable(endB);
logger.debug(msg);
if (overallSpan != null) {
overallSpan.event(msg);
}
}
CloseableAsyncIterator<byte[]> iterator = LocalityUtil.getBoundaryKeys(tx, startB, endB);
// consumer that collects boundary keys and streams them back to the client.
AtomicLong rows = new AtomicLong();
AtomicLong batches = new AtomicLong();
BiConsumer<Boolean, Throwable> hasNextConsumer = new BiConsumer<>() {
private final List<ByteString> boundaries = new ArrayList<>();
@Override
public void accept(Boolean hasNext, Throwable throwable) {
try (Tracer.SpanInScope ignored = tracer.withSpan(opSpan)) {
boolean done = false;
if (throwable != null) {
handleThrowable(opSpan, throwable, () -> "failed to get boundary keys for start: " + printable(startB) + " end: " + printable(endB));
OperationFailureResponse.Builder builder = OperationFailureResponse.newBuilder().setSequenceId(req.getSequenceId()).setMessage(throwable.getMessage());
if (throwable instanceof FDBException) {
builder.setCode(((FDBException) throwable).getCode());
}
iterator.close();
opSpan.tag("rows_read", String.valueOf(rows.get()));
opSpan.tag("batches", String.valueOf(batches.get()));
opSpan.end();
synchronized (responseObserver) {
responseObserver.onNext(StreamingDatabaseResponse.newBuilder().setOperationFailure(builder.build()).build());
}
return;
} else if (!hasNext) {
// no more rows to read, flush the last message.
done = true;
} else {
// spool everything until the onHasNext CompletableFuture is pending.
while (iterator.onHasNext().isDone() && !iterator.onHasNext().isCompletedExceptionally()) {
if (!iterator.hasNext()) {
done = true;
break;
}
byte[] next = iterator.next();
rows.incrementAndGet();
synchronized (boundaries) {
boundaries.add(ByteString.copyFrom(next));
}
}
}
// flush what we have.
flush(done);
if (done) {
iterator.close();
opSpan.tag("rows_read", String.valueOf(rows.get()));
opSpan.tag("batches", String.valueOf(batches.get()));
opSpan.end();
} else {
// schedule the callback on when the future is ready.
iterator.onHasNext().whenComplete(this);
}
}
}
private void flush(boolean done) {
if (!done && boundaries.isEmpty()) {
return;
}
batches.incrementAndGet();
rangeGetBatches.incrementAndGet();
rowsRead.addAndGet(boundaries.size());
if (logger.isDebugEnabled()) {
String msg = "GetBoundaryKeysRequest from: " + printable(startB) + " to: " + printable(endB) + ", flushing: " + boundaries.size() + " boundaries, done: " + done;
logger.debug(msg);
opSpan.event(msg);
}
synchronized (responseObserver) {
responseObserver.onNext(StreamingDatabaseResponse.newBuilder().setGetBoundaryKeys(GetBoundaryKeysResponse.newBuilder().setDone(done).addAllKeys(boundaries).setSequenceId(req.getSequenceId()).build()).build());
}
boundaries.clear();
}
};
iterator.onHasNext().whenComplete(hasNextConsumer);
}
} else if (value.hasGetAddressesForKey()) {
hasActiveTransactionOrThrow();
GetAddressesForKeyRequest req = value.getGetAddressesForKey();
throwIfSequenceIdHasBeenSeen(req.getSequenceId());
if (logger.isDebugEnabled()) {
String msg = "GetAddressesForKey on: " + printable(value.getGetAddressesForKey().getKey().toByteArray());
if (overallSpan != null) {
overallSpan.event(msg);
}
logger.debug(msg);
}
getAddressesForKey.incrementAndGet();
// start the span/scope for the get_addresses_for_key call.
Span opSpan = tracer.nextSpan(overallSpan).name("execute_transaction.get_addresses_for_key");
Tracer.SpanInScope opScope = tracer.withSpan(opSpan.start());
CompletableFuture<String[]> getFuture = LocalityUtil.getAddressesForKey(tx, req.getKey().toByteArray());
getFuture.whenComplete((val, throwable) -> {
try (Tracer.SpanInScope ignored = tracer.withSpan(opSpan)) {
if (throwable != null) {
handleThrowable(opSpan, throwable, () -> "failed to get addresses for key: " + printable(req.getKey().toByteArray()));
sendErrorToRemote(throwable, req.getSequenceId(), responseObserver);
} else {
if (logger.isDebugEnabled()) {
String msg = "GetAddressesForKey on: " + printable(req.getKey().toByteArray()) + " is: " + Joiner.on(",").join(val);
logger.debug(msg);
opSpan.event(msg);
}
rowsRead.incrementAndGet();
GetAddressesForKeyResponse.Builder build = GetAddressesForKeyResponse.newBuilder().setSequenceId(req.getSequenceId());
if (val != null) {
build.addAllAddresses(Arrays.asList(val));
}
synchronized (responseObserver) {
responseObserver.onNext(StreamingDatabaseResponse.newBuilder().setGetAddressesForKey(build.build()).build());
}
}
} finally {
opScope.close();
opSpan.end();
}
});
} else if (value.hasBatchedMutations()) {
hasActiveTransactionOrThrow();
List<BatchedMutations> mutations = value.getBatchedMutations().getMutationsList();
mutations.forEach(mutation -> {
if (mutation.hasSetValue()) {
setValue(mutation.getSetValue());
} else if (mutation.hasMutateValue()) {
mutateValue(mutation.getMutateValue());
} else if (mutation.hasClearKey()) {
clearKey(mutation.getClearKey());
} else if (mutation.hasClearRange()) {
clearRange(mutation.getClearRange());
} else if (mutation.hasAddConflictKey()) {
addConflictKey(mutation.getAddConflictKey());
} else if (mutation.hasAddConflictRange()) {
addConflictRange(mutation.getAddConflictRange());
}
});
}
}
private void setValue(SetValueRequest req) {
if (logger.isDebugEnabled()) {
String msg = "SetValueRequest for: " + printable(req.getKey().toByteArray()) + " => " + printable(req.getValue().toByteArray());
logger.debug(msg);
if (overallSpan != null) {
overallSpan.event(msg);
}
}
rowsWritten.incrementAndGet();
tx.set(req.getKey().toByteArray(), req.getValue().toByteArray());
}
private void clearKey(ClearKeyRequest req) {
if (logger.isDebugEnabled()) {
String msg = "ClearKeyRequest for: " + printable(req.getKey().toByteArray());
logger.debug(msg);
if (overallSpan != null) {
overallSpan.event(msg);
}
}
clears.incrementAndGet();
tx.clear(req.getKey().toByteArray());
}
private void clearRange(ClearKeyRangeRequest req) {
if (logger.isDebugEnabled()) {
String msg = "ClearKeyRangeRequest for: " + printable(req.getStart().toByteArray()) + " => " + printable(req.getEnd().toByteArray());
logger.debug(msg);
if (overallSpan != null) {
overallSpan.event(msg);
}
}
clears.incrementAndGet();
tx.clear(req.getStart().toByteArray(), req.getEnd().toByteArray());
}
private void addConflictKey(AddConflictKeyRequest req) {
if (logger.isDebugEnabled()) {
String msg = "AddConflictKeyRequest for: " + printable(req.getKey().toByteArray()) + " write: " + req.getWrite();
logger.debug(msg);
if (overallSpan != null) {
overallSpan.event(msg);
}
}
if (req.getWrite()) {
writeConflictAdds.incrementAndGet();
tx.addWriteConflictKey(req.getKey().toByteArray());
} else {
readConflictAdds.incrementAndGet();
tx.addReadConflictKey(req.getKey().toByteArray());
}
}
private void addConflictRange(AddConflictRangeRequest req) {
if (logger.isDebugEnabled()) {
String msg = "AddConflictRangeRequest from: " + printable(req.getStart().toByteArray()) + " to: " + printable(req.getEnd().toByteArray()) + " write: " + req.getWrite();
logger.debug(msg);
if (overallSpan != null) {
overallSpan.event(msg);
}
}
if (req.getWrite()) {
writeConflictAdds.incrementAndGet();
tx.addWriteConflictRange(req.getStart().toByteArray(), req.getEnd().toByteArray());
} else {
readConflictAdds.incrementAndGet();
tx.addReadConflictRange(req.getStart().toByteArray(), req.getEnd().toByteArray());
}
}
private void mutateValue(MutateValueRequest req) {
if (logger.isDebugEnabled()) {
String msg = "MutateValueRequest for: " + printable(req.getKey().toByteArray()) + " => " + printable(req.getParam().toByteArray()) + " with: " + req.getType();
logger.debug(msg);
if (overallSpan != null) {
overallSpan.event(msg);
}
}
rowsMutated.incrementAndGet();
tx.mutate(getMutationType(req.getType()), req.getKey().toByteArray(), req.getParam().toByteArray());
}
/**
* Use asList() with range gets (only enabled via
* {@link Configuration.InternalOptions#isUseAsListForRangeGets()}).
* <p>
* Normally, calls are routed to
* {@link #handleRangeGetWithAsyncIterator(StreamingDatabaseRequest, GetRangeRequest, Span, KeySelector, KeySelector, AsyncIterable)}.
*/
private void handleRangeGetWithAsList(GetRangeRequest req, Span opSpan, KeySelector start, KeySelector end, AsyncIterable<KeyValue> range) {
Tracer.SpanInScope opScope = tracer.withSpan(opSpan.start());
range.asList().whenComplete((results, throwable) -> {
try (Tracer.SpanInScope ignored1 = tracer.withSpan(opSpan)) {
if (throwable != null) {
handleThrowable(opSpan, throwable, () -> "failed to get range for start: " + start + " end: " + end + " reverse: " + req.getReverse() + " limit: " + req.getLimit() + " mode: " + req.getStreamingMode());
sendErrorToRemote(throwable, req.getSequenceId(), responseObserver);
} else {
opSpan.tag("rows_read", String.valueOf(results.size()));
opSpan.tag("batches", String.valueOf(1));
if (logger.isDebugEnabled()) {
String msg = "GetRangeRequest from: " + start + " to: " + end + " reverse: " + req.getReverse() + " limit: " + req.getLimit() + " mode: " + req.getStreamingMode() + ", flushing: " + results.size() + " rows, seq_id: " + req.getSequenceId();
logger.debug(msg);
opSpan.event(msg);
}
if (config.getInternal().isSimulatePartitionsForAsListRangeGets()) {
if (results.isEmpty()) {
synchronized (responseObserver) {
responseObserver.onNext(StreamingDatabaseResponse.newBuilder().setGetRange(GetRangeResponse.newBuilder().setDone(true).setSequenceId(req.getSequenceId()).build()).build());
}
}
List<List<KeyValue>> parts = Lists.partition(results, config.getInternal().getPartitionSizeForAsListRangeGets());
for (int i = 0; i < parts.size(); i++) {
List<KeyValue> keyValues = parts.get(i);
boolean done = (i == parts.size() - 1);
if (logger.isDebugEnabled()) {
String msg = "GetRangeRequest from: " + start + " to: " + end + " reverse: " + req.getReverse() + " limit: " + req.getLimit() + " mode: " + req.getStreamingMode() + ", flushing: " + keyValues.size() + " rows, done: " + done + ", seq_id: " + req.getSequenceId();
logger.debug(msg);
opSpan.event(msg);
}
GetRangeResponse.Builder builder = GetRangeResponse.newBuilder().setDone(done).setSequenceId(req.getSequenceId());
for (KeyValue result : keyValues) {
builder.addKeyValuesBuilder().setKey(ByteString.copyFrom(result.getKey())).setValue(ByteString.copyFrom(result.getValue()));
}
synchronized (responseObserver) {
responseObserver.onNext(StreamingDatabaseResponse.newBuilder().setGetRange(builder.build()).build());
}
}
} else {
// do not partition, send as a single batch.
if (logger.isDebugEnabled()) {
String msg = "GetRangeRequest from: " + start + " to: " + end + " reverse: " + req.getReverse() + " limit: " + req.getLimit() + " mode: " + req.getStreamingMode() + ", flushing: " + results.size() + " rows, done: true, seq_id: " + req.getSequenceId();
logger.debug(msg);
opSpan.event(msg);
}
GetRangeResponse.Builder builder = GetRangeResponse.newBuilder().setDone(true).setSequenceId(req.getSequenceId());
for (KeyValue result : results) {
builder.addKeyValuesBuilder().setKey(ByteString.copyFrom(result.getKey())).setValue(ByteString.copyFrom(result.getValue()));
}
synchronized (responseObserver) {
responseObserver.onNext(StreamingDatabaseResponse.newBuilder().setGetRange(builder.build()).build());
}
}
}
} finally {
opScope.close();
opSpan.end();
}
});
}
private void handleRangeGetWithAsyncIterator(StreamingDatabaseRequest value, GetRangeRequest req, Span opSpan, KeySelector start, KeySelector end, AsyncIterable<KeyValue> range) {
AsyncIterator<KeyValue> iterator = range.iterator();
// consumer that collects key values and returns them to the user.
AtomicLong rows = new AtomicLong();
AtomicLong batches = new AtomicLong();
BiConsumer<Boolean, Throwable> hasNextConsumer = new BiConsumer<>() {
private final GetRangeResponse.Builder responseBuilder = GetRangeResponse.newBuilder();
@Override
public void accept(Boolean hasNext, Throwable throwable) {
try (Tracer.SpanInScope ignored = tracer.withSpan(opSpan)) {
boolean done = false;
if (throwable != null) {
handleThrowable(opSpan, throwable, () -> "failed to get range for start: " + start + " end: " + end + " reverse: " + req.getReverse() + " limit: " + req.getLimit() + " mode: " + req.getStreamingMode());
OperationFailureResponse.Builder builder = OperationFailureResponse.newBuilder().setSequenceId(value.getGetRange().getSequenceId()).setMessage(throwable.getMessage());
if (throwable instanceof FDBException) {
builder.setCode(((FDBException) throwable).getCode());
}
opSpan.tag("rows_read", String.valueOf(rows.get()));
opSpan.tag("batches", String.valueOf(batches.get()));
opSpan.end();
synchronized (responseObserver) {
responseObserver.onNext(StreamingDatabaseResponse.newBuilder().setOperationFailure(builder.build()).build());
}
return;
} else if (!hasNext) {
// no more rows to read, flush the last message.
done = true;
} else {
// spool everything until the onHasNext CompletableFuture is pending.
while (iterator.onHasNext().isDone() && !iterator.onHasNext().isCompletedExceptionally()) {
if (!iterator.hasNext()) {
done = true;
break;
}
KeyValue next = iterator.next();
rows.incrementAndGet();
responseBuilder.addKeyValuesBuilder().setKey(ByteString.copyFrom(next.getKey())).setValue(ByteString.copyFrom(next.getValue()));
}
}
// flush what we have.
flush(done);
if (done) {
opSpan.tag("rows_read", String.valueOf(rows.get()));
opSpan.tag("batches", String.valueOf(batches.get()));
opSpan.end();
} else {
// schedule the callback on when the future is ready.
iterator.onHasNext().whenComplete(this);
}
}
}
private void flush(boolean done) {
int keyValuesCount = responseBuilder.getKeyValuesCount();
if (!done && keyValuesCount == 0) {
return;
}
batches.incrementAndGet();
rangeGetBatches.incrementAndGet();
rowsRead.addAndGet(keyValuesCount);
if (logger.isDebugEnabled()) {
String msg = "GetRangeRequest from: " + start + " to: " + end + " reverse: " + req.getReverse() + " limit: " + req.getLimit() + " mode: " + req.getStreamingMode() + ", flushing: " + keyValuesCount + " rows, done: " + done + " seq_id: " + req.getSequenceId();
logger.debug(msg);
opSpan.event(msg);
}
synchronized (responseObserver) {
responseObserver.onNext(StreamingDatabaseResponse.newBuilder().setGetRange(responseBuilder.setDone(done).setSequenceId(req.getSequenceId()).build()).build());
}
responseBuilder.clear();
}
};
iterator.onHasNext().whenComplete(hasNextConsumer);
}
private void addLongLivingFuture(CompletableFuture<?> future) {
synchronized (this) {
this.longLivingFutures.add(future);
}
}
private void hasActiveTransactionOrThrow() {
if (tx == null && !commitStarted.get()) {
StatusRuntimeException toThrow = Status.INVALID_ARGUMENT.withDescription("must have an active transaction").asRuntimeException();
synchronized (responseObserver) {
responseObserver.onError(toThrow);
}
throw toThrow;
}
}
@Override
public void onError(Throwable t) {
populateOverallSpanStats();
longLivingFutures.forEach(x -> x.cancel(false));
closeAndDiscardTx();
if (t instanceof StatusRuntimeException && ((StatusRuntimeException) t).getStatus().getCode() == Status.CANCELLED.getCode()) {
logger.warn("client cancelled (likely no commit)");
synchronized (responseObserver) {
try {
responseObserver.onCompleted();
} catch (RuntimeException ignored) {
}
}
} else {
logger.warn("onError from client in executeTransaction", t);
synchronized (responseObserver) {
try {
responseObserver.onError(t);
} catch (RuntimeException ignored) {
}
}
}
}
@Override
public void onCompleted() {
logger.debug("client onCompleted()");
populateOverallSpanStats();
longLivingFutures.forEach(x -> x.cancel(false));
closeAndDiscardTx();
}
private void throwIfSequenceIdHasBeenSeen(long sequenceId) {
if (!knownSequenceIds.add(sequenceId)) {
onError(Status.INVALID_ARGUMENT.withDescription("sequenceId: " + sequenceId + " has been seen before in this transaction").asRuntimeException());
closeAndDiscardTx();
}
}
private void closeAndDiscardTx() {
if (tx != null) {
tx.close();
tx = null;
}
}
private void populateOverallSpanStats() {
if (overallSpan != null) {
if (rowsRead.get() > 0) {
overallSpan.tag("rows_read", String.valueOf(rowsRead.get()));
}
if (rangeGetBatches.get() > 0) {
overallSpan.tag("range_get.batches", String.valueOf(rangeGetBatches.get()));
}
if (rowsWritten.get() > 0) {
overallSpan.tag("rows_written", String.valueOf(rowsWritten.get()));
}
if (clears.get() > 0) {
overallSpan.tag("clears", String.valueOf(clears.get()));
}
if (getReadVersion.get() > 0) {
overallSpan.tag("read_version.gets", String.valueOf(getReadVersion.get()));
}
if (readConflictAdds.get() > 0) {
overallSpan.tag("add_read_conflicts", String.valueOf(readConflictAdds.get()));
}
if (writeConflictAdds.get() > 0) {
overallSpan.tag("add_write_conflicts", String.valueOf(writeConflictAdds.get()));
}
if (rowsMutated.get() > 0) {
overallSpan.tag("rows_mutated", String.valueOf(rowsMutated.get()));
}
if (getVersionstamp.get() > 0) {
overallSpan.tag("get_versionstamp", String.valueOf(getVersionstamp.get()));
}
if (keysRead.get() > 0) {
overallSpan.tag("keys_read", String.valueOf(keysRead.get()));
}
if (getApproximateSize.get() > 0) {
overallSpan.tag("get_approximate_size", String.valueOf(getApproximateSize.get()));
}
if (getEstimatedRangeSize.get() > 0) {
overallSpan.tag("get_estimated_range_size", String.valueOf(getEstimatedRangeSize.get()));
}
if (getBoundaryKeys.get() > 0) {
overallSpan.tag("get_boundary_keys", String.valueOf(getBoundaryKeys.get()));
}
if (getAddressesForKey.get() > 0) {
overallSpan.tag("get_addresses_for_key", String.valueOf(getAddressesForKey.get()));
}
}
}
};
}
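The core of handleRangeGetWithAsyncIterator and the boundary-keys handler above is a spool-and-reschedule loop: drain the AsyncIterator synchronously for as long as onHasNext() is already complete, flush a batch, and re-register the callback only once the future goes pending. Below is a stripped-down sketch of just that loop, assuming nothing beyond the FoundationDB AsyncIterator API; the BatchingDrainer name and the Consumer-based flush hook are illustrative, not part of lionrock.
import com.apple.foundationdb.KeyValue;
import com.apple.foundationdb.async.AsyncIterator;
import java.util.ArrayList;
import java.util.List;
import java.util.function.BiConsumer;
import java.util.function.Consumer;

final class BatchingDrainer implements BiConsumer<Boolean, Throwable> {
    private final AsyncIterator<KeyValue> iterator;
    private final Consumer<List<KeyValue>> flusher; // receives each batch (illustrative hook)
    private final List<KeyValue> batch = new ArrayList<>();

    BatchingDrainer(AsyncIterator<KeyValue> iterator, Consumer<List<KeyValue>> flusher) {
        this.iterator = iterator;
        this.flusher = flusher;
    }

    void start() {
        iterator.onHasNext().whenComplete(this);
    }

    @Override
    public void accept(Boolean hasNext, Throwable throwable) {
        if (throwable != null) {
            iterator.cancel(); // real code would also report the error to the caller
            return;
        }
        boolean done = !hasNext;
        // Spool synchronously while the next hasNext answer is already available.
        while (!done && iterator.onHasNext().isDone() && !iterator.onHasNext().isCompletedExceptionally()) {
            if (!iterator.hasNext()) {
                done = true;
            } else {
                batch.add(iterator.next());
            }
        }
        if (!batch.isEmpty() || done) {
            flusher.accept(new ArrayList<>(batch)); // flush what we have (empty final batch signals done)
            batch.clear();
        }
        if (!done) {
            iterator.onHasNext().whenComplete(this); // re-arm only when the future is pending
        }
    }
}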
Use of com.apple.foundationdb.async.AsyncIterator in project fdb-record-layer by FoundationDB.
The class IndexingByIndex, method buildRangeOnly.
@Nonnull
private CompletableFuture<Boolean> buildRangeOnly(@Nonnull FDBRecordStore store, byte[] startBytes, byte[] endBytes, @Nonnull AtomicLong recordsScanned) {
// return false when done
validateSameMetadataOrThrow(store);
final Index index = common.getIndex();
final IndexMaintainer maintainer = store.getIndexMaintainer(index);
// idempotency - we could have verified this on the first iteration only, but the repeated checks seem harmless
validateOrThrowEx(maintainer.isIdempotent(), "target index is not idempotent");
// readability - This method shouldn't block if one has already opened the record store (as we did)
Index srcIndex = getSourceIndex(store.getRecordMetaData());
validateOrThrowEx(store.isIndexReadable(srcIndex), "source index is not readable");
RangeSet rangeSet = new RangeSet(store.indexRangeSubspace(index));
AsyncIterator<Range> ranges = rangeSet.missingRanges(store.ensureContextActive(), startBytes, endBytes).iterator();
// respect the limit in this path; +1 allows a continuation item
final ExecuteProperties.Builder executeProperties = ExecuteProperties.newBuilder().setIsolationLevel(IsolationLevel.SNAPSHOT).setReturnedRowLimit(getLimit() + 1);
final ScanProperties scanProperties = new ScanProperties(executeProperties.build());
return ranges.onHasNext().thenCompose(hasNext -> {
if (Boolean.FALSE.equals(hasNext)) {
// no more missing ranges - all done
return AsyncUtil.READY_FALSE;
}
final Range range = ranges.next();
final Tuple rangeStart = RangeSet.isFirstKey(range.begin) ? null : Tuple.fromBytes(range.begin);
final Tuple rangeEnd = RangeSet.isFinalKey(range.end) ? null : Tuple.fromBytes(range.end);
final TupleRange tupleRange = TupleRange.between(rangeStart, rangeEnd);
RecordCursor<FDBIndexedRecord<Message>> cursor = store.scanIndexRecords(srcIndex.getName(), IndexScanType.BY_VALUE, tupleRange, null, scanProperties);
final AtomicReference<RecordCursorResult<FDBIndexedRecord<Message>>> lastResult = new AtomicReference<>(RecordCursorResult.exhausted());
final AtomicBoolean hasMore = new AtomicBoolean(true);
// Note that currently indexing by index is only implemented for idempotent indexes
final boolean isIdempotent = true;
return iterateRangeOnly(store, cursor, this::getRecordIfTypeMatch, lastResult, hasMore, recordsScanned, isIdempotent)
.thenApply(vignore -> hasMore.get() ? lastResult.get().get().getIndexEntry().getKey() : rangeEnd)
.thenCompose(cont -> rangeSet.insertRange(store.ensureContextActive(), packOrNull(rangeStart), packOrNull(cont), true).thenApply(ignore -> !Objects.equals(cont, rangeEnd)));
});
}
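Both this method and the scrubber above repeat the same three-line conversion from a RangeSet Range to a TupleRange. As a worked illustration (class and method names assumed, not from the project), the sentinel keys map to null endpoints, which TupleRange.between treats as unbounded:
import com.apple.foundationdb.Range;
import com.apple.foundationdb.async.RangeSet;
import com.apple.foundationdb.record.TupleRange;
import com.apple.foundationdb.tuple.Tuple;

final class RangeConversions {
    // RangeSet's sentinel keys mark open endpoints, which TupleRange.between
    // models as null (unbounded on that side).
    static TupleRange toTupleRange(Range range) {
        Tuple begin = RangeSet.isFirstKey(range.begin) ? null : Tuple.fromBytes(range.begin);
        Tuple end = RangeSet.isFinalKey(range.end) ? null : Tuple.fromBytes(range.end);
        return TupleRange.between(begin, end);
    }
}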
Use of com.apple.foundationdb.async.AsyncIterator in project fdb-record-layer by FoundationDB.
The class IndexingMultiTargetByRecords, method buildRangeOnly.
@Nonnull
private CompletableFuture<Boolean> buildRangeOnly(@Nonnull FDBRecordStore store, byte[] startBytes, byte[] endBytes, @Nonnull AtomicLong recordsScanned) {
// return false when done
/* Multi-target consistency:
* 1. Identify missing ranges from the first target index only.
* 2. Update every target index's range set as the indexes are built - each inserted range is validated as empty.
* 3. When each index is marked readable, validate that its range is completely built.
*/
validateSameMetadataOrThrow(store);
RangeSet rangeSet = new RangeSet(store.indexRangeSubspace(common.getPrimaryIndex()));
AsyncIterator<Range> ranges = rangeSet.missingRanges(store.ensureContextActive(), startBytes, endBytes).iterator();
final List<Index> targetIndexes = common.getTargetIndexes();
final List<RangeSet> targetRangeSets = targetIndexes.stream().map(targetIndex -> new RangeSet(store.indexRangeSubspace(targetIndex))).collect(Collectors.toList());
final boolean isIdempotent = areTheyAllIdempotent(store, targetIndexes);
final IsolationLevel isolationLevel = isIdempotent ? IsolationLevel.SNAPSHOT : IsolationLevel.SERIALIZABLE;
// always respect the limit in this path; +1 allows a continuation item
final ExecuteProperties.Builder executeProperties = ExecuteProperties.newBuilder().setIsolationLevel(isolationLevel).setReturnedRowLimit(getLimit() + 1);
final ScanProperties scanProperties = new ScanProperties(executeProperties.build());
return ranges.onHasNext().thenCompose(hasNext -> {
if (Boolean.FALSE.equals(hasNext)) {
// no more missing ranges - all done
return AsyncUtil.READY_FALSE;
}
final Range range = ranges.next();
final Tuple rangeStart = RangeSet.isFirstKey(range.begin) ? null : Tuple.fromBytes(range.begin);
final Tuple rangeEnd = RangeSet.isFinalKey(range.end) ? null : Tuple.fromBytes(range.end);
final TupleRange tupleRange = TupleRange.between(rangeStart, rangeEnd);
RecordCursor<FDBStoredRecord<Message>> cursor = store.scanRecords(tupleRange, null, scanProperties);
final AtomicReference<RecordCursorResult<FDBStoredRecord<Message>>> lastResult = new AtomicReference<>(RecordCursorResult.exhausted());
final AtomicBoolean hasMore = new AtomicBoolean(true);
return iterateRangeOnly(store, cursor, this::getRecordIfTypeMatch, lastResult, hasMore, recordsScanned, isIdempotent)
.thenApply(vignore -> hasMore.get() ? lastResult.get().get().getPrimaryKey() : rangeEnd)
.thenCompose(cont -> insertRanges(store.ensureContextActive(), targetRangeSets, packOrNull(rangeStart), packOrNull(cont)).thenApply(ignore -> !Objects.equals(cont, rangeEnd)));
});
}
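The insertRanges helper called above is not shown in this snippet. A plausible reconstruction (its exact name and signature are assumed here, not taken from the project) fans the same built range out to every target index's RangeSet and joins the futures:
import com.apple.foundationdb.Transaction;
import com.apple.foundationdb.async.AsyncUtil;
import com.apple.foundationdb.async.RangeSet;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.stream.Collectors;

final class MultiTargetRanges {
    // Marks the same range as built in every target index's RangeSet.
    // requireEmpty=true only records the range if no part of it is already marked built.
    static CompletableFuture<Void> insertRanges(Transaction tr, List<RangeSet> rangeSets, byte[] start, byte[] end) {
        return AsyncUtil.whenAll(rangeSets.stream()
                .map(rangeSet -> rangeSet.insertRange(tr, start, end, true))
                .collect(Collectors.toList()));
    }
}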