use of com.apple.foundationdb.Range in project fdb-record-layer by FoundationDB.
the class FDBRecordStore method deleteAllRecords.
@Override
public void deleteAllRecords() {
    preloadCache.invalidateAll();
    Transaction tr = ensureContextActive();
    // Clear out all data except for the store header key and the index state space.
    // Those two subspaces are determined by the configuration of the record store
    // rather than the records.
    Range indexStateRange = indexStateSubspace().range();
    tr.clear(recordsSubspace().getKey(), indexStateRange.begin);
    tr.clear(indexStateRange.end, getSubspace().range().end);
}
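The two clear() calls work with the raw byte boundaries that Subspace.range() exposes: they remove everything from the records subspace up to the index state subspace, and everything after it up to the end of the store, leaving the store header key and the index states in place. Below is a minimal sketch of the same pattern with standalone Range objects; the subspace parameters are illustrative placeholders (not the record store's real accessors), and the sketch assumes the records subspace sorts before the index state subspace, as in the method above.

import com.apple.foundationdb.Range;
import com.apple.foundationdb.Transaction;
import com.apple.foundationdb.subspace.Subspace;

final class ClearAroundSubspaceSketch {
    // storeSubspace, recordsSubspace, and indexStateSubspace are placeholders
    // for the subspaces an FDBRecordStore manages.
    static void clearAllButIndexStates(Transaction tr,
                                       Subspace storeSubspace,
                                       Subspace recordsSubspace,
                                       Subspace indexStateSubspace) {
        Range indexStates = indexStateSubspace.range();
        // Keys from the start of the record data up to (but not including) the index state keys.
        tr.clear(new Range(recordsSubspace.getKey(), indexStates.begin));
        // Keys after the index state keys up to the end of the store's subspace.
        tr.clear(new Range(indexStates.end, storeSubspace.range().end));
    }
}

Transaction.clear(byte[], byte[]) and clear(Range) are equivalent here; the sketch uses the Range overload only to keep the boundaries explicit.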
use of com.apple.foundationdb.Range in project fdb-record-layer by FoundationDB.
the class FDBRecordStore method getSnapshotRecordCount.
@Override
public CompletableFuture<Long> getSnapshotRecordCount(@Nonnull KeyExpression key, @Nonnull Key.Evaluated value,
                                                      @Nonnull IndexQueryabilityFilter indexQueryabilityFilter) {
    if (getRecordMetaData().getRecordCountKey() != null) {
        if (key.getColumnSize() != value.size()) {
            throw recordCoreException("key and value are not the same size");
        }
        final ReadTransaction tr = context.readTransaction(true);
        final Tuple subkey = Tuple.from(RECORD_COUNT_KEY).addAll(value.toTupleAppropriateList());
        if (getRecordMetaData().getRecordCountKey().equals(key)) {
            return tr.get(getSubspace().pack(subkey)).thenApply(FDBRecordStore::decodeRecordCount);
        } else if (key.isPrefixKey(getRecordMetaData().getRecordCountKey())) {
            AsyncIterable<KeyValue> kvs = tr.getRange(getSubspace().range(subkey));
            return MoreAsyncUtil.reduce(getExecutor(), kvs.iterator(), 0L,
                    (count, kv) -> count + decodeRecordCount(kv.getValue()));
        }
    }
    return evaluateAggregateFunction(Collections.emptyList(), IndexFunctionHelper.count(key),
            TupleRange.allOf(value.toTuple()), IsolationLevel.SNAPSHOT, indexQueryabilityFilter)
            .thenApply(tuple -> tuple.getLong(0));
}
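When the grouping key is only a prefix of the record count key, the store has to add up several stored counts, which it does by reducing a range read. Below is a minimal sketch of that reduce step, assuming AsyncUtil.forEach from the FoundationDB Java bindings; decodeCount is a placeholder standing in for FDBRecordStore::decodeRecordCount, not the real decoder.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.ToLongFunction;

import com.apple.foundationdb.Range;
import com.apple.foundationdb.ReadTransaction;
import com.apple.foundationdb.async.AsyncUtil;

final class CountRangeSumSketch {
    // decodeCount stands in for the store's value decoder (decodeRecordCount).
    static CompletableFuture<Long> sumCounts(ReadTransaction tr, Range countRange,
                                             ToLongFunction<byte[]> decodeCount) {
        AtomicLong total = new AtomicLong();
        // Stream every key-value pair in the count range and accumulate the decoded counts.
        return AsyncUtil.forEach(tr.getRange(countRange),
                        kv -> total.addAndGet(decodeCount.applyAsLong(kv.getValue())))
                .thenApply(vignore -> total.get());
    }
}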
use of com.apple.foundationdb.Range in project fdb-record-layer by FoundationDB.
the class FDBRecordStore method markIndexReadable.
/**
* Marks an index as readable. See the version of
* {@link #markIndexReadable(String) markIndexReadable()}
* that takes a {@link String} as a parameter for more details.
*
* @param index the index to mark readable
* @return a future that will either complete exceptionally if the index can not
* be made readable or will contain <code>true</code> if the store was modified
* and <code>false</code> otherwise
*/
@Nonnull
public CompletableFuture<Boolean> markIndexReadable(@Nonnull Index index) {
    if (recordStoreStateRef.get() == null) {
        return preloadRecordStoreStateAsync().thenCompose(vignore -> markIndexReadable(index));
    }
    addIndexStateReadConflict(index.getName());
    beginRecordStoreStateWrite();
    boolean haveFuture = false;
    try {
        Transaction tr = ensureContextActive();
        byte[] indexKey = indexStateSubspace().pack(index.getName());
        CompletableFuture<Boolean> future = tr.get(indexKey).thenCompose(previous -> {
            if (previous != null) {
                CompletableFuture<Optional<Range>> builtFuture = firstUnbuiltRange(index);
                CompletableFuture<Optional<RecordIndexUniquenessViolation>> uniquenessFuture =
                        whenAllIndexUniquenessCommitChecks(index)
                                .thenCompose(vignore -> scanUniquenessViolations(index, 1).first());
                return CompletableFuture.allOf(builtFuture, uniquenessFuture).thenApply(vignore -> {
                    Optional<Range> firstUnbuilt = context.join(builtFuture);
                    Optional<RecordIndexUniquenessViolation> uniquenessViolation = context.join(uniquenessFuture);
                    if (firstUnbuilt.isPresent()) {
                        throw new IndexNotBuiltException("Attempted to make unbuilt index readable",
                                firstUnbuilt.get(),
                                LogMessageKeys.INDEX_NAME, index.getName(),
                                "unbuiltRangeBegin", ByteArrayUtil2.loggable(firstUnbuilt.get().begin),
                                "unbuiltRangeEnd", ByteArrayUtil2.loggable(firstUnbuilt.get().end),
                                subspaceProvider.logKey(), subspaceProvider.toString(context),
                                LogMessageKeys.SUBSPACE_KEY, index.getSubspaceKey());
                    } else if (uniquenessViolation.isPresent()) {
                        RecordIndexUniquenessViolation wrapped = new RecordIndexUniquenessViolation(
                                "Uniqueness violation when making index readable", uniquenessViolation.get());
                        wrapped.addLogInfo(LogMessageKeys.INDEX_NAME, index.getName(),
                                subspaceProvider.logKey(), subspaceProvider.toString(context));
                        throw wrapped;
                    } else {
                        updateIndexState(index.getName(), indexKey, IndexState.READABLE);
                        clearReadableIndexBuildData(tr, index);
                        return true;
                    }
                });
            } else {
                return AsyncUtil.READY_FALSE;
            }
        }).whenComplete((b, t) -> endRecordStoreStateWrite())
                .thenApply(this::addRemoveReplacedIndexesCommitCheckIfChanged);
        haveFuture = true;
        return future;
    } finally {
        if (!haveFuture) {
            endRecordStoreStateWrite();
        }
    }
}
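Before flipping the state, the method insists that firstUnbuiltRange(index) is empty, i.e. that no stretch of the record space is still missing from the index's build progress. Below is a minimal sketch of how such a check can be phrased against the RangeSet helper from fdb-extensions, assuming a RangeSet backed by the index's range subspace; it is an illustration under that assumption, not the record layer's actual firstUnbuiltRange.

import java.util.Optional;
import java.util.concurrent.CompletableFuture;

import com.apple.foundationdb.Range;
import com.apple.foundationdb.ReadTransaction;
import com.apple.foundationdb.async.RangeSet;

final class FirstUnbuiltRangeSketch {
    // rangeSet is assumed to track which portions of the record space have been indexed,
    // e.g. new RangeSet(store.indexRangeSubspace(index)).
    static CompletableFuture<Optional<Range>> firstMissingRange(ReadTransaction tr, RangeSet rangeSet) {
        // Ask for at most one missing range over the whole key space.
        return rangeSet.missingRanges(tr, null, null, 1).asList()
                .thenApply(missing -> missing.isEmpty()
                        ? Optional.<Range>empty()
                        : Optional.of(missing.get(0)));
    }
}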
use of com.apple.foundationdb.Range in project fdb-record-layer by FoundationDB.
the class IndexingBase method rebuildIndexAsync.
// rebuildIndexAsync - builds the whole index inline (without committing)
@Nonnull
public CompletableFuture<Void> rebuildIndexAsync(@Nonnull FDBRecordStore store) {
    return forEachTargetIndex(index -> store.clearAndMarkIndexWriteOnly(index).thenCompose(bignore -> {
        // Insert the full range into the range set. (The internal rebuild method only indexes the records and
        // does not update the range set.) This is important because if marking the index as readable fails (for
        // example, because of uniqueness violations), we still want to record in the range set that the entire
        // range was built so that future index builds don't re-scan the record data and so that non-idempotent
        // indexes know to update the index on all record saves.
        Transaction tr = store.ensureContextActive();
        RangeSet rangeSet = new RangeSet(store.indexRangeSubspace(index));
        return rangeSet.insertRange(tr, null, null);
    })).thenCompose(vignore -> rebuildIndexInternalAsync(store));
}
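The insertRange(tr, null, null) call marks the range set's entire key space as built in one shot. Below is a minimal sketch of just that step; indexRangeSubspace is a placeholder for store.indexRangeSubspace(index).

import java.util.concurrent.CompletableFuture;

import com.apple.foundationdb.Transaction;
import com.apple.foundationdb.async.RangeSet;
import com.apple.foundationdb.subspace.Subspace;

final class FullRangeInsertSketch {
    // indexRangeSubspace is an illustrative placeholder for the subspace in which
    // the index's build progress (its RangeSet) is stored.
    static CompletableFuture<Boolean> markWholeRangeBuilt(Transaction tr, Subspace indexRangeSubspace) {
        RangeSet rangeSet = new RangeSet(indexRangeSubspace);
        // Null endpoints cover everything from the smallest to the largest possible key,
        // so later missingRanges() scans report nothing left to build.
        return rangeSet.insertRange(tr, null, null);
    }
}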
use of com.apple.foundationdb.Range in project fdb-record-layer by FoundationDB.
the class IndexingByRecords method buildEndpoints.
/**
 * Builds (transactionally) the endpoints of an index. What this means is that it builds everything from the
 * beginning of the key space to the first record and everything from the last record to the end of the key space.
 * There won't be any records within these ranges (except for the last record of the record store), but
 * it does mean that any records that get added to these ranges in the future will correctly update
 * the index. This means, e.g., that if the workload primarily adds records to the record store
 * after the current last record (because perhaps the primary key is based off of an atomic counter
 * or the current time), running this method will be highly contentious, but once it completes,
 * the rest of the index build should happen without any more conflicts.
 *
 * This will return a (possibly null) {@link TupleRange} that contains the primary keys of the
 * first and last records within the record store. This can then be used either to build the
 * range right away or to divvy up the remaining ranges between multiple agents working
 * in parallel if one desires.
 *
 * @param store the record store in which to rebuild the index
 * @param recordsScanned running counter of records scanned, carried across transactions
 * @return a future that will contain the range of records in the interior of the record store
 */
@Nonnull
public CompletableFuture<TupleRange> buildEndpoints(@Nonnull FDBRecordStore store, @Nullable AtomicLong recordsScanned) {
    final RangeSet rangeSet = new RangeSet(store.indexRangeSubspace(common.getIndex()));
    if (TupleRange.ALL.equals(recordsRange)) {
        return buildEndpoints(store, rangeSet, recordsScanned);
    }
    // If the records do not occupy the whole range, first mark the outside as built.
    final Range asRange = recordsRange.toRange();
    return CompletableFuture.allOf(
                    rangeSet.insertRange(store.ensureContextActive(), null, asRange.begin),
                    rangeSet.insertRange(store.ensureContextActive(), asRange.end, null))
            .thenCompose(vignore -> buildEndpoints(store, rangeSet, recordsScanned));
}
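The two insertRange calls record that the stretches of key space outside the store's record range never need building, so only the interior returned as a TupleRange remains to be scanned. Below is a minimal sketch of that step in isolation; the parameters mirror the fields used above and do not represent a real IndexingByRecords API.

import java.util.concurrent.CompletableFuture;

import com.apple.foundationdb.Range;
import com.apple.foundationdb.Transaction;
import com.apple.foundationdb.async.RangeSet;
import com.apple.foundationdb.record.TupleRange;

final class OutsideRangesSketch {
    static CompletableFuture<Void> markOutsideRecordsBuilt(Transaction tr, RangeSet rangeSet, TupleRange recordsRange) {
        Range asRange = recordsRange.toRange();
        // Nothing below the first possible record key or above the last one can hold records,
        // so mark both outer stretches as already built in the index's range set.
        CompletableFuture<Boolean> below = rangeSet.insertRange(tr, null, asRange.begin);
        CompletableFuture<Boolean> above = rangeSet.insertRange(tr, asRange.end, null);
        return CompletableFuture.allOf(below, above);
    }
}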