Use of com.apple.foundationdb.record.RecordCoreException in project fdb-record-layer by FoundationDB.
From the class KeySpaceDirectory, method listSubdirectoryAsync.
@Nonnull
// SonarQube doesn't realize that the cursor is wrapped and returned
@SuppressWarnings("squid:S2095")
protected RecordCursor<ResolvedKeySpacePath> listSubdirectoryAsync(@Nullable KeySpacePath listFrom,
                                                                   @Nonnull FDBRecordContext context,
                                                                   @Nonnull String subdirName,
                                                                   @Nullable ValueRange<?> valueRange,
                                                                   @Nullable byte[] continuation,
                                                                   @Nonnull ScanProperties scanProperties) {
    if (listFrom != null && listFrom.getDirectory() != this) {
        throw new RecordCoreException("Provided path does not belong to this directory")
                .addLogInfo("path", listFrom, "directory", this.getName());
    }

    final KeySpaceDirectory subdir = getSubdirectory(subdirName);
    final CompletableFuture<ResolvedKeySpacePath> resolvedFromFuture = listFrom == null
            ? CompletableFuture.completedFuture(null)
            : listFrom.toResolvedPathAsync(context);

    // The chained cursor cannot implement reverse scan, so we implement it by having the
    // inner key value cursor do the reversing but telling the chained cursor we are moving
    // forward.
    final ScanProperties chainedCursorScanProperties;
    if (scanProperties.isReverse()) {
        chainedCursorScanProperties = scanProperties.setReverse(false);
    } else {
        chainedCursorScanProperties = scanProperties;
    }

    // For the read of the individual row keys, we only want to read a single key. In addition,
    // the ChainedCursor is going to do counting of our reads to apply any limits that were specified
    // on the ScanProperties. We don't want the inner KeyValueCursor in nextTuple() to ALSO count those
    // same reads so we clear out its limits.
    final ScanProperties keyReadScanProperties = scanProperties.with(
            props -> props.clearState().setReturnedRowLimit(1));

    return new LazyCursor<>(
            resolvedFromFuture.thenCompose(resolvedFrom -> {
                final Subspace subspace = resolvedFrom == null ? new Subspace() : resolvedFrom.toSubspace();
                return subdir.getValueRange(context, valueRange, subspace).thenApply(range -> {
                    final RecordCursor<Tuple> cursor = new ChainedCursor<>(
                            context,
                            lastKey -> nextTuple(context, subspace, range, lastKey, keyReadScanProperties),
                            Tuple::pack,
                            Tuple::fromBytes,
                            continuation,
                            chainedCursorScanProperties);
                    return cursor.mapPipelined(tuple -> {
                        final Tuple key = Tuple.fromList(tuple.getItems());
                        return findChildForKey(context, resolvedFrom, key, 1, 0);
                    }, 1);
                });
            }),
            context.getExecutor());
}
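listSubdirectoryAsync throws RecordCoreException only for the mismatched-path guard at the top, attaching the offending path and directory as structured log info rather than formatting them into the message. Below is a minimal, self-contained sketch of how a caller might observe that, assuming the fluent addLogInfo used above and the getLogInfo accessor RecordCoreException inherits from LoggableException; the checkPath helper and its arguments are hypothetical stand-ins, not part of KeySpaceDirectory.

import com.apple.foundationdb.record.RecordCoreException;

import java.util.Map;

public class PathGuardSketch {
    // Hypothetical stand-in for the directory check at the top of listSubdirectoryAsync.
    static void checkPath(Object pathDirectory, Object expectedDirectory, Object path, String directoryName) {
        if (pathDirectory != expectedDirectory) {
            throw new RecordCoreException("Provided path does not belong to this directory")
                    .addLogInfo("path", path, "directory", directoryName);
        }
    }

    public static void main(String[] args) {
        try {
            checkPath(new Object(), new Object(), "app/user[42]", "user");
        } catch (RecordCoreException e) {
            // The attached key/value pairs are available for structured logging.
            Map<String, Object> logInfo = e.getLogInfo();
            System.err.println(e.getMessage() + " " + logInfo);
        }
    }
}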
Use of com.apple.foundationdb.record.RecordCoreException in project fdb-record-layer by FoundationDB.
From the class InComparisonToExplodeRule, method onMatch.
@Override
public void onMatch(@Nonnull PlannerRuleCall call) {
    final PlannerBindings bindings = call.getBindings();
    final SelectExpression selectExpression = bindings.get(root);

    // we don't need iteration stability
    final List<? extends ValuePredicate> inPredicatesList = bindings.getAll(inPredicateMatcher);
    if (inPredicatesList.isEmpty()) {
        return;
    }

    final Set<QueryPredicate> inPredicates = Sets.newIdentityHashSet();
    inPredicates.addAll(inPredicatesList);

    final ImmutableList.Builder<Quantifier> transformedQuantifiers = ImmutableList.builder();
    final ImmutableList.Builder<QueryPredicate> transformedPredicates = ImmutableList.builder();
    for (final QueryPredicate predicate : selectExpression.getPredicates()) {
        if (inPredicates.contains(predicate)) {
            final ValuePredicate valuePredicate = (ValuePredicate) predicate;
            final Comparisons.Comparison comparison = valuePredicate.getComparison();
            Verify.verify(comparison.getType() == Comparisons.Type.IN);
            final ExplodeExpression explodeExpression;
            if (comparison instanceof Comparisons.ListComparison) {
                explodeExpression = new ExplodeExpression(new LiteralValue<>(comparison.getComparand()));
            } else if (comparison instanceof Comparisons.ParameterComparison) {
                explodeExpression = new ExplodeExpression(QuantifiedColumnValue.of(
                        CorrelationIdentifier.of(((Comparisons.ParameterComparison) comparison).getParameter()), 0));
            } else {
                throw new RecordCoreException("unknown in comparison " + comparison.getClass().getSimpleName());
            }
            final Quantifier.ForEach newQuantifier = Quantifier.forEach(GroupExpressionRef.of(explodeExpression));
            transformedPredicates.add(new ValuePredicate(
                    ((ValuePredicate) predicate).getValue(),
                    new Comparisons.ParameterComparison(
                            Comparisons.Type.EQUALS,
                            Bindings.Internal.CORRELATION.bindingName(newQuantifier.getAlias().toString()),
                            Bindings.Internal.CORRELATION)));
            transformedQuantifiers.add(newQuantifier);
        } else {
            transformedPredicates.add(predicate);
        }
    }
    transformedQuantifiers.addAll(bindings.getAll(innerQuantifierMatcher));
    call.yield(call.ref(new SelectExpression(
            selectExpression.getResultValues(), transformedQuantifiers.build(), transformedPredicates.build())));
}
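The else branch is the only path to RecordCoreException here: the rule knows how to explode literal lists and bound parameters, and any other comparison subtype fails fast with a message naming the unexpected class. Below is a small sketch of that dispatch in isolation; the describeSource method is illustrative, the Comparisons import assumes the usual record-layer package layout, and only the accessors the rule itself calls (getComparand, getParameter) are used.

import com.apple.foundationdb.record.RecordCoreException;
import com.apple.foundationdb.record.query.expressions.Comparisons;

final class InComparisonDispatchSketch {
    // Mirrors the rule's handling: literal-list and parameter comparisons are supported,
    // everything else is rejected with a diagnosable RecordCoreException.
    static String describeSource(Comparisons.Comparison comparison) {
        if (comparison instanceof Comparisons.ListComparison) {
            return "explode literal list: " + comparison.getComparand();
        } else if (comparison instanceof Comparisons.ParameterComparison) {
            return "explode bound parameter: " + ((Comparisons.ParameterComparison) comparison).getParameter();
        } else {
            throw new RecordCoreException("unknown in comparison " + comparison.getClass().getSimpleName());
        }
    }
}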
Use of com.apple.foundationdb.record.RecordCoreException in project fdb-record-layer by FoundationDB.
From the class TextIndexTest, method textIndexPerf1000ParallelInsert.
@Tag(Tags.Performance)
@Test
public void textIndexPerf1000ParallelInsert() throws Exception {
    // Create 1000 records
    Random r = new Random();
    List<SimpleDocument> records = getRandomRecords(r, 1000);
    try (FDBRecordContext context = openContext()) {
        openRecordStore(context);
        recordStore.asBuilder().create();
        commit(context);
    }
    final FDBRecordStore.Builder storeBuilder = recordStore.asBuilder();
    long startTime = System.nanoTime();
    int oldMaxAttempts = FDBDatabaseFactory.instance().getMaxAttempts();
    FDBDatabaseFactory.instance().setMaxAttempts(Integer.MAX_VALUE);
    try {
        CompletableFuture<?>[] workerFutures = new CompletableFuture<?>[10];
        int recordsPerWorker = records.size() / workerFutures.length;
        for (int i = 0; i < workerFutures.length; i++) {
            List<SimpleDocument> workerDocs = records.subList(i * recordsPerWorker, (i + 1) * recordsPerWorker);
            CompletableFuture<Void> workerFuture = new CompletableFuture<>();
            Thread workerThread = new Thread(() -> {
                try {
                    for (int j = 0; j < workerDocs.size(); j += 10) {
                        // Use retry loop to catch not_committed errors
                        List<SimpleDocument> batchDocuments = workerDocs.subList(j, j + 10);
                        fdb.run(context -> {
                            try {
                                FDBRecordStore store = storeBuilder.copyBuilder().setContext(context).open();
                                for (SimpleDocument document : batchDocuments) {
                                    store.saveRecord(document);
                                }
                                return null;
                            } catch (RecordCoreException e) {
                                throw e;
                            } catch (Exception e) {
                                throw new RecordCoreException(e);
                            }
                        });
                    }
                    workerFuture.complete(null);
                } catch (RuntimeException e) {
                    workerFuture.completeExceptionally(e);
                }
            });
            workerThread.setName("insert-worker-" + i);
            workerThread.start();
            workerFutures[i] = workerFuture;
        }
        CompletableFuture.allOf(workerFutures).get();
        long endTime = System.nanoTime();
        LOGGER.info("performed 1000 parallel insertions in {} seconds.", (endTime - startTime) * 1e-9);
        printUsage();
    } finally {
        FDBDatabaseFactory.instance().setMaxAttempts(oldMaxAttempts);
    }
}
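Inside fdb.run, each worker lets RecordCoreException propagate unchanged and wraps anything else, because the retry lambda cannot throw checked exceptions and the test still wants failures to surface as unchecked record-layer errors. A hedged sketch of that wrap-or-rethrow idiom on its own; the Callable parameter is a stand-in for the saveRecord batch, not part of the test.

import com.apple.foundationdb.record.RecordCoreException;

import java.util.concurrent.Callable;

final class WrapOrRethrowSketch {
    static <T> T call(Callable<T> batchWork) {
        try {
            return batchWork.call();
        } catch (RecordCoreException e) {
            // Already an unchecked exception the record layer understands; rethrow as-is.
            throw e;
        } catch (Exception e) {
            // Checked or unexpected exceptions are wrapped so they still propagate unchecked.
            throw new RecordCoreException(e);
        }
    }
}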
Use of com.apple.foundationdb.record.RecordCoreException in project fdb-record-layer by FoundationDB.
From the class GeophileSpatialJoin, method recordCursor.
@Nonnull
public RecordCursor<Pair<IndexEntry, IndexEntry>> recordCursor(@Nonnull SpatialIndex<GeophileRecordImpl> left,
                                                               @Nonnull SpatialIndex<GeophileRecordImpl> right) {
    // TODO: This is a synchronous implementation using Iterators. A proper RecordCursor implementation needs
    // Geophile async extensions. Also need to pass down executeProperties.
    final Iterator<com.geophile.z.Pair<GeophileRecordImpl, GeophileRecordImpl>> iterator;
    try {
        iterator = spatialJoin.iterator(left, right);
    } catch (IOException ex) {
        throw new RecordCoreException("Unexpected IO exception", ex);
    } catch (InterruptedException ex) {
        Thread.currentThread().interrupt();
        throw new RecordCoreException(ex);
    }
    final RecordCursor<com.geophile.z.Pair<GeophileRecordImpl, GeophileRecordImpl>> recordCursor =
            RecordCursor.fromIterator(store.getExecutor(), iterator);
    return recordCursor.map(p -> Pair.of(p.left().getIndexEntry(), p.right().getIndexEntry()));
}
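The tail of recordCursor is an adapter: a synchronous Iterator is wrapped with RecordCursor.fromIterator and then reshaped with map. A minimal standalone sketch of that pattern is below; the int[] element type and common-pool executor are placeholders, and asList is assumed to be the usual RecordCursor convenience that asynchronously collects the remaining results into a list.

import com.apple.foundationdb.record.RecordCursor;

import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ForkJoinPool;

final class IteratorToCursorSketch {
    static CompletableFuture<List<String>> describePairs(List<int[]> pairs) {
        // Wrap a plain Iterator as a RecordCursor, then map each element, as recordCursor() does.
        RecordCursor<int[]> cursor = RecordCursor.fromIterator(ForkJoinPool.commonPool(), pairs.iterator());
        return cursor.map(p -> p[0] + "/" + p[1]).asList();
    }
}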
Use of com.apple.foundationdb.record.RecordCoreException in project fdb-record-layer by FoundationDB.
From the class GeophileSpatialJoin, method getSpatialIndex.
@Nonnull
public SpatialIndex<GeophileRecordImpl> getSpatialIndex(@Nonnull String indexName,
                                                        @Nonnull ScanComparisons prefixComparisons,
                                                        @Nonnull BiFunction<IndexEntry, Tuple, GeophileRecordImpl> recordFunction) {
    if (!prefixComparisons.isEquality()) {
        throw new RecordCoreArgumentException("prefix comparisons must only have equality");
    }
    // TODO: Add a FDBRecordStoreBase.getIndexMaintainer String overload to do this.
    final IndexMaintainer indexMaintainer = store.getIndexMaintainer(store.getRecordMetaData().getIndex(indexName));
    final TupleRange prefixRange = prefixComparisons.toTupleRange(store, context);
    // Since this is an equality, will match getHigh(), too.
    final Tuple prefix = prefixRange.getLow();
    final Index<GeophileRecordImpl> index = new GeophileIndexImpl(indexMaintainer, prefix, recordFunction);
    final Space space = ((GeophileIndexMaintainer) indexMaintainer).getSpace();
    try {
        return SpatialIndex.newSpatialIndex(space, index);
    } catch (IOException ex) {
        throw new RecordCoreException("Unexpected IO exception", ex);
    } catch (InterruptedException ex) {
        Thread.currentThread().interrupt();
        throw new RecordCoreException(ex);
    }
}
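Both GeophileSpatialJoin methods translate Geophile's checked exceptions the same way: IOException becomes a RecordCoreException with a message, and InterruptedException restores the thread's interrupt flag before being wrapped so callers higher up still see the interrupt. A sketch of that translation on its own; the BlockingCall interface is illustrative, and the RecordCoreException constructors are the ones used in the two methods above.

import com.apple.foundationdb.record.RecordCoreException;

import java.io.IOException;

final class CheckedToUncheckedSketch {
    interface BlockingCall<T> {
        T invoke() throws IOException, InterruptedException;
    }

    static <T> T toUnchecked(BlockingCall<T> blockingCall) {
        try {
            return blockingCall.invoke();
        } catch (IOException ex) {
            throw new RecordCoreException("Unexpected IO exception", ex);
        } catch (InterruptedException ex) {
            // Preserve the interrupt status for code further up the stack.
            Thread.currentThread().interrupt();
            throw new RecordCoreException(ex);
        }
    }
}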