Use of org.neo4j.storageengine.api.StorageNodeCursor in project neo4j by neo4j.
Class GenerateIndexUpdatesStepTest, method shouldSendSingleBatchIfBelowMaxSizeThreshold.
@ValueSource(booleans = { true, false })
@ParameterizedTest
void shouldSendSingleBatchIfBelowMaxSizeThreshold(boolean alsoWrite) throws Exception {
    // given
    StubStorageCursors data = someUniformData(10);
    TestPropertyScanConsumer scanConsumer = new TestPropertyScanConsumer();
    GenerateIndexUpdatesStep<StorageNodeCursor> step = new GenerateIndexUpdatesStep<>(
            new SimpleStageControl(), DEFAULT, data, alwaysTrue(), new NodeCursorBehaviour(data),
            new int[] { LABEL }, scanConsumer, null, NO_LOCKING, 1, mebiBytes(1), alsoWrite,
            PageCacheTracer.NULL, INSTANCE);
    // when
    CapturingBatchSender<GeneratedIndexUpdates> sender = new CapturingBatchSender<>();
    step.process(allNodeIds(data), sender, NULL);
    // then
    if (alsoWrite) {
        assertThat(sender.batches).isEmpty();
        assertThat(scanConsumer.batches.size()).isEqualTo(1);
        assertThat(scanConsumer.batches.get(0).size()).isEqualTo(10);
    } else {
        assertThat(sender.batches.size()).isEqualTo(1);
        assertThat(scanConsumer.batches).isEmpty();
    }
}
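For context, step.process above consumes the node ids produced by the allNodeIds test helper. A minimal sketch of such a helper follows, named allNodeIdsSketch to make clear it is not the actual test utility; it assumes a long[] return type (the real helper may return a primitive id iterator) and reuses only the cursor calls shown in the next example (allocateNodeCursor, scan, next, entityReference).

// Hypothetical sketch, not the actual test utility: collects every node id by
// scanning with a StorageNodeCursor. Requires java.util.ArrayList and java.util.List.
private static long[] allNodeIdsSketch(StubStorageCursors data) {
    List<Long> ids = new ArrayList<>();
    try (StorageNodeCursor cursor = data.allocateNodeCursor(NULL)) {
        cursor.scan();
        while (cursor.next()) {
            ids.add(cursor.entityReference());
        }
    }
    return ids.stream().mapToLong(Long::longValue).toArray();
}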
Use of org.neo4j.storageengine.api.StorageNodeCursor in project neo4j by neo4j.
Class GenerateIndexUpdatesStepTest, method shouldGenerateEntityTokenUpdates.
@ValueSource(booleans = { true, false })
@ParameterizedTest
void shouldGenerateEntityTokenUpdates(boolean alsoWrite) throws Exception {
    // given
    StubStorageCursors data = someUniformData(10);
    TestTokenScanConsumer scanConsumer = new TestTokenScanConsumer();
    GenerateIndexUpdatesStep<StorageNodeCursor> step = new GenerateIndexUpdatesStep<>(
            new SimpleStageControl(), DEFAULT, data, alwaysTrue(), new NodeCursorBehaviour(data),
            new int[] { LABEL }, null, scanConsumer, NO_LOCKING, 1, mebiBytes(1), alsoWrite,
            PageCacheTracer.NULL, INSTANCE);
    Set<TestTokenScanConsumer.Record> expectedUpdates = new HashSet<>();
    try (StorageNodeCursor cursor = data.allocateNodeCursor(NULL)) {
        cursor.scan();
        while (cursor.next()) {
            expectedUpdates.add(new TestTokenScanConsumer.Record(cursor.entityReference(), cursor.labels()));
        }
    }
    // when
    CapturingBatchSender<GeneratedIndexUpdates> sender = new CapturingBatchSender<>();
    step.process(allNodeIds(data), sender, NULL);
    // then
    if (alsoWrite) {
        for (TestTokenScanConsumer.Record tokenUpdate : scanConsumer.batches.get(0)) {
            assertThat(expectedUpdates.remove(tokenUpdate)).isTrue();
        }
    } else {
        GeneratedIndexUpdates updates = sender.batches.get(0);
        updates.completeBatch();
        for (TestTokenScanConsumer.Record tokenUpdate : scanConsumer.batches.get(0)) {
            assertThat(expectedUpdates.remove(tokenUpdate)).isTrue();
        }
    }
    assertThat(expectedUpdates).isEmpty();
}
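Note that expectedUpdates.remove(tokenUpdate) above can only succeed if TestTokenScanConsumer.Record compares by value over the entity id and its labels. A minimal sketch of such a value type follows; the field names, the long[] label type, and the class name RecordSketch are assumptions, not the real class.

// Hypothetical value type illustrating the equality contract the HashSet relies on;
// the real TestTokenScanConsumer.Record may differ. Requires java.util.Arrays.
static final class RecordSketch {
    final long entityId;
    final long[] tokens;

    RecordSketch(long entityId, long[] tokens) {
        this.entityId = entityId;
        this.tokens = tokens;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (!(o instanceof RecordSketch)) {
            return false;
        }
        RecordSketch other = (RecordSketch) o;
        return entityId == other.entityId && Arrays.equals(tokens, other.tokens);
    }

    @Override
    public int hashCode() {
        return 31 * Long.hashCode(entityId) + Arrays.hashCode(tokens);
    }
}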
Use of org.neo4j.storageengine.api.StorageNodeCursor in project neo4j by neo4j.
Class StoreScanStageTest, method shouldGenerateUpdatesInParallel.
@ValueSource(booleans = { true, false })
@ParameterizedTest(name = "parallelWrite={0}")
void shouldGenerateUpdatesInParallel(boolean parallelWrite) {
    // given
    StubStorageCursors data = someData();
    EntityIdIterator entityIdIterator = new CursorEntityIdIterator<>(data.allocateNodeCursor(NULL));
    var propertyConsumer = new ThreadCapturingPropertyConsumer();
    var tokenConsumer = new ThreadCapturingTokenConsumer();
    ControlledLockFunction lockFunction = new ControlledLockFunction();
    StoreScanStage<StorageNodeCursor> scan = new StoreScanStage<>(
            dbConfig, config, ct -> entityIdIterator, NO_EXTERNAL_UPDATES, new AtomicBoolean(true), data,
            new int[] { LABEL }, alwaysTrue(), propertyConsumer, tokenConsumer, new NodeCursorBehaviour(data),
            lockFunction, parallelWrite, jobScheduler, PageCacheTracer.NULL, EmptyMemoryTracker.INSTANCE);
    // when
    runScan(scan);
    // then it completes and more than one thread was involved in the scan
    assertThat(lockFunction.seenThreads.size()).isGreaterThan(1);
    if (parallelWrite) {
        assertThat(propertyConsumer.seenThreads.size()).isGreaterThan(1);
        assertThat(tokenConsumer.seenThreads.size()).isGreaterThan(1);
    } else {
        assertThat(propertyConsumer.seenThreads.size()).isEqualTo(1);
        assertThat(tokenConsumer.seenThreads.size()).isEqualTo(1);
    }
}
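The assertions on seenThreads work because each thread-capturing stub simply records the calling thread for every batch it receives. A minimal sketch of that idea follows; the class and method names are illustrative, not the real test doubles, which implement the consumer interfaces StoreScanStage expects.

// Hypothetical thread-recording stub. Requires java.util.Set and
// java.util.concurrent.ConcurrentHashMap.
static class ThreadCapturingConsumerSketch {
    final Set<Thread> seenThreads = ConcurrentHashMap.newKeySet();

    void acceptBatch(Object batch) {
        // Record which worker thread delivered this batch; with parallelWrite enabled the
        // set ends up holding more than one thread, otherwise exactly one.
        seenThreads.add(Thread.currentThread());
    }
}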
Use of org.neo4j.storageengine.api.StorageNodeCursor in project neo4j by neo4j.
Class StoreScanStageTest, method shouldPanicAndExitStageOnWriteFailure.
@Test
void shouldPanicAndExitStageOnWriteFailure() {
    // given
    StubStorageCursors data = someData();
    EntityIdIterator entityIdIterator = new CursorEntityIdIterator<>(data.allocateNodeCursor(NULL));
    var failingWriter = new PropertyConsumer(() -> {
        throw new IllegalStateException("Failed to write");
    });
    StoreScanStage<StorageNodeCursor> scan = new StoreScanStage<>(
            dbConfig, config, ct -> entityIdIterator, NO_EXTERNAL_UPDATES, new AtomicBoolean(true), data,
            new int[] { LABEL }, alwaysTrue(), failingWriter, null, new NodeCursorBehaviour(data),
            id -> null, true, jobScheduler, PageCacheTracer.NULL, EmptyMemoryTracker.INSTANCE);
    // when/then
    assertThatThrownBy(() -> runScan(scan))
            .isInstanceOf(IllegalStateException.class)
            .hasMessageContaining("Failed to write");
}
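The failing writer here is a consumer stub that runs a supplied callback whenever a batch reaches it, which is how the test injects the IllegalStateException that should make the whole stage panic. A rough sketch of that shape follows; the real PropertyConsumer test double may take a different callback type or method signature.

// Hypothetical stub shape: invokes the injected callback for every batch,
// which in this test throws and is expected to fail the stage.
static class PropertyConsumerSketch {
    private final Runnable onBatch;

    PropertyConsumerSketch(Runnable onBatch) {
        this.onBatch = onBatch;
    }

    void acceptBatch(Object batch) {
        onBatch.run();
    }
}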
Use of org.neo4j.storageengine.api.StorageNodeCursor in project neo4j by neo4j.
Class StoreScanStageTest, method shouldReportCorrectNumberOfEntitiesProcessed.
@Test
void shouldReportCorrectNumberOfEntitiesProcessed() {
    // given
    StubStorageCursors data = someData();
    AtomicReference<StoreScanStage<StorageNodeCursor>> stage = new AtomicReference<>();
    EntityIdIterator entityIdIterator = new CursorEntityIdIterator<>(data.allocateNodeCursor(NULL)) {
        private long manualCounter;

        @Override
        protected boolean fetchNext() {
            assertThat(stage.get().numberOfIteratedEntities())
                    .isEqualTo((manualCounter / config.batchSize()) * config.batchSize());
            manualCounter++;
            return super.fetchNext();
        }
    };
    StoreScanStage<StorageNodeCursor> scan = new StoreScanStage<>(
            dbConfig, config, ct -> entityIdIterator, NO_EXTERNAL_UPDATES, new AtomicBoolean(true), data,
            new int[] { LABEL }, alwaysTrue(), new ThreadCapturingPropertyConsumer(),
            new ThreadCapturingTokenConsumer(), new NodeCursorBehaviour(data), l -> LockService.NO_LOCK,
            true, jobScheduler, PageCacheTracer.NULL, EmptyMemoryTracker.INSTANCE);
    stage.set(scan);
    // when
    runScan(scan);
    // then
    assertThat(scan.numberOfIteratedEntities()).isEqualTo((long) config.batchSize() * NUMBER_OF_BATCHES);
}
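The overridden fetchNext above asserts that numberOfIteratedEntities only advances in whole batches: the expected value is the fetch count rounded down to a multiple of config.batchSize(). A small illustration of that expectation follows, assuming a batch size of 100 chosen purely for the example.

// Illustration only: with an assumed batch size of 100, the stage's iterated-entity
// counter should read 0 while fetching ids 0..99, 100 while fetching ids 100..199, etc.
int batchSize = 100;
for (long fetched : new long[] { 0, 99, 100, 150, 200 }) {
    long expectedCounter = (fetched / batchSize) * batchSize;
    System.out.println("after " + fetched + " fetches, counter should be " + expectedCounter);
}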