Example use of org.neo4j.io.pagecache.context.CursorContext in the neo4j project:
class MuninnPageCacheTest, method markCursorContextAsDirtyWhenReadingDataFromMoreRecentTransactions.
@Test
void markCursorContextAsDirtyWhenReadingDataFromMoreRecentTransactions() throws IOException {
// Version context whose "last closed transaction" is 3.
TestVersionContext versionContext = new TestVersionContext(() -> 3);
try (MuninnPageCache pageCache = createPageCache(fs, 2, PageCacheTracer.NULL);
PagedFile pagedFile = map(pageCache, file("a"), 8);
CursorContext cursorContext = new CursorContext(PageCursorTracer.NULL, versionContext)) {
// Write page 0 on behalf of a transaction (7) newer than the last closed one (3).
versionContext.initWrite(7);
try (PageCursor writer = pagedFile.io(0, PF_SHARED_WRITE_LOCK, cursorContext)) {
assertTrue(writer.next());
writer.putLong(3);
}
// A fresh read starts out clean...
versionContext.initRead();
assertFalse(versionContext.isDirty());
// ...but reading data written by a more recent transaction must flip it to dirty.
try (PageCursor reader = pagedFile.io(0, PF_SHARED_READ_LOCK, cursorContext)) {
assertTrue(reader.next());
assertEquals(3, reader.getLong());
assertTrue(versionContext.isDirty());
}
}
}
Example use of org.neo4j.io.pagecache.context.CursorContext in the neo4j project:
class EncodingIdMapperTest, method shouldDetectCorrectDuplicateInputIdsWhereManyAccidentalInManyGroups.
@Test
public void shouldDetectCorrectDuplicateInputIdsWhereManyAccidentalInManyGroups() {
// GIVEN a set of groups and an encoder we can force into producing collisions
final ControlledEncoder encoder = new ControlledEncoder(new LongEncoder());
final int idsPerGroup = 20;
int groupCount = 5;
for (int g = 0; g < groupCount; g++) {
groups.getOrCreate("Group " + g);
}
IdMapper mapper = mapper(encoder, Radix.LONG, EncodingIdMapper.NO_MONITOR, ParallelSort.DEFAULT, numberOfCollisions -> new LongCollisionValues(NumberArrayFactories.HEAP, numberOfCollisions, INSTANCE));
final AtomicReference<Group> currentGroup = new AtomicReference<>();
PropertyValueLookup ids = (nodeId, cursorContext) -> {
int groupIndex = toIntExact(nodeId / idsPerGroup);
if (groupIndex == groupCount) {
// Past the last group: nothing to look up.
return null;
}
currentGroup.set(groups.get(groupIndex));
// The first 10% of ids (2 of 20) in each group deliberately collide with
// the first 10% of every other group.
if (nodeId % idsPerGroup < 2) {
// Funnel these values into one and the same eId so they are
// guaranteed to be flagged as collisions.
encoder.useThisIdToEncodeNoMatterWhatComesIn(1234567L);
return nodeId % idsPerGroup;
}
// The remaining 90% become accidental collisions with something else.
encoder.useThisIdToEncodeNoMatterWhatComesIn((long) (123456 - currentGroup.get().id()));
return nodeId;
};
// WHEN feeding every id through the mapper
int count = idsPerGroup * groupCount;
for (long nodeId = 0; nodeId < count; nodeId++) {
mapper.put(ids.lookupProperty(nodeId, NULL), nodeId, currentGroup.get());
}
Collector collector = mock(Collector.class);
mapper.prepare(ids, collector, NONE);
// THEN no duplicates should have been reported and every id should map back
verifyNoMoreInteractions(collector);
for (long nodeId = 0; nodeId < count; nodeId++) {
assertEquals(nodeId, mapper.get(ids.lookupProperty(nodeId, NULL), currentGroup.get()));
}
verifyNoMoreInteractions(collector);
assertFalse(mapper.leftOverDuplicateNodesIds().hasNext());
}
Example use of org.neo4j.io.pagecache.context.CursorContext in the neo4j project:
class StageTest, method shouldCloseOnPanic.
@Test
void shouldCloseOnPanic() {
// given
// a producer, a processor, a forked processor and a final step,
// each wired with a ChaosMonkey that (per the assertions below) eventually
// throws a RuntimeException whose message contains "Chaos monkey".
Configuration configuration = DEFAULT;
TrackingPanicMonitor panicMonitor = new TrackingPanicMonitor();
// Randomize ordered vs unordered send-downstream to cover both step modes.
Stage stage = new Stage("test close on panic", null, configuration, random.nextBoolean() ? Step.ORDER_SEND_DOWNSTREAM : 0, ProcessorScheduler.SPAWN_THREAD, panicMonitor) {
{
// Producer
add(new PullingProducerStep(control(), configuration) {
private volatile long ticket;
private final ChaosMonkey chaosMonkey = new ChaosMonkey();
@Override
protected Object nextBatchOrNull(long ticket, int batchSize) {
chaosMonkey.makeChaos();
this.ticket = ticket;
return new int[batchSize];
}
@Override
protected long position() {
// Reports the most recently produced ticket as the position.
return ticket;
}
});
// Processor
add(new ProcessorStep<>(control(), "processor", configuration, 2, NULL) {
private final ChaosMonkey chaosMonkey = new ChaosMonkey();
@Override
protected void process(Object batch, BatchSender sender, CursorContext cursorContext) {
chaosMonkey.makeChaos();
sender.send(batch);
}
});
// Forked processor
add(new ForkedProcessorStep<>(control(), "forked processor", configuration) {
private final ChaosMonkey chaosMonkey = new ChaosMonkey();
@Override
protected void forkedProcess(int id, int processors, Object batch) {
chaosMonkey.makeChaos();
}
});
// Final consumer
add(new ProcessorStep<>(control(), "consumer", configuration, 1, NULL) {
private final ChaosMonkey chaosMonkey = new ChaosMonkey();
@Override
protected void process(Object batch, BatchSender sender, CursorContext cursorContext) throws Throwable {
chaosMonkey.makeChaos();
// don't pass the batch further, i.e. end of the line
}
});
}
};
// when/then: execution must fail, and the failure must be routed through the
// panic monitor carrying the chaos-monkey exception.
assertThrows(RuntimeException.class, () -> superviseDynamicExecution(stage));
assertTrue(panicMonitor.hasReceivedPanic());
assertTrue(panicMonitor.getReceivedPanic().getMessage().contains("Chaos monkey"));
}
Example use of org.neo4j.io.pagecache.context.CursorContext in the neo4j project:
class ReadEntityIdsStep, method process.
@Override
protected void process() {
// Build a fresh cursor context for this pass and derive the entity-id
// iterator from it before delegating to the regular processing loop.
// NOTE(review): the previous cursorContext/entityIdIterator are simply
// replaced here without an explicit close — verify their lifecycle is
// handled elsewhere.
var tracer = pageCacheTracer.createPageCursorTracer(CURSOR_TRACER_TAG);
cursorContext = new CursorContext(tracer);
entityIdIterator = entityIdIteratorSupplier.apply(cursorContext);
super.process();
}
Example use of org.neo4j.io.pagecache.context.CursorContext in the neo4j project:
class GBPTreeGenericCountsStore, method directUpdater.
/**
 * Opens and returns a {@link CountUpdater} that writes straight into the backing tree. This exists
 * for building the initial data set outside the context of transactions, e.g. batch insertion or
 * initial import.
 *
 * @param applyDeltas when {@code true} changes are applied as deltas, which requires reading the
 * current values from the tree; when {@code false} changes are written verbatim, i.e. treated as
 * absolute counts.
 * @param cursorContext underlying page cursor context for the tree reads/writes.
 * @return a {@link CountUpdater} which, once closed, releases the write lock taken here.
 * @throws IOException on failure opening the tree writer.
 */
protected CountUpdater directUpdater(boolean applyDeltas, CursorContext cursorContext) throws IOException {
    Lock lock = this.lock.writeLock();
    lock.lock();
    boolean lockHandedOver = false;
    try {
        CountUpdater.CountWriter writer;
        if (applyDeltas) {
            writer = new DeltaTreeWriter(() -> tree.writer(cursorContext), key -> readCountFromTree(key, cursorContext), layout, maxCacheSize);
        } else {
            writer = new TreeWriter(tree.writer(cursorContext));
        }
        CountUpdater updater = new CountUpdater(writer, lock);
        lockHandedOver = true;
        return updater;
    } finally {
        // The updater now owns the lock; only release it here if we failed
        // before the hand-over completed.
        if (!lockHandedOver) {
            lock.unlock();
        }
    }
}
Aggregations