Usage example of org.neo4j.io.pagecache.context.CursorContext.NULL in the neo4j project: method writeHeaderInDirtyTreeMustNotDeadlock of class GBPTreeTest.
@Test
void writeHeaderInDirtyTreeMustNotDeadlock() throws IOException {
    // A dirty tree forces checkpoint to schedule cleanup work; writing a header
    // during that checkpoint must complete without blocking against the cleanup.
    try (PageCache pageCache = createPageCache(defaultPageSize * 4)) {
        makeDirty(pageCache);

        byte[] headerBytes = "failed".getBytes();
        Consumer<PageCursor> headerWriter = cursor -> cursor.putBytes(headerBytes);

        try (GBPTree<MutableLong, MutableLong> index =
                index(pageCache).with(RecoveryCleanupWorkCollector.ignore()).build()) {
            index.checkpoint(headerWriter, NULL);
        }

        // The header written during checkpoint must be readable afterwards.
        verifyHeader(pageCache, headerBytes);
    }
}
Usage example of org.neo4j.io.pagecache.context.CursorContext.NULL in the neo4j project: method verifyHeader of class GBPTreeTest.
// Asserts that the header stored in the tree file equals expectedHeader, reading it
// two ways: as a side effect of opening the tree, and via the static readHeader entry.
private void verifyHeader(PageCache pageCache, byte[] expectedHeader) throws IOException {
    byte[] actualHeader = new byte[expectedHeader.length];
    AtomicInteger actualLength = new AtomicInteger();
    Header.Reader headerReader = data -> {
        actualLength.set(data.limit());
        data.get(actualHeader);
    };

    // WHEN: header is read as part of tree construction (open/close is enough)
    index(pageCache).with(headerReader).build().close();
    // THEN
    assertEquals(expectedHeader.length, actualLength.get());
    assertArrayEquals(expectedHeader, actualHeader);

    // WHEN: header is read standalone, without constructing the tree
    GBPTree.readHeader(pageCache, indexFile, headerReader, DEFAULT_DATABASE_NAME, NULL);
    // THEN
    assertEquals(expectedHeader.length, actualLength.get());
    assertArrayEquals(expectedHeader, actualHeader);
}
Usage example of org.neo4j.io.pagecache.context.CursorContext.NULL in the neo4j project: method overwriteHeaderMustOnlyOverwriteHeaderNotState of class GBPTreeTest.
@Test
void overwriteHeaderMustOnlyOverwriteHeaderNotState() throws Exception {
    // GIVEN a tree created with a random initial header
    byte[] initialHeader = new byte[random.nextInt(100)];
    random.nextBytes(initialHeader);
    Consumer<PageCursor> headerWriter = pc -> pc.putBytes(initialHeader);
    try (PageCache pageCache = createPageCache(defaultPageSize)) {
        index(pageCache).with(headerWriter).build().close();
        Pair<TreeState, TreeState> treeStatesBeforeOverwrite = readTreeStates(pageCache);

        // WHEN overwriting the header with new random contents
        byte[] newHeader = new byte[random.nextInt(100)];
        random.nextBytes(newHeader);
        GBPTree.overwriteHeader(pageCache, indexFile, pc -> pc.putBytes(newHeader), DEFAULT_DATABASE_NAME, NULL);
        Pair<TreeState, TreeState> treeStatesAfterOverwrite = readTreeStates(pageCache);

        // THEN both tree states are untouched
        // (message fixed: was "to exactly the same", missing "be")
        assertEquals(treeStatesBeforeOverwrite.getLeft(), treeStatesAfterOverwrite.getLeft(),
                "expected tree state to be exactly the same before and after overwriting header");
        assertEquals(treeStatesBeforeOverwrite.getRight(), treeStatesAfterOverwrite.getRight(),
                "expected tree state to be exactly the same before and after overwriting header");

        // Verify header was actually overwritten. Do this after reading tree states
        // because it will bump tree generation.
        verifyHeader(pageCache, newHeader);
    }
}
Usage example of org.neo4j.io.pagecache.context.CursorContext.NULL in the neo4j project: method writableTokenHolders of class GraphStoreFixture.
/**
 * Creates writable {@code TokenHolders} whose token creators allocate a fresh id from
 * the id batch and record the token creation in the given transaction, then seeds the
 * holders with all tokens currently readable from the underlying stores.
 */
public TokenHolders writableTokenHolders() {
    TokenHolder propertyKeys = new DelegatingTokenHolder(
            buildTokenCreator((name, internal, tx, next) -> {
                int tokenId = next.propertyKey();
                tx.propertyKey(tokenId, name, internal);
                return tokenId;
            }),
            TokenHolder.TYPE_PROPERTY_KEY);
    TokenHolder labels = new DelegatingTokenHolder(
            buildTokenCreator((name, internal, tx, next) -> {
                int tokenId = next.label();
                tx.nodeLabel(tokenId, name, internal);
                return tokenId;
            }),
            TokenHolder.TYPE_LABEL);
    TokenHolder relationshipTypes = new DelegatingTokenHolder(
            buildTokenCreator((name, internal, tx, next) -> {
                int tokenId = next.relationshipType();
                tx.relationshipType(tokenId, name, internal);
                return tokenId;
            }),
            TokenHolder.TYPE_RELATIONSHIP_TYPE);

    TokenHolders holders = new TokenHolders(propertyKeys, labels, relationshipTypes);
    // Populate with every token already present in the store.
    holders.setInitialTokens(allReadableTokens(directStoreAccess().nativeStores()), NULL);
    return holders;
}
Usage example of org.neo4j.io.pagecache.context.CursorContext.NULL in the neo4j project: method logFile of class AbstractLogTailScannerTest.
/**
 * Builds a {@code LogCreator} that, when invoked, writes the given entries into the
 * transaction log for the requested log version and records each entry's
 * {@code LogPosition} in the supplied positions map.
 *
 * Entry handling:
 * - StartEntry: writes a start entry carrying the checksum of the previous commit.
 * - CommitEntry: writes a commit entry; its checksum seeds the next start entry.
 * - CheckPointEntry: writes a checkpoint pointing at the position of a referenced
 *   entry, or at its own position when no target entry is given.
 * - PositionEntry: writes nothing; only registers a position for later reference.
 *
 * IOExceptions are rethrown as UncheckedIOException since the lambda cannot throw.
 */
LogCreator logFile(Entry... entries) {
    return (logVersion, positions) -> {
        try {
            AtomicLong lastTxId = new AtomicLong();
            // Pin both the transaction log version and the checkpoint log version
            // to the version being created.
            logVersionRepository.setCurrentLogVersion(logVersion, NULL);
            logVersionRepository.setCheckpointLogVersion(logVersion, NULL);
            LifeSupport logFileLife = new LifeSupport();
            logFileLife.start();
            logFileLife.add(logFiles);
            LogFile logFile = logFiles.getLogFile();
            var checkpointFile = logFiles.getCheckpointFile();
            // Checksum of the most recently written commit entry; threaded into
            // each subsequent start entry.
            int previousChecksum = BASE_TX_CHECKSUM;
            try {
                TransactionLogWriter logWriter = logFile.getTransactionLogWriter();
                LogEntryWriter writer = logWriter.getWriter();
                for (Entry entry : entries) {
                    // Record where this entry begins so later checkpoint entries
                    // can refer back to it.
                    LogPosition currentPosition = logWriter.getCurrentPosition();
                    positions.put(entry, currentPosition);
                    if (entry instanceof StartEntry) {
                        writer.writeStartEntry(0, 0, previousChecksum, new byte[0]);
                    } else if (entry instanceof CommitEntry) {
                        CommitEntry commitEntry = (CommitEntry) entry;
                        previousChecksum = writer.writeCommitEntry(commitEntry.txId, 0);
                        lastTxId.set(commitEntry.txId);
                    } else if (entry instanceof CheckPointEntry) {
                        CheckPointEntry checkPointEntry = (CheckPointEntry) entry;
                        Entry target = checkPointEntry.withPositionOfEntry;
                        // Checkpoint either the referenced entry's position or,
                        // with no target, the checkpoint entry's own position.
                        LogPosition logPosition = target != null ? positions.get(target) : currentPosition;
                        assert logPosition != null : "No registered log position for " + target;
                        writeCheckpoint(writer, checkpointFile, logPosition);
                    } else if (entry instanceof PositionEntry) {
                        // Don't write anything, this entry is just for registering a position so that
                        // another CheckPointEntry can refer to it
                    } else {
                        throw new IllegalArgumentException("Unknown entry " + entry);
                    }
                }
            } finally {
                // Always shut the log files down, even when writing fails.
                logFileLife.shutdown();
            }
        } catch (IOException e) {
            throw new UncheckedIOException(e);
        }
    };
}
Aggregations