Use of org.neo4j.io.pagecache.PageCache in project neo4j by neo4j.
The class SwitchToSlaveCopyThenBranch, method stopServicesAndHandleBranchedStore:
void stopServicesAndHandleBranchedStore(BranchedDataPolicy branchPolicy, URI masterUri, URI me,
        CancellationRequest cancellationRequest) throws Throwable {
    MoveAfterCopy moveWithCopyThenBranch = (moves, fromDirectory, toDirectory) -> {
        stopServices();
        msgLog.debug("Branching store: " + storeDir);
        branchPolicy.handle(storeDir, pageCache, logService);
        msgLog.debug("Moving downloaded store from " + fromDirectory + " to " + toDirectory);
        MoveAfterCopy.moveReplaceExisting().move(moves, fromDirectory, toDirectory);
        msgLog.debug("Moved downloaded store from " + fromDirectory + " to " + toDirectory);
    };
    copyStore(masterUri, me, cancellationRequest, moveWithCopyThenBranch);
}
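Before the downloaded store replaces the old one, the lambda stops services, lets the BranchedDataPolicy archive the existing store (using the PageCache to read it), and only then delegates the actual file moves to the stock strategy. A minimal sketch of the same wrap-then-delegate pattern in isolation, assuming only what the snippet shows (MoveAfterCopy is a functional interface whose move(...) performs the moves); the withPreMoveHook name is ours:

// Hypothetical helper: decorate moveReplaceExisting() with arbitrary pre-move work.
static MoveAfterCopy withPreMoveHook(Runnable preMoveHook) {
    return (moves, fromDirectory, toDirectory) -> {
        preMoveHook.run(); // e.g. stop services and branch the old store, as above
        MoveAfterCopy.moveReplaceExisting().move(moves, fromDirectory, toDirectory);
    };
}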
Use of org.neo4j.io.pagecache.PageCache in project neo4j by neo4j.
The class StoreCopyClientTest, method shouldDeleteTempCopyFolderOnFailures:
@Test
public void shouldDeleteTempCopyFolderOnFailures() throws Exception {
    // GIVEN
    File initialStore = testDir.directory("initialStore");
    File backupStore = testDir.directory("backupStore");
    PageCache pageCache = pageCacheRule.getPageCache(fileSystem);
    GraphDatabaseService initialDatabase = createInitialDatabase(initialStore);
    StoreCopyClient copier = new StoreCopyClient(backupStore, Config.empty(), loadKernelExtensions(),
            NullLogProvider.getInstance(), fileSystem, pageCache,
            new StoreCopyClient.Monitor.Adapter(), false);
    CancellationRequest falseCancellationRequest = () -> false;
    RuntimeException exception = new RuntimeException("Boom!");
    StoreCopyClient.StoreCopyRequester storeCopyRequest =
            new LocalStoreCopyRequester((GraphDatabaseAPI) initialDatabase, initialStore, fileSystem) {
                @Override
                public void done() {
                    throw exception;
                }
            };
    // WHEN
    try {
        copier.copyStore(storeCopyRequest, falseCancellationRequest, MoveAfterCopy.moveReplaceExisting());
        fail("should have thrown");
    } catch (RuntimeException ex) {
        assertEquals(exception, ex);
    }
    // THEN
    assertFalse(new File(backupStore, TEMP_COPY_DIRECTORY_NAME).exists());
}
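The anonymous LocalStoreCopyRequester overrides done() to fail at the very end of the copy, and the test asserts the temp folder was cleaned up anyway. Judging by the () -> false lambda, CancellationRequest is a boolean-valued functional interface; a hypothetical variant that requests cancellation after a deadline could exercise the same cleanup path from the cancellation side (the names below are ours):

// Hypothetical: cancel the copy once a deadline passes.
long deadline = System.currentTimeMillis() + 10_000;
CancellationRequest timedCancellation = () -> System.currentTimeMillis() > deadline;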
Use of org.neo4j.io.pagecache.PageCache in project neo4j by neo4j.
The class StoreCopyClientTest, method shouldResetNeoStoreLastTransactionOffsetForNonForensicCopy:
@Test
public void shouldResetNeoStoreLastTransactionOffsetForNonForensicCopy() throws Exception {
    // GIVEN
    File initialStore = testDir.directory("initialStore");
    File backupStore = testDir.directory("backupStore");
    PageCache pageCache = pageCacheRule.getPageCache(fileSystem);
    createInitialDatabase(initialStore);
    long originalTransactionOffset = MetaDataStore.getRecord(pageCache,
            new File(initialStore, MetaDataStore.DEFAULT_NAME),
            MetaDataStore.Position.LAST_CLOSED_TRANSACTION_LOG_BYTE_OFFSET);
    GraphDatabaseService initialDatabase = startDatabase(initialStore);
    StoreCopyClient copier = new StoreCopyClient(backupStore, Config.empty(), loadKernelExtensions(),
            NullLogProvider.getInstance(), fileSystem, pageCache,
            new StoreCopyClient.Monitor.Adapter(), false);
    CancellationRequest falseCancellationRequest = () -> false;
    StoreCopyClient.StoreCopyRequester storeCopyRequest =
            new LocalStoreCopyRequester((GraphDatabaseAPI) initialDatabase, initialStore, fileSystem);
    // WHEN
    copier.copyStore(storeCopyRequest, falseCancellationRequest, MoveAfterCopy.moveReplaceExisting());
    // THEN
    long updatedTransactionOffset = MetaDataStore.getRecord(pageCache,
            new File(backupStore, MetaDataStore.DEFAULT_NAME),
            MetaDataStore.Position.LAST_CLOSED_TRANSACTION_LOG_BYTE_OFFSET);
    assertNotEquals(originalTransactionOffset, updatedTransactionOffset);
    assertEquals(LogHeader.LOG_HEADER_SIZE, updatedTransactionOffset);
    assertFalse(new File(backupStore, TEMP_COPY_DIRECTORY_NAME).exists());
}
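MetaDataStore.getRecord reads a single fixed field of the neostore file directly through the PageCache, which is how the test compares the last-closed-transaction log offset before and after the copy. A minimal sketch of the same call for a neighbouring field, assuming Position.LAST_TRANSACTION_ID exists alongside the offset position used above (storeDir is a placeholder for any store directory):

// Read the last committed transaction id the same way the test reads the log offset.
File neoStoreFile = new File(storeDir, MetaDataStore.DEFAULT_NAME);
long lastTxId = MetaDataStore.getRecord(pageCache, neoStoreFile,
        MetaDataStore.Position.LAST_TRANSACTION_ID);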
Use of org.neo4j.io.pagecache.PageCache in project neo4j by neo4j.
The class ToFileStoreWriterTest, method shouldLetPageCacheHandleRecordStoresAndNativeLabelScanStoreFiles:
@Test
public void shouldLetPageCacheHandleRecordStoresAndNativeLabelScanStoreFiles() throws Exception {
    // GIVEN
    List<FileMoveAction> actions = new ArrayList<>();
    PageCache pageCache = spy(pageCacheRule.getPageCache(fs));
    ToFileStoreWriter writer = new ToFileStoreWriter(directory.absolutePath(), fs,
            new StoreCopyClient.Monitor.Adapter(), pageCache, actions);
    ByteBuffer tempBuffer = ByteBuffer.allocate(128);
    // WHEN
    for (StoreType type : StoreType.values()) {
        if (type.isRecordStore()) {
            String fileName = type.getStoreFile().fileName(STORE);
            writeAndVerifyWrittenThroughPageCache(pageCache, writer, tempBuffer, fileName);
        }
    }
    writeAndVerifyWrittenThroughPageCache(pageCache, writer, tempBuffer, NativeLabelScanStore.FILE_NAME);
}
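The helper writeAndVerifyWrittenThroughPageCache is not shown in the snippet. Since the PageCache here is a Mockito spy, the verification half plausibly asserts that the writer mapped the file through the cache rather than writing it via the raw file system. A hypothetical sketch of that assertion; the helper name is ours, and the exact map(...) signature and varargs matching vary across Neo4j and Mockito versions:

// Assumes static imports from org.mockito.Mockito (verify, atLeastOnce, eq, anyInt, any).
private static void verifyMappedThroughPageCache(PageCache pageCache, File expectedFile) throws IOException {
    // The spy records real interactions, so this asserts the writer went
    // through PageCache.map(...) for this file at least once.
    verify(pageCache, atLeastOnce()).map(eq(expectedFile), anyInt(), any());
}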
Use of org.neo4j.io.pagecache.PageCache in project neo4j by neo4j.
The class BackupStoreCopyInteractionStressTesting, method shouldBehaveCorrectlyUnderStress:
@Test
public void shouldBehaveCorrectlyUnderStress() throws Exception {
    int numberOfCores = parseInt(fromEnv("BACKUP_STORE_COPY_INTERACTION_STRESS_NUMBER_OF_CORES", DEFAULT_NUMBER_OF_CORES));
    int numberOfEdges = parseInt(fromEnv("BACKUP_STORE_COPY_INTERACTION_STRESS_NUMBER_OF_EDGES", DEFAULT_NUMBER_OF_EDGES));
    long durationInMinutes = parseLong(fromEnv("BACKUP_STORE_COPY_INTERACTION_STRESS_DURATION", DEFAULT_DURATION_IN_MINUTES));
    String workingDirectory = fromEnv("BACKUP_STORE_COPY_INTERACTION_STRESS_WORKING_DIRECTORY", DEFAULT_WORKING_DIR);
    int baseCoreBackupPort = parseInt(fromEnv("BACKUP_STORE_COPY_INTERACTION_STRESS_BASE_CORE_BACKUP_PORT", DEFAULT_BASE_CORE_BACKUP_PORT));
    int baseEdgeBackupPort = parseInt(fromEnv("BACKUP_STORE_COPY_INTERACTION_STRESS_BASE_EDGE_BACKUP_PORT", DEFAULT_BASE_EDGE_BACKUP_PORT));
    boolean enableIndexes = parseBoolean(fromEnv("BACKUP_STORE_COPY_INTERACTION_STRESS_ENABLE_INDEXES", DEFAULT_ENABLE_INDEXES));
    String txPrune = fromEnv("BACKUP_STORE_COPY_INTERACTION_STRESS_TX_PRUNE", DEFAULT_TX_PRUNE);
    File clusterDirectory = ensureExistsAndEmpty(new File(workingDirectory, "cluster"));
    File backupDirectory = ensureExistsAndEmpty(new File(workingDirectory, "backups"));
    BiFunction<Boolean, Integer, SocketAddress> backupAddress = (isCore, id) ->
            new AdvertisedSocketAddress("localhost", (isCore ? baseCoreBackupPort : baseEdgeBackupPort) + id);
    Map<String, String> coreParams = enableRaftMessageLogging(
            configureRaftLogRotationAndPruning(configureTxLogRotationAndPruning(new HashMap<>(), txPrune)));
    Map<String, String> readReplicaParams = configureTxLogRotationAndPruning(new HashMap<>(), txPrune);
    Map<String, IntFunction<String>> instanceCoreParams =
            configureBackup(new HashMap<>(), id -> backupAddress.apply(true, id));
    Map<String, IntFunction<String>> instanceReadReplicaParams =
            configureBackup(new HashMap<>(), id -> backupAddress.apply(false, id));
    HazelcastDiscoveryServiceFactory discoveryServiceFactory = new HazelcastDiscoveryServiceFactory();
    Cluster cluster = new Cluster(clusterDirectory, numberOfCores, numberOfEdges, discoveryServiceFactory,
            coreParams, instanceCoreParams, readReplicaParams, instanceReadReplicaParams, Standard.LATEST_NAME);
    AtomicBoolean stopTheWorld = new AtomicBoolean();
    BooleanSupplier notExpired = untilTimeExpired(durationInMinutes, MINUTES);
    BooleanSupplier keepGoing = () -> !stopTheWorld.get() && notExpired.getAsBoolean();
    Runnable onFailure = () -> stopTheWorld.set(true);
    ExecutorService service = Executors.newFixedThreadPool(3);
    try {
        cluster.start();
        if (enableIndexes) {
            Workload.setupIndexes(cluster);
        }
        Future<Throwable> workload = service.submit(new Workload(keepGoing, onFailure, cluster));
        Future<Throwable> startStopWorker = service.submit(
                new StartStopLoad(fs, pageCache, keepGoing, onFailure, cluster, numberOfCores, numberOfEdges));
        Future<Throwable> backupWorker = service.submit(
                new BackupLoad(keepGoing, onFailure, cluster, numberOfCores, numberOfEdges, backupDirectory, backupAddress));
        long timeout = durationInMinutes + 5;
        assertNull(Exceptions.stringify(workload.get()), workload.get(timeout, MINUTES));
        assertNull(Exceptions.stringify(startStopWorker.get()), startStopWorker.get(timeout, MINUTES));
        assertNull(Exceptions.stringify(backupWorker.get()), backupWorker.get(timeout, MINUTES));
    } finally {
        cluster.shutdown();
        service.shutdown();
    }
    // clean up disk space only when everything went well
    FileUtils.deleteRecursively(clusterDirectory);
    FileUtils.deleteRecursively(backupDirectory);
}
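Both fromEnv and untilTimeExpired are project helpers that the snippet uses but does not show. Hypothetical sketches consistent with how the test calls them (an environment-variable lookup with a default, and a time-boxed BooleanSupplier); the bodies below are our assumptions, not the project's actual code:

import java.util.concurrent.TimeUnit;
import java.util.function.BooleanSupplier;

// Hypothetical: read configuration from the environment, falling back to a default.
static String fromEnv(String name, String defaultValue) {
    String value = System.getenv(name);
    return value == null ? defaultValue : value;
}

// Hypothetical: a supplier that stays true until the given duration has elapsed.
static BooleanSupplier untilTimeExpired(long duration, TimeUnit unit) {
    long endTime = System.currentTimeMillis() + unit.toMillis(duration);
    return () -> System.currentTimeMillis() < endTime;
}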