Example usage of org.neo4j.test.OnDemandJobScheduler in the neo4j/neo4j project: the scheduleUpdatePulling method of class UpdatePullerSchedulerTest.
@Test
public void scheduleUpdatePulling() throws Throwable {
    // Scheduler that only runs jobs when asked to, and keeps them registered afterwards
    // (the 'false' flag disables removal of jobs once they have run).
    OnDemandJobScheduler scheduler = new OnDemandJobScheduler(false);
    UpdatePullerScheduler updatePullerScheduler =
            new UpdatePullerScheduler(scheduler, NullLogProvider.getInstance(), updatePuller, 10);

    // Initialising the puller scheduler registers the recurring job; trigger it by hand.
    updatePullerScheduler.init();
    scheduler.runJob();

    // The triggered job must have pulled updates, and the job must still be scheduled.
    verify(updatePuller).pullUpdates();
    assertNotNull("Job should be scheduled", scheduler.getJob());

    // Shutting the scheduler down must cancel the recurring job.
    updatePullerScheduler.shutdown();
    assertNull("Job should be canceled", scheduler.getJob());
}
Example usage of org.neo4j.test.OnDemandJobScheduler in the neo4j/neo4j project: the createRaftLog method of class SegmentedConcurrentStressIT.
@Override
public SegmentedRaftLog createRaftLog(FileSystemAbstraction fsa, File dir) throws Throwable {
    // Rotate to a new segment once the current one reaches 8 MiB.
    final long rotateAtSize = 8 * 1024 * 1024;
    final int readerPoolSize = 8;
    LogProvider logProvider = getInstance();

    // Pruning strategy built from the raft_log_pruning_strategy setting's default value.
    CoreLogPruningStrategy pruningStrategy =
            new CoreLogPruningStrategyFactory(raft_log_pruning_strategy.getDefaultValue(), logProvider)
                    .newInstance();

    SegmentedRaftLog log = new SegmentedRaftLog(fsa, dir, rotateAtSize,
            new DummyRaftableContentSerializer(), logProvider, readerPoolSize,
            Clocks.fakeClock(), new OnDemandJobScheduler(), pruningStrategy);
    log.start();
    return log;
}
Example usage of org.neo4j.test.OnDemandJobScheduler in the neo4j/neo4j project: the createRaftLog method of class SegmentedRaftLogCursorIT.
private SegmentedRaftLog createRaftLog(long rotateAtSize, String pruneStrategy) {
    // Lazily create the in-memory file system the first time a log is created.
    if (fileSystem == null) {
        fileSystem = new EphemeralFileSystemAbstraction();
    }
    File logDir = new File(RAFT_LOG_DIRECTORY_NAME);
    fileSystem.mkdir(logDir);

    LogProvider logProvider = getInstance();
    CoreLogPruningStrategy pruning =
            new CoreLogPruningStrategyFactory(pruneStrategy, logProvider).newInstance();
    SegmentedRaftLog raftLog = new SegmentedRaftLog(fileSystem, logDir, rotateAtSize,
            new DummyRaftableContentSerializer(), logProvider, 8, Clocks.systemClock(),
            new OnDemandJobScheduler(), pruning);

    // Hand lifecycle management over to the test's life support, then bring it up.
    life.add(raftLog);
    life.init();
    life.start();
    return raftLog;
}
Example usage of org.neo4j.test.OnDemandJobScheduler in the neo4j/neo4j project: the createRaftLog method of class SegmentedRaftLogPartialEntryRecoveryTest.
private SegmentedRaftLog createRaftLog(long rotateAtSize) {
    // Resolve the per-test directory that will hold the raft log segments.
    logDirectory = dir.directory(new File(RAFT_LOG_DIRECTORY_NAME).getName());

    LogProvider logProvider = getInstance();
    // Prune down to the most recent 100 entries.
    CoreLogPruningStrategy pruning =
            new CoreLogPruningStrategyFactory("100 entries", logProvider).newInstance();
    return new SegmentedRaftLog(fsRule.get(), logDirectory, rotateAtSize,
            new CoreReplicatedContentMarshal(), logProvider, 8, Clocks.fakeClock(),
            new OnDemandJobScheduler(), pruning);
}
Example usage of org.neo4j.test.OnDemandJobScheduler in the neo4j/neo4j project: the main method of class ReplayRaftLog.
public static void main(String[] args) throws IOException {
    Args parsed = Args.parse(args);
    String from = parsed.get("from");
    System.out.println("From is " + from);
    String to = parsed.get("to");
    System.out.println("to is " + to);

    File logDirectory = new File(from);
    System.out.println("logDirectory = " + logDirectory);

    Config config = Config.embeddedDefaults(stringMap());
    try (DefaultFileSystemAbstraction fileSystem = new DefaultFileSystemAbstraction()) {
        LogProvider logProvider = getInstance();
        CoreLogPruningStrategy pruningStrategy = new CoreLogPruningStrategyFactory(
                config.get(raft_log_pruning_strategy), logProvider).newInstance();
        SegmentedRaftLog log = new SegmentedRaftLog(fileSystem, logDirectory,
                config.get(raft_log_rotation_size), new CoreReplicatedContentMarshal(),
                logProvider, config.get(raft_log_reader_pool_size), Clocks.systemClock(),
                new OnDemandJobScheduler(), pruningStrategy);

        // Not really, but we need to have a way to pass in the commit index
        long totalCommittedEntries = log.appendIndex();
        for (int i = 0; i <= totalCommittedEntries; i++) {
            // Replay each entry; only transaction payloads are printed.
            ReplicatedContent content = readLogEntry(log, i).content();
            if (content instanceof ReplicatedTransaction) {
                ReplicatedTransaction tx = (ReplicatedTransaction) content;
                ReplicatedTransactionFactory.extractTransactionRepresentation(tx, new byte[0])
                        .accept(element -> {
                            System.out.println(element);
                            return false;
                        });
            }
        }
    }
}
Aggregations