Use of org.neo4j.scheduler.JobScheduler in project neo4j by neo4j.
The class FulltextIndexProviderTest, method indexWithUnknownAnalyzerWillBeMarkedAsFailedOnStartup:
@Test
void indexWithUnknownAnalyzerWillBeMarkedAsFailedOnStartup() throws Exception {
    // Create a full-text index.
    long indexId;
    try (KernelTransactionImplementation transaction = getKernelTransaction()) {
        int[] propertyIds = {propIdHa};
        SchemaDescriptor schema = SchemaDescriptor.fulltext(EntityType.NODE, new int[]{labelIdHa}, propertyIds);
        IndexPrototype prototype = IndexPrototype.forSchema(schema).withIndexType(FULLTEXT).withName(NAME);
        SchemaWrite schemaWrite = transaction.schemaWrite();
        IndexDescriptor index = schemaWrite.indexCreate(prototype);
        indexId = index.getId();
        transaction.success();
    }

    // Modify the full-text index such that it has an analyzer configured that does not exist.
    controller.restartDbms(builder -> {
        var cacheTracer = NULL; // the statically imported PageCacheTracer.NULL
        FileSystemAbstraction fs = builder.getFileSystem();
        DatabaseLayout databaseLayout = Neo4jLayout.of(builder.getHomeDirectory()).databaseLayout(DEFAULT_DATABASE_NAME);
        DefaultIdGeneratorFactory idGenFactory =
                new DefaultIdGeneratorFactory(fs, RecoveryCleanupWorkCollector.ignore(), databaseLayout.getDatabaseName());
        // The standalone page cache needs a live JobScheduler for its background I/O.
        try (JobScheduler scheduler = JobSchedulerFactory.createInitialisedScheduler();
             PageCache pageCache = StandalonePageCacheFactory.createPageCache(fs, scheduler, cacheTracer)) {
            StoreFactory factory = new StoreFactory(
                    databaseLayout, Config.defaults(), idGenFactory, pageCache, fs,
                    NullLogProvider.getInstance(), cacheTracer, writable());
            var cursorContext = CursorContext.NULL;
            try (NeoStores neoStores = factory.openAllNeoStores(false)) {
                TokenHolders tokens = StoreTokens.readOnlyTokenHolders(neoStores, CursorContext.NULL);
                SchemaStore schemaStore = neoStores.getSchemaStore();
                SchemaStorage storage = new SchemaStorage(schemaStore, tokens, () -> KernelVersion.LATEST);
                IndexDescriptor index = (IndexDescriptor) storage.loadSingleSchemaRule(indexId, CursorContext.NULL);
                Map<String, Value> indexConfigMap = new HashMap<>(index.getIndexConfig().asMap());
                for (Map.Entry<String, Value> entry : indexConfigMap.entrySet()) {
                    if (entry.getKey().contains("analyzer")) {
                        // This analyzer does not exist!
                        entry.setValue(Values.stringValue("bla-bla-lyzer"));
                    }
                }
                index = index.withIndexConfig(IndexConfig.with(indexConfigMap));
                storage.writeSchemaRule(index, cursorContext, INSTANCE);
                schemaStore.flush(cursorContext);
            }
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
        return builder;
    });

    // Verify that the index comes up in a failed state.
    try (Transaction tx = db.beginTx()) {
        IndexDefinition index = tx.schema().getIndexByName(NAME);
        Schema.IndexState indexState = tx.schema().getIndexState(index);
        assertThat(indexState).isEqualTo(Schema.IndexState.FAILED);
        String indexFailure = tx.schema().getIndexFailure(index);
        assertThat(indexFailure).contains("bla-bla-lyzer");
    }

    // Verify that the failed index can be dropped.
    try (Transaction tx = db.beginTx()) {
        tx.schema().getIndexByName(NAME).drop();
        assertThrows(IllegalArgumentException.class, () -> tx.schema().getIndexByName(NAME));
        tx.commit();
    }
    // The drop must hold across new transactions and a restart.
    try (Transaction tx = db.beginTx()) {
        assertThrows(IllegalArgumentException.class, () -> tx.schema().getIndexByName(NAME));
    }
    controller.restartDbms();
    try (Transaction tx = db.beginTx()) {
        assertThrows(IllegalArgumentException.class, () -> tx.schema().getIndexByName(NAME));
    }
}
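The JobScheduler in this test exists only to serve the page cache: StandalonePageCacheFactory needs a live scheduler for its background I/O, and the two must be closed together. A minimal self-contained sketch of just that pattern, assuming the 4.x package layout (the class name is ours; the Neo4j calls are the same ones the test uses):

import org.neo4j.io.fs.DefaultFileSystemAbstraction;
import org.neo4j.io.pagecache.PageCache;
import org.neo4j.io.pagecache.impl.muninn.StandalonePageCacheFactory;
import org.neo4j.io.pagecache.tracing.PageCacheTracer;
import org.neo4j.kernel.impl.scheduler.JobSchedulerFactory;
import org.neo4j.scheduler.JobScheduler;

public class OfflinePageCacheSketch {
    public static void main(String[] args) throws Exception {
        // The scheduler must outlive the page cache it backs; try-with-resources
        // closes the three resources in reverse creation order.
        try (DefaultFileSystemAbstraction fs = new DefaultFileSystemAbstraction();
             JobScheduler scheduler = JobSchedulerFactory.createInitialisedScheduler();
             PageCache pageCache = StandalonePageCacheFactory.createPageCache(fs, scheduler, PageCacheTracer.NULL)) {
            // Open NeoStores and rewrite schema rules here, as the test above does.
        }
    }
}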
Use of org.neo4j.scheduler.JobScheduler in project neo4j by neo4j.
The class FulltextIndexProviderFactory, method newInstance:
@Override
public Lifecycle newInstance(ExtensionContext context, Dependencies dependencies) {
    Config config = dependencies.getConfig();
    boolean ephemeral = config.get(GraphDatabaseInternalSettings.ephemeral_lucene);
    FileSystemAbstraction fileSystemAbstraction = dependencies.fileSystem();
    DirectoryFactory directoryFactory = directoryFactory(ephemeral);
    JobScheduler scheduler = dependencies.scheduler();
    IndexDirectoryStructure.Factory directoryStructureFactory = subProviderDirectoryStructure(context.directory());
    TokenHolders tokenHolders = dependencies.tokenHolders();
    Log log = dependencies.getLogService().getInternalLog(FulltextIndexProvider.class);
    var readOnlyChecker = dependencies.readOnlyChecker();
    if (OperationalMode.SINGLE != context.dbmsInfo().operationalMode) {
        // When running as part of a cluster, indexes must stay writable so the catchup process can apply transactions.
        readOnlyChecker = DatabaseReadOnlyChecker.writable();
    }
    return new FulltextIndexProvider(DESCRIPTOR, directoryStructureFactory, fileSystemAbstraction, config,
            tokenHolders, directoryFactory, readOnlyChecker, scheduler, log);
}
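For context, the scheduler obtained from dependencies.scheduler() is the DBMS-wide JobScheduler; components submit background work against a named Group. A hedged sketch of that usage, reusing scheduler and log from newInstance above and assuming the plain schedule(Group, Runnable) overload (the group choice and job body are illustrative, not what FulltextIndexProvider actually schedules):

// Illustrative only: submit a background job against a named group and wait for it.
JobHandle<?> handle = scheduler.schedule(Group.INDEX_POPULATION,
        () -> log.info("background fulltext maintenance"));
handle.waitTermination();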
Use of org.neo4j.scheduler.JobScheduler in project neo4j by neo4j.
The class ParallelBatchImporterTest, method shouldImportCsvData:
@ParameterizedTest
@MethodSource("params")
void shouldImportCsvData(InputIdGenerator inputIdGenerator, IdType idType) throws Exception {
    this.inputIdGenerator = inputIdGenerator;

    // GIVEN
    ExecutionMonitor processorAssigner = ProcessorAssignmentStrategies.eagerRandomSaturation(config.maxNumberOfProcessors());
    CapturingMonitor monitor = new CapturingMonitor(processorAssigner);
    boolean successful = false;
    Groups groups = new Groups();
    IdGroupDistribution groupDistribution = new IdGroupDistribution(NODE_COUNT, NUMBER_OF_ID_GROUPS, random.random(), groups);
    long nodeRandomSeed = random.nextLong();
    long relationshipRandomSeed = random.nextLong();
    var pageCacheTracer = new DefaultPageCacheTracer();
    JobScheduler jobScheduler = new ThreadPoolJobScheduler();
    // This will have statistically half the nodes be considered dense.
    Config dbConfig = Config.defaults(GraphDatabaseSettings.dense_node_threshold, RELATIONSHIPS_PER_NODE * 2);
    IndexImporterFactoryImpl indexImporterFactory = new IndexImporterFactoryImpl(dbConfig);
    final BatchImporter inserter = new ParallelBatchImporter(
            databaseLayout, fs, pageCacheTracer, config, NullLogService.getInstance(), monitor, EMPTY, dbConfig,
            getFormat(), ImportLogic.NO_MONITOR, jobScheduler, Collector.EMPTY,
            TransactionLogInitializer.getLogFilesInitializer(), indexImporterFactory, INSTANCE);
    LongAdder propertyCount = new LongAdder();
    LongAdder relationshipCount = new LongAdder();
    try {
        // WHEN
        inserter.doImport(Input.input(
                nodes(nodeRandomSeed, NODE_COUNT, config.batchSize(), inputIdGenerator, groupDistribution, propertyCount),
                relationships(relationshipRandomSeed, RELATIONSHIP_COUNT, config.batchSize(), inputIdGenerator,
                        groupDistribution, propertyCount, relationshipCount),
                idType,
                knownEstimates(NODE_COUNT, RELATIONSHIP_COUNT,
                        NODE_COUNT * TOKENS.length / 2, RELATIONSHIP_COUNT * TOKENS.length / 2,
                        NODE_COUNT * TOKENS.length / 2 * Long.BYTES, RELATIONSHIP_COUNT * TOKENS.length / 2 * Long.BYTES,
                        NODE_COUNT * TOKENS.length / 2),
                groups));
        assertThat(pageCacheTracer.pins()).isGreaterThan(0);
        assertThat(pageCacheTracer.pins()).isEqualTo(pageCacheTracer.unpins());
        assertThat(pageCacheTracer.pins()).isEqualTo(Math.addExact(pageCacheTracer.faults(), pageCacheTracer.hits()));

        // THEN
        DatabaseManagementService managementService = getDBMSBuilder(databaseLayout).build();
        GraphDatabaseService db = managementService.database(DEFAULT_DATABASE_NAME);
        try (Transaction tx = db.beginTx()) {
            inputIdGenerator.reset();
            verifyData(NODE_COUNT, RELATIONSHIP_COUNT, db, tx, groupDistribution, nodeRandomSeed, relationshipRandomSeed);
            tx.commit();
        } finally {
            managementService.shutdown();
        }
        assertConsistent(databaseLayout);
        successful = true;
    } finally {
        jobScheduler.close();
        if (!successful) {
            Path failureFile = databaseLayout.databaseDirectory().resolve("input");
            try (PrintStream out = new PrintStream(Files.newOutputStream(failureFile))) {
                out.println("Seed used in this failing run: " + random.seed());
                out.println(inputIdGenerator);
                inputIdGenerator.reset();
                out.println();
                out.println("Processor assignments");
                out.println(processorAssigner.toString());
            }
            System.err.println("Additional debug information stored in " + failureFile);
        }
    }
}
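This test owns its ThreadPoolJobScheduler: the importer borrows it, and the test closes it in the finally block. A minimal self-contained sketch of that lifecycle, assuming the plain schedule(Group, Runnable) overload is available and using Group.FILE_IO_HELPER purely as an illustrative group choice:

import org.neo4j.scheduler.Group;
import org.neo4j.scheduler.JobHandle;
import org.neo4j.scheduler.JobScheduler;
import org.neo4j.test.scheduler.ThreadPoolJobScheduler;

public class SchedulerLifecycleSketch {
    public static void main(String[] args) throws Exception {
        JobScheduler scheduler = new ThreadPoolJobScheduler();
        try {
            // Hand the scheduler to whatever needs background threads
            // (here just a trivial job), then wait for the work to finish.
            JobHandle<?> handle = scheduler.schedule(Group.FILE_IO_HELPER,
                    () -> System.out.println("import work runs on a scheduler thread"));
            handle.waitTermination();
        } finally {
            // The owner closes the scheduler, exactly as the test's finally block does.
            scheduler.close();
        }
    }
}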
Use of org.neo4j.scheduler.JobScheduler in project neo4j by neo4j.
The class CsvInputBatchImportIT, method shouldImportDataComingFromCsvFiles:
@Test
void shouldImportDataComingFromCsvFiles() throws Exception {
    // GIVEN
    Config dbConfig = Config.newBuilder().set(db_timezone, LogTimeZone.SYSTEM).set(dense_node_threshold, 5).build();
    try (JobScheduler scheduler = new ThreadPoolJobScheduler()) {
        BatchImporter importer = new ParallelBatchImporter(
                databaseLayout, fileSystem, PageCacheTracer.NULL, smallBatchSizeConfig(), NullLogService.getInstance(),
                ExecutionMonitor.INVISIBLE, EMPTY, dbConfig, defaultFormat(), ImportLogic.NO_MONITOR, scheduler,
                Collector.EMPTY, TransactionLogInitializer.getLogFilesInitializer(),
                new IndexImporterFactoryImpl(dbConfig), INSTANCE);
        List<InputEntity> nodeData = randomNodeData();
        List<InputEntity> relationshipData = randomRelationshipData(nodeData);

        // WHEN
        importer.doImport(csv(nodeDataAsFile(nodeData), relationshipDataAsFile(relationshipData), IdType.STRING,
                lowBufferSize(COMMAS)));

        // THEN
        verifyImportedData(nodeData, relationshipData);
    }
}
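The notable difference from shouldImportCsvData above is resource handling: here the scheduler is opened in try-with-resources, so it is closed even if doImport throws, whereas the previous test manages it with an explicit finally block alongside its failure-reporting cleanup.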