Example use of org.neo4j.graphdb.DependencyResolver in the neo4j project:
the method incrementalWithContext of the class BackupService.
/**
 * Performs an incremental backup based off the given context. This means
 * receiving and applying selectively (i.e. irrespective of the actual state
 * of the target db) a set of transactions starting at the desired txId and
 * spanning up to the latest of the master.
 *
 * @param sourceHostNameOrIp host name or IP address of the backup source (the master)
 * @param sourcePort port of the backup source
 * @param targetDb The database that contains a previous full copy
 * @param timeout client timeout passed to {@code BackupClient} (unit defined by that
 *        class — presumably milliseconds, confirm against BackupClient)
 * @param context The context, containing transaction id to start streaming transaction from
 * @return A backup context, ready to perform
 * @throws IncrementalBackupNotPossibleException if the source no longer retains the
 *         transaction log data needed to stream from the requested transaction id
 */
private BackupOutcome incrementalWithContext(String sourceHostNameOrIp, int sourcePort, GraphDatabaseAPI targetDb, long timeout, RequestContext context) throws IncrementalBackupNotPossibleException {
    DependencyResolver resolver = targetDb.getDependencyResolver();
    ProgressTxHandler handler = new ProgressTxHandler();
    TransactionCommittingResponseUnpacker unpacker = new TransactionCommittingResponseUnpacker(resolver, DEFAULT_BATCH_SIZE, 0);
    Monitors monitors = resolver.resolveDependency(Monitors.class);
    LogProvider logProvider = resolver.resolveDependency(LogService.class).getInternalLogProvider();
    BackupClient client = new BackupClient(sourceHostNameOrIp, sourcePort, null, logProvider, targetDb.storeId(), timeout, unpacker, monitors.newMonitor(ByteCounterMonitor.class, BackupClient.class), monitors.newMonitor(RequestMonitor.class, BackupClient.class), new VersionAwareLogEntryReader<>());
    // Lifespan manages start/stop of the unpacker and client around the transfer.
    try (Lifespan lifespan = new Lifespan(unpacker, client)) {
        try (Response<Void> response = client.incrementalBackup(context)) {
            unpacker.unpackResponse(response, handler);
        }
    } catch (MismatchingStoreIdException e) {
        // The source store is not the store this backup was originally taken from.
        throw new RuntimeException(DIFFERENT_STORE, e);
    } catch (RuntimeException | IOException e) {
        // instanceof is null-safe, so no explicit null check on the cause is needed;
        // hoisting the cause also avoids calling getCause() repeatedly.
        Throwable cause = e.getCause();
        if (cause instanceof MissingLogDataException) {
            // The master has rotated away the logs we need; only a full backup can help.
            throw new IncrementalBackupNotPossibleException(TOO_OLD_BACKUP, cause);
        }
        if (cause instanceof ConnectException) {
            throw new RuntimeException(e.getMessage(), cause);
        }
        throw new RuntimeException("Failed to perform incremental backup.", e);
    } catch (Throwable throwable) {
        // Defensive catch-all so unexpected errors are surfaced with context.
        throw new RuntimeException("Unexpected error", throwable);
    }
    return new BackupOutcome(handler.getLastSeenTransactionId(), true);
}
Example use of org.neo4j.graphdb.DependencyResolver in the neo4j project:
the method setup of the test class IndexSamplingManagerBeanTest.
@Before
public void setup() {
    // Fresh mocks for every test run.
    indexingService = mock(IndexingService.class);
    storeReadLayer = mock(StoreReadLayer.class);
    dataSource = mock(NeoStoreDataSource.class);
    when(dataSource.getStoreLayer()).thenReturn(storeReadLayer);
    // Known label/property names resolve to fixed ids; unknown names resolve to -1.
    when(storeReadLayer.labelGetForName(EXISTING_LABEL)).thenReturn(LABEL_ID);
    when(storeReadLayer.labelGetForName(NON_EXISTING_LABEL)).thenReturn(-1);
    when(storeReadLayer.propertyKeyGetForName(EXISTING_PROPERTY)).thenReturn(PROPERTY_ID);
    when(storeReadLayer.propertyKeyGetForName(NON_EXISTING_PROPERTY)).thenReturn(-1);
    // The data source hands out a resolver that knows about the indexing service.
    DependencyResolver dependencies = mock(DependencyResolver.class);
    when(dependencies.resolveDependency(IndexingService.class)).thenReturn(indexingService);
    when(dataSource.getDependencyResolver()).thenReturn(dependencies);
}
Example use of org.neo4j.graphdb.DependencyResolver in the neo4j project:
the method mustMendDuplicatePropertiesWhenUpgradingFromVersion21 of the test class StoreMigratorFrom21IT.
@Test
public void mustMendDuplicatePropertiesWhenUpgradingFromVersion21() throws Exception {
    // The rules:
    // If an index is present, all duplicates should be removed and the property set to the value in the index
    // If an index is not present, the property should be set to the value of the last duplicate in the property
    // chain, all duplicates except the first should be removed
    // If an index is not present, the first property in the duplicate chain should be kept for the users
    // benefit, moved to a special property value, `__DUPLICATE_<propkey>`
    //
    // This is the broken store that we are upgrading:
    //
    // (#0:Label { keyA: "actual", keyA: "phony!", keyA: "phony!" })
    // (#1 { keyA: "actual", keyA: "actual", keyA: "actual" })
    // (#2:Label { keyA: "real1", keyA: "phony", keyA: "phony", keyD: "real2", keyD: "phony", keyD: "phony" })
    // (#3 { keyA: "real1", keyA: "phony", keyA: "phony", keyD: "real2", keyD: "phony", keyD: "phony" })
    // (#4 { keyA: "actual", keyB: "actual", keyC: "actual" })
    // (#0)-[#0:REL { keyA: "actual", keyA: "actual", keyA: "actual" }]->(#1)
    // (#0)-[#1:REL { keyA: "real1", keyA: "phony", keyA: "phony",
    // keyD: "real2", keyE: "phony", keyF: "phony" }]->(#1)
    // (#2)-[#2:REL { keyA: "actual", keyB: "actual", keyC: "actual" }]->(#0)
    //
    // And this is what we want to end up with, after upgrading:
    //
    // (#0:Label { keyA: "actual" })
    // (#1 { keyA: "actual", __DUPLICATE_keyA: "actual" })
    // (#2:Label { keyA: "real1", keyD: "real2" })
    // (#3 { keyA: "real1", __DUPLICATE_keyA_1: "real1", __DUPLICATE_keyA_2: "real1",
    // keyD: "real2", __DUPLICATE_keyD_1: "real2", __DUPLICATE_keyD_2: "real2" })
    // (#4 { keyA: "actual", keyB: "actual", keyC: "actual" })
    // (#0)-[#0:REL { keyA: "actual", __DUPLICATE_keyA: "actual" }]->(#1)
    // (#0)-[#1:REL { keyA: "real1", __DUPLICATE_keyA_1: "real1", __DUPLICATE_keyA_2: "real1",
    // keyD: "real2", __DUPLICATE_keyD_1: "real2", __DUPLICATE_keyD_2: "real2" }]->(#1)
    // (#2)-[#2:REL { keyA: "actual", keyB: "actual", keyC: "actual" }]->(#0)
    // Locate a pre-built 2.1-format store that contains the duplicate properties above.
    File dir = MigrationTestUtils.find21FormatStoreDirectoryWithDuplicateProperties(storeDir.directory());
    TestGraphDatabaseFactory factory = new TestGraphDatabaseFactory();
    GraphDatabaseBuilder builder = factory.newEmbeddedDatabaseBuilder(dir).setConfig(GraphDatabaseSettings.allow_store_upgrade, "true");
    // Opening the database with allow_store_upgrade=true triggers the store migration;
    // shutting down immediately afterwards leaves the upgraded store on disk.
    GraphDatabaseService database = builder.newGraphDatabase();
    database.shutdown();
    // The upgraded (offline) store must pass a full consistency check.
    ConsistencyCheckService service = new ConsistencyCheckService();
    ConsistencyCheckService.Result result = service.runFullConsistencyCheck(dir.getAbsoluteFile(), Config.empty(), ProgressMonitorFactory.NONE, NullLogProvider.getInstance(), false);
    assertTrue(result.isSuccessful());
    // Reopen the (now already upgraded) store for content verification.
    database = builder.newGraphDatabase();
    // Upgrade is now completed. Verify the contents:
    DependencyResolver dependencyResolver = ((GraphDatabaseAPI) database).getDependencyResolver();
    // Verify that the properties appear correct to the outside world:
    // (verifyProperties presumably accepts any of the listed candidate values per key — confirm against its definition)
    try (Transaction ignore = database.beginTx()) {
        verifyProperties(database.getNodeById(0), Pair.of("keyA", new Object[] { "actual", "phony!", "phony!" }));
        verifyProperties(database.getNodeById(1), Pair.of("keyA", new Object[] { "actual", "actual", "actual" }));
        verifyProperties(database.getNodeById(2), Pair.of("keyA", new Object[] { "real1", "phony", "phony" }), Pair.of("keyD", new Object[] { "real2", "phony", "phony" }));
        verifyProperties(database.getNodeById(3), Pair.of("keyA", new Object[] { "real1", "real1", "real1" }), Pair.of("keyD", new Object[] { "real2", "real2", "real2" }));
        verifyProperties(database.getNodeById(4), Pair.of("keyA", new Object[] { "actual" }), Pair.of("keyB", new Object[] { "actual" }), Pair.of("keyC", new Object[] { "actual" }));
        verifyProperties(database.getRelationshipById(0), Pair.of("keyA", new Object[] { "actual", "actual", "actual" }));
        verifyProperties(database.getRelationshipById(1), Pair.of("keyA", new Object[] { "real1", "real1", "real1" }), Pair.of("keyD", new Object[] { "real2", "real2", "real2" }));
        verifyProperties(database.getRelationshipById(2), Pair.of("keyA", new Object[] { "actual" }), Pair.of("keyB", new Object[] { "actual" }), Pair.of("keyC", new Object[] { "actual" }));
    }
    // Verify that there are no two properties on the entities, that have the same key:
    // (This is important because the verification above cannot tell if we have two keys with the same value)
    // NOTE(review): this relies on Iterators.asUniqueSet failing on duplicate elements — confirm its contract.
    KernelAPI kernel = dependencyResolver.resolveDependency(KernelAPI.class);
    try (KernelTransaction tx = kernel.newTransaction(KernelTransaction.Type.implicit, AnonymousContext.read());
        Statement statement = tx.acquireStatement()) {
        Iterators.asUniqueSet(statement.readOperations().nodeGetPropertyKeys(0));
        Iterators.asUniqueSet(statement.readOperations().nodeGetPropertyKeys(1));
        Iterators.asUniqueSet(statement.readOperations().nodeGetPropertyKeys(2));
        Iterators.asUniqueSet(statement.readOperations().relationshipGetPropertyKeys(0));
        Iterators.asUniqueSet(statement.readOperations().relationshipGetPropertyKeys(1));
    }
    database.shutdown();
}
Example use of org.neo4j.graphdb.DependencyResolver in the neo4j project:
the method shouldReturnNullIfDataSourceHasBeenUnregistered of the test class DependencyResolverSupplierTest.
@Test
public void shouldReturnNullIfDataSourceHasBeenUnregistered() throws Exception {
    // given: a supplier backed by a manager that currently tracks one data source
    DataSourceManager dataSourceManager = new DataSourceManager();
    DependencyResolverSupplier supplier = new DependencyResolverSupplier(dataSourceManager);
    NeoStoreDataSource dataSource = mock(NeoStoreDataSource.class);
    DependencyResolver dependencies = mock(DependencyResolver.class);
    when(dataSource.getDependencyResolver()).thenReturn(dependencies);
    dataSourceManager.register(dataSource);
    // when: that data source goes away again
    dataSourceManager.unregister(dataSource);
    // then: the supplier no longer has a resolver to hand out
    assertEquals(null, supplier.get());
}
Example use of org.neo4j.graphdb.DependencyResolver in the neo4j project:
the method shouldLoadAndUseLuceneProvider of the test class TestLuceneSchemaBatchInsertIT.
@Test
public void shouldLoadAndUseLuceneProvider() throws Exception {
    // GIVEN: a store populated through the batch inserter, with a deferred schema index
    File storeDir = testDirectory.graphDbDir();
    BatchInserter inserter = BatchInserters.inserter(storeDir, fileSystemRule.get());
    inserter.createDeferredSchemaIndex(LABEL).on("name").create();
    // WHEN: a node matching the index is inserted and the inserter shut down
    inserter.createNode(map("name", "Mattias"), LABEL);
    inserter.shutdown();
    // THEN: a regular database sees the index online, served by the Lucene provider
    GraphDatabaseFactory graphDatabaseFactory = new TestGraphDatabaseFactory();
    GraphDatabaseAPI db = (GraphDatabaseAPI) graphDatabaseFactory.newEmbeddedDatabase(storeDir);
    DependencyResolver dependencies = db.getDependencyResolver();
    SchemaIndexProvider provider = dependencies.resolveDependency(SchemaIndexProvider.class, HighestSelectionStrategy.getInstance());
    try (Transaction ignore = db.beginTx()) {
        IndexDefinition index = Iterables.single(db.schema().getIndexes(LABEL));
        assertThat(db.schema().getIndexState(index), is(Schema.IndexState.ONLINE));
        assertThat(provider, instanceOf(LuceneSchemaIndexProvider.class));
    }
    // CLEANUP
    db.shutdown();
}
Aggregations