Use of org.neo4j.test.DoubleLatch in project neo4j by neo4j.
From the class TransactionIT, method shouldTerminateQueriesEvenIfUsingPeriodicCommit.
@Test
public void shouldTerminateQueriesEvenIfUsingPeriodicCommit() throws Exception {
    // Spawns a throttled HTTP server, runs a PERIODIC COMMIT query that fetches data from this server,
    // and checks that the query can be terminated.
    // We start with 3, because that is how many actors we have -
    // 1. the http server
    // 2. the running query
    // 3. the one terminating 2
    final DoubleLatch latch = new DoubleLatch(3, true);
    // This is used to block the http server between the first and second batch
    final Barrier.Control barrier = new Barrier.Control();
    // Serve CSV via local web server, let Jetty find a random port for us
    Server server = createHttpServer(latch, barrier, 20, 30);
    server.start();
    int localPort = getLocalPort(server);
    final BoltStateMachine[] machine = { null };
    Thread thread = new Thread() {
        @Override
        public void run() {
            try (BoltStateMachine stateMachine = env.newMachine(new BoltConnectionDescriptor(
                    new InetSocketAddress("<testClient>", 56789), new InetSocketAddress("<writeServer>", 7468)))) {
                machine[0] = stateMachine;
                stateMachine.init(USER_AGENT, emptyMap(), null);
                String query = format("USING PERIODIC COMMIT 10 LOAD CSV FROM 'http://localhost:%d' AS line " +
                        "CREATE (n:A {id: line[0], square: line[1]}) " +
                        "WITH count(*) as number " +
                        "CREATE (n:ShouldNotExist)", localPort);
                try {
                    latch.start();
                    stateMachine.run(query, emptyMap(), nullResponseHandler());
                    stateMachine.pullAll(nullResponseHandler());
                } finally {
                    latch.finish();
                }
            } catch (BoltConnectionFatality connectionFatality) {
                throw new RuntimeException(connectionFatality);
            }
        }
    };
    thread.setName("query runner");
    thread.start();
    // We block this thread here, waiting for the http server to spin up and the running query to get started
    latch.startAndWaitForAllToStart();
    Thread.sleep(1000);
    // This is the call that RESETs the Bolt connection and will terminate the running query
    machine[0].reset(nullResponseHandler());
    barrier.release();
    // We block again here, waiting for the running query to have been terminated, and for the server to have
    // wrapped up and finished streaming http results
    latch.finishAndWaitForAllToFinish();
    // And now we check that the last node did not get created
    try (Transaction ignored = env.graph().beginTx()) {
        assertFalse("Query was not terminated in time - nodes were created!",
                env.graph().findNodes(Label.label("ShouldNotExist")).hasNext());
    }
}
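The three-way handshake above is easier to see in isolation. Below is a minimal sketch of the same DoubleLatch pattern with just two participants, a worker and the test thread; the class and method names in the sketch are illustrative, not taken from the Neo4j test, and only the DoubleLatch calls already used in these snippets are relied on.

import org.neo4j.test.DoubleLatch;

public class DoubleLatchHandshakeSketch {
    public static void main(String[] args) {
        // Two participants: the worker thread and the main (test) thread.
        // The boolean flag mirrors the one passed in the Bolt test above.
        final DoubleLatch latch = new DoubleLatch(2, true);

        Thread worker = new Thread(() -> {
            try {
                latch.start();   // "I have started" - lets startAndWaitForAllToStart() proceed
                doWork();        // the interesting, interruptible work happens here
            } finally {
                latch.finish();  // "I am done", signalled even if doWork() throws
            }
        });
        worker.setName("worker");
        worker.start();

        latch.startAndWaitForAllToStart();   // block until every participant has started
        interactWithWorker();                // e.g. reset the connection, release a barrier
        latch.finishAndWaitForAllToFinish(); // block until every participant has finished
    }

    private static void doWork() { /* placeholder for the real workload */ }

    private static void interactWithWorker() { /* placeholder for the test's action */ }
}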
Use of org.neo4j.test.DoubleLatch in project neo4j by neo4j.
From the class IndexingServiceTest, method shouldWaitForRecoveredUniquenessConstraintIndexesToBeFullyPopulated.
@Test
public void shouldWaitForRecoveredUniquenessConstraintIndexesToBeFullyPopulated() throws Exception {
    // I.e. when a uniqueness constraint is created, but the database crashes before that schema record
    // ends up in the store, so that the next start has no choice but to rebuild it.
    // GIVEN
    final DoubleLatch latch = new DoubleLatch();
    ControlledIndexPopulator populator = new ControlledIndexPopulator(latch);
    final AtomicLong indexId = new AtomicLong(-1);
    IndexingService.Monitor monitor = new IndexingService.MonitorAdapter() {
        @Override
        public void awaitingPopulationOfRecoveredIndex(long index, NewIndexDescriptor descriptor) {
            // When we see that we start to await the index to populate, notify the slow-as-heck
            // populator that it can actually go and complete its job.
            indexId.set(index);
            latch.startAndWaitForAllToStart();
        }
    };
    // leaving out the IndexRule here will have the index be populated from scratch
    IndexingService indexing = newIndexingServiceWithMockedDependencies(populator, accessor,
            withData(addNodeUpdate(0, "value", 1)), monitor);
    // WHEN initializing, i.e. preparing for recovery
    life.init();
    // simulating an index being created as part of applying recovered transactions
    long fakeOwningConstraintRuleId = 1;
    indexing.createIndexes(constraintIndexRule(2, labelId, propertyKeyId, PROVIDER_DESCRIPTOR, fakeOwningConstraintRuleId));
    // and then starting, i.e. considering recovery completed
    life.start();
    // THEN afterwards the index should be ONLINE
    assertEquals(2, indexId.get());
    assertEquals(ONLINE, indexing.getIndexProxy(indexId.get()).getState());
}
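ControlledIndexPopulator is a helper defined elsewhere in IndexingServiceTest and is not shown on this page. Its essential behaviour is to park index population on the latch until the monitor callback above opens the gate. Below is a standalone sketch of that gate pattern, stripped of the Neo4j index API; all names are illustrative, and it assumes the no-argument DoubleLatch constructor defaults to a single participant, as this test's usage suggests.

import org.neo4j.test.DoubleLatch;

public class RecoveryGateSketch {
    public static void main(String[] args) throws InterruptedException {
        final DoubleLatch latch = new DoubleLatch();

        // Stand-in for the controlled populator: parks until the "monitor" opens the gate.
        Thread slowPopulator = new Thread(() -> {
            latch.waitForAllToStart();  // blocks until awaitingPopulationOfRecoveredIndex(..) fires
            // ... population work would happen here ...
            latch.finish();             // report that population has completed
        });
        slowPopulator.start();

        // Stand-in for the monitor callback: opening the gate lets population proceed.
        latch.startAndWaitForAllToStart();

        latch.waitForAllToFinish();     // the service can now wait for the index to come ONLINE
        slowPopulator.join();
    }
}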
Use of org.neo4j.test.DoubleLatch in project neo4j by neo4j.
From the class ControlledPopulationSchemaIndexProvider, method installPopulationJobCompletionLatch.
public DoubleLatch installPopulationJobCompletionLatch() {
    final DoubleLatch populationCompletionLatch = new DoubleLatch();
    mockedPopulator = new IndexPopulator.Adapter() {
        @Override
        public void create() throws IOException {
            populationCompletionLatch.startAndWaitForAllToStartAndFinish();
            super.create();
        }

        @Override
        public IndexSample sampleResult() {
            return new IndexSample();
        }
    };
    return populationCompletionLatch;
}
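The returned latch parks the population job inside create() until a test releases it. A hypothetical caller would use it roughly as below; the setup is omitted and the helper names (provider, createIndexInSeparateThread) are illustrative, not taken from the Neo4j tests.

// Hypothetical usage sketch; provider/database setup is omitted.
DoubleLatch populationLatch = provider.installPopulationJobCompletionLatch();

createIndexInSeparateThread();        // eventually calls IndexPopulator.create() on the job thread

populationLatch.waitForAllToStart();  // the population job is now parked inside create()
// ... assert on the index state while the job is held ...

populationLatch.finish();             // lets startAndWaitForAllToStartAndFinish() return
// ... population then completes and the index can go ONLINE ...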
Use of org.neo4j.test.DoubleLatch in project neo4j by neo4j.
From the class ContractCheckingIndexProxyTest, method shouldNotCloseWhileForcing.
@Test(expected = /* THEN */ IllegalStateException.class)
public void shouldNotCloseWhileForcing() throws IOException {
    // GIVEN
    final DoubleLatch latch = new DoubleLatch();
    final IndexProxy inner = new IndexProxyAdapter() {
        @Override
        public void force() {
            latch.startAndWaitForAllToStartAndFinish();
        }
    };
    final IndexProxy outer = newContractCheckingIndexProxy(inner);
    outer.start();
    // WHEN
    runInSeparateThread(() -> outer.force());
    try {
        latch.waitForAllToStart();
        outer.close();
    } finally {
        latch.finish();
    }
}
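runInSeparateThread is a small helper defined in ContractCheckingIndexProxyTest rather than in DoubleLatch, and it is not shown on this page. A plausible, hypothetical sketch of such a helper follows; the real one may differ, for example in how it reports exceptions, and ThrowingRunnable here is an assumed local interface.

// Hypothetical helper sketch. A ThrowingRunnable is used because IndexProxy
// operations such as force() and close() may declare checked exceptions.
private interface ThrowingRunnable {
    void run() throws Exception;
}

private void runInSeparateThread(final ThrowingRunnable action) {
    new Thread(() -> {
        try {
            action.run();
        } catch (Exception e) {
            throw new RuntimeException(e);  // surface failures from the background thread
        }
    }).start();
}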
Use of org.neo4j.test.DoubleLatch in project neo4j by neo4j.
From the class IndexSamplingControllerTest, method shouldNotEmptyQueueConcurrently.
@Test
public void shouldNotEmptyQueueConcurrently() {
    // given
    final AtomicInteger totalCount = new AtomicInteger(0);
    final AtomicInteger concurrentCount = new AtomicInteger(0);
    final DoubleLatch jobLatch = new DoubleLatch();
    final DoubleLatch testLatch = new DoubleLatch();
    final ThreadLocal<Boolean> hasRun = new ThreadLocal<Boolean>() {
        @Override
        protected Boolean initialValue() {
            return false;
        }
    };
    IndexSamplingJobFactory jobFactory = (indexId, proxy) -> {
        // make sure we execute this once per thread
        if (hasRun.get()) {
            return null;
        }
        hasRun.set(true);
        if (!concurrentCount.compareAndSet(0, 1)) {
            throw new IllegalStateException("count !== 0 on create");
        }
        totalCount.incrementAndGet();
        jobLatch.waitForAllToStart();
        testLatch.startAndWaitForAllToStart();
        jobLatch.waitForAllToFinish();
        concurrentCount.decrementAndGet();
        testLatch.finish();
        return null;
    };
    final IndexSamplingController controller = new IndexSamplingController(samplingConfig, jobFactory, jobQueue,
            tracker, snapshotProvider, scheduler, always(false));
    when(tracker.canExecuteMoreSamplingJobs()).thenReturn(true);
    when(indexProxy.getState()).thenReturn(ONLINE);
    // when running once
    new Thread(runController(controller, BACKGROUND_REBUILD_UPDATED)).start();
    jobLatch.startAndWaitForAllToStart();
    testLatch.waitForAllToStart();
    // then blocking on first job
    assertEquals(1, concurrentCount.get());
    assertEquals(1, totalCount.get());
    // when running a second time
    controller.sampleIndexes(BACKGROUND_REBUILD_UPDATED);
    // then no concurrent job execution
    jobLatch.finish();
    testLatch.waitForAllToFinish();
    // and finally exactly one job has run to completion
    assertEquals(0, concurrentCount.get());
    assertEquals(1, totalCount.get());
}
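runController is another helper local to IndexSamplingControllerTest and is not shown here. A minimal sketch follows, assuming it simply wraps the sampling call in a Runnable for the background thread; the IndexSamplingMode parameter type is an assumption based on the BACKGROUND_REBUILD_UPDATED constant used above.

// Hypothetical sketch of the helper used above.
private Runnable runController(final IndexSamplingController controller, final IndexSamplingMode mode) {
    return () -> controller.sampleIndexes(mode);
}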