Use of java.util.concurrent.ThreadLocalRandom in project neo4j by neo4j.
In class SequenceLockStressIT, method stressTest:
@RepeatRule.Repeat(times = 20)
@Test
public void stressTest() throws Exception {
    // 'lock' and 'executor' are fields of the test class and are not part of this excerpt.
    int[][] data = new int[10][10];
    AtomicBoolean stop = new AtomicBoolean();
    AtomicInteger writerId = new AtomicInteger();
    abstract class Worker implements Runnable {
        @Override
        public void run() {
            try {
                doWork();
            } finally {
                stop.set(true);
            }
        }

        protected abstract void doWork();
    }
    // Readers: an optimistic read that validates must have observed a record whose elements are all equal.
    Worker reader = new Worker() {
        @Override
        protected void doWork() {
            while (!stop.get()) {
                ThreadLocalRandom rng = ThreadLocalRandom.current();
                int[] record = data[rng.nextInt(data.length)];
                long stamp = lock.tryOptimisticReadLock();
                int value = record[0];
                boolean consistent = true;
                for (int i : record) {
                    consistent &= i == value;
                }
                if (lock.validateReadLock(stamp) && !consistent) {
                    throw new AssertionError("inconsistent read");
                }
            }
        }
    };
    // Writers: each writer fills only its own row of 'data' with a single value while holding the write lock.
    Worker writer = new Worker() {
        private volatile long unused;

        @Override
        protected void doWork() {
            int id = writerId.getAndIncrement();
            int counter = 1;
            ThreadLocalRandom rng = ThreadLocalRandom.current();
            int smallSpin = rng.nextInt(5, 50);
            int bigSpin = rng.nextInt(100, 1000);
            while (!stop.get()) {
                if (lock.tryWriteLock()) {
                    int[] record = data[id];
                    for (int i = 0; i < record.length; i++) {
                        record[i] = counter;
                        for (int j = 0; j < smallSpin; j++) {
                            unused = rng.nextLong();
                        }
                    }
                    lock.unlockWrite();
                }
                for (int j = 0; j < bigSpin; j++) {
                    unused = rng.nextLong();
                }
            }
        }
    };
    // Exclusive worker: while the exclusive lock is held nothing may change the data,
    // so the two sums taken before and after the spin loop must match.
    Worker exclusive = new Worker() {
        private volatile long unused;

        @Override
        protected void doWork() {
            ThreadLocalRandom rng = ThreadLocalRandom.current();
            int spin = rng.nextInt(20, 2000);
            while (!stop.get()) {
                while (!lock.tryExclusiveLock()) {
                }
                long sumA = 0;
                long sumB = 0;
                for (int[] ints : data) {
                    for (int i : ints) {
                        sumA += i;
                    }
                }
                for (int i = 0; i < spin; i++) {
                    unused = rng.nextLong();
                }
                for (int[] record : data) {
                    for (int value : record) {
                        sumB += value;
                    }
                    Arrays.fill(record, 0);
                }
                lock.unlockExclusive();
                if (sumA != sumB) {
                    throw new AssertionError("Inconsistent exclusive lock. 'Sum A' = " + sumA + ", 'Sum B' = " + sumB);
                }
            }
        }
    };
    List<Future<?>> readers = new ArrayList<>();
    List<Future<?>> writers = new ArrayList<>();
    Future<?> exclusiveFuture = executor.submit(exclusive);
    for (int i = 0; i < 20; i++) {
        readers.add(executor.submit(reader));
    }
    for (int i = 0; i < data.length; i++) {
        writers.add(executor.submit(writer));
    }
    long deadline = System.currentTimeMillis() + 1000;
    while (!stop.get() && System.currentTimeMillis() < deadline) {
        Thread.sleep(20);
    }
    stop.set(true);
    exclusiveFuture.get();
    for (Future<?> future : writers) {
        future.get();
    }
    for (Future<?> future : readers) {
        future.get();
    }
}
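The lock in this test is neo4j's internal SequenceLock, not a JDK class, but its optimistic-read protocol (tryOptimisticReadLock, then validateReadLock after the read) mirrors java.util.concurrent.locks.StampedLock. For a runnable reference point, here is a minimal sketch of the same validate-after-read pattern using StampedLock; the class and field names are ours, not neo4j's.

import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.locks.StampedLock;

class OptimisticReadSketch {
    private final StampedLock lock = new StampedLock();
    private final int[] record = new int[10];

    // Writer: fill the record with one value while holding the write lock.
    void write() {
        long stamp = lock.writeLock();
        try {
            int value = ThreadLocalRandom.current().nextInt();
            java.util.Arrays.fill(record, value);
        } finally {
            lock.unlockWrite(stamp);
        }
    }

    // Reader: an optimistic read is only trusted if validate() still succeeds afterwards.
    boolean readConsistently() {
        long stamp = lock.tryOptimisticRead();
        int first = record[0];
        boolean consistent = true;
        for (int i : record) {
            consistent &= i == first;
        }
        if (lock.validate(stamp)) {
            return consistent; // no writer interfered, so the record must be uniform
        }
        return true; // validation failed; the read is discarded rather than judged
    }
}

As in the neo4j test, an inconsistent record is only an error when validation succeeds; if a writer intervened, the optimistic read is simply thrown away.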
Use of java.util.concurrent.ThreadLocalRandom in project neo4j by neo4j.
In class PhysicalLogFileRotateAndReadRaceIT, method shouldNotSeeEmptyLogFileWhenReadingTransactionStream:
@Test
public void shouldNotSeeEmptyLogFileWhenReadingTransactionStream() throws Exception {
    // GIVEN
    PhysicalLogFiles logFiles = new PhysicalLogFiles(directory.directory(), fileSystemRule.get());
    LogVersionRepository logVersionRepository = new DeadSimpleLogVersionRepository(0);
    PhysicalLogFile.Monitor monitor = mock(PhysicalLogFile.Monitor.class);
    LogHeaderCache headerCache = new LogHeaderCache(10);
    PhysicalLogFile logFile = life.add(new PhysicalLogFile(fileSystemRule.get(), logFiles, kibiBytes(1), () -> 2L, logVersionRepository, monitor, headerCache));
    FlushablePositionAwareChannel writer = logFile.getWriter();
    LogPositionMarker startPosition = new LogPositionMarker();
    writer.getCurrentPosition(startPosition);
    // WHEN
    AtomicBoolean end = new AtomicBoolean();
    byte[] dataChunk = new byte[100];
    // one thread constantly writing to and rotating the channel
    AtomicInteger rotations = new AtomicInteger();
    CountDownLatch startSignal = new CountDownLatch(1);
    Future<Void> writeFuture = t2.execute(ignored -> {
        ThreadLocalRandom random = ThreadLocalRandom.current();
        startSignal.countDown();
        while (!end.get()) {
            writer.put(dataChunk, random.nextInt(1, dataChunk.length));
            if (logFile.rotationNeeded()) {
                logFile.rotate();
                writer.getCurrentPosition(startPosition);
                rotations.incrementAndGet();
            }
        }
        return null;
    });
    assertTrue(startSignal.await(10, SECONDS));
    // one thread reading through the channel
    long maxEndTime = currentTimeMillis() + LIMIT_TIME;
    int reads = 0;
    try {
        for (; currentTimeMillis() < maxEndTime && reads < LIMIT_READS && rotations.get() < LIMIT_ROTATIONS; reads++) {
            try (ReadableLogChannel reader = logFile.getReader(startPosition.newPosition())) {
                deplete(reader);
            }
        }
    } finally {
        end.set(true);
        writeFuture.get();
    }
    // THEN simply getting here means this was successful
}
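The coordination used here (a CountDownLatch so the reader knows the background writer has started, an AtomicBoolean to stop it, and a Future to surface any exception it threw) is a reusable pattern beyond neo4j. A minimal, self-contained sketch of it with a plain ExecutorService; all names below are illustrative, not neo4j's.

import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicBoolean;

class BackgroundWriterSketch {
    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        AtomicBoolean end = new AtomicBoolean();
        CountDownLatch startSignal = new CountDownLatch(1);
        Future<Long> writeFuture = executor.submit(() -> {
            ThreadLocalRandom random = ThreadLocalRandom.current();
            long bytesWritten = 0;
            startSignal.countDown(); // tell the main thread the writer is running
            while (!end.get()) {
                bytesWritten += random.nextInt(1, 100); // pretend to write a random-length chunk
            }
            return bytesWritten;
        });
        startSignal.await();
        Thread.sleep(100); // "read" while the writer is still running
        end.set(true);
        System.out.println("background writer wrote " + writeFuture.get() + " bytes");
        executor.shutdown();
    }
}

Note also that ThreadLocalRandom's two-argument nextInt is half-open: nextInt(1, dataChunk.length) on the 100-byte chunk above yields a length of 1 through 99, so each put writes a non-empty chunk but never the full array.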
Use of java.util.concurrent.ThreadLocalRandom in project neo4j by neo4j.
In class PageCacheSlowTest, method mustNotLoseUpdates:
@RepeatRule.Repeat(times = 250)
@Test(timeout = SEMI_LONG_TIMEOUT_MILLIS)
public void mustNotLoseUpdates() throws Exception {
    // Another test that tries to squeeze out data race bugs. The idea is
    // the following:
    // We have a number of threads that are going to perform one of two
    // operations on randomly chosen pages. The first operation is this:
    // They are going to pin a random page, and then scan through it to
    // find a record that is their own. A record has a thread-id and a
    // counter, both 32-bit integers. If the record is not found, it will
    // be added after all the other existing records on that page, if any.
    // The last 32-bit word on a page is a sum of all the counters, and it
    // will be updated. Then it will verify that the sum matches the
    // counters.
    // The second operation is read-only, where only the verification is
    // performed.
    // The kicker is this: the threads will also keep track of which of
    // their counters on what pages are at what value, by maintaining
    // mirror counters in memory. The threads will continuously check if
    // these stay in sync with the data on the page cache. If they go out
    // of sync, then we have a data race bug where we either pin the wrong
    // pages or somehow lose updates to the pages.
    // This is somewhat similar to what the PageCacheStressTest does.
    final AtomicBoolean shouldStop = new AtomicBoolean();
    final int cachePages = 20;
    final int filePages = cachePages * 2;
    final int threadCount = 8;
    final int pageSize = threadCount * 4;
    getPageCache(fs, cachePages, pageSize, PageCacheTracer.NULL, PageCursorTracerSupplier.NULL);
    final PagedFile pagedFile = pageCache.map(file("a"), pageSize);
    ensureAllPagesExists(filePages, pagedFile);
    List<Future<UpdateResult>> futures = new ArrayList<>();
    for (int i = 0; i < threadCount; i++) {
        UpdateWorker worker = new UpdateWorker(i, filePages, shouldStop, pagedFile) {
            @Override
            protected void performReadOrUpdate(ThreadLocalRandom rng, boolean updateCounter, int pf_flags) throws IOException {
                // threadId, offset and pageCounts come from the UpdateWorker base class and are not part of this excerpt.
                int pageId = rng.nextInt(0, filePages);
                try (PageCursor cursor = pagedFile.io(pageId, pf_flags)) {
                    int counter;
                    try {
                        assertTrue(cursor.next());
                        do {
                            cursor.setOffset(offset);
                            counter = cursor.getInt();
                        } while (cursor.shouldRetry());
                        String lockName = updateCounter ? "PF_SHARED_WRITE_LOCK" : "PF_SHARED_READ_LOCK";
                        String reason = String.format("inconsistent page read from filePageId = %s, with %s, workerId = %s [t:%s]", pageId, lockName, threadId, Thread.currentThread().getId());
                        assertThat(reason, counter, is(pageCounts[pageId]));
                    } catch (Throwable throwable) {
                        shouldStop.set(true);
                        throw throwable;
                    }
                    if (updateCounter) {
                        counter++;
                        pageCounts[pageId]++;
                        cursor.setOffset(offset);
                        cursor.putInt(counter);
                    }
                }
            }
        };
        futures.add(executor.submit(worker));
    }
    Thread.sleep(40);
    shouldStop.set(true);
    verifyUpdateResults(filePages, pagedFile, futures);
    pagedFile.close();
}
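Stripped of the page cache machinery, the invariant each worker checks is simple: the counter it stores at its fixed per-thread offset on a page and its in-memory mirror of that counter must always agree, and any divergence means an update was lost or the wrong page was pinned. A single-threaded sketch of that invariant against a plain array; the names and constants are ours, and the real test of course runs many such workers concurrently against the shared page cache.

import java.util.concurrent.ThreadLocalRandom;

class MirrorCounterSketch {
    static final int FILE_PAGES = 40;

    public static void main(String[] args) {
        int threadId = 0;
        int[] storedCounters = new int[FILE_PAGES];   // stand-in for this thread's int slot on each page
        int[] mirrorCounters = new int[FILE_PAGES];   // the in-memory mirror kept by the worker
        ThreadLocalRandom rng = ThreadLocalRandom.current();
        for (int i = 0; i < 100_000; i++) {
            int pageId = rng.nextInt(0, FILE_PAGES);
            boolean updateCounter = rng.nextBoolean();
            int counter = storedCounters[pageId];     // "read the page"
            if (counter != mirrorCounters[pageId]) {
                throw new AssertionError("lost update: page " + pageId + ", thread " + threadId);
            }
            if (updateCounter) {                      // "write the page"
                storedCounters[pageId] = ++mirrorCounters[pageId];
            }
        }
        System.out.println("stored counters and mirror counters stayed in sync");
    }
}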
Use of java.util.concurrent.ThreadLocalRandom in project neo4j by neo4j.
In class PageSwapperTest, method concurrentPositionedVectoredReadsAndWritesMustNotInterfere:
@Test
public void concurrentPositionedVectoredReadsAndWritesMustNotInterfere() throws Exception {
    File file = file("file");
    PageSwapperFactory factory = createSwapperFactory();
    final PageSwapper swapper = createSwapperAndFile(factory, file, 4);
    final int pageCount = 100;
    final int iterations = 20000;
    final CountDownLatch startLatch = new CountDownLatch(1);
    ByteBufferPage output = createPage(4);
    for (int i = 0; i < pageCount; i++) {
        output.putInt(i + 1, 0);
        swapper.write(i, output);
    }
    Callable<Void> work = () -> {
        ThreadLocalRandom rng = ThreadLocalRandom.current();
        ByteBufferPage[] pages = new ByteBufferPage[10];
        for (int i = 0; i < pages.length; i++) {
            pages[i] = createPage(4);
        }
        startLatch.await();
        for (int i = 0; i < iterations; i++) {
            long startFilePageId = rng.nextLong(0, pageCount - pages.length);
            if (rng.nextBoolean()) {
                // Do read
                long bytesRead = swapper.read(startFilePageId, pages, 0, pages.length);
                assertThat(bytesRead, is(pages.length * 4L));
                for (int j = 0; j < pages.length; j++) {
                    int expectedValue = (int) (1 + j + startFilePageId);
                    int actualValue = pages[j].getInt(0);
                    assertThat(actualValue, is(expectedValue));
                }
            } else {
                // Do write
                for (int j = 0; j < pages.length; j++) {
                    int value = (int) (1 + j + startFilePageId);
                    pages[j].putInt(value, 0);
                }
                assertThat(swapper.write(startFilePageId, pages, 0, pages.length), is(pages.length * 4L));
            }
        }
        return null;
    };
    int threads = 8;
    ExecutorService executor = Executors.newFixedThreadPool(threads, r -> {
        Thread thread = Executors.defaultThreadFactory().newThread(r);
        thread.setDaemon(true);
        return thread;
    });
    List<Future<Void>> futures = new ArrayList<>(threads);
    for (int i = 0; i < threads; i++) {
        futures.add(executor.submit(work));
    }
    startLatch.countDown();
    for (Future<Void> future : futures) {
        future.get();
    }
}
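Every page is seeded so that page i holds the value i + 1, and every concurrent writer re-derives exactly that value from the absolute page id, which is why reads can be checked strictly even while writes race. A minimal array-backed sketch of that idempotent-write invariant; the array and constants are stand-ins for the file behind the PageSwapper, not its API.

class VectoredWriteInvariant {
    public static void main(String[] args) {
        final int pageCount = 100;
        final int windowSize = 10;
        int[] filePages = new int[pageCount]; // stand-in for the swapper-backed file, one int per page
        // initial state: page i holds i + 1
        for (int i = 0; i < pageCount; i++) {
            filePages[i] = i + 1;
        }
        // a "vectored write" of any window re-derives the same values from the absolute page id...
        int start = 37;
        for (int j = 0; j < windowSize; j++) {
            filePages[start + j] = 1 + j + start;
        }
        // ...so a later read of any window still sees page id + 1, regardless of interleaving
        for (int i = 0; i < pageCount; i++) {
            if (filePages[i] != i + 1) {
                throw new AssertionError("page " + i + " lost its value");
            }
        }
        System.out.println("writes are idempotent: every page still holds pageId + 1");
    }
}

The half-open bound of rng.nextLong(0, pageCount - pages.length) is what keeps the 10-page window inside the 100-page file: the largest possible start is 89, so the window never runs past the last page.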
Use of java.util.concurrent.ThreadLocalRandom in project neo4j by neo4j.
In class PageCacheTest, method freshlyCreatedPagesMustContainAllZeros:
@Test(timeout = SEMI_LONG_TIMEOUT_MILLIS)
public void freshlyCreatedPagesMustContainAllZeros() throws IOException {
    ThreadLocalRandom rng = ThreadLocalRandom.current();
    configureStandardPageCache();
    try (PagedFile pagedFile = pageCache.map(existingFile("a"), filePageSize);
         PageCursor cursor = pagedFile.io(0, PF_SHARED_WRITE_LOCK)) {
        for (int i = 0; i < 100; i++) {
            assertTrue(cursor.next());
            for (int j = 0; j < filePageSize; j++) {
                cursor.putByte((byte) rng.nextInt());
            }
        }
    }
    pageCache.close();
    pageCache = null;
    // make sure underlying pages are finalizable
    System.gc();
    // make sure underlying pages are finally collected
    System.gc();
    configureStandardPageCache();
    try (PagedFile pagedFile = pageCache.map(existingFile("b"), filePageSize);
         PageCursor cursor = pagedFile.io(0, PF_SHARED_WRITE_LOCK)) {
        for (int i = 0; i < 100; i++) {
            assertTrue(cursor.next());
            for (int j = 0; j < filePageSize; j++) {
                assertThat(cursor.getByte(), is((byte) 0));
            }
        }
    }
}
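The first loop above dirties each page one byte at a time by casting rng.nextInt() to a byte; when a plain byte[] needs the same treatment, ThreadLocalRandom also inherits Random.nextBytes(byte[]), which fills the whole buffer in one call. A tiny standalone illustration; the buffer size is arbitrary.

import java.util.concurrent.ThreadLocalRandom;

class RandomFillDemo {
    public static void main(String[] args) {
        byte[] page = new byte[8192];
        // equivalent of the per-byte putByte loop: fill the buffer with pseudo-random bytes
        ThreadLocalRandom.current().nextBytes(page);
        System.out.println("first byte: " + page[0] + ", last byte: " + page[page.length - 1]);
    }
}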