Use of org.junit.rules.Timeout in project pravega by pravega.
The class ContainerReadIndexTests, method testStorageReadsConcurrent.
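For context, these examples are indexed under org.junit.rules.Timeout, which is applied as a class-level rule rather than appearing inside the test bodies below. The following is a minimal sketch of that setup; the 30-second limit and the placeholder test are assumptions for illustration, not taken from the Pravega sources.

import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;

public class ContainerReadIndexTests {
    // Fails any individual test method that runs longer than the limit.
    // The 30-second value is illustrative; the project's actual limit may differ.
    @Rule
    public Timeout globalTimeout = Timeout.seconds(30);

    @Test
    public void exampleTest() {
        // Each test body, including the ones below, runs under the timeout above.
    }
}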
private void testStorageReadsConcurrent(
        int offsetDeltaBetweenReads,
        int extraAllowedStorageReads,
        BiConsumerWithException<TestContext, UpdateableSegmentMetadata> executeBetweenReads,
        BiConsumerWithException<TestContext, UpdateableSegmentMetadata> finalCheck) throws Exception {
val maxAllowedStorageReads = 2 + extraAllowedStorageReads;
// Set a cache size big enough to prevent the Cache Manager from enabling "essential-only" mode due to over-utilization.
val cachePolicy = new CachePolicy(10000, 0.01, 1.0, Duration.ofMillis(10), Duration.ofMillis(10));
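// (The five CachePolicy arguments above are assumed to be: maxSize, targetUtilization, maxUtilization, maxTime, generationTime.)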
@Cleanup TestContext context = new TestContext(DEFAULT_CONFIG, cachePolicy);
// Create the segment
val segmentId = createSegment(0, context);
val metadata = context.metadata.getStreamSegmentMetadata(segmentId);
context.storage.create(metadata.getName(), TIMEOUT).join();
// Append some data to the Read Index.
val dataInStorage = getAppendData(metadata.getName(), segmentId, 0, 0);
metadata.setLength(dataInStorage.getLength());
context.readIndex.append(segmentId, 0, dataInStorage);
// Then write to Storage.
context.storage.openWrite(metadata.getName()).thenCompose(handle -> context.storage.write(handle, 0, dataInStorage.getReader(), dataInStorage.getLength(), TIMEOUT)).join();
metadata.setStorageLength(dataInStorage.getLength());
// Then evict it from the cache.
boolean evicted = context.cacheManager.applyCachePolicy();
Assert.assertTrue("Expected an eviction.", evicted);
@Cleanup("release") val firstReadBlocker = new ReusableLatch();
@Cleanup("release") val firstRead = new ReusableLatch();
@Cleanup("release") val secondReadBlocker = new ReusableLatch();
@Cleanup("release") val secondRead = new ReusableLatch();
val cacheInsertCount = new AtomicInteger();
context.cacheStorage.insertCallback = address -> {
    if (cacheInsertCount.incrementAndGet() > 1) {
        Assert.fail("Too many cache inserts.");
    }
};
val storageReadCount = new AtomicInteger();
context.storage.setReadInterceptor((segment, wrappedStorage) -> {
    int readCount = storageReadCount.incrementAndGet();
    if (readCount == 1) {
        firstRead.release();
        Exceptions.handleInterrupted(firstReadBlocker::await);
    } else if (readCount == 2) {
        secondRead.release();
        Exceptions.handleInterrupted(secondReadBlocker::await);
    } else if (readCount > maxAllowedStorageReads) {
        Assert.fail("Too many storage reads. Max allowed = " + maxAllowedStorageReads);
    }
});
// Initiate the first Storage Read.
val read1Result = context.readIndex.read(segmentId, 0, dataInStorage.getLength(), TIMEOUT);
val read1Data = new byte[dataInStorage.getLength()];
val read1Future = CompletableFuture.runAsync(() -> read1Result.readRemaining(read1Data, TIMEOUT), executorService());
// Wait for it to process.
firstRead.await();
// Initiate the second storage read.
val read2Length = dataInStorage.getLength() - offsetDeltaBetweenReads;
val read2Result = context.readIndex.read(segmentId, offsetDeltaBetweenReads, read2Length, TIMEOUT);
val read2Data = new byte[read2Length];
val read2Future = CompletableFuture.runAsync(() -> read2Result.readRemaining(read2Data, TIMEOUT), executorService());
secondRead.await();
// Unblock the first Storage Read and wait for it to complete.
firstReadBlocker.release();
read1Future.get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
// Wait for the data from the first read to be fully added to the cache. Without this the subsequent append will not write to this entry.
TestUtils.await(() -> {
    try {
        return context.readIndex.read(segmentId, 0, dataInStorage.getLength(), TIMEOUT).next().getType() == ReadResultEntryType.Cache;
    } catch (StreamSegmentNotExistsException ex) {
        throw new CompletionException(ex);
    }
}, 10, TIMEOUT.toMillis());
// If there's anything to do between the two reads, do it now.
executeBetweenReads.accept(context, metadata);
// Unblock second Storage Read.
secondReadBlocker.release();
read2Future.get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
// Perform final check.
finalCheck.accept(context, metadata);
Assert.assertEquals("Unexpected number of storage reads.", maxAllowedStorageReads, storageReadCount.get());
Assert.assertEquals("Unexpected number of cache inserts.", 1, cacheInsertCount.get());
}
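To show how a parameterized helper like this is typically driven, here is a sketch of hypothetical @Test callers; the test names, argument values, and empty hooks are assumptions for illustration, not from the Pravega sources.

// Hypothetical callers; names and values are illustrative only.
@Test
public void testStorageReadsConcurrentAtSameOffset() throws Exception {
    // Both reads start at offset 0; no extra storage reads allowed, nothing to do between reads.
    testStorageReadsConcurrent(0, 0, (context, metadata) -> { }, (context, metadata) -> { });
}

@Test
public void testStorageReadsConcurrentAtDifferentOffsets() throws Exception {
    // The second read starts 1 byte into the segment; allow one extra storage read to cover the gap.
    testStorageReadsConcurrent(1, 1, (context, metadata) -> { }, (context, metadata) -> { });
}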
Use of org.junit.rules.Timeout in project pravega by pravega.
The class ContainerReadIndexTests, method testConcurrentReadTransactionStorageReadCacheFull.
/**
* Tests the following scenario:
* 1. Segment B has been merged into A
* 2. We are executing a read on Segment A over a portion where B was merged into A.
* 3. Concurrently with 2, a read on Segment B that went to LTS (possibly from the same result as before) wants to
* insert into the Cache, but the cache is full. The Cache Manager would want to clean up the cache.
* <p>
* We want to ensure that there is no deadlock for this scenario.
*/
@Test
public void testConcurrentReadTransactionStorageReadCacheFull() throws Exception {
// Must equal Cache Block size for easy eviction.
val appendLength = 4 * 1024;
val maxCacheSize = 2 * 1024 * 1024;
// We set the policy's max size to a much higher value to avoid entering "essential-only" state.
CachePolicy cachePolicy = new CachePolicy(2 * maxCacheSize, Duration.ZERO, Duration.ofMillis(1));
@Cleanup TestContext context = new TestContext(DEFAULT_CONFIG, cachePolicy, maxCacheSize);
val rnd = new Random(0);
// Create parent segment and one transaction
long targetId = createSegment(0, context);
long sourceId = createTransaction(1, context);
val targetMetadata = context.metadata.getStreamSegmentMetadata(targetId);
val sourceMetadata = context.metadata.getStreamSegmentMetadata(sourceId);
createSegmentsInStorage(context);
// Write something to the transaction, and immediately evict it.
val append1 = new byte[appendLength];
val append2 = new byte[appendLength];
rnd.nextBytes(append1);
rnd.nextBytes(append2);
val allData = BufferView.builder().add(new ByteArraySegment(append1)).add(new ByteArraySegment(append2)).build();
appendSingleWrite(sourceId, new ByteArraySegment(append1), context);
sourceMetadata.setStorageLength(sourceMetadata.getLength());
// Increment the generation.
context.cacheManager.applyCachePolicy();
// Write a second thing to the transaction, and do not evict it.
appendSingleWrite(sourceId, new ByteArraySegment(append2), context);
context.storage.openWrite(sourceMetadata.getName()).thenCompose(handle -> context.storage.write(handle, 0, allData.getReader(), allData.getLength(), TIMEOUT)).join();
// Seal & Begin-merge the transaction (do not seal in storage).
sourceMetadata.markSealed();
targetMetadata.setLength(sourceMetadata.getLength());
context.readIndex.beginMerge(targetId, 0L, sourceId);
sourceMetadata.markMerged();
sourceMetadata.markDeleted();
// At this point, the first append in the transaction should be evicted, while the second one should still be there.
@Cleanup val rr = context.readIndex.read(targetId, 0, (int) targetMetadata.getLength(), TIMEOUT);
@Cleanup val cacheCleanup = new AutoCloseObject();
@Cleanup("release") val insertingInCache = new ReusableLatch();
@Cleanup("release") val finishInsertingInCache = new ReusableLatch();
context.cacheStorage.beforeInsert = () -> {
    // Prevent a stack overflow.
    context.cacheStorage.beforeInsert = null;
    // Fill up the cache with garbage - this will cause an unrecoverable Cache Full event (which is what we want).
    int toFill = (int) (context.cacheStorage.getState().getMaxBytes() - context.cacheStorage.getState().getUsedBytes());
    int address = context.cacheStorage.insert(new ByteArraySegment(new byte[toFill]));
    cacheCleanup.onClose = () -> context.cacheStorage.delete(address);
    // Notify that we have inserted.
    insertingInCache.release();
    // Block (while holding locks) until notified.
    Exceptions.handleInterrupted(finishInsertingInCache::await);
};
// Begin a read process.
// First read must be a storage read.
val storageRead = rr.next();
Assert.assertEquals(ReadResultEntryType.Storage, storageRead.getType());
storageRead.requestContent(TIMEOUT);
// Copy contents out; this is not affected by our cache insert block.
byte[] readData1 = storageRead.getContent().join().slice(0, appendLength).getCopy();
// Wait for the insert callback to be blocked on our latch.
insertingInCache.await();
// Continue with the read. We are now expecting a Cache Read. Do it asynchronously (new thread).
val cacheReadFuture = CompletableFuture.supplyAsync(rr::next, executorService());
// Notify the cache insert that it's time to release now.
finishInsertingInCache.release();
// Wait for the async read to finish and grab its contents.
val cacheRead = cacheReadFuture.get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
Assert.assertEquals(ReadResultEntryType.Cache, cacheRead.getType());
byte[] readData2 = cacheRead.getContent().join().slice(0, appendLength).getCopy();
// Validate data was read correctly.
val readData = BufferView.builder().add(new ByteArraySegment(readData1)).add(new ByteArraySegment(readData2)).build();
Assert.assertEquals("Unexpected data written.", allData, readData);
}
Use of org.junit.rules.Timeout in project pravega by pravega.
The class ContainerReadIndexTests, method testStorageReadsConcurrentSmallLarge.
private void testStorageReadsConcurrentSmallLarge(int segmentLength, int read1Offset, int read1Length, int read2Offset, int read2Length) throws Exception {
// We only expect 2 Storage reads for this test.
val expectedStorageReadCount = 2;
val cachePolicy = new CachePolicy(100, 0.01, 1.0, Duration.ofMillis(10), Duration.ofMillis(10));
val config = ReadIndexConfig.builder().with(ReadIndexConfig.MEMORY_READ_MIN_LENGTH, DEFAULT_CONFIG.getMemoryReadMinLength()).with(ReadIndexConfig.STORAGE_READ_ALIGNMENT, segmentLength).build();
@Cleanup TestContext context = new TestContext(config, cachePolicy);
// Create the segment
val segmentId = createSegment(0, context);
val metadata = context.metadata.getStreamSegmentMetadata(segmentId);
context.storage.create(metadata.getName(), TIMEOUT).join();
// Write some data to the segment in Storage.
val rnd = new Random(0);
val segmentData = new ByteArraySegment(new byte[segmentLength]);
rnd.nextBytes(segmentData.array());
context.storage.openWrite(metadata.getName()).thenCompose(handle -> context.storage.write(handle, 0, segmentData.getReader(), segmentData.getLength(), TIMEOUT)).join();
metadata.setLength(segmentData.getLength());
metadata.setStorageLength(segmentData.getLength());
@Cleanup("release") val firstReadBlocker = new ReusableLatch();
@Cleanup("release") val firstRead = new ReusableLatch();
@Cleanup("release") val secondReadBlocker = new ReusableLatch();
@Cleanup("release") val secondRead = new ReusableLatch();
val cacheInsertCount = new AtomicInteger();
context.cacheStorage.insertCallback = address -> cacheInsertCount.incrementAndGet();
val storageReadCount = new AtomicInteger();
context.storage.setReadInterceptor((segment, wrappedStorage) -> {
    int readCount = storageReadCount.incrementAndGet();
    if (readCount == 1) {
        firstRead.release();
        Exceptions.handleInterrupted(firstReadBlocker::await);
    } else if (readCount == 2) {
        secondRead.release();
        Exceptions.handleInterrupted(secondReadBlocker::await);
    }
});
// Initiate the first Storage Read.
val read1Result = context.readIndex.read(segmentId, read1Offset, read1Length, TIMEOUT);
val read1Data = new byte[read1Length];
val read1Future = CompletableFuture.runAsync(() -> read1Result.readRemaining(read1Data, TIMEOUT), executorService());
// Wait for it to process.
firstRead.await();
// Initiate the second storage read.
val read2Result = context.readIndex.read(segmentId, read2Offset, read2Length, TIMEOUT);
val read2Data = new byte[read2Length];
val read2Future = CompletableFuture.runAsync(() -> read2Result.readRemaining(read2Data, TIMEOUT), executorService());
secondRead.await();
// Unblock the first Storage Read and wait for it to complete.
firstReadBlocker.release();
read1Future.get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
// Unblock second Storage Read.
secondReadBlocker.release();
read2Future.get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
// Wait for the data from the second read to be fully added to the cache (background task).
TestUtils.await(() -> {
    try {
        return context.readIndex.read(segmentId, read2Offset, read2Length, TIMEOUT).next().getType() == ReadResultEntryType.Cache;
    } catch (StreamSegmentNotExistsException ex) {
        throw new CompletionException(ex);
    }
}, 10, TIMEOUT.toMillis());
// Verify that the initial read requests retrieved the data correctly.
Assert.assertEquals("Initial Read 1 (Storage)", segmentData.slice(read1Offset, read1Length), new ByteArraySegment(read1Data));
Assert.assertEquals("Initial Read 2 (Storage)", segmentData.slice(read2Offset, read2Length), new ByteArraySegment(read2Data));
// Re-issue the read requests for the exact same offsets. This time it should be from the cache.
val read1Data2 = new byte[read1Length];
context.readIndex.read(segmentId, read1Offset, read1Length, TIMEOUT).readRemaining(read1Data2, TIMEOUT);
Assert.assertArrayEquals("Reissued Read 1 (Cache)", read1Data, read1Data2);
val read2Data2 = new byte[read2Length];
context.readIndex.read(segmentId, read2Offset, read2Length, TIMEOUT).readRemaining(read2Data2, TIMEOUT);
Assert.assertArrayEquals("Reissued Read 2 (Cache)", read2Data, read2Data2);
// Verify that we did the expected number of Storage/Cache operations.
Assert.assertEquals("Unexpected number of storage reads.", expectedStorageReadCount, storageReadCount.get());
Assert.assertTrue("Unexpected number of cache inserts.", cacheInsertCount.get() == 3 || cacheInsertCount.get() == 1);
}
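A sketch of how this helper might be invoked, with a large read spanning the whole segment and a small read nested inside it; the test name, segment length, and read windows are illustrative assumptions, not from the Pravega sources.

// Hypothetical caller; the segment length and read windows are illustrative only.
@Test
public void testStorageReadsConcurrentSmallInsideLarge() throws Exception {
    int segmentLength = 1024 * 1024;
    // Read 1 covers the whole segment; read 2 is a small window inside it.
    testStorageReadsConcurrentSmallLarge(segmentLength, 0, segmentLength, 100, 1024);
}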
Use of org.junit.rules.Timeout in project pravega by pravega.
The class ContainerReadIndexTests, method testStorageFailedCacheInsert.
/**
* Tests the ability to handle Cache/Index Update failures post a successful Storage Read.
*/
@Test
public void testStorageFailedCacheInsert() throws Exception {
final int segmentLength = 1024;
// Create a segment and write some data in Storage for it.
@Cleanup TestContext context = new TestContext();
ArrayList<Long> segmentIds = createSegments(context);
createSegmentsInStorage(context);
val testSegmentId = segmentIds.get(0);
UpdateableSegmentMetadata sm = context.metadata.getStreamSegmentMetadata(testSegmentId);
sm.setStorageLength(segmentLength);
sm.setLength(segmentLength);
context.storage.openWrite(sm.getName()).thenCompose(handle -> context.storage.write(handle, 0, new ByteArrayInputStream(new byte[segmentLength]), segmentLength, TIMEOUT)).join();
// Keep track of inserted/deleted calls to the Cache, and "fail" the insert call.
val inserted = new ReusableLatch();
val insertedAddress = new AtomicInteger(CacheStorage.NO_ADDRESS);
val deletedAddress = new AtomicInteger(Integer.MAX_VALUE);
context.cacheStorage.insertCallback = address -> {
    // Immediately delete this data (prevent leaks).
    context.cacheStorage.delete(address);
    Assert.assertTrue(insertedAddress.compareAndSet(CacheStorage.NO_ADDRESS, address));
    inserted.release();
    throw new IntentionalException();
};
context.cacheStorage.deleteCallback = deletedAddress::set;
// Trigger a read. The first read call will be served with data directly from Storage, so we expect it to be successful.
@Cleanup ReadResult readResult = context.readIndex.read(testSegmentId, 0, segmentLength, TIMEOUT);
ReadResultEntry entry = readResult.next();
Assert.assertEquals("Unexpected ReadResultEntryType.", ReadResultEntryType.Storage, entry.getType());
entry.requestContent(TIMEOUT);
// This should complete without issues.
entry.getContent().get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
// Verify that the cache insert attempt has been made, and that the entry was deleted as a result of the failed insert.
inserted.await();
Assert.assertNotEquals("Expected an insert attempt to have been made.", CacheStorage.NO_ADDRESS, insertedAddress.get());
AssertExtensions.assertEventuallyEquals(insertedAddress.get(), deletedAddress::get, TIMEOUT.toMillis());
}
Use of org.junit.rules.Timeout in project pravega by pravega.
The class StreamSegmentStorageReaderTests, method populate.
private byte[] populate(Storage s) {
byte[] data = new byte[SEGMENT_LENGTH];
val rnd = new Random(0);
rnd.nextBytes(data);
val handle = s.create(SEGMENT_NAME, TIMEOUT).thenCompose(si -> s.openWrite(SEGMENT_NAME)).join();
final int appendSize = data.length / SEGMENT_APPEND_COUNT;
int offset = 0;
for (int i = 0; i < appendSize; i++) {
int writeLength = Math.min(appendSize, data.length - offset);
s.write(handle, offset, new ByteArrayInputStream(data, offset, writeLength), writeLength, TIMEOUT).join();
offset += writeLength;
}
return data;
}
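A sketch of how populate's return value might be verified by reading the segment straight back from Storage. This helper is hypothetical, not part of the original test, and assumes Pravega's Storage read signature of read(handle, offset, buffer, bufferOffset, length, timeout).

// Hypothetical verification helper; assumes Storage.read(handle, offset, buffer, bufferOffset, length, timeout).
private void verifyPopulated(Storage s, byte[] expected) {
    val readHandle = s.openRead(SEGMENT_NAME).join();
    byte[] actual = new byte[expected.length];
    int offset = 0;
    while (offset < actual.length) {
        // read() returns the number of bytes actually read; loop until the buffer is full.
        int bytesRead = s.read(readHandle, offset, actual, offset, actual.length - offset, TIMEOUT).join();
        offset += bytesRead;
    }
    Assert.assertArrayEquals("Storage contents do not match populated data.", expected, actual);
}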