Use of io.pravega.segmentstore.server.CachePolicy in project pravega by pravega.
Class ContainerReadIndexTests, method testTruncate.
/**
* Tests a scenario of truncation that does not happen concurrently with reading (segments are pre-truncated).
*/
@Test
public void testTruncate() throws Exception {
@Cleanup TestContext context = new TestContext(DEFAULT_CONFIG, new CachePolicy(Long.MAX_VALUE, Duration.ofMillis(1000000), Duration.ofMillis(10000)));
ArrayList<Long> segmentIds = createSegments(context);
HashMap<Long, ByteArrayOutputStream> segmentContents = new HashMap<>();
appendData(segmentIds, segmentContents, context);
// Truncate all segments at their mid-points.
for (int i = 0; i < segmentIds.size(); i++) {
val sm = context.metadata.getStreamSegmentMetadata(segmentIds.get(i));
sm.setStartOffset(sm.getLength() / 2);
if (i % 2 == 0) {
sm.setStorageLength(sm.getStartOffset());
} else {
sm.setStorageLength(sm.getStartOffset() / 2);
}
}
// Check all the appended data. This includes verifying access to already truncated offsets.
checkReadIndex("PostTruncate", segmentContents, context);
checkReadIndexDirect(segmentContents, context);
// Verify that truncated data is eligible for eviction, by checking that at least one Cache Entry is being removed.
for (long segmentId : segmentIds) {
val sm = context.metadata.getStreamSegmentMetadata(segmentId);
// We need to set this in order to verify cache evictions.
sm.setStorageLength(sm.getLength());
}
HashSet<Integer> deletedEntries = new HashSet<>();
context.cacheStorage.deleteCallback = deletedEntries::add;
context.cacheManager.applyCachePolicy();
AssertExtensions.assertGreaterThan("Expected at least one cache entry to be removed.", 0, deletedEntries.size());
}
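For reference, a minimal standalone sketch of the three-argument CachePolicy constructor used in testTruncate above. The parameter names in the comments (maxSize, maxGeneration, generationDuration) are assumptions inferred from how these tests use the policy, not something stated on this page:

import io.pravega.segmentstore.server.CachePolicy;
import java.time.Duration;

public class CachePolicySketch {
    public static void main(String[] args) {
        // An effectively unbounded cache: with no practical size limit and a very long
        // generation window, applyCachePolicy() in testTruncate can only evict data that
        // truncation has already made evictable, not data under cache pressure.
        CachePolicy policy = new CachePolicy(
                Long.MAX_VALUE,              // assumed: maxSize, the cache size budget in bytes
                Duration.ofMillis(1000000),  // assumed: maxGeneration, how long an idle entry may live
                Duration.ofMillis(10000));   // assumed: generationDuration, the length of one generation
        System.out.println(policy);
    }
}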
Use of io.pravega.segmentstore.server.CachePolicy in project pravega by pravega.
Class ContainerReadIndexTests, method testConcurrentReadTransactionStorageMerge.
/**
* Tests the following scenario, where the Read Index has a read from a portion in a parent segment where a transaction
* was just merged (fully in storage), but the read request might result in either an ObjectClosedException or
* StreamSegmentNotExistsException:
* * A Parent Segment has a Transaction with some data in it, and at least 1 byte of data not in cache.
* * The Transaction is begin-merged in the parent (Tier 1 only).
* * A Read Request is issued to the Parent for the range of data from the Transaction, which includes the 1 byte not in cache.
* * The Transaction is fully merged (Tier 2).
* * The Read Request is invoked and its content requested. This should correctly retrieve the data from the Parent
* Segment in Storage, and not attempt to access the now-defunct Transaction segment.
*/
@Test
public void testConcurrentReadTransactionStorageMerge() throws Exception {
    CachePolicy cachePolicy = new CachePolicy(1, Duration.ZERO, Duration.ofMillis(1));
    @Cleanup TestContext context = new TestContext(DEFAULT_CONFIG, cachePolicy);

    // Create the parent segment and one transaction.
    long parentId = createSegment(0, context);
    long transactionId = createTransaction(1, context);
    createSegmentsInStorage(context);

    ByteArraySegment writeData = getAppendData(context.metadata.getStreamSegmentMetadata(transactionId).getName(), transactionId, 0, 0);
    ReadResultEntry entry = setupMergeRead(parentId, transactionId, writeData.getCopy(), context);
    context.readIndex.completeMerge(parentId, transactionId);

    BufferView contents = entry.getContent().get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    byte[] readData = contents.getCopy();
    Assert.assertArrayEquals("Unexpected data read from the parent segment.", writeData.getCopy(), readData);
}
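The snippet above relies on the Read Index's two-phase merge. Here is a hedged sketch of that call sequence; the interface name io.pravega.segmentstore.server.ReadIndex and the exact throws clause are assumptions, and only the beginMerge and completeMerge calls themselves appear in these tests:

import io.pravega.segmentstore.server.ReadIndex;

public final class TwoPhaseMergeSketch {
    // All identifiers here are placeholders mirroring the tests on this page.
    static void mergeTransaction(ReadIndex readIndex, long parentId, long mergeOffset, long transactionId) throws Exception {
        // Phase 1 (Tier 1 only): reads on the parent over the merged range are
        // redirected to the transaction's own index.
        readIndex.beginMerge(parentId, mergeOffset, transactionId);
        // ... out of band, the transaction's data becomes part of the parent in Storage (Tier 2) ...
        // Phase 2: the redirects are resolved so subsequent reads are served from the
        // parent segment itself; the now-defunct transaction index can then be cleaned up.
        readIndex.completeMerge(parentId, transactionId);
    }
}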
Use of io.pravega.segmentstore.server.CachePolicy in project pravega by pravega.
Class ContainerReadIndexTests, method testStorageReadsConcurrent.
private void testStorageReadsConcurrent(int offsetDeltaBetweenReads, int extraAllowedStorageReads,
                                        BiConsumerWithException<TestContext, UpdateableSegmentMetadata> executeBetweenReads,
                                        BiConsumerWithException<TestContext, UpdateableSegmentMetadata> finalCheck) throws Exception {
    val maxAllowedStorageReads = 2 + extraAllowedStorageReads;
    // Set a cache size big enough to prevent the Cache Manager from enabling "essential-only" mode due to over-utilization.
    val cachePolicy = new CachePolicy(10000, 0.01, 1.0, Duration.ofMillis(10), Duration.ofMillis(10));
    @Cleanup TestContext context = new TestContext(DEFAULT_CONFIG, cachePolicy);

    // Create the segment.
    val segmentId = createSegment(0, context);
    val metadata = context.metadata.getStreamSegmentMetadata(segmentId);
    context.storage.create(metadata.getName(), TIMEOUT).join();

    // Append some data to the Read Index.
    val dataInStorage = getAppendData(metadata.getName(), segmentId, 0, 0);
    metadata.setLength(dataInStorage.getLength());
    context.readIndex.append(segmentId, 0, dataInStorage);

    // Then write it to Storage.
    context.storage.openWrite(metadata.getName())
            .thenCompose(handle -> context.storage.write(handle, 0, dataInStorage.getReader(), dataInStorage.getLength(), TIMEOUT))
            .join();
    metadata.setStorageLength(dataInStorage.getLength());

    // Then evict it from the cache.
    boolean evicted = context.cacheManager.applyCachePolicy();
    Assert.assertTrue("Expected an eviction.", evicted);

    @Cleanup("release") val firstReadBlocker = new ReusableLatch();
    @Cleanup("release") val firstRead = new ReusableLatch();
    @Cleanup("release") val secondReadBlocker = new ReusableLatch();
    @Cleanup("release") val secondRead = new ReusableLatch();
    val cacheInsertCount = new AtomicInteger();
    context.cacheStorage.insertCallback = address -> {
        if (cacheInsertCount.incrementAndGet() > 1) {
            Assert.fail("Too many cache inserts.");
        }
    };
    val storageReadCount = new AtomicInteger();
    context.storage.setReadInterceptor((segment, wrappedStorage) -> {
        int readCount = storageReadCount.incrementAndGet();
        if (readCount == 1) {
            firstRead.release();
            Exceptions.handleInterrupted(firstReadBlocker::await);
        } else if (readCount == 2) {
            secondRead.release();
            Exceptions.handleInterrupted(secondReadBlocker::await);
        } else if (readCount > maxAllowedStorageReads) {
            Assert.fail("Too many storage reads. Max allowed = " + maxAllowedStorageReads);
        }
    });

    // Initiate the first Storage Read.
    val read1Result = context.readIndex.read(segmentId, 0, dataInStorage.getLength(), TIMEOUT);
    val read1Data = new byte[dataInStorage.getLength()];
    val read1Future = CompletableFuture.runAsync(() -> read1Result.readRemaining(read1Data, TIMEOUT), executorService());
    // Wait for it to process.
    firstRead.await();

    // Initiate the second Storage Read.
    val read2Length = dataInStorage.getLength() - offsetDeltaBetweenReads;
    val read2Result = context.readIndex.read(segmentId, offsetDeltaBetweenReads, read2Length, TIMEOUT);
    val read2Data = new byte[read2Length];
    val read2Future = CompletableFuture.runAsync(() -> read2Result.readRemaining(read2Data, TIMEOUT), executorService());
    secondRead.await();

    // Unblock the first Storage Read and wait for it to complete.
    firstReadBlocker.release();
    read1Future.get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);

    // Wait for the data from the first read to be fully added to the cache. Without this, the subsequent append will not write to this entry.
    TestUtils.await(() -> {
        try {
            return context.readIndex.read(segmentId, 0, dataInStorage.getLength(), TIMEOUT).next().getType() == ReadResultEntryType.Cache;
        } catch (StreamSegmentNotExistsException ex) {
            throw new CompletionException(ex);
        }
    }, 10, TIMEOUT.toMillis());

    // If there's anything to do between the two reads, do it now.
    executeBetweenReads.accept(context, metadata);

    // Unblock the second Storage Read.
    secondReadBlocker.release();
    read2Future.get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);

    // Perform the final check.
    finalCheck.accept(context, metadata);
    Assert.assertEquals("Unexpected number of storage reads.", maxAllowedStorageReads, storageReadCount.get());
    Assert.assertEquals("Unexpected number of cache inserts.", 1, cacheInsertCount.get());
}
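The firstRead/firstReadBlocker and secondRead/secondReadBlocker pairs above form a two-way hand-off gate: the intercepted thread announces that it has reached the interception point, then parks until the coordinator releases it. A standalone sketch of that pattern, assuming ReusableLatch lives in io.pravega.common.util and blocks in await() until release() is called:

import io.pravega.common.Exceptions;
import io.pravega.common.util.ReusableLatch;
import java.util.concurrent.CompletableFuture;

public class LatchGateSketch {
    public static void main(String[] args) {
        ReusableLatch stepReached = new ReusableLatch(); // worker -> coordinator: "I am at the step."
        ReusableLatch stepBlocker = new ReusableLatch(); // coordinator -> worker: "you may proceed."
        CompletableFuture<Void> worker = CompletableFuture.runAsync(() -> {
            stepReached.release();                            // announce arrival at the interception point
            Exceptions.handleInterrupted(stepBlocker::await); // park until the coordinator unblocks us
            // ... the intercepted operation would continue here ...
        });
        Exceptions.handleInterrupted(stepReached::await); // wait until the worker is parked
        // The coordinator can now inspect or mutate shared state deterministically.
        stepBlocker.release(); // unblock the worker
        worker.join();
    }
}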
Use of io.pravega.segmentstore.server.CachePolicy in project pravega by pravega.
Class ContainerReadIndexTests, method testConcurrentEvictionTransactionStorageMerge.
/**
* Tests a scenario where a call to {@link StreamSegmentReadIndex#completeMerge} executes concurrently with a
* CacheManager eviction. The Cache Manager must not evict the data for recently transferred entries, even if they
* would otherwise be eligible for eviction in the source segment.
*/
@Test
public void testConcurrentEvictionTransactionStorageMerge() throws Exception {
    val mergeOffset = 1;
    val appendLength = 1;
    CachePolicy cachePolicy = new CachePolicy(1, Duration.ZERO, Duration.ofMillis(1));
    @Cleanup TestContext context = new TestContext(DEFAULT_CONFIG, cachePolicy);

    // Create the parent (target) segment and one transaction (source) segment.
    long targetId = createSegment(0, context);
    long sourceId = createTransaction(1, context);
    val targetMetadata = context.metadata.getStreamSegmentMetadata(targetId);
    val sourceMetadata = context.metadata.getStreamSegmentMetadata(sourceId);
    createSegmentsInStorage(context);

    // Write something to the parent segment.
    appendSingleWrite(targetId, new ByteArraySegment(new byte[mergeOffset]), context);
    context.storage.openWrite(targetMetadata.getName())
            .thenCompose(handle -> context.storage.write(handle, 0, new ByteArrayInputStream(new byte[mergeOffset]), mergeOffset, TIMEOUT))
            .join();

    // Write something to the transaction, but do not write anything to Storage - we want to verify we don't even
    // try to reach in there.
    val sourceContents = getAppendData(context.metadata.getStreamSegmentMetadata(sourceId).getName(), sourceId, 0, 0);
    appendSingleWrite(sourceId, sourceContents, context);
    sourceMetadata.setStorageLength(sourceMetadata.getLength());

    // Seal & begin-merge the transaction (do not seal in Storage).
    sourceMetadata.markSealed();
    targetMetadata.setLength(sourceMetadata.getLength() + mergeOffset);
    context.readIndex.beginMerge(targetId, mergeOffset, sourceId);
    sourceMetadata.markMerged();
    sourceMetadata.markDeleted();

    // Trigger a Complete Merge. We want to intercept and pause it immediately before the source index is
    // unregistered from the Cache Manager.
    @Cleanup("release") val unregisterCalled = new ReusableLatch();
    @Cleanup("release") val unregisterBlocker = new ReusableLatch();
    context.cacheManager.setUnregisterInterceptor(c -> {
        unregisterCalled.release();
        Exceptions.handleInterrupted(unregisterBlocker::await);
    });
    val completeMerge = CompletableFuture.runAsync(() -> {
        try {
            context.readIndex.completeMerge(targetId, sourceId);
        } catch (Exception ex) {
            throw new CompletionException(ex);
        }
    }, executorService());

    // Clear the cache. The source Read Index is still registered in the Cache Manager - we want to ensure that any
    // eviction happening at this point will not delete anything from the Cache that we don't want deleted.
    unregisterCalled.await();
    context.cacheManager.applyCachePolicy();

    // Wait for the operation to complete.
    unregisterBlocker.release();
    completeMerge.get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);

    // Verify that we can append. Appending modifies the last cache entry; if that entry had been evicted or
    // erroneously deleted, this would no longer work.
    val appendOffset = (int) targetMetadata.getLength();
    val appendData = new byte[appendLength];
    appendData[0] = (byte) 23;
    targetMetadata.setLength(appendOffset + appendLength);
    context.readIndex.append(targetId, appendOffset, new ByteArraySegment(appendData));

    // Issue a read and verify we can read everything that we wrote. If anything had been evicted or erroneously
    // deleted from the cache, this would result in an error.
    byte[] expectedData = new byte[appendOffset + appendLength];
    sourceContents.copyTo(expectedData, mergeOffset, sourceContents.getLength());
    System.arraycopy(appendData, 0, expectedData, appendOffset, appendLength);
    ReadResult rr = context.readIndex.read(targetId, 0, expectedData.length, TIMEOUT);
    Assert.assertTrue("Parent Segment read indicates no data available.", rr.hasNext());
    byte[] actualData = new byte[expectedData.length];
    rr.readRemaining(actualData, TIMEOUT);
    Assert.assertArrayEquals("Unexpected data read back.", expectedData, actualData);
}
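A side note on the completeMerge wrapper above: Runnable.run() cannot declare checked exceptions, so the test rethrows them as CompletionException to surface failures through the future. A minimal standalone illustration of that idiom (doWork is a hypothetical placeholder):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;

public class AsyncCheckedExceptionSketch {
    static void doWork() throws Exception {
        // Placeholder for a call that declares checked exceptions, like completeMerge above.
    }

    public static void main(String[] args) {
        CompletableFuture<Void> future = CompletableFuture.runAsync(() -> {
            try {
                doWork();
            } catch (Exception ex) {
                throw new CompletionException(ex); // unchecked wrapper; the future completes exceptionally
            }
        });
        future.join(); // any failure inside doWork() re-emerges here as a CompletionException
    }
}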
Use of io.pravega.segmentstore.server.CachePolicy in project pravega by pravega.
Class ContainerReadIndexTests, method testCacheEssentialOnlyMode.
/**
* Tests the ability of the Read Index to handle "Essential-Only" cache mode, where only cache entries that are not
* yet persisted to Storage may be added to the cache.
*/
@Test
public void testCacheEssentialOnlyMode() throws Exception {
    val rnd = new Random(0);
    // Cache block size.
    val appendSize = 4 * 1024;
    val segmentLength = 10 * appendSize;
    // Set up a cache policy that will keep at most 4 blocks in the cache, and enter essential-only mode at 4 blocks too.
    // NOTE: the block count includes the metadata block (internal to the cache), so only 3 blocks are usable.
    CachePolicy cachePolicy = new CachePolicy(segmentLength, 0.3, 0.4, Duration.ofHours(1000), Duration.ofSeconds(1));
    @Cleanup TestContext context = new TestContext(DEFAULT_CONFIG, cachePolicy);
    // Not blocking anything now.
    context.cacheStorage.appendReturnBlocker = null;

    // Create a segment, generate some content for it, set up its metadata and write 40% of it to Storage.
    long segmentId = createSegment(0, context);
    val segmentMetadata = context.metadata.getStreamSegmentMetadata(segmentId);
    createSegmentsInStorage(context);
    val segmentData = new byte[segmentLength];
    rnd.nextBytes(segmentData);
    val part1 = new ByteArraySegment(segmentData, 0, appendSize);
    val part2 = new ByteArraySegment(segmentData, appendSize, appendSize);
    val part3 = new ByteArraySegment(segmentData, 2 * appendSize, appendSize);
    val part4 = new ByteArraySegment(segmentData, 3 * appendSize, appendSize);
    val part5 = new ByteArraySegment(segmentData, 4 * appendSize, appendSize);
    segmentMetadata.setLength(segmentLength);
    segmentMetadata.setStorageLength(part1.getLength() + part2.getLength());
    context.storage.openWrite(segmentMetadata.getName())
            .thenCompose(h -> context.storage.write(h, 0, new ByteArrayInputStream(segmentData), (int) segmentMetadata.getStorageLength(), TIMEOUT))
            .join();
    val insertCount = new AtomicInteger(0);
    val storageReadCount = new AtomicInteger(0);
    context.cacheStorage.insertCallback = address -> insertCount.incrementAndGet();
    context.storage.setReadInterceptor((segment, wrappedStorage) -> storageReadCount.incrementAndGet());

    // Helper for reading a segment part.
    BiConsumer<Long, BufferView> readPart = (partOffset, partContents) -> {
        try {
            @Cleanup val rr = context.readIndex.read(segmentId, partOffset, partContents.getLength(), TIMEOUT);
            val readData = rr.readRemaining(partContents.getLength(), TIMEOUT);
            Assert.assertEquals(partContents, BufferView.wrap(readData));
        } catch (Exception ex) {
            throw new CompletionException(ex);
        }
    };

    // Read parts 1 and 2 (separately). They should be cached as individual entries.
    readPart.accept(0L, part1);
    Assert.assertEquals(1, storageReadCount.get());
    // Cache insertion is done asynchronously; wait until the entry has actually been written to the cache.
    AssertExtensions.assertEventuallyEquals(1, insertCount::get, TIMEOUT.toMillis());
    AssertExtensions.assertEventuallyEquals(1, context.readIndex.getIndex(segmentId).getSummary()::size, TIMEOUT.toMillis());
    // No eviction, but increase the generation.
    boolean evicted = context.cacheManager.applyCachePolicy();
    Assert.assertFalse("Not expecting an eviction now.", evicted);
    readPart.accept((long) part1.getLength(), part2);
    // We expect 2 storage reads and also 2 cache inserts.
    Assert.assertEquals(2, storageReadCount.get());
    // This one is done asynchronously.
    AssertExtensions.assertEventuallyEquals(2, insertCount::get, TIMEOUT.toMillis());
    AssertExtensions.assertEventuallyEquals(2, context.readIndex.getIndex(segmentId).getSummary()::size, TIMEOUT.toMillis());
    // No eviction, but increase the generation.
    evicted = context.cacheManager.applyCachePolicy();
    Assert.assertFalse("Not expecting an eviction now.", evicted);

    // Append parts 3, 4 and 5.
    context.readIndex.append(segmentId, segmentMetadata.getStorageLength(), part3);
    // This insertion is done synchronously.
    Assert.assertEquals(3, insertCount.get());
    // Eviction (part 1) + increase the generation.
    evicted = context.cacheManager.applyCachePolicy();
    Assert.assertTrue("Expected an eviction after writing 3 blocks.", evicted);
    context.readIndex.append(segmentId, segmentMetadata.getStorageLength() + part3.getLength(), part4);
    Assert.assertEquals("Expected an insertion for appends even in essential-only mode.", 4, insertCount.get());
    // Eviction (part 2) + increase the generation.
    evicted = context.cacheManager.applyCachePolicy();
    Assert.assertTrue("Expected an eviction after writing 4 blocks.", evicted);
    context.readIndex.append(segmentId, segmentMetadata.getStorageLength() + part3.getLength() + part4.getLength(), part5);
    Assert.assertEquals("Expected an insertion for appends even in essential-only mode.", 5, insertCount.get());
    // Nothing to evict.
    evicted = context.cacheManager.applyCachePolicy();
    Assert.assertFalse("Not expecting an eviction after writing 5 blocks.", evicted);
    Assert.assertTrue("Expected to be in essential-only mode after pinning 3 blocks.", context.cacheManager.isEssentialEntriesOnly());

    // Verify that re-reading parts 1 and 2 results in no cache inserts.
    insertCount.set(0);
    storageReadCount.set(0);
    int expectedReadCount = 0;
    for (int i = 0; i < 5; i++) {
        readPart.accept(0L, part1);
        readPart.accept((long) part1.getLength(), part2);
        expectedReadCount += 2;
    }
    Assert.assertTrue("Not expected to have exited essential-only mode.", context.cacheManager.isEssentialEntriesOnly());
    Assert.assertEquals("Unexpected number of storage reads in essential-only mode.", expectedReadCount, storageReadCount.get());
    Assert.assertEquals("Unexpected number of cache inserts in essential-only mode.", 0, insertCount.get());
}
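To make the block arithmetic in the policy comment concrete, a small standalone sketch follows. That the second and third CachePolicy arguments act as target and maximum utilization fractions of maxSize is an assumption inferred from this test's behavior, not confirmed by this page:

public class EssentialOnlyThresholdSketch {
    public static void main(String[] args) {
        int cacheBlockSize = 4 * 1024;       // matches appendSize in the test above
        long maxSize = 10L * cacheBlockSize; // segmentLength = 40960 bytes
        double targetUtilization = 0.3;      // assumed meaning of the 2nd constructor argument
        double maxUtilization = 0.4;         // assumed meaning of the 3rd constructor argument
        long evictDownTo = (long) (targetUtilization * maxSize); // 12288 bytes = 3 blocks of 4KB
        long essentialAbove = (long) (maxUtilization * maxSize); // 16384 bytes = 4 blocks of 4KB
        // One of the 4 allowed blocks is the cache's internal metadata block, leaving 3 usable:
        // exactly the 3 appended (not-yet-persisted) parts that pin the cache into essential-only mode.
        System.out.printf("evict down to %d bytes (%d blocks); essential-only above %d bytes (%d blocks)%n",
                evictDownTo, evictDownTo / cacheBlockSize, essentialAbove, essentialAbove / cacheBlockSize);
    }
}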