Use of io.pravega.segmentstore.server.SegmentMetadata in project pravega by pravega.
Class ContainerReadIndexTests, method testCacheEssentialOnlyMode.
/**
* Tests the ability of the Read Index to handle "Essential-Only" cache mode, where only cache entries that are not
* yet persisted to Storage may be added to the cache.
*/
@Test
public void testCacheEssentialOnlyMode() throws Exception {
    val rnd = new Random(0);
    // Cache block size.
    val appendSize = 4 * 1024;
    val segmentLength = 10 * appendSize;
    // Set up a cache policy that will keep at most 4 blocks in the cache and enter essential-only mode after 4 blocks as well.
    // NOTE: this count includes the metadata block (internal to the cache), so there are 3 usable blocks.
    CachePolicy cachePolicy = new CachePolicy(segmentLength, 0.3, 0.4, Duration.ofHours(1000), Duration.ofSeconds(1));
    @Cleanup TestContext context = new TestContext(DEFAULT_CONFIG, cachePolicy);
    // Not blocking anything now.
    context.cacheStorage.appendReturnBlocker = null;
    // Create a segment, generate some content for it, set up its metadata and write 40% of it to Storage.
    long segmentId = createSegment(0, context);
    val segmentMetadata = context.metadata.getStreamSegmentMetadata(segmentId);
    createSegmentsInStorage(context);
    val segmentData = new byte[segmentLength];
    rnd.nextBytes(segmentData);
    val part1 = new ByteArraySegment(segmentData, 0, appendSize);
    val part2 = new ByteArraySegment(segmentData, appendSize, appendSize);
    val part3 = new ByteArraySegment(segmentData, 2 * appendSize, appendSize);
    val part4 = new ByteArraySegment(segmentData, 3 * appendSize, appendSize);
    val part5 = new ByteArraySegment(segmentData, 4 * appendSize, appendSize);
    segmentMetadata.setLength(segmentLength);
    segmentMetadata.setStorageLength(part1.getLength() + part2.getLength());
    context.storage.openWrite(segmentMetadata.getName()).thenCompose(h -> context.storage.write(h, 0, new ByteArrayInputStream(segmentData), (int) segmentMetadata.getStorageLength(), TIMEOUT)).join();
    val insertCount = new AtomicInteger(0);
    val storageReadCount = new AtomicInteger(0);
    context.cacheStorage.insertCallback = address -> insertCount.incrementAndGet();
    context.storage.setReadInterceptor((segment, wrappedStorage) -> storageReadCount.incrementAndGet());
    // Helper for reading a segment part.
    BiConsumer<Long, BufferView> readPart = (partOffset, partContents) -> {
        try {
            @Cleanup val rr = context.readIndex.read(segmentId, partOffset, partContents.getLength(), TIMEOUT);
            val readData = rr.readRemaining(partContents.getLength(), TIMEOUT);
            Assert.assertEquals(partContents, BufferView.wrap(readData));
        } catch (Exception ex) {
            throw new CompletionException(ex);
        }
    };
    // Read parts 1 and 2 (separately). They should be cached as individual entries.
    readPart.accept(0L, part1);
    Assert.assertEquals(1, storageReadCount.get());
    // Cache insertion is done asynchronously. Wait until it completes before verifying.
    AssertExtensions.assertEventuallyEquals(1, insertCount::get, TIMEOUT.toMillis());
    AssertExtensions.assertEventuallyEquals(1, context.readIndex.getIndex(segmentId).getSummary()::size, TIMEOUT.toMillis());
    // No eviction, but increase the generation.
    boolean evicted = context.cacheManager.applyCachePolicy();
    Assert.assertFalse("Not expecting an eviction now.", evicted);
    readPart.accept((long) part1.getLength(), part2);
    // We expect 2 storage reads and also 2 cache inserts.
    Assert.assertEquals(2, storageReadCount.get());
    // This one is done asynchronously.
    AssertExtensions.assertEventuallyEquals(2, insertCount::get, TIMEOUT.toMillis());
    AssertExtensions.assertEventuallyEquals(2, context.readIndex.getIndex(segmentId).getSummary()::size, TIMEOUT.toMillis());
    // No eviction, but increase the generation.
    evicted = context.cacheManager.applyCachePolicy();
    Assert.assertFalse("Not expecting an eviction now.", evicted);
    // Append parts 3, 4 and 5.
    context.readIndex.append(segmentId, segmentMetadata.getStorageLength(), part3);
    // This insertion is done synchronously.
    Assert.assertEquals(3, insertCount.get());
    // Eviction (part 1) + increase the generation.
    evicted = context.cacheManager.applyCachePolicy();
    Assert.assertTrue("Expected an eviction after writing 3 blocks.", evicted);
    context.readIndex.append(segmentId, segmentMetadata.getStorageLength() + part3.getLength(), part4);
    Assert.assertEquals("Expected an insertion for appends even in essential-only mode.", 4, insertCount.get());
    // Eviction (part 2) + increase the generation.
    evicted = context.cacheManager.applyCachePolicy();
    Assert.assertTrue("Expected an eviction after writing 4 blocks.", evicted);
    context.readIndex.append(segmentId, segmentMetadata.getStorageLength() + part3.getLength() + part4.getLength(), part5);
    Assert.assertEquals("Expected an insertion for appends even in essential-only mode.", 5, insertCount.get());
    // Nothing to evict.
    evicted = context.cacheManager.applyCachePolicy();
    Assert.assertFalse("Not expecting an eviction after writing 5 blocks.", evicted);
    Assert.assertTrue("Expected to be in essential-only mode after pinning 3 blocks.", context.cacheManager.isEssentialEntriesOnly());
    // Verify that re-reading parts 1 and 2 results in no cache inserts.
    insertCount.set(0);
    storageReadCount.set(0);
    int expectedReadCount = 0;
    for (int i = 0; i < 5; i++) {
        readPart.accept(0L, part1);
        readPart.accept((long) part1.getLength(), part2);
        expectedReadCount += 2;
    }
    Assert.assertTrue("Not expecting to have exited essential-only mode.", context.cacheManager.isEssentialEntriesOnly());
    Assert.assertEquals("Unexpected number of storage reads in essential-only mode.", expectedReadCount, storageReadCount.get());
    Assert.assertEquals("Unexpected number of cache inserts in essential-only mode.", 0, insertCount.get());
}
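To make the behavior exercised above easier to follow, here is a minimal, self-contained sketch of the "essential-only" admission idea: once the cache is considered full, only data that has not yet been persisted to Storage is admitted, while re-reads of already-persisted data are served without inserting anything. This is an illustration only, not Pravega's CacheManager; every name in it is hypothetical.

import java.util.LinkedHashMap;
import java.util.Map;

// Hypothetical sketch of essential-only admission. Not Pravega code.
class EssentialOnlyCacheSketch {
    private final int maxEntries;
    private final Map<Long, byte[]> entries = new LinkedHashMap<>();

    EssentialOnlyCacheSketch(int maxEntries) {
        this.maxEntries = maxEntries;
    }

    // Analogous to the cacheManager.isEssentialEntriesOnly() check used in the test above.
    boolean isEssentialEntriesOnly() {
        return this.entries.size() >= this.maxEntries;
    }

    // Returns true if the data was inserted. In essential-only mode, data that is already
    // persisted to Storage is rejected, which is why the test sees storage re-reads but
    // zero cache inserts for parts 1 and 2.
    boolean insert(long offset, byte[] data, boolean persistedToStorage) {
        if (isEssentialEntriesOnly() && persistedToStorage) {
            return false;
        }
        this.entries.put(offset, data);
        return true;
    }
}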
Use of io.pravega.segmentstore.server.SegmentMetadata in project pravega by pravega.
Class ContainerReadIndexTests, method testMergeReadResultConcurrentCompleteMerge.
/**
 * Tests a case where a Read Result is about to fetch a {@link RedirectedReadResultEntry} at the same time as the source
 * {@link StreamSegmentReadIndex} is about to close.
 * Note: this test overlaps with {@link #testMergeReadResultCancelledOnClose()}, but this one is more complex, so it
 * is still worth keeping the other one for its simplicity.
 */
@Test
public void testMergeReadResultConcurrentCompleteMerge() throws Exception {
    @Cleanup TestContext context = new TestContext();
    val spiedIndex = Mockito.spy(context.readIndex);
    // Create the parent segment and one transaction.
    long targetSegmentId = createSegment(0, context);
    long sourceSegmentId = createTransaction(1, context);
    createSegmentsInStorage(context);
    val targetMetadata = context.metadata.getStreamSegmentMetadata(targetSegmentId);
    val sourceMetadata = context.metadata.getStreamSegmentMetadata(sourceSegmentId);
    // Write something to the source segment - only in Storage.
    val sourceData = getAppendData(context.metadata.getStreamSegmentMetadata(sourceSegmentId).getName(), sourceSegmentId, 0, 0);
    val sourceWriteHandle = context.storage.openWrite(sourceMetadata.getName()).join();
    context.storage.write(sourceWriteHandle, 0, sourceData.getReader(), sourceData.getLength(), TIMEOUT).join();
    // Update the source metadata to reflect the Storage situation.
    sourceMetadata.setLength(sourceData.getLength());
    sourceMetadata.setStorageLength(sourceData.getLength());
    sourceMetadata.markSealed();
    // Intercept ContainerReadIndex.createSegmentIndex to wrap the actual StreamSegmentReadIndex with a spy.
    val spiedIndices = Collections.synchronizedMap(new HashMap<Long, StreamSegmentReadIndex>());
    Mockito.doAnswer(arg1 -> {
        val sm = (SegmentMetadata) arg1.getArgument(1);
        StreamSegmentReadIndex result = (StreamSegmentReadIndex) arg1.callRealMethod();
        if (result == null) {
            spiedIndices.remove(sm.getId());
        } else if (spiedIndices.get(sm.getId()) == null) {
            result = Mockito.spy(result);
            spiedIndices.put(sm.getId(), result);
        }
        return spiedIndices.get(sm.getId());
    }).when(spiedIndex).createSegmentIndex(Mockito.any(ReadIndexConfig.class), Mockito.any(SegmentMetadata.class), Mockito.any(CacheStorage.class), Mockito.any(ReadOnlyStorage.class), Mockito.any(ScheduledExecutorService.class), Mockito.anyBoolean());
    // Initiate the merge.
    targetMetadata.setLength(sourceMetadata.getLength());
    spiedIndex.beginMerge(targetSegmentId, 0, sourceSegmentId);
    sourceMetadata.markMerged();
    // Sanity check. Before completeMerge, we should get a RedirectedReadResultEntry; closing the Read Result
    // should cancel that entry.
    @Cleanup val rrBeforeComplete = spiedIndex.read(targetSegmentId, 0, sourceData.getLength(), TIMEOUT);
    val reBeforeComplete = rrBeforeComplete.next();
    Assert.assertTrue(reBeforeComplete instanceof RedirectedReadResultEntry);
    rrBeforeComplete.close();
    Assert.assertTrue(reBeforeComplete.getContent().isCancelled());
    // Intercept the source segment index's getSingleReadResultEntry to "concurrently" complete its merger.
    Mockito.doAnswer(arg2 -> {
        // Simulate the Storage merger.
        val targetWriteHandle = context.storage.openWrite(targetMetadata.getName()).join();
        context.storage.write(targetWriteHandle, 0, sourceData.getReader(), sourceData.getLength(), TIMEOUT).join();
        // Update the metadata.
        sourceMetadata.markDeleted();
        targetMetadata.setStorageLength(sourceMetadata.getStorageLength());
        spiedIndex.completeMerge(targetSegmentId, sourceSegmentId);
        return arg2.callRealMethod();
    }).when(spiedIndices.get(sourceSegmentId)).getSingleReadResultEntry(Mockito.anyLong(), Mockito.anyInt(), Mockito.anyBoolean());
    // Issue the read again. The interceptor above completes the merge while the entry is being fetched, so this
    // time we should get a StorageReadResultEntry whose content can be retrieved.
    @Cleanup val rrAfterComplete = spiedIndex.read(targetSegmentId, 0, sourceData.getLength(), TIMEOUT);
    val reAfterComplete = rrAfterComplete.next();
    Assert.assertTrue(reAfterComplete instanceof StorageReadResultEntry);
    reAfterComplete.requestContent(TIMEOUT);
    reAfterComplete.getContent().get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
}
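Both interceptions above rely on the same Mockito pattern: wrap a real object in a spy, then use doAnswer to run extra work before delegating to the real method via callRealMethod(). Below is a minimal, self-contained sketch of that pattern using a trivial Greeter class that is not part of Pravega.

import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.spy;

import java.util.concurrent.atomic.AtomicInteger;

class Greeter {
    public String greet(String name) {
        return "Hello, " + name;
    }
}

class SpyInterceptionSketch {
    public static void main(String[] args) {
        Greeter spied = spy(new Greeter());
        AtomicInteger interceptions = new AtomicInteger();
        // Run side-effecting code when the method is called, then delegate to the real
        // implementation - the same shape as the getSingleReadResultEntry interceptor above,
        // which completes the merge before calling the real method.
        doAnswer(invocation -> {
            interceptions.incrementAndGet();
            return invocation.callRealMethod();
        }).when(spied).greet(anyString());

        System.out.println(spied.greet("world")); // Hello, world
        System.out.println(interceptions.get());  // 1
    }
}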
Use of io.pravega.segmentstore.server.SegmentMetadata in project pravega by pravega.
Class StreamSegmentReadIndex, method beginMerge.
/**
* Executes Step 1 of the 2-Step Merge Process.
* The StreamSegments are merged (Source->Target@Offset) in Metadata and a ReadIndex Redirection is put in place.
* At this stage, the Source still exists as a physical object in Storage, and we need to keep its ReadIndex around, pointing
* to the old object.
*
* @param offset The offset within the StreamSegment to merge at.
* @param sourceStreamSegmentIndex The Read Index to begin merging.
 * @throws NullPointerException If sourceStreamSegmentIndex is null.
* @throws IllegalStateException If the current StreamSegment is a child StreamSegment.
* @throws IllegalArgumentException If the operation would cause writing beyond the StreamSegment's Length.
* @throws IllegalArgumentException If the offset is invalid (does not match the previous append offset).
* @throws IllegalArgumentException If sourceStreamSegmentIndex refers to a StreamSegment that is already merged.
* @throws IllegalArgumentException If sourceStreamSegmentIndex refers to a StreamSegment that has a different parent
* StreamSegment than the current index's one.
*/
void beginMerge(long offset, StreamSegmentReadIndex sourceStreamSegmentIndex) {
    long traceId = LoggerHelpers.traceEnterWithContext(log, this.traceObjectId, "beginMerge", offset, sourceStreamSegmentIndex.traceObjectId);
    Exceptions.checkNotClosed(this.closed, this);
    Exceptions.checkArgument(!sourceStreamSegmentIndex.isMerged(), "sourceStreamSegmentIndex", "Given StreamSegmentReadIndex is already merged.");
    SegmentMetadata sourceMetadata = sourceStreamSegmentIndex.metadata;
    Exceptions.checkArgument(sourceMetadata.isSealed(), "sourceStreamSegmentIndex", "Given StreamSegmentReadIndex refers to a StreamSegment that is not sealed.");
    long sourceLength = sourceStreamSegmentIndex.getSegmentLength();
    RedirectIndexEntry newEntry = new RedirectIndexEntry(offset, sourceStreamSegmentIndex);
    if (sourceLength == 0) {
        // Nothing to do. Just record that there is a merge for this source Segment id.
        return;
    }
    // Metadata check can be done outside the write lock.
    // Adding at the end means that we always need to "catch up" with Length. Check to see if adding
    // this entry will make us catch up to it or not.
    long ourLength = getSegmentLength();
    long endOffset = offset + sourceLength;
    Exceptions.checkArgument(endOffset <= ourLength, "offset", "The given range of bytes (%d-%d) is beyond the StreamSegment Length (%d).", offset, endOffset, ourLength);
    // Check and record the merger (optimistically).
    log.debug("{}: BeginMerge (Offset = {}, Length = {}).", this.traceObjectId, offset, newEntry.getLength());
    synchronized (this.lock) {
        Exceptions.checkArgument(!this.pendingMergers.containsKey(sourceMetadata.getId()), "sourceStreamSegmentIndex", "Given StreamSegmentReadIndex is already merged or in the process of being merged into this one.");
        this.pendingMergers.put(sourceMetadata.getId(), new PendingMerge(newEntry.key()));
        try {
            ReadIndexEntry oldEntry = addToIndex(newEntry);
            assert oldEntry == null : String.format("Added a new entry in the ReadIndex that overrode an existing element. New = %s, Old = %s.", newEntry, oldEntry);
        } catch (Exception ex) {
            // If the merger failed, roll back the markers.
            this.pendingMergers.remove(sourceMetadata.getId());
            throw ex;
        }
    }
    this.lastAppendedOffset.set(newEntry.getLastStreamSegmentOffset());
    LoggerHelpers.traceLeave(log, this.traceObjectId, "beginMerge", traceId);
}
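As a rough illustration of the 2-step merge contract described in the javadoc above (not Pravega's actual ReadIndexEntry machinery; all names here are hypothetical), step 1 installs a redirect to the source index while the source still exists in Storage, and step 2 replaces that redirect once the physical merge has completed.

import java.util.HashMap;
import java.util.Map;

// Hypothetical toy index illustrating the 2-step merge. Not Pravega code.
class TwoStepMergeSketch {
    private static class Entry {
        final byte[] data;                   // non-null once the target owns the data
        final TwoStepMergeSketch redirectTo; // non-null while the merge is pending

        Entry(byte[] data, TwoStepMergeSketch redirectTo) {
            this.data = data;
            this.redirectTo = redirectTo;
        }
    }

    private final Map<Long, Entry> index = new HashMap<>();

    // Step 1: record the merger; reads at this offset are redirected to the source index.
    void beginMerge(long offset, TwoStepMergeSketch source) {
        this.index.put(offset, new Entry(null, source));
    }

    // Step 2: the source has been merged into the target in Storage; own the data directly.
    void completeMerge(long offset, byte[] mergedData) {
        this.index.put(offset, new Entry(mergedData, null));
    }

    // Reads follow the redirect while the merge is pending (offset re-basing into the
    // source index is omitted for brevity).
    byte[] read(long offset) {
        Entry e = this.index.get(offset);
        if (e == null) {
            return null;
        }
        return e.redirectTo != null ? e.redirectTo.read(offset) : e.data;
    }
}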