Example 16 with UpdateableSegmentMetadata

use of io.pravega.segmentstore.server.UpdateableSegmentMetadata in project pravega by pravega.

the class OperationLogTestBase method createStreamSegmentsInMetadata.

// region Creating Segments
/**
 * Updates the given Container Metadata to have a number of StreamSegments. All created StreamSegments will have
 * Ids from 0 (inclusive) to streamSegmentCount (exclusive).
 */
HashSet<Long> createStreamSegmentsInMetadata(int streamSegmentCount, UpdateableContainerMetadata containerMetadata) {
    assert streamSegmentCount <= MAX_SEGMENT_COUNT : "cannot have more than " + MAX_SEGMENT_COUNT + " StreamSegments for this test.";
    HashSet<Long> result = new HashSet<>();
    for (long streamSegmentId = 0; streamSegmentId < streamSegmentCount; streamSegmentId++) {
        result.add(streamSegmentId);
        String name = getStreamSegmentName(streamSegmentId);
        UpdateableSegmentMetadata segmentMetadata = containerMetadata.mapStreamSegmentId(name, streamSegmentId);
        segmentMetadata.setLength(0);
        segmentMetadata.setStorageLength(0);
    }
    return result;
}
Also used : UpdateableSegmentMetadata(io.pravega.segmentstore.server.UpdateableSegmentMetadata) AtomicLong(java.util.concurrent.atomic.AtomicLong) HashSet(java.util.HashSet)
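
A minimal usage sketch for this helper (hypothetical; not part of the test base). It assumes, as the Aggregations section at the end of this page suggests, that MetadataBuilder (io.pravega.segmentstore.server.MetadataBuilder) builds an UpdateableContainerMetadata; the constructor argument shown here (a container id) is also an assumption.

void exampleCreateSegments() {
    // Map ten StreamSegments into a fresh container metadata.
    UpdateableContainerMetadata containerMetadata = new MetadataBuilder(0).build();
    HashSet<Long> segmentIds = createStreamSegmentsInMetadata(10, containerMetadata);
    for (long segmentId : segmentIds) {
        // Each freshly mapped segment starts empty: the helper set Length (and StorageLength) to 0.
        assert containerMetadata.getStreamSegmentMetadata(segmentId).getLength() == 0;
    }
}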

Example 17 with UpdateableSegmentMetadata

use of io.pravega.segmentstore.server.UpdateableSegmentMetadata in project pravega by pravega.

the class OperationLogTestBase method createTransactionsInMetadata.

/**
 * Updates the given Container Metadata to have a number of Transactions mapped to the given StreamSegment Ids.
 */
AbstractMap<Long, Long> createTransactionsInMetadata(HashSet<Long> streamSegmentIds, int transactionsPerStreamSegment, UpdateableContainerMetadata containerMetadata) {
    assert transactionsPerStreamSegment <= MAX_SEGMENT_COUNT : "cannot have more than " + MAX_SEGMENT_COUNT + " Transactions per StreamSegment for this test.";
    HashMap<Long, Long> result = new HashMap<>();
    for (long streamSegmentId : streamSegmentIds) {
        String streamSegmentName = containerMetadata.getStreamSegmentMetadata(streamSegmentId).getName();
        for (int i = 0; i < transactionsPerStreamSegment; i++) {
            long transactionId = getTransactionId(streamSegmentId, i);
            assert result.put(transactionId, streamSegmentId) == null : "duplicate TransactionId generated: " + transactionId;
            assert !streamSegmentIds.contains(transactionId) : "duplicate StreamSegmentId (Transaction) generated: " + transactionId;
            String transactionName = StreamSegmentNameUtils.getTransactionNameFromId(streamSegmentName, UUID.randomUUID());
            UpdateableSegmentMetadata transactionMetadata = containerMetadata.mapStreamSegmentId(transactionName, transactionId, streamSegmentId);
            transactionMetadata.setLength(0);
            transactionMetadata.setStorageLength(0);
        }
    }
    return result;
}
Also used : UpdateableSegmentMetadata(io.pravega.segmentstore.server.UpdateableSegmentMetadata) HashMap(java.util.HashMap) AtomicLong(java.util.concurrent.atomic.AtomicLong)
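
The two asserts above require getTransactionId(streamSegmentId, i) to produce ids that are unique and disjoint from the parent StreamSegment ids (0..MAX_SEGMENT_COUNT-1). A hypothetical scheme that satisfies both constraints (the test base's actual helper may differ):

// Hypothetical: offset transaction ids past the range used for parent segments.
// With streamSegmentId < MAX_SEGMENT_COUNT and i < MAX_SEGMENT_COUNT, every id is
// at least MAX_SEGMENT_COUNT (so it cannot collide with a parent segment id) and
// each (streamSegmentId, i) pair maps to a distinct value.
long getTransactionId(long streamSegmentId, int i) {
    return MAX_SEGMENT_COUNT * (streamSegmentId + 1) + i;
}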

Example 18 with UpdateableSegmentMetadata

use of io.pravega.segmentstore.server.UpdateableSegmentMetadata in project pravega by pravega.

the class ContainerReadIndexTests method testCacheEviction.

/**
 * Tests the ability to evict entries from the ReadIndex under various conditions:
 * * If an entry is aged out
 * * If an entry is pushed out because of cache space pressure.
 * <p>
 * This also verifies that certain entries, such as RedirectReadIndexEntries and entries after the Storage Offset are
 * not removed.
 * <p>
 * This test proceeds as follows (it is subtle, because there are few ways to hook into the ReadIndex and observe what it is doing):
 * 1. It creates a bunch of segments, and populates them in storage (each) up to offset N/2-1 (this is called pre-storage)
 * 2. It populates the ReadIndex for each of those segments from offset N/2 to offset N-1 (this is called post-storage)
 * 3. It loads all the data from Storage into the ReadIndex, in entries of size equal to those already loaded in step #2.
 * 3a. At this point, all the entries added in step #2 have Generations 0..A/4-1, and those added in step #3 have Generations A/4..A-1.
 * 4. Append more data at the end. This forces the generation to increase to 1.25A.
 * 4a. Nothing should be evicted from the cache now, since the earliest items are all post-storage.
 * 5. We 'touch' (read) the first 1/3 of pre-storage entries (offsets 0..N/6).
 * 5a. At this point, those entries (offsets 0..N/6) will have the newest generations (1.25A..1.5A)
 * 6. We append more data (equivalent to the data we touched)
 * 6a. Nothing should be evicted, since those generations that were just eligible for removal were touched and bumped up.
 * 7. We forcefully increase the current generation by 1 (without touching the ReadIndex)
 * 7a. At this point, we expect all the pre-storage items, except the touched ones, to be evicted. This is generations 0.25A-0.75A.
 * 8. Update the metadata and indicate that all the post-storage entries are now pre-storage and bump the generation by 0.75A.
 * 8a. At this point, we expect all former post-storage items and pre-storage items to be evicted (in this order).
 * <p>
 * The final order of eviction (in terms of offsets, for each segment), is:
 * * 0.25N..0.75N, 0.75N..N, N..1.25N, 0..0.25N, 1.25N..1.5N (remember that we added quite a few items after the initial run).
 */
@Test
@SuppressWarnings("checkstyle:CyclomaticComplexity")
public void testCacheEviction() throws Exception {
    // Create a CachePolicy with a set number of generations and a known max size.
    // Each generation contains exactly one entry, so the number of generations is also the number of entries.
    final int appendSize = 100;
    // This also doubles as number of generations (each generation, we add one append for each segment).
    final int entriesPerSegment = 100;
    final int cacheMaxSize = SEGMENT_COUNT * entriesPerSegment * appendSize;
    // 25% of the entries are beyond the StorageOffset
    final int postStorageEntryCount = entriesPerSegment / 4;
    // 75% of the entries are before the StorageOffset.
    final int preStorageEntryCount = entriesPerSegment - postStorageEntryCount;
    CachePolicy cachePolicy = new CachePolicy(cacheMaxSize, Duration.ofMillis(1000 * 2 * entriesPerSegment), Duration.ofMillis(1000));
    // To properly test this, we want predictable storage reads.
    ReadIndexConfig config = ConfigHelpers.withInfiniteCachePolicy(ReadIndexConfig.builder().with(ReadIndexConfig.STORAGE_READ_ALIGNMENT, appendSize)).build();
    ArrayList<CacheKey> removedKeys = new ArrayList<>();
    @Cleanup TestContext context = new TestContext(config, cachePolicy);
    // Record every cache removal.
    context.cacheFactory.cache.removeCallback = removedKeys::add;
    // Create the segments (metadata + storage).
    ArrayList<Long> segmentIds = createSegments(context);
    createSegmentsInStorage(context);
    // Populate the Storage with appropriate data.
    byte[] preStorageData = new byte[preStorageEntryCount * appendSize];
    for (long segmentId : segmentIds) {
        UpdateableSegmentMetadata sm = context.metadata.getStreamSegmentMetadata(segmentId);
        val handle = context.storage.openWrite(sm.getName()).join();
        context.storage.write(handle, 0, new ByteArrayInputStream(preStorageData), preStorageData.length, TIMEOUT).join();
        sm.setStorageLength(preStorageData.length);
        sm.setLength(preStorageData.length);
    }
    // Callback that appends one entry at the end of the given segment id.
    Consumer<Long> appendOneEntry = segmentId -> {
        UpdateableSegmentMetadata sm = context.metadata.getStreamSegmentMetadata(segmentId);
        byte[] data = new byte[appendSize];
        long offset = sm.getLength();
        sm.setLength(offset + data.length);
        try {
            context.readIndex.append(segmentId, offset, data);
        } catch (StreamSegmentNotExistsException ex) {
            throw new CompletionException(ex);
        }
    };
    // Populate the ReadIndex with the Append entries (post-StorageOffset)
    for (int i = 0; i < postStorageEntryCount; i++) {
        segmentIds.forEach(appendOneEntry);
        // Each time we make a round of appends (one per segment), we increment the generation in the CacheManager.
        context.cacheManager.applyCachePolicy();
    }
    // Read all the data from Storage, making sure we carefully associate them with the proper generation.
    for (int i = 0; i < preStorageEntryCount; i++) {
        long offset = i * appendSize;
        for (long segmentId : segmentIds) {
            @Cleanup ReadResult result = context.readIndex.read(segmentId, offset, appendSize, TIMEOUT);
            ReadResultEntry resultEntry = result.next();
            Assert.assertEquals("Unexpected type of ReadResultEntry when trying to load up data into the ReadIndex Cache.", ReadResultEntryType.Storage, resultEntry.getType());
            resultEntry.requestContent(TIMEOUT);
            ReadResultEntryContents contents = resultEntry.getContent().get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
            Assert.assertFalse("Not expecting more data to be available for reading.", result.hasNext());
            Assert.assertEquals("Unexpected ReadResultEntry length when trying to load up data into the ReadIndex Cache.", appendSize, contents.getLength());
        }
        context.cacheManager.applyCachePolicy();
    }
    Assert.assertEquals("Not expecting any removed Cache entries at this point (cache is not full).", 0, removedKeys.size());
    // Append more data (equivalent to all post-storage entries), and verify that NO entries are being evicted (we cannot evict post-storage entries).
    for (int i = 0; i < postStorageEntryCount; i++) {
        segmentIds.forEach(appendOneEntry);
        context.cacheManager.applyCachePolicy();
    }
    Assert.assertEquals("Not expecting any removed Cache entries at this point (only eligible entries were post-storage).", 0, removedKeys.size());
    // 'Touch' the first few entries read from storage. This should move them to the back of the queue (they won't be the first ones to be evicted).
    int touchCount = preStorageEntryCount / 3;
    for (int i = 0; i < touchCount; i++) {
        long offset = i * appendSize;
        for (long segmentId : segmentIds) {
            @Cleanup ReadResult result = context.readIndex.read(segmentId, offset, appendSize, TIMEOUT);
            ReadResultEntry resultEntry = result.next();
            Assert.assertEquals("Unexpected type of ReadResultEntry when trying to load up data into the ReadIndex Cache.", ReadResultEntryType.Cache, resultEntry.getType());
        }
    }
    // Append more data (equivalent to the amount of data we 'touched'), and verify that the entries we just touched are not being removed.
    for (int i = 0; i < touchCount; i++) {
        segmentIds.forEach(appendOneEntry);
        context.cacheManager.applyCachePolicy();
    }
    Assert.assertEquals("Not expecting any removed Cache entries at this point (we touched old entries and they now have the newest generation).", 0, removedKeys.size());
    // Increment the generations so that we are caught up to just before the generation where the "touched" items now live.
    context.cacheManager.applyCachePolicy();
    // We expect all but the 'touchCount' pre-Storage entries to be removed.
    int expectedRemovalCount = (preStorageEntryCount - touchCount) * SEGMENT_COUNT;
    Assert.assertEquals("Unexpected number of removed entries after having forced out all pre-storage entries.", expectedRemovalCount, removedKeys.size());
    // Now update the metadata and indicate that all the post-storage data has been moved to storage.
    segmentIds.forEach(segmentId -> {
        UpdateableSegmentMetadata sm = context.metadata.getStreamSegmentMetadata(segmentId);
        sm.setStorageLength(sm.getLength());
    });
    // We add one artificial entry, which we'll be touching forever and ever; this forces the CacheManager to
    // update its current generation every time. We will be ignoring this entry for our test.
    SegmentMetadata readSegment = context.metadata.getStreamSegmentMetadata(segmentIds.get(0));
    appendOneEntry.accept(readSegment.getId());
    // Now evict everything (whether by size or by aging out).
    for (int i = 0; i < cachePolicy.getMaxGenerations(); i++) {
        @Cleanup ReadResult result = context.readIndex.read(readSegment.getId(), readSegment.getLength() - appendSize, appendSize, TIMEOUT);
        result.next();
        context.cacheManager.applyCachePolicy();
    }
    int expectedRemovalCountPerSegment = entriesPerSegment + touchCount + postStorageEntryCount;
    int expectedTotalRemovalCount = SEGMENT_COUNT * expectedRemovalCountPerSegment;
    Assert.assertEquals("Unexpected number of removed entries after having forced out all the entries.", expectedTotalRemovalCount, removedKeys.size());
    // Finally, verify that the evicted items are in the correct order (for each segment). See this test's description for details.
    for (long segmentId : segmentIds) {
        List<CacheKey> segmentRemovedKeys = removedKeys.stream().filter(key -> key.getStreamSegmentId() == segmentId).collect(Collectors.toList());
        Assert.assertEquals("Unexpected number of removed entries for segment " + segmentId, expectedRemovalCountPerSegment, segmentRemovedKeys.size());
        // The correct order of eviction (N = entriesPerSegment) is: 0.25N..0.75N, 0.75N..N, N..1.25N, 0..0.25N, 1.25N..1.5N.
        // This is equivalent to the following checks:
        // 0.25N..1.25N
        checkOffsets(segmentRemovedKeys, segmentId, 0, entriesPerSegment, entriesPerSegment * appendSize / 4, appendSize);
        // 0..0.25N
        checkOffsets(segmentRemovedKeys, segmentId, entriesPerSegment, entriesPerSegment / 4, 0, appendSize);
        // 1.25N..1.5N
        checkOffsets(segmentRemovedKeys, segmentId, entriesPerSegment + entriesPerSegment / 4, entriesPerSegment / 4, (int) (entriesPerSegment * appendSize * 1.25), appendSize);
    }
}
Also used : Storage(io.pravega.segmentstore.storage.Storage) StreamSegmentNotExistsException(io.pravega.segmentstore.contracts.StreamSegmentNotExistsException) AssertExtensions(io.pravega.test.common.AssertExtensions) CacheKey(io.pravega.segmentstore.server.CacheKey) Cleanup(lombok.Cleanup) Random(java.util.Random) InMemoryCache(io.pravega.segmentstore.storage.mocks.InMemoryCache) UpdateableSegmentMetadata(io.pravega.segmentstore.server.UpdateableSegmentMetadata) StreamSegmentSealedException(io.pravega.segmentstore.contracts.StreamSegmentSealedException) ByteArrayInputStream(java.io.ByteArrayInputStream) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) ReadResultEntryContents(io.pravega.segmentstore.contracts.ReadResultEntryContents) InMemoryStorageFactory(io.pravega.segmentstore.storage.mocks.InMemoryStorageFactory) Duration(java.time.Duration) Map(java.util.Map) CacheFactory(io.pravega.segmentstore.storage.CacheFactory) Collection(java.util.Collection) CompletionException(java.util.concurrent.CompletionException) ReadResultEntryType(io.pravega.segmentstore.contracts.ReadResultEntryType) UUID(java.util.UUID) Collectors(java.util.stream.Collectors) StreamSegmentNameUtils(io.pravega.shared.segment.StreamSegmentNameUtils) UncheckedIOException(java.io.UncheckedIOException) List(java.util.List) ThreadPooledTestSuite(io.pravega.test.common.ThreadPooledTestSuite) Futures(io.pravega.common.concurrent.Futures) ReadResult(io.pravega.segmentstore.contracts.ReadResult) MetadataBuilder(io.pravega.segmentstore.server.MetadataBuilder) ByteArrayOutputStream(java.io.ByteArrayOutputStream) ConfigHelpers(io.pravega.segmentstore.server.ConfigHelpers) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) UpdateableContainerMetadata(io.pravega.segmentstore.server.UpdateableContainerMetadata) SegmentMetadata(io.pravega.segmentstore.server.SegmentMetadata) ReadResultEntry(io.pravega.segmentstore.contracts.ReadResultEntry) BiConsumer(java.util.function.BiConsumer) Timeout(org.junit.rules.Timeout) StreamHelpers(io.pravega.common.io.StreamHelpers) StreamSegmentTruncatedException(io.pravega.segmentstore.contracts.StreamSegmentTruncatedException) lombok.val(lombok.val) IOException(java.io.IOException) Test(org.junit.Test) TimeUnit(java.util.concurrent.TimeUnit) Consumer(java.util.function.Consumer) AtomicLong(java.util.concurrent.atomic.AtomicLong) Rule(org.junit.Rule) Assert(org.junit.Assert) Collections(java.util.Collections) Cache(io.pravega.segmentstore.storage.Cache) InputStream(java.io.InputStream)
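
The generation bookkeeping above is easier to see in isolation. Below is a self-contained model (hypothetical; it is not Pravega's CacheManager, and the class and method names are invented) of the aging half of the policy the test exercises; size-pressure eviction is omitted. Each applyPolicy() call advances the current generation, touching an entry reassigns it to the newest generation, and entries whose generation falls outside the retention window are evicted.

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;

class GenerationCacheModel {
    private final int maxGenerations;  // size of the retention window
    private int currentGeneration = 0;
    private final LinkedHashMap<String, Integer> entryGenerations = new LinkedHashMap<>();

    GenerationCacheModel(int maxGenerations) {
        this.maxGenerations = maxGenerations;
    }

    void insert(String key) {
        entryGenerations.put(key, currentGeneration);
    }

    // Reading an entry bumps it to the newest generation (step 5 in the test above).
    void touch(String key) {
        entryGenerations.computeIfPresent(key, (k, g) -> currentGeneration);
    }

    // Advance one generation and return the keys that aged out of the window.
    List<String> applyPolicy() {
        currentGeneration++;
        int oldestAllowed = currentGeneration - maxGenerations;
        List<String> evicted = new ArrayList<>();
        entryGenerations.entrySet().removeIf(e -> {
            if (e.getValue() < oldestAllowed) {
                evicted.add(e.getKey());
                return true;
            }
            return false;
        });
        return evicted;
    }
}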

Example 19 with UpdateableSegmentMetadata

use of io.pravega.segmentstore.server.UpdateableSegmentMetadata in project pravega by pravega.

the class ContainerReadIndexTests method testBatchedRead.

/**
 * Tests the ability for the ReadIndex to batch multiple index entries together into a bigger read. This test
 * writes a lot of very small appends to the index, then issues a full read (from the beginning) while configuring
 * the read index to return results of no less than a particular size. As an added bonus, it also forces a Storage
 * Read towards the end to make sure the ReadIndex doesn't coalesce those into the result as well.
 */
@Test
public void testBatchedRead() throws Exception {
    final int totalAppendLength = 500 * 1000;
    final int maxAppendLength = 100;
    final int minReadLength = 16 * 1024;
    final byte[] segmentData = new byte[totalAppendLength];
    final Random rnd = new Random(0);
    rnd.nextBytes(segmentData);
    final ReadIndexConfig config = ConfigHelpers.withInfiniteCachePolicy(ReadIndexConfig.builder().with(ReadIndexConfig.MEMORY_READ_MIN_LENGTH, minReadLength)).build();
    @Cleanup TestContext context = new TestContext(config, config.getCachePolicy());
    // Create the segment in Storage and populate it with all the data (one segment is sufficient for this test).
    final long segmentId = createSegment(0, context);
    createSegmentsInStorage(context);
    final UpdateableSegmentMetadata segmentMetadata = context.metadata.getStreamSegmentMetadata(segmentId);
    val writeHandle = context.storage.openWrite(segmentMetadata.getName()).join();
    context.storage.write(writeHandle, 0, new ByteArrayInputStream(segmentData), segmentData.length, TIMEOUT).join();
    segmentMetadata.setStorageLength(segmentData.length);
    // Add the contents of the segment to the read index using very small appends (same data as in Storage).
    int writtenLength = 0;
    int remainingLength = totalAppendLength;
    int lastCacheOffset = -1;
    while (remainingLength > 0) {
        int appendLength = rnd.nextInt(maxAppendLength) + 1;
        if (appendLength < remainingLength) {
            // Make another append.
            byte[] appendData = new byte[appendLength];
            System.arraycopy(segmentData, writtenLength, appendData, 0, appendLength);
            appendSingleWrite(segmentId, appendData, context);
            writtenLength += appendLength;
            remainingLength -= appendLength;
        } else {
            // This would be the last append. Don't add it, so force the read index to load it from Storage.
            lastCacheOffset = writtenLength;
            appendLength = remainingLength;
            writtenLength += appendLength;
            remainingLength = 0;
            segmentMetadata.setLength(writtenLength);
        }
    }
    // Check all the appended data.
    @Cleanup ReadResult readResult = context.readIndex.read(segmentId, 0, totalAppendLength, TIMEOUT);
    long expectedCurrentOffset = 0;
    boolean encounteredStorageRead = false;
    while (readResult.hasNext()) {
        ReadResultEntry entry = readResult.next();
        if (entry.getStreamSegmentOffset() < lastCacheOffset) {
            Assert.assertEquals("Expecting only a Cache entry before switch offset.", ReadResultEntryType.Cache, entry.getType());
        } else {
            Assert.assertEquals("Expecting only a Storage entry on or after switch offset.", ReadResultEntryType.Storage, entry.getType());
            entry.requestContent(TIMEOUT);
            entry.getContent().get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
            encounteredStorageRead = true;
        }
        // Check the entry contents.
        byte[] entryData = new byte[entry.getContent().join().getLength()];
        StreamHelpers.readAll(entry.getContent().join().getData(), entryData, 0, entryData.length);
        AssertExtensions.assertArrayEquals("Unexpected data read at offset " + expectedCurrentOffset, segmentData, (int) expectedCurrentOffset, entryData, 0, entryData.length);
        expectedCurrentOffset += entryData.length;
        // Only enforce the minimum read length for entries that were not cut short by the storage entry.
        if (expectedCurrentOffset < lastCacheOffset) {
            AssertExtensions.assertGreaterThanOrEqual("Expecting a ReadResultEntry of a minimum length for cache hit.", minReadLength, entryData.length);
        }
    }
    Assert.assertEquals("Not encountered any storage reads, even though one was forced.", lastCacheOffset > 0, encounteredStorageRead);
}
Also used : lombok.val(lombok.val) UpdateableSegmentMetadata(io.pravega.segmentstore.server.UpdateableSegmentMetadata) ReadResult(io.pravega.segmentstore.contracts.ReadResult) Cleanup(lombok.Cleanup) Random(java.util.Random) ByteArrayInputStream(java.io.ByteArrayInputStream) ReadResultEntry(io.pravega.segmentstore.contracts.ReadResultEntry) Test(org.junit.Test)
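
A hypothetical illustration of the batching rule the test verifies (this is not the ReadIndex implementation, and coalescedLength is an invented name): starting from a cache hit, consecutive cached entries are coalesced until the returned entry reaches the configured minimum, and a Storage-backed entry is never folded into the same result.

// Hypothetical: given the lengths of consecutive cached index entries, return how
// many bytes one batched result would cover. It may overshoot minReadLength by at
// most the final entry's length, and it stops early if the cached run ends (which
// is why the last cache entry before the forced Storage read can be shorter).
static int coalescedLength(int[] cachedEntryLengths, int startIndex, int minReadLength) {
    int total = 0;
    for (int i = startIndex; i < cachedEntryLengths.length && total < minReadLength; i++) {
        total += cachedEntryLengths[i];
    }
    return total;
}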

Example 20 with UpdateableSegmentMetadata

use of io.pravega.segmentstore.server.UpdateableSegmentMetadata in project pravega by pravega.

the class ContainerReadIndexTests method testReadDirect.

/**
 * Tests the readDirect() method on the ReadIndex.
 */
@Test
public void testReadDirect() throws Exception {
    final int randomAppendLength = 1024;
    @Cleanup TestContext context = new TestContext();
    ArrayList<Long> segmentIds = new ArrayList<>();
    final long segmentId = createSegment(0, context);
    final UpdateableSegmentMetadata segmentMetadata = context.metadata.getStreamSegmentMetadata(segmentId);
    segmentIds.add(segmentId);
    HashMap<Long, ArrayList<Long>> transactionsBySegment = createTransactions(segmentIds, 1, context);
    final long mergedTxId = transactionsBySegment.get(segmentId).get(0);
    // Add data to all segments.
    HashMap<Long, ByteArrayOutputStream> segmentContents = new HashMap<>();
    transactionsBySegment.values().forEach(segmentIds::addAll);
    appendData(segmentIds, segmentContents, context);
    // Mark everything so far (minus a few bytes) as being written to storage.
    segmentMetadata.setStorageLength(segmentMetadata.getLength() - 100);
    // Now partially merge a second transaction
    final long mergedTxOffset = beginMergeTransaction(mergedTxId, segmentMetadata, segmentContents, context);
    // Add one more append after all of this.
    final long endOfMergedDataOffset = segmentMetadata.getLength();
    byte[] appendData = new byte[randomAppendLength];
    new Random(0).nextBytes(appendData);
    appendSingleWrite(segmentId, appendData, context);
    recordAppend(segmentId, appendData, segmentContents);
    // Verify we are not allowed to read from the range which has already been committed to Storage (invalid arguments).
    for (AtomicLong offset = new AtomicLong(0); offset.get() < segmentMetadata.getStorageLength(); offset.incrementAndGet()) {
        AssertExtensions.assertThrows(String.format("readDirect allowed reading from an illegal offset (%s).", offset), () -> context.readIndex.readDirect(segmentId, offset.get(), 1), ex -> ex instanceof IllegalArgumentException);
    }
    // Verify that any reads overlapping a merged transaction return null (that is, we cannot retrieve the requested data).
    for (long offset = mergedTxOffset - 1; offset < endOfMergedDataOffset; offset++) {
        InputStream resultStream = context.readIndex.readDirect(segmentId, offset, 2);
        Assert.assertNull("readDirect() returned data overlapping a partially merged transaction", resultStream);
    }
    // Verify that we can read from any other offset.
    final byte[] expectedData = segmentContents.get(segmentId).toByteArray();
    BiConsumer<Long, Long> verifyReadResult = (startOffset, endOffset) -> {
        int readLength = (int) (endOffset - startOffset);
        while (readLength > 0) {
            InputStream actualDataStream;
            try {
                actualDataStream = context.readIndex.readDirect(segmentId, startOffset, readLength);
            } catch (StreamSegmentNotExistsException ex) {
                throw new CompletionException(ex);
            }
            Assert.assertNotNull(String.format("Unexpected result when data is readily available for Offset = %s, Length = %s.", startOffset, readLength), actualDataStream);
            byte[] actualData = new byte[readLength];
            try {
                int bytesCopied = StreamHelpers.readAll(actualDataStream, actualData, 0, readLength);
                Assert.assertEquals(String.format("Unexpected number of bytes read for Offset = %s, Length = %s (pre-partial-merge).", startOffset, readLength), readLength, bytesCopied);
            } catch (IOException ex) {
                // Technically not possible.
                throw new UncheckedIOException(ex);
            }
            AssertExtensions.assertArrayEquals("Unexpected data read from the segment at offset " + startOffset, expectedData, startOffset.intValue(), actualData, 0, actualData.length);
            // Setup the read for the next test (where we read 1 less byte than now).
            readLength--;
            if (readLength % 2 == 0) {
                // For every 2 bytes of decreased read length, increase the start offset by 1. This allows for a greater
                // number of combinations to be tested.
                startOffset++;
            }
        }
    };
    // Verify that we can read the cached data just after the StorageLength but before the merged transaction.
    verifyReadResult.accept(segmentMetadata.getStorageLength(), mergedTxOffset);
    // Verify that we can read the cached data just after the merged transaction but before the end of the segment.
    verifyReadResult.accept(endOfMergedDataOffset, segmentMetadata.getLength());
}
Also used : Storage(io.pravega.segmentstore.storage.Storage) StreamSegmentNotExistsException(io.pravega.segmentstore.contracts.StreamSegmentNotExistsException) AssertExtensions(io.pravega.test.common.AssertExtensions) CacheKey(io.pravega.segmentstore.server.CacheKey) Cleanup(lombok.Cleanup) Random(java.util.Random) InMemoryCache(io.pravega.segmentstore.storage.mocks.InMemoryCache) UpdateableSegmentMetadata(io.pravega.segmentstore.server.UpdateableSegmentMetadata) StreamSegmentSealedException(io.pravega.segmentstore.contracts.StreamSegmentSealedException) ByteArrayInputStream(java.io.ByteArrayInputStream) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) ReadResultEntryContents(io.pravega.segmentstore.contracts.ReadResultEntryContents) InMemoryStorageFactory(io.pravega.segmentstore.storage.mocks.InMemoryStorageFactory) Duration(java.time.Duration) Map(java.util.Map) CacheFactory(io.pravega.segmentstore.storage.CacheFactory) Collection(java.util.Collection) CompletionException(java.util.concurrent.CompletionException) ReadResultEntryType(io.pravega.segmentstore.contracts.ReadResultEntryType) UUID(java.util.UUID) Collectors(java.util.stream.Collectors) StreamSegmentNameUtils(io.pravega.shared.segment.StreamSegmentNameUtils) UncheckedIOException(java.io.UncheckedIOException) List(java.util.List) ThreadPooledTestSuite(io.pravega.test.common.ThreadPooledTestSuite) Futures(io.pravega.common.concurrent.Futures) ReadResult(io.pravega.segmentstore.contracts.ReadResult) MetadataBuilder(io.pravega.segmentstore.server.MetadataBuilder) ByteArrayOutputStream(java.io.ByteArrayOutputStream) ConfigHelpers(io.pravega.segmentstore.server.ConfigHelpers) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) UpdateableContainerMetadata(io.pravega.segmentstore.server.UpdateableContainerMetadata) SegmentMetadata(io.pravega.segmentstore.server.SegmentMetadata) ReadResultEntry(io.pravega.segmentstore.contracts.ReadResultEntry) BiConsumer(java.util.function.BiConsumer) Timeout(org.junit.rules.Timeout) StreamHelpers(io.pravega.common.io.StreamHelpers) StreamSegmentTruncatedException(io.pravega.segmentstore.contracts.StreamSegmentTruncatedException) lombok.val(lombok.val) IOException(java.io.IOException) Test(org.junit.Test) TimeUnit(java.util.concurrent.TimeUnit) Consumer(java.util.function.Consumer) AtomicLong(java.util.concurrent.atomic.AtomicLong) Rule(org.junit.Rule) Assert(org.junit.Assert) Collections(java.util.Collections) Cache(io.pravega.segmentstore.storage.Cache) InputStream(java.io.InputStream)
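
The three behaviors verified above can be condensed into a small contract model (hypothetical; this is not the ReadIndex code, and readDirectModel is an invented name): offsets below StorageLength are rejected as illegal arguments, ranges overlapping the partially merged transaction return null, and all other in-memory ranges are served directly.

import java.io.ByteArrayInputStream;
import java.io.InputStream;

// Hypothetical model of the readDirect() contract; offsets index into an in-memory copy.
static InputStream readDirectModel(long offset, int length, long storageLength,
                                   long mergedTxStart, long mergedTxEnd, byte[] cachedData) {
    if (offset < storageLength) {
        // Range already committed to Storage: readDirect() does not serve it.
        throw new IllegalArgumentException("offset " + offset + " is below StorageLength");
    }
    if (offset < mergedTxEnd && offset + length > mergedTxStart) {
        // Overlaps a partially merged transaction: the data is not directly addressable yet.
        return null;
    }
    return new ByteArrayInputStream(cachedData, (int) offset, length);
}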

Aggregations

UpdateableSegmentMetadata (io.pravega.segmentstore.server.UpdateableSegmentMetadata)52 Test (org.junit.Test)20 Cleanup (lombok.Cleanup)15 lombok.val (lombok.val)15 AtomicLong (java.util.concurrent.atomic.AtomicLong)12 SegmentMetadata (io.pravega.segmentstore.server.SegmentMetadata)11 ArrayList (java.util.ArrayList)11 ReadResult (io.pravega.segmentstore.contracts.ReadResult)8 HashMap (java.util.HashMap)8 HashSet (java.util.HashSet)8 ReadResultEntry (io.pravega.segmentstore.contracts.ReadResultEntry)7 UpdateableContainerMetadata (io.pravega.segmentstore.server.UpdateableContainerMetadata)7 AtomicInteger (java.util.concurrent.atomic.AtomicInteger)7 MetadataBuilder (io.pravega.segmentstore.server.MetadataBuilder)6 MergeTransactionOperation (io.pravega.segmentstore.server.logs.operations.MergeTransactionOperation)6 StreamSegmentAppendOperation (io.pravega.segmentstore.server.logs.operations.StreamSegmentAppendOperation)6 ByteArrayInputStream (java.io.ByteArrayInputStream)6 ByteArrayOutputStream (java.io.ByteArrayOutputStream)6 CachedStreamSegmentAppendOperation (io.pravega.segmentstore.server.logs.operations.CachedStreamSegmentAppendOperation)5 StorageOperation (io.pravega.segmentstore.server.logs.operations.StorageOperation)5