
Example 6 with SegmentMetadata

Use of io.pravega.segmentstore.server.SegmentMetadata in project pravega by pravega.

From the class ContainerReadIndexTests, method createTransactions:

private HashMap<Long, ArrayList<Long>> createTransactions(Collection<Long> segmentIds, int transactionsPerSegment, TestContext context) {
    // Create the Transactions.
    HashMap<Long, ArrayList<Long>> transactions = new HashMap<>();
    // Start transaction ids at Integer.MAX_VALUE so they do not collide with the (much smaller) parent segment ids.
    long transactionId = Integer.MAX_VALUE;
    for (long parentId : segmentIds) {
        ArrayList<Long> segmentTransactions = new ArrayList<>();
        transactions.put(parentId, segmentTransactions);
        SegmentMetadata parentMetadata = context.metadata.getStreamSegmentMetadata(parentId);
        for (int i = 0; i < transactionsPerSegment; i++) {
            segmentTransactions.add(createTransaction(parentMetadata, transactionId, context));
            transactionId++;
        }
    }
    return transactions;
}
Also used : UpdateableSegmentMetadata(io.pravega.segmentstore.server.UpdateableSegmentMetadata) SegmentMetadata(io.pravega.segmentstore.server.SegmentMetadata) HashMap(java.util.HashMap) AtomicLong(java.util.concurrent.atomic.AtomicLong) ArrayList(java.util.ArrayList)
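
A minimal usage sketch (hypothetical, not part of the Pravega test) of how the returned map might be consumed, assuming the same TestContext helper: each key is a parent segment id and each value holds the ids of the transactions created for it.

private void verifyTransactionsRegistered(HashMap<Long, ArrayList<Long>> transactions, TestContext context) {
    for (Map.Entry<Long, ArrayList<Long>> entry : transactions.entrySet()) {
        // The parent segment must be mapped in the container metadata.
        SegmentMetadata parentMetadata = context.metadata.getStreamSegmentMetadata(entry.getKey());
        Assert.assertNotNull("Parent segment not mapped.", parentMetadata);
        for (long transactionId : entry.getValue()) {
            // Each transaction was registered by createTransaction() and should be mapped as well.
            Assert.assertNotNull("Transaction segment not mapped.", context.metadata.getStreamSegmentMetadata(transactionId));
        }
    }
}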

Example 7 with SegmentMetadata

Use of io.pravega.segmentstore.server.SegmentMetadata in project pravega by pravega.

From the class StorageWriterTests, method evictSegments:

private Collection<Long> evictSegments(long cutoffSeqNo, TestContext context) {
    EvictableMetadata metadata = (EvictableMetadata) context.metadata;
    Collection<SegmentMetadata> evictionCandidates = metadata.getEvictionCandidates(cutoffSeqNo, Integer.MAX_VALUE);
    metadata.cleanup(evictionCandidates, cutoffSeqNo);
    return evictionCandidates.stream().map(SegmentMetadata::getId).collect(Collectors.toSet());
}
Also used : UpdateableSegmentMetadata(io.pravega.segmentstore.server.UpdateableSegmentMetadata) SegmentMetadata(io.pravega.segmentstore.server.SegmentMetadata) EvictableMetadata(io.pravega.segmentstore.server.EvictableMetadata)
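
A hedged follow-up sketch (not from StorageWriterTests) showing how the result of evictSegments might be asserted, on the assumption that getStreamSegmentMetadata returns null once a segment's metadata has been cleaned up:

private void assertEvicted(Collection<Long> evictedIds, TestContext context) {
    for (long segmentId : evictedIds) {
        // cleanup() removed these entries from the ContainerMetadata, so lookups should fail.
        Assert.assertNull("Segment " + segmentId + " is still mapped after eviction.",
                context.metadata.getStreamSegmentMetadata(segmentId));
    }
}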

Example 8 with SegmentMetadata

Use of io.pravega.segmentstore.server.SegmentMetadata in project pravega by pravega.

From the class TestWriterDataSource, method getAppendData:

@Override
public BufferView getAppendData(long streamSegmentId, long startOffset, int length) {
    AppendData ad;
    synchronized (this.lock) {
        ErrorInjector.throwSyncExceptionIfNeeded(this.getAppendDataErrorInjector);
        // Perform the same validation checks as the ReadIndex would do.
        SegmentMetadata sm = this.metadata.getStreamSegmentMetadata(streamSegmentId);
        if (sm.isDeleted()) {
            // StorageWriterFactory.WriterDataSource returns null for nonexistent segments.
            return null;
        }
        Preconditions.checkArgument(length >= 0, "length must be a non-negative number");
        Preconditions.checkArgument(startOffset >= sm.getStorageLength(), "startOffset (%s) must refer to an offset beyond the Segment's StorageLength offset (%s).", startOffset, sm.getStorageLength());
        Preconditions.checkArgument(startOffset + length <= sm.getLength(), "startOffset+length must not exceed the Segment's Length.");
        Preconditions.checkArgument(startOffset >= Math.min(sm.getStartOffset(), sm.getStorageLength()), "startOffset is before the Segment's StartOffset.");
        ad = this.appendData.getOrDefault(streamSegmentId, null);
    }
    if (ad == null) {
        return null;
    }
    return ad.read(startOffset, length);
}
Also used : UpdateableSegmentMetadata(io.pravega.segmentstore.server.UpdateableSegmentMetadata) SegmentMetadata(io.pravega.segmentstore.server.SegmentMetadata)
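
The Preconditions above pin down the valid read window: only data past what is already persisted (StorageLength) and within the Segment's Length may be requested. For example, with StorageLength = 100 and Length = 150, getAppendData(id, 100, 50) is valid, while startOffset 99 (below StorageLength) or startOffset 120 with length 40 (past Length) would fail. A standalone sketch of the same checks, using Guava's Preconditions (illustrative, not the Pravega source):

static void checkReadWindow(long startOffset, int length, long storageLength, long segmentLength) {
    Preconditions.checkArgument(length >= 0, "length must be a non-negative number");
    // Data below StorageLength is already persisted; the data source only serves the unpersisted tail.
    Preconditions.checkArgument(startOffset >= storageLength,
            "startOffset (%s) must be at or beyond StorageLength (%s).", startOffset, storageLength);
    // The requested range must not run past the end of the Segment.
    Preconditions.checkArgument(startOffset + length <= segmentLength,
            "startOffset+length must not exceed the Segment's Length (%s).", segmentLength);
}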

Example 9 with SegmentMetadata

Use of io.pravega.segmentstore.server.SegmentMetadata in project pravega by pravega.

From the class ContainerMetadataUpdateTransactionTests, method testTransientSegmentExtendedAttributeLimit:

/**
 * Tests that a Transient Segment may only have {@link SegmentMetadataUpdateTransaction#TRANSIENT_ATTRIBUTE_LIMIT}
 * or fewer Extended Attributes.
 */
@Test
public void testTransientSegmentExtendedAttributeLimit() throws ContainerException, StreamSegmentException {
    // Create base metadata with one Transient Segment.
    UpdateableContainerMetadata metadata = createMetadataTransient();
    int expected = 1;
    Assert.assertEquals("Unexpected initial Active Segment Count for base metadata.", expected, metadata.getActiveSegmentCount());
    // Create an UpdateTransaction containing updates for Extended Attributes on the Transient Segment -- it should succeed.
    val txn1 = new ContainerMetadataUpdateTransaction(metadata, metadata, 0);
    Assert.assertEquals("Unexpected Active Segment Count for first transaction.", expected, txn1.getActiveSegmentCount());
    // Subtract one from remaining due to Segment Type Attribute.
    long id = SegmentMetadataUpdateTransaction.TRANSIENT_ATTRIBUTE_LIMIT - (TRANSIENT_ATTRIBUTE_REMAINING - 1);
    AttributeUpdateCollection attributes = AttributeUpdateCollection.from(
            // The new entry.
            new AttributeUpdate(AttributeId.uuid(Attributes.CORE_ATTRIBUTE_ID_PREFIX + 1, id), AttributeUpdateType.None, id),
            // Update two old entries.
            new AttributeUpdate(AttributeId.uuid(Attributes.CORE_ATTRIBUTE_ID_PREFIX + 1, 0), AttributeUpdateType.Replace, (long) 1),
            new AttributeUpdate(AttributeId.uuid(Attributes.CORE_ATTRIBUTE_ID_PREFIX + 1, 1), AttributeUpdateType.Replace, (long) 2));
    val map1 = createTransientAppend(TRANSIENT_SEGMENT_ID, attributes);
    txn1.preProcessOperation(map1);
    map1.setSequenceNumber(metadata.nextOperationSequenceNumber());
    txn1.acceptOperation(map1);
    int expectedExtendedAttributes = SegmentMetadataUpdateTransaction.TRANSIENT_ATTRIBUTE_LIMIT - (TRANSIENT_ATTRIBUTE_REMAINING - 1);
    SegmentMetadata segmentMetadata = txn1.getStreamSegmentMetadata(TRANSIENT_SEGMENT_ID);
    Assert.assertEquals("Unexpected Extended Attribute count after first transaction.", expectedExtendedAttributes, segmentMetadata.getAttributes().size());
    val txn2 = new ContainerMetadataUpdateTransaction(txn1, metadata, 1);
    // Add two new Extended Attributes which should exceed the set limit.
    attributes = AttributeUpdateCollection.from(new AttributeUpdate(AttributeId.uuid(Attributes.CORE_ATTRIBUTE_ID_PREFIX + 1, ++id), AttributeUpdateType.None, (long) 0), new AttributeUpdate(AttributeId.uuid(Attributes.CORE_ATTRIBUTE_ID_PREFIX + 1, ++id), AttributeUpdateType.None, (long) 0));
    // Should not fail as there was space before the operation, so accept any new additions.
    val map2 = createTransientAppend(TRANSIENT_SEGMENT_ID, attributes);
    txn2.preProcessOperation(map2);
    txn2.acceptOperation(map2);
    // Since we are now over the limit, enforce that no new Extended Attributes may be added.
    val txn3 = new ContainerMetadataUpdateTransaction(txn2, metadata, 2);
    // Expect another Attribute addition to fail as the limit has been exceeded.
    val map3 = createTransientAppend(TRANSIENT_SEGMENT_ID, AttributeUpdateCollection.from(new AttributeUpdate(AttributeId.uuid(Attributes.CORE_ATTRIBUTE_ID_PREFIX + 1, ++id), AttributeUpdateType.None, (long) 0)));
    AssertExtensions.assertThrows("Exception was not thrown when too many Extended Attributes were registered.", () -> txn3.preProcessOperation(map3), ex -> ex instanceof MetadataUpdateException);
}
Also used : lombok.val(lombok.val) AttributeUpdateCollection(io.pravega.segmentstore.contracts.AttributeUpdateCollection) UpdateableSegmentMetadata(io.pravega.segmentstore.server.UpdateableSegmentMetadata) StreamSegmentMetadata(io.pravega.segmentstore.server.containers.StreamSegmentMetadata) SegmentMetadata(io.pravega.segmentstore.server.SegmentMetadata) AttributeUpdate(io.pravega.segmentstore.contracts.AttributeUpdate) DynamicAttributeUpdate(io.pravega.segmentstore.contracts.DynamicAttributeUpdate) UpdateableContainerMetadata(io.pravega.segmentstore.server.UpdateableContainerMetadata) Test(org.junit.Test)
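
A compact sketch of the acceptance rule this test verifies (a hypothetical helper, not the Pravega API): an operation that begins below the Transient Segment attribute limit is accepted even if it crosses the limit, but once the count is at or over the limit, any operation adding new Extended Attributes is rejected.

// Returns whether an append adding 'added' new Extended Attributes should be accepted
// when the Transient Segment currently holds 'currentCount' of them (illustrative only).
static boolean acceptNewExtendedAttributes(int currentCount, int added, int limit) {
    // Crossing the limit is allowed; starting at or above it with new additions is not.
    return currentCount < limit || added == 0;
}

For instance, under this rule acceptNewExtendedAttributes(limit - 1, 2, limit) returns true (crossing the limit is allowed, as with txn2 above), while acceptNewExtendedAttributes(limit + 1, 1, limit) returns false (as with txn3).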

Example 10 with SegmentMetadata

Use of io.pravega.segmentstore.server.SegmentMetadata in project pravega by pravega.

From the class ContainerReadIndexTests, method testCacheEviction:

/**
 * Tests the ability to evict entries from the ReadIndex under various conditions:
 * * If an entry is aged out
 * * If an entry is pushed out because of cache space pressure.
 *
 * This also verifies that certain entries, such as RedirectReadIndexEntries and entries after the Storage Offset are
 * not removed.
 *
 * The way this test goes is as follows (it's pretty subtle, because there aren't many ways to hook into the ReadIndex and see what it's doing)
 * 1. It creates a bunch of segments and populates each in Storage up to offset 3N/4-1 (this is called pre-storage)
 * 2. It populates the ReadIndex for each of those segments from offset 3N/4 to offset N-1 (this is called post-storage)
 * 3. It loads all the data from Storage into the ReadIndex, in entries of size equal to those already loaded in step #2.
 * 3a. At this point, all the entries added in step #2 have generations 0..A/4-1, and those added in step #3 have generations A/4..A-1
 * 4. Append more data at the end. This forces the generation to increase to 1.25A.
 * 4a. Nothing should be evicted from the cache now, since the earliest items are all post-storage.
 * 5. We 'touch' (read) the first 1/3 of pre-storage entries (offsets 0..N/4).
 * 5a. At this point, those entries (offsets 0..N/4) will have the newest generations (1.25A..1.5A)
 * 6. We append more data (equivalent to the data we touched)
 * 6a. Nothing should be evicted, since those generations that were just eligible for removal were touched and bumped up.
 * 7. We forcefully increase the current generation by 1 (without touching the ReadIndex)
 * 7a. At this point, we expect all the pre-storage items, except the touched ones, to be evicted. This is generations 0.25A-0.75A.
 * 8. Update the metadata and indicate that all the post-storage entries are now pre-storage and bump the generation by 0.75A.
 * 8a. At this point, we expect all former post-storage items and pre-storage items to be evicted (in this order).
 * <p>
 * The final order of eviction (in terms of offsets, for each segment) is:
 * 0.25N-0.75N, 0.75N..N, N..1.25N, 0..0.25N, 1.25N..1.5N (remember that we added quite a few items after the initial run).
 */
@Test
@SuppressWarnings("checkstyle:CyclomaticComplexity")
public void testCacheEviction() throws Exception {
    // Create a CachePolicy with a set number of generations and a known max size.
    // Each generation contains exactly one entry, so the number of generations is also the number of entries.
    // We append one byte at each time. This allows us to test edge cases as well by having the finest precision when
    // it comes to selecting which bytes we want evicted and which kept.
    final int appendSize = 1;
    // This also doubles as number of generations (each generation, we add one append for each segment).
    final int entriesPerSegment = 100;
    final int cacheMaxSize = SEGMENT_COUNT * entriesPerSegment * appendSize;
    // 25% of the entries are beyond the StorageOffset
    final int postStorageEntryCount = entriesPerSegment / 4;
    // 75% of the entries are before the StorageOffset.
    final int preStorageEntryCount = entriesPerSegment - postStorageEntryCount;
    CachePolicy cachePolicy = new CachePolicy(cacheMaxSize, 1.0, 1.0, Duration.ofMillis(1000 * 2 * entriesPerSegment), Duration.ofMillis(1000));
    // To properly test this, we want predictable storage reads.
    ReadIndexConfig config = ReadIndexConfig.builder().with(ReadIndexConfig.STORAGE_READ_ALIGNMENT, appendSize).build();
    ArrayList<Integer> removedEntries = new ArrayList<>();
    @Cleanup TestContext context = new TestContext(config, cachePolicy);
    // To ease our testing, we disable appends and instruct the TestCache to report the same value for UsedBytes as it
    // has for StoredBytes. This shields us from having to know internal details about the layout of the cache.
    context.cacheStorage.usedBytesSameAsStoredBytes = true;
    context.cacheStorage.disableAppends = true;
    // Record every cache removal.
    context.cacheStorage.deleteCallback = removedEntries::add;
    // Create the segments (metadata + storage).
    ArrayList<Long> segmentIds = createSegments(context);
    createSegmentsInStorage(context);
    // Populate the Storage with appropriate data.
    byte[] preStorageData = new byte[preStorageEntryCount * appendSize];
    for (long segmentId : segmentIds) {
        UpdateableSegmentMetadata sm = context.metadata.getStreamSegmentMetadata(segmentId);
        val handle = context.storage.openWrite(sm.getName()).join();
        context.storage.write(handle, 0, new ByteArrayInputStream(preStorageData), preStorageData.length, TIMEOUT).join();
        sm.setStorageLength(preStorageData.length);
        sm.setLength(preStorageData.length);
    }
    val cacheMappings = new HashMap<Integer, SegmentOffset>();
    // Callback that appends one entry at the end of the given segment id.
    Consumer<Long> appendOneEntry = segmentId -> {
        UpdateableSegmentMetadata sm = context.metadata.getStreamSegmentMetadata(segmentId);
        byte[] data = new byte[appendSize];
        long offset = sm.getLength();
        sm.setLength(offset + data.length);
        try {
            context.cacheStorage.insertCallback = address -> cacheMappings.put(address, new SegmentOffset(segmentId, offset));
            context.readIndex.append(segmentId, offset, new ByteArraySegment(data));
        } catch (StreamSegmentNotExistsException ex) {
            throw new CompletionException(ex);
        }
    };
    // Populate the ReadIndex with the Append entries (post-StorageOffset)
    for (int i = 0; i < postStorageEntryCount; i++) {
        segmentIds.forEach(appendOneEntry);
        // Each time we make a round of appends (one per segment), we increment the generation in the CacheManager.
        context.cacheManager.applyCachePolicy();
    }
    // Read all the data from Storage, making sure we carefully associate them with the proper generation.
    for (int i = 0; i < preStorageEntryCount; i++) {
        long offset = i * appendSize;
        for (long segmentId : segmentIds) {
            @Cleanup ReadResult result = context.readIndex.read(segmentId, offset, appendSize, TIMEOUT);
            ReadResultEntry resultEntry = result.next();
            Assert.assertEquals("Unexpected type of ReadResultEntry when trying to load up data into the ReadIndex Cache.", ReadResultEntryType.Storage, resultEntry.getType());
            CompletableFuture<Void> insertedInCache = new CompletableFuture<>();
            context.cacheStorage.insertCallback = address -> {
                cacheMappings.put(address, new SegmentOffset(segmentId, offset));
                insertedInCache.complete(null);
            };
            resultEntry.requestContent(TIMEOUT);
            BufferView contents = resultEntry.getContent().get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
            Assert.assertFalse("Not expecting more data to be available for reading.", result.hasNext());
            Assert.assertEquals("Unexpected ReadResultEntry length when trying to load up data into the ReadIndex Cache.", appendSize, contents.getLength());
            // Wait for the entry to be inserted into the cache before moving on.
            insertedInCache.get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
        }
        context.cacheManager.applyCachePolicy();
    }
    Assert.assertEquals("Not expecting any removed Cache entries at this point (cache is not full).", 0, removedEntries.size());
    // Append more data (equivalent to all post-storage entries), and verify that NO entries are being evicted (we cannot evict post-storage entries).
    for (int i = 0; i < postStorageEntryCount; i++) {
        segmentIds.forEach(appendOneEntry);
        context.cacheManager.applyCachePolicy();
    }
    Assert.assertEquals("Not expecting any removed Cache entries at this point (only eligible entries were post-storage).", 0, removedEntries.size());
    // 'Touch' the first few entries read from storage. This should move them to the back of the queue (they won't be the first ones to be evicted).
    int touchCount = preStorageEntryCount / 3;
    for (int i = 0; i < touchCount; i++) {
        long offset = i * appendSize;
        for (long segmentId : segmentIds) {
            @Cleanup ReadResult result = context.readIndex.read(segmentId, offset, appendSize, TIMEOUT);
            ReadResultEntry resultEntry = result.next();
            Assert.assertEquals("Unexpected type of ReadResultEntry when trying to load up data into the ReadIndex Cache.", ReadResultEntryType.Cache, resultEntry.getType());
        }
    }
    // Append more data (equivalent to the amount of data we 'touched'), and verify that the entries we just touched are not being removed.
    for (int i = 0; i < touchCount; i++) {
        segmentIds.forEach(appendOneEntry);
        context.cacheManager.applyCachePolicy();
    }
    Assert.assertEquals("Not expecting any removed Cache entries at this point (we touched old entries and they now have the newest generation).", 0, removedEntries.size());
    // Increment the generations so that we are caught up to just before the generation where the "touched" items now live.
    context.cacheManager.applyCachePolicy();
    // We expect all but the 'touchCount' pre-Storage entries to be removed.
    int expectedRemovalCount = (preStorageEntryCount - touchCount) * SEGMENT_COUNT;
    Assert.assertEquals("Unexpected number of removed entries after having forced out all pre-storage entries.", expectedRemovalCount, removedEntries.size());
    // Now update the metadata and indicate that all the post-storage data has been moved to storage.
    segmentIds.forEach(segmentId -> {
        UpdateableSegmentMetadata sm = context.metadata.getStreamSegmentMetadata(segmentId);
        sm.setStorageLength(sm.getLength());
    });
    // We add one artificial entry, which we'll be touching forever and ever; this forces the CacheManager to
    // update its current generation every time. We will be ignoring this entry for our test.
    SegmentMetadata readSegment = context.metadata.getStreamSegmentMetadata(segmentIds.get(0));
    appendOneEntry.accept(readSegment.getId());
    // Now evict everything (whether by size or by aging out).
    for (int i = 0; i < cachePolicy.getMaxGenerations(); i++) {
        @Cleanup ReadResult result = context.readIndex.read(readSegment.getId(), readSegment.getLength() - appendSize, appendSize, TIMEOUT);
        result.next();
        context.cacheManager.applyCachePolicy();
    }
    int expectedRemovalCountPerSegment = entriesPerSegment + touchCount + postStorageEntryCount;
    int expectedTotalRemovalCount = SEGMENT_COUNT * expectedRemovalCountPerSegment;
    Assert.assertEquals("Unexpected number of removed entries after having forced out all the entries.", expectedTotalRemovalCount, removedEntries.size());
    // Finally, verify that the evicted items are in the correct order (for each segment). See this test's description for details.
    for (long segmentId : segmentIds) {
        List<SegmentOffset> segmentRemovedKeys = removedEntries.stream().map(cacheMappings::get).filter(e -> e.segmentId == segmentId).collect(Collectors.toList());
        Assert.assertEquals("Unexpected number of removed entries for segment " + segmentId, expectedRemovalCountPerSegment, segmentRemovedKeys.size());
        // The correct order of eviction (N=entriesPerSegment) is: 0.25N-0.75N, 0.75N..N, N..1.25N, 0..0.25N, 1.25N..1.5N.
        // This is equivalent to the following checks:
        // 0.25N-1.25N
        checkOffsets(segmentRemovedKeys, segmentId, 0, entriesPerSegment, entriesPerSegment * appendSize / 4, appendSize);
        // 0..0.25N
        checkOffsets(segmentRemovedKeys, segmentId, entriesPerSegment, entriesPerSegment / 4, 0, appendSize);
        // 1.25N..1.5N
        checkOffsets(segmentRemovedKeys, segmentId, entriesPerSegment + entriesPerSegment / 4, entriesPerSegment / 4, (int) (entriesPerSegment * appendSize * 1.25), appendSize);
    }
}
Also used : Arrays(java.util.Arrays) StreamSegmentNotExistsException(io.pravega.segmentstore.contracts.StreamSegmentNotExistsException) SneakyThrows(lombok.SneakyThrows) AssertExtensions(io.pravega.test.common.AssertExtensions) ReadOnlyStorage(io.pravega.segmentstore.storage.ReadOnlyStorage) RequiredArgsConstructor(lombok.RequiredArgsConstructor) TimeoutException(java.util.concurrent.TimeoutException) Cleanup(lombok.Cleanup) Random(java.util.Random) UpdateableSegmentMetadata(io.pravega.segmentstore.server.UpdateableSegmentMetadata) StreamSegmentSealedException(io.pravega.segmentstore.contracts.StreamSegmentSealedException) ByteArrayInputStream(java.io.ByteArrayInputStream) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) BufferView(io.pravega.common.util.BufferView) Duration(java.time.Duration) Map(java.util.Map) CachePolicy(io.pravega.segmentstore.server.CachePolicy) TestCacheManager(io.pravega.segmentstore.server.TestCacheManager) CancellationException(java.util.concurrent.CancellationException) Collection(java.util.Collection) InMemoryStorage(io.pravega.segmentstore.storage.mocks.InMemoryStorage) CompletionException(java.util.concurrent.CompletionException) ReadResultEntryType(io.pravega.segmentstore.contracts.ReadResultEntryType) UUID(java.util.UUID) Collectors(java.util.stream.Collectors) StreamSegmentMetadata(io.pravega.segmentstore.server.containers.StreamSegmentMetadata) List(java.util.List) ByteArraySegment(io.pravega.common.util.ByteArraySegment) ThreadPooledTestSuite(io.pravega.test.common.ThreadPooledTestSuite) DirectMemoryCache(io.pravega.segmentstore.storage.cache.DirectMemoryCache) TestUtils(io.pravega.test.common.TestUtils) Futures(io.pravega.common.concurrent.Futures) ReadResult(io.pravega.segmentstore.contracts.ReadResult) TestStorage(io.pravega.segmentstore.server.TestStorage) ObjectClosedException(io.pravega.common.ObjectClosedException) MetadataBuilder(io.pravega.segmentstore.server.MetadataBuilder) ByteArrayOutputStream(java.io.ByteArrayOutputStream) Getter(lombok.Getter) Exceptions(io.pravega.common.Exceptions) HashMap(java.util.HashMap) CompletableFuture(java.util.concurrent.CompletableFuture) AtomicReference(java.util.concurrent.atomic.AtomicReference) CacheStorage(io.pravega.segmentstore.storage.cache.CacheStorage) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) EvictableMetadata(io.pravega.segmentstore.server.EvictableMetadata) UpdateableContainerMetadata(io.pravega.segmentstore.server.UpdateableContainerMetadata) SegmentMetadata(io.pravega.segmentstore.server.SegmentMetadata) CacheState(io.pravega.segmentstore.storage.cache.CacheState) ReadResultEntry(io.pravega.segmentstore.contracts.ReadResultEntry) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) BiConsumer(java.util.function.BiConsumer) Timeout(org.junit.rules.Timeout) ReusableLatch(io.pravega.common.util.ReusableLatch) StreamSegmentTruncatedException(io.pravega.segmentstore.contracts.StreamSegmentTruncatedException) NameUtils(io.pravega.shared.NameUtils) IntentionalException(io.pravega.test.common.IntentionalException) lombok.val(lombok.val) IOException(java.io.IOException) Test(org.junit.Test) TimeUnit(java.util.concurrent.TimeUnit) Consumer(java.util.function.Consumer) AtomicLong(java.util.concurrent.atomic.AtomicLong) Mockito(org.mockito.Mockito) Rule(org.junit.Rule) Assert(org.junit.Assert) Collections(java.util.Collections)
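
To make the generation mechanics above concrete, here is a self-contained miniature of a generational eviction policy (illustrative plain Java, not Pravega's CacheManager): each entry records the generation in which it was last touched, applyCachePolicy() advances the current generation and evicts the oldest-generation entries while the cache is over its entry budget, and touch() moves an entry to the newest generation, which is why the touched pre-storage entries in the test survive the first eviction round.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

final class MiniGenerationalCache {
    private final Map<Integer, Long> lastTouchedGeneration = new HashMap<>();
    private final int maxSize;
    private long currentGeneration = 0;

    MiniGenerationalCache(int maxSize) {
        this.maxSize = maxSize;
    }

    void insert(int entryId) {
        // New entries always belong to the current generation.
        this.lastTouchedGeneration.put(entryId, this.currentGeneration);
    }

    void touch(int entryId) {
        // Reading an entry bumps it to the newest generation, shielding it from the next eviction round.
        this.lastTouchedGeneration.computeIfPresent(entryId, (id, g) -> this.currentGeneration);
    }

    List<Integer> applyCachePolicy() {
        // Advance the generation, then evict whole oldest generations while over the size budget.
        this.currentGeneration++;
        List<Integer> evicted = new ArrayList<>();
        while (this.lastTouchedGeneration.size() > this.maxSize) {
            long oldest = this.lastTouchedGeneration.values().stream().min(Long::compare).orElse(this.currentGeneration);
            this.lastTouchedGeneration.entrySet().removeIf(e -> {
                if (e.getValue() == oldest) {
                    evicted.add(e.getKey());
                    return true;
                }
                return false;
            });
        }
        return evicted;
    }
}

In terms of the test above, the two append loops play the role of insert() rounds interleaved with applyCachePolicy() calls, and the reads in step 5 correspond to touch().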

Aggregations

SegmentMetadata (io.pravega.segmentstore.server.SegmentMetadata): 58 usages
UpdateableSegmentMetadata (io.pravega.segmentstore.server.UpdateableSegmentMetadata): 41 usages
lombok.val (lombok.val): 25 usages
ArrayList (java.util.ArrayList): 24 usages
Test (org.junit.Test): 23 usages
StreamSegmentNotExistsException (io.pravega.segmentstore.contracts.StreamSegmentNotExistsException): 20 usages
Duration (java.time.Duration): 18 usages
HashMap (java.util.HashMap): 18 usages
CompletableFuture (java.util.concurrent.CompletableFuture): 18 usages
Futures (io.pravega.common.concurrent.Futures): 17 usages
Collectors (java.util.stream.Collectors): 17 usages
Cleanup (lombok.Cleanup): 17 usages
Exceptions (io.pravega.common.Exceptions): 15 usages
StreamSegmentMetadata (io.pravega.segmentstore.server.containers.StreamSegmentMetadata): 15 usages
AtomicLong (java.util.concurrent.atomic.AtomicLong): 15 usages
UpdateableContainerMetadata (io.pravega.segmentstore.server.UpdateableContainerMetadata): 14 usages
Collection (java.util.Collection): 14 usages
Map (java.util.Map): 14 usages
SegmentProperties (io.pravega.segmentstore.contracts.SegmentProperties): 13 usages
MetadataBuilder (io.pravega.segmentstore.server.MetadataBuilder): 13 usages