
Example 61 with BufferView

Use of io.pravega.common.util.BufferView in project pravega by pravega.

In class SegmentAggregator, method flushPendingAppends:

/**
 * Flushes all Append Operations that can be flushed up to the maximum allowed flush size.
 *
 * @param timeout  Timeout for the operation.
 * @return A CompletableFuture that, when completed, will contain the result from the flush operation.
 */
private CompletableFuture<WriterFlushResult> flushPendingAppends(Duration timeout) {
    // Gather a BufferView made up of all the operations we can flush.
    BufferView flushData;
    try {
        flushData = getFlushData();
    } catch (DataCorruptionException ex) {
        return Futures.failedFuture(ex);
    }
    long traceId = LoggerHelpers.traceEnterWithContext(log, this.traceObjectId, "flushPendingAppends");
    // Flush them.
    TimeoutTimer timer = new TimeoutTimer(timeout);
    CompletableFuture<Void> flush;
    if (flushData == null || flushData.getLength() == 0) {
        flush = CompletableFuture.completedFuture(null);
    } else {
        flush = createSegmentIfNecessary(() -> this.storage.write(this.handle.get(), this.metadata.getStorageLength(), flushData.getReader(), flushData.getLength(), timer.getRemaining()), timer.getRemaining());
    }
    return flush.thenApplyAsync(v -> {
        WriterFlushResult result = updateStatePostFlush(flushData);
        LoggerHelpers.traceLeave(log, this.traceObjectId, "flushPendingAppends", traceId, result);
        return result;
    }, this.executor).exceptionally(ex -> {
        if (Exceptions.unwrap(ex) instanceof BadOffsetException) {
            // We attempted to write at an offset that already contained other data. This can happen for a number of
            // reasons, but we do not have enough information here to determine why. We need to enter reconciliation
            // mode, which will determine the actual state of the segment in storage and take appropriate actions.
            setState(AggregatorState.ReconciliationNeeded);
        }
        // Rethrow all exceptions.
        throw new CompletionException(ex);
    });
}
Also used : Storage(io.pravega.segmentstore.storage.Storage) StreamSegmentInformation(io.pravega.segmentstore.contracts.StreamSegmentInformation) StreamSegmentNotExistsException(io.pravega.segmentstore.contracts.StreamSegmentNotExistsException) SneakyThrows(lombok.SneakyThrows) MergeSegmentOperation(io.pravega.segmentstore.server.logs.operations.MergeSegmentOperation) Cleanup(lombok.Cleanup) ServiceHaltException(io.pravega.segmentstore.server.ServiceHaltException) UpdateableSegmentMetadata(io.pravega.segmentstore.server.UpdateableSegmentMetadata) SegmentProperties(io.pravega.segmentstore.contracts.SegmentProperties) StreamSegmentSealedException(io.pravega.segmentstore.contracts.StreamSegmentSealedException) SegmentHandle(io.pravega.segmentstore.storage.SegmentHandle) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) BufferView(io.pravega.common.util.BufferView) Duration(java.time.Duration) Operation(io.pravega.segmentstore.server.logs.operations.Operation) WriterFlushResult(io.pravega.segmentstore.server.WriterFlushResult) StreamSegmentTruncateOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentTruncateOperation) Attributes(io.pravega.segmentstore.contracts.Attributes) Predicate(java.util.function.Predicate) CompletionException(java.util.concurrent.CompletionException) ThreadSafe(javax.annotation.concurrent.ThreadSafe) GuardedBy(javax.annotation.concurrent.GuardedBy) Collectors(java.util.stream.Collectors) List(java.util.List) Slf4j(lombok.extern.slf4j.Slf4j) StreamSegmentExistsException(io.pravega.segmentstore.contracts.StreamSegmentExistsException) BadOffsetException(io.pravega.segmentstore.contracts.BadOffsetException) WriterSegmentProcessor(io.pravega.segmentstore.server.WriterSegmentProcessor) Futures(io.pravega.common.concurrent.Futures) Getter(lombok.Getter) SegmentRollingPolicy(io.pravega.segmentstore.storage.SegmentRollingPolicy) Exceptions(io.pravega.common.Exceptions) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) CompletableFuture(java.util.concurrent.CompletableFuture) AtomicReference(java.util.concurrent.atomic.AtomicReference) Supplier(java.util.function.Supplier) AbstractTimer(io.pravega.common.AbstractTimer) SegmentMetadata(io.pravega.segmentstore.server.SegmentMetadata) Nullable(javax.annotation.Nullable) LoggerHelpers(io.pravega.common.LoggerHelpers) TimeoutTimer(io.pravega.common.TimeoutTimer) Executor(java.util.concurrent.Executor) AtomicLong(java.util.concurrent.atomic.AtomicLong) SegmentOperation(io.pravega.segmentstore.server.SegmentOperation) CachedStreamSegmentAppendOperation(io.pravega.segmentstore.server.logs.operations.CachedStreamSegmentAppendOperation) StorageOperation(io.pravega.segmentstore.server.logs.operations.StorageOperation) StreamSegmentAppendOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentAppendOperation) Preconditions(com.google.common.base.Preconditions) DataCorruptionException(io.pravega.segmentstore.server.DataCorruptionException) ArrayDeque(java.util.ArrayDeque) DeleteSegmentOperation(io.pravega.segmentstore.server.logs.operations.DeleteSegmentOperation) StreamSegmentSealOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentSealOperation) InputStream(java.io.InputStream)
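The exceptionally handler above only works because it first unwraps the exception: CompletableFuture wraps failures in a CompletionException, so the raw throwable cannot be matched directly against BadOffsetException. The following is a minimal, standard-library-only sketch of that unwrap-inspect-rethrow pattern; the local unwrap helper is a simplified stand-in for io.pravega.common.Exceptions.unwrap, and IllegalStateException merely stands in for BadOffsetException.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;

public class UnwrapRethrowSketch {
    // Simplified stand-in for Exceptions.unwrap(): peel off CompletionException
    // layers until the underlying cause is reached.
    static Throwable unwrap(Throwable ex) {
        while (ex instanceof CompletionException && ex.getCause() != null) {
            ex = ex.getCause();
        }
        return ex;
    }

    public static void main(String[] args) {
        // IllegalStateException stands in for BadOffsetException here.
        CompletableFuture<Void> flush = CompletableFuture.failedFuture(new IllegalStateException("offset conflict"));
        CompletableFuture<Void> handled = flush.exceptionally(ex -> {
            if (unwrap(ex) instanceof IllegalStateException) {
                System.out.println("offset conflict detected; would enter reconciliation mode here");
            }
            // Rethrow so downstream stages still observe the original failure.
            throw new CompletionException(ex);
        });
        handled.whenComplete((v, ex) -> System.out.println("still failed: " + (ex != null)));
    }
}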

Example 62 with BufferView

Use of io.pravega.common.util.BufferView in project pravega by pravega.

In class TableBucketReader, method find:

/**
 * Attempts to locate something in a TableBucket that matches a particular key.
 *
 * @param soughtKey    A {@link BufferView} instance representing the Key we are looking for.
 * @param bucketOffset The current segment offset of the Table Bucket we are looking into.
 * @param timer        A {@link TimeoutTimer} for the operation.
 * @return A CompletableFuture that, when completed, will contain the desired result, or null if no such result
 * was found.
 */
CompletableFuture<ResultT> find(BufferView soughtKey, long bucketOffset, TimeoutTimer timer) {
    int maxReadLength = getMaxReadLength();
    // Read the Key at the current offset and check it against the sought one.
    AtomicLong offset = new AtomicLong(bucketOffset);
    CompletableFuture<ResultT> result = new CompletableFuture<>();
    Futures.loop(() -> !result.isDone(), () -> {
        // Read the Key from the Segment. Copy it out of the Segment to avoid losing it or getting corrupted
        // values back in case of a cache eviction. See {@link ReadResult#setCopyOnRead(boolean)}.
        ReadResult readResult = this.segment.read(offset.get(), maxReadLength, timer.getRemaining());
        val reader = getReader(soughtKey, offset.get(), timer);
        AsyncReadResultProcessor.process(readResult, reader, this.executor);
        return reader.getResult().thenComposeAsync(r -> {
            SearchContinuation sc = processResult(r, soughtKey);
            if (sc == SearchContinuation.ResultFound || sc == SearchContinuation.NoResult) {
                // We either definitely found the result or definitely did not find the result.
                // In the case we did not find what we were looking for, we may still have some
                // partial result to return to the caller (i.e., a TableEntry with no value, but with
                // a version, which indicates a deleted entry, as opposed to a nonexistent one).
                result.complete(r);
            } else {
                return this.getBackpointer.apply(this.segment, offset.get(), timer.getRemaining()).thenAccept(newOffset -> {
                    offset.set(newOffset);
                    if (newOffset < 0) {
                        // Could not find anything.
                        result.complete(null);
                    }
                });
            }
            return CompletableFuture.completedFuture(null);
        }, this.executor);
    }, this.executor).exceptionally(ex -> {
        result.completeExceptionally(ex);
        return null;
    });
    return result;
}
Also used : lombok.val(lombok.val) TableKey(io.pravega.segmentstore.contracts.tables.TableKey) TimeoutTimer(io.pravega.common.TimeoutTimer) Executor(java.util.concurrent.Executor) NonNull(lombok.NonNull) RequiredArgsConstructor(lombok.RequiredArgsConstructor) HashMap(java.util.HashMap) CompletableFuture(java.util.concurrent.CompletableFuture) Collectors(java.util.stream.Collectors) Objects(java.util.Objects) AtomicLong(java.util.concurrent.atomic.AtomicLong) DirectSegmentAccess(io.pravega.segmentstore.server.DirectSegmentAccess) List(java.util.List) BufferView(io.pravega.common.util.BufferView) Duration(java.time.Duration) BiConsumer(java.util.function.BiConsumer) TableEntry(io.pravega.segmentstore.contracts.tables.TableEntry) Futures(io.pravega.common.concurrent.Futures) ReadResult(io.pravega.segmentstore.contracts.ReadResult) AsyncReadResultProcessor(io.pravega.segmentstore.server.reading.AsyncReadResultProcessor)
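The method's control flow, a Futures.loop that reads at the current offset and then either completes the result or follows a backpointer to an earlier offset, can be reduced to the following standard-library-only sketch. The readAt and backpointer helpers are hypothetical stand-ins for the segment read and this.getBackpointer; the real code delegates the looping to Pravega's Futures.loop utility rather than recursing as this sketch does.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicLong;

public class BackpointerWalkSketch {
    // Hypothetical read: yields the value stored at an offset, or null on a miss.
    static CompletableFuture<String> readAt(long offset) {
        return CompletableFuture.completedFuture(offset == 2 ? "match" : null);
    }

    // Hypothetical backpointer: the previous offset in the bucket's chain; negative means end of chain.
    static CompletableFuture<Long> backpointer(long offset) {
        return CompletableFuture.completedFuture(offset - 1);
    }

    static CompletableFuture<String> find(long startOffset) {
        AtomicLong offset = new AtomicLong(startOffset);
        CompletableFuture<String> result = new CompletableFuture<>();
        step(offset, result);
        return result;
    }

    // One loop iteration: complete the result on a hit or on chain exhaustion, otherwise advance.
    static void step(AtomicLong offset, CompletableFuture<String> result) {
        readAt(offset.get()).thenCompose(r -> {
            if (r != null) {
                result.complete(r);
                return CompletableFuture.<Void>completedFuture(null);
            }
            return backpointer(offset.get()).thenAccept(next -> {
                if (next < 0) {
                    result.complete(null); // Chain exhausted: no match.
                } else {
                    offset.set(next);
                }
            });
        }).whenComplete((v, ex) -> {
            if (ex != null) {
                result.completeExceptionally(ex);
            } else if (!result.isDone()) {
                step(offset, result); // Keep walking the chain.
            }
        });
    }

    public static void main(String[] args) {
        System.out.println(find(5).join()); // prints "match"
    }
}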

Example 63 with BufferView

Use of io.pravega.common.util.BufferView in project pravega by pravega.

In class AsyncTableEntryReaderTests, method testBufferCompaction:

private <T> void testBufferCompaction(GetEntryReader<T> createReader, Function<T, TableKey> getKey, Function<T, BufferView> getValue) throws Exception {
    // Must be less than AsyncTableEntryReader.INITIAL_READ_LENGTH / 2 (to ease testing).
    val keyLength = 3987;
    // Must be less than AsyncTableEntryReader.INITIAL_READ_LENGTH / 2 (to ease testing).
    val valueLength = 3123;
    val serializer = new EntrySerializer();
    // Generate a number of entries. We only care about the first one, but we want to ensure that we have enough other
    // data to force the ReadResult to try to read more.
    val testItems = generateTestItems(() -> keyLength, () -> valueLength);
    val entries = testItems.stream().filter(i -> !i.isRemoval).map(i -> TableEntry.unversioned(new ByteArraySegment(i.key), new ByteArraySegment(i.value))).collect(Collectors.toList());
    // Search for the first Key/Entry. This makes it easier as we don't have to guess the versions, offsets, etc.
    val soughtEntry = entries.get(0);
    val segmentData = serializer.serializeUpdate(entries).getCopy();
    @Cleanup val readResultNoCompact = new ReadResultMock(segmentData, keyLength + valueLength + 20, keyLength + 200);
    val readerNoCompact = createReader.apply(soughtEntry.getKey().getKey(), 0L, serializer, new TimeoutTimer(TIMEOUT));
    testBufferCompaction(readerNoCompact, readResultNoCompact, getKey, getValue, false);
    @Cleanup val readResultWithCompact = new ReadResultMock(segmentData, segmentData.length, segmentData.length);
    val readerWithCompact = createReader.apply(soughtEntry.getKey().getKey(), 0L, serializer, new TimeoutTimer(TIMEOUT));
    testBufferCompaction(readerWithCompact, readResultWithCompact, getKey, getValue, true);
}
Also used : lombok.val(lombok.val) AssertExtensions(io.pravega.test.common.AssertExtensions) RequiredArgsConstructor(lombok.RequiredArgsConstructor) Cleanup(lombok.Cleanup) Random(java.util.Random) Function(java.util.function.Function) Supplier(java.util.function.Supplier) ArrayList(java.util.ArrayList) ReadResultMock(io.pravega.segmentstore.server.ReadResultMock) BufferView(io.pravega.common.util.BufferView) Duration(java.time.Duration) Timeout(org.junit.rules.Timeout) SerializationException(io.pravega.common.io.SerializationException) AsyncReadResultProcessor(io.pravega.segmentstore.server.reading.AsyncReadResultProcessor) Nullable(javax.annotation.Nullable) TableKey(io.pravega.segmentstore.contracts.tables.TableKey) TimeoutTimer(io.pravega.common.TimeoutTimer) Test(org.junit.Test) Collectors(java.util.stream.Collectors) TimeUnit(java.util.concurrent.TimeUnit) Rule(org.junit.Rule) ByteArraySegment(io.pravega.common.util.ByteArraySegment) ThreadPooledTestSuite(io.pravega.test.common.ThreadPooledTestSuite) Assert(org.junit.Assert) TableEntry(io.pravega.segmentstore.contracts.tables.TableEntry) Collections(java.util.Collections)
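The compaction being tested addresses a memory concern: when the sought entry is only a small slice of a much larger read buffer, holding on to the slice pins the whole buffer, so the reader copies ("compacts") the entry out. The sketch below illustrates that tradeoff only; the 50% occupancy threshold and the maybeCompact helper are assumptions for illustration, not values or APIs from AsyncTableEntryReader.

import java.util.Arrays;

public class BufferCompactionSketch {
    // If the matched entry occupies only a small fraction of the backing read buffer,
    // copy it out so the large buffer can be released; otherwise return the buffer as-is.
    // The 0.5 threshold is an illustrative assumption.
    static byte[] maybeCompact(byte[] readBuffer, int entryOffset, int entryLength) {
        if ((double) entryLength / readBuffer.length < 0.5) {
            return Arrays.copyOfRange(readBuffer, entryOffset, entryOffset + entryLength); // compacted copy
        }
        return readBuffer; // Buffer is mostly this entry anyway; no copy needed.
    }

    public static void main(String[] args) {
        byte[] largeRead = new byte[16384];
        byte[] smallRead = new byte[256];
        System.out.println(maybeCompact(largeRead, 100, 200).length); // 200: copied out
        System.out.println(maybeCompact(smallRead, 0, 200).length);   // 256: original buffer kept
    }
}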

Example 64 with BufferView

Use of io.pravega.common.util.BufferView in project pravega by pravega.

In class FixedKeyLengthTableCompactorTests, method testCompactionConcurrentUpdate:

/**
 * Tests the case when a compaction executes concurrently with one of the compact-copied keys being updated. This is a
 * scenario specific to the Fixed-Key-Length Table Segment as the indexing is done at the time of the update
 * and not in the background (and hence in sequence).
 */
@Test
public void testCompactionConcurrentUpdate() {
    @Cleanup val context = createContext(KEY_COUNT * UPDATE_ENTRY_LENGTH);
    val rnd = new Random(0);
    // Generate keys.
    val keys = new ArrayList<BufferView>();
    val expectedValues = new HashMap<BufferView, BufferView>();
    for (int i = 0; i < KEY_COUNT; i++) {
        byte[] key = new byte[KEY_LENGTH];
        rnd.nextBytes(key);
        keys.add(new ByteArraySegment(key));
    }
    // Set utilization threshold to 76% so that we may trigger a compaction when we update half the keys once.
    context.setSegmentState(0, 0, 0, 0, 76);
    // Insert all the keys ...
    for (val key : keys) {
        expectedValues.put(key, updateKey(key, context, rnd));
    }
    // ... then update the second half. This should require a compaction which results in a copy.
    for (int i = keys.size() / 2; i < keys.size(); i++) {
        expectedValues.put(keys.get(i), updateKey(keys.get(i), context, rnd));
    }
    val originalLength = context.segmentMetadata.getLength();
    val c = context.getCompactor();
    Assert.assertEquals("Unexpected number of unique entries pre-compaction.", expectedValues.size(), (long) c.getUniqueEntryCount().join());
    Assert.assertEquals("Unexpected number of total entries pre-compaction.", expectedValues.size() + expectedValues.size() / 2, IndexReader.getTotalEntryCount(context.segmentMetadata));
    Assert.assertTrue("Expecting a compaction to be required.", c.isCompactionRequired().join());
    context.segment.setBeforeAppendCallback(() -> {
        // This callback is invoked while the compactor is running; it fires after the compactor has read and
        // processed all candidates, immediately before its conditional append is executed.
        // We can now update one of the keys that are copied with a new value.
        // Make sure we don't end up in an infinite loop here.
        context.segment.setBeforeAppendCallback(null);
        val firstKey = keys.get(0);
        expectedValues.put(firstKey, updateKey(firstKey, context, rnd));
    });
    c.compact(new TimeoutTimer(TIMEOUT)).join();
    // We should now verify that the compaction did eventually succeed and that all the keys have the correct (expected) values.
    AssertExtensions.assertGreaterThan("Segment length did not change.", originalLength, context.segmentMetadata.getLength());
    AssertExtensions.assertGreaterThan("No compaction occurred.", 0, IndexReader.getCompactionOffset(context.segmentMetadata));
    Assert.assertEquals("Unexpected number of unique entries post-compaction.", expectedValues.size(), (long) c.getUniqueEntryCount().join());
    Assert.assertEquals("Unexpected number of total entries post-compaction.", expectedValues.size(), IndexReader.getTotalEntryCount(context.segmentMetadata));
    // Read all the entries from the segment and validate that they are as expected.
    val actualValues = new HashMap<BufferView, BufferView>();
    context.segment.attributeIterator(AttributeId.Variable.minValue(KEY_LENGTH), AttributeId.Variable.maxValue(KEY_LENGTH), TIMEOUT).join().forEachRemaining(attributeValues -> {
        for (val av : attributeValues) {
            val reader = BufferView.wrap(context.segment.read(av.getValue(), UPDATE_ENTRY_LENGTH, TIMEOUT).readRemaining(UPDATE_ENTRY_LENGTH, TIMEOUT)).getBufferViewReader();
            try {
                val e = AsyncTableEntryReader.readEntryComponents(reader, av.getValue(), context.serializer);
                Assert.assertEquals("Mismatch keys.", av.getKey().toBuffer(), e.getKey());
                actualValues.put(e.getKey(), e.getValue());
            } catch (SerializationException ex) {
                throw new CompletionException(ex);
            }
        }
    }, executorService()).join();
    AssertExtensions.assertMapEquals("Unexpected entries in the segment after compaction.", expectedValues, actualValues);
}
Also used : lombok.val(lombok.val) IntStream(java.util.stream.IntStream) TableAttributes(io.pravega.segmentstore.contracts.tables.TableAttributes) Getter(lombok.Getter) AssertExtensions(io.pravega.test.common.AssertExtensions) Cleanup(lombok.Cleanup) HashMap(java.util.HashMap) Random(java.util.Random) ArrayList(java.util.ArrayList) AttributeUpdate(io.pravega.segmentstore.contracts.AttributeUpdate) BufferView(io.pravega.common.util.BufferView) SerializationException(io.pravega.common.io.SerializationException) Attributes(io.pravega.segmentstore.contracts.Attributes) ImmutableMap(com.google.common.collect.ImmutableMap) TimeoutTimer(io.pravega.common.TimeoutTimer) AttributeId(io.pravega.segmentstore.contracts.AttributeId) CompletionException(java.util.concurrent.CompletionException) Test(org.junit.Test) Collectors(java.util.stream.Collectors) AttributeUpdateCollection(io.pravega.segmentstore.contracts.AttributeUpdateCollection) ByteArraySegment(io.pravega.common.util.ByteArraySegment) AttributeUpdateType(io.pravega.segmentstore.contracts.AttributeUpdateType) Assert(org.junit.Assert) TableEntry(io.pravega.segmentstore.contracts.tables.TableEntry) Collections(java.util.Collections)
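The key test mechanism here is the one-shot before-append hook: it lets the test deterministically interleave a concurrent key update at the exact moment the compactor is about to issue its conditional append. A minimal sketch of that hook pattern follows; MockSegment and its methods are hypothetical, standing in for the test's context.segment.

import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;

public class BeforeAppendHookSketch {
    // Hypothetical segment whose append() first runs a test-supplied hook,
    // mirroring context.segment.setBeforeAppendCallback(...) above.
    static class MockSegment {
        final AtomicLong length = new AtomicLong();
        final AtomicReference<Runnable> beforeAppend = new AtomicReference<>();

        void setBeforeAppendCallback(Runnable callback) {
            beforeAppend.set(callback);
        }

        long append(int dataLength) {
            Runnable hook = beforeAppend.get();
            if (hook != null) {
                hook.run(); // Deterministically interleave a "concurrent" operation.
            }
            return length.addAndGet(dataLength);
        }
    }

    public static void main(String[] args) {
        MockSegment segment = new MockSegment();
        segment.setBeforeAppendCallback(() -> {
            segment.setBeforeAppendCallback(null); // One-shot, to avoid an infinite loop.
            segment.append(10); // The competing update lands just before the main append.
        });
        System.out.println(segment.append(100)); // 110: both writes applied, the update first.
    }
}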

Example 65 with BufferView

Use of io.pravega.common.util.BufferView in project pravega by pravega.

In class IndexReaderWriterTests, method updateKeys:

private long updateKeys(Map<BufferView, Long> keysWithOffset, IndexWriter w, HashMap<Long, BufferView> existingKeys, SegmentMock segment) {
    val timer = new TimeoutTimer(TIMEOUT);
    val keyUpdates = keysWithOffset.entrySet().stream().map(e -> new BucketUpdate.KeyUpdate(e.getKey(), decodeOffset(e.getValue()), decodeOffset(e.getValue()), isRemoveOffset(e.getValue()))).sorted(Comparator.comparingLong(BucketUpdate.KeyUpdate::getOffset)).collect(Collectors.toList());
    long firstKeyOffset = keyUpdates.get(0).getOffset();
    // This is the value that we will set TABLE_INDEX_NODE to. It is not any key's offset (and we don't really care what its value is).
    long postIndexOffset = keyUpdates.get(keyUpdates.size() - 1).getOffset() + 2 * MAX_KEY_LENGTH;
    // Generate the BucketUpdates for the keys, grouped by bucket.
    val builders = w.groupByBucket(segment, keyUpdates, timer).join();
    // Fetch existing keys.
    val oldOffsets = new ArrayList<Long>();
    val entryCount = new AtomicLong(IndexReader.getEntryCount(segment.getInfo()));
    long initialTotalEntryCount = IndexReader.getTotalEntryCount(segment.getInfo());
    int totalEntryCountDelta = 0;
    val bucketUpdates = new ArrayList<BucketUpdate>();
    for (val builder : builders) {
        w.getBucketOffsets(segment, builder.getBucket(), timer).join().forEach(offset -> {
            BufferView existingKey = existingKeys.getOrDefault(offset, null);
            Assert.assertNotNull("Existing bucket points to non-existing key.", existingKey);
            builder.withExistingKey(new BucketUpdate.KeyInfo(existingKey, offset, offset));
            // Key replacement; remove this offset.
            if (keysWithOffset.containsKey(existingKey)) {
                oldOffsets.add(offset);
                // Replaced or removed, we'll add it back if replaced.
                entryCount.decrementAndGet();
            }
        });
        // Add back the count of all keys that have been updated or added; we've already discounted all updates, insertions
        // and removals above, so adding just the updates and insertions will ensure the expected count is accurate.
        val bu = builder.build();
        bucketUpdates.add(bu);
        val deletedCount = bu.getKeyUpdates().stream().filter(BucketUpdate.KeyUpdate::isDeleted).count();
        entryCount.addAndGet(bu.getKeyUpdates().size() - deletedCount);
        totalEntryCountDelta += bu.getKeyUpdates().size();
    }
    // Apply the updates.
    val attrCount = w.updateBuckets(segment, bucketUpdates, firstKeyOffset, postIndexOffset, totalEntryCountDelta, TIMEOUT).join();
    AssertExtensions.assertGreaterThan("Expected at least one attribute to be modified.", 0, attrCount);
    checkEntryCount(entryCount.get(), segment);
    checkTotalEntryCount(initialTotalEntryCount + totalEntryCountDelta, segment);
    // Record the key as being updated.
    oldOffsets.forEach(existingKeys::remove);
    keysWithOffset.forEach((key, offset) -> {
        if (isRemoveOffset(offset)) {
            existingKeys.remove(decodeOffset(offset), key);
        } else {
            existingKeys.put(decodeOffset(offset), key);
        }
    });
    return postIndexOffset;
}
Also used : lombok.val(lombok.val) AtomicLong(java.util.concurrent.atomic.AtomicLong) BufferView(io.pravega.common.util.BufferView) ArrayList(java.util.ArrayList) TimeoutTimer(io.pravega.common.TimeoutTimer)
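The helpers decodeOffset and isRemoveOffset are used above but their bodies are not shown. One plausible encoding, shown purely for illustration and not taken from IndexReaderWriterTests, is to reserve the sign bit of the long as a removal flag and keep the actual offset in the remaining bits:

public class OffsetEncodingSketch {
    // Hypothetical scheme: the sign bit marks a removal; the lower 63 bits hold the offset.
    static final long REMOVE_FLAG = Long.MIN_VALUE;

    static long encodeRemoval(long offset) {
        return offset | REMOVE_FLAG;
    }

    static boolean isRemoveOffset(long encoded) {
        return (encoded & REMOVE_FLAG) != 0;
    }

    static long decodeOffset(long encoded) {
        return encoded & ~REMOVE_FLAG;
    }

    public static void main(String[] args) {
        long e = encodeRemoval(1234L);
        System.out.println(isRemoveOffset(e) + " " + decodeOffset(e)); // prints "true 1234"
    }
}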

Aggregations

BufferView (io.pravega.common.util.BufferView) 77
ArrayList (java.util.ArrayList) 49
lombok.val (lombok.val) 49
ByteArraySegment (io.pravega.common.util.ByteArraySegment) 44
Cleanup (lombok.Cleanup) 42
Duration (java.time.Duration) 39
Test (org.junit.Test) 39
List (java.util.List) 37
CompletableFuture (java.util.concurrent.CompletableFuture) 34
AssertExtensions (io.pravega.test.common.AssertExtensions) 29
HashMap (java.util.HashMap) 29
Assert (org.junit.Assert) 29
ThreadPooledTestSuite (io.pravega.test.common.ThreadPooledTestSuite) 28
TimeUnit (java.util.concurrent.TimeUnit) 28
AtomicReference (java.util.concurrent.atomic.AtomicReference) 26
Collectors (java.util.stream.Collectors) 26
AtomicLong (java.util.concurrent.atomic.AtomicLong) 25
Exceptions (io.pravega.common.Exceptions) 24
TableEntry (io.pravega.segmentstore.contracts.tables.TableEntry) 24
Map (java.util.Map) 22