
Example 11 with StorageOperation

use of io.pravega.segmentstore.server.logs.operations.StorageOperation in project pravega by pravega.

the class SegmentAggregatorTests method testSealAlreadySealed.

/**
 * Tests the flush() method when it has a StreamSegmentSealOperation but the Segment is already sealed in Storage.
 */
@Test
public void testSealAlreadySealed() throws Exception {
    // Create the test context and the Segment in Storage, then initialize the SegmentAggregator.
    @Cleanup TestContext context = new TestContext(DEFAULT_CONFIG);
    context.storage.create(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    context.segmentAggregator.initialize(TIMEOUT).join();
    // Generate and add a Seal Operation.
    StorageOperation sealOp = generateSealAndUpdateMetadata(SEGMENT_ID, context);
    context.segmentAggregator.add(sealOp);
    // Seal the segment in Storage, behind the scenes.
    context.storage.seal(InMemoryStorage.newHandle(context.segmentAggregator.getMetadata().getName(), false), TIMEOUT).join();
    // Call flush and verify no exception is thrown.
    context.segmentAggregator.flush(TIMEOUT).join();
    // Verify data - even though already sealed, make sure the metadata is updated accordingly.
    Assert.assertTrue("Segment is not marked in metadata as sealed in storage post flush.", context.segmentAggregator.getMetadata().isSealedInStorage());
}
Also used : StorageOperation(io.pravega.segmentstore.server.logs.operations.StorageOperation) Cleanup(lombok.Cleanup) Test(org.junit.Test)
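
For context, a hedged sketch of the behavior this test exercises: when the seal call finds the Segment already sealed in Storage, the Aggregator can swallow the StreamSegmentSealedException and still record the seal in its metadata. The method name flushPendingSeal and the markSealedInStorage() call are assumptions for illustration, not the actual SegmentAggregator code; the types used are from the import list shown in Example 15 below.

private CompletableFuture<Void> flushPendingSeal(Duration timeout) {
    return this.storage.seal(this.handle.get(), timeout)
            .exceptionally(ex -> {
                // Assumption: a Segment that is already sealed in Storage is not an error for this flush.
                if (Exceptions.unwrap(ex) instanceof StreamSegmentSealedException) {
                    return null;
                }
                throw new CompletionException(ex);
            })
            .thenRun(() -> this.metadata.markSealedInStorage()); // Hypothetical metadata update.
}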

Example 12 with StorageOperation

use of io.pravega.segmentstore.server.logs.operations.StorageOperation in project pravega by pravega.

the class SegmentAggregator method flushPendingTruncate.

/**
 * Flushes a pending StreamSegmentTruncateOperation, if that is the next pending one.
 *
 * @param flushResult The FlushResult of the operation just prior to this.
 * @param timeout     Timeout for the operation.
 * @return A CompletableFuture that, when completed, will contain the result from the flush operation, merged in with
 * the one passed in as input.
 */
private CompletableFuture<FlushResult> flushPendingTruncate(FlushResult flushResult, Duration timeout) {
    StorageOperation op = this.operations.getFirst();
    if (!isTruncateOperation(op) || !this.storage.supportsTruncation()) {
        // Nothing to do.
        return CompletableFuture.completedFuture(flushResult);
    }
    long truncateOffset = Math.min(this.metadata.getStorageLength(), op.getStreamSegmentOffset());
    return this.storage.truncate(this.handle.get(), truncateOffset, timeout).thenApplyAsync(v -> {
        updateStatePostTruncate();
        return flushResult;
    }, this.executor);
}
Also used : StorageOperation(io.pravega.segmentstore.server.logs.operations.StorageOperation)
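
The isTruncateOperation helper referenced above is not shown on this page; presumably it is just a type check against StreamSegmentTruncateOperation, which appears in this class's import list. A minimal sketch, assuming that shape:

private boolean isTruncateOperation(StorageOperation op) {
    // Assumption: truncation flushing only applies to StreamSegmentTruncateOperation instances.
    return op instanceof StreamSegmentTruncateOperation;
}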

Example 13 with StorageOperation

use of io.pravega.segmentstore.server.logs.operations.StorageOperation in project pravega by pravega.

the class SegmentAggregator method getOrCreateAggregatedAppend.

/**
 * Gets an existing AggregatedAppendOperation or creates a new one to add the given operation to.
 * An existing AggregatedAppend will be returned if it meets the following criteria:
 * * It is the last operation in the operation queue.
 * * Its size is smaller than maxLength.
 * * It is not sealed (already flushed).
 * <p>
 * If at least one of the above criteria is not met, a new AggregatedAppend is created and added at the end of the
 * operation queue.
 *
 * @param operationOffset         The Segment Offset of the operation to add.
 * @param operationSequenceNumber The Sequence Number of the operation to add.
 * @param maxLength               The maximum length for an aggregated append.
 * @return The AggregatedAppend to use (existing or freshly created).
 */
private AggregatedAppendOperation getOrCreateAggregatedAppend(long operationOffset, long operationSequenceNumber, int maxLength) {
    AggregatedAppendOperation aggregatedAppend = null;
    if (this.operations.size() > 0) {
        StorageOperation last = this.operations.getLast();
        if (last.getLength() < maxLength && isAppendOperation(last)) {
            aggregatedAppend = (AggregatedAppendOperation) last;
            if (aggregatedAppend.isSealed()) {
                aggregatedAppend = null;
            }
        }
    }
    if (aggregatedAppend == null) {
        // No operations or last operation not an AggregatedAppend - create a new one, while making sure the first
        // offset is not below the current StorageLength (otherwise we risk re-writing data that's already in Storage).
        long offset = Math.max(operationOffset, this.metadata.getStorageLength());
        aggregatedAppend = new AggregatedAppendOperation(this.metadata.getId(), offset, operationSequenceNumber);
        this.operations.add(aggregatedAppend);
    }
    return aggregatedAppend;
}
Also used : StorageOperation(io.pravega.segmentstore.server.logs.operations.StorageOperation)
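
To illustrate how this helper might be driven, here is a hedged sketch of a caller that splits an incoming append (appendOp) across aggregated appends once the current one reaches maxLength. The caller shape and the increaseLength() mutator are assumptions for illustration, not the actual add() code:

long remaining = appendOp.getLength();
long offset = appendOp.getStreamSegmentOffset();
while (remaining > 0) {
    // Reuse the last unsealed AggregatedAppend if it still has room, otherwise start a new one.
    AggregatedAppendOperation target = getOrCreateAggregatedAppend(offset, appendOp.getSequenceNumber(), maxLength);
    long bytesToAdd = Math.min(remaining, maxLength - target.getLength());
    target.increaseLength(bytesToAdd); // Hypothetical mutator; the real AggregatedAppendOperation may differ.
    offset += bytesToAdd;
    remaining -= bytesToAdd;
}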

Example 14 with StorageOperation

use of io.pravega.segmentstore.server.logs.operations.StorageOperation in project pravega by pravega.

the class SegmentAggregator method updateStatePostFlush.

/**
 * Updates the metadata and the internal state after a flush was completed.
 *
 * @param flushArgs The arguments used for flushing.
 * @return A FlushResult containing statistics about the flush operation.
 */
private FlushResult updateStatePostFlush(FlushArgs flushArgs) {
    // Update the metadata Storage Length.
    long newLength = this.metadata.getStorageLength() + flushArgs.getLength();
    this.metadata.setStorageLength(newLength);
    // Remove operations from the outstanding list as long as every single byte they contain has been committed.
    boolean reachedEnd = false;
    while (this.operations.size() > 0 && !reachedEnd) {
        StorageOperation first = this.operations.getFirst();
        long lastOffset = first.getLastStreamSegmentOffset();
        reachedEnd = lastOffset >= newLength;
        // Verify that if we did reach the 'newLength' offset, we were on an append operation. Anything else is indicative of a bug.
        assert reachedEnd || isAppendOperation(first) : "Flushed operation was not an Append.";
        if (lastOffset <= newLength) {
            this.operations.removeFirst();
        }
    }
    // Update the last flush checkpoint.
    this.lastFlush.set(this.timer.getElapsed());
    return new FlushResult().withFlushedBytes(flushArgs.getLength());
}
Also used : StorageOperation(io.pravega.segmentstore.server.logs.operations.StorageOperation)
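
A quick worked example of the removal loop: if the queue holds aggregated appends covering offsets [0, 100) and [100, 250), and a flush commits 180 bytes starting from StorageLength 0, then newLength becomes 180. The first operation (last offset 100 <= 180) is removed, while the second (last offset 250 > 180) sets reachedEnd and stays queued, since its tail bytes have not yet been written to Storage.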

Example 15 with StorageOperation

use of io.pravega.segmentstore.server.logs.operations.StorageOperation in project pravega by pravega.

the class SegmentAggregator method reconcileAppendOperation.

/**
 * Attempts to reconcile the given Append Operation. Since Append Operations can be partially flushed, reconciliation
 * may be for the full operation or for a part of it.
 *
 * @param op          The Operation (StreamSegmentAppendOperation or CachedStreamSegmentAppendOperation) to reconcile.
 * @param storageInfo The current state of the Segment in Storage.
 * @param timer       Timer for the operation.
 * @return A CompletableFuture containing a FlushResult with the number of bytes reconciled, or failed with a ReconciliationFailureException,
 * if the operation cannot be reconciled, based on the in-memory metadata or the current state of the Segment in Storage.
 */
private CompletableFuture<FlushResult> reconcileAppendOperation(StorageOperation op, SegmentProperties storageInfo, TimeoutTimer timer) {
    Preconditions.checkArgument(op instanceof AggregatedAppendOperation, "Not given an append operation.");
    // Read data from Storage, and compare byte-by-byte.
    InputStream appendStream = this.dataSource.getAppendData(op.getStreamSegmentId(), op.getStreamSegmentOffset(), (int) op.getLength());
    if (appendStream == null) {
        return Futures.failedFuture(new ReconciliationFailureException(String.format("Unable to reconcile operation '%s' because no append data is associated with it.", op), this.metadata, storageInfo));
    }
    // Only read as much data as we need.
    long readLength = Math.min(op.getLastStreamSegmentOffset(), storageInfo.getLength()) - op.getStreamSegmentOffset();
    assert readLength > 0 : "Append Operation to be reconciled is beyond the Segment's StorageLength " + op;
    AtomicInteger bytesReadSoFar = new AtomicInteger();
    // Read all data from storage.
    byte[] storageData = new byte[(int) readLength];
    return Futures.loop(
            () -> bytesReadSoFar.get() < readLength,
            () -> this.storage.read(this.handle.get(), op.getStreamSegmentOffset() + bytesReadSoFar.get(), storageData,
                    bytesReadSoFar.get(), (int) readLength - bytesReadSoFar.get(), timer.getRemaining()),
            bytesRead -> {
        assert bytesRead > 0 : String.format("Unable to make any read progress when reconciling operation '%s' after reading %s bytes.", op, bytesReadSoFar);
        bytesReadSoFar.addAndGet(bytesRead);
    }, this.executor).thenApplyAsync(v -> {
        // Compare, byte-by-byte, the contents of the append.
        verifySame(appendStream, storageData, op, storageInfo);
        if (readLength >= op.getLength() && op.getLastStreamSegmentOffset() <= storageInfo.getLength()) {
            // Operation has been completely validated; pop it off the list.
            StorageOperation removedOp = this.operations.removeFirst();
            assert op == removedOp : "Reconciled operation is not the same as removed operation";
        }
        return new FlushResult().withFlushedBytes(readLength);
    }, this.executor);
}
Also used : Storage(io.pravega.segmentstore.storage.Storage) Getter(lombok.Getter) StreamSegmentNotExistsException(io.pravega.segmentstore.contracts.StreamSegmentNotExistsException) SneakyThrows(lombok.SneakyThrows) Exceptions(io.pravega.common.Exceptions) RequiredArgsConstructor(lombok.RequiredArgsConstructor) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) CompletableFuture(java.util.concurrent.CompletableFuture) AtomicReference(java.util.concurrent.atomic.AtomicReference) UpdateableSegmentMetadata(io.pravega.segmentstore.server.UpdateableSegmentMetadata) SegmentProperties(io.pravega.segmentstore.contracts.SegmentProperties) AbstractTimer(io.pravega.common.AbstractTimer) StreamSegmentSealedException(io.pravega.segmentstore.contracts.StreamSegmentSealedException) SegmentMetadata(io.pravega.segmentstore.server.SegmentMetadata) SegmentHandle(io.pravega.segmentstore.storage.SegmentHandle) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Duration(java.time.Duration) Operation(io.pravega.segmentstore.server.logs.operations.Operation) StreamSegmentTruncateOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentTruncateOperation) LoggerHelpers(io.pravega.common.LoggerHelpers) TimeoutTimer(io.pravega.common.TimeoutTimer) Executor(java.util.concurrent.Executor) CompletionException(java.util.concurrent.CompletionException) ThreadSafe(javax.annotation.concurrent.ThreadSafe) GuardedBy(javax.annotation.concurrent.GuardedBy) AtomicLong(java.util.concurrent.atomic.AtomicLong) Slf4j(lombok.extern.slf4j.Slf4j) CachedStreamSegmentAppendOperation(io.pravega.segmentstore.server.logs.operations.CachedStreamSegmentAppendOperation) StorageOperation(io.pravega.segmentstore.server.logs.operations.StorageOperation) StreamSegmentAppendOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentAppendOperation) BadOffsetException(io.pravega.segmentstore.contracts.BadOffsetException) Preconditions(com.google.common.base.Preconditions) DataCorruptionException(io.pravega.segmentstore.server.DataCorruptionException) ArrayDeque(java.util.ArrayDeque) MergeTransactionOperation(io.pravega.segmentstore.server.logs.operations.MergeTransactionOperation) Futures(io.pravega.common.concurrent.Futures) StreamSegmentSealOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentSealOperation) InputStream(java.io.InputStream)
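
The verifySame call above is not shown on this page; here is a minimal sketch of what such a byte-by-byte comparison could look like, using only types already listed in the imports above plus java.io.IOException. The body is an assumption for illustration, not the actual implementation:

@SneakyThrows(IOException.class)
private void verifySame(InputStream appendStream, byte[] storageData, StorageOperation op, SegmentProperties storageInfo) {
    for (int i = 0; i < storageData.length; i++) {
        if ((byte) appendStream.read() != storageData[i]) {
            // Any mismatch means the in-memory append data and the data in Storage have diverged.
            throw new CompletionException(new ReconciliationFailureException(
                    String.format("Data mismatch at offset %d while reconciling operation '%s'.",
                            op.getStreamSegmentOffset() + i, op), this.metadata, storageInfo));
        }
    }
}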

Aggregations

StorageOperation (io.pravega.segmentstore.server.logs.operations.StorageOperation) 34
Test (org.junit.Test) 24
Cleanup (lombok.Cleanup) 21
DataCorruptionException (io.pravega.segmentstore.server.DataCorruptionException) 15
UpdateableSegmentMetadata (io.pravega.segmentstore.server.UpdateableSegmentMetadata) 15
FixedByteArrayOutputStream (io.pravega.common.io.FixedByteArrayOutputStream) 14
StreamSegmentNotExistsException (io.pravega.segmentstore.contracts.StreamSegmentNotExistsException) 14
MergeTransactionOperation (io.pravega.segmentstore.server.logs.operations.MergeTransactionOperation) 14
ByteArrayOutputStream (java.io.ByteArrayOutputStream) 14
ArrayList (java.util.ArrayList) 14
AtomicLong (java.util.concurrent.atomic.AtomicLong) 14
StreamSegmentAppendOperation (io.pravega.segmentstore.server.logs.operations.StreamSegmentAppendOperation) 13
lombok.val (lombok.val) 13
SegmentProperties (io.pravega.segmentstore.contracts.SegmentProperties) 12
SegmentMetadata (io.pravega.segmentstore.server.SegmentMetadata) 12
InputStream (java.io.InputStream) 12
BadOffsetException (io.pravega.segmentstore.contracts.BadOffsetException) 11
CachedStreamSegmentAppendOperation (io.pravega.segmentstore.server.logs.operations.CachedStreamSegmentAppendOperation) 11
Operation (io.pravega.segmentstore.server.logs.operations.Operation) 11
IntentionalException (io.pravega.test.common.IntentionalException) 11