
Example 6 with WriterFlushResult

Use of io.pravega.segmentstore.server.WriterFlushResult in project pravega by pravega.

From class SegmentAggregatorTests, method testFlushAppend.

// endregion
// region flush()
/**
 * Tests the flush() method only with Append operations.
 * Verifies both length-based and time-based flush triggers, as well as flushing rather large operations.
 */
@Test
public void testFlushAppend() throws Exception {
    final WriterConfig config = DEFAULT_CONFIG;
    final int appendCount = config.getFlushThresholdBytes() * 10;
    @Cleanup TestContext context = new TestContext(config);
    context.segmentAggregator.initialize(TIMEOUT).join();
    @Cleanup ByteArrayOutputStream writtenData = new ByteArrayOutputStream();
    // Number of bytes remaining to be flushed.
    AtomicLong outstandingSize = new AtomicLong();
    SequenceNumberCalculator sequenceNumbers = new SequenceNumberCalculator(context, outstandingSize);
    // Part 1: flush triggered by accumulated size.
    for (int i = 0; i < appendCount; i++) {
        // Add another operation and record its length.
        StorageOperation appendOp = generateAppendAndUpdateMetadata(i, SEGMENT_ID, context);
        outstandingSize.addAndGet(appendOp.getLength());
        context.segmentAggregator.add(appendOp);
        getAppendData(appendOp, writtenData, context);
        sequenceNumbers.record(appendOp);
        boolean expectFlush = outstandingSize.get() >= config.getFlushThresholdBytes();
        Assert.assertEquals("Unexpected value returned by mustFlush() (size threshold).", expectFlush, context.segmentAggregator.mustFlush());
        Assert.assertEquals("Unexpected value returned by getLowestUncommittedSequenceNumber() before flush (size threshold).", sequenceNumbers.getLowestUncommitted(), context.segmentAggregator.getLowestUncommittedSequenceNumber());
        // Call flush() and inspect the result.
        WriterFlushResult flushResult = context.segmentAggregator.flush(TIMEOUT).join();
        if (expectFlush) {
            AssertExtensions.assertGreaterThanOrEqual("Not enough bytes were flushed (size threshold).", config.getFlushThresholdBytes(), flushResult.getFlushedBytes());
            outstandingSize.addAndGet(-flushResult.getFlushedBytes());
            Assert.assertEquals("Unexpected value returned by getLowestUncommittedSequenceNumber() after flush (size threshold).", sequenceNumbers.getLowestUncommitted(), context.segmentAggregator.getLowestUncommittedSequenceNumber());
        } else {
            Assert.assertEquals(String.format("Not expecting a flush. OutstandingSize=%s, Threshold=%d", outstandingSize, config.getFlushThresholdBytes()), 0, flushResult.getFlushedBytes());
        }
        Assert.assertFalse("Unexpected value returned by mustFlush() after flush (size threshold).", context.segmentAggregator.mustFlush());
        Assert.assertEquals("Not expecting any merged bytes in this test.", 0, flushResult.getMergedBytes());
    }
    // Part 2: flush triggered by time.
    for (int i = 0; i < appendCount; i++) {
        // Add another operation and record its length.
        StorageOperation appendOp = generateAppendAndUpdateMetadata(i, SEGMENT_ID, context);
        outstandingSize.addAndGet(appendOp.getLength());
        context.segmentAggregator.add(appendOp);
        getAppendData(appendOp, writtenData, context);
        sequenceNumbers.record(appendOp);
        // Call flush() and inspect the result.
        // Force a flush by incrementing the time by a lot.
        context.increaseTime(config.getFlushThresholdTime().toMillis() + 1);
        Assert.assertTrue("Unexpected value returned by mustFlush() (time threshold).", context.segmentAggregator.mustFlush());
        Assert.assertEquals("Unexpected value returned by getLowestUncommittedSequenceNumber() before flush (time threshold).", sequenceNumbers.getLowestUncommitted(), context.segmentAggregator.getLowestUncommittedSequenceNumber());
        WriterFlushResult flushResult = context.segmentAggregator.flush(TIMEOUT).join();
        // We are always expecting a flush.
        AssertExtensions.assertGreaterThan("Not enough bytes were flushed (time threshold).", 0, flushResult.getFlushedBytes());
        outstandingSize.addAndGet(-flushResult.getFlushedBytes());
        Assert.assertFalse("Unexpected value returned by mustFlush() after flush (time threshold).", context.segmentAggregator.mustFlush());
        Assert.assertEquals("Unexpected value returned by getLowestUncommittedSequenceNumber() after flush (time threshold).", sequenceNumbers.getLowestUncommitted(), context.segmentAggregator.getLowestUncommittedSequenceNumber());
        Assert.assertEquals("Not expecting any merged bytes in this test.", 0, flushResult.getMergedBytes());
    }
    // Part 3: Transaction appends. This will force flush() to loop internally and flush repeatedly.
    final int transactionSize = 100;
    for (int i = 0; i < appendCount / 10; i++) {
        for (int j = 0; j < transactionSize; j++) {
            // Add another operation and record its length.
            StorageOperation appendOp = generateAppendAndUpdateMetadata(i, SEGMENT_ID, context);
            outstandingSize.addAndGet(appendOp.getLength());
            context.segmentAggregator.add(appendOp);
            getAppendData(appendOp, writtenData, context);
            sequenceNumbers.record(appendOp);
            Assert.assertEquals("Unexpected value returned by getLowestUncommittedSequenceNumber() before flush (Transaction appends).", sequenceNumbers.getLowestUncommitted(), context.segmentAggregator.getLowestUncommittedSequenceNumber());
        }
        // Call flush() and inspect the result.
        Assert.assertTrue("Unexpected value returned by mustFlush() (Transaction appends).", context.segmentAggregator.mustFlush());
        WriterFlushResult flushResult = context.segmentAggregator.flush(TIMEOUT).join();
        // We are always expecting a flush.
        AssertExtensions.assertGreaterThan("Not enough bytes were flushed (Transaction appends).", 0, flushResult.getFlushedBytes());
        outstandingSize.addAndGet(-flushResult.getFlushedBytes());
        Assert.assertFalse("Unexpected value returned by mustFlush() after flush (Transaction appends).", context.segmentAggregator.mustFlush());
        Assert.assertEquals("Unexpected value returned by getLowestUncommittedSequenceNumber() after flush (Transaction appends).", sequenceNumbers.getLowestUncommitted(), context.segmentAggregator.getLowestUncommittedSequenceNumber());
        Assert.assertEquals("Not expecting any merged bytes in this test.", 0, flushResult.getMergedBytes());
    }
    // Part 4: large appends (larger than MaxFlushSize).
    Random random = RandomFactory.create();
    for (int i = 0; i < appendCount; i++) {
        // Add another operation and record its length.
        byte[] largeAppendData = new byte[config.getMaxFlushSizeBytes() * 10 + 1];
        random.nextBytes(largeAppendData);
        StorageOperation appendOp = generateAppendAndUpdateMetadata(SEGMENT_ID, largeAppendData, context);
        outstandingSize.addAndGet(appendOp.getLength());
        context.segmentAggregator.add(appendOp);
        getAppendData(appendOp, writtenData, context);
        sequenceNumbers.record(appendOp);
        // Call flush() and inspect the result.
        // Force a flush by incrementing the time by a lot.
        context.increaseTime(config.getFlushThresholdTime().toMillis() + 1);
        Assert.assertTrue("Unexpected value returned by mustFlush() (large appends).", context.segmentAggregator.mustFlush());
        Assert.assertEquals("Unexpected value returned by getLowestUncommittedSequenceNumber() before flush (large appends).", sequenceNumbers.getLowestUncommitted(), context.segmentAggregator.getLowestUncommittedSequenceNumber());
        WriterFlushResult flushResult = context.segmentAggregator.flush(TIMEOUT).join();
        // We are always expecting a flush.
        AssertExtensions.assertGreaterThan("Not enough bytes were flushed (large appends).", 0, flushResult.getFlushedBytes());
        outstandingSize.addAndGet(-flushResult.getFlushedBytes());
        Assert.assertFalse("Unexpected value returned by mustFlush() after flush (time threshold).", context.segmentAggregator.mustFlush());
        Assert.assertEquals("Unexpected value returned by getLowestUncommittedSequenceNumber() after flush (large appends).", sequenceNumbers.getLowestUncommitted(), context.segmentAggregator.getLowestUncommittedSequenceNumber());
        Assert.assertEquals("Not expecting any merged bytes in this test (large appends).", 0, flushResult.getMergedBytes());
    }
    // Verify data.
    Assert.assertEquals("Not expecting leftover data not flushed.", 0, outstandingSize.get());
    byte[] expectedData = writtenData.toByteArray();
    byte[] actualData = new byte[expectedData.length];
    long storageLength = context.storage.getStreamSegmentInfo(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join().getLength();
    Assert.assertEquals("Unexpected number of bytes flushed to Storage.", expectedData.length, storageLength);
    context.storage.read(readHandle(context.segmentAggregator.getMetadata().getName()), 0, actualData, 0, actualData.length, TIMEOUT).join();
    Assert.assertArrayEquals("Unexpected data written to storage.", expectedData, actualData);
}
Also used : WriterFlushResult(io.pravega.segmentstore.server.WriterFlushResult) ByteArrayOutputStream(java.io.ByteArrayOutputStream) Cleanup(lombok.Cleanup) AtomicLong(java.util.concurrent.atomic.AtomicLong) Random(java.util.Random) StorageOperation(io.pravega.segmentstore.server.logs.operations.StorageOperation) Test(org.junit.Test)
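Note: the test above relies on a SequenceNumberCalculator helper whose implementation is not included in this excerpt (the real helper is constructed with the test context and the outstanding-size counter, and its internals differ). As an illustration only, and not Pravega's actual helper, a tracker of this kind could record the sequence number of every added operation and report the lowest one not yet acknowledged as flushed; the sketch below assumes java.util.ArrayDeque plus the StorageOperation type already shown above:

// Illustrative stand-in (hypothetical; not Pravega's SequenceNumberCalculator).
// It only demonstrates the idea behind record() and getLowestUncommitted().
private static final class OutstandingSequenceTracker {
    private static final long NO_SEQUENCE_NUMBER = Long.MIN_VALUE; // sentinel used by this sketch
    private final ArrayDeque<Long> pending = new ArrayDeque<>();

    // Remember an operation that was added to the aggregator but not yet flushed.
    synchronized void record(StorageOperation op) {
        pending.addLast(op.getSequenceNumber());
    }

    // Discard everything up to and including the given sequence number once it has been flushed.
    synchronized void acknowledgeUpTo(long sequenceNumber) {
        while (!pending.isEmpty() && pending.peekFirst() <= sequenceNumber) {
            pending.removeFirst();
        }
    }

    // Lowest sequence number still awaiting a flush, or the sentinel if nothing is outstanding.
    synchronized long getLowestUncommitted() {
        return pending.isEmpty() ? NO_SEQUENCE_NUMBER : pending.peekFirst();
    }
}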

Example 7 with WriterFlushResult

Use of io.pravega.segmentstore.server.WriterFlushResult in project pravega by pravega.

From class AttributeAggregatorTests, method forceTimeFlush.

private WriterFlushResult forceTimeFlush(TestContext context) throws Exception {
    // Force a flush by incrementing the time by a lot.
    context.increaseTime(context.config.getFlushThresholdTime().toMillis() + 1);
    Assert.assertTrue("Unexpected value returned by mustFlush() (time threshold).", context.aggregator.mustFlush());
    WriterFlushResult flushResult = context.aggregator.flush(TIMEOUT).get(SHORT_TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    Assert.assertFalse("Unexpected value returned by mustFlush() after flush (time threshold).", context.aggregator.mustFlush());
    return flushResult;
}
Also used : WriterFlushResult(io.pravega.segmentstore.server.WriterFlushResult)

Example 8 with WriterFlushResult

Use of io.pravega.segmentstore.server.WriterFlushResult in project pravega by pravega.

From class SegmentAggregator, method flushFully.

/**
 * Flushes all Append Operations that can be flushed at the given moment (until the entire Aggregator is emptied out
 * or until a StreamSegmentSealOperation or MergeSegmentOperation is encountered).
 *
 * @param timer Timer for the operation.
 * @return A CompletableFuture that, when completed, will contain the result from the flush operation.
 */
private CompletableFuture<WriterFlushResult> flushFully(TimeoutTimer timer) {
    long traceId = LoggerHelpers.traceEnterWithContext(log, this.traceObjectId, "flushFully");
    WriterFlushResult result = new WriterFlushResult();
    return Futures.loop(
            this::canContinueFlushingFully,
            () -> flushPendingAppends(timer.getRemaining())
                    .thenCompose(flushResult -> flushPendingTruncate(flushResult, timer.getRemaining())),
            result::withFlushResult,
            this.executor)
            .thenApply(v -> {
                LoggerHelpers.traceLeave(log, this.traceObjectId, "flushFully", traceId, result);
                return result;
            });
}
Also used : WriterFlushResult(io.pravega.segmentstore.server.WriterFlushResult)
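flushFully is built on Pravega's Futures.loop utility, which keeps re-running an asynchronous body while a condition holds and hands each partial result to a consumer (here result::withFlushResult). Below is a minimal standalone sketch of that pattern, assuming the same loop(condition, body, resultConsumer, executor) overload that appears above, with a toy "flush" body standing in for the real one:

import io.pravega.common.concurrent.Futures;

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicInteger;

public class FuturesLoopSketch {
    public static void main(String[] args) {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        AtomicInteger remainingBatches = new AtomicInteger(5); // pretend 5 batches are pending
        AtomicInteger totalFlushedBytes = new AtomicInteger();

        // Re-run the body while the condition holds; fold each partial result into a total,
        // just as flushFully folds partial WriterFlushResults via result::withFlushResult.
        CompletableFuture<Void> loop = Futures.loop(
                () -> remainingBatches.get() > 0,                          // condition
                () -> CompletableFuture.supplyAsync(() -> {                // body: "flush" one batch
                    remainingBatches.decrementAndGet();
                    return 10;                                             // bytes flushed this pass
                }, executor),
                totalFlushedBytes::addAndGet,                              // consume each partial result
                executor);

        loop.join();
        System.out.println("Total flushed: " + totalFlushedBytes.get());   // prints 50
        executor.shutdown();
    }
}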

Example 9 with WriterFlushResult

Use of io.pravega.segmentstore.server.WriterFlushResult in project pravega by pravega.

From class SegmentAggregator, method flushNormally.

/**
 * Repeatedly flushes the contents of the Aggregator to the Storage as long as something immediate needs to be flushed,
 * such as a Seal or Merge operation.
 *
 * @param force Whether to force everything out.
 * @param timer Timer for the operation.
 * @return A CompletableFuture that, when completed, will contain the result from the flush operation.
 */
private CompletableFuture<WriterFlushResult> flushNormally(boolean force, TimeoutTimer timer) {
    assert this.state.get() == AggregatorState.Writing : "flushNormally cannot be called if state == " + this.state;
    long traceId = LoggerHelpers.traceEnterWithContext(log, this.traceObjectId, "flushNormally", force, this.operations.size());
    WriterFlushResult result = new WriterFlushResult();
    AtomicBoolean canContinue = new AtomicBoolean(true);
    return Futures.loop(canContinue::get, () -> flushOnce(force, timer), partialResult -> {
        canContinue.set(partialResult.getFlushedBytes() + partialResult.getMergedBytes() > 0);
        result.withFlushResult(partialResult);
    }, this.executor).thenApply(v -> {
        LoggerHelpers.traceLeave(log, this.traceObjectId, "flushNormally", traceId, result);
        return result;
    });
}
Also used : Storage(io.pravega.segmentstore.storage.Storage) StreamSegmentInformation(io.pravega.segmentstore.contracts.StreamSegmentInformation) StreamSegmentNotExistsException(io.pravega.segmentstore.contracts.StreamSegmentNotExistsException) SneakyThrows(lombok.SneakyThrows) MergeSegmentOperation(io.pravega.segmentstore.server.logs.operations.MergeSegmentOperation) Cleanup(lombok.Cleanup) ServiceHaltException(io.pravega.segmentstore.server.ServiceHaltException) UpdateableSegmentMetadata(io.pravega.segmentstore.server.UpdateableSegmentMetadata) SegmentProperties(io.pravega.segmentstore.contracts.SegmentProperties) StreamSegmentSealedException(io.pravega.segmentstore.contracts.StreamSegmentSealedException) SegmentHandle(io.pravega.segmentstore.storage.SegmentHandle) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) BufferView(io.pravega.common.util.BufferView) Duration(java.time.Duration) Operation(io.pravega.segmentstore.server.logs.operations.Operation) WriterFlushResult(io.pravega.segmentstore.server.WriterFlushResult) StreamSegmentTruncateOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentTruncateOperation) Attributes(io.pravega.segmentstore.contracts.Attributes) Predicate(java.util.function.Predicate) CompletionException(java.util.concurrent.CompletionException) ThreadSafe(javax.annotation.concurrent.ThreadSafe) GuardedBy(javax.annotation.concurrent.GuardedBy) Collectors(java.util.stream.Collectors) List(java.util.List) Slf4j(lombok.extern.slf4j.Slf4j) StreamSegmentExistsException(io.pravega.segmentstore.contracts.StreamSegmentExistsException) BadOffsetException(io.pravega.segmentstore.contracts.BadOffsetException) WriterSegmentProcessor(io.pravega.segmentstore.server.WriterSegmentProcessor) Futures(io.pravega.common.concurrent.Futures) Getter(lombok.Getter) SegmentRollingPolicy(io.pravega.segmentstore.storage.SegmentRollingPolicy) Exceptions(io.pravega.common.Exceptions) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) CompletableFuture(java.util.concurrent.CompletableFuture) AtomicReference(java.util.concurrent.atomic.AtomicReference) Supplier(java.util.function.Supplier) AbstractTimer(io.pravega.common.AbstractTimer) SegmentMetadata(io.pravega.segmentstore.server.SegmentMetadata) Nullable(javax.annotation.Nullable) LoggerHelpers(io.pravega.common.LoggerHelpers) TimeoutTimer(io.pravega.common.TimeoutTimer) Executor(java.util.concurrent.Executor) AtomicLong(java.util.concurrent.atomic.AtomicLong) SegmentOperation(io.pravega.segmentstore.server.SegmentOperation) CachedStreamSegmentAppendOperation(io.pravega.segmentstore.server.logs.operations.CachedStreamSegmentAppendOperation) StorageOperation(io.pravega.segmentstore.server.logs.operations.StorageOperation) StreamSegmentAppendOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentAppendOperation) Preconditions(com.google.common.base.Preconditions) DataCorruptionException(io.pravega.segmentstore.server.DataCorruptionException) ArrayDeque(java.util.ArrayDeque) DeleteSegmentOperation(io.pravega.segmentstore.server.logs.operations.DeleteSegmentOperation) StreamSegmentSealOperation(io.pravega.segmentstore.server.logs.operations.StreamSegmentSealOperation) InputStream(java.io.InputStream) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) WriterFlushResult(io.pravega.segmentstore.server.WriterFlushResult)
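flushNormally stops looping as soon as a pass moves no data, i.e. when getFlushedBytes() + getMergedBytes() of the partial result drops to zero. The same shape, factored into a small illustrative helper (hypothetical; not part of the Pravega source) that uses only the WriterFlushResult methods and imports already visible above:

// Hypothetical helper (sketch only): loop an arbitrary flush step until a pass makes no progress,
// accumulating the partial results into a single WriterFlushResult, as flushNormally does.
private CompletableFuture<WriterFlushResult> flushUntilIdle(
        Supplier<CompletableFuture<WriterFlushResult>> flushOnce, Executor executor) {
    WriterFlushResult total = new WriterFlushResult();
    AtomicBoolean canContinue = new AtomicBoolean(true);
    return Futures.loop(
            canContinue::get,
            flushOnce,
            partial -> {
                // Stop once a pass neither flushed appends nor merged a transaction segment.
                canContinue.set(partial.getFlushedBytes() + partial.getMergedBytes() > 0);
                total.withFlushResult(partial);
            },
            executor)
            .thenApply(v -> total);
}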

Example 10 with WriterFlushResult

Use of io.pravega.segmentstore.server.WriterFlushResult in project pravega by pravega.

From class SegmentAggregatorTests, method flushAllSegments.

private void flushAllSegments(TestContext context) throws Exception {
    // Keep flushing all segments in the TestContext for as long as at least one of them
    // still has something to flush and actually makes progress.
    boolean anythingFlushed = true;
    while (anythingFlushed) {
        anythingFlushed = false;
        for (SegmentAggregator transactionAggregator : context.transactionAggregators) {
            if (transactionAggregator.mustFlush()) {
                WriterFlushResult transactionFlushResult = transactionAggregator.flush(TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
                anythingFlushed = anythingFlushed | transactionFlushResult.getFlushedBytes() > 0;
            }
        }
        if (context.segmentAggregator.mustFlush()) {
            WriterFlushResult parentFlushResult = context.segmentAggregator.flush(TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
            anythingFlushed = anythingFlushed | (parentFlushResult.getFlushedBytes() + parentFlushResult.getMergedBytes()) > 0;
        }
    }
}
Also used : WriterFlushResult(io.pravega.segmentstore.server.WriterFlushResult)
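The loop above keeps passing over every aggregator until a full pass flushes nothing. A hypothetical variant (not in the Pravega test class) combines the per-aggregator results of one pass with withFlushResult and derives the termination condition from the aggregate totals instead of a boolean flag:

// Hypothetical variant of flushAllSegments (sketch only): aggregate each pass into a single
// WriterFlushResult and stop once the pass neither flushed nor merged any bytes.
private void flushAllSegmentsCombined(TestContext context) throws Exception {
    WriterFlushResult passResult;
    do {
        passResult = new WriterFlushResult();
        for (SegmentAggregator transactionAggregator : context.transactionAggregators) {
            if (transactionAggregator.mustFlush()) {
                passResult.withFlushResult(
                        transactionAggregator.flush(TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS));
            }
        }
        if (context.segmentAggregator.mustFlush()) {
            passResult.withFlushResult(
                    context.segmentAggregator.flush(TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS));
        }
        // Merged bytes count as progress because transaction merges complete on the parent segment.
    } while (passResult.getFlushedBytes() + passResult.getMergedBytes() > 0);
}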

Aggregations

WriterFlushResult (io.pravega.segmentstore.server.WriterFlushResult): 26 usages
AtomicLong (java.util.concurrent.atomic.AtomicLong): 17 usages
Cleanup (lombok.Cleanup): 16 usages
StorageOperation (io.pravega.segmentstore.server.logs.operations.StorageOperation): 15 usages
DataCorruptionException (io.pravega.segmentstore.server.DataCorruptionException): 12 usages
SegmentProperties (io.pravega.segmentstore.contracts.SegmentProperties): 11 usages
StreamSegmentNotExistsException (io.pravega.segmentstore.contracts.StreamSegmentNotExistsException): 11 usages
Operation (io.pravega.segmentstore.server.logs.operations.Operation): 11 usages
Duration (java.time.Duration): 11 usages
CompletableFuture (java.util.concurrent.CompletableFuture): 11 usages
Test (org.junit.Test): 11 usages
Preconditions (com.google.common.base.Preconditions): 10 usages
Exceptions (io.pravega.common.Exceptions): 10 usages
Futures (io.pravega.common.concurrent.Futures): 10 usages
Attributes (io.pravega.segmentstore.contracts.Attributes): 10 usages
UpdateableSegmentMetadata (io.pravega.segmentstore.server.UpdateableSegmentMetadata): 10 usages
CachedStreamSegmentAppendOperation (io.pravega.segmentstore.server.logs.operations.CachedStreamSegmentAppendOperation): 10 usages
StreamSegmentSealOperation (io.pravega.segmentstore.server.logs.operations.StreamSegmentSealOperation): 10 usages
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 10 usages
AtomicReference (java.util.concurrent.atomic.AtomicReference): 10 usages