Use of io.pravega.segmentstore.server.WriterFlushResult in project pravega by pravega.
From class SegmentAggregatorTests, method testFlushAppend.
// endregion

// region flush()

/**
 * Tests the flush() method with Append operations only.
 * Verifies both the length-based and the time-based flush triggers, as well as the flushing of appends
 * larger than the maximum flush size.
 */
@Test
public void testFlushAppend() throws Exception {
    final WriterConfig config = DEFAULT_CONFIG;
    final int appendCount = config.getFlushThresholdBytes() * 10;

    @Cleanup TestContext context = new TestContext(config);
    context.segmentAggregator.initialize(TIMEOUT).join();

    @Cleanup ByteArrayOutputStream writtenData = new ByteArrayOutputStream();

    // Number of bytes remaining to be flushed.
    AtomicLong outstandingSize = new AtomicLong();
    SequenceNumberCalculator sequenceNumbers = new SequenceNumberCalculator(context, outstandingSize);

    // Part 1: flush triggered by accumulated size.
    for (int i = 0; i < appendCount; i++) {
        // Add another operation and record its length.
        StorageOperation appendOp = generateAppendAndUpdateMetadata(i, SEGMENT_ID, context);
        outstandingSize.addAndGet(appendOp.getLength());
        context.segmentAggregator.add(appendOp);
        getAppendData(appendOp, writtenData, context);
        sequenceNumbers.record(appendOp);

        boolean expectFlush = outstandingSize.get() >= config.getFlushThresholdBytes();
        Assert.assertEquals("Unexpected value returned by mustFlush() (size threshold).",
                expectFlush, context.segmentAggregator.mustFlush());
        Assert.assertEquals("Unexpected value returned by getLowestUncommittedSequenceNumber() before flush (size threshold).",
                sequenceNumbers.getLowestUncommitted(), context.segmentAggregator.getLowestUncommittedSequenceNumber());

        // Call flush() and inspect the result.
        WriterFlushResult flushResult = context.segmentAggregator.flush(TIMEOUT).join();
        if (expectFlush) {
            AssertExtensions.assertGreaterThanOrEqual("Not enough bytes were flushed (size threshold).",
                    config.getFlushThresholdBytes(), flushResult.getFlushedBytes());
            outstandingSize.addAndGet(-flushResult.getFlushedBytes());
            Assert.assertEquals("Unexpected value returned by getLowestUncommittedSequenceNumber() after flush (size threshold).",
                    sequenceNumbers.getLowestUncommitted(), context.segmentAggregator.getLowestUncommittedSequenceNumber());
        } else {
            Assert.assertEquals(String.format("Not expecting a flush. OutstandingSize=%s, Threshold=%d",
                    outstandingSize, config.getFlushThresholdBytes()), 0, flushResult.getFlushedBytes());
        }

        Assert.assertFalse("Unexpected value returned by mustFlush() after flush (size threshold).",
                context.segmentAggregator.mustFlush());
        Assert.assertEquals("Not expecting any merged bytes in this test.", 0, flushResult.getMergedBytes());
    }

    // Part 2: flush triggered by time.
    for (int i = 0; i < appendCount; i++) {
        // Add another operation and record its length.
        StorageOperation appendOp = generateAppendAndUpdateMetadata(i, SEGMENT_ID, context);
        outstandingSize.addAndGet(appendOp.getLength());
        context.segmentAggregator.add(appendOp);
        getAppendData(appendOp, writtenData, context);
        sequenceNumbers.record(appendOp);

        // Force a flush by increasing the time past the flush threshold.
        context.increaseTime(config.getFlushThresholdTime().toMillis() + 1);
        Assert.assertTrue("Unexpected value returned by mustFlush() (time threshold).",
                context.segmentAggregator.mustFlush());
        Assert.assertEquals("Unexpected value returned by getLowestUncommittedSequenceNumber() before flush (time threshold).",
                sequenceNumbers.getLowestUncommitted(), context.segmentAggregator.getLowestUncommittedSequenceNumber());

        // Call flush() and inspect the result. We are always expecting a flush.
        WriterFlushResult flushResult = context.segmentAggregator.flush(TIMEOUT).join();
        AssertExtensions.assertGreaterThan("Not enough bytes were flushed (time threshold).", 0, flushResult.getFlushedBytes());
        outstandingSize.addAndGet(-flushResult.getFlushedBytes());
        Assert.assertFalse("Unexpected value returned by mustFlush() after flush (time threshold).",
                context.segmentAggregator.mustFlush());
        Assert.assertEquals("Unexpected value returned by getLowestUncommittedSequenceNumber() after flush (time threshold).",
                sequenceNumbers.getLowestUncommitted(), context.segmentAggregator.getLowestUncommittedSequenceNumber());
        Assert.assertEquals("Not expecting any merged bytes in this test.", 0, flushResult.getMergedBytes());
    }

    // Part 3: Transaction appends. Accumulating many small appends forces flush() to loop internally
    // until everything eligible has been flushed.
    final int transactionSize = 100;
    for (int i = 0; i < appendCount / 10; i++) {
        for (int j = 0; j < transactionSize; j++) {
            // Add another operation and record its length.
            StorageOperation appendOp = generateAppendAndUpdateMetadata(i, SEGMENT_ID, context);
            outstandingSize.addAndGet(appendOp.getLength());
            context.segmentAggregator.add(appendOp);
            getAppendData(appendOp, writtenData, context);
            sequenceNumbers.record(appendOp);
            Assert.assertEquals("Unexpected value returned by getLowestUncommittedSequenceNumber() before flush (Transaction appends).",
                    sequenceNumbers.getLowestUncommitted(), context.segmentAggregator.getLowestUncommittedSequenceNumber());
        }

        // Call flush() and inspect the result. We are always expecting a flush.
        Assert.assertTrue("Unexpected value returned by mustFlush() (Transaction appends).",
                context.segmentAggregator.mustFlush());
        WriterFlushResult flushResult = context.segmentAggregator.flush(TIMEOUT).join();
        AssertExtensions.assertGreaterThan("Not enough bytes were flushed (Transaction appends).", 0, flushResult.getFlushedBytes());
        outstandingSize.addAndGet(-flushResult.getFlushedBytes());
        Assert.assertFalse("Unexpected value returned by mustFlush() after flush (Transaction appends).",
                context.segmentAggregator.mustFlush());
        Assert.assertEquals("Unexpected value returned by getLowestUncommittedSequenceNumber() after flush (Transaction appends).",
                sequenceNumbers.getLowestUncommitted(), context.segmentAggregator.getLowestUncommittedSequenceNumber());
        Assert.assertEquals("Not expecting any merged bytes in this test.", 0, flushResult.getMergedBytes());
    }

    // Part 4: large appends (larger than MaxFlushSize).
    Random random = RandomFactory.create();
    for (int i = 0; i < appendCount; i++) {
        // Add another operation and record its length.
        byte[] largeAppendData = new byte[config.getMaxFlushSizeBytes() * 10 + 1];
        random.nextBytes(largeAppendData);
        StorageOperation appendOp = generateAppendAndUpdateMetadata(SEGMENT_ID, largeAppendData, context);
        outstandingSize.addAndGet(appendOp.getLength());
        context.segmentAggregator.add(appendOp);
        getAppendData(appendOp, writtenData, context);
        sequenceNumbers.record(appendOp);

        // Force a flush by increasing the time past the flush threshold.
        context.increaseTime(config.getFlushThresholdTime().toMillis() + 1);
        Assert.assertTrue("Unexpected value returned by mustFlush() (large appends).",
                context.segmentAggregator.mustFlush());
        Assert.assertEquals("Unexpected value returned by getLowestUncommittedSequenceNumber() before flush (large appends).",
                sequenceNumbers.getLowestUncommitted(), context.segmentAggregator.getLowestUncommittedSequenceNumber());

        // Call flush() and inspect the result. We are always expecting a flush.
        WriterFlushResult flushResult = context.segmentAggregator.flush(TIMEOUT).join();
        AssertExtensions.assertGreaterThan("Not enough bytes were flushed (large appends).", 0, flushResult.getFlushedBytes());
        outstandingSize.addAndGet(-flushResult.getFlushedBytes());
        Assert.assertFalse("Unexpected value returned by mustFlush() after flush (large appends).",
                context.segmentAggregator.mustFlush());
        Assert.assertEquals("Unexpected value returned by getLowestUncommittedSequenceNumber() after flush (large appends).",
                sequenceNumbers.getLowestUncommitted(), context.segmentAggregator.getLowestUncommittedSequenceNumber());
        Assert.assertEquals("Not expecting any merged bytes in this test (large appends).", 0, flushResult.getMergedBytes());
    }

    // Verify data.
    Assert.assertEquals("Not expecting leftover data not flushed.", 0, outstandingSize.get());
    byte[] expectedData = writtenData.toByteArray();
    byte[] actualData = new byte[expectedData.length];
    long storageLength = context.storage
            .getStreamSegmentInfo(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join().getLength();
    Assert.assertEquals("Unexpected number of bytes flushed to Storage.", expectedData.length, storageLength);
    context.storage.read(readHandle(context.segmentAggregator.getMetadata().getName()), 0, actualData, 0,
            actualData.length, TIMEOUT).join();
    Assert.assertArrayEquals("Unexpected data written to storage.", expectedData, actualData);
}
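
Distilled from Part 1 above, the size-threshold contract comes down to a simple add/check/flush loop. The following is a minimal sketch, not part of the test; "aggregator" and "pendingAppends" are hypothetical stand-ins for the test's segmentAggregator and its generated operations, and only methods already shown above are used.

    // Minimal sketch of the size-threshold flush contract, assuming an initialized
    // SegmentAggregator ("aggregator") and a hypothetical list of appends ("pendingAppends").
    long outstanding = 0;
    for (StorageOperation op : pendingAppends) {
        aggregator.add(op);
        outstanding += op.getLength();
        // mustFlush() becomes true once outstanding >= config.getFlushThresholdBytes().
        if (aggregator.mustFlush()) {
            WriterFlushResult r = aggregator.flush(TIMEOUT).join();
            outstanding -= r.getFlushedBytes(); // flushed bytes are no longer outstanding
        }
    }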
Use of io.pravega.segmentstore.server.WriterFlushResult in project pravega by pravega.
From class AttributeAggregatorTests, method forceTimeFlush.
private WriterFlushResult forceTimeFlush(TestContext context) throws Exception {
    // Force a flush by incrementing the time by a lot.
    context.increaseTime(context.config.getFlushThresholdTime().toMillis() + 1);
    Assert.assertTrue("Unexpected value returned by mustFlush() (time threshold).", context.aggregator.mustFlush());
    WriterFlushResult flushResult = context.aggregator.flush(TIMEOUT).get(SHORT_TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    Assert.assertFalse("Unexpected value returned by mustFlush() after flush (time threshold).", context.aggregator.mustFlush());
    return flushResult;
}
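
A hypothetical call site for this helper might look as follows. An AttributeAggregator reports its progress through flushed attributes rather than flushed bytes, so the sketch only checks the merged-bytes accessor already shown in the snippets above; anything more specific would be an assumption about the test's setup.

    // Hypothetical usage of forceTimeFlush() inside a test body ("context" is the test's TestContext).
    WriterFlushResult result = forceTimeFlush(context);
    Assert.assertEquals("Not expecting any merged bytes from an attribute flush.", 0, result.getMergedBytes());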
Use of io.pravega.segmentstore.server.WriterFlushResult in project pravega by pravega.
From class SegmentAggregator, method flushFully.
/**
 * Flushes all Append Operations that can be flushed at the given moment (until the entire Aggregator is emptied out
 * or until a StreamSegmentSealOperation or MergeSegmentOperation is encountered).
 *
 * @param timer Timer for the operation.
 * @return A CompletableFuture that, when completed, will contain the result from the flush operation.
 */
private CompletableFuture<WriterFlushResult> flushFully(TimeoutTimer timer) {
    long traceId = LoggerHelpers.traceEnterWithContext(log, this.traceObjectId, "flushFully");
    WriterFlushResult result = new WriterFlushResult();
    return Futures
            .loop(this::canContinueFlushingFully,
                    () -> flushPendingAppends(timer.getRemaining())
                            .thenCompose(flushResult -> flushPendingTruncate(flushResult, timer.getRemaining())),
                    result::withFlushResult,
                    this.executor)
            .thenApply(v -> {
                LoggerHelpers.traceLeave(log, this.traceObjectId, "flushFully", traceId, result);
                return result;
            });
}
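
The interesting piece here is how per-iteration results are folded together: Futures.loop passes each iteration's WriterFlushResult to result::withFlushResult, which (judging by how both flush loops in this class use it) adds the partial counts into the running total. A stripped-down sketch of that accumulation; the builder-style withFlushedBytes() setter used to construct the partials is an assumption, as only withFlushResult()/getFlushedBytes()/getMergedBytes() appear verbatim in the snippets here.

    // Sketch: accumulating per-iteration results, as flushFully() does via result::withFlushResult.
    WriterFlushResult partial1 = new WriterFlushResult().withFlushedBytes(100); // hypothetical iteration 1
    WriterFlushResult partial2 = new WriterFlushResult().withFlushedBytes(50);  // hypothetical iteration 2
    WriterFlushResult total = new WriterFlushResult();
    total.withFlushResult(partial1);
    total.withFlushResult(partial2);
    // total.getFlushedBytes() now reflects the sum across iterations (150 here).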
Use of io.pravega.segmentstore.server.WriterFlushResult in project pravega by pravega.
From class SegmentAggregator, method flushNormally.
/**
 * Repeatedly flushes the contents of the Aggregator to the Storage as long as something immediate needs to be
 * flushed, such as a Seal or Merge operation. The loop stops as soon as an iteration makes no progress (i.e., it
 * neither flushes nor merges any bytes).
 *
 * @param force Whether to force everything out.
 * @param timer Timer for the operation.
 * @return A CompletableFuture that, when completed, will contain the result from the flush operation.
 */
private CompletableFuture<WriterFlushResult> flushNormally(boolean force, TimeoutTimer timer) {
    assert this.state.get() == AggregatorState.Writing : "flushNormally cannot be called if state == " + this.state;
    long traceId = LoggerHelpers.traceEnterWithContext(log, this.traceObjectId, "flushNormally", force, this.operations.size());
    WriterFlushResult result = new WriterFlushResult();
    AtomicBoolean canContinue = new AtomicBoolean(true);
    return Futures
            .loop(canContinue::get,
                    () -> flushOnce(force, timer),
                    partialResult -> {
                        canContinue.set(partialResult.getFlushedBytes() + partialResult.getMergedBytes() > 0);
                        result.withFlushResult(partialResult);
                    },
                    this.executor)
            .thenApply(v -> {
                LoggerHelpers.traceLeave(log, this.traceObjectId, "flushNormally", traceId, result);
                return result;
            });
}
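
Note the different termination rules: flushFully() keeps looping while canContinueFlushingFully() reports more appends to flush, whereas flushNormally() stops on the first pass that makes no progress. Stripped of the async machinery, the flushNormally() stop rule reduces to the following plain-Java sketch, where flushOncePass() is a hypothetical synchronous stand-in for flushOnce().

    // Plain-Java illustration of flushNormally()'s stop rule: keep iterating while each
    // pass makes progress; stop on the first pass that flushes and merges nothing.
    long totalBytes = 0;
    boolean canContinue = true;
    while (canContinue) {
        long progress = flushOncePass(); // hypothetical: bytes flushed + merged in one pass
        totalBytes += progress;
        canContinue = progress > 0;
    }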
Use of io.pravega.segmentstore.server.WriterFlushResult in project pravega by pravega.
From class SegmentAggregatorTests, method flushAllSegments.
private void flushAllSegments(TestContext context) throws Exception {
    // Flush all segments in the TestContext, as long as any of them still has something to flush and is able
    // to flush anything.
    boolean anythingFlushed = true;
    while (anythingFlushed) {
        anythingFlushed = false;
        for (SegmentAggregator transactionAggregator : context.transactionAggregators) {
            if (transactionAggregator.mustFlush()) {
                WriterFlushResult transactionFlushResult = transactionAggregator.flush(TIMEOUT)
                        .get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
                anythingFlushed = anythingFlushed | (transactionFlushResult.getFlushedBytes() > 0);
            }
        }

        if (context.segmentAggregator.mustFlush()) {
            WriterFlushResult parentFlushResult = context.segmentAggregator.flush(TIMEOUT)
                    .get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
            anythingFlushed = anythingFlushed | (parentFlushResult.getFlushedBytes() + parentFlushResult.getMergedBytes()) > 0;
        }
    }
}
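
A hypothetical call site: after queuing operations on the parent and transaction aggregators, a test would drain everything with this helper and then verify that nothing is left to flush. Only members visible in the snippet above are used.

    // Hypothetical usage: drain all aggregators, then verify they are fully flushed.
    flushAllSegments(context);
    Assert.assertFalse("Parent segment still has data to flush.", context.segmentAggregator.mustFlush());
    for (SegmentAggregator a : context.transactionAggregators) {
        Assert.assertFalse("Transaction segment still has data to flush.", a.mustFlush());
    }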