Use of io.pravega.segmentstore.server.WriterFlushResult in project pravega by pravega.
Class SegmentAggregatorTests, method testTruncateAndSeal.
/**
* Tests the flush() method with StreamSegmentTruncateOperations after the segment has been Sealed.
*/
@Test
public void testTruncateAndSeal() throws Exception {
    // Add some data and intersperse with truncates.
    final int appendCount = 1000;
    final int truncateEvery = 20;
    final WriterConfig config = WriterConfig.builder()
            .with(WriterConfig.FLUSH_THRESHOLD_BYTES, appendCount * 50) // Extra high length threshold.
            .with(WriterConfig.FLUSH_THRESHOLD_MILLIS, 1000L)
            .with(WriterConfig.MAX_FLUSH_SIZE_BYTES, 10000)
            .with(WriterConfig.MIN_READ_TIMEOUT_MILLIS, 10L)
            .build();
    @Cleanup TestContext context = new TestContext(config);
    context.segmentAggregator.initialize(TIMEOUT).join();
    @Cleanup ByteArrayOutputStream writtenData = new ByteArrayOutputStream();

    // Accumulate some Appends.
    AtomicLong outstandingSize = new AtomicLong();
    SequenceNumberCalculator sequenceNumbers = new SequenceNumberCalculator(context, outstandingSize);
    for (int i = 0; i < appendCount; i++) {
        // Add another operation and record its length.
        StorageOperation appendOp = generateAppendAndUpdateMetadata(i, SEGMENT_ID, context);
        outstandingSize.addAndGet(appendOp.getLength());
        context.segmentAggregator.add(appendOp);
        getAppendData(appendOp, writtenData, context);
        sequenceNumbers.record(appendOp);
        if (i % truncateEvery == 1) {
            StorageOperation truncateOp = generateTruncateAndUpdateMetadata(SEGMENT_ID, context);
            context.segmentAggregator.add(truncateOp);
            sequenceNumbers.record(truncateOp);
        }
    }

    // Generate and add a Seal Operation.
    StorageOperation sealOp = generateSealAndUpdateMetadata(SEGMENT_ID, context);
    context.segmentAggregator.add(sealOp);

    // Add another truncate op, after the Seal.
    StorageOperation lastTruncateOp = generateTruncateAndUpdateMetadata(SEGMENT_ID, context);
    context.segmentAggregator.add(lastTruncateOp);

    WriterFlushResult flushResult = context.segmentAggregator.flush(TIMEOUT).join();
    Assert.assertEquals("Expected the entire Aggregator to be flushed.", outstandingSize.get(), flushResult.getFlushedBytes());
    Assert.assertFalse("Unexpected value returned by mustFlush() after flushing.", context.segmentAggregator.mustFlush());
    Assert.assertEquals("Unexpected value returned by getLowestUncommittedSequenceNumber() after flushing.",
            Operation.NO_SEQUENCE_NUMBER, context.segmentAggregator.getLowestUncommittedSequenceNumber());

    // Verify data.
    byte[] expectedData = writtenData.toByteArray();
    byte[] actualData = new byte[expectedData.length];
    SegmentProperties storageInfo = context.storage.getStreamSegmentInfo(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    Assert.assertEquals("Unexpected number of bytes flushed to Storage.", expectedData.length, storageInfo.getLength());
    Assert.assertTrue("Unexpected sealed status in Storage.", storageInfo.isSealed());
    Assert.assertEquals("Unexpected truncation offset in Storage.", lastTruncateOp.getStreamSegmentOffset(),
            context.storage.getTruncationOffset(context.segmentAggregator.getMetadata().getName()));
    context.storage.read(InMemoryStorage.newHandle(context.segmentAggregator.getMetadata().getName(), false), 0, actualData, 0, actualData.length, TIMEOUT).join();
    Assert.assertArrayEquals("Unexpected data written to storage.", expectedData, actualData);
}
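The assertion on flushed bytes above boils down to a simple accounting rule: only append payload counts toward WriterFlushResult.getFlushedBytes(); the interleaved truncates and the final seal contribute nothing. A minimal sketch of that rule, using illustrative names (aggregator, appendOps) that are not part of the test suite:
// Minimal sketch of the flushed-bytes accounting verified above (illustrative names only).
long expectedFlushedBytes = 0;
for (StorageOperation appendOp : appendOps) {         // appendOps: the appends added to the aggregator
    expectedFlushedBytes += appendOp.getLength();     // same accumulation as outstandingSize above
}
WriterFlushResult flushResult = aggregator.flush(TIMEOUT).join();
// Truncate and Seal operations carry no payload, so only append bytes are reported.
Assert.assertEquals(expectedFlushedBytes, flushResult.getFlushedBytes());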
Use of io.pravega.segmentstore.server.WriterFlushResult in project pravega by pravega.
Class SegmentAggregatorTests, method testReconcileAppends.
/**
* Tests the ability of the SegmentAggregator to reconcile AppendOperations (Cached/NonCached).
*/
@Test
public void testReconcileAppends() throws Exception {
    final WriterConfig config = DEFAULT_CONFIG;
    final int appendCount = 1000;
    final int failEvery = 3;
    final int partialFailEvery = 6;
    @Cleanup TestContext context = new TestContext(config);
    context.segmentAggregator.initialize(TIMEOUT).join();

    // The writes always succeed, but every few times we return some random error, indicating that they didn't.
    AtomicInteger writeCount = new AtomicInteger();
    AtomicReference<Exception> setException = new AtomicReference<>();
    context.storage.setWriteInterceptor((segmentName, offset, data, length, storage) -> {
        int wc = writeCount.incrementAndGet();
        if (wc % failEvery == 0) {
            if (wc % partialFailEvery == 0) {
                // Only a part of the operation has been written. Verify that we can reconcile partially written
                // operations as well.
                length /= 2;
            }
            // Time to wreak some havoc.
            return storage.write(writeHandle(segmentName), offset, data, length, TIMEOUT).thenAccept(v -> {
                IntentionalException ex = new IntentionalException(String.format("S=%s,O=%d", segmentName, offset));
                setException.set(ex);
                throw ex;
            });
        } else {
            setException.set(null);
            return null;
        }
    });

    @Cleanup ByteArrayOutputStream writtenData = new ByteArrayOutputStream();
    for (int i = 0; i < appendCount; i++) {
        // Add another operation and record its length.
        StorageOperation appendOp = generateAppendAndUpdateMetadata(i, SEGMENT_ID, context);
        context.segmentAggregator.add(appendOp);
        getAppendData(appendOp, writtenData, context);
    }

    // Force a flush by incrementing the time by a lot.
    context.increaseTime(config.getFlushThresholdTime().toMillis() + 1);
    while (context.segmentAggregator.mustFlush()) {
        // Call flush() and inspect the result.
        WriterFlushResult flushResult = null;
        try {
            flushResult = context.segmentAggregator.flush(TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
            Assert.assertNull("An exception was expected, but none was thrown.", setException.get());
            Assert.assertNotNull("No FlushResult provided.", flushResult);
        } catch (Exception ex) {
            if (setException.get() != null) {
                Assert.assertEquals("Unexpected exception thrown.", setException.get(), Exceptions.unwrap(ex));
            } else {
                // Only expecting a BadOffsetException after our own injected exception.
                Throwable realEx = Exceptions.unwrap(ex);
                Assert.assertTrue("Unexpected exception thrown: " + realEx, realEx instanceof BadOffsetException);
            }
        }

        // Check flush result.
        if (flushResult != null) {
            AssertExtensions.assertGreaterThan("Not enough bytes were flushed (time threshold).", 0, flushResult.getFlushedBytes());
            Assert.assertEquals("Not expecting any merged bytes in this test.", 0, flushResult.getMergedBytes());
        }

        // Force a flush by incrementing the time by a lot.
        context.increaseTime(config.getFlushThresholdTime().toMillis() + 1);
    }

    // Verify data.
    byte[] expectedData = writtenData.toByteArray();
    byte[] actualData = new byte[expectedData.length];
    long storageLength = context.storage.getStreamSegmentInfo(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join().getLength();
    Assert.assertEquals("Unexpected number of bytes flushed to Storage.", expectedData.length, storageLength);
    context.storage.read(readHandle(context.segmentAggregator.getMetadata().getName()), 0, actualData, 0, actualData.length, TIMEOUT).join();
    Assert.assertArrayEquals("Unexpected data written to storage.", expectedData, actualData);
}
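The fault injection above hinges on one idea: the interceptor lets the write land in Storage but still reports a failure, so the retried flush hits a BadOffsetException and pushes the aggregator into reconciliation. A stripped-down version of that interceptor, using the same setWriteInterceptor hook as above with an illustrative failure cadence:
// Every write actually reaches Storage, but every second one is reported as failed.
AtomicInteger writes = new AtomicInteger();
context.storage.setWriteInterceptor((segmentName, offset, data, length, storage) -> {
    if (writes.incrementAndGet() % 2 == 0) {
        return storage.write(writeHandle(segmentName), offset, data, length, TIMEOUT)
                .thenAccept(v -> {
                    // The data is already committed; the caller only sees the failure.
                    throw new IntentionalException("injected after successful write");
                });
    }
    return null; // Null lets the regular write path proceed, as in the test above.
});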
Use of io.pravega.segmentstore.server.WriterFlushResult in project pravega by pravega.
Class AttributeAggregator, method flush.
/**
* Flushes the contents of the Aggregator to the Storage.
*
* @param force If true, force-flushes everything accumulated in the {@link AttributeAggregator}, regardless of
* the value returned by {@link #mustFlush()}.
* @param timeout Timeout for the operation.
* @return A CompletableFuture that, when completed, will contain a summary of the flush operation. If any errors
* occurred during the flush, the Future will be completed with the appropriate exception.
*/
@Override
public CompletableFuture<WriterFlushResult> flush(boolean force, Duration timeout) {
    Exceptions.checkNotClosed(isClosed(), this);
    if (!force && !mustFlush()) {
        return CompletableFuture.completedFuture(new WriterFlushResult());
    }

    TimeoutTimer timer = new TimeoutTimer(timeout);
    CompletableFuture<Void> result = handleAttributeException(
            persistPendingAttributes(this.state.getAttributes(), this.state.getLastSequenceNumber(), timer));
    if (this.state.hasSeal()) {
        result = result.thenComposeAsync(v -> handleAttributeException(sealAttributes(timer)), this.executor);
    }

    return result.thenApply(v -> {
        if (this.state.size() > 0) {
            log.debug("{}: Flushed. Count={}, SeqNo={}-{}, Forced={}.", this.traceObjectId, this.state.size(),
                    this.state.getFirstSequenceNumber(), this.state.getLastSequenceNumber(), force);
        }
        WriterFlushResult r = new WriterFlushResult();
        r.withFlushedAttributes(this.state.size());
        this.state.acceptChanges();
        this.lastFlush.set(this.timer.getElapsed());
        return r;
    });
}
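A caller-side sketch of this contract: a forced flush completes even when mustFlush() is false, and the returned WriterFlushResult carries the number of attributes that were persisted. The getFlushedAttributes() accessor is assumed here as the read-side counterpart of withFlushedAttributes() used above; the attributeAggregator variable is illustrative.
// Illustrative caller of AttributeAggregator.flush(); accessor name is assumed (see note above).
CompletableFuture<WriterFlushResult> flushFuture = attributeAggregator.flush(true, Duration.ofSeconds(10));
WriterFlushResult result = flushFuture.join();
// With force == true the flush runs even if mustFlush() is false; an empty aggregator reports zero.
log.debug("Flushed attributes: {}", result.getFlushedAttributes());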
Use of io.pravega.segmentstore.server.WriterFlushResult in project pravega by pravega.
Class SegmentAggregator, method flushPendingAppends.
/**
* Flushes all Append Operations that can be flushed up to the maximum allowed flush size.
*
* @param timeout Timeout for the operation.
* @return A CompletableFuture that, when completed, will contain the result from the flush operation.
*/
private CompletableFuture<WriterFlushResult> flushPendingAppends(Duration timeout) {
    // Gather an InputStream made up of all the operations we can flush.
    BufferView flushData;
    try {
        flushData = getFlushData();
    } catch (DataCorruptionException ex) {
        return Futures.failedFuture(ex);
    }

    long traceId = LoggerHelpers.traceEnterWithContext(log, this.traceObjectId, "flushPendingAppends");

    // Flush them.
    TimeoutTimer timer = new TimeoutTimer(timeout);
    CompletableFuture<Void> flush;
    if (flushData == null || flushData.getLength() == 0) {
        flush = CompletableFuture.completedFuture(null);
    } else {
        flush = createSegmentIfNecessary(
                () -> this.storage.write(this.handle.get(), this.metadata.getStorageLength(), flushData.getReader(), flushData.getLength(), timer.getRemaining()),
                timer.getRemaining());
    }

    return flush.thenApplyAsync(v -> {
        WriterFlushResult result = updateStatePostFlush(flushData);
        LoggerHelpers.traceLeave(log, this.traceObjectId, "flushPendingAppends", traceId, result);
        return result;
    }, this.executor).exceptionally(ex -> {
        if (Exceptions.unwrap(ex) instanceof BadOffsetException) {
            // We attempted to write at an offset that already contained other data. This can happen for a number of
            // reasons, but we do not have enough information here to determine why. We need to enter reconciliation
            // mode, which will determine the actual state of the segment in storage and take appropriate actions.
            setState(AggregatorState.ReconciliationNeeded);
        }

        // Rethrow all exceptions.
        throw new CompletionException(ex);
    });
}
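One detail worth isolating from the exceptionally block above: it mutates the aggregator state as a side effect but rethrows, so the returned future still completes exceptionally and the caller observes the original failure. A sketch of that shape, with illustrative helper names rather than the real SegmentAggregator internals:
// Sketch: react to a specific failure type, then keep the future failed for the caller.
CompletableFuture<WriterFlushResult> guarded = doStorageWrite()    // doStorageWrite(): illustrative
        .exceptionally(ex -> {
            if (Exceptions.unwrap(ex) instanceof BadOffsetException) {
                markReconciliationNeeded();                        // illustrative side effect
            }
            throw new CompletionException(ex);                     // the failure still propagates
        });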
Use of io.pravega.segmentstore.server.WriterFlushResult in project pravega by pravega.
Class SegmentAggregator, method mergeWith.
/**
* Merges the Transaction StreamSegment with given metadata into this one at the current offset.
*
* @param transactionMetadata The metadata of the Transaction StreamSegment to merge.
* @param mergeOp The MergeSegmentOperation being processed; its length is reported as merged bytes in the result.
* @param timer Timer for the operation.
* @return A CompletableFuture that, when completed, will contain the number of bytes that were merged into this
* StreamSegment. If failed, the Future will contain the exception that caused it.
*/
private CompletableFuture<WriterFlushResult> mergeWith(UpdateableSegmentMetadata transactionMetadata, MergeSegmentOperation mergeOp, TimeoutTimer timer) {
    long traceId = LoggerHelpers.traceEnterWithContext(log, this.traceObjectId, "mergeWith", transactionMetadata.getId(), transactionMetadata.getName(), transactionMetadata.isSealedInStorage());
    boolean emptySourceSegment = transactionMetadata.getLength() == 0;
    if (transactionMetadata.isDeleted() && !emptySourceSegment) {
        // We came across a deleted source segment that had some data. We need to begin a reconciliation to figure out
        // the actual state of the segments in Storage.
        setState(AggregatorState.ReconciliationNeeded);
        return Futures.failedFuture(new StreamSegmentNotExistsException(transactionMetadata.getName()));
    }

    WriterFlushResult result = new WriterFlushResult();
    CompletableFuture<SegmentProperties> merge;
    if (emptySourceSegment) {
        // We came across a deleted source segment which had no data. No point in attempting to do anything, as any
        // operation involving this segment will complain about it not being there.
        log.warn("{}: Not applying '{}' because source segment is missing or empty.", this.traceObjectId, mergeOp);
        merge = CompletableFuture.completedFuture(this.metadata);
    } else if (!transactionMetadata.isSealedInStorage() || transactionMetadata.getLength() > transactionMetadata.getStorageLength()) {
        // Nothing to do. Given Transaction is not eligible for merger yet.
        LoggerHelpers.traceLeave(log, this.traceObjectId, "mergeWith", traceId, result);
        return CompletableFuture.completedFuture(result);
    } else {
        merge = mergeInStorage(transactionMetadata, mergeOp, timer);
    }

    // need to make AttributeAggregator aware of merging segments.
    return merge.thenAcceptAsync(segmentProperties -> mergeCompleted(segmentProperties, transactionMetadata, mergeOp), this.executor)
            .thenComposeAsync(v -> this.dataSource.deleteAllAttributes(transactionMetadata, timer.getRemaining()), this.executor)
            .thenApply(v -> {
                this.lastFlush.set(this.timer.getElapsed());
                result.withMergedBytes(mergeOp.getLength());
                LoggerHelpers.traceLeave(log, this.traceObjectId, "mergeWith", traceId, result);
                return result;
            }).exceptionally(ex -> {
                Throwable realEx = Exceptions.unwrap(ex);
                if (realEx instanceof BadOffsetException || realEx instanceof StreamSegmentNotExistsException) {
                    // We either attempted to write at an offset that already contained other data or the Transaction
                    // Segment no longer exists. This can happen for a number of reasons, but we do not have enough
                    // information here to determine why. We need to enter reconciliation mode, and hope for the best.
                    setState(AggregatorState.ReconciliationNeeded);
                }

                // Rethrow all exceptions.
                throw new CompletionException(ex);
            });
}
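The result shape produced here mirrors what testReconcileAppends checks earlier on this page: merges surface through getMergedBytes() while plain appends surface through getFlushedBytes(). A minimal sketch of that distinction, with an illustrative byte count:
// A merge of a 4096-byte transaction segment is reported as merged bytes, not flushed bytes.
WriterFlushResult mergeResult = new WriterFlushResult();
mergeResult.withMergedBytes(4096);                    // as done above with mergeOp.getLength()
// mergeResult.getMergedBytes() == 4096, mergeResult.getFlushedBytes() == 0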