Use of io.pravega.segmentstore.server.WriterFlushResult in project pravega by pravega.
From the class SegmentAggregatorTests, method testSealWithStorageErrors.
/**
 * Tests the flush() method with Append and StreamSegmentSealOperations when there are Storage errors.
 */
@Test
public void testSealWithStorageErrors() throws Exception {
    // Add some appends and seal, and then flush together. Verify that everything got flushed in one go.
    final int appendCount = 1000;
    final WriterConfig config = WriterConfig.builder()
            .with(WriterConfig.FLUSH_THRESHOLD_BYTES, appendCount * 50) // Extra high length threshold.
            .with(WriterConfig.FLUSH_THRESHOLD_MILLIS, 1000L)
            .with(WriterConfig.MAX_FLUSH_SIZE_BYTES, 10000)
            .with(WriterConfig.MIN_READ_TIMEOUT_MILLIS, 10L)
            .build();
    @Cleanup
    TestContext context = new TestContext(config);
    context.segmentAggregator.initialize(TIMEOUT).join();
    @Cleanup
    ByteArrayOutputStream writtenData = new ByteArrayOutputStream();
    // Part 1: flush triggered by accumulated size.
    for (int i = 0; i < appendCount; i++) {
        // Add another operation and record its length (not bothering with flushing here; testFlushSeal() covers that).
        StorageOperation appendOp = generateAppendAndUpdateMetadata(i, SEGMENT_ID, context);
        context.segmentAggregator.add(appendOp);
        getAppendData(appendOp, writtenData, context);
    }
    // Generate and add a Seal Operation.
    StorageOperation sealOp = generateSealAndUpdateMetadata(SEGMENT_ID, context);
    context.segmentAggregator.add(sealOp);
    // Have the writes fail every few attempts with a well-known exception.
    AtomicBoolean generateSyncException = new AtomicBoolean(true);
    AtomicBoolean generateAsyncException = new AtomicBoolean(true);
    AtomicReference<IntentionalException> setException = new AtomicReference<>();
    Supplier<Exception> exceptionSupplier = () -> {
        IntentionalException ex = new IntentionalException(Long.toString(context.timer.getElapsedMillis()));
        setException.set(ex);
        return ex;
    };
    context.storage.setSealSyncErrorInjector(new ErrorInjector<>(count -> generateSyncException.getAndSet(false), exceptionSupplier));
    context.storage.setSealAsyncErrorInjector(new ErrorInjector<>(count -> generateAsyncException.getAndSet(false), exceptionSupplier));
    // Call flush and verify that the entire Aggregator got flushed and the Seal got persisted to Storage.
    int attemptCount = 4;
    for (int i = 0; i < attemptCount; i++) {
        // Repeat a number of times; at least one attempt should succeed.
        setException.set(null);
        try {
            WriterFlushResult flushResult = context.segmentAggregator.flush(TIMEOUT).join();
            Assert.assertNull("An exception was expected, but none was thrown.", setException.get());
            Assert.assertNotNull("No FlushResult provided.", flushResult);
        } catch (Exception ex) {
            if (setException.get() != null) {
                Assert.assertEquals("Unexpected exception thrown.", setException.get(), Exceptions.unwrap(ex));
            } else {
                // Not expecting any exception this time.
                throw ex;
            }
        }
        if (!generateAsyncException.get() && !generateSyncException.get() && setException.get() == null) {
            // We are done. We got at least one through.
            break;
        }
    }
    // Verify data.
    byte[] expectedData = writtenData.toByteArray();
    byte[] actualData = new byte[expectedData.length];
    SegmentProperties storageInfo = context.storage.getStreamSegmentInfo(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    Assert.assertEquals("Unexpected number of bytes flushed to Storage.", expectedData.length, storageInfo.getLength());
    Assert.assertTrue("Segment is not sealed in storage post flush.", storageInfo.isSealed());
    Assert.assertTrue("Segment is not marked in metadata as sealed in storage post flush.", context.segmentAggregator.getMetadata().isSealedInStorage());
    context.storage.read(readHandle(context.segmentAggregator.getMetadata().getName()), 0, actualData, 0, actualData.length, TIMEOUT).join();
    Assert.assertArrayEquals("Unexpected data written to storage.", expectedData, actualData);
}
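
The WriterConfig builder chain above reappears verbatim in testTruncate and testSeal below. A hypothetical helper (not part of the original test class) could consolidate it; it uses only the config keys these tests already set:

// Hypothetical helper, not in the original suite: builds the extra-high-threshold
// WriterConfig shared by testSealWithStorageErrors, testTruncate and testSeal.
private static WriterConfig highThresholdConfig(int appendCount) {
    return WriterConfig.builder()
            .with(WriterConfig.FLUSH_THRESHOLD_BYTES, appendCount * 50) // Extra high length threshold.
            .with(WriterConfig.FLUSH_THRESHOLD_MILLIS, 1000L)
            .with(WriterConfig.MAX_FLUSH_SIZE_BYTES, 10000)
            .with(WriterConfig.MIN_READ_TIMEOUT_MILLIS, 10L)
            .build();
}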
Use of io.pravega.segmentstore.server.WriterFlushResult in project pravega by pravega.
From the class SegmentAggregatorTests, method testTruncate.
/**
 * Tests the flush() method with StreamSegmentTruncateOperations.
 */
@Test
public void testTruncate() throws Exception {
    // Add some appends and a truncate, and then flush together. Verify that everything got flushed in one go.
    final int appendCount = 1000;
    final WriterConfig config = WriterConfig.builder()
            .with(WriterConfig.FLUSH_THRESHOLD_BYTES, appendCount * 50) // Extra high length threshold.
            .with(WriterConfig.FLUSH_THRESHOLD_MILLIS, 1000L)
            .with(WriterConfig.MAX_FLUSH_SIZE_BYTES, 10000)
            .with(WriterConfig.MIN_READ_TIMEOUT_MILLIS, 10L)
            .build();
    @Cleanup
    TestContext context = new TestContext(config);
    context.segmentAggregator.initialize(TIMEOUT).join();
    @Cleanup
    ByteArrayOutputStream writtenData = new ByteArrayOutputStream();
    // Accumulate some Appends
    AtomicLong outstandingSize = new AtomicLong();
    SequenceNumberCalculator sequenceNumbers = new SequenceNumberCalculator(context, outstandingSize);
    for (int i = 0; i < appendCount; i++) {
        // Add another operation and record its length.
        StorageOperation appendOp = generateAppendAndUpdateMetadata(i, SEGMENT_ID, context);
        outstandingSize.addAndGet(appendOp.getLength());
        context.segmentAggregator.add(appendOp);
        getAppendData(appendOp, writtenData, context);
        sequenceNumbers.record(appendOp);
    }
    Assert.assertFalse("Unexpected value returned by mustFlush() before adding StreamSegmentTruncateOperation.", context.segmentAggregator.mustFlush());
    // Generate and add a Truncate Operation.
    StorageOperation truncateOp = generateTruncateAndUpdateMetadata(SEGMENT_ID, context);
    context.segmentAggregator.add(truncateOp);
    Assert.assertEquals("Unexpected value returned by getLowestUncommittedSequenceNumber() after adding StreamSegmentTruncateOperation.", sequenceNumbers.getLowestUncommitted(), context.segmentAggregator.getLowestUncommittedSequenceNumber());
    Assert.assertTrue("Unexpected value returned by mustFlush() after adding StreamSegmentTruncateOperation.", context.segmentAggregator.mustFlush());
    // Call flush and verify that the entire Aggregator got flushed and the Truncate got persisted to Storage.
    WriterFlushResult flushResult = context.segmentAggregator.flush(TIMEOUT).join();
    Assert.assertEquals("Expected the entire Aggregator to be flushed.", outstandingSize.get(), flushResult.getFlushedBytes());
    Assert.assertFalse("Unexpected value returned by mustFlush() after flushing.", context.segmentAggregator.mustFlush());
    Assert.assertEquals("Unexpected value returned by getLowestUncommittedSequenceNumber() after flushing.", Operation.NO_SEQUENCE_NUMBER, context.segmentAggregator.getLowestUncommittedSequenceNumber());
    // Verify data.
    byte[] expectedData = writtenData.toByteArray();
    byte[] actualData = new byte[expectedData.length];
    SegmentProperties storageInfo = context.storage.getStreamSegmentInfo(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    Assert.assertEquals("Unexpected number of bytes flushed to Storage.", expectedData.length, storageInfo.getLength());
    Assert.assertEquals("Unexpected truncation offset in Storage.", truncateOp.getStreamSegmentOffset(), context.storage.getTruncationOffset(context.segmentAggregator.getMetadata().getName()));
    context.storage.read(InMemoryStorage.newHandle(context.segmentAggregator.getMetadata().getName(), false), 0, actualData, 0, actualData.length, TIMEOUT).join();
    Assert.assertArrayEquals("Unexpected data written to storage.", expectedData, actualData);
}
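
testTruncate and testSeal both conclude by checking that the flush drained the entire outstanding size. A sketch of a shared assertion helper, hypothetical and built only from the WriterFlushResult getters (getFlushedBytes(), getMergedBytes()) these tests already exercise:

// Hypothetical assertion helper (not in the original suite): verifies that a flush
// drained exactly the accumulated bytes and merged nothing.
private static void assertFullyFlushed(long outstandingBytes, WriterFlushResult flushResult) {
    Assert.assertEquals("Expected the entire Aggregator to be flushed.", outstandingBytes, flushResult.getFlushedBytes());
    Assert.assertEquals("Not expecting any merged bytes.", 0, flushResult.getMergedBytes());
}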
Use of io.pravega.segmentstore.server.WriterFlushResult in project pravega by pravega.
From the class SegmentAggregatorTests, method tryFlushAllSegments.
private <T extends Throwable> void tryFlushAllSegments(TestContext context, Runnable exceptionReset, Supplier<T> exceptionProvider) {
    // Flush all segments in the TestContext, as long as any of them still has something to flush and is able to
    // flush anything, or an exception was thrown (and expected).
    boolean anythingFlushed = true;
    while (anythingFlushed) {
        anythingFlushed = false;
        for (SegmentAggregator transactionAggregator : context.transactionAggregators) {
            if (transactionAggregator.mustFlush()) {
                exceptionReset.run();
                WriterFlushResult transactionFlushResult = tryFlushSegment(transactionAggregator, exceptionProvider);
                anythingFlushed = anythingFlushed | (transactionFlushResult == null || transactionFlushResult.getFlushedBytes() > 0);
            }
        }
        if (context.segmentAggregator.mustFlush()) {
            exceptionReset.run();
            WriterFlushResult parentFlushResult = tryFlushSegment(context.segmentAggregator, exceptionProvider);
            anythingFlushed = anythingFlushed | (parentFlushResult == null || (parentFlushResult.getFlushedBytes() + parentFlushResult.getMergedBytes()) > 0);
        }
    }
}
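
A sketch of how the storage-error tests could wire this helper up; the AtomicReference plumbing mirrors the exceptionSupplier pattern shown in the tests above, but this exact call site is an assumption:

// Assumed call site (hypothetical): the reset clears the last injected exception
// before each attempt, and the provider reports the exception injected during it.
AtomicReference<IntentionalException> setException = new AtomicReference<>();
tryFlushAllSegments(
        context,
        () -> setException.set(null), // exceptionReset
        setException::get);           // exceptionProvider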
Use of io.pravega.segmentstore.server.WriterFlushResult in project pravega by pravega.
From the class SegmentAggregatorTests, method testFlushAppendWithStorageErrors.
/**
 * Tests the behavior of flush() with appends and storage errors (on the write() method).
 */
@Test
public void testFlushAppendWithStorageErrors() throws Exception {
    final WriterConfig config = DEFAULT_CONFIG;
    final int appendCount = config.getFlushThresholdBytes() * 10;
    final int failSyncEvery = 2;
    final int failAsyncEvery = 3;
    @Cleanup
    TestContext context = new TestContext(config);
    context.segmentAggregator.initialize(TIMEOUT).join();
    // Have the writes fail every few attempts with a well-known exception.
    AtomicReference<IntentionalException> setException = new AtomicReference<>();
    Supplier<Exception> exceptionSupplier = () -> {
        IntentionalException ex = new IntentionalException(Long.toString(context.timer.getElapsedMillis()));
        setException.set(ex);
        return ex;
    };
    context.storage.setWriteSyncErrorInjector(new ErrorInjector<>(count -> count % failSyncEvery == 0, exceptionSupplier));
    context.storage.setWriteAsyncErrorInjector(new ErrorInjector<>(count -> count % failAsyncEvery == 0, exceptionSupplier));
    @Cleanup
    ByteArrayOutputStream writtenData = new ByteArrayOutputStream();
    // Part 1: flush triggered by accumulated size.
    int exceptionCount = 0;
    for (int i = 0; i < appendCount; i++) {
        // Add another operation and record its length.
        StorageOperation appendOp = generateAppendAndUpdateMetadata(i, SEGMENT_ID, context);
        context.segmentAggregator.add(appendOp);
        getAppendData(appendOp, writtenData, context);
        // Call flush() and inspect the result.
        setException.set(null);
        // Force a flush by incrementing the time by a lot.
        context.increaseTime(config.getFlushThresholdTime().toMillis() + 1);
        WriterFlushResult flushResult = null;
        try {
            flushResult = context.segmentAggregator.flush(TIMEOUT).join();
            Assert.assertNull("An exception was expected, but none was thrown.", setException.get());
            Assert.assertNotNull("No FlushResult provided.", flushResult);
        } catch (Exception ex) {
            if (setException.get() != null) {
                Assert.assertEquals("Unexpected exception thrown.", setException.get(), Exceptions.unwrap(ex));
                exceptionCount++;
            } else {
                // Not expecting any exception this time.
                throw ex;
            }
        }
        // Check flush result.
        if (flushResult != null) {
            AssertExtensions.assertGreaterThan("Not enough bytes were flushed (time threshold).", 0, flushResult.getFlushedBytes());
            Assert.assertEquals("Not expecting any merged bytes in this test.", 0, flushResult.getMergedBytes());
        }
    }
    // Do one last flush at the end to make sure we clear out all the buffers, if there's anything else left.
    // Force a flush by incrementing the time by a lot.
    context.increaseTime(config.getFlushThresholdTime().toMillis() + 1);
    context.storage.setWriteSyncErrorInjector(null);
    context.storage.setWriteAsyncErrorInjector(null);
    context.segmentAggregator.flush(TIMEOUT).join();
    // Verify data.
    byte[] expectedData = writtenData.toByteArray();
    byte[] actualData = new byte[expectedData.length];
    long storageLength = context.storage.getStreamSegmentInfo(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join().getLength();
    Assert.assertEquals("Unexpected number of bytes flushed to Storage.", expectedData.length, storageLength);
    context.storage.read(readHandle(context.segmentAggregator.getMetadata().getName()), 0, actualData, 0, actualData.length, TIMEOUT).join();
    Assert.assertArrayEquals("Unexpected data written to storage.", expectedData, actualData);
    AssertExtensions.assertGreaterThan("Not enough errors injected.", 0, exceptionCount);
}
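
The ErrorInjector instances above pair a count-based predicate with an exception supplier. A minimal, self-contained sketch of that assumed contract in plain Java (CountingInjector is a hypothetical stand-in, not the real Pravega class):

import java.util.function.Predicate;
import java.util.function.Supplier;

// Hypothetical stand-in for the injector contract: fire whenever the predicate
// matches the current invocation count, building the exception via the supplier.
final class CountingInjector {
    private final Predicate<Integer> shouldFail;
    private final Supplier<RuntimeException> exceptionSupplier;
    private int count;

    CountingInjector(Predicate<Integer> shouldFail, Supplier<RuntimeException> exceptionSupplier) {
        this.shouldFail = shouldFail;
        this.exceptionSupplier = exceptionSupplier;
    }

    void maybeThrow() {
        if (this.shouldFail.test(++this.count)) {
            throw this.exceptionSupplier.get();
        }
    }
}
// Usage mirroring failSyncEvery = 2 in the test:
// new CountingInjector(c -> c % 2 == 0, () -> new RuntimeException("intentional")).maybeThrow();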
Use of io.pravega.segmentstore.server.WriterFlushResult in project pravega by pravega.
From the class SegmentAggregatorTests, method testSeal.
/**
 * Tests the flush() method with Append and StreamSegmentSealOperations.
 */
@Test
public void testSeal() throws Exception {
    // Add some appends and seal, and then flush together. Verify that everything got flushed in one go.
    final int appendCount = 1000;
    final WriterConfig config = WriterConfig.builder()
            .with(WriterConfig.FLUSH_THRESHOLD_BYTES, appendCount * 50) // Extra high length threshold.
            .with(WriterConfig.FLUSH_THRESHOLD_MILLIS, 1000L)
            .with(WriterConfig.MAX_FLUSH_SIZE_BYTES, 10000)
            .with(WriterConfig.MIN_READ_TIMEOUT_MILLIS, 10L)
            .build();
    @Cleanup
    TestContext context = new TestContext(config);
    context.segmentAggregator.initialize(TIMEOUT).join();
    @Cleanup
    ByteArrayOutputStream writtenData = new ByteArrayOutputStream();
    // Accumulate some Appends
    AtomicLong outstandingSize = new AtomicLong();
    SequenceNumberCalculator sequenceNumbers = new SequenceNumberCalculator(context, outstandingSize);
    for (int i = 0; i < appendCount; i++) {
        // Add another operation and record its length.
        StorageOperation appendOp = generateAppendAndUpdateMetadata(i, SEGMENT_ID, context);
        outstandingSize.addAndGet(appendOp.getLength());
        context.segmentAggregator.add(appendOp);
        getAppendData(appendOp, writtenData, context);
        sequenceNumbers.record(appendOp);
        // Call flush() and verify that we haven't flushed anything (by design).
        WriterFlushResult flushResult = context.segmentAggregator.flush(TIMEOUT).join();
        Assert.assertEquals(String.format("Not expecting a flush. OutstandingSize=%s, Threshold=%d", outstandingSize, config.getFlushThresholdBytes()), 0, flushResult.getFlushedBytes());
        Assert.assertEquals("Not expecting any merged bytes in this test.", 0, flushResult.getMergedBytes());
    }
    Assert.assertFalse("Unexpected value returned by mustFlush() before adding StreamSegmentSealOperation.", context.segmentAggregator.mustFlush());
    // Generate and add a Seal Operation.
    StorageOperation sealOp = generateSealAndUpdateMetadata(SEGMENT_ID, context);
    context.segmentAggregator.add(sealOp);
    Assert.assertEquals("Unexpected value returned by getLowestUncommittedSequenceNumber() after adding StreamSegmentSealOperation.", sequenceNumbers.getLowestUncommitted(), context.segmentAggregator.getLowestUncommittedSequenceNumber());
    Assert.assertTrue("Unexpected value returned by mustFlush() after adding StreamSegmentSealOperation.", context.segmentAggregator.mustFlush());
    // Call flush and verify that the entire Aggregator got flushed and the Seal got persisted to Storage.
    WriterFlushResult flushResult = context.segmentAggregator.flush(TIMEOUT).join();
    Assert.assertEquals("Expected the entire Aggregator to be flushed.", outstandingSize.get(), flushResult.getFlushedBytes());
    Assert.assertFalse("Unexpected value returned by mustFlush() after flushing.", context.segmentAggregator.mustFlush());
    Assert.assertEquals("Unexpected value returned by getLowestUncommittedSequenceNumber() after flushing.", Operation.NO_SEQUENCE_NUMBER, context.segmentAggregator.getLowestUncommittedSequenceNumber());
    // Verify data.
    byte[] expectedData = writtenData.toByteArray();
    byte[] actualData = new byte[expectedData.length];
    SegmentProperties storageInfo = context.storage.getStreamSegmentInfo(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    Assert.assertEquals("Unexpected number of bytes flushed to Storage.", expectedData.length, storageInfo.getLength());
    Assert.assertTrue("Segment is not sealed in storage post flush.", storageInfo.isSealed());
    Assert.assertTrue("Segment is not marked in metadata as sealed in storage post flush.", context.segmentAggregator.getMetadata().isSealedInStorage());
    context.storage.read(InMemoryStorage.newHandle(context.segmentAggregator.getMetadata().getName(), false), 0, actualData, 0, actualData.length, TIMEOUT).join();
    Assert.assertArrayEquals("Unexpected data written to storage.", expectedData, actualData);
}
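
testSeal and testSealWithStorageErrors verify the same post-flush seal state; a hedged sketch of a shared check, hypothetical and limited to the SegmentProperties getters already exercised above:

// Hypothetical helper (not in the original suite): asserts the post-flush state
// that both seal tests verify against Storage.
private static void assertSealedInStorage(SegmentProperties storageInfo, long expectedLength) {
    Assert.assertEquals("Unexpected number of bytes flushed to Storage.", expectedLength, storageInfo.getLength());
    Assert.assertTrue("Segment is not sealed in storage post flush.", storageInfo.isSealed());
}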