Use of io.pravega.segmentstore.server.logs.operations.Operation in project pravega by pravega.
The class SegmentAggregatorTests, method testSealWithStorageErrors.
/**
* Tests the flush() method with Append and StreamSegmentSealOperations when there are Storage errors.
*/
@Test
public void testSealWithStorageErrors() throws Exception {
    // Add some appends and seal, and then flush together. Verify that everything got flushed in one go.
    final int appendCount = 1000;
    final WriterConfig config = WriterConfig.builder()
            .with(WriterConfig.FLUSH_THRESHOLD_BYTES, appendCount * 50) // Extra high length threshold.
            .with(WriterConfig.FLUSH_THRESHOLD_MILLIS, 1000L)
            .with(WriterConfig.MAX_FLUSH_SIZE_BYTES, 10000)
            .with(WriterConfig.MIN_READ_TIMEOUT_MILLIS, 10L)
            .build();
    @Cleanup
    TestContext context = new TestContext(config);
    context.storage.create(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    context.segmentAggregator.initialize(TIMEOUT).join();
    @Cleanup
    ByteArrayOutputStream writtenData = new ByteArrayOutputStream();

    // Part 1: flush triggered by accumulated size.
    for (int i = 0; i < appendCount; i++) {
        // Add another operation and record its length (not bothering with flushing here; testFlushSeal() covers that).
        StorageOperation appendOp = generateAppendAndUpdateMetadata(i, SEGMENT_ID, context);
        context.segmentAggregator.add(appendOp);
        getAppendData(appendOp, writtenData, context);
    }

    // Generate and add a Seal Operation.
    StorageOperation sealOp = generateSealAndUpdateMetadata(SEGMENT_ID, context);
    context.segmentAggregator.add(sealOp);

    // Have the writes fail every few attempts with a well-known exception.
    AtomicBoolean generateSyncException = new AtomicBoolean(true);
    AtomicBoolean generateAsyncException = new AtomicBoolean(true);
    AtomicReference<IntentionalException> setException = new AtomicReference<>();
    Supplier<Exception> exceptionSupplier = () -> {
        IntentionalException ex = new IntentionalException(Long.toString(context.timer.getElapsedMillis()));
        setException.set(ex);
        return ex;
    };
    context.storage.setSealSyncErrorInjector(new ErrorInjector<>(count -> generateSyncException.getAndSet(false), exceptionSupplier));
    context.storage.setSealAsyncErrorInjector(new ErrorInjector<>(count -> generateAsyncException.getAndSet(false), exceptionSupplier));

    // Call flush and verify that the entire Aggregator got flushed and the Seal got persisted to Storage.
    int attemptCount = 4;
    for (int i = 0; i < attemptCount; i++) {
        // Repeat a number of times; at least one attempt should succeed.
        setException.set(null);
        try {
            FlushResult flushResult = context.segmentAggregator.flush(TIMEOUT).join();
            Assert.assertNull("An exception was expected, but none was thrown.", setException.get());
            Assert.assertNotNull("No FlushResult provided.", flushResult);
        } catch (Exception ex) {
            if (setException.get() != null) {
                Assert.assertEquals("Unexpected exception thrown.", setException.get(), Exceptions.unwrap(ex));
            } else {
                // Not expecting any exception this time.
                throw ex;
            }
        }

        if (!generateAsyncException.get() && !generateSyncException.get() && setException.get() == null) {
            // We are done. We got at least one flush through.
            break;
        }
    }

    // Verify data.
    byte[] expectedData = writtenData.toByteArray();
    byte[] actualData = new byte[expectedData.length];
    SegmentProperties storageInfo = context.storage.getStreamSegmentInfo(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    Assert.assertEquals("Unexpected number of bytes flushed to Storage.", expectedData.length, storageInfo.getLength());
    Assert.assertTrue("Segment is not sealed in storage post flush.", storageInfo.isSealed());
    Assert.assertTrue("Segment is not marked in metadata as sealed in storage post flush.", context.segmentAggregator.getMetadata().isSealedInStorage());
    context.storage.read(readHandle(context.segmentAggregator.getMetadata().getName()), 0, actualData, 0, actualData.length, TIMEOUT).join();
    Assert.assertArrayEquals("Unexpected data written to storage.", expectedData, actualData);
}
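The Seal error injectors configured above follow a simple pattern: a predicate over an invocation counter decides whether a given call should fail, and a supplier produces the exception to throw. Pravega's actual ErrorInjector test utility may be implemented differently; the following is only a minimal, self-contained sketch of that pattern, and the names CountingErrorInjector and throwIfNecessary are illustrative, not part of the project.

import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Predicate;
import java.util.function.Supplier;

// Minimal sketch of a count-based error injector (hypothetical class, not Pravega's ErrorInjector).
final class CountingErrorInjector<T extends Exception> {
    private final Predicate<Integer> shouldFail;   // decides, per invocation count, whether to inject an error
    private final Supplier<T> exceptionSupplier;   // produces the exception to inject
    private final AtomicInteger invocationCount = new AtomicInteger();

    CountingErrorInjector(Predicate<Integer> shouldFail, Supplier<T> exceptionSupplier) {
        this.shouldFail = shouldFail;
        this.exceptionSupplier = exceptionSupplier;
    }

    // Called at the start of the instrumented Storage method; throws on the invocations selected by the predicate.
    void throwIfNecessary() throws T {
        int count = this.invocationCount.incrementAndGet();
        if (this.shouldFail.test(count)) {
            throw this.exceptionSupplier.get();
        }
    }
}

With this shape, a predicate such as count -> generateSyncException.getAndSet(false) fails only the first call (as in the test above), while count -> count % failSyncEvery == 0 fails every failSyncEvery-th call (as in testFlushAppendWithStorageErrors further down).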
Use of io.pravega.segmentstore.server.logs.operations.Operation in project pravega by pravega.
The class SegmentAggregatorTests, method testTruncateAlreadySealedSegment.
/**
* Tests the SegmentAggregator's behavior when an already Sealed Segment is opened and truncated.
*/
@Test
public void testTruncateAlreadySealedSegment() throws Exception {
    // Pre-create the segment, write some data, and then seal it.
    val rnd = new Random(0);
    byte[] storageData = new byte[100];
    rnd.nextBytes(storageData);
    @Cleanup
    TestContext context = new TestContext(DEFAULT_CONFIG);
    context.storage.create(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    context.storage.openWrite(context.segmentAggregator.getMetadata().getName())
            .thenCompose(h -> context.storage.write(h, 0, new ByteArrayInputStream(storageData), storageData.length, TIMEOUT))
            .join();
    val sm = context.containerMetadata.getStreamSegmentMetadata(context.segmentAggregator.getMetadata().getId());
    sm.setLength(storageData.length);
    sm.setStorageLength(storageData.length);
    sm.markSealed();
    sm.markSealedInStorage();

    // Initialize the SegmentAggregator.
    context.segmentAggregator.initialize(TIMEOUT).join();

    // Generate and add a Truncate Operation.
    StorageOperation truncateOp = generateTruncateAndUpdateMetadata(SEGMENT_ID, context);
    context.segmentAggregator.add(truncateOp);
    Assert.assertTrue("Unexpected value returned by mustFlush() after adding StreamSegmentTruncateOperation.", context.segmentAggregator.mustFlush());

    // Call flush and verify that the entire Aggregator got flushed and the Truncate got persisted to Storage.
    context.segmentAggregator.flush(TIMEOUT).join();

    // Verify data.
    SegmentProperties storageInfo = context.storage.getStreamSegmentInfo(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    Assert.assertEquals("Unexpected number of bytes in Storage.", storageData.length, storageInfo.getLength());
    Assert.assertEquals("Unexpected truncation offset in Storage.", truncateOp.getStreamSegmentOffset(), context.storage.getTruncationOffset(context.segmentAggregator.getMetadata().getName()));
}
Use of io.pravega.segmentstore.server.logs.operations.Operation in project pravega by pravega.
The class SegmentAggregatorTests, method testFlushAppendWithStorageErrors.
/**
* Tests the behavior of flush() with appends and storage errors (on the write() method).
*/
@Test
public void testFlushAppendWithStorageErrors() throws Exception {
    final WriterConfig config = DEFAULT_CONFIG;
    final int appendCount = config.getFlushThresholdBytes() * 10;
    final int failSyncEvery = 2;
    final int failAsyncEvery = 3;
    @Cleanup
    TestContext context = new TestContext(config);
    context.storage.create(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    context.segmentAggregator.initialize(TIMEOUT).join();

    // Have the writes fail every few attempts with a well known exception.
    AtomicReference<IntentionalException> setException = new AtomicReference<>();
    Supplier<Exception> exceptionSupplier = () -> {
        IntentionalException ex = new IntentionalException(Long.toString(context.timer.getElapsedMillis()));
        setException.set(ex);
        return ex;
    };
    context.storage.setWriteSyncErrorInjector(new ErrorInjector<>(count -> count % failSyncEvery == 0, exceptionSupplier));
    context.storage.setWriteAsyncErrorInjector(new ErrorInjector<>(count -> count % failAsyncEvery == 0, exceptionSupplier));
    @Cleanup
    ByteArrayOutputStream writtenData = new ByteArrayOutputStream();

    // Part 1: flush triggered by accumulated size.
    int exceptionCount = 0;
    for (int i = 0; i < appendCount; i++) {
        // Add another operation and record its length.
        StorageOperation appendOp = generateAppendAndUpdateMetadata(i, SEGMENT_ID, context);
        context.segmentAggregator.add(appendOp);
        getAppendData(appendOp, writtenData, context);

        // Call flush() and inspect the result.
        setException.set(null);

        // Force a flush by incrementing the time by a lot.
        context.increaseTime(config.getFlushThresholdTime().toMillis() + 1);
        FlushResult flushResult = null;
        try {
            flushResult = context.segmentAggregator.flush(TIMEOUT).join();
            Assert.assertNull("An exception was expected, but none was thrown.", setException.get());
            Assert.assertNotNull("No FlushResult provided.", flushResult);
        } catch (Exception ex) {
            if (setException.get() != null) {
                Assert.assertEquals("Unexpected exception thrown.", setException.get(), Exceptions.unwrap(ex));
                exceptionCount++;
            } else {
                // Not expecting any exception this time.
                throw ex;
            }
        }

        // Check flush result.
        if (flushResult != null) {
            AssertExtensions.assertGreaterThan("Not enough bytes were flushed (time threshold).", 0, flushResult.getFlushedBytes());
            Assert.assertEquals("Not expecting any merged bytes in this test.", 0, flushResult.getMergedBytes());
        }
    }

    // Do one last flush at the end to make sure we clear out all the buffers, if there's anything else left.
    // Force a flush by incrementing the time by a lot.
    context.increaseTime(config.getFlushThresholdTime().toMillis() + 1);
    context.storage.setWriteSyncErrorInjector(null);
    context.storage.setWriteAsyncErrorInjector(null);
    context.segmentAggregator.flush(TIMEOUT).join();

    // Verify data.
    byte[] expectedData = writtenData.toByteArray();
    byte[] actualData = new byte[expectedData.length];
    long storageLength = context.storage.getStreamSegmentInfo(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join().getLength();
    Assert.assertEquals("Unexpected number of bytes flushed to Storage.", expectedData.length, storageLength);
    context.storage.read(readHandle(context.segmentAggregator.getMetadata().getName()), 0, actualData, 0, actualData.length, TIMEOUT).join();
    Assert.assertArrayEquals("Unexpected data written to storage.", expectedData, actualData);
    AssertExtensions.assertGreaterThan("Not enough errors injected.", 0, exceptionCount);
}
Use of io.pravega.segmentstore.server.logs.operations.Operation in project pravega by pravega.
The class StorageWriterTests, method testCleanup.
/**
* Tests the ability of the StorageWriter to cleanup SegmentAggregators that have been deleted in Storage or are
* gone from the Metadata.
* 1. Creates 3 segments, and adds an append for each of them.
* 2. Marks segment 2 as deleted (in metadata) and evicts segment 3 from metadata (no deletion).
* 3. Runs one more Writer cycle (to clean up).
* 4. Reinstates the missing segment metadatas and adds appends for each of them, verifying that the Writer re-requests
* the metadata for those two.
*/
@Test
public void testCleanup() throws Exception {
    final WriterConfig config = WriterConfig.builder()
            .with(WriterConfig.FLUSH_THRESHOLD_BYTES, 1) // This differs from DEFAULT_CONFIG.
            .with(WriterConfig.FLUSH_THRESHOLD_MILLIS, 1000L)
            .with(WriterConfig.MIN_READ_TIMEOUT_MILLIS, 10L)
            .with(WriterConfig.MAX_READ_TIMEOUT_MILLIS, 250L)
            .with(WriterConfig.MAX_ITEMS_TO_READ_AT_ONCE, 100)
            .with(WriterConfig.ERROR_SLEEP_MILLIS, 0L)
            .build();
    @Cleanup
    TestContext context = new TestContext(config);
    context.writer.startAsync();

    // Create a bunch of segments and Transactions.
    final ArrayList<Long> segmentIds = createSegments(context);
    final UpdateableSegmentMetadata segment1 = context.metadata.getStreamSegmentMetadata(segmentIds.get(0));
    final UpdateableSegmentMetadata segment2 = context.metadata.getStreamSegmentMetadata(segmentIds.get(1));
    final UpdateableSegmentMetadata segment3 = context.metadata.getStreamSegmentMetadata(segmentIds.get(2));
    final byte[] data = new byte[1];
    Function<UpdateableSegmentMetadata, Operation> createAppend = segment -> {
        StreamSegmentAppendOperation append = new StreamSegmentAppendOperation(segment.getId(), data, null);
        append.setStreamSegmentOffset(segment.getLength());
        context.dataSource.recordAppend(append);
        segment.setLength(segment.getLength() + data.length);
        return new CachedStreamSegmentAppendOperation(append);
    };

    // Process an append for each segment, to make sure the writer has knowledge of those segments.
    context.dataSource.add(createAppend.apply(segment1));
    context.dataSource.add(createAppend.apply(segment2));
    context.dataSource.add(createAppend.apply(segment3));
    metadataCheckpoint(context);
    context.dataSource.waitFullyAcked().get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);

    // Delete segment2 (markDeleted) and evict segment3 (by forcing the metadata to forget about it).
    long evictionCutoff = context.metadata.nextOperationSequenceNumber() + 1;
    context.metadata.getStreamSegmentId(segment1.getName(), true);
    context.metadata.getStreamSegmentId(segment2.getName(), true);
    segment2.markDeleted();
    Collection<Long> evictedSegments = evictSegments(evictionCutoff, context);

    // Make sure the right segment is evicted, and not the other two (there are other segments in this system which we don't care about).
    Assert.assertTrue("Expected segment was not evicted.", evictedSegments.contains(segment3.getId()));
    Assert.assertFalse("Unexpected segments were not evicted.", evictedSegments.contains(segment1.getId()) && evictedSegments.contains(segment3.getId()));

    // Add one more append to Segment1 - this will force the writer to go on a full iteration and thus invoke cleanup.
    context.dataSource.add(createAppend.apply(segment1));
    metadataCheckpoint(context);
    context.dataSource.waitFullyAcked().get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);

    // Get rid of Segment2 from the metadata.
    evictionCutoff = context.metadata.nextOperationSequenceNumber() + 1;
    context.metadata.getStreamSegmentId(segment1.getName(), true);
    evictedSegments = evictSegments(evictionCutoff, context);
    Assert.assertTrue("Expected segment was not evicted.", evictedSegments.contains(segment2.getId()));

    // Repopulate the metadata.
    val segment2Take2 = context.metadata.mapStreamSegmentId(segment2.getName(), segment2.getId());
    val segment3Take2 = context.metadata.mapStreamSegmentId(segment3.getName(), segment3.getId());
    segment2Take2.copyFrom(segment2);
    segment3Take2.copyFrom(segment3);

    // Add an append for each of the re-added segments and verify that the Writer re-requested the metadata, which
    // indicates it had to recreate their SegmentAggregators.
    HashSet<Long> requestedSegmentIds = new HashSet<>();
    context.dataSource.setSegmentMetadataRequested(requestedSegmentIds::add);
    context.dataSource.add(createAppend.apply(segment2Take2));
    context.dataSource.add(createAppend.apply(segment3Take2));
    metadataCheckpoint(context);
    context.dataSource.waitFullyAcked().get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    Assert.assertTrue("The deleted segments did not have their metadata requested.", requestedSegmentIds.contains(segment2.getId()) && requestedSegmentIds.contains(segment3.getId()));
}
Use of io.pravega.segmentstore.server.logs.operations.Operation in project pravega by pravega.
The class TestWriterDataSource, method waitForAdd.
// endregion
// region Helpers
private CompletableFuture<Void> waitForAdd(long currentSeqNo, Duration timeout) {
    CompletableFuture<Void> result;
    synchronized (this.lock) {
        Operation last = this.log.getLast();
        if (last != null && last.getSequenceNumber() > currentSeqNo) {
            // An add has already been processed that meets or exceeds the given sequence number.
            result = CompletableFuture.completedFuture(null);
        } else {
            if (this.addProcessed == null) {
                // We need to wait for an add, and nobody else is waiting for it too.
                this.addProcessed = Futures.futureWithTimeout(timeout, this.executor);
                Futures.onTimeout(this.addProcessed, ex -> {
                    synchronized (this.lock) {
                        if (this.addProcessed.isCompletedExceptionally()) {
                            this.addProcessed = null;
                        }
                    }
                });
            }
            result = this.addProcessed;
        }
    }
    return result;
}
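waitForAdd() relies on a single shared future that is completed when the next add arrives and discarded if it times out first, so a later caller can register a fresh one. Pravega handles the timeout via its Futures.futureWithTimeout/onTimeout helpers; the sketch below reproduces the same idea with plain CompletableFuture and a scheduled executor. The names AddWaiter and onAdd are illustrative assumptions, not part of TestWriterDataSource.

import java.time.Duration;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

// Simplified standalone sketch of the waitForAdd() pattern (names are illustrative).
final class AddWaiter {
    private final Object lock = new Object();
    private final ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
    private CompletableFuture<Void> addProcessed; // shared by all callers currently waiting
    private long lastSequenceNumber = -1;

    CompletableFuture<Void> waitForAdd(long currentSeqNo, Duration timeout) {
        synchronized (this.lock) {
            if (this.lastSequenceNumber > currentSeqNo) {
                // An add beyond the requested sequence number has already been processed.
                return CompletableFuture.completedFuture(null);
            }
            if (this.addProcessed == null) {
                // First waiter: create the shared future and fail it on timeout, clearing the field
                // so that a subsequent caller can register a new one.
                CompletableFuture<Void> future = new CompletableFuture<>();
                this.executor.schedule(() -> {
                    if (future.completeExceptionally(new TimeoutException())) {
                        synchronized (this.lock) {
                            if (this.addProcessed == future) {
                                this.addProcessed = null;
                            }
                        }
                    }
                }, timeout.toMillis(), TimeUnit.MILLISECONDS);
                this.addProcessed = future;
            }
            return this.addProcessed;
        }
    }

    // Invoked by the producer when a new operation has been added to the log.
    void onAdd(long sequenceNumber) {
        CompletableFuture<Void> toComplete;
        synchronized (this.lock) {
            this.lastSequenceNumber = sequenceNumber;
            toComplete = this.addProcessed;
            this.addProcessed = null;
        }
        if (toComplete != null) {
            toComplete.complete(null);
        }
    }
}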