Use of org.junit.rules.Timeout in project pravega by pravega.
Class: SegmentAggregatorTests, method: testDelete.
/**
* Tests the ability to process a {@link DeleteSegmentOperation} on Segments in various states:
* - Empty (not yet created).
* - Empty (created, but no data).
* - Not empty, not sealed.
* - Sealed (empty or not).
*/
@Test
public void testDelete() throws Exception {
    @Cleanup TestContext context = new TestContext(DEFAULT_CONFIG);
    val notCreated = context.transactionAggregators[0];
    val empty = context.transactionAggregators[1];
    val notSealed = context.transactionAggregators[2];
    val sealed = context.transactionAggregators[3];
    val withMergers = context.transactionAggregators[4];
    val withMergerSource = context.transactionAggregators[5];
    val emptyWithAttributes = context.transactionAggregators[6];
    // withMergerSource is excluded: it is deleted as part of deleting its merge target (withMergers).
    val allAggregators = new SegmentAggregator[] { notCreated, empty, notSealed, sealed, withMergers, emptyWithAttributes };

    // Create the segments that are supposed to exist in Storage.
    Stream.of(empty, notSealed, sealed).forEach(a -> context.storage.create(a.getMetadata().getName(), TIMEOUT).join());

    // Write 1 byte to the non-empty segment and add 1 attribute.
    context.storage.openWrite(notSealed.getMetadata().getName())
            .thenCompose(handle -> context.storage.write(handle, 0, new ByteArrayInputStream(new byte[] { 1 }), 1, TIMEOUT))
            .join();
    ((UpdateableSegmentMetadata) notSealed.getMetadata()).setLength(1L);
    context.dataSource.persistAttributes(notSealed.getMetadata().getId(), Collections.singletonMap(AttributeId.randomUUID(), 1L), TIMEOUT).join();

    // Seal the sealed segment.
    ((UpdateableSegmentMetadata) sealed.getMetadata()).markSealed();
    context.storage.openWrite(sealed.getMetadata().getName())
            .thenCompose(handle -> context.storage.seal(handle, TIMEOUT))
            .join();
    context.dataSource.persistAttributes(sealed.getMetadata().getId(), Collections.singletonMap(AttributeId.randomUUID(), 1L), TIMEOUT).join();

    // Create a merge source segment; we'll verify it is also deleted when its merge target is.
    context.storage.create(withMergerSource.getMetadata().getName(), TIMEOUT).join();
    context.dataSource.persistAttributes(withMergerSource.getMetadata().getId(), Collections.singletonMap(AttributeId.randomUUID(), 2L), TIMEOUT).join();

    // This segment has an attribute index, but no segment has been created in Storage yet (since no data has been written to it).
    context.dataSource.persistAttributes(emptyWithAttributes.getMetadata().getId(), Collections.singletonMap(AttributeId.randomUUID(), 3L), TIMEOUT).join();

    for (val a : allAggregators) {
        // Initialize the Aggregator and add the DeleteSegmentOperation.
        a.initialize(TIMEOUT).join();
        if (a == withMergers) {
            // Add a merged segment to this one, but not before adding an arbitrary operation.
            withMergers.add(generateAppendAndUpdateMetadata(1, withMergers.getMetadata().getId(), context));
            a.add(generateMergeTransactionAndUpdateMetadata(withMergers.getMetadata().getId(), withMergerSource.getMetadata().getId(), context));
        }

        a.add(generateDeleteAndUpdateMetadata(a.getMetadata().getId(), context));
        AssertExtensions.assertGreaterThan("Unexpected LUSN before flush.", 0, a.getLowestUncommittedSequenceNumber());
        Assert.assertTrue("Unexpected value from mustFlush() when DeleteSegmentOperation queued up.", a.mustFlush());

        // Flush everything.
        a.flush(TIMEOUT).join();
        Assert.assertFalse("Unexpected value from mustFlush() after Deletion.", a.mustFlush());
        AssertExtensions.assertLessThan("Unexpected LUSN after flush.", 0, a.getLowestUncommittedSequenceNumber());
        Assert.assertTrue("Unexpected value from isDeleted() after Deletion.", a.getMetadata().isDeleted());
        Assert.assertTrue("Unexpected value from isDeletedInStorage() after Deletion.", a.getMetadata().isDeletedInStorage());

        // Verify that no segment exists in Storage after the flush.
        boolean existsInStorage = context.storage.exists(a.getMetadata().getName(), TIMEOUT).join();
        Assert.assertFalse("Segment still exists in Storage after Deletion.", existsInStorage);
    }

    Assert.assertFalse("Pending merger source segment not deleted.", context.storage.exists(withMergerSource.getMetadata().getName(), TIMEOUT).join());
    Assert.assertTrue("Attributes not deleted for non-merged segment.", context.dataSource.getPersistedAttributes(notSealed.getMetadata().getId()).isEmpty());
    Assert.assertTrue("Attributes not deleted for merger source segment.", context.dataSource.getPersistedAttributes(withMergerSource.getMetadata().getId()).isEmpty());
    Assert.assertTrue("Attributes not deleted for empty segment with attributes.", context.dataSource.getPersistedAttributes(emptyWithAttributes.getMetadata().getId()).isEmpty());
}
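The per-state setup above repeats the same three steps: create the segment in Storage, write a byte and record the new length in metadata, and persist one attribute. A minimal sketch of how that could be factored into a helper, built only from calls that already appear in the test; the helper name itself is illustrative and not part of the Pravega test class:

    // Hypothetical helper (name is illustrative): brings a segment into the
    // "not empty, not sealed" state used above - created in Storage, one byte
    // written, length recorded in metadata, and one persisted attribute.
    private void setupNonEmptySegment(SegmentAggregator aggregator, TestContext context) {
        String name = aggregator.getMetadata().getName();
        context.storage.create(name, TIMEOUT).join();
        context.storage.openWrite(name)
                .thenCompose(handle -> context.storage.write(handle, 0, new ByteArrayInputStream(new byte[] { 1 }), 1, TIMEOUT))
                .join();
        ((UpdateableSegmentMetadata) aggregator.getMetadata()).setLength(1L);
        context.dataSource.persistAttributes(
                aggregator.getMetadata().getId(),
                Collections.singletonMap(AttributeId.randomUUID(), 1L),
                TIMEOUT).join();
    }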
Use of org.junit.rules.Timeout in project pravega by pravega.
Class: SegmentAggregatorTests, method: testReconcileTruncate.
/**
* Tests the ability of the SegmentAggregator to reconcile StreamSegmentTruncateOperations.
*/
@Test
public void testReconcileTruncate() throws Exception {
    val rnd = new Random(0);
    byte[] storageData = new byte[100];
    rnd.nextBytes(storageData);

    // Write some data to the segment in Storage.
    @Cleanup TestContext context = new TestContext(DEFAULT_CONFIG);
    context.storage.create(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    context.segmentAggregator.initialize(TIMEOUT).join();
    context.storage.openWrite(context.segmentAggregator.getMetadata().getName())
            .thenCompose(h -> context.storage.write(h, 0, new ByteArrayInputStream(storageData), storageData.length, TIMEOUT))
            .join();
    val sm = context.containerMetadata.getStreamSegmentMetadata(context.segmentAggregator.getMetadata().getId());
    sm.setLength(storageData.length);
    sm.setStorageLength(storageData.length);

    // The truncate actually succeeds in Storage, but we throw an (intentional) error, indicating that it didn't.
    context.storage.setTruncateInterceptor((segmentName, offset, storage) -> {
        context.storage.truncateDirectly(writeHandle(segmentName), offset);
        throw new IntentionalException(String.format("S=%s", segmentName));
    });

    // Attempt to truncate.
    StorageOperation truncateOp = generateTruncateAndUpdateMetadata(SEGMENT_ID, context);
    context.segmentAggregator.add(truncateOp);

    // First attempt: the flush (and thus the truncate) must end in failure.
    AssertExtensions.assertThrows(
            "IntentionalException did not propagate to flush() caller.",
            () -> context.segmentAggregator.flush(TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS),
            ex -> Exceptions.unwrap(ex) instanceof IntentionalException);
    context.storage.setTruncateInterceptor(null);

    // Second attempt: we are in reconciliation mode, so flush must succeed (and update internal state based on Storage).
    context.segmentAggregator.flush(TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);

    // Verify the outcome.
    Assert.assertEquals("Unexpected truncation offset in Storage.",
            truncateOp.getStreamSegmentOffset(),
            context.storage.getTruncationOffset(context.segmentAggregator.getMetadata().getName()));
}
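The interceptor above applies the truncate and then reports failure; that is what forces the aggregator into reconciliation mode, since Storage already reflects the operation and the second flush only has to reconcile internal state against it. For contrast, a sketch of the opposite setup, assuming the same setTruncateInterceptor hook shown above, where Storage is left untouched and a later flush would have to re-attempt the truncate itself rather than reconcile:

    // Sketch (same hook as above, different behavior): fail the truncate
    // without applying it, leaving Storage unchanged. After clearing the
    // interceptor, a subsequent flush would retry the truncate operation
    // rather than enter reconciliation.
    context.storage.setTruncateInterceptor((segmentName, offset, storage) -> {
        throw new IntentionalException(String.format("S=%s", segmentName));
    });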
Use of org.junit.rules.Timeout in project pravega by pravega.
Class: SegmentAggregatorTests, method: testTruncateAlreadySealedSegment.
/**
* Tests the SegmentAggregator's behavior when an already Sealed Segment is opened and truncated.
*/
@Test
public void testTruncateAlreadySealedSegment() throws Exception {
    // Generate some data to pre-populate the segment with.
    val rnd = new Random(0);
    byte[] storageData = new byte[100];
    rnd.nextBytes(storageData);
    @Cleanup TestContext context = new TestContext(DEFAULT_CONFIG);

    // Create the segment, write the data to it, and seal it in Storage.
    context.storage.create(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    context.storage.openWrite(context.segmentAggregator.getMetadata().getName())
            .thenCompose(h -> context.storage.write(h, 0, new ByteArrayInputStream(storageData), storageData.length, TIMEOUT)
                    .thenCompose(v -> context.storage.seal(h, TIMEOUT)))
            .join();
    val sm = context.containerMetadata.getStreamSegmentMetadata(context.segmentAggregator.getMetadata().getId());
    sm.setLength(storageData.length);
    sm.setStorageLength(storageData.length);
    sm.markSealed();
    sm.markSealedInStorage();

    // Initialize the SegmentAggregator.
    context.segmentAggregator.initialize(TIMEOUT).join();

    // Generate and add a Truncate Operation.
    StorageOperation truncateOp = generateTruncateAndUpdateMetadata(SEGMENT_ID, context);
    context.segmentAggregator.add(truncateOp);
    Assert.assertTrue("Unexpected value returned by mustFlush() after adding StreamSegmentTruncateOperation.", context.segmentAggregator.mustFlush());

    // Call flush and verify that the entire Aggregator got flushed and the Truncate got persisted to Storage.
    context.segmentAggregator.flush(TIMEOUT).join();

    // Verify data.
    SegmentProperties storageInfo = context.storage.getStreamSegmentInfo(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    Assert.assertEquals("Unexpected number of bytes in Storage.", storageData.length, storageInfo.getLength());
    Assert.assertEquals("Unexpected truncation offset in Storage.",
            truncateOp.getStreamSegmentOffset(),
            context.storage.getTruncationOffset(context.segmentAggregator.getMetadata().getName()));
}
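The two verifications at the end (length unchanged, truncation offset advanced) generalize to any truncate-on-sealed-segment scenario. A hedged sketch of an assertion helper built only from calls already used in these tests; the helper itself is hypothetical and not part of the test class:

    // Hypothetical helper: a truncate on a sealed segment must leave the
    // length and seal status intact and only move the truncation offset.
    private void assertTruncatedButStillSealed(TestContext context, StorageOperation truncateOp, long expectedLength) {
        String name = context.segmentAggregator.getMetadata().getName();
        SegmentProperties info = context.storage.getStreamSegmentInfo(name, TIMEOUT).join();
        Assert.assertEquals("Length must be unaffected by truncation.", expectedLength, info.getLength());
        Assert.assertTrue("Segment must remain sealed in Storage.", info.isSealed());
        Assert.assertEquals("Truncation offset must match the operation.",
                truncateOp.getStreamSegmentOffset(), context.storage.getTruncationOffset(name));
    }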
Use of org.junit.rules.Timeout in project pravega by pravega.
Class: SegmentAggregatorTests, method: testMergeWithStorageErrors.
/**
 * Tests the flush() method with Append and MergeTransactionOperations when there are Storage errors.
 */
@Test
public void testMergeWithStorageErrors() throws Exception {
    // This is the number of appends per Segment/Transaction - there will be a lot of appends here.
    final int appendCount = 100;
    final int failSyncEvery = 2;
    final int failAsyncEvery = 3;
    // Use an extra-high flush threshold so flushes are triggered explicitly, not by accumulated size.
    final WriterConfig config = WriterConfig.builder()
            .with(WriterConfig.FLUSH_THRESHOLD_BYTES, appendCount * 50)
            .with(WriterConfig.FLUSH_THRESHOLD_MILLIS, 1000L)
            .with(WriterConfig.MAX_FLUSH_SIZE_BYTES, 10000)
            .with(WriterConfig.MIN_READ_TIMEOUT_MILLIS, 10L)
            .build();
    @Cleanup TestContext context = new TestContext(config);

    // Initialize all segments.
    context.segmentAggregator.initialize(TIMEOUT).join();
    for (SegmentAggregator a : context.transactionAggregators) {
        a.initialize(TIMEOUT).join();
    }

    // Store written data by segment, so we can check it later.
    HashMap<Long, ByteArrayOutputStream> dataBySegment = new HashMap<>();

    // Add a number of appends to each Transaction aggregator and seal all Transactions.
    for (int i = 0; i < context.transactionAggregators.length; i++) {
        SegmentAggregator transactionAggregator = context.transactionAggregators[i];
        long transactionId = transactionAggregator.getMetadata().getId();
        ByteArrayOutputStream writtenData = new ByteArrayOutputStream();
        dataBySegment.put(transactionId, writtenData);
        for (int appendId = 0; appendId < appendCount; appendId++) {
            StorageOperation appendOp = generateAppendAndUpdateMetadata(appendId, transactionId, context);
            transactionAggregator.add(appendOp);
            getAppendData(appendOp, writtenData, context);
        }

        transactionAggregator.add(generateSealAndUpdateMetadata(transactionId, context));
    }

    // Merge all the Transactions into the parent Segment.
    @Cleanup ByteArrayOutputStream parentData = new ByteArrayOutputStream();
    for (int transIndex = 0; transIndex < context.transactionAggregators.length; transIndex++) {
        // Merge this Transaction into the parent & record its data in the final parent data array.
        long transactionId = context.transactionAggregators[transIndex].getMetadata().getId();
        context.segmentAggregator.add(generateMergeTransactionAndUpdateMetadata(transactionId, context));
        ByteArrayOutputStream transactionData = dataBySegment.get(transactionId);
        parentData.write(transactionData.toByteArray());
        transactionData.close();
    }

    // Have the concats fail every few attempts with a well-known exception.
    AtomicReference<IntentionalException> setException = new AtomicReference<>();
    Supplier<Exception> exceptionSupplier = () -> {
        IntentionalException ex = new IntentionalException(Long.toString(context.timer.getElapsedMillis()));
        setException.set(ex);
        return ex;
    };
    context.storage.setConcatSyncErrorInjector(new ErrorInjector<>(count -> count % failSyncEvery == 0, exceptionSupplier));
    context.storage.setConcatAsyncErrorInjector(new ErrorInjector<>(count -> count % failAsyncEvery == 0, exceptionSupplier));

    // Flush all the Aggregators, while checking that the right errors get handled and can be recovered from.
    tryFlushAllSegments(context, () -> setException.set(null), setException::get);

    // Verify that all Transactions are now fully merged.
    for (SegmentAggregator transactionAggregator : context.transactionAggregators) {
        SegmentMetadata transactionMetadata = transactionAggregator.getMetadata();
        Assert.assertTrue("Merged Transaction was not marked as deleted in metadata.", transactionMetadata.isDeleted());
        Assert.assertFalse("Merged Transaction still exists in storage.", context.storage.exists(transactionMetadata.getName(), TIMEOUT).join());
    }

    // Verify that, in the end, the contents of the parent Segment are as expected.
    byte[] expectedData = parentData.toByteArray();
    byte[] actualData = new byte[expectedData.length];
    long storageLength = context.storage.getStreamSegmentInfo(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join().getLength();
    Assert.assertEquals("Unexpected number of bytes flushed/merged to Storage.", expectedData.length, storageLength);
    context.storage.read(readHandle(context.segmentAggregator.getMetadata().getName()), 0, actualData, 0, actualData.length, TIMEOUT).join();
    Assert.assertArrayEquals("Unexpected data written to storage.", expectedData, actualData);
}
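tryFlushAllSegments(...) is a helper defined elsewhere in the test class; its body is not part of this listing. Below is a minimal sketch of the retry loop such a helper plausibly implements, under the assumption (matching how the test wires setException) that the first callback resets the injected-error slot before each attempt and the second reports the error injected during it. Illustrative only, using standard java.util imports:

    // Illustrative sketch - not the actual Pravega helper. Flush every
    // aggregator (transactions, then parent) until none reports pending data,
    // tolerating only the errors that were deliberately injected.
    private void tryFlushAllSegments(TestContext context, Runnable beforeFlush, Supplier<Exception> getError) {
        List<SegmentAggregator> all = new ArrayList<>(Arrays.asList(context.transactionAggregators));
        all.add(context.segmentAggregator);
        boolean flushedAnything = true;
        while (flushedAnything) {
            flushedAnything = false;
            for (SegmentAggregator a : all) {
                if (!a.mustFlush()) {
                    continue;
                }
                flushedAnything = true;
                beforeFlush.run(); // Reset the injected-error slot for this attempt.
                try {
                    a.flush(TIMEOUT).join();
                } catch (Exception ex) {
                    // An attempt may fail only with the error injected for it; anything else is a real bug.
                    Assert.assertEquals("Unexpected error during flush.", getError.get(), Exceptions.unwrap(ex));
                }
            }
        }
    }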
Use of org.junit.rules.Timeout in project pravega by pravega.
Class: SegmentAggregatorTests, method: testSealWithStorageErrors.
/**
* Tests the flush() method with Append and StreamSegmentSealOperations when there are Storage errors.
*/
@Test
public void testSealWithStorageErrors() throws Exception {
    // Add some appends and a seal, then flush them together. Verify that everything got flushed in one go.
    final int appendCount = 1000;
    // Use an extra-high flush threshold so the flush is triggered explicitly, not by accumulated size.
    final WriterConfig config = WriterConfig.builder()
            .with(WriterConfig.FLUSH_THRESHOLD_BYTES, appendCount * 50)
            .with(WriterConfig.FLUSH_THRESHOLD_MILLIS, 1000L)
            .with(WriterConfig.MAX_FLUSH_SIZE_BYTES, 10000)
            .with(WriterConfig.MIN_READ_TIMEOUT_MILLIS, 10L)
            .build();
    @Cleanup TestContext context = new TestContext(config);
    context.segmentAggregator.initialize(TIMEOUT).join();
    @Cleanup ByteArrayOutputStream writtenData = new ByteArrayOutputStream();

    // Accumulate a number of appends without flushing (testFlushSeal() covers flushing as we go).
    for (int i = 0; i < appendCount; i++) {
        // Add another operation and record its data.
        StorageOperation appendOp = generateAppendAndUpdateMetadata(i, SEGMENT_ID, context);
        context.segmentAggregator.add(appendOp);
        getAppendData(appendOp, writtenData, context);
    }

    // Generate and add a Seal Operation.
    StorageOperation sealOp = generateSealAndUpdateMetadata(SEGMENT_ID, context);
    context.segmentAggregator.add(sealOp);

    // Have the Seal calls fail (once synchronously, once asynchronously) with a well-known exception.
    AtomicBoolean generateSyncException = new AtomicBoolean(true);
    AtomicBoolean generateAsyncException = new AtomicBoolean(true);
    AtomicReference<IntentionalException> setException = new AtomicReference<>();
    Supplier<Exception> exceptionSupplier = () -> {
        IntentionalException ex = new IntentionalException(Long.toString(context.timer.getElapsedMillis()));
        setException.set(ex);
        return ex;
    };
    context.storage.setSealSyncErrorInjector(new ErrorInjector<>(count -> generateSyncException.getAndSet(false), exceptionSupplier));
    context.storage.setSealAsyncErrorInjector(new ErrorInjector<>(count -> generateAsyncException.getAndSet(false), exceptionSupplier));

    // Call flush repeatedly and verify that the entire Aggregator eventually gets flushed and the Seal gets persisted to Storage.
    int attemptCount = 4;
    for (int i = 0; i < attemptCount; i++) {
        // Repeat a number of times; at least one attempt should succeed.
        setException.set(null);
        try {
            WriterFlushResult flushResult = context.segmentAggregator.flush(TIMEOUT).join();
            Assert.assertNull("An exception was expected, but none was thrown.", setException.get());
            Assert.assertNotNull("No FlushResult provided.", flushResult);
        } catch (Exception ex) {
            if (setException.get() != null) {
                Assert.assertEquals("Unexpected exception thrown.", setException.get(), Exceptions.unwrap(ex));
            } else {
                // Not expecting any exception this time.
                throw ex;
            }
        }

        if (!generateAsyncException.get() && !generateSyncException.get() && setException.get() == null) {
            // We are done: at least one flush went through cleanly.
            break;
        }
    }

    // Verify data.
    byte[] expectedData = writtenData.toByteArray();
    byte[] actualData = new byte[expectedData.length];
    SegmentProperties storageInfo = context.storage.getStreamSegmentInfo(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    Assert.assertEquals("Unexpected number of bytes flushed to Storage.", expectedData.length, storageInfo.getLength());
    Assert.assertTrue("Segment is not sealed in storage post flush.", storageInfo.isSealed());
    Assert.assertTrue("Segment is not marked in metadata as sealed in storage post flush.", context.segmentAggregator.getMetadata().isSealedInStorage());
    context.storage.read(readHandle(context.segmentAggregator.getMetadata().getName()), 0, actualData, 0, actualData.length, TIMEOUT).join();
    Assert.assertArrayEquals("Unexpected data written to storage.", expectedData, actualData);
}
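The catch block above compares the unwrapped exception against the one that was injected. The unwrap step matters because flush() returns a CompletableFuture, so join() surfaces failures wrapped in a java.util.concurrent.CompletionException rather than as the injected cause itself. A small self-contained illustration of that mechanic (IntentionalException stands in for any injected cause):

    // join() on a failed future throws CompletionException; the injected
    // error is only reachable as its cause, hence Exceptions.unwrap(...) in
    // the assertions above.
    CompletableFuture<Void> future = new CompletableFuture<>();
    future.completeExceptionally(new IntentionalException("injected"));
    try {
        future.join();
        Assert.fail("join() should have thrown.");
    } catch (Exception ex) {
        Assert.assertTrue("Expected the injected cause.", Exceptions.unwrap(ex) instanceof IntentionalException);
    }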