Use of org.junit.rules.Timeout in project pravega by pravega.
Class DataFrameReaderTests, method testReadsWithDataLogFailure.
/**
* Tests the case when the DataFrameReader reads from a log and encounters log read failures:
* 1. Initial read failures.
* 2. Failures somewhere in the middle of reading.
*/
@Test
public void testReadsWithDataLogFailure() throws Exception {
// Fail reads synchronously every X attempts.
int failReadSyncEvery = 3;
ArrayList<TestLogItem> records = DataFrameTestHelpers.generateLogItems(100, SMALL_RECORD_MIN_SIZE, SMALL_RECORD_MAX_SIZE, 0);
records.addAll(DataFrameTestHelpers.generateLogItems(100, LARGE_RECORD_MIN_SIZE, LARGE_RECORD_MAX_SIZE, records.size()));
try (TestDurableDataLog dataLog = TestDurableDataLog.create(CONTAINER_ID, FRAME_SIZE, executorService())) {
dataLog.initialize(TIMEOUT);
BiConsumer<Throwable, DataFrameBuilder.CommitArgs> errorCallback = (ex, a) -> Assert.fail(String.format("Unexpected error occurred upon commit. %s", ex));
val args = new DataFrameBuilder.Args(Callbacks::doNothing, Callbacks::doNothing, errorCallback, executorService());
try (DataFrameBuilder<TestLogItem> b = new DataFrameBuilder<>(dataLog, SERIALIZER, args)) {
for (TestLogItem r : records) {
b.append(r);
}
}
TestSerializer logItemFactory = new TestSerializer();
// Test 1: Initial call to getReader.
ErrorInjector<Exception> getReaderErrorInjector = new ErrorInjector<>(
count -> true, // Fail every time.
() -> new DataLogNotAvailableException("intentional getReader exception"));
dataLog.setReadErrorInjectors(getReaderErrorInjector, null);
AssertExtensions.assertThrows(
"No exception or wrong type of exception thrown by getNext() with exception thrown by getReader().",
() -> new DataFrameReader<>(dataLog, logItemFactory, CONTAINER_ID),
ex -> Exceptions.unwrap(ex) == getReaderErrorInjector.getLastCycleException());
// Test 2: Failures during getNext().
ErrorInjector<Exception> readErrorInjector = new ErrorInjector<>(count -> count % failReadSyncEvery == 0, () -> new DataLogNotAvailableException("intentional getNext exception"));
dataLog.setReadErrorInjectors(null, readErrorInjector);
testReadWithException(dataLog, logItemFactory, ex -> ex == readErrorInjector.getLastCycleException());
}
}
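The two injectors above differ only in their trigger predicate: count -> true fails on every invocation (Test 1), while count -> count % failReadSyncEvery == 0 fails every failReadSyncEvery calls (Test 2). As a rough sketch of that pattern, and not pravega's actual ErrorInjector implementation, a minimal count-based injector could look like this (class and method names are hypothetical):

import java.util.function.Predicate;
import java.util.function.Supplier;

// Minimal count-based error injector (hypothetical sketch; not pravega's ErrorInjector).
class CountingErrorInjector<T extends Exception> {
    private final Predicate<Integer> trigger;    // decides, per invocation count, whether to fail
    private final Supplier<T> exceptionSupplier; // builds the exception to throw
    private int count = 0;
    private T lastException;

    CountingErrorInjector(Predicate<Integer> trigger, Supplier<T> exceptionSupplier) {
        this.trigger = trigger;
        this.exceptionSupplier = exceptionSupplier;
    }

    // Called by the instrumented component before each operation; throws when the predicate matches.
    void throwIfNecessary() throws T {
        int current = this.count++;
        if (this.trigger.test(current)) {
            this.lastException = this.exceptionSupplier.get();
            throw this.lastException;
        }
    }

    // The exception thrown on the most recent failing invocation (what the test's assertion compares against).
    T getLastCycleException() {
        return this.lastException;
    }
}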
Use of org.junit.rules.Timeout in project pravega by pravega.
Class DataFrameReaderTests, method testReadsWithDeserializationFailure.
/**
* Tests the case when the DataFrameReader reads from a log and encounters LogItem SerializationExceptions.
*/
@Test
public void testReadsWithDeserializationFailure() throws Exception {
// Fail deserialization every X records (write-wise).
int failDeserializationEvery = 11;
ArrayList<TestLogItem> records = DataFrameTestHelpers.generateLogItems(100, SMALL_RECORD_MIN_SIZE, SMALL_RECORD_MAX_SIZE, 0);
records.addAll(DataFrameTestHelpers.generateLogItems(100, LARGE_RECORD_MIN_SIZE, LARGE_RECORD_MAX_SIZE, records.size()));
try (TestDurableDataLog dataLog = TestDurableDataLog.create(CONTAINER_ID, FRAME_SIZE, executorService())) {
dataLog.initialize(TIMEOUT);
BiConsumer<Throwable, DataFrameBuilder.CommitArgs> errorCallback = (ex, a) -> Assert.fail(String.format("Unexpected error occurred upon commit. %s", ex));
val args = new DataFrameBuilder.Args(Callbacks::doNothing, Callbacks::doNothing, errorCallback, executorService());
try (DataFrameBuilder<TestLogItem> b = new DataFrameBuilder<>(dataLog, SERIALIZER, args)) {
for (TestLogItem r : records) {
b.append(r);
}
}
ErrorInjector<SerializationException> errorInjector = new ErrorInjector<>(count -> count % failDeserializationEvery == 0, () -> new SerializationException("TestLogItem.deserialize intentional"));
TestSerializer logItemFactory = new TestSerializer();
logItemFactory.setDeserializationErrorInjector(errorInjector);
testReadWithException(dataLog, logItemFactory, ex -> ex instanceof DataCorruptionException);
}
}
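Both tests above delegate to testReadWithException, which is not part of this excerpt. A plausible shape for such a helper is sketched below; this is a guess at the pattern, not pravega's actual helper, and it assumes that DataFrameReader is closeable and that getNext() returns null once the log is exhausted (the helper name and parameter names are made up):

// Hypothetical sketch of a helper in the spirit of testReadWithException (not the real implementation).
private void readUntilExpectedFailure(TestDurableDataLog dataLog, TestSerializer serializer,
                                      Predicate<Throwable> expectedError) throws Exception {
    try (DataFrameReader<TestLogItem> reader = new DataFrameReader<>(dataLog, serializer, CONTAINER_ID)) {
        while (true) {
            try {
                if (reader.getNext() == null) {
                    // Reached the end of the log without hitting the injected failure.
                    Assert.fail("Expected an injected failure before the end of the log.");
                }
            } catch (Exception ex) {
                Throwable actual = Exceptions.unwrap(ex);
                Assert.assertTrue("Unexpected exception type: " + actual, expectedError.test(actual));
                return;
            }
        }
    }
}

In this test the expected-error predicate is ex -> ex instanceof DataCorruptionException, whereas the previous test compared against the exact DataLogNotAvailableException instance produced by its injector.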
Use of org.junit.rules.Timeout in project pravega by pravega.
Class DataFrameBuilderTests, method testAppendNoFailure.
private void testAppendNoFailure(int delayMillis) throws Exception {
// Happy case: append a bunch of data, and make sure the frames that get output contain it.
ArrayList<TestLogItem> records = DataFrameTestHelpers.generateLogItems(RECORD_COUNT / 2, SMALL_RECORD_MIN_SIZE, SMALL_RECORD_MAX_SIZE, 0);
records.addAll(DataFrameTestHelpers.generateLogItems(RECORD_COUNT / 2, LARGE_RECORD_MIN_SIZE, LARGE_RECORD_MAX_SIZE, records.size()));
try (TestDurableDataLog dataLog = TestDurableDataLog.create(CONTAINER_ID, FRAME_SIZE, delayMillis, executorService())) {
dataLog.initialize(TIMEOUT);
val order = new HashMap<DataFrameBuilder.CommitArgs, Integer>();
List<DataFrameBuilder.CommitArgs> commitFrames = Collections.synchronizedList(new ArrayList<>());
BiConsumer<Throwable, DataFrameBuilder.CommitArgs> errorCallback = (ex, a) -> Assert.fail(String.format("Unexpected error occurred upon commit. %s", ex));
val args = new DataFrameBuilder.Args(DataFrameTestHelpers.appendOrder(order), commitFrames::add, errorCallback, executorService());
try (DataFrameBuilder<TestLogItem> b = new DataFrameBuilder<>(dataLog, SERIALIZER, args)) {
for (TestLogItem item : records) {
b.append(item);
}
b.close();
}
// Wait for all the frames' commit callbacks to be invoked. Even though the DataFrameBuilder waits (upon close)
// for the OrderedItemProcessor to finish, there are other callbacks chained that need to be completed (such
// as the one collecting frames in the list above).
TestUtils.await(() -> commitFrames.size() >= order.size(), delayMillis, TIMEOUT.toMillis());
// It is quite likely that acks will arrive out of order. The DataFrameBuilder has no responsibility for
// rearrangement; that should be done by its user.
commitFrames.sort(Comparator.comparingInt(order::get));
// Check the correctness of the commit callback.
AssertExtensions.assertGreaterThan("Not enough Data Frames were generated.", 1, commitFrames.size());
DataFrameBuilder.CommitArgs previousCommitArgs = null;
for (val ca : commitFrames) {
if (previousCommitArgs != null) {
AssertExtensions.assertGreaterThanOrEqual("CommitArgs.getLastFullySerializedSequenceNumber() is not monotonically increasing.", previousCommitArgs.getLastFullySerializedSequenceNumber(), ca.getLastFullySerializedSequenceNumber());
AssertExtensions.assertGreaterThanOrEqual("CommitArgs.getLastStartedSequenceNumber() is not monotonically increasing.", previousCommitArgs.getLastStartedSequenceNumber(), ca.getLastStartedSequenceNumber());
AssertExtensions.assertGreaterThanOrEqual("CommitArgs.getLogAddress() is not monotonically increasing.", previousCommitArgs.getLogAddress().getSequence(), ca.getLogAddress().getSequence());
}
previousCommitArgs = ca;
}
// Read all entries in the Log and interpret them as DataFrames, then verify the records can be reconstructed.
val frames = dataLog.getAllEntries(readItem -> DataFrame.read(readItem.getPayload(), readItem.getLength(), readItem.getAddress()));
DataFrameTestHelpers.checkReadRecords(frames, records, r -> new ByteArraySegment(r.getFullSerialization()));
}
}
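DataFrameTestHelpers.appendOrder(order) is not shown in this excerpt; its job is to tag each CommitArgs with the position at which the builder started processing it, so that the asynchronously collected (and possibly out-of-order) acks can later be sorted back into submission order. A minimal sketch of that kind of order-recording callback, as a hypothetical stand-in rather than the real helper, is:

import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Consumer;

// Hypothetical order-recording callback: each invocation tags the given key with the next
// sequence number. If the callback can fire on multiple threads, pass in a concurrent map.
static <T> Consumer<T> recordOrder(Map<T, Integer> order) {
    AtomicInteger next = new AtomicInteger();
    return item -> order.put(item, next.getAndIncrement());
}

Sorting commitFrames with Comparator.comparingInt(order::get) then restores that order, which is what makes the monotonicity assertions in the loop above meaningful.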
Use of org.junit.rules.Timeout in project pravega by pravega.
Class ContainerReadIndexTests, method setupMergeRead.
/**
* Sets up a partial-merge Future Read for the two segments:
* 1. Writes some data to transactionId (both Read Index and Storage).
* 2. Calls beginMerge(parentId, transactionId)
* 3. Executes a Storage concat (transactionId -> parentId)
* 4. Clears the cache and returns a ReadResultEntry that requests data at the first offset of the merged transaction.
* NOTE: this does not call completeMerge().
*/
private ReadResultEntry setupMergeRead(long parentId, long transactionId, byte[] txnData, TestContext context) throws Exception {
int mergeOffset = 1;
UpdateableSegmentMetadata parentMetadata = context.metadata.getStreamSegmentMetadata(parentId);
UpdateableSegmentMetadata transactionMetadata = context.metadata.getStreamSegmentMetadata(transactionId);
appendSingleWrite(parentId, new ByteArraySegment(new byte[mergeOffset]), context);
context.storage.openWrite(parentMetadata.getName()).thenCompose(handle -> context.storage.write(handle, 0, new ByteArrayInputStream(new byte[mergeOffset]), mergeOffset, TIMEOUT)).join();
// Write something to the transaction, and make sure it also makes its way to Storage.
appendSingleWrite(transactionId, new ByteArraySegment(txnData), context);
val transactionWriteHandle = context.storage.openWrite(transactionMetadata.getName()).join();
context.storage.write(transactionWriteHandle, 0, new ByteArrayInputStream(txnData), txnData.length, TIMEOUT).join();
transactionMetadata.setStorageLength(transactionMetadata.getLength());
// Seal & Begin-merge the transaction (do not seal in storage).
transactionMetadata.markSealed();
parentMetadata.setLength(transactionMetadata.getLength() + mergeOffset);
context.readIndex.beginMerge(parentId, mergeOffset, transactionId);
transactionMetadata.markMerged();
// Clear the cache.
context.cacheManager.applyCachePolicy();
// Issue read from the parent and fetch the first entry (there should only be one).
ReadResult rr = context.readIndex.read(parentId, mergeOffset, txnData.length, TIMEOUT);
Assert.assertTrue("Parent Segment read indicates no data available.", rr.hasNext());
ReadResultEntry entry = rr.next();
Assert.assertEquals("Unexpected offset for read result entry.", mergeOffset, entry.getStreamSegmentOffset());
Assert.assertEquals("Served read result entry is not from storage.", ReadResultEntryType.Storage, entry.getType());
// Merge the transaction in storage. Do not complete-merge it.
transactionMetadata.markSealed();
transactionMetadata.markSealedInStorage();
transactionMetadata.markDeleted();
context.storage.seal(transactionWriteHandle, TIMEOUT).join();
val parentWriteHandle = context.storage.openWrite(parentMetadata.getName()).join();
context.storage.concat(parentWriteHandle, mergeOffset, transactionWriteHandle.getSegmentName(), TIMEOUT).join();
parentMetadata.setStorageLength(parentMetadata.getLength());
// Attempt to extract data from the read.
entry.requestContent(TIMEOUT);
Assert.assertFalse("Not expecting the read to be completed.", entry.getContent().isDone());
return entry;
}
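For orientation, the parent-segment state this helper leaves behind (with mergeOffset = 1 and L = txnData.length) is summarized below; the summary only restates what the code above does, plus the immediate assertion on the returned entry.

// Parent segment after setupMergeRead (mergeOffset = 1, L = txnData.length):
//
//   offset 0      : one filler byte, appended to the ReadIndex and written to Storage
//   offsets 1..L  : the transaction's data
//                   - known to the ReadIndex only via beginMerge(parentId, 1, transactionId);
//                     completeMerge() is deliberately never called
//                   - present in Storage via seal + concat of the transaction segment at offset 1
//
//   parentMetadata.getLength()        == 1 + L   (set just before beginMerge)
//   parentMetadata.getStorageLength() == 1 + L   (set just after the Storage concat)
//
// The returned ReadResultEntry targets offset 1, is of type Storage, and, as asserted above,
// its content future is not yet complete immediately after requestContent().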
Use of org.junit.rules.Timeout in project pravega by pravega.
Class ContainerReadIndexTests, method testCacheEviction.
/**
* Tests the ability to evict entries from the ReadIndex under various conditions:
* * If an entry is aged out
* * If an entry is pushed out because of cache space pressure.
*
* This also verifies that certain entries, such as RedirectReadIndexEntries and entries after the Storage Offset are
* not removed.
*
* The way this test goes is as follows (it's pretty subtle, because there aren't many ways to hook into the ReadIndex and see what it's doing):
* 1. It creates a bunch of segments, and populates them in storage (each) up to offset N/2-1 (this is called pre-storage)
* 2. It populates the ReadIndex for each of those segments from offset N/2 to offset N-1 (this is called post-storage)
* 3. It loads all the data from Storage into the ReadIndex, in entries of size equal to those already loaded in step #2.
* 3a. At this point, all the entries added in step #2 have generations 0..A/4-1, and those added in step #3 have generations A/4..A-1.
* 4. Append more data at the end. This forces the generation to increase to 1.25A.
* 4a. Nothing should be evicted from the cache now, since the earliest items are all post-storage.
* 5. We 'touch' (read) the first 1/3 of pre-storage entries (offsets 0..N/4).
* 5a. At this point, those entries (offsets 0..N/4) will have the newest generations (1.25A..1.5A).
* 6. We append more data (equivalent to the data we touched)
* 6a. Nothing should be evicted, since those generations that were just eligible for removal were touched and bumped up.
* 7. We forcefully increase the current generation by 1 (without touching the ReadIndex)
* 7a. At this point, we expect all the pre-storage items, except the touched ones, to be evicted. This is generations 0.25A-0.75A.
* 8. Update the metadata and indicate that all the post-storage entries are now pre-storage and bump the generation by 0.75A.
* 8a. At this point, we expect all former post-storage items and pre-storage items to be evicted (in this order).
* <p>
* The final order of eviction (in terms of offsets, for each segment), is:
* 0.25N..0.75N, 0.75N..N, N..1.25N, 0..0.25N, 1.25N..1.5N (remember that we added quite a bunch of items after the initial run).
*/
@Test
@SuppressWarnings("checkstyle:CyclomaticComplexity")
public void testCacheEviction() throws Exception {
// Create a CachePolicy with a set number of generations and a known max size.
// Each generation contains exactly one entry, so the number of generations is also the number of entries.
// We append one byte at each time. This allows us to test edge cases as well by having the finest precision when
// it comes to selecting which bytes we want evicted and which kept.
final int appendSize = 1;
// This also doubles as number of generations (each generation, we add one append for each segment).
final int entriesPerSegment = 100;
final int cacheMaxSize = SEGMENT_COUNT * entriesPerSegment * appendSize;
// 25% of the entries are beyond the StorageOffset
final int postStorageEntryCount = entriesPerSegment / 4;
// 75% of the entries are before the StorageOffset.
final int preStorageEntryCount = entriesPerSegment - postStorageEntryCount;
CachePolicy cachePolicy = new CachePolicy(cacheMaxSize, 1.0, 1.0, Duration.ofMillis(1000 * 2 * entriesPerSegment), Duration.ofMillis(1000));
// To properly test this, we want predictable storage reads.
ReadIndexConfig config = ReadIndexConfig.builder().with(ReadIndexConfig.STORAGE_READ_ALIGNMENT, appendSize).build();
ArrayList<Integer> removedEntries = new ArrayList<>();
@Cleanup TestContext context = new TestContext(config, cachePolicy);
// To ease our testing, we disable appends and instruct the TestCache to report the same value for UsedBytes as it
// has for StoredBytes. This shields us from having to know internal details about the layout of the cache.
context.cacheStorage.usedBytesSameAsStoredBytes = true;
context.cacheStorage.disableAppends = true;
// Record every cache removal.
context.cacheStorage.deleteCallback = removedEntries::add;
// Create the segments (metadata + storage).
ArrayList<Long> segmentIds = createSegments(context);
createSegmentsInStorage(context);
// Populate the Storage with appropriate data.
byte[] preStorageData = new byte[preStorageEntryCount * appendSize];
for (long segmentId : segmentIds) {
UpdateableSegmentMetadata sm = context.metadata.getStreamSegmentMetadata(segmentId);
val handle = context.storage.openWrite(sm.getName()).join();
context.storage.write(handle, 0, new ByteArrayInputStream(preStorageData), preStorageData.length, TIMEOUT).join();
sm.setStorageLength(preStorageData.length);
sm.setLength(preStorageData.length);
}
val cacheMappings = new HashMap<Integer, SegmentOffset>();
// Callback that appends one entry at the end of the given segment id.
Consumer<Long> appendOneEntry = segmentId -> {
UpdateableSegmentMetadata sm = context.metadata.getStreamSegmentMetadata(segmentId);
byte[] data = new byte[appendSize];
long offset = sm.getLength();
sm.setLength(offset + data.length);
try {
context.cacheStorage.insertCallback = address -> cacheMappings.put(address, new SegmentOffset(segmentId, offset));
context.readIndex.append(segmentId, offset, new ByteArraySegment(data));
} catch (StreamSegmentNotExistsException ex) {
throw new CompletionException(ex);
}
};
// Populate the ReadIndex with the Append entries (post-StorageOffset)
for (int i = 0; i < postStorageEntryCount; i++) {
segmentIds.forEach(appendOneEntry);
// Each time we make a round of appends (one per segment), we increment the generation in the CacheManager.
context.cacheManager.applyCachePolicy();
}
// Read all the data from Storage, making sure we carefully associate them with the proper generation.
for (int i = 0; i < preStorageEntryCount; i++) {
long offset = i * appendSize;
for (long segmentId : segmentIds) {
@Cleanup ReadResult result = context.readIndex.read(segmentId, offset, appendSize, TIMEOUT);
ReadResultEntry resultEntry = result.next();
Assert.assertEquals("Unexpected type of ReadResultEntry when trying to load up data into the ReadIndex Cache.", ReadResultEntryType.Storage, resultEntry.getType());
CompletableFuture<Void> insertedInCache = new CompletableFuture<>();
context.cacheStorage.insertCallback = address -> {
cacheMappings.put(address, new SegmentOffset(segmentId, offset));
insertedInCache.complete(null);
};
resultEntry.requestContent(TIMEOUT);
BufferView contents = resultEntry.getContent().get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
Assert.assertFalse("Not expecting more data to be available for reading.", result.hasNext());
Assert.assertEquals("Unexpected ReadResultEntry length when trying to load up data into the ReadIndex Cache.", appendSize, contents.getLength());
// Wait for the entry to be inserted into the cache before moving on.
insertedInCache.get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
}
context.cacheManager.applyCachePolicy();
}
Assert.assertEquals("Not expecting any removed Cache entries at this point (cache is not full).", 0, removedEntries.size());
// Append more data (equivalent to all post-storage entries), and verify that NO entries are being evicted (we cannot evict post-storage entries).
for (int i = 0; i < postStorageEntryCount; i++) {
segmentIds.forEach(appendOneEntry);
context.cacheManager.applyCachePolicy();
}
Assert.assertEquals("Not expecting any removed Cache entries at this point (only eligible entries were post-storage).", 0, removedEntries.size());
// 'Touch' the first few entries read from storage. This should move them to the back of the queue (they won't be the first ones to be evicted).
int touchCount = preStorageEntryCount / 3;
for (int i = 0; i < touchCount; i++) {
long offset = i * appendSize;
for (long segmentId : segmentIds) {
@Cleanup ReadResult result = context.readIndex.read(segmentId, offset, appendSize, TIMEOUT);
ReadResultEntry resultEntry = result.next();
Assert.assertEquals("Unexpected type of ReadResultEntry when trying to load up data into the ReadIndex Cache.", ReadResultEntryType.Cache, resultEntry.getType());
}
}
// Append more data (equivalent to the amount of data we 'touched'), and verify that the entries we just touched are not being removed.
for (int i = 0; i < touchCount; i++) {
segmentIds.forEach(appendOneEntry);
context.cacheManager.applyCachePolicy();
}
Assert.assertEquals("Not expecting any removed Cache entries at this point (we touched old entries and they now have the newest generation).", 0, removedEntries.size());
// Increment the generations so that we are caught up to just before the generation where the "touched" items now live.
context.cacheManager.applyCachePolicy();
// We expect all but the 'touchCount' pre-Storage entries to be removed.
int expectedRemovalCount = (preStorageEntryCount - touchCount) * SEGMENT_COUNT;
Assert.assertEquals("Unexpected number of removed entries after having forced out all pre-storage entries.", expectedRemovalCount, removedEntries.size());
// Now update the metadata and indicate that all the post-storage data has been moved to storage.
segmentIds.forEach(segmentId -> {
UpdateableSegmentMetadata sm = context.metadata.getStreamSegmentMetadata(segmentId);
sm.setStorageLength(sm.getLength());
});
// We add one artificial entry, which we'll be touching forever and ever; this forces the CacheManager to
// update its current generation every time. We will be ignoring this entry for our test.
SegmentMetadata readSegment = context.metadata.getStreamSegmentMetadata(segmentIds.get(0));
appendOneEntry.accept(readSegment.getId());
// Now evict everything (whether by size or by aging out).
for (int i = 0; i < cachePolicy.getMaxGenerations(); i++) {
@Cleanup ReadResult result = context.readIndex.read(readSegment.getId(), readSegment.getLength() - appendSize, appendSize, TIMEOUT);
result.next();
context.cacheManager.applyCachePolicy();
}
int expectedRemovalCountPerSegment = entriesPerSegment + touchCount + postStorageEntryCount;
int expectedTotalRemovalCount = SEGMENT_COUNT * expectedRemovalCountPerSegment;
Assert.assertEquals("Unexpected number of removed entries after having forced out all the entries.", expectedTotalRemovalCount, removedEntries.size());
// Finally, verify that the evicted items are in the correct order (for each segment). See this test's description for details.
for (long segmentId : segmentIds) {
List<SegmentOffset> segmentRemovedKeys = removedEntries.stream().map(cacheMappings::get).filter(e -> e.segmentId == segmentId).collect(Collectors.toList());
Assert.assertEquals("Unexpected number of removed entries for segment " + segmentId, expectedRemovalCountPerSegment, segmentRemovedKeys.size());
// The correct order of eviction (N = entriesPerSegment) is: 0.25N..0.75N, 0.75N..N, N..1.25N, 0..0.25N, 1.25N..1.5N.
// This is equivalent to the following tests:
// 0.25N..1.25N
checkOffsets(segmentRemovedKeys, segmentId, 0, entriesPerSegment, entriesPerSegment * appendSize / 4, appendSize);
// 0..0.25N
checkOffsets(segmentRemovedKeys, segmentId, entriesPerSegment, entriesPerSegment / 4, 0, appendSize);
// 1.25N..1.5N
checkOffsets(segmentRemovedKeys, segmentId, entriesPerSegment + entriesPerSegment / 4, entriesPerSegment / 4, (int) (entriesPerSegment * appendSize * 1.25), appendSize);
}
}
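To make the accounting in the assertions above concrete, here is the arithmetic with the constants used in this test (entriesPerSegment = 100, appendSize = 1); SEGMENT_COUNT is a class constant not shown in this excerpt, so the figures below are per segment:

// Worked eviction accounting for testCacheEviction (N = entriesPerSegment = 100, appendSize = 1).
static void evictionAccounting() {
    final int entriesPerSegment = 100;
    final int postStorageEntryCount = entriesPerSegment / 4;                    // 25 entries beyond the StorageOffset
    final int preStorageEntryCount = entriesPerSegment - postStorageEntryCount; // 75 entries before it
    final int touchCount = preStorageEntryCount / 3;                            // 25 entries re-read ("touched") in step 5

    // First eviction wave (step 7a): every pre-storage entry except the touched ones.
    final int firstWave = preStorageEntryCount - touchCount;                    // 75 - 25 = 50, i.e. offsets 0.25N..0.75N

    // Final per-segment total, matching the order 0.25N..0.75N, 0.75N..N, N..1.25N, 0..0.25N, 1.25N..1.5N.
    final int finalTotal = entriesPerSegment + touchCount + postStorageEntryCount; // 100 + 25 + 25 = 150 = 50 + 25 + 25 + 25 + 25
    System.out.println(firstWave + " removals per segment in the first wave, " + finalTotal + " per segment overall;"
            + " multiply each by SEGMENT_COUNT for the totals asserted in the test.");
}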