Use of org.junit.rules.Timeout in project pravega by pravega.
Class: ContainerReadIndexTests, method: testCacheEssentialOnlyMode.
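For context (not part of the snippet below): the Timeout rule that gives these examples their name is declared once at class level in JUnit 4 and applies to every test method. A minimal sketch, with an illustrative field name and timeout value rather than Pravega's actual ones:

import java.time.Duration;
import org.junit.Rule;
import org.junit.rules.Timeout;

public class ContainerReadIndexTests {
    // Fails any individual test method that runs longer than 30 seconds.
    @Rule
    public Timeout globalTimeout = Timeout.seconds(30);

    // The TIMEOUT constant passed to the async calls in the snippets below (value is an assumption).
    protected static final Duration TIMEOUT = Duration.ofSeconds(30);
}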
/**
* Tests the ability of the Read Index to handle "Essential-Only" cache mode, where only cache entries that are not
* yet persisted to Storage may be added to the cache.
*/
@Test
public void testCacheEssentialOnlyMode() throws Exception {
    val rnd = new Random(0);
    val appendSize = 4 * 1024; // Cache block size.
    val segmentLength = 10 * appendSize;
    // Set up a cache policy that will keep at most 4 blocks in the cache and enter essential-only mode at 4 blocks too.
    // NOTE: this count includes the metadata block (internal to the cache), so only 3 blocks are usable.
    CachePolicy cachePolicy = new CachePolicy(segmentLength, 0.3, 0.4, Duration.ofHours(1000), Duration.ofSeconds(1));
    @Cleanup TestContext context = new TestContext(DEFAULT_CONFIG, cachePolicy);
    // Not blocking anything now.
    context.cacheStorage.appendReturnBlocker = null;
    // Create a segment, generate some content for it, set up its metadata and write 20% of it (parts 1 and 2) to Storage.
    long segmentId = createSegment(0, context);
    val segmentMetadata = context.metadata.getStreamSegmentMetadata(segmentId);
    createSegmentsInStorage(context);
    val segmentData = new byte[segmentLength];
    rnd.nextBytes(segmentData);
    val part1 = new ByteArraySegment(segmentData, 0, appendSize);
    val part2 = new ByteArraySegment(segmentData, appendSize, appendSize);
    val part3 = new ByteArraySegment(segmentData, 2 * appendSize, appendSize);
    val part4 = new ByteArraySegment(segmentData, 3 * appendSize, appendSize);
    val part5 = new ByteArraySegment(segmentData, 4 * appendSize, appendSize);
    segmentMetadata.setLength(segmentLength);
    segmentMetadata.setStorageLength(part1.getLength() + part2.getLength());
    context.storage.openWrite(segmentMetadata.getName())
            .thenCompose(h -> context.storage.write(h, 0, new ByteArrayInputStream(segmentData), (int) segmentMetadata.getStorageLength(), TIMEOUT))
            .join();
    val insertCount = new AtomicInteger(0);
    val storageReadCount = new AtomicInteger(0);
    context.cacheStorage.insertCallback = address -> insertCount.incrementAndGet();
    context.storage.setReadInterceptor((segment, wrappedStorage) -> storageReadCount.incrementAndGet());
    // Helper for reading a segment part and validating its contents.
    BiConsumer<Long, BufferView> readPart = (partOffset, partContents) -> {
        try {
            @Cleanup val rr = context.readIndex.read(segmentId, partOffset, partContents.getLength(), TIMEOUT);
            val readData = rr.readRemaining(partContents.getLength(), TIMEOUT);
            Assert.assertEquals(partContents, BufferView.wrap(readData));
        } catch (Exception ex) {
            throw new CompletionException(ex);
        }
    };
    // Read parts 1 and 2 (separately). They should be cached as individual entries.
    readPart.accept(0L, part1);
    Assert.assertEquals(1, storageReadCount.get());
    // Cache insertion is done asynchronously; wait for it to complete before checking.
    AssertExtensions.assertEventuallyEquals(1, insertCount::get, TIMEOUT.toMillis());
    AssertExtensions.assertEventuallyEquals(1, context.readIndex.getIndex(segmentId).getSummary()::size, TIMEOUT.toMillis());
    // No eviction, but increase the generation.
    boolean evicted = context.cacheManager.applyCachePolicy();
    Assert.assertFalse("Not expecting an eviction now.", evicted);
    readPart.accept((long) part1.getLength(), part2);
    // We expect 2 storage reads and also 2 cache inserts.
    Assert.assertEquals(2, storageReadCount.get());
    // This one is done asynchronously.
    AssertExtensions.assertEventuallyEquals(2, insertCount::get, TIMEOUT.toMillis());
    AssertExtensions.assertEventuallyEquals(2, context.readIndex.getIndex(segmentId).getSummary()::size, TIMEOUT.toMillis());
    // No eviction, but increase the generation.
    evicted = context.cacheManager.applyCachePolicy();
    Assert.assertFalse("Not expecting an eviction now.", evicted);
    // Append parts 3, 4 and 5.
    context.readIndex.append(segmentId, segmentMetadata.getStorageLength(), part3);
    // This insertion is done synchronously.
    Assert.assertEquals(3, insertCount.get());
    // Eviction (part 1) + increase the generation.
    evicted = context.cacheManager.applyCachePolicy();
    Assert.assertTrue("Expected an eviction after writing 3 blocks.", evicted);
    context.readIndex.append(segmentId, segmentMetadata.getStorageLength() + part3.getLength(), part4);
    Assert.assertEquals("Expected an insertion for appends even in essential-only mode.", 4, insertCount.get());
    // Eviction (part 2) + increase the generation.
    evicted = context.cacheManager.applyCachePolicy();
    Assert.assertTrue("Expected an eviction after writing 4 blocks.", evicted);
    context.readIndex.append(segmentId, segmentMetadata.getStorageLength() + part3.getLength() + part4.getLength(), part5);
    Assert.assertEquals("Expected an insertion for appends even in essential-only mode.", 5, insertCount.get());
    // Nothing to evict.
    evicted = context.cacheManager.applyCachePolicy();
    Assert.assertFalse("Not expecting an eviction after writing 5 blocks.", evicted);
    Assert.assertTrue("Expected to be in essential-only mode after pinning 3 blocks.", context.cacheManager.isEssentialEntriesOnly());
    // Verify that re-reading parts 1 and 2 results in no cache inserts.
    insertCount.set(0);
    storageReadCount.set(0);
    int expectedReadCount = 0;
    for (int i = 0; i < 5; i++) {
        readPart.accept(0L, part1);
        readPart.accept((long) part1.getLength(), part2);
        expectedReadCount += 2;
    }
    Assert.assertTrue("Not expecting to have exited essential-only mode.", context.cacheManager.isEssentialEntriesOnly());
    Assert.assertEquals("Unexpected number of storage reads in essential-only mode.", expectedReadCount, storageReadCount.get());
    Assert.assertEquals("Unexpected number of cache inserts in essential-only mode.", 0, insertCount.get());
}
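The block arithmetic behind the cache policy above is worth spelling out. Assuming the 0.3 and 0.4 arguments to CachePolicy are the target and maximum utilization fractions of the maximum cache size (which is what the test's own comment suggests), the thresholds work out as follows:

// Back-of-envelope check of the CachePolicy above (utilization semantics are an assumption):
long maxSize = 10 * 4 * 1024;              // segmentLength: 10 cache blocks of 4 KB = 40960 bytes.
long targetBytes = (long) (0.3 * maxSize); // 12288 bytes = 3 blocks: eviction aims to get back under this.
long maxBytes = (long) (0.4 * maxSize);    // 16384 bytes = 4 blocks: crossing this enables essential-only mode.
// One of those 4 blocks is the cache's internal metadata block, leaving 3 usable data blocks;
// hence the first eviction on the third append (part3) and the "pinning 3 blocks" assertion.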
Use of org.junit.rules.Timeout in project pravega by pravega.
Class: ContainerReadIndexTests, method: testStorageReadsConcurrentWithOverwrite.
private void testStorageReadsConcurrentWithOverwrite(int offsetDeltaBetweenReads) throws Exception {
    testStorageReadsConcurrent(offsetDeltaBetweenReads, 1, (context, metadata) -> {
        // Do nothing.
    }, (context, metadata) -> {
        // Check all the appended data. It must not have been overwritten.
        Assert.assertEquals("Not expecting any extra data in this test.", metadata.getLength(), metadata.getStorageLength());
        val readResult = context.readIndex.read(metadata.getId(), 0, (int) metadata.getStorageLength(), TIMEOUT);
        // Read from the segment.
        byte[] segmentData = new byte[(int) metadata.getStorageLength()];
        readResult.readRemaining(segmentData, TIMEOUT);
        // Then from Storage.
        byte[] storageData = new byte[segmentData.length];
        context.storage.openRead(metadata.getName())
                .thenCompose(handle -> context.storage.read(handle, 0, storageData, 0, storageData.length, TIMEOUT))
                .join();
        Assert.assertArrayEquals("Unexpected appended data read back.", storageData, segmentData);
        // The cleanup is async, so we must keep trying to check until it is done.
        AssertExtensions.assertEventuallyEquals("Unexpected number of bytes in the cache.", (long) storageData.length,
                () -> context.cacheStorage.getState().getStoredBytes(), 10, TIMEOUT.toMillis());
    });
}
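AssertExtensions.assertEventuallyEquals (used above with a 10 ms polling interval) retries a supplier until it returns the expected value or the timeout elapses. If that Pravega test utility is unavailable, a minimal equivalent sketch (a hypothetical helper, not the project's implementation) could look like this:

import java.util.function.Supplier;

final class PollingAsserts {
    // Polls the supplier every pollMillis until it yields the expected value or timeoutMillis elapses.
    static <T> void assertEventuallyEquals(T expected, Supplier<T> actual, long pollMillis, long timeoutMillis)
            throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMillis;
        T last = actual.get();
        while (!expected.equals(last) && System.currentTimeMillis() < deadline) {
            Thread.sleep(pollMillis);
            last = actual.get();
        }
        if (!expected.equals(last)) {
            throw new AssertionError("Expected " + expected + " but was " + last + " after " + timeoutMillis + " ms.");
        }
    }
}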
Use of org.junit.rules.Timeout in project pravega by pravega.
Class: AsyncReadResultProcessorTests, method: testProcessAll.
/**
* Tests the {@link AsyncReadResultProcessor#processAll} method.
*/
@Test
public void testProcessAll() throws Exception {
    // Pre-generate some entries.
    ArrayList<byte[]> entries = new ArrayList<>();
    int totalLength = generateEntries(entries);
    // Set up the entry supplier.
    AtomicInteger currentIndex = new AtomicInteger();
    StreamSegmentReadResult.NextEntrySupplier supplier = (offset, length, makeCopy) -> {
        int idx = currentIndex.getAndIncrement();
        if (idx == entries.size() - 1) {
            // Future read result (the last entry completes asynchronously).
            Supplier<BufferView> entryContentsSupplier = () -> new ByteArraySegment(entries.get(idx));
            return new TestFutureReadResultEntry(offset, length, entryContentsSupplier, executorService());
        } else if (idx >= entries.size()) {
            return null;
        }
        // Normal (cache) read.
        return new CacheReadResultEntry(offset, entries.get(idx), 0, entries.get(idx).length);
    };
    // Fetch all the data and compare with expected.
    @Cleanup StreamSegmentReadResult rr = new StreamSegmentReadResult(0, totalLength, supplier, "");
    val result = AsyncReadResultProcessor.processAll(rr, executorService(), TIMEOUT);
    val actualData = result.get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS).getReader();
    val expectedData = new SequenceInputStream(Iterators.asEnumeration(entries.stream().map(ByteArrayInputStream::new).iterator()));
    AssertExtensions.assertStreamEquals("Unexpected data read back.", expectedData, actualData, totalLength);
}
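The generateEntries helper is not shown on this page; from the call site it must populate the list with test payloads and return their combined length. A plausible stand-in (entry count and size are assumptions, purely illustrative):

import java.util.ArrayList;
import java.util.Random;

// Hypothetical stand-in for the test class's generateEntries helper.
private int generateEntries(ArrayList<byte[]> entries) {
    Random rnd = new Random(0);
    int totalLength = 0;
    for (int i = 0; i < 10; i++) {
        byte[] entry = new byte[1024]; // Entry size is an assumption.
        rnd.nextBytes(entry);
        entries.add(entry);
        totalLength += entry.length;
    }
    return totalLength;
}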
Use of org.junit.rules.Timeout in project pravega by pravega.
Class: AsyncTableEntryReaderTests, method: testBufferCompaction.
private <T> void testBufferCompaction(GetEntryReader<T> createReader, Function<T, TableKey> getKey, Function<T, BufferView> getValue) throws Exception {
    // Both lengths must be less than AsyncTableEntryReader.INITIAL_READ_LENGTH / 2 (to ease testing).
    val keyLength = 3987;
    val valueLength = 3123;
    val serializer = new EntrySerializer();
    // Generate a number of entries. We only care about the first one, but we want to ensure that we have enough other
    // data to force the ReadResult to try to read more.
    val testItems = generateTestItems(() -> keyLength, () -> valueLength);
    val entries = testItems.stream()
            .filter(i -> !i.isRemoval)
            .map(i -> TableEntry.unversioned(new ByteArraySegment(i.key), new ByteArraySegment(i.value)))
            .collect(Collectors.toList());
    // Search for the first Key/Entry. This makes it easier, as we don't have to guess the versions, offsets, etc.
    val soughtEntry = entries.get(0);
    val segmentData = serializer.serializeUpdate(entries).getCopy();
    @Cleanup val readResultNoCompact = new ReadResultMock(segmentData, keyLength + valueLength + 20, keyLength + 200);
    val readerNoCompact = createReader.apply(soughtEntry.getKey().getKey(), 0L, serializer, new TimeoutTimer(TIMEOUT));
    testBufferCompaction(readerNoCompact, readResultNoCompact, getKey, getValue, false);
    @Cleanup val readResultWithCompact = new ReadResultMock(segmentData, segmentData.length, segmentData.length);
    val readerWithCompact = createReader.apply(soughtEntry.getKey().getKey(), 0L, serializer, new TimeoutTimer(TIMEOUT));
    testBufferCompaction(readerWithCompact, readResultWithCompact, getKey, getValue, true);
}
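Note that each reader run above receives a fresh TimeoutTimer rather than the fixed TIMEOUT Duration used elsewhere on this page. A TimeoutTimer starts ticking at construction, so successive async steps share one shrinking time budget; a rough usage sketch (doFirstStep and doSecondStep are placeholders, and getRemaining() is assumed to behave as in io.pravega.common.TimeoutTimer):

TimeoutTimer timer = new TimeoutTimer(Duration.ofSeconds(30));
doFirstStep(timer.getRemaining());  // Consumes part of the 30-second budget.
doSecondStep(timer.getRemaining()); // Sees only whatever time is left.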
Use of org.junit.rules.Timeout in project pravega by pravega.
Class: BookKeeperLogTests, method: testMissingEmptyLedgers.
/**
 * Verifies that {@link BookKeeperLog#initialize} can handle the case where a Ledger is marked as Empty
 * but has also been deleted from BookKeeper. Such a ledger should be ignored and must not cause initialization to fail.
 */
@Test
public void testMissingEmptyLedgers() throws Exception {
    final int count = 10;
    // Every 5th Ledger has data.
    final int writeEvery = 5;
    final Predicate<Integer> shouldAppendAnything = i -> i % writeEvery == 0;
    val currentMetadata = new AtomicReference<LogMetadata>();
    for (int i = 0; i < count; i++) {
        boolean isEmpty = !shouldAppendAnything.test(i);
        try (BookKeeperLog log = (BookKeeperLog) createDurableDataLog()) {
            log.initialize(TIMEOUT);
            currentMetadata.set(log.loadMetadata());
            // Delete the last Empty ledger, if any.
            val toDelete = Lists.reverse(currentMetadata.get().getLedgers()).stream()
                    .filter(m -> m.getStatus() == LedgerMetadata.Status.Empty)
                    .findFirst().orElse(null);
            if (toDelete != null) {
                Ledgers.delete(toDelete.getLedgerId(), this.factory.get().getBookKeeperClient());
            }
            // Append some data to this Ledger, if needed - this will mark it as NotEmpty.
            if (!isEmpty) {
                log.append(new CompositeByteArraySegment(getWriteData()), TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
            }
        }
    }
    // Now delete a valid (non-empty) ledger and verify that initialization actually throws.
    val validLedgerToDelete = Lists.reverse(currentMetadata.get().getLedgers()).stream()
            .filter(m -> m.getStatus() != LedgerMetadata.Status.Empty)
            .findFirst().orElse(null);
    if (validLedgerToDelete != null) {
        Ledgers.delete(validLedgerToDelete.getLedgerId(), this.factory.get().getBookKeeperClient());
    }
    AssertExtensions.assertThrows("No exception thrown if a valid ledger was deleted.", () -> {
        @Cleanup val log = createDurableDataLog();
        log.initialize(TIMEOUT);
    }, ex -> ex instanceof DurableDataLogException && ex.getCause() instanceof BKException.BKNoSuchLedgerExistsOnMetadataServerException);
}
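AssertExtensions.assertThrows, as used above, runs a block that is expected to fail and validates the thrown exception with a predicate (here: a DurableDataLogException whose cause is BookKeeper's no-such-ledger error). A minimal sketch of that contract (a hypothetical helper, not Pravega's implementation):

import java.util.function.Predicate;

final class ThrowsAsserts {
    interface ThrowingRunnable {
        void run() throws Exception;
    }

    // Fails unless the runnable throws an exception accepted by the tester predicate.
    static void assertThrows(String message, ThrowingRunnable runnable, Predicate<Throwable> tester) {
        try {
            runnable.run();
        } catch (Throwable ex) {
            if (!tester.test(ex)) {
                throw new AssertionError(message + " (unexpected exception: " + ex + ")", ex);
            }
            return;
        }
        throw new AssertionError(message);
    }
}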