Use of io.pravega.common.util.BufferView in project pravega by pravega.
From the class TableServiceTests, method generateKeysForSegments.
private HashMap<BufferView, EntryData> generateKeysForSegments(ArrayList<String> segments, Random rnd) {
    val result = new HashMap<BufferView, EntryData>();
    val keysPerSegment = KEY_COUNT / segments.size();
    for (val segmentName : segments) {
        ArrayList<BufferView> keys;
        if (isFixedKeyLength(segmentName)) {
            val keyLength = getFixedKeyLength(segmentName);
            keys = generateKeys(keysPerSegment, keyLength, keyLength, rnd);
        } else {
            keys = generateKeys(keysPerSegment, 1, MAX_KEY_LENGTH, rnd);
        }
        for (val key : keys) {
            result.put(key, new EntryData(segmentName));
        }
    }
    return result;
}
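To see the pattern in isolation, here is a minimal, self-contained sketch that builds a HashMap keyed by BufferView from randomly generated keys. The KEY_COUNT and MAX_KEY_LENGTH constants and the generateKeys helper below are hypothetical stand-ins for the test class's own members; ByteArraySegment is used because it implements BufferView, whose equality and hashing in Pravega are content-based, which is what makes it workable as a HashMap key.

import io.pravega.common.util.BufferView;
import io.pravega.common.util.ByteArraySegment;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Random;

public class KeyGenerationSketch {
    // Hypothetical stand-ins for the test class's KEY_COUNT and MAX_KEY_LENGTH constants.
    private static final int KEY_COUNT = 100;
    private static final int MAX_KEY_LENGTH = 64;

    public static void main(String[] args) {
        Random rnd = new Random(0);
        List<String> segments = Arrays.asList("segment-1", "segment-2");
        HashMap<BufferView, String> keysBySegment = new HashMap<>();
        int keysPerSegment = KEY_COUNT / segments.size();
        for (String segmentName : segments) {
            for (BufferView key : generateKeys(keysPerSegment, 1, MAX_KEY_LENGTH, rnd)) {
                keysBySegment.put(key, segmentName);
            }
        }
        System.out.println("Generated " + keysBySegment.size() + " keys.");
    }

    // Generates 'count' random keys with lengths in [minLength, maxLength], mirroring
    // the variable-length branch of generateKeysForSegments.
    private static ArrayList<BufferView> generateKeys(int count, int minLength, int maxLength, Random rnd) {
        ArrayList<BufferView> keys = new ArrayList<>();
        for (int i = 0; i < count; i++) {
            byte[] bytes = new byte[minLength + rnd.nextInt(maxLength - minLength + 1)];
            rnd.nextBytes(bytes);
            keys.add(new ByteArraySegment(bytes));
        }
        return keys;
    }
}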
Use of io.pravega.common.util.BufferView in project pravega by pravega.
From the class StreamSegmentReadIndex, method createMemoryRead.
/**
 * Creates a ReadResultEntry for data that is readily available in memory.
 *
 * @param entry               The CacheIndexEntry to use.
 * @param streamSegmentOffset The offset in the StreamSegment at which the ReadResultEntry starts.
 * @param maxLength           The maximum length of the read, from the offset of this ReadResultEntry.
 * @param updateStats         If true, the entry's cache generation is updated as a result of this call.
 * @param makeCopy            If true, any data retrieved from the Cache is copied into a heap buffer before being returned.
 */
@GuardedBy("lock")
private CacheReadResultEntry createMemoryRead(ReadIndexEntry entry, long streamSegmentOffset, int maxLength, boolean updateStats, boolean makeCopy) {
    assert streamSegmentOffset >= entry.getStreamSegmentOffset() : String.format("streamSegmentOffset{%d} < entry.getStreamSegmentOffset{%d}", streamSegmentOffset, entry.getStreamSegmentOffset());
    int entryOffset = (int) (streamSegmentOffset - entry.getStreamSegmentOffset());
    int length = (int) Math.min(maxLength, entry.getLength() - entryOffset);
    assert length > 0 : String.format("length{%d} <= 0. streamSegmentOffset = %d, maxLength = %d, entry.offset = %d, entry.length = %d", length, streamSegmentOffset, maxLength, entry.getStreamSegmentOffset(), entry.getLength());
    BufferView data = this.cacheStorage.get(entry.getCacheAddress());
    assert data != null : String.format("No Cache Entry could be retrieved for entry %s", entry);
    if (updateStats) {
        // Update its generation before returning it.
        entry.setGeneration(this.summary.touchOne(entry.getGeneration()));
    }
    data = data.slice(entryOffset, length);
    if (makeCopy) {
        data = new ByteArraySegment(data.getCopy());
    }
    return new CacheReadResultEntry(entry.getStreamSegmentOffset() + entryOffset, data);
}
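The makeCopy branch is the subtle part: slice() returns a zero-copy view over the cached buffer, while getCopy() materializes the bytes into a fresh heap array that is insulated from later changes to the backing memory. A minimal sketch of that difference, using only BufferView methods that appear in the snippet above:

import io.pravega.common.util.BufferView;
import io.pravega.common.util.ByteArraySegment;

public class SliceVsCopySketch {
    public static void main(String[] args) {
        byte[] backing = new byte[]{0, 1, 2, 3, 4, 5, 6, 7};
        BufferView data = new ByteArraySegment(backing);

        // slice() is zero-copy: the result still references the backing array.
        BufferView slice = data.slice(2, 4);

        // getCopy() produces a fresh byte[]; wrapping it mirrors what
        // createMemoryRead does when makeCopy == true.
        BufferView copy = new ByteArraySegment(slice.getCopy());

        backing[2] = 99; // Mutate the underlying array.
        System.out.println(slice.getCopy()[0]); // 99: the slice sees the change.
        System.out.println(copy.getCopy()[0]);  // 2: the copy is insulated.
    }
}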
Use of io.pravega.common.util.BufferView in project pravega by pravega.
From the class AsyncTableEntryReader, method readEntryComponents.
/**
 * Reads a single {@link TableEntry} from the given {@link BufferView.Reader}. The {@link TableEntry} itself is not
 * constructed; rather, all of its components are returned individually.
 *
 * @param input         A {@link BufferView.Reader} to read from.
 * @param segmentOffset The Segment Offset that the first byte of the input maps to. This will be used as a Version,
 *                      unless the deserialized entry's Header contains an explicit version.
 * @param serializer    The {@link EntrySerializer} to use for deserializing entries.
 * @return A {@link DeserializedEntry} that contains all the components of the {@link TableEntry}.
 * @throws SerializationException If an Exception occurred while deserializing the {@link DeserializedEntry}.
 */
static DeserializedEntry readEntryComponents(BufferView.Reader input, long segmentOffset, EntrySerializer serializer) throws SerializationException {
    val h = serializer.readHeader(input);
    long version = getKeyVersion(h, segmentOffset);
    BufferView key = input.readSlice(h.getKeyLength());
    BufferView value = h.isDeletion() ? null : (h.getValueLength() == 0 ? BufferView.empty() : input.readSlice(h.getValueLength()));
    return new DeserializedEntry(h, version, key, value);
}
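A BufferView.Reader consumes a view sequentially: each readSlice(n) call advances the read position and returns a zero-copy sub-view, which is exactly how the key and value are peeled off above once the header is known. A toy illustration (the fixed 4-byte-key/3-byte-value layout here is invented for the example and is not the real EntrySerializer format):

import io.pravega.common.util.BufferView;
import io.pravega.common.util.ByteArraySegment;

public class ReaderSliceSketch {
    public static void main(String[] args) {
        // A toy "entry": a 4-byte key followed by a 3-byte value.
        BufferView buffer = new ByteArraySegment(new byte[]{10, 11, 12, 13, 20, 21, 22});
        BufferView.Reader reader = buffer.getBufferViewReader();

        // Each readSlice advances the reader past the bytes it returns.
        BufferView key = reader.readSlice(4);
        BufferView value = reader.readSlice(3);
        System.out.println("key length = " + key.getLength() + ", value length = " + value.getLength());
    }
}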
Use of io.pravega.common.util.BufferView in project pravega by pravega.
From the class ContainerReadIndexTests, method testCacheEssentialOnlyMode.
/**
* Tests the ability of the Read Index to handle "Essential-Only" cache mode, where only cache entries that are not
* yet persisted to Storage may be added to the cache.
*/
@Test
public void testCacheEssentialOnlyMode() throws Exception {
    val rnd = new Random(0);
    val appendSize = 4 * 1024; // Cache block size.
    val segmentLength = 10 * appendSize;

    // Set up a cache policy that will keep at most 4 blocks in the cache and enter essential-only mode at 4 blocks too.
    // NOTE: this count includes the metadata block (internal to the cache), so there are 3 usable blocks.
    CachePolicy cachePolicy = new CachePolicy(segmentLength, 0.3, 0.4, Duration.ofHours(1000), Duration.ofSeconds(1));
    @Cleanup
    TestContext context = new TestContext(DEFAULT_CONFIG, cachePolicy);
    context.cacheStorage.appendReturnBlocker = null; // Not blocking anything now.

    // Create a segment, generate some content for it, set up its metadata and write 40% of it to Storage.
    long segmentId = createSegment(0, context);
    val segmentMetadata = context.metadata.getStreamSegmentMetadata(segmentId);
    createSegmentsInStorage(context);
    val segmentData = new byte[segmentLength];
    rnd.nextBytes(segmentData);
    val part1 = new ByteArraySegment(segmentData, 0, appendSize);
    val part2 = new ByteArraySegment(segmentData, appendSize, appendSize);
    val part3 = new ByteArraySegment(segmentData, 2 * appendSize, appendSize);
    val part4 = new ByteArraySegment(segmentData, 3 * appendSize, appendSize);
    val part5 = new ByteArraySegment(segmentData, 4 * appendSize, appendSize);
    segmentMetadata.setLength(segmentLength);
    segmentMetadata.setStorageLength(part1.getLength() + part2.getLength());
    context.storage.openWrite(segmentMetadata.getName())
            .thenCompose(h -> context.storage.write(h, 0, new ByteArrayInputStream(segmentData), (int) segmentMetadata.getStorageLength(), TIMEOUT))
            .join();
    val insertCount = new AtomicInteger(0);
    val storageReadCount = new AtomicInteger(0);
    context.cacheStorage.insertCallback = address -> insertCount.incrementAndGet();
    context.storage.setReadInterceptor((segment, wrappedStorage) -> storageReadCount.incrementAndGet());

    // Helper for reading a segment part.
    BiConsumer<Long, BufferView> readPart = (partOffset, partContents) -> {
        try {
            @Cleanup
            val rr = context.readIndex.read(segmentId, partOffset, partContents.getLength(), TIMEOUT);
            val readData = rr.readRemaining(partContents.getLength(), TIMEOUT);
            Assert.assertEquals(partContents, BufferView.wrap(readData));
        } catch (Exception ex) {
            throw new CompletionException(ex);
        }
    };

    // Read parts 1 and 2 (separately). They should be cached as individual entries.
    readPart.accept(0L, part1);
    Assert.assertEquals(1, storageReadCount.get());

    // Cache insertion is done asynchronously; wait for it to complete before checking.
    AssertExtensions.assertEventuallyEquals(1, insertCount::get, TIMEOUT.toMillis());
    AssertExtensions.assertEventuallyEquals(1, context.readIndex.getIndex(segmentId).getSummary()::size, TIMEOUT.toMillis());

    // No eviction, but the generation increases.
    boolean evicted = context.cacheManager.applyCachePolicy();
    Assert.assertFalse("Not expecting an eviction now.", evicted);
    readPart.accept((long) part1.getLength(), part2);

    // We expect 2 storage reads and also 2 cache inserts.
    Assert.assertEquals(2, storageReadCount.get());
    AssertExtensions.assertEventuallyEquals(2, insertCount::get, TIMEOUT.toMillis()); // This one is done asynchronously.
    AssertExtensions.assertEventuallyEquals(2, context.readIndex.getIndex(segmentId).getSummary()::size, TIMEOUT.toMillis());

    // No eviction, but the generation increases.
    evicted = context.cacheManager.applyCachePolicy();
    Assert.assertFalse("Not expecting an eviction now.", evicted);

    // Append parts 3, 4 and 5.
    context.readIndex.append(segmentId, segmentMetadata.getStorageLength(), part3);
    Assert.assertEquals(3, insertCount.get()); // This insertion is done synchronously.

    // Eviction (part 1) + generation increase.
    evicted = context.cacheManager.applyCachePolicy();
    Assert.assertTrue("Expected an eviction after writing 3 blocks.", evicted);
    context.readIndex.append(segmentId, segmentMetadata.getStorageLength() + part3.getLength(), part4);
    Assert.assertEquals("Expected an insertion for appends even in essential-only mode.", 4, insertCount.get());

    // Eviction (part 2) + generation increase.
    evicted = context.cacheManager.applyCachePolicy();
    Assert.assertTrue("Expected an eviction after writing 4 blocks.", evicted);
    context.readIndex.append(segmentId, segmentMetadata.getStorageLength() + part3.getLength() + part4.getLength(), part5);
    Assert.assertEquals("Expected an insertion for appends even in essential-only mode.", 5, insertCount.get());

    // Nothing to evict.
    evicted = context.cacheManager.applyCachePolicy();
    Assert.assertFalse("Not expecting an eviction after writing 5 blocks.", evicted);
    Assert.assertTrue("Expected to be in essential-only mode after pinning 3 blocks.", context.cacheManager.isEssentialEntriesOnly());

    // Verify that re-reading parts 1 and 2 results in no cache inserts.
    insertCount.set(0);
    storageReadCount.set(0);
    int expectedReadCount = 0;
    for (int i = 0; i < 5; i++) {
        readPart.accept(0L, part1);
        readPart.accept((long) part1.getLength(), part2);
        expectedReadCount += 2;
    }
    Assert.assertTrue("Not expected to have exited essential-only mode.", context.cacheManager.isEssentialEntriesOnly());
    Assert.assertEquals("Unexpected number of storage reads in essential-only mode.", expectedReadCount, storageReadCount.get());
    Assert.assertEquals("Unexpected number of cache inserts in essential-only mode.", 0, insertCount.get());
}
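The readPart helper above compares the expected contents against whatever the ReadResult returns, which may arrive as several buffers; BufferView.wrap composes a list of views into one logical view without copying. A small sketch of that comparison, assuming wrap's list overload and content-based BufferView equality as relied upon by the test:

import io.pravega.common.util.BufferView;
import io.pravega.common.util.ByteArraySegment;

import java.util.Arrays;

public class WrapComparisonSketch {
    public static void main(String[] args) {
        byte[] data = new byte[]{1, 2, 3, 4, 5, 6};
        BufferView whole = new ByteArraySegment(data);

        // Compose two adjacent 3-byte views into a single 6-byte view, the way a
        // multi-chunk read result can be stitched together for comparison.
        BufferView composed = BufferView.wrap(Arrays.asList(
                new ByteArraySegment(data, 0, 3),
                new ByteArraySegment(data, 3, 3)));

        System.out.println(whole.equals(composed)); // true: equality compares contents.
    }
}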
Use of io.pravega.common.util.BufferView in project pravega by pravega.
From the class AsyncReadResultProcessorTests, method testFutureReads.
/**
 * Tests the AsyncReadResultProcessor on Future Reads (that are not yet available in memory, but soon will be).
 */
@Test
public void testFutureReads() throws Exception {
    // Pre-generate some entries.
    ArrayList<byte[]> entries = new ArrayList<>();
    int totalLength = generateEntries(entries);

    // Set up an entry provider supplier.
    AtomicInteger currentIndex = new AtomicInteger();
    StreamSegmentReadResult.NextEntrySupplier supplier = (offset, length, makeCopy) -> {
        int idx = currentIndex.getAndIncrement();
        if (idx >= entries.size()) {
            return null;
        }
        Supplier<BufferView> entryContentsSupplier = () -> new ByteArraySegment(entries.get(idx));
        return new TestFutureReadResultEntry(offset, length, entryContentsSupplier, executorService());
    };

    // Start an AsyncReadResultProcessor.
    @Cleanup
    StreamSegmentReadResult rr = new StreamSegmentReadResult(0, totalLength, supplier, "");
    TestReadResultHandler testReadResultHandler = new TestReadResultHandler(entries);
    try (AsyncReadResultProcessor rp = AsyncReadResultProcessor.process(rr, testReadResultHandler, executorService())) {
        // Wait for it to complete, and then verify that no errors have been recorded via the callbacks.
        testReadResultHandler.completed.get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
        if (testReadResultHandler.error.get() != null) {
            Assert.fail("Read failure: " + testReadResultHandler.error.get());
        }
        Assert.assertEquals("Unexpected number of reads processed.", entries.size(), testReadResultHandler.readCount.get());
    }
    Assert.assertTrue("ReadResult was not closed when the AsyncReadResultProcessor was closed.", rr.isClosed());
}
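The heart of a "future read" is that the entry's contents are produced later, by a supplier running on an executor, rather than being available at request time. TestFutureReadResultEntry and the NextEntrySupplier wiring are specific to the test harness above; the sketch below only mirrors the supplier-completes-later idea with a plain CompletableFuture:

import io.pravega.common.util.BufferView;
import io.pravega.common.util.ByteArraySegment;

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;

public class FutureReadSketch {
    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        try {
            // The contents are not available yet; a supplier produces them later.
            Supplier<BufferView> contentsSupplier = () -> new ByteArraySegment(new byte[]{1, 2, 3});
            CompletableFuture<BufferView> futureRead =
                    CompletableFuture.supplyAsync(contentsSupplier::get, executor);

            // The consumer blocks (with a timeout) until the "read" completes.
            BufferView contents = futureRead.get(10, TimeUnit.SECONDS);
            System.out.println("Read " + contents.getLength() + " bytes.");
        } finally {
            executor.shutdown();
        }
    }
}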