Use of io.pravega.common.util.ByteArraySegment in project pravega by pravega.
In class ContainerReadIndexTests, method appendData.
private void appendData(Collection<Long> segmentIds, Map<Long, ByteArrayOutputStream> segmentContents, TestContext context, Runnable callback) throws Exception {
    int writeId = 0;
    for (int i = 0; i < APPENDS_PER_SEGMENT; i++) {
        for (long segmentId : segmentIds) {
            UpdateableSegmentMetadata segmentMetadata = context.metadata.getStreamSegmentMetadata(segmentId);
            ByteArraySegment data = getAppendData(segmentMetadata.getName(), segmentId, i, writeId);
            writeId++;
            appendSingleWrite(segmentId, data, context);
            recordAppend(segmentId, data, segmentContents);
            if (callback != null) {
                callback.run();
            }
        }
    }
}
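getAppendData above returns the payload as a ByteArraySegment, the array-wrapper type this whole page is about. For orientation, here is a minimal, self-contained sketch of how such a payload can be built and inspected, using only constructors and accessors that also appear in the snippets on this page (the class name ByteArraySegmentSketch and the sample string are illustrative, not Pravega code):

import io.pravega.common.util.ByteArraySegment;
import java.io.ByteArrayOutputStream;
import java.nio.charset.StandardCharsets;

public class ByteArraySegmentSketch {
    public static void main(String[] args) throws Exception {
        // Wrap an existing byte array; the segment is a view, not a copy.
        byte[] raw = "Segment_1_Append_0".getBytes(StandardCharsets.UTF_8);
        ByteArraySegment data = new ByteArraySegment(raw);

        // slice() creates a view over a sub-range; getCopy() materializes a fresh byte[].
        ByteArraySegment prefix = data.slice(0, 7);
        System.out.println(prefix.getLength());                                   // 7
        System.out.println(new String(prefix.getCopy(), StandardCharsets.UTF_8)); // "Segment"

        // getReader() exposes the contents as an InputStream, which is how the tests on this
        // page hand data to Storage writes. Here we just drain it into a ByteArrayOutputStream,
        // much like a recordAppend-style helper could accumulate per-segment contents.
        ByteArrayOutputStream contents = new ByteArrayOutputStream();
        data.getReader().transferTo(contents);
        System.out.println(contents.size() == data.getLength());                  // true
    }
}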
Use of io.pravega.common.util.ByteArraySegment in project pravega by pravega.
In class ContainerReadIndexTests, method testBatchedRead.
/**
 * Tests the ability of the ReadIndex to batch multiple index entries together into a bigger read. This test
 * writes a lot of very small appends to the index, then issues a full read (from the beginning) while configuring
 * the read index to return results of no less than a particular size. As an added bonus, it also forces a Storage
 * Read towards the end to make sure the ReadIndex doesn't coalesce those into the result as well.
 */
@Test
public void testBatchedRead() throws Exception {
    final int totalAppendLength = 500 * 1000;
    final int maxAppendLength = 100;
    final int minReadLength = 16 * 1024;
    final byte[] segmentData = new byte[totalAppendLength];
    final Random rnd = new Random(0);
    rnd.nextBytes(segmentData);
    final ReadIndexConfig config = ReadIndexConfig.builder().with(ReadIndexConfig.MEMORY_READ_MIN_LENGTH, minReadLength).build();
    @Cleanup TestContext context = new TestContext(config, CachePolicy.INFINITE);

    // Create the segment in Storage and populate it with all the data (one segment is sufficient for this test).
    final long segmentId = createSegment(0, context);
    createSegmentsInStorage(context);
    final UpdateableSegmentMetadata segmentMetadata = context.metadata.getStreamSegmentMetadata(segmentId);
    val writeHandle = context.storage.openWrite(segmentMetadata.getName()).join();
    context.storage.write(writeHandle, 0, new ByteArrayInputStream(segmentData), segmentData.length, TIMEOUT).join();
    segmentMetadata.setStorageLength(segmentData.length);

    // Add the contents of the segment to the read index using very small appends (same data as in Storage).
    int writtenLength = 0;
    int remainingLength = totalAppendLength;
    int lastCacheOffset = -1;
    while (remainingLength > 0) {
        int appendLength = rnd.nextInt(maxAppendLength) + 1;
        if (appendLength < remainingLength) {
            // Make another append.
            byte[] appendData = new byte[appendLength];
            System.arraycopy(segmentData, writtenLength, appendData, 0, appendLength);
            appendSingleWrite(segmentId, new ByteArraySegment(appendData), context);
            writtenLength += appendLength;
            remainingLength -= appendLength;
        } else {
            // This would be the last append. Don't add it, in order to force the read index to load it from Storage.
            lastCacheOffset = writtenLength;
            appendLength = remainingLength;
            writtenLength += appendLength;
            remainingLength = 0;
            segmentMetadata.setLength(writtenLength);
        }
    }

    // Check all the appended data.
    @Cleanup ReadResult readResult = context.readIndex.read(segmentId, 0, totalAppendLength, TIMEOUT);
    long expectedCurrentOffset = 0;
    boolean encounteredStorageRead = false;
    while (readResult.hasNext()) {
        ReadResultEntry entry = readResult.next();
        if (entry.getStreamSegmentOffset() < lastCacheOffset) {
            Assert.assertEquals("Expecting only a Cache entry before switch offset.", ReadResultEntryType.Cache, entry.getType());
        } else {
            Assert.assertEquals("Expecting only a Storage entry on or after switch offset.", ReadResultEntryType.Storage, entry.getType());
            entry.requestContent(TIMEOUT);
            entry.getContent().get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
            encounteredStorageRead = true;
        }

        // Check the entry contents.
        byte[] entryData = entry.getContent().join().getCopy();
        AssertExtensions.assertArrayEquals("Unexpected data read at offset " + expectedCurrentOffset, segmentData, (int) expectedCurrentOffset, entryData, 0, entryData.length);
        expectedCurrentOffset += entryData.length;

        // Check the minimum read length, but only for cache entries that are not cut short by the Storage entry.
        if (expectedCurrentOffset < lastCacheOffset) {
            AssertExtensions.assertGreaterThanOrEqual("Expecting a ReadResultEntry of a minimum length for cache hit.", minReadLength, entryData.length);
        }
    }
    Assert.assertEquals("Did not encounter any Storage reads, even though one was forced.", lastCacheOffset > 0, encounteredStorageRead);
}
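The while loop above is the manual way to consume a ReadResult: request content for Storage entries, then copy each entry's bytes out. The same pattern, condensed into a hypothetical helper that uses only the types and calls shown in the test (TIMEOUT, ReadResult, ReadResultEntry, ReadResultEntryType) and assumes, as the test does, that only Cache and Storage entries are returned. The readRemaining method used in a later example on this page packages essentially the same loop.

// Hypothetical helper, not part of the test class: drains a ReadResult covering exactly
// 'length' bytes into a single byte[], mirroring the consumption loop in testBatchedRead.
private byte[] readAll(ReadResult readResult, int length, Duration timeout) throws Exception {
    byte[] output = new byte[length];
    int offset = 0;
    while (readResult.hasNext()) {
        ReadResultEntry entry = readResult.next();
        if (entry.getType() == ReadResultEntryType.Storage) {
            // Storage entries are served lazily; ask for their data explicitly.
            entry.requestContent(timeout);
        }
        // Cache entries already have their content available, so getContent() completes immediately.
        byte[] entryData = entry.getContent().get(timeout.toMillis(), TimeUnit.MILLISECONDS).getCopy();
        System.arraycopy(entryData, 0, output, offset, entryData.length);
        offset += entryData.length;
    }
    return output;
}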
Use of io.pravega.common.util.ByteArraySegment in project pravega by pravega.
In class ContainerReadIndexTests, method testStorageReadsConcurrentSmallLarge.
private void testStorageReadsConcurrentSmallLarge(int segmentLength, int read1Offset, int read1Length, int read2Offset, int read2Length) throws Exception {
    // We only expect 2 Storage reads for this test.
    val expectedStorageReadCount = 2;
    val cachePolicy = new CachePolicy(100, 0.01, 1.0, Duration.ofMillis(10), Duration.ofMillis(10));
    val config = ReadIndexConfig.builder()
            .with(ReadIndexConfig.MEMORY_READ_MIN_LENGTH, DEFAULT_CONFIG.getMemoryReadMinLength())
            .with(ReadIndexConfig.STORAGE_READ_ALIGNMENT, segmentLength)
            .build();
    @Cleanup TestContext context = new TestContext(config, cachePolicy);

    // Create the segment.
    val segmentId = createSegment(0, context);
    val metadata = context.metadata.getStreamSegmentMetadata(segmentId);
    context.storage.create(metadata.getName(), TIMEOUT).join();

    // Write some data to the segment in Storage.
    val rnd = new Random(0);
    val segmentData = new ByteArraySegment(new byte[segmentLength]);
    rnd.nextBytes(segmentData.array());
    context.storage.openWrite(metadata.getName())
            .thenCompose(handle -> context.storage.write(handle, 0, segmentData.getReader(), segmentData.getLength(), TIMEOUT))
            .join();
    metadata.setLength(segmentData.getLength());
    metadata.setStorageLength(segmentData.getLength());

    @Cleanup("release") val firstReadBlocker = new ReusableLatch();
    @Cleanup("release") val firstRead = new ReusableLatch();
    @Cleanup("release") val secondReadBlocker = new ReusableLatch();
    @Cleanup("release") val secondRead = new ReusableLatch();
    val cacheInsertCount = new AtomicInteger();
    context.cacheStorage.insertCallback = address -> cacheInsertCount.incrementAndGet();
    val storageReadCount = new AtomicInteger();
    context.storage.setReadInterceptor((segment, wrappedStorage) -> {
        int readCount = storageReadCount.incrementAndGet();
        if (readCount == 1) {
            firstRead.release();
            Exceptions.handleInterrupted(firstReadBlocker::await);
        } else if (readCount == 2) {
            secondRead.release();
            Exceptions.handleInterrupted(secondReadBlocker::await);
        }
    });

    // Initiate the first Storage Read.
    val read1Result = context.readIndex.read(segmentId, read1Offset, read1Length, TIMEOUT);
    val read1Data = new byte[read1Length];
    val read1Future = CompletableFuture.runAsync(() -> read1Result.readRemaining(read1Data, TIMEOUT), executorService());

    // Wait for it to process.
    firstRead.await();

    // Initiate the second Storage Read.
    val read2Result = context.readIndex.read(segmentId, read2Offset, read2Length, TIMEOUT);
    val read2Data = new byte[read2Length];
    val read2Future = CompletableFuture.runAsync(() -> read2Result.readRemaining(read2Data, TIMEOUT), executorService());
    secondRead.await();

    // Unblock the first Storage Read and wait for it to complete.
    firstReadBlocker.release();
    read1Future.get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);

    // Unblock the second Storage Read.
    secondReadBlocker.release();
    read2Future.get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);

    // Wait for the data from the second read to be fully added to the cache (background task).
    TestUtils.await(() -> {
        try {
            return context.readIndex.read(0, read2Offset, read2Length, TIMEOUT).next().getType() == ReadResultEntryType.Cache;
        } catch (StreamSegmentNotExistsException ex) {
            throw new CompletionException(ex);
        }
    }, 10, TIMEOUT.toMillis());

    // Verify that the initial read requests retrieved the data correctly.
    Assert.assertEquals("Initial Read 1 (Storage)", segmentData.slice(read1Offset, read1Length), new ByteArraySegment(read1Data));
    Assert.assertEquals("Initial Read 2 (Storage)", segmentData.slice(read2Offset, read2Length), new ByteArraySegment(read2Data));

    // Re-issue the read requests for the exact same offsets. This time they should be served from the cache.
    val read1Data2 = new byte[read1Length];
    context.readIndex.read(segmentId, read1Offset, read1Length, TIMEOUT).readRemaining(read1Data2, TIMEOUT);
    Assert.assertArrayEquals("Reissued Read 1 (Cache)", read1Data, read1Data2);
    val read2Data2 = new byte[read2Length];
    context.readIndex.read(segmentId, read2Offset, read2Length, TIMEOUT).readRemaining(read2Data2, TIMEOUT);
    Assert.assertArrayEquals("Reissued Read 2 (Cache)", read2Data, read2Data2);

    // Verify that we did the expected number of Storage/Cache operations.
    Assert.assertEquals("Unexpected number of storage reads.", expectedStorageReadCount, storageReadCount.get());
    Assert.assertTrue("Unexpected number of cache inserts.", cacheInsertCount.get() == 3 || cacheInsertCount.get() == 1);
}
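The interesting part of this test is how it forces a deterministic interleaving of the two concurrent Storage reads: the read interceptor trips a "started" latch and then parks on a "blocker" latch until the test lets it continue. Below is a stripped-down sketch of that gating pattern, assuming only ReusableLatch and Exceptions from the snippet above; gatedOperation stands in for the intercepted Storage read, and executorService() and TIMEOUT come from the surrounding test base class. The fragment is assumed to run inside a test method that declares throws Exception.

// Minimal sketch of the latch-based gate used above. 'started' tells the test the operation
// is in flight; 'blocker' holds the operation until the test decides to release it.
ReusableLatch started = new ReusableLatch();
ReusableLatch blocker = new ReusableLatch();
Runnable gatedOperation = () -> {
    started.release();                            // Signal: the read has begun.
    Exceptions.handleInterrupted(blocker::await); // Park until the test says go.
};

// Test side: start the operation asynchronously, wait until it is blocked, do whatever
// must happen while it is held (e.g. issue an overlapping read), then unblock it.
CompletableFuture<Void> op = CompletableFuture.runAsync(gatedOperation, executorService());
Exceptions.handleInterrupted(started::await);
// ... issue the second, overlapping read here ...
blocker.release();
op.get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);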
Use of io.pravega.common.util.ByteArraySegment in project pravega by pravega.
In class ContainerReadIndexTests, method testTrimCache.
/**
* Tests the {@link ContainerReadIndex#trimCache()} method.
*/
@Test
public void testTrimCache() throws Exception {
    // Create a CachePolicy with a set number of generations and a known max size.
    // Each generation contains exactly one entry, so the number of generations is also the number of entries.
    // We make a tiny, fixed-size append each time. This allows us to test edge cases as well, by having the finest
    // practical precision when it comes to selecting which bytes we want evicted and which kept.
    final int appendCount = 100;
    final int segmentId = 123;
    final byte[] appendData = new byte[2];
    val removedEntryCount = new AtomicInteger();
    @Cleanup TestContext context = new TestContext();
    context.metadata.enterRecoveryMode();
    context.readIndex.enterRecoveryMode(context.metadata);

    // To ease our testing, we disable appends and instruct the TestCache to report the same value for UsedBytes as it
    // has for StoredBytes. This shields us from having to know internal details about the layout of the cache.
    context.cacheStorage.usedBytesSameAsStoredBytes = true;
    context.cacheStorage.disableAppends = true;
    context.cacheStorage.deleteCallback = e -> removedEntryCount.incrementAndGet();
    createSegment(segmentId, context);
    val metadata = context.metadata.getStreamSegmentMetadata(segmentId);
    metadata.setLength(appendCount * appendData.length);
    for (int i = 0; i < appendCount; i++) {
        long offset = i * appendData.length;
        context.readIndex.append(segmentId, offset, new ByteArraySegment(appendData));
    }

    // Gradually increase the StorageLength of the segment and invoke trimCache twice at every step. This also verifies
    // that trimCache does not evict more than it should when it has nothing to do.
    int deltaIncrease = 0;
    while (metadata.getStorageLength() < metadata.getLength()) {
        val trim1 = context.readIndex.trimCache();
        Assert.assertEquals("Not expecting any bytes trimmed.", 0, trim1);

        // Every time we trim, increase the StorageLength by a bigger amount, but make sure we don't exceed the length of the segment.
        deltaIncrease = (int) Math.min(metadata.getLength() - metadata.getStorageLength(), deltaIncrease + appendData.length);
        metadata.setStorageLength(Math.min(metadata.getLength(), metadata.getStorageLength() + deltaIncrease));
        removedEntryCount.set(0);
        val trim2 = context.readIndex.trimCache();
        Assert.assertEquals("Unexpected number of bytes trimmed.", deltaIncrease, trim2);
        Assert.assertEquals("Unexpected number of cache entries evicted.", deltaIncrease / appendData.length, removedEntryCount.get());
    }

    // Take the index out of recovery mode.
    context.metadata.exitRecoveryMode();
    context.readIndex.exitRecoveryMode(true);

    // Verify that the entries have actually been evicted.
    for (int i = 0; i < appendCount; i++) {
        long offset = i * appendData.length;
        @Cleanup val readResult = context.readIndex.read(segmentId, offset, appendData.length, TIMEOUT);
        val first = readResult.next();
        Assert.assertEquals("Expected evicted data to be served from Storage at offset " + offset + ".", ReadResultEntryType.Storage, first.getType());
    }

    // Verify that trimCache() does not work when we are not in recovery mode.
    AssertExtensions.assertThrows("trimCache worked in non-recovery mode.", context.readIndex::trimCache, ex -> ex instanceof IllegalStateException);
}
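The two assertions inside the while loop encode a simple invariant: once StorageLength advances by some delta, exactly those delta bytes, i.e. delta / appendData.length whole cache entries, become eligible for trimming. A hypothetical helper (not part of the test) that states that accounting explicitly:

// Hypothetical helper: given how far StorageLength advanced, compute how many whole cache
// entries trimCache() is expected to evict. Each entry in this test holds exactly
// appendData.length (2) bytes, so trimmed bytes divide evenly into entries.
private static int expectedEvictedEntries(long oldStorageLength, long newStorageLength, int entrySize) {
    long trimmableBytes = newStorageLength - oldStorageLength;
    return (int) (trimmableBytes / entrySize);
}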
Use of io.pravega.common.util.ByteArraySegment in project pravega by pravega.
In class SystemJournal, method writeRecordBatch.
/**
 * Writes a single batch of {@link SystemJournalRecord} instances to the journal.
 */
private CompletableFuture<Void> writeRecordBatch(Collection<SystemJournalRecord> records) {
    val batch = SystemJournalRecordBatch.builder().systemJournalRecords(records).build();
    ByteArraySegment bytes;
    try {
        bytes = BATCH_SERIALIZER.serialize(batch);
    } catch (IOException e) {
        return CompletableFuture.failedFuture(new ChunkStorageException(getSystemJournalChunkName(), "Unable to serialize", e));
    }

    // Persist.
    // Retry until successful or until the maximum number of attempts is reached.
    val attempt = new AtomicInteger();
    val done = new AtomicBoolean();
    return Futures.loop(
            () -> !done.get() && attempt.get() < config.getMaxJournalWriteAttempts(),
            () -> writeToJournal(bytes)
                    .thenAcceptAsync(v -> {
                        log.trace("SystemJournal[{}] Logging system log records - journal={}, batch={}.",
                                containerId, currentHandle.get().getChunkName(), batch);
                        recordsSinceSnapshot.incrementAndGet();
                        done.set(true);
                    }, executor)
                    .handleAsync((v, e) -> {
                        attempt.incrementAndGet();
                        if (e != null) {
                            val ex = Exceptions.unwrap(e);
                            // Throw if retries are exhausted.
                            if (attempt.get() >= config.getMaxJournalWriteAttempts()) {
                                throw new CompletionException(ex);
                            }
                            log.warn("SystemJournal[{}] Error while writing journal {}. Attempt#{}",
                                    containerId, getSystemJournalChunkName(containerId, epoch, currentFileIndex.get()), attempt.get(), e);
                            // For these known failures we start a new journal file (see below) and retry.
                            if (ex instanceof InvalidOffsetException) {
                                return null;
                            }
                            if (ex instanceof ChunkStorageException) {
                                return null;
                            }
                            // Unknown error.
                            throw new CompletionException(ex);
                        } else {
                            // No exception: just return the value.
                            return v;
                        }
                    }, executor)
                    .thenAcceptAsync(v -> {
                        // Add a new log file if required.
                        if (!chunkStorage.supportsAppend() || !config.isAppendEnabled() || !done.get()) {
                            newChunkRequired.set(true);
                        }
                    }, executor),
            executor);
}
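At its core, writeRecordBatch is a bounded retry loop: keep re-invoking the asynchronous write until either it succeeds (done is set) or the attempt budget is exhausted, at which point the handler rethrows. Below is a stripped-down sketch of that pattern with the journal-specific details removed, assuming the same Futures.loop, Exceptions and Lombok val used above plus standard java.util.concurrent and java.util.function imports; writeOnce, maxAttempts and executor are placeholder parameters of this sketch, not Pravega APIs.

// Simplified sketch of the retry pattern above. writeOnce.get() stands in for writeToJournal(bytes).
private CompletableFuture<Void> writeWithRetries(Supplier<CompletableFuture<Void>> writeOnce, int maxAttempts, Executor executor) {
    val attempt = new AtomicInteger();
    val done = new AtomicBoolean();
    return Futures.loop(
            () -> !done.get() && attempt.get() < maxAttempts,
            () -> writeOnce.get()
                    .thenAcceptAsync(v -> done.set(true), executor)   // Success: stop looping.
                    .handleAsync((v, e) -> {
                        attempt.incrementAndGet();
                        if (e != null && attempt.get() >= maxAttempts) {
                            // Out of attempts: surface the failure to the caller.
                            throw new CompletionException(Exceptions.unwrap(e));
                        }
                        // Otherwise swallow the failure (or pass success through) and let the loop decide.
                        return v;
                    }, executor),
            executor);
}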