Usage of io.pravega.segmentstore.server.UpdateableSegmentMetadata in the Pravega project (pravega/pravega).
Class ContainerReadIndexTests, method appendData:
/**
 * Appends APPENDS_PER_SEGMENT writes to each of the given segments, round-robin across segments,
 * recording every payload into the per-segment contents map and invoking the (optional) callback
 * after each individual append.
 *
 * @param segmentIds      Ids of the segments to append to.
 * @param segmentContents Accumulates every byte written, keyed by segment id.
 * @param context         Test context providing metadata and the ReadIndex under test.
 * @param callback        Optional; run once after every single append (may be null).
 * @throws Exception If any append fails.
 */
private void appendData(Collection<Long> segmentIds, Map<Long, ByteArrayOutputStream> segmentContents, TestContext context, Runnable callback) throws Exception {
    int writeId = 0;
    for (int appendIndex = 0; appendIndex < APPENDS_PER_SEGMENT; appendIndex++) {
        for (long segmentId : segmentIds) {
            UpdateableSegmentMetadata sm = context.metadata.getStreamSegmentMetadata(segmentId);
            // Post-increment: the current writeId is used for this payload, then bumped.
            byte[] payload = getAppendData(sm.getName(), segmentId, appendIndex, writeId++);
            appendSingleWrite(segmentId, payload, context);
            recordAppend(segmentId, payload, segmentContents);
            if (callback != null) {
                callback.run();
            }
        }
    }
}
Usage of io.pravega.segmentstore.server.UpdateableSegmentMetadata in the Pravega project (pravega/pravega).
Class ContainerReadIndexTests, method testStorageReadTransactionNoCache:
// region Scenario-based tests
/**
 * Tests the following Scenario, where the ReadIndex would either read from a bad offset or fail with an invalid offset
 * when reading in certain conditions:
 * * A segment has a transaction, which has N bytes written to it.
 * * The transaction is merged into its parent segment at offset M > N.
 * * At least one byte of the transaction is evicted from the cache
 * * A read is issued to the parent segment for that byte that was evicted
 * * The ReadIndex is supposed to issue a Storage Read with an offset inside the transaction range (so translate
 * from the parent's offset to the transaction's offset). However, after the read, it is supposed to look like the
 * data was read from the parent segment, so it should not expose the adjusted offset at all.
 * <p>
 * This very specific unit test is a result of a regression found during testing.
 */
@Test
public void testStorageReadTransactionNoCache() throws Exception {
// Aggressive cache policy so that applyCachePolicy() below evicts the transaction's data
// (assumes the first ctor arg is the max cache size — TODO confirm CachePolicy ctor semantics).
CachePolicy cachePolicy = new CachePolicy(1, Duration.ZERO, Duration.ofMillis(1));
@Cleanup TestContext context = new TestContext(DEFAULT_CONFIG, cachePolicy);
// Create parent segment and one transaction
long parentId = createSegment(0, context);
UpdateableSegmentMetadata parentMetadata = context.metadata.getStreamSegmentMetadata(parentId);
long transactionId = createTransaction(parentMetadata, 1, context);
UpdateableSegmentMetadata transactionMetadata = context.metadata.getStreamSegmentMetadata(transactionId);
createSegmentsInStorage(context);
ByteArrayOutputStream writtenStream = new ByteArrayOutputStream();
// Write something to the transaction, and make sure it also makes its way to Storage.
byte[] transactionWriteData = getAppendData(transactionMetadata.getName(), transactionId, 0, 0);
appendSingleWrite(transactionId, transactionWriteData, context);
val handle = context.storage.openWrite(transactionMetadata.getName()).join();
context.storage.write(handle, 0, new ByteArrayInputStream(transactionWriteData), transactionWriteData.length, TIMEOUT).join();
// Mark the transaction's entire length as persisted to Storage, so a Storage read can serve it.
transactionMetadata.setStorageLength(transactionMetadata.getLength());
// Write some data to the parent, and make sure it is more than what we write to the transaction (hence the 10).
for (int i = 0; i < 10; i++) {
byte[] parentWriteData = getAppendData(parentMetadata.getName(), parentId, i, i);
appendSingleWrite(parentId, parentWriteData, context);
writtenStream.write(parentWriteData);
}
// Seal & Begin-merge the transaction (do not seal in storage).
transactionMetadata.markSealed();
// The merge offset is the parent's current length, which (per the loop above) exceeds the
// transaction's length — the M > N condition from the scenario description.
long mergeOffset = parentMetadata.getLength();
parentMetadata.setLength(mergeOffset + transactionMetadata.getLength());
context.readIndex.beginMerge(parentId, mergeOffset, transactionId);
transactionMetadata.markMerged();
writtenStream.write(transactionWriteData);
// Clear the cache.
context.cacheManager.applyCachePolicy();
// Issue read from the parent.
// The read targets exactly the merged transaction's range [mergeOffset, mergeOffset + length).
ReadResult rr = context.readIndex.read(parentId, mergeOffset, transactionWriteData.length, TIMEOUT);
Assert.assertTrue("Parent Segment read indicates no data available.", rr.hasNext());
ByteArrayOutputStream readStream = new ByteArrayOutputStream();
long expectedOffset = mergeOffset;
while (rr.hasNext()) {
ReadResultEntry entry = rr.next();
// The entry must report parent-segment offsets, never the transaction-relative offset.
Assert.assertEquals("Unexpected offset for read result entry.", expectedOffset, entry.getStreamSegmentOffset());
// The data was evicted from the cache, so it must come back as a Storage read.
Assert.assertEquals("Served read result entry is not from storage.", ReadResultEntryType.Storage, entry.getType());
// Request contents and store for later use.
entry.requestContent(TIMEOUT);
ReadResultEntryContents contents = entry.getContent().get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
byte[] readBuffer = new byte[contents.getLength()];
StreamHelpers.readAll(contents.getData(), readBuffer, 0, readBuffer.length);
readStream.write(readBuffer);
expectedOffset += contents.getLength();
}
byte[] readData = readStream.toByteArray();
// End-to-end check: the bytes served from the parent's merged range equal the transaction's data.
Assert.assertArrayEquals("Unexpected data read back.", transactionWriteData, readData);
}
Usage of io.pravega.segmentstore.server.UpdateableSegmentMetadata in the Pravega project (pravega/pravega).
Class ContainerReadIndexTests, method initializeSegment:
/**
 * Initializes the in-memory metadata for the given segment: both its length and its
 * persisted (Storage) length start at zero.
 *
 * @param segmentId Id of the segment to initialize.
 * @param context   Test context providing the container metadata.
 */
private void initializeSegment(long segmentId, TestContext context) {
    UpdateableSegmentMetadata segmentMetadata = context.metadata.getStreamSegmentMetadata(segmentId);
    segmentMetadata.setLength(0L);
    segmentMetadata.setStorageLength(0L);
}
Usage of io.pravega.segmentstore.server.UpdateableSegmentMetadata in the Pravega project (pravega/pravega).
Class ContainerReadIndexTests, method completeMergeTransactions:
/**
 * Completes (finalizes) previously begun merges for every transaction in the given map:
 * marks each transaction's metadata as deleted, then notifies the ReadIndex via completeMerge.
 *
 * @param transactionsBySegment Transaction ids to complete, grouped by parent segment id.
 *                              Widened from the concrete HashMap/ArrayList to the Map/List
 *                              interfaces (program to interfaces; backward-compatible for callers).
 * @param context               Test context providing metadata and the ReadIndex under test.
 * @throws Exception If completeMerge fails.
 */
private void completeMergeTransactions(Map<Long, ? extends List<Long>> transactionsBySegment, TestContext context) throws Exception {
    for (Map.Entry<Long, ? extends List<Long>> e : transactionsBySegment.entrySet()) {
        long parentId = e.getKey();
        for (long transactionId : e.getValue()) {
            // Metadata must reflect deletion before the ReadIndex completes the merge.
            UpdateableSegmentMetadata transactionMetadata = context.metadata.getStreamSegmentMetadata(transactionId);
            transactionMetadata.markDeleted();
            context.readIndex.completeMerge(parentId, transactionId);
        }
    }
}
Usage of io.pravega.segmentstore.server.UpdateableSegmentMetadata in the Pravega project (pravega/pravega).
Class ContainerReadIndexTests, method beginMergeTransaction:
/**
 * Begins merging the given (sealed-on-demand) transaction into its parent segment at the
 * parent's current length, updating both metadata objects and the recorded segment contents.
 *
 * @param transactionId   Id of the transaction to merge.
 * @param parentMetadata  Metadata of the parent segment receiving the merge.
 * @param segmentContents Recorded contents per segment; the transaction's bytes are appended
 *                        to the parent's entry and the transaction's entry is removed.
 *                        Widened from HashMap to the Map interface (program to interfaces;
 *                        backward-compatible for callers).
 * @param context         Test context providing metadata and the ReadIndex under test.
 * @return The offset within the parent segment at which the transaction was merged.
 * @throws Exception If beginMerge or the contents update fails.
 */
private long beginMergeTransaction(long transactionId, UpdateableSegmentMetadata parentMetadata, Map<Long, ByteArrayOutputStream> segmentContents, TestContext context) throws Exception {
    UpdateableSegmentMetadata transactionMetadata = context.metadata.getStreamSegmentMetadata(transactionId);
    // Transaction must be sealed first.
    transactionMetadata.markSealed();
    // Update parent length: the transaction lands at the parent's current end.
    long mergeOffset = parentMetadata.getLength();
    parentMetadata.setLength(mergeOffset + transactionMetadata.getLength());
    // Do the ReadIndex merge.
    context.readIndex.beginMerge(parentMetadata.getId(), mergeOffset, transactionId);
    // Update the metadata.
    transactionMetadata.markMerged();
    // Update parent contents: append the transaction's recorded bytes, then drop its entry.
    segmentContents.get(parentMetadata.getId()).write(segmentContents.get(transactionId).toByteArray());
    segmentContents.remove(transactionId);
    return mergeOffset;
}
Aggregations