Usage example of io.pravega.segmentstore.server.UpdateableSegmentMetadata in project pravega, taken from class ContainerReadIndexTests, method appendSingleWrite.
/**
 * Performs a single append against the given segment through the ReadIndex.
 * The segment's metadata Length is bumped first, since the ReadIndex verifies
 * that the metadata already accounts for the appended bytes.
 *
 * @param segmentId Id of the segment to append to.
 * @param data      The bytes to append.
 * @param context   The TestContext providing the metadata store and the ReadIndex.
 */
private void appendSingleWrite(long segmentId, byte[] data, TestContext context) throws Exception {
    UpdateableSegmentMetadata metadata = context.metadata.getStreamSegmentMetadata(segmentId);
    // The ReadIndex requires the metadata Length to already reflect this append.
    long appendOffset = metadata.getLength();
    metadata.setLength(appendOffset + data.length);
    context.readIndex.append(segmentId, appendOffset, data);
}
Usage example of io.pravega.segmentstore.server.UpdateableSegmentMetadata in project pravega, taken from class ContainerReadIndexTests, method testTruncateConcurrently.
/**
 * Tests a scenario of truncation that happens concurrently with reading (segment is truncated while reading).
 */
@Test
public void testTruncateConcurrently() throws Exception {
    @Cleanup TestContext context = new TestContext();
    List<Long> segmentIds = createSegments(context).subList(0, 1);
    long segmentId = segmentIds.get(0);
    ByteArrayOutputStream writtenData = new ByteArrayOutputStream();
    appendData(segmentIds, Collections.singletonMap(segmentId, writtenData), context);

    // Start a read over the full extent of the segment and consume the first entry.
    UpdateableSegmentMetadata segmentMetadata = context.metadata.getStreamSegmentMetadata(segmentId);
    @Cleanup ReadResult readResult = context.readIndex.read(segmentId, 0, (int) segmentMetadata.getLength(), TIMEOUT);
    ReadResultEntry firstEntry = readResult.next();
    firstEntry.requestContent(TIMEOUT);
    int firstEntryLength = firstEntry.getContent().join().getLength();
    AssertExtensions.assertLessThan("Unexpected length of the first read result entry.", segmentMetadata.getLength(), firstEntryLength);

    // Truncate just beyond the end of the first returned entry; the next entry must surface the truncation.
    segmentMetadata.setStartOffset(firstEntryLength + 1);
    ReadResultEntry secondEntry = readResult.next();
    Assert.assertTrue("Unexpected ReadResultEntryType.isTerminal of truncated result entry.", secondEntry.getType().isTerminal());
    Assert.assertEquals("Unexpected ReadResultEntryType of truncated result entry.", ReadResultEntryType.Truncated, secondEntry.getType());
    AssertExtensions.assertThrows("Expecting getContent() to return a failed CompletableFuture.", secondEntry::getContent, ex -> ex instanceof StreamSegmentTruncatedException);
    Assert.assertFalse("Unexpected result from hasNext after processing terminal result entry.", readResult.hasNext());
}
Usage example of io.pravega.segmentstore.server.UpdateableSegmentMetadata in project pravega, taken from class ContainerReadIndexTests, method appendDataInStorage.
/**
 * Writes data directly into Storage for every mapped segment and brings the associated
 * metadata in line with what was written (StorageLength always, Length only if it fell behind).
 *
 * @param context         The TestContext providing the metadata store and Storage adapter.
 * @param segmentContents Accumulator that records every byte written, keyed by segment id.
 */
private void appendDataInStorage(TestContext context, HashMap<Long, ByteArrayOutputStream> segmentContents) {
    int writeId = 0;
    for (int appendIndex = 0; appendIndex < APPENDS_PER_SEGMENT; appendIndex++) {
        for (long segmentId : context.metadata.getAllStreamSegmentIds()) {
            UpdateableSegmentMetadata segmentMetadata = context.metadata.getStreamSegmentMetadata(segmentId);
            byte[] writeData = getAppendData(segmentMetadata.getName(), segmentId, appendIndex, writeId);
            writeId++;

            // Write at the current end of the segment as reported by Storage itself.
            long writeOffset = context.storage.getStreamSegmentInfo(segmentMetadata.getName(), TIMEOUT).join().getLength();
            val writeHandle = context.storage.openWrite(segmentMetadata.getName()).join();
            context.storage.write(writeHandle, writeOffset, new ByteArrayInputStream(writeData), writeData.length, TIMEOUT).join();

            // Reflect the write in the metadata; Length must never trail StorageLength.
            segmentMetadata.setStorageLength(writeOffset + writeData.length);
            if (segmentMetadata.getStorageLength() > segmentMetadata.getLength()) {
                segmentMetadata.setLength(segmentMetadata.getStorageLength());
            }

            recordAppend(segmentId, writeData, segmentContents);
        }
    }
}
Usage example of io.pravega.segmentstore.server.UpdateableSegmentMetadata in project pravega, taken from class ContainerReadIndexTests, method testInvalidOperations.
/**
 * Tests the handling of invalid operations. Scenarios include:
 * * Appends at wrong offsets
 * * Bad SegmentIds
 * * Invalid merge operations or sequences (complete before merge, merging non-Transactions, etc.)
 * * Operations not allowed in or not in recovery
 */
@Test
public void testInvalidOperations() throws Exception {
    @Cleanup TestContext context = new TestContext();

    // Create a segment and a Transaction, then seed both with one valid append so subsequent
    // invalid operations are checked against non-trivial state.
    long segmentId = 0;
    String segmentName = getSegmentName((int) segmentId);
    context.metadata.mapStreamSegmentId(segmentName, segmentId);
    initializeSegment(segmentId, context);
    long transactionId = segmentId + 1;
    String transactionName = StreamSegmentNameUtils.getTransactionNameFromId(segmentName, UUID.randomUUID());
    context.metadata.mapStreamSegmentId(transactionName, transactionId, segmentId);
    initializeSegment(transactionId, context);
    byte[] appendData = "foo".getBytes();
    UpdateableSegmentMetadata segmentMetadata = context.metadata.getStreamSegmentMetadata(segmentId);
    long segmentOffset = segmentMetadata.getLength();
    segmentMetadata.setLength(segmentOffset + appendData.length);
    context.readIndex.append(segmentId, segmentOffset, appendData);
    UpdateableSegmentMetadata transactionMetadata = context.metadata.getStreamSegmentMetadata(transactionId);
    long transactionOffset = transactionMetadata.getLength();
    transactionMetadata.setLength(transactionOffset + appendData.length);
    context.readIndex.append(transactionId, transactionOffset, appendData);

    // 1. Appends at wrong offsets (beyond the DurableLogOffset, or at an already-written offset).
    AssertExtensions.assertThrows("append did not throw the correct exception when provided with an offset beyond the Segment's DurableLogOffset.", () -> context.readIndex.append(segmentId, Integer.MAX_VALUE, "foo".getBytes()), ex -> ex instanceof IllegalArgumentException);
    AssertExtensions.assertThrows("append did not throw the correct exception when provided with invalid offset.", () -> context.readIndex.append(segmentId, 0, "foo".getBytes()), ex -> ex instanceof IllegalArgumentException);

    // 2. Appends or reads with wrong (unmapped) SegmentIds.
    AssertExtensions.assertThrows("append did not throw the correct exception when provided with invalid SegmentId.", () -> context.readIndex.append(transactionId + 1, 0, "foo".getBytes()), ex -> ex instanceof IllegalArgumentException);
    AssertExtensions.assertThrows("read did not throw the correct exception when provided with invalid SegmentId.", () -> context.readIndex.read(transactionId + 1, 0, 1, TIMEOUT), ex -> ex instanceof IllegalArgumentException);

    // 3. TriggerFutureReads with wrong Segment Ids.
    ArrayList<Long> badSegmentIds = new ArrayList<>();
    badSegmentIds.add(transactionId + 1);
    AssertExtensions.assertThrows("triggerFutureReads did not throw the correct exception when provided with invalid SegmentId.", () -> context.readIndex.triggerFutureReads(badSegmentIds), ex -> ex instanceof IllegalArgumentException);

    // 4. Merge with invalid arguments (wrong source kind, wrong call order, un-sealed source).
    long secondSegmentId = transactionId + 1;
    context.metadata.mapStreamSegmentId(getSegmentName((int) secondSegmentId), secondSegmentId);
    initializeSegment(secondSegmentId, context);
    AssertExtensions.assertThrows("beginMerge did not throw the correct exception when attempting to merge a stand-alone Segment.", () -> context.readIndex.beginMerge(secondSegmentId, 0, segmentId), ex -> ex instanceof IllegalArgumentException);
    AssertExtensions.assertThrows("completeMerge did not throw the correct exception when called on a Transaction that did not have beginMerge called for.", () -> context.readIndex.completeMerge(segmentId, transactionId), ex -> ex instanceof IllegalArgumentException);
    AssertExtensions.assertThrows("beginMerge did not throw the correct exception when called on a Transaction that was not sealed.", () -> context.readIndex.beginMerge(segmentId, 0, transactionId), ex -> ex instanceof IllegalArgumentException);

    // Seal and legitimately begin the merge, then verify appends to the merged Transaction are rejected.
    transactionMetadata.markSealed();
    long mergeOffset = segmentMetadata.getLength();
    segmentMetadata.setLength(mergeOffset + transactionMetadata.getLength());
    context.readIndex.beginMerge(segmentId, mergeOffset, transactionId);
    AssertExtensions.assertThrows("append did not throw the correct exception when called on a Transaction that was already sealed.", () -> context.readIndex.append(transactionId, transactionMetadata.getLength(), "foo".getBytes()), ex -> ex instanceof IllegalArgumentException);
}
Usage example of io.pravega.segmentstore.server.UpdateableSegmentMetadata in project pravega, taken from class ContainerReadIndexTests, method testFutureReads.
/**
 * Tests the behavior of Future Reads. Scenarios tested include:
 * * Regular appends
 * * Segment sealing
 * * Transaction merging.
 */
@Test
@SuppressWarnings("checkstyle:CyclomaticComplexity")
public void testFutureReads() throws Exception {
    // About 40-50% of the entire segment length.
    final int nonSealReadLimit = APPENDS_PER_SEGMENT * 25;
    // How many appends to trigger Future reads.
    final int triggerFutureReadsEvery = 3;
    @Cleanup TestContext context = new TestContext();
    ArrayList<Long> segmentIds = createSegments(context);
    HashMap<Long, ArrayList<Long>> transactionsBySegment = createTransactions(segmentIds, context);
    HashMap<Long, ByteArrayOutputStream> segmentContents = new HashMap<>();
    HashMap<Long, ByteArrayOutputStream> readContents = new HashMap<>();
    HashSet<Long> segmentsToSeal = new HashSet<>();
    ArrayList<AsyncReadResultProcessor> readProcessors = new ArrayList<>();
    HashMap<Long, TestReadResultHandler> entryHandlers = new HashMap<>();

    // 1. Put all segment names into one list, for easier appends (but still keep the original lists at hand - we'll need them later).
    ArrayList<Long> allSegmentIds = new ArrayList<>(segmentIds);
    transactionsBySegment.values().forEach(allSegmentIds::addAll);
    AtomicInteger writeCount = new AtomicInteger();
    // Periodically trigger future reads so tail readers make progress while we keep appending.
    Runnable triggerFutureReadsCallback = () -> {
        if (writeCount.incrementAndGet() % triggerFutureReadsEvery == 0) {
            context.readIndex.triggerFutureReads(segmentIds);
        }
    };

    // 2. Start tail reads for every segment: unbounded for those we will later seal, and
    // bounded by nonSealReadLimit for the rest (those should stop upon reaching the limit).
    for (int i = 0; i < segmentIds.size(); i++) {
        long segmentId = segmentIds.get(i);
        ByteArrayOutputStream readContentsStream = new ByteArrayOutputStream();
        readContents.put(segmentId, readContentsStream);
        ReadResult readResult;
        if (i < segmentIds.size() / 2) {
            // We're going to seal this one at one point.
            segmentsToSeal.add(segmentId);
            readResult = context.readIndex.read(segmentId, 0, Integer.MAX_VALUE, TIMEOUT);
        } else {
            // Just a regular one, nothing special.
            readResult = context.readIndex.read(segmentId, 0, nonSealReadLimit, TIMEOUT);
        }

        // The Read callback is only accumulating data in this test; we will then compare it against the real data.
        TestReadResultHandler entryHandler = new TestReadResultHandler(readContentsStream, TIMEOUT);
        entryHandlers.put(segmentId, entryHandler);
        readProcessors.add(AsyncReadResultProcessor.process(readResult, entryHandler, executorService()));
    }

    // 3. Add a bunch of writes.
    appendData(allSegmentIds, segmentContents, context, triggerFutureReadsCallback);

    // 4. Merge all the Transactions.
    beginMergeTransactions(transactionsBySegment, segmentContents, context);
    completeMergeTransactions(transactionsBySegment, context);
    context.readIndex.triggerFutureReads(segmentIds);

    // 5. Add more appends (to the parent segments)
    for (int i = 0; i < 5; i++) {
        for (long segmentId : segmentIds) {
            UpdateableSegmentMetadata segmentMetadata = context.metadata.getStreamSegmentMetadata(segmentId);
            byte[] data = getAppendData(segmentMetadata.getName(), segmentId, i, writeCount.incrementAndGet());

            // Make sure we increase the Length prior to appending; the ReadIndex checks for this.
            long offset = segmentMetadata.getLength();
            segmentMetadata.setLength(offset + data.length);
            context.readIndex.append(segmentId, offset, data);
            recordAppend(segmentId, data, segmentContents);
            triggerFutureReadsCallback.run();
        }
    }

    // 6. Seal those segments that we need to seal.
    segmentsToSeal.forEach(segmentId -> context.metadata.getStreamSegmentMetadata(segmentId).markSealed());

    // Trigger future reads on all segments we know about; some may not have had a trigger in a while (see callback above).
    context.readIndex.triggerFutureReads(segmentIds);

    // Now wait for all the reads to complete, and verify their results against the expected output.
    Futures.allOf(entryHandlers.values().stream().map(TestReadResultHandler::getCompleted).collect(Collectors.toList())).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    readProcessors.forEach(AsyncReadResultProcessor::close);

    // Check to see if any errors got thrown (and caught) during the reading process.
    for (Map.Entry<Long, TestReadResultHandler> e : entryHandlers.entrySet()) {
        Throwable err = e.getValue().getError().get();
        if (err != null) {
            // A sealed segment's tail read legitimately ends with StreamSegmentSealedException;
            // the next check (see below) will verify if the segments were properly read.
            if (!(err instanceof StreamSegmentSealedException && segmentsToSeal.contains(e.getKey()))) {
                Assert.fail("Unexpected error happened while processing Segment " + e.getKey() + ": " + e.getValue().getError().get());
            }
        }
    }

    // Compare, byte-by-byte, the outcome of the tail reads.
    Assert.assertEquals("Unexpected number of segments were read.", segmentContents.size(), readContents.size());
    for (long segmentId : segmentIds) {
        boolean isSealed = segmentsToSeal.contains(segmentId);
        byte[] expectedData = segmentContents.get(segmentId).toByteArray();
        byte[] actualData = readContents.get(segmentId).toByteArray();
        int expectedLength = isSealed ? expectedData.length : nonSealReadLimit;
        // Fix: identify the segment in the failure message (was mistakenly printing expectedData.length).
        Assert.assertEquals("Unexpected read length for segment " + segmentId, expectedLength, actualData.length);
        AssertExtensions.assertArrayEquals("Unexpected read contents for segment " + segmentId, expectedData, 0, actualData, 0, actualData.length);
    }
}
Aggregations