Use of io.pravega.segmentstore.contracts.ReadResult in project pravega by pravega.
From class PravegaRequestProcessorTest, method testReadSegmentTruncated.
@Test(timeout = 20000)
public void testReadSegmentTruncated() {
    // Set up a PravegaRequestProcessor instance to execute the ReadSegment request against.
    String streamSegmentName = "testReadSegment";
    int readLength = 1000;
    StreamSegmentStore store = mock(StreamSegmentStore.class);
    ServerConnection connection = mock(ServerConnection.class);
    PravegaRequestProcessor processor = new PravegaRequestProcessor(store, connection);
    TestReadResultEntry entry1 = new TestReadResultEntry(ReadResultEntryType.Truncated, 0, readLength);
    List<ReadResultEntry> results = new ArrayList<>();
    results.add(entry1);
    CompletableFuture<ReadResult> readResult = new CompletableFuture<>();
    readResult.complete(new TestReadResult(0, readLength, results));
    when(store.read(streamSegmentName, 0, readLength, PravegaRequestProcessor.TIMEOUT)).thenReturn(readResult);
    StreamSegmentInformation info = StreamSegmentInformation.builder().name(streamSegmentName).length(1234).startOffset(123).build();
    when(store.getStreamSegmentInfo(streamSegmentName, false, PravegaRequestProcessor.TIMEOUT)).thenReturn(CompletableFuture.completedFuture(info));
    // Execute and verify that the readSegment call chain through the connection and store runs as designed.
    processor.readSegment(new WireCommands.ReadSegment(streamSegmentName, 0, readLength, ""));
    verify(store).read(streamSegmentName, 0, readLength, PravegaRequestProcessor.TIMEOUT);
    verify(store).getStreamSegmentInfo(streamSegmentName, false, PravegaRequestProcessor.TIMEOUT);
    // A Truncated entry results in a SegmentIsTruncated reply carrying the segment's start offset.
    verify(connection).send(new WireCommands.SegmentIsTruncated(0, streamSegmentName, info.getStartOffset()));
    verifyNoMoreInteractions(connection);
    verifyNoMoreInteractions(store);
}
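TestReadResultEntry is a helper defined elsewhere in PravegaRequestProcessorTest and not shown on this page. As a rough orientation, a minimal double could look like the sketch below, inferred purely from how these tests construct it and call complete(), getType(), getContent(), and requestContent(); the field layout and the plain-class shape (rather than whatever base type the real helper extends or implements) are assumptions.

// Hypothetical sketch of the TestReadResultEntry helper, inferred from its usage in
// these tests. The real helper implements io.pravega.segmentstore.contracts.ReadResultEntry;
// only the members these tests touch are reproduced here.
class TestReadResultEntry {
    private final ReadResultEntryType type;
    private final long streamSegmentOffset;
    private final int requestedReadLength;
    private final CompletableFuture<ReadResultEntryContents> contents = new CompletableFuture<>();

    TestReadResultEntry(ReadResultEntryType type, long streamSegmentOffset, int requestedReadLength) {
        this.type = type;
        this.streamSegmentOffset = streamSegmentOffset;
        this.requestedReadLength = requestedReadLength;
    }

    // Tests call this to make data available (e.g. for Cache entries, or to simulate a Future read completing later).
    void complete(ReadResultEntryContents c) {
        this.contents.complete(c);
    }

    ReadResultEntryType getType() {
        return this.type;
    }

    CompletableFuture<ReadResultEntryContents> getContent() {
        return this.contents;
    }

    void requestContent(Duration timeout) {
        // No-op in this sketch; the tests complete the future explicitly.
    }
}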
Use of io.pravega.segmentstore.contracts.ReadResult in project pravega by pravega.
From class PravegaRequestProcessorTest, method testReadSegment.
@Test(timeout = 20000)
public void testReadSegment() {
    // Set up a PravegaRequestProcessor instance to execute the ReadSegment request against.
    String streamSegmentName = "testReadSegment";
    byte[] data = new byte[] { 1, 2, 3, 4, 6, 7, 8, 9 };
    int readLength = 1000;
    StreamSegmentStore store = mock(StreamSegmentStore.class);
    ServerConnection connection = mock(ServerConnection.class);
    PravegaRequestProcessor processor = new PravegaRequestProcessor(store, connection);
    TestReadResultEntry entry1 = new TestReadResultEntry(ReadResultEntryType.Cache, 0, readLength);
    entry1.complete(new ReadResultEntryContents(new ByteArrayInputStream(data), data.length));
    TestReadResultEntry entry2 = new TestReadResultEntry(ReadResultEntryType.Future, data.length, readLength);
    List<ReadResultEntry> results = new ArrayList<>();
    results.add(entry1);
    results.add(entry2);
    CompletableFuture<ReadResult> readResult = new CompletableFuture<>();
    readResult.complete(new TestReadResult(0, readLength, results));
    when(store.read(streamSegmentName, 0, readLength, PravegaRequestProcessor.TIMEOUT)).thenReturn(readResult);
    // Execute and verify that the readSegment call chain through the connection and store runs as designed.
    processor.readSegment(new WireCommands.ReadSegment(streamSegmentName, 0, readLength, ""));
    verify(store).read(streamSegmentName, 0, readLength, PravegaRequestProcessor.TIMEOUT);
    verify(connection).send(new WireCommands.SegmentRead(streamSegmentName, 0, true, false, ByteBuffer.wrap(data)));
    verifyNoMoreInteractions(connection);
    verifyNoMoreInteractions(store);
    // Completing the Future entry after the reply has been sent must not trigger any further interactions.
    entry2.complete(new ReadResultEntryContents(new ByteArrayInputStream(data), data.length));
    verifyNoMoreInteractions(connection);
    verifyNoMoreInteractions(store);
}
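TestReadResult is likewise a test double. Since the processor consumes it as a sequence of entries, a minimal version could simply walk the pre-built list; the sketch below is a hypothetical reconstruction (the real helper implements io.pravega.segmentstore.contracts.ReadResult, whose remaining members are omitted here).

// Hypothetical sketch of the TestReadResult helper: a read result backed by a
// pre-built entry list. Only the iteration members exercised in these examples
// (hasNext()/next() also appear in the ContainerReadIndexTests below) are shown.
class TestReadResult {
    private final long streamSegmentStartOffset;
    private final int maxResultLength;
    private final Iterator<ReadResultEntry> entries;

    TestReadResult(long streamSegmentStartOffset, int maxResultLength, List<ReadResultEntry> entries) {
        this.streamSegmentStartOffset = streamSegmentStartOffset;
        this.maxResultLength = maxResultLength;
        this.entries = entries.iterator();
    }

    boolean hasNext() {
        return this.entries.hasNext();
    }

    ReadResultEntry next() {
        return this.entries.next();
    }
}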
Use of io.pravega.segmentstore.contracts.ReadResult in project pravega by pravega.
From class PravegaRequestProcessorTest, method testReadSegmentEmptySealed.
@Test(timeout = 20000)
public void testReadSegmentEmptySealed() {
    // Set up a PravegaRequestProcessor instance to execute the ReadSegment request against.
    String streamSegmentName = "testReadSegment";
    int readLength = 1000;
    StreamSegmentStore store = mock(StreamSegmentStore.class);
    ServerConnection connection = mock(ServerConnection.class);
    PravegaRequestProcessor processor = new PravegaRequestProcessor(store, connection);
    TestReadResultEntry entry1 = new TestReadResultEntry(ReadResultEntryType.EndOfStreamSegment, 0, readLength);
    List<ReadResultEntry> results = new ArrayList<>();
    results.add(entry1);
    CompletableFuture<ReadResult> readResult = new CompletableFuture<>();
    readResult.complete(new TestReadResult(0, readLength, results));
    when(store.read(streamSegmentName, 0, readLength, PravegaRequestProcessor.TIMEOUT)).thenReturn(readResult);
    // Execute and verify that the readSegment call chain through the connection and store runs as designed.
    processor.readSegment(new WireCommands.ReadSegment(streamSegmentName, 0, readLength, ""));
    verify(store).read(streamSegmentName, 0, readLength, PravegaRequestProcessor.TIMEOUT);
    // An empty, sealed segment yields an immediate end-of-segment reply with no data.
    verify(connection).send(new WireCommands.SegmentRead(streamSegmentName, 0, false, true, ByteBuffer.wrap(new byte[0])));
    verifyNoMoreInteractions(connection);
    verifyNoMoreInteractions(store);
}
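Taken together, the three tests above cover four of the entry types a reader can encounter. As a rough orientation, a consumer loop dispatching on those types might look like the sketch below; the send* methods are hypothetical placeholders, and this is not PravegaRequestProcessor's actual logic, only an illustration built from the ReadResultEntry calls these tests exercise.

// Illustrative dispatch over the entry types seen in the tests above. Only
// entry.getType(), getContent(), and requestContent() come from the examples on
// this page; the drain(...) method and send* helpers are hypothetical.
private void drain(ReadResult readResult) {
    while (readResult.hasNext()) {
        ReadResultEntry entry = readResult.next();
        switch (entry.getType()) {
            case Cache:              // Data readily available in memory: reply immediately.
                entry.getContent().thenAccept(this::sendSegmentRead);
                break;
            case Future:             // Data not yet written: request it; completes on a later append.
                entry.requestContent(PravegaRequestProcessor.TIMEOUT);
                break;
            case Truncated:          // Requested offset lies before the segment's start offset.
                sendSegmentIsTruncated();
                return;
            case EndOfStreamSegment: // Segment is sealed and fully read.
                sendEndOfSegment();
                return;
            default:
                break;
        }
    }
}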
Use of io.pravega.segmentstore.contracts.ReadResult in project pravega by pravega.
From class ContainerReadIndexTests, method testTruncateConcurrently.
/**
 * Tests a truncation that happens concurrently with reading (the segment is truncated while it is being read).
 */
@Test
public void testTruncateConcurrently() throws Exception {
    @Cleanup TestContext context = new TestContext();
    List<Long> segmentIds = createSegments(context).subList(0, 1);
    long segmentId = segmentIds.get(0);
    ByteArrayOutputStream segmentContents = new ByteArrayOutputStream();
    appendData(segmentIds, Collections.singletonMap(segmentId, segmentContents), context);
    // Begin a read result.
    UpdateableSegmentMetadata sm = context.metadata.getStreamSegmentMetadata(segmentId);
    @Cleanup ReadResult rr = context.readIndex.read(segmentId, 0, (int) sm.getLength(), TIMEOUT);
    ReadResultEntry firstEntry = rr.next();
    firstEntry.requestContent(TIMEOUT);
    int firstEntryLength = firstEntry.getContent().join().getLength();
    AssertExtensions.assertLessThan("Unexpected length of the first read result entry.", sm.getLength(), firstEntryLength);
    // Truncate the segment just past the end of the first returned read result entry.
    sm.setStartOffset(firstEntryLength + 1);
    ReadResultEntry secondEntry = rr.next();
    Assert.assertTrue("Unexpected ReadResultEntryType.isTerminal of truncated result entry.", secondEntry.getType().isTerminal());
    Assert.assertEquals("Unexpected ReadResultEntryType of truncated result entry.", ReadResultEntryType.Truncated, secondEntry.getType());
    AssertExtensions.assertThrows("Expecting getContent() to return a failed CompletableFuture.", secondEntry::getContent, ex -> ex instanceof StreamSegmentTruncatedException);
    Assert.assertFalse("Unexpected result from hasNext after processing terminal result entry.", rr.hasNext());
}
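A consumer that races with truncation like this typically recovers by reissuing the read past the segment's new start offset. Below is a hedged sketch of such a recovery loop, reusing only the context.readIndex.read(...) call and metadata accessors seen above; the getStartOffset() accessor on the metadata is assumed to mirror the setStartOffset() call in the test.

// Hypothetical recovery loop: on a Truncated entry, skip ahead to the segment's new
// start offset and reissue the read. Built from calls shown in the test above.
private void readWithTruncationRetry(TestContext context, UpdateableSegmentMetadata sm, long segmentId) {
    long offset = 0;
    while (offset < sm.getLength()) {
        @Cleanup ReadResult rr = context.readIndex.read(segmentId, offset, (int) (sm.getLength() - offset), TIMEOUT);
        while (rr.hasNext()) {
            ReadResultEntry entry = rr.next();
            if (entry.getType() == ReadResultEntryType.Truncated) {
                offset = sm.getStartOffset(); // Resume past the truncation point.
                break;
            }
            entry.requestContent(TIMEOUT);
            offset += entry.getContent().join().getLength();
        }
    }
}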
Use of io.pravega.segmentstore.contracts.ReadResult in project pravega by pravega.
From class ContainerReadIndexTests, method testFutureReads.
/**
 * Tests the behavior of Future Reads. Scenarios tested include:
 * - Regular appends.
 * - Segment sealing.
 * - Transaction merging.
 */
@Test
@SuppressWarnings("checkstyle:CyclomaticComplexity")
public void testFutureReads() throws Exception {
    // About 40-50% of the entire segment length.
    final int nonSealReadLimit = APPENDS_PER_SEGMENT * 25;
    // How many appends between Future-read triggers.
    final int triggerFutureReadsEvery = 3;
    @Cleanup TestContext context = new TestContext();
    ArrayList<Long> segmentIds = createSegments(context);
    HashMap<Long, ArrayList<Long>> transactionsBySegment = createTransactions(segmentIds, context);
    HashMap<Long, ByteArrayOutputStream> segmentContents = new HashMap<>();
    HashMap<Long, ByteArrayOutputStream> readContents = new HashMap<>();
    HashSet<Long> segmentsToSeal = new HashSet<>();
    ArrayList<AsyncReadResultProcessor> readProcessors = new ArrayList<>();
    HashMap<Long, TestReadResultHandler> entryHandlers = new HashMap<>();
    // 1. Put all segment ids into one list, for easier appends (but still keep the original lists at hand - we'll need them later).
    ArrayList<Long> allSegmentIds = new ArrayList<>(segmentIds);
    transactionsBySegment.values().forEach(allSegmentIds::addAll);
    AtomicInteger writeCount = new AtomicInteger();
    Runnable triggerFutureReadsCallback = () -> {
        if (writeCount.incrementAndGet() % triggerFutureReadsEvery == 0) {
            context.readIndex.triggerFutureReads(segmentIds);
        }
    };
    // 2. Start reading all segments (reads on to-be-sealed segments are unbounded and unblock upon sealing; the others should stop upon reaching the limit).
    for (int i = 0; i < segmentIds.size(); i++) {
        long segmentId = segmentIds.get(i);
        ByteArrayOutputStream readContentsStream = new ByteArrayOutputStream();
        readContents.put(segmentId, readContentsStream);
        ReadResult readResult;
        if (i < segmentIds.size() / 2) {
            // We're going to seal this one at some point.
            segmentsToSeal.add(segmentId);
            readResult = context.readIndex.read(segmentId, 0, Integer.MAX_VALUE, TIMEOUT);
        } else {
            // Just a regular one, nothing special.
            readResult = context.readIndex.read(segmentId, 0, nonSealReadLimit, TIMEOUT);
        }
        // The read callback only accumulates data in this test; we will later compare it against the real data.
        TestReadResultHandler entryHandler = new TestReadResultHandler(readContentsStream, TIMEOUT);
        entryHandlers.put(segmentId, entryHandler);
        readProcessors.add(AsyncReadResultProcessor.process(readResult, entryHandler, executorService()));
    }
    // 3. Add a bunch of writes.
    appendData(allSegmentIds, segmentContents, context, triggerFutureReadsCallback);
    // 4. Merge all the Transactions.
    beginMergeTransactions(transactionsBySegment, segmentContents, context);
    completeMergeTransactions(transactionsBySegment, context);
    context.readIndex.triggerFutureReads(segmentIds);
    // 5. Add more appends (to the parent segments).
    for (int i = 0; i < 5; i++) {
        for (long segmentId : segmentIds) {
            UpdateableSegmentMetadata segmentMetadata = context.metadata.getStreamSegmentMetadata(segmentId);
            byte[] data = getAppendData(segmentMetadata.getName(), segmentId, i, writeCount.incrementAndGet());
            // Make sure we increase the Length prior to appending; the ReadIndex checks for this.
            long offset = segmentMetadata.getLength();
            segmentMetadata.setLength(offset + data.length);
            context.readIndex.append(segmentId, offset, data);
            recordAppend(segmentId, data, segmentContents);
            triggerFutureReadsCallback.run();
        }
    }
    // 6. Seal those segments that we need to seal.
    segmentsToSeal.forEach(segmentId -> context.metadata.getStreamSegmentMetadata(segmentId).markSealed());
    // Trigger future reads on all segments we know about; some may not have had a trigger in a while (see callback above).
    context.readIndex.triggerFutureReads(segmentIds);
    // Now wait for all the reads to complete, and verify their results against the expected output.
    Futures.allOf(entryHandlers.values().stream().map(TestReadResultHandler::getCompleted).collect(Collectors.toList())).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    readProcessors.forEach(AsyncReadResultProcessor::close);
    // Check to see if any errors were thrown (and caught) during the reading process.
    for (Map.Entry<Long, TestReadResultHandler> e : entryHandlers.entrySet()) {
        Throwable err = e.getValue().getError().get();
        if (err != null) {
            // A StreamSegmentSealedException is expected for sealed segments; the byte-by-byte check below verifies they were read correctly.
            if (!(err instanceof StreamSegmentSealedException && segmentsToSeal.contains(e.getKey()))) {
                Assert.fail("Unexpected error happened while processing Segment " + e.getKey() + ": " + e.getValue().getError().get());
            }
        }
    }
    // Compare, byte-by-byte, the outcome of the tail reads.
    Assert.assertEquals("Unexpected number of segments were read.", segmentContents.size(), readContents.size());
    for (long segmentId : segmentIds) {
        boolean isSealed = segmentsToSeal.contains(segmentId);
        byte[] expectedData = segmentContents.get(segmentId).toByteArray();
        byte[] actualData = readContents.get(segmentId).toByteArray();
        int expectedLength = isSealed ? expectedData.length : nonSealReadLimit;
        Assert.assertEquals("Unexpected read length for segment " + segmentId, expectedLength, actualData.length);
        AssertExtensions.assertArrayEquals("Unexpected read contents for segment " + segmentId, expectedData, 0, actualData, 0, actualData.length);
    }
}
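TestReadResultHandler, which drives AsyncReadResultProcessor above, is another helper from the Pravega test sources that is not shown on this page. Judging only from the calls the test makes (getCompleted() and getError()), its observable state plausibly looks like the sketch below; this is a hypothetical reconstruction, not the real class, and the accept(...) callback is illustrative.

// Hypothetical reconstruction of TestReadResultHandler's observable state, inferred
// from getCompleted()/getError() in the test above. The real class also implements
// the callback interface that AsyncReadResultProcessor expects, omitted here.
class TestReadResultHandler {
    private final ByteArrayOutputStream target;
    private final Duration timeout;
    private final CompletableFuture<Void> completed = new CompletableFuture<>();
    private final AtomicReference<Throwable> error = new AtomicReference<>();

    TestReadResultHandler(ByteArrayOutputStream target, Duration timeout) {
        this.target = target;
        this.timeout = timeout;
    }

    CompletableFuture<Void> getCompleted() {
        return this.completed;
    }

    AtomicReference<Throwable> getError() {
        return this.error;
    }

    // Illustrative entry callback: accumulate each entry's bytes for later comparison.
    void accept(byte[] entryData) {
        this.target.write(entryData, 0, entryData.length);
    }
}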