use of io.pravega.segmentstore.contracts.ReadResultEntry in project pravega by pravega.
In the class ContainerReadIndexTests, method testStorageFailedReads.
/**
 * Tests the ability to handle Storage read failures.
 */
@Test
public void testStorageFailedReads() {
    // Create all segments (Storage and Metadata).
    @Cleanup TestContext context = new TestContext();
    ArrayList<Long> segmentIds = createSegments(context);
    createSegmentsInStorage(context);

    // Read beyond the actual data in Storage (metadata is corrupt).
    long testSegmentId = segmentIds.get(0);
    UpdateableSegmentMetadata sm = context.metadata.getStreamSegmentMetadata(testSegmentId);
    sm.setStorageLength(1024 * 1024);
    sm.setLength(1024 * 1024);
    AssertExtensions.assertThrows("Unexpected exception when attempting to read beyond the Segment length in Storage.", () -> {
        @Cleanup ReadResult readResult = context.readIndex.read(testSegmentId, 0, 100, TIMEOUT);
        Assert.assertTrue("Unexpected value from hasNext() when there should be at least one ReadResultEntry.", readResult.hasNext());
        ReadResultEntry entry = readResult.next();
        Assert.assertEquals("Unexpected ReadResultEntryType.", ReadResultEntryType.Storage, entry.getType());
        entry.requestContent(TIMEOUT);
        entry.getContent().get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    }, ex -> ex instanceof ArrayIndexOutOfBoundsException);

    // Segment does not exist in Storage (but still exists in metadata).
    val handle = context.storage.openWrite(sm.getName()).join();
    context.storage.delete(handle, TIMEOUT).join();
    AssertExtensions.assertThrows("Unexpected exception when attempting to read from a segment that exists in Metadata, but not in Storage.", () -> {
        @Cleanup ReadResult readResult = context.readIndex.read(testSegmentId, 0, 100, TIMEOUT);
        Assert.assertTrue("Unexpected value from hasNext() when there should be at least one ReadResultEntry.", readResult.hasNext());
        ReadResultEntry entry = readResult.next();
        Assert.assertEquals("Unexpected ReadResultEntryType.", ReadResultEntryType.Storage, entry.getType());
        entry.requestContent(TIMEOUT);
        entry.getContent().get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    }, ex -> ex instanceof StreamSegmentNotExistsException);
}
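When a Storage-backed entry fails like this, the failure surfaces through the entry's content future. A minimal sketch of how a caller might recover the underlying cause, assuming only the ReadResult/ReadResultEntry API exercised above (the surrounding method and its error handling are illustrative, not Pravega's own):

// Illustrative only: read one entry and unwrap any Storage failure from the content future.
private void tryReadOnce(ReadResult readResult) throws Exception {
    if (!readResult.hasNext()) {
        return;
    }
    ReadResultEntry entry = readResult.next();
    entry.requestContent(TIMEOUT);
    try {
        entry.getContent().get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    } catch (ExecutionException ex) {
        // get() wraps the Storage failure; unwrap it to decide how to react.
        Throwable cause = ex.getCause();
        if (cause instanceof StreamSegmentNotExistsException) {
            // The segment is known to metadata but gone from Storage (second scenario above).
            throw (StreamSegmentNotExistsException) cause;
        }
        throw ex;
    }
}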
use of io.pravega.segmentstore.contracts.ReadResultEntry in project pravega by pravega.
In the class StreamSegmentReadResultTests, method testNextWaitOnPrevious.
/**
 * Tests the ability to only return a next item once the previously returned item has been consumed.
 */
@Test
public void testNextWaitOnPrevious() throws Exception {
    AtomicReference<TestReadResultEntry> nextEntry = new AtomicReference<>();
    StreamSegmentReadResult.NextEntrySupplier nes = (offset, length) -> nextEntry.get();

    // We issue a read, get one item, do not consume it, and then read a second time.
    @Cleanup StreamSegmentReadResult r = new StreamSegmentReadResult(START_OFFSET, MAX_RESULT_LENGTH, nes, "");
    nextEntry.set(TestReadResultEntry.cache(START_OFFSET, MAX_RESULT_LENGTH));
    TestReadResultEntry firstEntry = (TestReadResultEntry) r.next();

    // Immediately request a second item, without properly consuming the first item.
    nextEntry.set(TestReadResultEntry.cache(START_OFFSET + READ_ITEM_LENGTH, MAX_RESULT_LENGTH));
    AssertExtensions.assertThrows("Second read was allowed even though the first read did not complete.", r::next, ex -> ex instanceof IllegalStateException);

    // Once the first item is completed (consumed), the next item can be retrieved.
    firstEntry.complete(new ReadResultEntryContents(null, READ_ITEM_LENGTH));
    ReadResultEntry secondEntry = r.next();
    Assert.assertEquals("Unexpected result from nextEntry.", nextEntry.get(), secondEntry);
}
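The contract exercised here is that next() may only be called again once the previously returned entry has completed. A consumer loop that respects that contract might look like the sketch below (process() is a hypothetical sink; everything else is the API used by the tests):

// Illustrative only: drain a ReadResult while honoring the "complete before next()" contract verified above.
private void drain(ReadResult readResult) {
    while (readResult.hasNext()) {
        ReadResultEntry entry = readResult.next();
        if (!entry.getContent().isDone()) {
            // Non-cached entries must be explicitly requested.
            entry.requestContent(TIMEOUT);
        }
        // Block until this entry completes; only then is it safe to call next() again.
        ReadResultEntryContents contents = entry.getContent().join();
        process(contents); // hypothetical application-specific sink
    }
}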
use of io.pravega.segmentstore.contracts.ReadResultEntry in project pravega by pravega.
In the class StreamSegmentReadResultTests, method testNextFullyConsumed.
/**
 * Tests the next() method, which ends when the result is fully consumed (via offsets).
 */
@Test
public void testNextFullyConsumed() {
    AtomicReference<TestReadResultEntry> nextEntry = new AtomicReference<>();
    StreamSegmentReadResult.NextEntrySupplier nes = (offset, length) -> nextEntry.get();

    // We issue a read with length = MAX_RESULT_LENGTH and return items READ_ITEM_LENGTH bytes at a time.
    @Cleanup StreamSegmentReadResult r = new StreamSegmentReadResult(START_OFFSET, MAX_RESULT_LENGTH, nes, "");
    int expectedConsumedLength = 0;
    for (int i = 0; i < MAX_RESULT_LENGTH; i += READ_ITEM_LENGTH) {
        // Set up an item to be returned.
        final long expectedStartOffset = START_OFFSET + i;
        final int expectedReadLength = MAX_RESULT_LENGTH - i;
        nextEntry.set(TestReadResultEntry.cache(expectedStartOffset, expectedReadLength));

        // Get the result and verify we get exactly what we supplied.
        Assert.assertTrue("hasNext() returned false even though we haven't consumed the entire result.", r.hasNext());
        ReadResultEntry resultEntry = r.next();
        Assert.assertEquals("Unexpected result from nextEntry.", nextEntry.get(), resultEntry);

        // Verify the StreamSegmentReadResult does not update itself after returning a result.
        Assert.assertEquals("getStreamSegmentStartOffset changed while iterating.", START_OFFSET, r.getStreamSegmentStartOffset());
        Assert.assertEquals("getMaxResultLength changed while iterating.", MAX_RESULT_LENGTH, r.getMaxResultLength());
        Assert.assertEquals("Unexpected value from getConsumedLength after returning a value but before completing result future.", expectedConsumedLength, r.getConsumedLength());

        // Verify the StreamSegmentReadResult updates itself after the last returned result's future is completed.
        nextEntry.get().complete(new ReadResultEntryContents(null, READ_ITEM_LENGTH));
        expectedConsumedLength += READ_ITEM_LENGTH;
        Assert.assertEquals("Unexpected value from getConsumedLength after returning a value and completing result future.", expectedConsumedLength, r.getConsumedLength());
    }

    // Verify we have reached the end.
    Assert.assertEquals("Unexpected state of the StreamSegmentReadResult when consuming the entire result.", r.getMaxResultLength(), r.getConsumedLength());
    Assert.assertFalse("hasNext() did not return false when the entire result is consumed.", r.hasNext());
    ReadResultEntry resultEntry = r.next();
    Assert.assertNull("next() did not return null when it was done.", resultEntry);
}
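The NextEntrySupplier is the only dependency these tests inject: it receives the next (offset, remaining length) pair and returns the entry to hand out. For illustration, such a supplier could be wired directly rather than through an AtomicReference; a rough sketch reusing the test's own TestReadResultEntry.cache helper (the null-on-exhaustion convention is an assumption about how a supplier might signal end-of-data):

// Illustrative wiring only: serve fixed-size cached entries until the requested range is exhausted.
StreamSegmentReadResult.NextEntrySupplier supplier = (offset, length) ->
        length <= 0 ? null : TestReadResultEntry.cache(offset, Math.min(length, READ_ITEM_LENGTH));
@Cleanup StreamSegmentReadResult result = new StreamSegmentReadResult(START_OFFSET, MAX_RESULT_LENGTH, supplier, "");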
use of io.pravega.segmentstore.contracts.ReadResultEntry in project pravega by pravega.
In the class StreamSegmentReadResultTests, method testClose.
/**
 * Tests the ability to close the result and cancel any items that were returned.
 */
@Test
public void testClose() {
    AtomicReference<TestReadResultEntry> nextEntry = new AtomicReference<>();
    StreamSegmentReadResult.NextEntrySupplier nes = (offset, length) -> nextEntry.get();

    // We issue a read with length = MAX_RESULT_LENGTH, but we only get to read one item from it.
    StreamSegmentReadResult r = new StreamSegmentReadResult(START_OFFSET, MAX_RESULT_LENGTH, nes, "");
    nextEntry.set(TestReadResultEntry.cache(START_OFFSET, MAX_RESULT_LENGTH));
    ReadResultEntry resultEntry = r.next();

    // Close the result and verify we cannot read from it anymore and that the pending future is now canceled.
    r.close();
    Assert.assertTrue("Already returned result future is not canceled after closing the ReadResult.", resultEntry.getContent().isCancelled());
    Assert.assertFalse("hasNext() did not return false after closing the ReadResult.", r.hasNext());
    AssertExtensions.assertThrows("next() did not throw an appropriate exception when the ReadResult is closed.", r::next, ex -> ex instanceof ObjectClosedException);
}
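Since close() cancels any outstanding entry future, callers typically scope the result so it is always closed, either with Lombok's @Cleanup (as the other tests here do) or with try-with-resources, assuming ReadResult is AutoCloseable. A brief sketch of the latter; readIndex, segmentId, offset and length stand in for a real call site:

// Illustrative only: scope the ReadResult so an early return or exception closes it.
try (ReadResult readResult = readIndex.read(segmentId, offset, length, TIMEOUT)) {
    if (readResult.hasNext()) {
        ReadResultEntry entry = readResult.next();
        if (!entry.getContent().isDone()) {
            entry.requestContent(TIMEOUT);
        }
        // Leaving this block before the future completes closes the result,
        // which cancels the pending future, exactly as testClose() verifies.
    }
}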
use of io.pravega.segmentstore.contracts.ReadResultEntry in project pravega by pravega.
In the class PravegaRequestProcessor, method handleReadResult.
/**
 * Handles a ReadResult.
 * If there are cached entries that can be returned without blocking, only those are returned.
 * Otherwise the call requests the data and sets up a callback to return it when it becomes available.
 * If no data is available but the Segment is found to have been truncated beyond the current offset,
 * an appropriate message is sent back over the connection.
 */
private void handleReadResult(ReadSegment request, ReadResult result) {
    String segment = request.getSegment();
    ArrayList<ReadResultEntryContents> cachedEntries = new ArrayList<>();
    ReadResultEntry nonCachedEntry = collectCachedEntries(request.getOffset(), result, cachedEntries);
    boolean truncated = nonCachedEntry != null && nonCachedEntry.getType() == Truncated;
    boolean endOfSegment = nonCachedEntry != null && nonCachedEntry.getType() == EndOfStreamSegment;
    boolean atTail = nonCachedEntry != null && nonCachedEntry.getType() == Future;

    if (!cachedEntries.isEmpty() || endOfSegment) {
        // We managed to collect some data. Send it.
        ByteBuffer data = copyData(cachedEntries);
        SegmentRead reply = new SegmentRead(segment, request.getOffset(), atTail, endOfSegment, data);
        connection.send(reply);
    } else if (truncated) {
        // We didn't collect any data; instead we determined that the current read offset was truncated.
        // Determine the current Start Offset and send that back.
        segmentStore.getStreamSegmentInfo(segment, false, TIMEOUT)
                    .thenAccept(info -> connection.send(new SegmentIsTruncated(nonCachedEntry.getStreamSegmentOffset(), segment, info.getStartOffset())))
                    .exceptionally(e -> handleException(nonCachedEntry.getStreamSegmentOffset(), segment, "Read segment", e));
    } else {
        Preconditions.checkState(nonCachedEntry != null, "No ReadResultEntries returned from read!?");
        nonCachedEntry.requestContent(TIMEOUT);
        nonCachedEntry.getContent().thenAccept(contents -> {
            ByteBuffer data = copyData(Collections.singletonList(contents));
            connection.send(new SegmentRead(segment, nonCachedEntry.getStreamSegmentOffset(), false, endOfSegment, data));
        }).exceptionally(e -> {
            if (Exceptions.unwrap(e) instanceof StreamSegmentTruncatedException) {
                // The Segment may have been truncated in Storage after we got this entry but before we managed
                // to make a read. In that case, send the appropriate error back.
                connection.send(new SegmentIsTruncated(nonCachedEntry.getStreamSegmentOffset(), segment, nonCachedEntry.getStreamSegmentOffset()));
            } else {
                handleException(nonCachedEntry.getStreamSegmentOffset(), segment, "Read segment", e);
            }
            return null;
        }).exceptionally(e -> handleException(nonCachedEntry.getStreamSegmentOffset(), segment, "Read segment", e));
    }
}
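collectCachedEntries is used above but not shown. Based on how handleReadResult interprets its return value, a plausible sketch follows; the offset bookkeeping and the exact checks are assumptions, not the actual Pravega implementation:

// Illustrative sketch: gathers the contents of all immediately available (cached) entries, in order,
// and returns the first entry that would require waiting (Storage, Future, Truncated, EndOfStreamSegment),
// or null if the ReadResult was exhausted.
private ReadResultEntry collectCachedEntries(long initialOffset, ReadResult readResult,
                                             ArrayList<ReadResultEntryContents> cachedEntries) {
    long expectedOffset = initialOffset;
    while (readResult.hasNext()) {
        ReadResultEntry entry = readResult.next();
        if (entry.getType() == Cache) {
            // Cached entries complete immediately, so their contents can be taken without blocking.
            assert entry.getStreamSegmentOffset() == expectedOffset : "unexpected entry offset";
            ReadResultEntryContents contents = entry.getContent().getNow(null);
            expectedOffset += contents.getLength();
            cachedEntries.add(contents);
        } else {
            return entry;
        }
    }
    return null;
}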