use of io.pravega.shared.protocol.netty.WireCommands.ReadSegment in project pravega by pravega.
the class ReadTest method testReceivingReadCall.
@Test(timeout = 10000)
public void testReceivingReadCall() throws Exception {
    String segmentName = "testReceivingReadCall";
    int entries = 10;
    byte[] data = new byte[] { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 };
    StreamSegmentStore segmentStore = SERVICE_BUILDER.createStreamSegmentService();
    // Fill the segment store with 10 entries; the total data size is 100 bytes.
    fillStoreForSegment(segmentName, data, entries, segmentStore);
    @Cleanup EmbeddedChannel channel = AppendTest.createChannel(segmentStore);
    ByteBuf actual = Unpooled.buffer(entries * data.length);
    while (actual.writerIndex() < actual.capacity()) {
        SegmentRead result = (SegmentRead) AppendTest.sendRequest(channel, new ReadSegment(segmentName, actual.writerIndex(), 10000, "", 1L));
        assertEquals(segmentName, result.getSegment());
        assertEquals(result.getOffset(), actual.writerIndex());
        assertFalse(result.isEndOfSegment());
        actual.writeBytes(result.getData());
        // Release the ByteBuf and ensure it is deallocated.
        assertTrue(result.getData().release());
        if (actual.writerIndex() < actual.capacity()) {
            // Prevent entering a tight loop by giving the store a bit of time to process all the appends
            // internally before trying again.
            Thread.sleep(10);
        } else {
            // Verify the last read result has the atTail flag set to true.
            assertTrue(result.isAtTail());
            // Finish the channel and verify it has no pending messages left.
            assertFalse(channel.finish());
        }
    }
    ByteBuf expected = Unpooled.buffer(entries * data.length);
    for (int i = 0; i < entries; i++) {
        expected.writeBytes(data);
    }
    expected.writerIndex(expected.capacity()).resetReaderIndex();
    actual.writerIndex(actual.capacity()).resetReaderIndex();
    assertEquals(expected, actual);
    // Release the ByteBufs and ensure they are deallocated.
    assertTrue(actual.release());
    assertTrue(expected.release());
}
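For context, fillStoreForSegment is a ReadTest helper that is not shown in this snippet. Below is a minimal sketch of what it is assumed to do, namely create the segment and then append entries copies of data; the real helper's signature and the exact StreamSegmentStore overloads it uses may differ.
// Assumed shape of the helper used above, not the actual ReadTest implementation.
// Uses io.pravega.segmentstore.contracts.SegmentType, io.pravega.common.util.ByteArraySegment and java.time.Duration.
private void fillStoreForSegment(String segmentName, byte[] data, int numEntries, StreamSegmentStore segmentStore) throws Exception {
    segmentStore.createStreamSegment(segmentName, SegmentType.STREAM_SEGMENT, null, Duration.ofSeconds(10)).get();
    for (int i = 0; i < numEntries; i++) {
        // Each append stores one copy of `data`; 10 entries of 10 bytes give the 100 bytes read back above.
        segmentStore.append(segmentName, new ByteArraySegment(data), null, Duration.ofSeconds(10)).get();
    }
}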
use of io.pravega.shared.protocol.netty.WireCommands.ReadSegment in project pravega by pravega.
the class CommandEncoderTest method testFlushing.
@Test
public void testFlushing() throws Exception {
    UnpooledByteBufAllocator allocator = new UnpooledByteBufAllocator(false, false);
    CommandEncoder commandEncoder = new CommandEncoder(s -> new TestBatchSizeTracker(0), new TestMetricNotifier());
    verifyFlush(commandEncoder, allocator, new Hello(1, 2));
    verifyFlush(commandEncoder, allocator, new KeepAlive());
    UUID uuid = new UUID(1, 2);
    verifyFlush(commandEncoder, allocator, new SetupAppend(1, uuid, "segment", ""));
    verifyFlush(commandEncoder, allocator, new Append("segment", uuid, 1L, new Event(allocator.buffer()), 1L));
    allocator = new UnpooledByteBufAllocator(false, false);
    commandEncoder = new CommandEncoder(s -> new TestBatchSizeTracker(1000), new TestMetricNotifier());
    verifyFlush(commandEncoder, allocator, new Hello(1, 2));
    verifyFlush(commandEncoder, allocator, new KeepAlive());
    verifyFlush(commandEncoder, allocator, new SetupAppend(1, uuid, "segment", ""));
    ByteBuf buffer = allocator.buffer();
    verifyNoFlush(commandEncoder, allocator, new Append("segment", uuid, 1L, new Event(buffer), 1L));
    buffer = allocator.buffer();
    buffer.writeBytes(new byte[400]);
    verifyNoFlush(commandEncoder, allocator, new Append("segment", uuid, 2L, new Event(buffer), 1L));
    buffer = allocator.buffer();
    buffer.writeBytes(new byte[400]);
    verifyNoFlush(commandEncoder, allocator, new Append("segment", uuid, 3L, new Event(buffer), 1L));
    buffer = allocator.buffer();
    buffer.writeBytes(new byte[400]);
    verifyFlush(commandEncoder, allocator, new Append("segment", uuid, 4L, new Event(buffer), 1L));
    buffer = allocator.buffer();
    buffer.writeBytes(new byte[400]);
    verifyNoFlush(commandEncoder, allocator, new Append("segment", uuid, 5L, new Event(buffer), 1L));
    verifyFlush(commandEncoder, allocator, new ReadSegment("segment", 0, 1000, "", 2L));
}
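The flush pattern above is driven by the block size the tracker reports: with a TestBatchSizeTracker of 0 every Append flushes immediately, with 1000 the encoder accumulates appends until the pending bytes exceed the block size, and a non-Append command such as the final ReadSegment forces any pending batch out. The following is a self-contained sketch of that per-Append decision; the name and the exact overhead accounting are assumptions, not the real CommandEncoder logic.
// Illustrative only: the flush decision the encoder is assumed to make after each Append.
static boolean shouldFlushAfterAppend(int bytesPendingAfterAppend, int appendBlockSize) {
    // Block size 0: any non-empty batch exceeds the threshold, so every append flushes immediately.
    // Block size 1000: 400-byte events accumulate, and only the append that pushes the pending
    // total (payload plus framing overhead) past 1000 bytes triggers a flush; a new batch then starts.
    return bytesPendingAfterAppend > appendBlockSize;
}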
use of io.pravega.shared.protocol.netty.WireCommands.ReadSegment in project pravega by pravega.
the class PravegaRequestProcessor method readSegment.
// endregion
// region RequestProcessor Implementation
@Override
public void readSegment(ReadSegment readSegment) {
    Timer timer = new Timer();
    final String segment = readSegment.getSegment();
    final String operation = "readSegment";
    if (!verifyToken(segment, readSegment.getOffset(), readSegment.getDelegationToken(), operation)) {
        return;
    }
    final int readSize = min(MAX_READ_SIZE, max(TYPE_PLUS_LENGTH_SIZE, readSegment.getSuggestedLength()));
    long trace = LoggerHelpers.traceEnter(log, operation, readSegment);
    segmentStore.read(segment, readSegment.getOffset(), readSize, TIMEOUT).thenAccept(readResult -> {
        LoggerHelpers.traceLeave(log, operation, trace, readResult);
        handleReadResult(readSegment, readResult);
        this.statsRecorder.readComplete(timer.getElapsed());
    }).exceptionally(ex -> handleException(readSegment.getRequestId(), segment, readSegment.getOffset(), operation, wrapCancellationException(ex)));
}
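The readSize computation clamps the client's suggested length before the store read is issued. A small self-contained sketch of that clamp follows; the constant values are assumptions, since the fields are defined elsewhere in PravegaRequestProcessor and are not part of this snippet.
// Illustrative clamp of the suggested read length; the constant values are assumed.
static final int MAX_READ_SIZE = 2 * 1024 * 1024;  // assumed upper bound for a single read
static final int TYPE_PLUS_LENGTH_SIZE = 8;        // assumed wire-command header size (type + length)

static int clampReadSize(int suggestedLength) {
    // Never read less than one wire-command header, and never more than MAX_READ_SIZE.
    return Math.min(MAX_READ_SIZE, Math.max(TYPE_PLUS_LENGTH_SIZE, suggestedLength));
}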
use of io.pravega.shared.protocol.netty.WireCommands.ReadSegment in project pravega by pravega.
the class PravegaRequestProcessor method handleReadResult.
/**
 * Handles a ReadResult.
 * If there are cached entries that can be returned without blocking, only these are returned.
 * Otherwise the call will request the data and set up a callback to return the data when it is available.
 * If no data is available but it was detected that the Segment had been truncated beyond the current offset,
 * an appropriate message is sent back over the connection.
 */
private void handleReadResult(ReadSegment request, ReadResult result) {
    String segment = request.getSegment();
    ArrayList<BufferView> cachedEntries = new ArrayList<>();
    ReadResultEntry nonCachedEntry = collectCachedEntries(request.getOffset(), result, cachedEntries);
    final String operation = "readSegment";
    boolean truncated = nonCachedEntry != null && nonCachedEntry.getType() == Truncated;
    boolean endOfSegment = nonCachedEntry != null && nonCachedEntry.getType() == EndOfStreamSegment;
    boolean atTail = nonCachedEntry != null && nonCachedEntry.getType() == Future;
    if (!cachedEntries.isEmpty() || endOfSegment) {
        // We managed to collect some data. Send it.
        ByteBuf data = toByteBuf(cachedEntries);
        SegmentRead reply = new SegmentRead(segment, request.getOffset(), atTail, endOfSegment, data, request.getRequestId());
        connection.send(reply);
        this.statsRecorder.read(segment, reply.getData().readableBytes());
    } else if (truncated) {
        // We didn't collect any data; instead, we determined that the current read offset was truncated.
        // Determine the current Start Offset and send that back.
        segmentStore.getStreamSegmentInfo(segment, TIMEOUT)
                .thenAccept(info -> connection.send(new SegmentIsTruncated(request.getRequestId(), segment, info.getStartOffset(), EMPTY_STACK_TRACE, nonCachedEntry.getStreamSegmentOffset())))
                .exceptionally(e -> handleException(request.getRequestId(), segment, nonCachedEntry.getStreamSegmentOffset(), operation, wrapCancellationException(e)));
    } else {
        Preconditions.checkState(nonCachedEntry != null, "No ReadResultEntries returned from read!?");
        nonCachedEntry.requestContent(TIMEOUT);
        nonCachedEntry.getContent().thenAccept(contents -> {
            ByteBuf data = toByteBuf(Collections.singletonList(contents));
            SegmentRead reply = new SegmentRead(segment, nonCachedEntry.getStreamSegmentOffset(), atTail, endOfSegment, data, request.getRequestId());
            connection.send(reply);
            this.statsRecorder.read(segment, reply.getData().readableBytes());
        }).exceptionally(exception -> {
            Throwable e = Exceptions.unwrap(exception);
            if (e instanceof StreamSegmentTruncatedException) {
                // The Segment may have been truncated in Storage after we got this entry but before we managed
                // to make a read. In that case, send the appropriate error back.
                final String clientReplyStackTrace = replyWithStackTraceOnError ? e.getMessage() : EMPTY_STACK_TRACE;
                connection.send(new SegmentIsTruncated(request.getRequestId(), segment, ((StreamSegmentTruncatedException) e).getStartOffset(), clientReplyStackTrace, nonCachedEntry.getStreamSegmentOffset()));
            } else {
                handleException(request.getRequestId(), segment, nonCachedEntry.getStreamSegmentOffset(), operation, wrapCancellationException(e));
            }
            return null;
        }).exceptionally(e -> handleException(request.getRequestId(), segment, nonCachedEntry.getStreamSegmentOffset(), operation, wrapCancellationException(e)));
    }
}
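toByteBuf, used twice above, is a PravegaRequestProcessor helper not included in this snippet; it turns one or more BufferView instances into a single Netty ByteBuf for the SegmentRead reply. Below is a minimal sketch of one way such a helper could look, assuming BufferView.getCopy(); the real implementation may wrap the underlying buffers without copying.
// Sketch only: compose a list of BufferViews into one ByteBuf by copying each view into a
// heap array and wrapping the arrays into a single composite buffer.
// Uses io.netty.buffer.ByteBuf, io.netty.buffer.Unpooled, io.pravega.common.util.BufferView, java.util.List.
private static ByteBuf toByteBufSketch(List<BufferView> views) {
    ByteBuf[] parts = views.stream()
            .map(BufferView::getCopy)      // getCopy() returns the view's contents as a byte[]
            .map(Unpooled::wrappedBuffer)
            .toArray(ByteBuf[]::new);
    return Unpooled.wrappedBuffer(parts);
}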
use of io.pravega.shared.protocol.netty.WireCommands.ReadSegment in project pravega by pravega.
the class FailingRequestProcessorTest method testEverythingThrows.
@Test
public void testEverythingThrows() {
    assertThrows(IllegalStateException.class, () -> rp.hello(new Hello(0, 0)));
    assertThrows(IllegalStateException.class, () -> rp.setupAppend(new SetupAppend(0, null, "", "")));
    assertThrows(IllegalStateException.class, () -> rp.append(new Append("", null, 0, EMPTY_EVENT, 0)));
    assertThrows(IllegalStateException.class, () -> rp.readSegment(new ReadSegment("", 0, 0, "", 0)));
    assertThrows(IllegalStateException.class, () -> rp.updateSegmentAttribute(new UpdateSegmentAttribute(0, "", null, 0, 0, "")));
    assertThrows(IllegalStateException.class, () -> rp.getSegmentAttribute(new GetSegmentAttribute(0, "", null, "")));
    assertThrows(IllegalStateException.class, () -> rp.getStreamSegmentInfo(new WireCommands.GetStreamSegmentInfo(0, "", "")));
    assertThrows(IllegalStateException.class, () -> rp.createSegment(new CreateSegment(0, "", (byte) 0, 0, "", 0)));
    assertThrows(IllegalStateException.class, () -> rp.updateSegmentPolicy(new UpdateSegmentPolicy(0, "", (byte) 0, 0, "")));
    assertThrows(IllegalStateException.class, () -> rp.createTableSegment(new CreateTableSegment(0, "", false, 0, "", 0)));
    assertThrows(IllegalStateException.class, () -> rp.deleteTableSegment(new DeleteTableSegment(0, "", false, "")));
    assertThrows(IllegalStateException.class, () -> rp.updateTableEntries(new UpdateTableEntries(0, "", "", null, 0)));
    assertThrows(IllegalStateException.class, () -> rp.removeTableKeys(new RemoveTableKeys(0, "", "", null, 0)));
    assertThrows(IllegalStateException.class, () -> rp.readTable(new ReadTable(0, "", "", null)));
    assertThrows(IllegalStateException.class, () -> rp.readTableKeys(new ReadTableKeys(0, "", "", 0, null)));
    assertThrows(IllegalStateException.class, () -> rp.readTableEntries(new ReadTableEntries(0, "", "", 0, null)));
    assertThrows(IllegalStateException.class, () -> rp.mergeSegments(new MergeSegments(0, "", "", "")));
    assertThrows(IllegalStateException.class, () -> rp.sealSegment(new SealSegment(0, "", "")));
    assertThrows(IllegalStateException.class, () -> rp.truncateSegment(new TruncateSegment(0, "", 0, "")));
    assertThrows(IllegalStateException.class, () -> rp.deleteSegment(new DeleteSegment(0, "", "")));
    assertThrows(IllegalStateException.class, () -> rp.readTableEntries(new ReadTableEntries(0, "", "", 0, null)));
    assertThrows(IllegalStateException.class, () -> rp.createTableSegment(new CreateTableSegment(0, "", false, 0, "", 0)));
    assertThrows(IllegalStateException.class, () -> rp.readTableEntriesDelta(new ReadTableEntriesDelta(0, "", "", 0, 0)));
    assertThrows(IllegalStateException.class, () -> rp.createTransientSegment(new CreateTransientSegment(0, new UUID(0, 0), "", "")));
    assertThrows(IllegalStateException.class, () -> rp.connectionDropped());
}
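Neither rp nor EMPTY_EVENT is defined in this snippet. A plausible fixture, assumed rather than copied from FailingRequestProcessorTest, is shown below: rp is a FailingRequestProcessor, every method of which is expected to throw IllegalStateException, and EMPTY_EVENT is an Event wrapping an empty buffer.
// Assumed test fixture; the actual fields in FailingRequestProcessorTest may differ.
private final RequestProcessor rp = new FailingRequestProcessor();
private static final Event EMPTY_EVENT = new Event(Unpooled.EMPTY_BUFFER);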