Use of io.grpc.ServerCallHandler in project grpc-java by grpc.
The class CascadingTest, method startCallTreeServer.
/**
 * Create a tree of client-to-server calls where each call received on the server
 * fans out to two downstream calls. Uses SimpleRequest.response_size to limit the nodeCount
 * of the tree. One of the leaves will ABORT to trigger cancellation back up the tree.
 */
private void startCallTreeServer(int depthThreshold) throws IOException {
  final AtomicInteger nodeCount = new AtomicInteger((2 << depthThreshold) - 1);
  server = InProcessServerBuilder.forName("channel")
      .executor(otherWork)
      .addService(ServerInterceptors.intercept(service, new ServerInterceptor() {
        @Override
        public <ReqT, RespT> ServerCall.Listener<ReqT> interceptCall(
            final ServerCall<ReqT, RespT> call, Metadata headers,
            ServerCallHandler<ReqT, RespT> next) {
          // Respond with the headers but nothing else.
          call.sendHeaders(new Metadata());
          call.request(1);
          return new ServerCall.Listener<ReqT>() {
            @Override
            public void onMessage(final ReqT message) {
              Messages.SimpleRequest req = (Messages.SimpleRequest) message;
              if (nodeCount.decrementAndGet() == 0) {
                // we are in the final leaf node so trigger an ABORT upwards
                Context.currentContextExecutor(otherWork).execute(new Runnable() {
                  @Override
                  public void run() {
                    call.close(Status.ABORTED, new Metadata());
                  }
                });
              } else if (req.getResponseSize() != 0) {
                // We are in a non leaf node so fire off two requests
                req = req.toBuilder().setResponseSize(req.getResponseSize() - 1).build();
                for (int i = 0; i < 2; i++) {
                  asyncStub.unaryCall(req, new StreamObserver<Messages.SimpleResponse>() {
                    @Override
                    public void onNext(Messages.SimpleResponse value) {
                    }

                    @Override
                    public void onError(Throwable t) {
                      Status status = Status.fromThrowable(t);
                      if (status.getCode() == Status.Code.CANCELLED) {
                        observedCancellations.countDown();
                      }
                      // Propagate closure upwards.
                      try {
                        call.close(status, new Metadata());
                      } catch (IllegalStateException t2) {
                        // Ignore error if already closed.
                      }
                    }

                    @Override
                    public void onCompleted() {
                    }
                  });
                }
              }
            }

            @Override
            public void onCancel() {
              receivedCancellations.countDown();
            }
          };
        }
      }))
      .build();
  server.start();
}
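For context, a test might drive this server roughly as follows. This is only a sketch, not the actual CascadingTest code: the in-process name "channel" and the Messages / TestServiceGrpc types appear in the snippet above, while the stub variable and the depth value of 3 are illustrative.

ManagedChannel channel = InProcessChannelBuilder.forName("channel").build();
TestServiceGrpc.TestServiceBlockingStub stub = TestServiceGrpc.newBlockingStub(channel);
try {
  // response_size doubles as the remaining depth, so this single call fans out into the tree.
  stub.unaryCall(Messages.SimpleRequest.newBuilder().setResponseSize(3).build());
} catch (StatusRuntimeException expected) {
  // The ABORT at the final leaf is expected to propagate back up the tree; the
  // observedCancellations and receivedCancellations latches record the cancellations.
}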
Use of io.grpc.ServerCallHandler in project grpc-java by grpc.
The class ServerCalls, method asyncUnaryRequestCall.
/**
* Creates a {@code ServerCallHandler} for a unary request call method of the service.
*
* @param method an adaptor to the actual method on the service implementation.
*/
private static <ReqT, RespT> ServerCallHandler<ReqT, RespT> asyncUnaryRequestCall(
    final UnaryRequestMethod<ReqT, RespT> method) {
  return new ServerCallHandler<ReqT, RespT>() {
    @Override
    public ServerCall.Listener<ReqT> startCall(
        final ServerCall<ReqT, RespT> call, Metadata headers) {
      final ServerCallStreamObserverImpl<ReqT, RespT> responseObserver =
          new ServerCallStreamObserverImpl<ReqT, RespT>(call);
      // We expect only 1 request, but we ask for 2 requests here so that if a misbehaving
      // client sends more than 1 request, ServerCall will catch it. Note that disabling auto
      // inbound flow control has no effect on unary calls.
      call.request(2);
      return new EmptyServerCallListener<ReqT>() {
        ReqT request;

        @Override
        public void onMessage(ReqT request) {
          // We delay calling method.invoke() until onHalfClose() to make sure the client
          // half-closes.
          this.request = request;
        }

        @Override
        public void onHalfClose() {
          if (request != null) {
            method.invoke(request, responseObserver);
            responseObserver.freeze();
            if (call.isReady()) {
              // Since we are calling invoke in halfClose we have missed the onReady
              // event from the transport so recover it here.
              onReady();
            }
          } else {
            call.close(
                Status.INTERNAL.withDescription("Half-closed without a request"),
                new Metadata());
          }
        }

        @Override
        public void onCancel() {
          responseObserver.cancelled = true;
          if (responseObserver.onCancelHandler != null) {
            responseObserver.onCancelHandler.run();
          }
        }

        @Override
        public void onReady() {
          if (responseObserver.onReadyHandler != null) {
            responseObserver.onReadyHandler.run();
          }
        }
      };
    }
  };
}
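asyncUnaryRequestCall is private; application code typically reaches it through the public ServerCalls.asyncUnaryCall(...) factory when wiring a ServerServiceDefinition by hand. A minimal sketch, assuming a hypothetical MethodDescriptor named echoMethod with String marshallers (not part of the snippet above):

ServerCallHandler<String, String> handler = ServerCalls.asyncUnaryCall(
    new ServerCalls.UnaryMethod<String, String>() {
      @Override
      public void invoke(String request, StreamObserver<String> responseObserver) {
        // Unary contract: exactly one response, then completion.
        responseObserver.onNext("echo: " + request);
        responseObserver.onCompleted();
      }
    });
ServerServiceDefinition service = ServerServiceDefinition.builder("example.Echo")
    .addMethod(echoMethod, handler)
    .build();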
Use of io.grpc.ServerCallHandler in project grpc-java by grpc.
The class AbstractBenchmark, method setup.
/**
 * Initialize the client and server environment for the benchmark: transports, executors,
 * flow-control windows, request/response buffers, and the benchmark service methods.
 */
public void setup(
    ExecutorType clientExecutor,
    ExecutorType serverExecutor,
    MessageSize requestSize,
    MessageSize responseSize,
    FlowWindowSize windowSize,
    ChannelType channelType,
    int maxConcurrentStreams,
    int channelCount) throws Exception {
  NettyServerBuilder serverBuilder;
  NettyChannelBuilder channelBuilder;
  if (channelType == ChannelType.LOCAL) {
    LocalAddress address = new LocalAddress("netty-e2e-benchmark");
    serverBuilder = NettyServerBuilder.forAddress(address);
    serverBuilder.channelType(LocalServerChannel.class);
    channelBuilder = NettyChannelBuilder.forAddress(address);
    channelBuilder.channelType(LocalChannel.class);
  } else {
    ServerSocket sock = new ServerSocket();
    // Pick a port using an ephemeral socket.
    sock.bind(new InetSocketAddress(BENCHMARK_ADDR, 0));
    SocketAddress address = sock.getLocalSocketAddress();
    sock.close();
    serverBuilder = NettyServerBuilder.forAddress(address);
    channelBuilder = NettyChannelBuilder.forAddress(address);
  }
  if (serverExecutor == ExecutorType.DIRECT) {
    serverBuilder.directExecutor();
  }
  if (clientExecutor == ExecutorType.DIRECT) {
    channelBuilder.directExecutor();
  }
  // Always use a different worker group from the client.
  ThreadFactory serverThreadFactory = new DefaultThreadFactory("STF pool", true);
  serverBuilder.workerEventLoopGroup(new NioEventLoopGroup(0, serverThreadFactory));
  // Always set connection and stream window size to same value
  serverBuilder.flowControlWindow(windowSize.bytes());
  channelBuilder.flowControlWindow(windowSize.bytes());
  channelBuilder.negotiationType(NegotiationType.PLAINTEXT);
  serverBuilder.maxConcurrentCallsPerConnection(maxConcurrentStreams);
  // Create buffers of the desired size for requests and responses.
  PooledByteBufAllocator alloc = PooledByteBufAllocator.DEFAULT;
  // Use a heap buffer for now, since MessageFramer doesn't know how to directly convert this
  // into a WritableBuffer
  // TODO(carl-mastrangelo): convert this into a regular buffer() call. See
  // https://github.com/grpc/grpc-java/issues/2062#issuecomment-234646216
  request = alloc.heapBuffer(requestSize.bytes());
  request.writerIndex(request.capacity() - 1);
  response = alloc.heapBuffer(responseSize.bytes());
  response.writerIndex(response.capacity() - 1);
  // Simple method that sends and receives NettyByteBuf
  unaryMethod = MethodDescriptor.<ByteBuf, ByteBuf>newBuilder()
      .setType(MethodType.UNARY)
      .setFullMethodName("benchmark/unary")
      .setRequestMarshaller(new ByteBufOutputMarshaller())
      .setResponseMarshaller(new ByteBufOutputMarshaller())
      .build();
  pingPongMethod = unaryMethod.toBuilder()
      .setType(MethodType.BIDI_STREAMING)
      .setFullMethodName("benchmark/pingPong")
      .build();
  flowControlledStreaming = pingPongMethod.toBuilder()
      .setFullMethodName("benchmark/flowControlledStreaming")
      .build();
  // Server implementation of unary & streaming methods
  serverBuilder.addService(ServerServiceDefinition
      .builder(new ServiceDescriptor(
          "benchmark", unaryMethod, pingPongMethod, flowControlledStreaming))
      .addMethod(unaryMethod, new ServerCallHandler<ByteBuf, ByteBuf>() {
        @Override
        public ServerCall.Listener<ByteBuf> startCall(
            final ServerCall<ByteBuf, ByteBuf> call, Metadata headers) {
          call.sendHeaders(new Metadata());
          call.request(1);
          return new ServerCall.Listener<ByteBuf>() {
            @Override
            public void onMessage(ByteBuf message) {
              // Don't inspect the payload; just release it and echo a fixed-size response.
              message.release();
              call.sendMessage(response.slice());
            }

            @Override
            public void onHalfClose() {
              call.close(Status.OK, new Metadata());
            }

            @Override
            public void onCancel() {
            }

            @Override
            public void onComplete() {
            }
          };
        }
      })
      .addMethod(pingPongMethod, new ServerCallHandler<ByteBuf, ByteBuf>() {
        @Override
        public ServerCall.Listener<ByteBuf> startCall(
            final ServerCall<ByteBuf, ByteBuf> call, Metadata headers) {
          call.sendHeaders(new Metadata());
          call.request(1);
          return new ServerCall.Listener<ByteBuf>() {
            @Override
            public void onMessage(ByteBuf message) {
              message.release();
              call.sendMessage(response.slice());
              // Request next message
              call.request(1);
            }

            @Override
            public void onHalfClose() {
              call.close(Status.OK, new Metadata());
            }

            @Override
            public void onCancel() {
            }

            @Override
            public void onComplete() {
            }
          };
        }
      })
      .addMethod(flowControlledStreaming, new ServerCallHandler<ByteBuf, ByteBuf>() {
        @Override
        public ServerCall.Listener<ByteBuf> startCall(
            final ServerCall<ByteBuf, ByteBuf> call, Metadata headers) {
          call.sendHeaders(new Metadata());
          call.request(1);
          return new ServerCall.Listener<ByteBuf>() {
            @Override
            public void onMessage(ByteBuf message) {
              message.release();
              while (call.isReady()) {
                call.sendMessage(response.slice());
              }
              // Request next message
              call.request(1);
            }

            @Override
            public void onHalfClose() {
              call.close(Status.OK, new Metadata());
            }

            @Override
            public void onCancel() {
            }

            @Override
            public void onComplete() {
            }

            @Override
            public void onReady() {
              while (call.isReady()) {
                call.sendMessage(response.slice());
              }
            }
          };
        }
      })
      .build());
  // Build and start the clients and servers
  server = serverBuilder.build();
  server.start();
  channels = new ManagedChannel[channelCount];
  ThreadFactory clientThreadFactory = new DefaultThreadFactory("CTF pool", true);
  for (int i = 0; i < channelCount; i++) {
    // Use a dedicated event-loop for each channel
    channels[i] = channelBuilder
        .eventLoopGroup(new NioEventLoopGroup(1, clientThreadFactory))
        .build();
  }
}
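Once setup() has run, a client can drive the registered methods directly with the MethodDescriptors built above. A rough sketch for the unary method, assuming the caller owns (and must release) the ByteBuf handed back by the response marshaller:

ByteBuf reply = ClientCalls.blockingUnaryCall(
    channels[0], unaryMethod, CallOptions.DEFAULT, request.slice());
reply.release();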