use of io.grpc.stub.ClientResponseObserver in project pinpoint by naver.
the class HelloWorldStreamClient method greet.
/**
 * Say hello to server.
 */
public void greet(final int callCount) throws InterruptedException {
    final CountDownLatch done = new CountDownLatch(1);
    ClientResponseObserver<io.grpc.examples.manualflowcontrol.HelloRequest, io.grpc.examples.manualflowcontrol.HelloReply> clientResponseObserver = new ClientResponseObserver<io.grpc.examples.manualflowcontrol.HelloRequest, io.grpc.examples.manualflowcontrol.HelloReply>() {

        ClientCallStreamObserver<io.grpc.examples.manualflowcontrol.HelloRequest> requestStream;

        @Override
        public void beforeStart(final ClientCallStreamObserver<io.grpc.examples.manualflowcontrol.HelloRequest> requestStream) {
            this.requestStream = requestStream;
            // Set up manual flow control for the response stream. It feels backwards to configure the response
            // stream's flow control using the request stream's observer, but this is the way it is.
            requestStream.disableAutoInboundFlowControl();
            // Set up a back-pressure-aware producer for the request stream. The onReadyHandler will be invoked
            // when the consuming side has enough buffer space to receive more messages.
            //
            // Messages are serialized into a transport-specific transmit buffer. Depending on the size of this buffer,
            // MANY messages may be buffered, however, they haven't yet been sent to the server. The server must call
            // request() to pull a buffered message from the client.
            //
            // Note: the onReadyHandler's invocation is serialized on the same thread pool as the incoming
            // StreamObserver's onNext(), onError(), and onComplete() handlers. Blocking the onReadyHandler will prevent
            // additional messages from being processed by the incoming StreamObserver. The onReadyHandler must return
            // in a timely manner or else message processing throughput will suffer.
            requestStream.setOnReadyHandler(new Runnable() {

                // An iterator is used so we can pause and resume iteration of the request data.
                Iterator<String> iterator = names().iterator();

                @Override
                public void run() {
                    int count = 0;
                    // Start generating values from where we left off on a non-gRPC thread.
                    while (requestStream.isReady()) {
                        if (iterator.hasNext() && callCount > count) {
                            // Send more messages if there are more messages to send.
                            String name = iterator.next();
                            logger.info("--> " + name);
                            io.grpc.examples.manualflowcontrol.HelloRequest request = io.grpc.examples.manualflowcontrol.HelloRequest.newBuilder().setName(name).build();
                            requestStream.onNext(request);
                            count++;
                        } else {
                            // Signal completion if there is nothing left to send.
                            requestStream.onCompleted();
                        }
                    }
                }
            });
        }

        @Override
        public void onNext(io.grpc.examples.manualflowcontrol.HelloReply value) {
            logger.info("<-- " + value.getMessage());
            // Signal the sender to send one message.
            requestStream.request(1);
        }

        @Override
        public void onError(Throwable t) {
            t.printStackTrace();
            done.countDown();
        }

        @Override
        public void onCompleted() {
            logger.info("All Done");
            done.countDown();
        }
    };
    // Note: clientResponseObserver is handling both request and response stream processing.
    stub.sayHelloStreaming(clientResponseObserver);
    done.await();
}
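The comments above stress that the server must call request() to pull each buffered message from the client. For context, the sketch below shows what a matching server-side handler could look like; it is modeled loosely on gRPC's manual flow-control example, not on the pinpoint code, and the class name ManualFlowControlServerSketch and the echo-style reply text are illustrative assumptions.

import io.grpc.examples.manualflowcontrol.HelloReply;
import io.grpc.examples.manualflowcontrol.HelloRequest;
import io.grpc.examples.manualflowcontrol.StreamingGreeterGrpc;
import io.grpc.stub.ServerCallStreamObserver;
import io.grpc.stub.StreamObserver;

// Illustrative sketch only; class name and reply text are assumptions, not part of either project.
public class ManualFlowControlServerSketch extends StreamingGreeterGrpc.StreamingGreeterImplBase {
    @Override
    public StreamObserver<HelloRequest> sayHelloStreaming(StreamObserver<HelloReply> responseObserver) {
        // The runtime actually passes a ServerCallStreamObserver, which exposes the flow-control methods.
        final ServerCallStreamObserver<HelloReply> serverCallObserver =
            (ServerCallStreamObserver<HelloReply>) responseObserver;
        // Take over inbound flow control and ask for the first request message.
        serverCallObserver.disableAutoRequest();
        serverCallObserver.request(1);
        return new StreamObserver<HelloRequest>() {
            @Override
            public void onNext(HelloRequest request) {
                // Reply to the client, then signal that this handler is ready for one more message.
                serverCallObserver.onNext(
                    HelloReply.newBuilder().setMessage("Hello " + request.getName()).build());
                serverCallObserver.request(1);
            }

            @Override
            public void onError(Throwable t) {
                t.printStackTrace();
                responseObserver.onCompleted();
            }

            @Override
            public void onCompleted() {
                responseObserver.onCompleted();
            }
        };
    }
}

Note also that the pinpoint client above calls the older disableAutoInboundFlowControl(), while the grpc-java client below uses disableAutoRequestWithInitial(1); both hand inbound flow control to the application so that request(1) in onNext() paces the response stream.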
use of io.grpc.stub.ClientResponseObserver in project grpc-java by grpc.
the class ManualFlowControlClient method main.
public static void main(String[] args) throws InterruptedException {
    final CountDownLatch done = new CountDownLatch(1);
    // Create a channel and a stub
    ManagedChannel channel = ManagedChannelBuilder.forAddress("localhost", 50051).usePlaintext().build();
    StreamingGreeterGrpc.StreamingGreeterStub stub = StreamingGreeterGrpc.newStub(channel);
    // When using manual flow-control and back-pressure on the client, the ClientResponseObserver handles both
    // request and response streams.
    ClientResponseObserver<HelloRequest, HelloReply> clientResponseObserver = new ClientResponseObserver<HelloRequest, HelloReply>() {

        ClientCallStreamObserver<HelloRequest> requestStream;

        @Override
        public void beforeStart(final ClientCallStreamObserver<HelloRequest> requestStream) {
            this.requestStream = requestStream;
            // Set up manual flow control for the response stream. It feels backwards to configure the response
            // stream's flow control using the request stream's observer, but this is the way it is.
            requestStream.disableAutoRequestWithInitial(1);
            // Set up a back-pressure-aware producer for the request stream. The onReadyHandler will be invoked
            // when the consuming side has enough buffer space to receive more messages.
            //
            // Messages are serialized into a transport-specific transmit buffer. Depending on the size of this buffer,
            // MANY messages may be buffered, however, they haven't yet been sent to the server. The server must call
            // request() to pull a buffered message from the client.
            //
            // Note: the onReadyHandler's invocation is serialized on the same thread pool as the incoming
            // StreamObserver's onNext(), onError(), and onComplete() handlers. Blocking the onReadyHandler will prevent
            // additional messages from being processed by the incoming StreamObserver. The onReadyHandler must return
            // in a timely manner or else message processing throughput will suffer.
            requestStream.setOnReadyHandler(new Runnable() {

                // An iterator is used so we can pause and resume iteration of the request data.
                Iterator<String> iterator = names().iterator();

                @Override
                public void run() {
                    // Start generating values from where we left off on a non-gRPC thread.
                    while (requestStream.isReady()) {
                        if (iterator.hasNext()) {
                            // Send more messages if there are more messages to send.
                            String name = iterator.next();
                            logger.info("--> " + name);
                            HelloRequest request = HelloRequest.newBuilder().setName(name).build();
                            requestStream.onNext(request);
                        } else {
                            // Signal completion if there is nothing left to send.
                            requestStream.onCompleted();
                        }
                    }
                }
            });
        }

        @Override
        public void onNext(HelloReply value) {
            logger.info("<-- " + value.getMessage());
            // Signal the sender to send one message.
            requestStream.request(1);
        }

        @Override
        public void onError(Throwable t) {
            t.printStackTrace();
            done.countDown();
        }

        @Override
        public void onCompleted() {
            logger.info("All Done");
            done.countDown();
        }
    };
    // Note: clientResponseObserver is handling both request and response stream processing.
    stub.sayHelloStreaming(clientResponseObserver);
    done.await();
    channel.shutdown();
    channel.awaitTermination(1, TimeUnit.SECONDS);
}
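Both clients iterate over a names() helper that is not shown in this excerpt; in the grpc-java example it simply returns a fixed list of greeting targets. A minimal stand-in is sketched below, assuming java.util.Arrays and java.util.List; the particular names are arbitrary and the real list is longer.

// Hypothetical stand-in for the names() helper referenced above; the actual contents differ.
private static List<String> names() {
    return Arrays.asList("Sophia", "Jackson", "Emma", "Noah", "Olivia", "Liam");
}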