Use of org.apache.beam.vendor.grpc.v1p43p2.io.grpc.Server in the Apache Beam project.
From the class GrpcFnServer, method allocatePortAndCreateFor.
/**
 * Creates a {@link GrpcFnServer} that hosts the given {@link FnService} on a
 * dynamically allocated port.
 *
 * @param service the Fn API service to expose
 * @param factory the factory used to allocate an address and build the server
 * @return a running {@link GrpcFnServer} wrapping the service and its descriptor
 * @throws IOException if the server cannot be created or bound
 */
public static <ServiceT extends FnService> GrpcFnServer<ServiceT> allocatePortAndCreateFor(ServiceT service, ServerFactory factory) throws IOException {
  // The factory fills in the chosen address on this builder as a side effect.
  ApiServiceDescriptor.Builder descriptorBuilder = ApiServiceDescriptor.newBuilder();
  Server grpcServer = factory.allocateAddressAndCreate(ImmutableList.of(service), descriptorBuilder);
  return new GrpcFnServer<>(grpcServer, service, descriptorBuilder.build());
}
Use of org.apache.beam.vendor.grpc.v1p43p2.io.grpc.Server in the Apache Beam project.
From the class DataflowRunnerHarness, method main.
/**
 * Fetches and processes work units from the Dataflow service.
 *
 * <p>Entry point for the Dataflow runner harness: reads service descriptors from the
 * environment, wires up the Fn API control, data, state, logging and (optional) worker-status
 * gRPC services, starts a separate gRPC server per traffic class, runs the harness, then shuts
 * the servers down gracefully with a bounded wait and a forced-shutdown fallback.
 */
public static void main(String[] unusedArgs) throws Exception {
// Pipeline may be absent from the environment; downstream code must tolerate null.
RunnerApi.@Nullable Pipeline pipeline = DataflowWorkerHarnessHelper.getPipelineFromEnv();
// This descriptor is used for all services except logging. They are isolated to keep
// critical traffic protected from best effort traffic.
ApiServiceDescriptor controlApiService = DataflowWorkerHarnessHelper.getControlDescriptor();
ApiServiceDescriptor loggingApiService = DataflowWorkerHarnessHelper.getLoggingDescriptor();
// Status descriptor may be null; the status server is only started when it is present.
ApiServiceDescriptor statusApiService = DataflowWorkerHarnessHelper.getStatusDescriptor();
LOG.info("{} started, using port {} for control, {} for logging.", DataflowRunnerHarness.class, controlApiService, loggingApiService);
DataflowWorkerHarnessHelper.initializeLogging(DataflowRunnerHarness.class);
DataflowWorkerHarnessOptions pipelineOptions = DataflowWorkerHarnessHelper.initializeGlobalStateAndPipelineOptions(DataflowRunnerHarness.class);
DataflowWorkerHarnessHelper.configureLogging(pipelineOptions);
// Initialize registered file systems.
FileSystems.setDefaultPipelineOptions(pipelineOptions);
DataflowPipelineDebugOptions dataflowOptions = pipelineOptions.as(DataflowPipelineDebugOptions.class);
// Select the gRPC transport via experiment flags: epoll over Unix domain sockets,
// epoll over TCP, or the default transport.
ServerFactory serverFactory;
if (DataflowRunner.hasExperiment(dataflowOptions, "beam_fn_api_epoll_domain_socket")) {
serverFactory = ServerFactory.createEpollDomainSocket();
} else if (DataflowRunner.hasExperiment(dataflowOptions, "beam_fn_api_epoll")) {
serverFactory = ServerFactory.createEpollSocket();
} else {
serverFactory = ServerFactory.createDefault();
}
ServerStreamObserverFactory streamObserverFactory = ServerStreamObserverFactory.fromOptions(pipelineOptions);
// Servers are declared outside the try so the finally block can force-stop whichever
// of them were successfully created.
Server servicesServer = null;
Server loggingServer = null;
Server statusServer = null;
// All services are AutoCloseable and are closed by try-with-resources before the finally
// block runs; the status service is null when no status descriptor was configured.
try (BeamFnLoggingService beamFnLoggingService = new BeamFnLoggingService(loggingApiService, DataflowWorkerLoggingInitializer.getSdkLoggingHandler()::publish, streamObserverFactory::from, GrpcContextHeaderAccessorProvider.getHeaderAccessor());
BeamFnControlService beamFnControlService = new BeamFnControlService(controlApiService, streamObserverFactory::from, GrpcContextHeaderAccessorProvider.getHeaderAccessor());
// NOTE(review): the data service is constructed with controlApiService, so data traffic
// shares the control endpoint rather than getting its own — confirm this is intended.
BeamFnDataGrpcService beamFnDataService = new BeamFnDataGrpcService(pipelineOptions, controlApiService, streamObserverFactory::from, GrpcContextHeaderAccessorProvider.getHeaderAccessor());
BeamWorkerStatusGrpcService beamWorkerStatusGrpcService = statusApiService == null ? null : BeamWorkerStatusGrpcService.create(statusApiService, GrpcContextHeaderAccessorProvider.getHeaderAccessor());
GrpcStateService beamFnStateService = GrpcStateService.create()) {
// Control, data and state share one server; logging gets its own so that best-effort
// log traffic cannot starve critical control traffic.
servicesServer = serverFactory.create(ImmutableList.of(beamFnControlService, beamFnDataService, beamFnStateService), controlApiService);
loggingServer = serverFactory.create(ImmutableList.of(beamFnLoggingService), loggingApiService);
// gRPC server for obtaining SDK harness runtime status information.
if (beamWorkerStatusGrpcService != null) {
statusServer = serverFactory.create(ImmutableList.of(beamWorkerStatusGrpcService), statusApiService);
}
// Blocks until the harness finishes processing work.
start(pipeline, pipelineOptions, beamFnControlService, beamFnDataService, controlApiService, beamFnStateService, beamWorkerStatusGrpcService);
// Graceful shutdown: stop accepting new calls on every server first, then wait.
if (statusServer != null) {
statusServer.shutdown();
}
servicesServer.shutdown();
loggingServer.shutdown();
// wait 30 secs for outstanding requests to finish.
if (statusServer != null) {
statusServer.awaitTermination(30, TimeUnit.SECONDS);
}
servicesServer.awaitTermination(30, TimeUnit.SECONDS);
loggingServer.awaitTermination(30, TimeUnit.SECONDS);
} finally {
// Forced shutdown: any server still alive (graceful wait timed out, or an exception
// skipped the graceful path entirely) is stopped immediately.
if (statusServer != null && !statusServer.isTerminated()) {
statusServer.shutdownNow();
}
if (servicesServer != null && !servicesServer.isTerminated()) {
servicesServer.shutdownNow();
}
if (loggingServer != null && !loggingServer.isTerminated()) {
loggingServer.shutdownNow();
}
}
}
Use of org.apache.beam.vendor.grpc.v1p43p2.io.grpc.Server in the Apache Beam project.
From the class GrpcWindmillServerTest, method testStreamingGetWork.
@Test
public void testStreamingGetWork() throws Exception {
// This fake server returns an infinite stream of identical WorkItems, obeying the request size
// limits set by the client.
serviceRegistry.addService(new CloudWindmillServiceV1Alpha1ImplBase() {
@Override
public StreamObserver<StreamingGetWorkRequest> getWorkStream(StreamObserver<StreamingGetWorkResponseChunk> responseObserver) {
return new StreamObserver<StreamingGetWorkRequest>() {
// The first message on the stream is a header carrying the full GetWorkRequest;
// every later message is an extension that only raises the item budget.
boolean sawHeader = false;
// Randomly injects stream errors to exercise the client's reconnect logic.
ResponseErrorInjector injector = new ResponseErrorInjector(responseObserver);
@Override
public void onNext(StreamingGetWorkRequest request) {
maybeInjectError(responseObserver);
try {
long maxItems;
if (!sawHeader) {
// Header must echo exactly the request the client was built with below.
errorCollector.checkThat(request.getRequest(), Matchers.equalTo(GetWorkRequest.newBuilder().setClientId(10).setJobId("job").setProjectId("project").setWorkerId("worker").setMaxItems(3).setMaxBytes(10000).build()));
sawHeader = true;
maxItems = request.getRequest().getMaxItems();
} else {
maxItems = request.getRequestExtension().getMaxItems();
}
// Respond with exactly as many items as the client's budget allows.
for (int item = 0; item < maxItems; item++) {
// Random id doubles as both work token and sharding key.
long id = ThreadLocalRandom.current().nextLong();
ByteString serializedResponse = WorkItem.newBuilder().setKey(ByteString.copyFromUtf8("somewhat_long_key")).setWorkToken(id).setShardingKey(id).build().toByteString();
// Break the WorkItem into smaller chunks to test chunking code.
for (int i = 0; i < serializedResponse.size(); i += 10) {
int end = Math.min(serializedResponse.size(), i + 10);
StreamingGetWorkResponseChunk.Builder builder = StreamingGetWorkResponseChunk.newBuilder().setStreamId(id).setSerializedWorkItem(serializedResponse.substring(i, end)).setRemainingBytesForWorkItem(serializedResponse.size() - end);
if (i == 0) {
// Metadata rides only on the first chunk of each WorkItem.
builder.setComputationMetadata(ComputationWorkItemMetadata.newBuilder().setComputationId("comp").setDependentRealtimeInputWatermark(17000).setInputDataWatermark(18000));
}
try {
responseObserver.onNext(builder.build());
} catch (IllegalStateException e) {
// Client closed stream, we're done.
return;
}
}
}
} catch (Exception e) {
errorCollector.addError(e);
}
}
@Override
public void onError(Throwable throwable) {
// Client-side errors are ignored; the injector drives error scenarios.
}
@Override
public void onCompleted() {
injector.cancel();
responseObserver.onCompleted();
}
};
}
});
// Read the stream of WorkItems until 100 of them are received.
CountDownLatch latch = new CountDownLatch(100);
GetWorkStream stream = client.getWorkStream(GetWorkRequest.newBuilder().setClientId(10).setMaxItems(3).setMaxBytes(10000).build(), (String computation, @Nullable Instant inputDataWatermark, Instant synchronizedProcessingTime, Windmill.WorkItem workItem) -> {
latch.countDown();
// Server sent 18000/17000; client surfaces Instant(18)/Instant(17) — presumably a
// microseconds-to-milliseconds conversion in the client. TODO(review): confirm units.
// NOTE(review): assertEquals args are (actual, expected) here, the reverse of the
// JUnit convention — failure messages will be mislabeled, though the check is the same.
assertEquals(inputDataWatermark, new Instant(18));
assertEquals(synchronizedProcessingTime, new Instant(17));
assertEquals(workItem.getKey(), ByteString.copyFromUtf8("somewhat_long_key"));
});
assertTrue(latch.await(30, TimeUnit.SECONDS));
stream.close();
assertTrue(stream.awaitTermination(30, TimeUnit.SECONDS));
}
Use of org.apache.beam.vendor.grpc.v1p43p2.io.grpc.Server in the Apache Beam project.
From the class GrpcWindmillServerTest, method testStreamingCommit.
@Test
public void testStreamingCommit() throws Exception {
List<WorkItemCommitRequest> commitRequestList = new ArrayList<>();
List<CountDownLatch> latches = new ArrayList<>();
// Keyed by work token so the server can look up the expected request after reassembly.
Map<Long, WorkItemCommitRequest> commitRequests = new HashMap<>();
for (int i = 0; i < 500; ++i) {
// Build some requests of varying size with a few big ones.
WorkItemCommitRequest request = makeCommitRequest(i, i * (i < 480 ? 8 : 128));
commitRequestList.add(request);
commitRequests.put((long) i, request);
latches.add(new CountDownLatch(1));
}
// Shuffle so arrival order is independent of size.
Collections.shuffle(commitRequestList);
// This server receives WorkItemCommitRequests, and verifies they are equal to the above
// commitRequest.
serviceRegistry.addService(new CloudWindmillServiceV1Alpha1ImplBase() {
@Override
public StreamObserver<StreamingCommitWorkRequest> commitWorkStream(StreamObserver<StreamingCommitResponse> responseObserver) {
return new StreamObserver<StreamingCommitWorkRequest>() {
// First message on the stream is a header identifying the worker.
boolean sawHeader = false;
// Accumulates chunk payloads of one in-flight WorkItemCommitRequest; null between items.
InputStream buffer = null;
// Bytes still expected for the in-flight item; 0 means the item is complete.
long remainingBytes = 0;
ResponseErrorInjector injector = new ResponseErrorInjector(responseObserver);
@Override
public void onNext(StreamingCommitWorkRequest request) {
maybeInjectError(responseObserver);
if (!sawHeader) {
errorCollector.checkThat(request.getHeader(), Matchers.equalTo(JobHeader.newBuilder().setJobId("job").setProjectId("project").setWorkerId("worker").build()));
sawHeader = true;
LOG.info("Received header");
} else {
boolean first = true;
LOG.info("Received request with {} chunks", request.getCommitChunkCount());
for (StreamingCommitRequestChunk chunk : request.getCommitChunkList()) {
// The client must never exceed its configured chunk size.
assertTrue(chunk.getSerializedWorkItemCommit().size() <= STREAM_CHUNK_SIZE);
if (first || chunk.hasComputationId()) {
errorCollector.checkThat(chunk.getComputationId(), Matchers.equalTo("computation"));
}
if (remainingBytes != 0) {
// Continuation chunk: append to the pending item and verify byte accounting.
errorCollector.checkThat(buffer, Matchers.notNullValue());
errorCollector.checkThat(remainingBytes, Matchers.is(chunk.getSerializedWorkItemCommit().size() + chunk.getRemainingBytesForWorkItem()));
buffer = new SequenceInputStream(buffer, chunk.getSerializedWorkItemCommit().newInput());
} else {
// Start of a new item: no partial data may be pending.
errorCollector.checkThat(buffer, Matchers.nullValue());
buffer = chunk.getSerializedWorkItemCommit().newInput();
}
remainingBytes = chunk.getRemainingBytesForWorkItem();
if (remainingBytes == 0) {
// Item complete: parse, compare with the original request, and ack it.
try {
WorkItemCommitRequest received = WorkItemCommitRequest.parseFrom(buffer);
errorCollector.checkThat(received, Matchers.equalTo(commitRequests.get(received.getWorkToken())));
try {
responseObserver.onNext(StreamingCommitResponse.newBuilder().addRequestId(chunk.getRequestId()).build());
} catch (IllegalStateException e) {
// Stream is closed.
}
} catch (Exception e) {
errorCollector.addError(e);
}
buffer = null;
} else {
// A partial item may only be the FIRST chunk split across messages.
errorCollector.checkThat(first, Matchers.is(true));
}
first = false;
}
}
}
@Override
public void onError(Throwable throwable) {
// Errors are driven by the injector; nothing to verify here.
}
@Override
public void onCompleted() {
injector.cancel();
responseObserver.onCompleted();
}
};
}
});
// Make the commit requests, waiting for each of them to be verified and acknowledged.
CommitWorkStream stream = client.commitWorkStream();
for (int i = 0; i < commitRequestList.size(); ) {
final CountDownLatch latch = latches.get(i);
// commitWorkItem returns false when the stream's buffer is full; flush and retry
// the same item (i is only advanced on success).
if (stream.commitWorkItem("computation", commitRequestList.get(i), (CommitStatus status) -> {
// NOTE(review): assertEquals args are (actual, expected), reversed from JUnit convention.
assertEquals(status, CommitStatus.OK);
latch.countDown();
})) {
i++;
} else {
stream.flush();
}
}
stream.flush();
for (CountDownLatch latch : latches) {
assertTrue(latch.await(1, TimeUnit.MINUTES));
}
stream.close();
assertTrue(stream.awaitTermination(30, TimeUnit.SECONDS));
}
Use of org.apache.beam.vendor.grpc.v1p43p2.io.grpc.Server in the Apache Beam project.
From the class GrpcWindmillServerTest, method setUp.
@Before
public void setUp() throws Exception {
  // Each test talks to an in-process gRPC server backed by the fake-service registry;
  // the client under test connects to it by name.
  String serverName = "Fake server for " + getClass();
  this.server =
      InProcessServerBuilder.forName(serverName)
          .fallbackHandlerRegistry(serviceRegistry)
          .executor(Executors.newFixedThreadPool(1))
          .build()
          .start();
  this.client = GrpcWindmillServer.newTestInstance(serverName, true);
}
Aggregations