
Example 21 with Server

Use of org.apache.beam.vendor.grpc.v1p43p2.io.grpc.Server in project beam by apache.

From the class GrpcFnServer, method allocatePortAndCreateFor.

/**
 * Create a {@link GrpcFnServer} for the provided {@link FnService} running on an arbitrary port.
 */
public static <ServiceT extends FnService> GrpcFnServer<ServiceT> allocatePortAndCreateFor(ServiceT service, ServerFactory factory) throws IOException {
    ApiServiceDescriptor.Builder apiServiceDescriptor = ApiServiceDescriptor.newBuilder();
    Server server = factory.allocateAddressAndCreate(ImmutableList.of(service), apiServiceDescriptor);
    return new GrpcFnServer<>(server, service, apiServiceDescriptor.build());
}
Also used: ApiServiceDescriptor(org.apache.beam.model.pipeline.v1.Endpoints.ApiServiceDescriptor), Server(org.apache.beam.vendor.grpc.v1p43p2.io.grpc.Server)
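
A minimal usage sketch of the method above. MyFnService is a hypothetical FnService implementation; everything else follows the signatures shown in the snippet. GrpcFnServer is AutoCloseable, so try-with-resources shuts the underlying gRPC server down when the block exits.

// Sketch: serve a hypothetical MyFnService on an arbitrary free port.
try (GrpcFnServer<MyFnService> fnServer =
        GrpcFnServer.allocatePortAndCreateFor(new MyFnService(), ServerFactory.createDefault())) {
    // The descriptor now carries the dynamically allocated address,
    // which can be handed to an SDK harness so it knows where to connect.
    ApiServiceDescriptor descriptor = fnServer.getApiServiceDescriptor();
    System.out.println("Serving on " + descriptor.getUrl());
}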

Example 22 with Server

Use of org.apache.beam.vendor.grpc.v1p43p2.io.grpc.Server in project beam by apache.

From the class DataflowRunnerHarness, method main.

/**
 * Fetches and processes work units from the Dataflow service.
 */
public static void main(String[] unusedArgs) throws Exception {
    RunnerApi.@Nullable Pipeline pipeline = DataflowWorkerHarnessHelper.getPipelineFromEnv();
    // This descriptor is used for all services except logging. They are isolated to keep
    // critical traffic protected from best-effort traffic.
    ApiServiceDescriptor controlApiService = DataflowWorkerHarnessHelper.getControlDescriptor();
    ApiServiceDescriptor loggingApiService = DataflowWorkerHarnessHelper.getLoggingDescriptor();
    ApiServiceDescriptor statusApiService = DataflowWorkerHarnessHelper.getStatusDescriptor();
    LOG.info("{} started, using port {} for control, {} for logging.", DataflowRunnerHarness.class, controlApiService, loggingApiService);
    DataflowWorkerHarnessHelper.initializeLogging(DataflowRunnerHarness.class);
    DataflowWorkerHarnessOptions pipelineOptions = DataflowWorkerHarnessHelper.initializeGlobalStateAndPipelineOptions(DataflowRunnerHarness.class);
    DataflowWorkerHarnessHelper.configureLogging(pipelineOptions);
    // Initialize registered file systems.
    FileSystems.setDefaultPipelineOptions(pipelineOptions);
    DataflowPipelineDebugOptions dataflowOptions = pipelineOptions.as(DataflowPipelineDebugOptions.class);
    ServerFactory serverFactory;
    if (DataflowRunner.hasExperiment(dataflowOptions, "beam_fn_api_epoll_domain_socket")) {
        serverFactory = ServerFactory.createEpollDomainSocket();
    } else if (DataflowRunner.hasExperiment(dataflowOptions, "beam_fn_api_epoll")) {
        serverFactory = ServerFactory.createEpollSocket();
    } else {
        serverFactory = ServerFactory.createDefault();
    }
    ServerStreamObserverFactory streamObserverFactory = ServerStreamObserverFactory.fromOptions(pipelineOptions);
    Server servicesServer = null;
    Server loggingServer = null;
    Server statusServer = null;
    try (BeamFnLoggingService beamFnLoggingService = new BeamFnLoggingService(loggingApiService, DataflowWorkerLoggingInitializer.getSdkLoggingHandler()::publish, streamObserverFactory::from, GrpcContextHeaderAccessorProvider.getHeaderAccessor());
        BeamFnControlService beamFnControlService = new BeamFnControlService(controlApiService, streamObserverFactory::from, GrpcContextHeaderAccessorProvider.getHeaderAccessor());
        BeamFnDataGrpcService beamFnDataService = new BeamFnDataGrpcService(pipelineOptions, controlApiService, streamObserverFactory::from, GrpcContextHeaderAccessorProvider.getHeaderAccessor());
        BeamWorkerStatusGrpcService beamWorkerStatusGrpcService = statusApiService == null ? null : BeamWorkerStatusGrpcService.create(statusApiService, GrpcContextHeaderAccessorProvider.getHeaderAccessor());
        GrpcStateService beamFnStateService = GrpcStateService.create()) {
        servicesServer = serverFactory.create(ImmutableList.of(beamFnControlService, beamFnDataService, beamFnStateService), controlApiService);
        loggingServer = serverFactory.create(ImmutableList.of(beamFnLoggingService), loggingApiService);
        // gRPC server for obtaining SDK harness runtime status information.
        if (beamWorkerStatusGrpcService != null) {
            statusServer = serverFactory.create(ImmutableList.of(beamWorkerStatusGrpcService), statusApiService);
        }
        start(pipeline, pipelineOptions, beamFnControlService, beamFnDataService, controlApiService, beamFnStateService, beamWorkerStatusGrpcService);
        if (statusServer != null) {
            statusServer.shutdown();
        }
        servicesServer.shutdown();
        loggingServer.shutdown();
        // Wait 30 seconds for outstanding requests to finish.
        if (statusServer != null) {
            statusServer.awaitTermination(30, TimeUnit.SECONDS);
        }
        servicesServer.awaitTermination(30, TimeUnit.SECONDS);
        loggingServer.awaitTermination(30, TimeUnit.SECONDS);
    } finally {
        if (statusServer != null && !statusServer.isTerminated()) {
            statusServer.shutdownNow();
        }
        if (servicesServer != null && !servicesServer.isTerminated()) {
            servicesServer.shutdownNow();
        }
        if (loggingServer != null && !loggingServer.isTerminated()) {
            loggingServer.shutdownNow();
        }
    }
}
Also used: GrpcStateService(org.apache.beam.runners.fnexecution.state.GrpcStateService), ApiServiceDescriptor(org.apache.beam.model.pipeline.v1.Endpoints.ApiServiceDescriptor), Server(org.apache.beam.vendor.grpc.v1p43p2.io.grpc.Server), DataflowWorkerHarnessOptions(org.apache.beam.runners.dataflow.options.DataflowWorkerHarnessOptions), ServerFactory(org.apache.beam.sdk.fn.server.ServerFactory), BeamFnControlService(org.apache.beam.runners.dataflow.worker.fn.BeamFnControlService), ServerStreamObserverFactory(org.apache.beam.runners.dataflow.worker.fn.stream.ServerStreamObserverFactory), BeamFnDataGrpcService(org.apache.beam.runners.dataflow.worker.fn.data.BeamFnDataGrpcService), RunnerApi(org.apache.beam.model.pipeline.v1.RunnerApi), BeamFnLoggingService(org.apache.beam.runners.dataflow.worker.fn.logging.BeamFnLoggingService), DataflowPipelineDebugOptions(org.apache.beam.runners.dataflow.options.DataflowPipelineDebugOptions), BeamWorkerStatusGrpcService(org.apache.beam.runners.fnexecution.status.BeamWorkerStatusGrpcService), Nullable(org.checkerframework.checker.nullness.qual.Nullable)
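
The shutdown sequence above (graceful shutdown, a bounded wait, then a forced stop in the finally block) can be read as one reusable pattern. A sketch, using only the io.grpc.Server methods already shown; the helper name and the 30-second grace period mirror the code above but are otherwise illustrative.

// Sketch: the per-server shutdown discipline used by the harness.
private static void stopServer(@Nullable Server server) throws InterruptedException {
    if (server == null) {
        return;
    }
    server.shutdown(); // stop accepting new calls
    if (!server.awaitTermination(30, TimeUnit.SECONDS)) {
        server.shutdownNow(); // force-cancel calls still in flight
    }
}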

Example 23 with Server

Use of org.apache.beam.vendor.grpc.v1p43p2.io.grpc.Server in project beam by apache.

From the class GrpcWindmillServerTest, method testStreamingGetWork.

@Test
public void testStreamingGetWork() throws Exception {
    // This fake server returns an infinite stream of identical WorkItems, obeying the request size
    // limits set by the client.
    serviceRegistry.addService(new CloudWindmillServiceV1Alpha1ImplBase() {

        @Override
        public StreamObserver<StreamingGetWorkRequest> getWorkStream(StreamObserver<StreamingGetWorkResponseChunk> responseObserver) {
            return new StreamObserver<StreamingGetWorkRequest>() {

                boolean sawHeader = false;

                ResponseErrorInjector injector = new ResponseErrorInjector(responseObserver);

                @Override
                public void onNext(StreamingGetWorkRequest request) {
                    maybeInjectError(responseObserver);
                    try {
                        long maxItems;
                        if (!sawHeader) {
                            errorCollector.checkThat(request.getRequest(), Matchers.equalTo(GetWorkRequest.newBuilder().setClientId(10).setJobId("job").setProjectId("project").setWorkerId("worker").setMaxItems(3).setMaxBytes(10000).build()));
                            sawHeader = true;
                            maxItems = request.getRequest().getMaxItems();
                        } else {
                            maxItems = request.getRequestExtension().getMaxItems();
                        }
                        for (int item = 0; item < maxItems; item++) {
                            long id = ThreadLocalRandom.current().nextLong();
                            ByteString serializedResponse = WorkItem.newBuilder().setKey(ByteString.copyFromUtf8("somewhat_long_key")).setWorkToken(id).setShardingKey(id).build().toByteString();
                            // Break the WorkItem into smaller chunks to test chunking code.
                            for (int i = 0; i < serializedResponse.size(); i += 10) {
                                int end = Math.min(serializedResponse.size(), i + 10);
                                StreamingGetWorkResponseChunk.Builder builder = StreamingGetWorkResponseChunk.newBuilder().setStreamId(id).setSerializedWorkItem(serializedResponse.substring(i, end)).setRemainingBytesForWorkItem(serializedResponse.size() - end);
                                if (i == 0) {
                                    builder.setComputationMetadata(ComputationWorkItemMetadata.newBuilder().setComputationId("comp").setDependentRealtimeInputWatermark(17000).setInputDataWatermark(18000));
                                }
                                try {
                                    responseObserver.onNext(builder.build());
                                } catch (IllegalStateException e) {
                                    // Client closed stream, we're done.
                                    return;
                                }
                            }
                        }
                    } catch (Exception e) {
                        errorCollector.addError(e);
                    }
                }

                @Override
                public void onError(Throwable throwable) {
                }

                @Override
                public void onCompleted() {
                    injector.cancel();
                    responseObserver.onCompleted();
                }
            };
        }
    });
    // Read the stream of WorkItems until 100 of them are received.
    CountDownLatch latch = new CountDownLatch(100);
    GetWorkStream stream = client.getWorkStream(GetWorkRequest.newBuilder().setClientId(10).setMaxItems(3).setMaxBytes(10000).build(), (String computation, @Nullable Instant inputDataWatermark, Instant synchronizedProcessingTime, Windmill.WorkItem workItem) -> {
        latch.countDown();
        assertEquals(inputDataWatermark, new Instant(18));
        assertEquals(synchronizedProcessingTime, new Instant(17));
        assertEquals(workItem.getKey(), ByteString.copyFromUtf8("somewhat_long_key"));
    });
    assertTrue(latch.await(30, TimeUnit.SECONDS));
    stream.close();
    assertTrue(stream.awaitTermination(30, TimeUnit.SECONDS));
}
Also used: StreamObserver(org.apache.beam.vendor.grpc.v1p43p2.io.grpc.stub.StreamObserver), CloudWindmillServiceV1Alpha1ImplBase(org.apache.beam.runners.dataflow.worker.windmill.CloudWindmillServiceV1Alpha1Grpc.CloudWindmillServiceV1Alpha1ImplBase), ByteString(org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.ByteString), InProcessServerBuilder(org.apache.beam.vendor.grpc.v1p43p2.io.grpc.inprocess.InProcessServerBuilder), Instant(org.joda.time.Instant), CountDownLatch(java.util.concurrent.CountDownLatch), GetWorkStream(org.apache.beam.runners.dataflow.worker.windmill.WindmillServerStub.GetWorkStream), WorkItem(org.apache.beam.runners.dataflow.worker.windmill.Windmill.WorkItem), StatusRuntimeException(org.apache.beam.vendor.grpc.v1p43p2.io.grpc.StatusRuntimeException), StreamingGetWorkResponseChunk(org.apache.beam.runners.dataflow.worker.windmill.Windmill.StreamingGetWorkResponseChunk), StreamingGetWorkRequest(org.apache.beam.runners.dataflow.worker.windmill.Windmill.StreamingGetWorkRequest), Nullable(org.checkerframework.checker.nullness.qual.Nullable), Test(org.junit.Test)
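
The fake server above splits each WorkItem across several chunks, each tagged with the number of bytes still to come. A sketch of the matching client-side reassembly, assuming a hypothetical process(WorkItem) handler; the production stream handling lives inside GrpcWindmillServer and is more involved.

// Sketch: reassemble chunked WorkItems on the receiving side.
class WorkItemAssembler {
    private ByteString accumulated = ByteString.EMPTY;

    void onChunk(StreamingGetWorkResponseChunk chunk) throws Exception {
        accumulated = accumulated.concat(chunk.getSerializedWorkItem());
        if (chunk.getRemainingBytesForWorkItem() == 0) {
            // All bytes for this WorkItem have arrived; parse and hand it off.
            WorkItem workItem = WorkItem.parseFrom(accumulated);
            accumulated = ByteString.EMPTY;
            process(workItem); // process(...) is a hypothetical downstream handler
        }
    }
}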

Example 24 with Server

Use of org.apache.beam.vendor.grpc.v1p43p2.io.grpc.Server in project beam by apache.

From the class GrpcWindmillServerTest, method testStreamingCommit.

@Test
public void testStreamingCommit() throws Exception {
    List<WorkItemCommitRequest> commitRequestList = new ArrayList<>();
    List<CountDownLatch> latches = new ArrayList<>();
    Map<Long, WorkItemCommitRequest> commitRequests = new HashMap<>();
    for (int i = 0; i < 500; ++i) {
        // Build some requests of varying size with a few big ones.
        WorkItemCommitRequest request = makeCommitRequest(i, i * (i < 480 ? 8 : 128));
        commitRequestList.add(request);
        commitRequests.put((long) i, request);
        latches.add(new CountDownLatch(1));
    }
    Collections.shuffle(commitRequestList);
    // This server receives WorkItemCommitRequests and verifies that each one
    // equals the corresponding request built above.
    serviceRegistry.addService(new CloudWindmillServiceV1Alpha1ImplBase() {

        @Override
        public StreamObserver<StreamingCommitWorkRequest> commitWorkStream(StreamObserver<StreamingCommitResponse> responseObserver) {
            return new StreamObserver<StreamingCommitWorkRequest>() {

                boolean sawHeader = false;

                InputStream buffer = null;

                long remainingBytes = 0;

                ResponseErrorInjector injector = new ResponseErrorInjector(responseObserver);

                @Override
                public void onNext(StreamingCommitWorkRequest request) {
                    maybeInjectError(responseObserver);
                    if (!sawHeader) {
                        errorCollector.checkThat(request.getHeader(), Matchers.equalTo(JobHeader.newBuilder().setJobId("job").setProjectId("project").setWorkerId("worker").build()));
                        sawHeader = true;
                        LOG.info("Received header");
                    } else {
                        boolean first = true;
                        LOG.info("Received request with {} chunks", request.getCommitChunkCount());
                        for (StreamingCommitRequestChunk chunk : request.getCommitChunkList()) {
                            assertTrue(chunk.getSerializedWorkItemCommit().size() <= STREAM_CHUNK_SIZE);
                            if (first || chunk.hasComputationId()) {
                                errorCollector.checkThat(chunk.getComputationId(), Matchers.equalTo("computation"));
                            }
                            if (remainingBytes != 0) {
                                errorCollector.checkThat(buffer, Matchers.notNullValue());
                                errorCollector.checkThat(remainingBytes, Matchers.is(chunk.getSerializedWorkItemCommit().size() + chunk.getRemainingBytesForWorkItem()));
                                buffer = new SequenceInputStream(buffer, chunk.getSerializedWorkItemCommit().newInput());
                            } else {
                                errorCollector.checkThat(buffer, Matchers.nullValue());
                                buffer = chunk.getSerializedWorkItemCommit().newInput();
                            }
                            remainingBytes = chunk.getRemainingBytesForWorkItem();
                            if (remainingBytes == 0) {
                                try {
                                    WorkItemCommitRequest received = WorkItemCommitRequest.parseFrom(buffer);
                                    errorCollector.checkThat(received, Matchers.equalTo(commitRequests.get(received.getWorkToken())));
                                    try {
                                        responseObserver.onNext(StreamingCommitResponse.newBuilder().addRequestId(chunk.getRequestId()).build());
                                    } catch (IllegalStateException e) {
                                    // Stream is closed.
                                    }
                                } catch (Exception e) {
                                    errorCollector.addError(e);
                                }
                                buffer = null;
                            } else {
                                errorCollector.checkThat(first, Matchers.is(true));
                            }
                            first = false;
                        }
                    }
                }

                @Override
                public void onError(Throwable throwable) {
                }

                @Override
                public void onCompleted() {
                    injector.cancel();
                    responseObserver.onCompleted();
                }
            };
        }
    });
    // Make the commit requests, waiting for each of them to be verified and acknowledged.
    CommitWorkStream stream = client.commitWorkStream();
    for (int i = 0; i < commitRequestList.size(); ) {
        final CountDownLatch latch = latches.get(i);
        if (stream.commitWorkItem("computation", commitRequestList.get(i), (CommitStatus status) -> {
            assertEquals(status, CommitStatus.OK);
            latch.countDown();
        })) {
            i++;
        } else {
            stream.flush();
        }
    }
    stream.flush();
    for (CountDownLatch latch : latches) {
        assertTrue(latch.await(1, TimeUnit.MINUTES));
    }
    stream.close();
    assertTrue(stream.awaitTermination(30, TimeUnit.SECONDS));
}
Also used: HashMap(java.util.HashMap), ArrayList(java.util.ArrayList), StreamingCommitWorkRequest(org.apache.beam.runners.dataflow.worker.windmill.Windmill.StreamingCommitWorkRequest), CommitWorkStream(org.apache.beam.runners.dataflow.worker.windmill.WindmillServerStub.CommitWorkStream), StreamingCommitRequestChunk(org.apache.beam.runners.dataflow.worker.windmill.Windmill.StreamingCommitRequestChunk), CommitStatus(org.apache.beam.runners.dataflow.worker.windmill.Windmill.CommitStatus), StreamObserver(org.apache.beam.vendor.grpc.v1p43p2.io.grpc.stub.StreamObserver), CloudWindmillServiceV1Alpha1ImplBase(org.apache.beam.runners.dataflow.worker.windmill.CloudWindmillServiceV1Alpha1Grpc.CloudWindmillServiceV1Alpha1ImplBase), SequenceInputStream(java.io.SequenceInputStream), InputStream(java.io.InputStream), CountDownLatch(java.util.concurrent.CountDownLatch), StatusRuntimeException(org.apache.beam.vendor.grpc.v1p43p2.io.grpc.StatusRuntimeException), WorkItemCommitRequest(org.apache.beam.runners.dataflow.worker.windmill.Windmill.WorkItemCommitRequest), StreamingCommitResponse(org.apache.beam.runners.dataflow.worker.windmill.Windmill.StreamingCommitResponse), Test(org.junit.Test)
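
The server above checks that no chunk exceeds STREAM_CHUNK_SIZE (a constant of the test class) and that only a chunk starting a new item names the computation. A sketch of a sending side that would satisfy those checks, using the builder methods implied by the getters in the test; the helper itself is illustrative.

// Sketch: split one serialized commit into protocol-sized chunks.
static List<StreamingCommitRequestChunk> chunkCommit(WorkItemCommitRequest commit, long requestId) {
    ByteString serialized = commit.toByteString();
    List<StreamingCommitRequestChunk> chunks = new ArrayList<>();
    for (int i = 0; i < serialized.size(); i += STREAM_CHUNK_SIZE) {
        int end = Math.min(serialized.size(), i + STREAM_CHUNK_SIZE);
        StreamingCommitRequestChunk.Builder builder = StreamingCommitRequestChunk.newBuilder()
            .setRequestId(requestId)
            .setSerializedWorkItemCommit(serialized.substring(i, end))
            .setRemainingBytesForWorkItem(serialized.size() - end);
        if (i == 0) {
            // Only the first chunk of an item carries the computation id.
            builder.setComputationId("computation");
        }
        chunks.add(builder.build());
    }
    return chunks;
}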

Example 25 with Server

Use of org.apache.beam.vendor.grpc.v1p43p2.io.grpc.Server in project beam by apache.

From the class GrpcWindmillServerTest, method setUp.

@Before
public void setUp() throws Exception {
    String name = "Fake server for " + getClass();
    this.server = InProcessServerBuilder.forName(name).fallbackHandlerRegistry(serviceRegistry).executor(Executors.newFixedThreadPool(1)).build().start();
    this.client = GrpcWindmillServer.newTestInstance(name, true);
}
Also used: ByteString(org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.ByteString), Before(org.junit.Before)
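
The setup above relies on gRPC's in-process transport: server and client rendezvous on a shared name, with no network sockets involved, which keeps the test fast and hermetic. A sketch of the underlying pattern; in Beam the imports come from the vendored grpc packages, and in the test itself the channel side is hidden inside GrpcWindmillServer.newTestInstance.

// Sketch: in-process gRPC server plus a client channel for tests.
String name = InProcessServerBuilder.generateName(); // unique per test
Server server = InProcessServerBuilder.forName(name)
    .directExecutor()
    .fallbackHandlerRegistry(serviceRegistry)
    .build()
    .start();
ManagedChannel channel = InProcessChannelBuilder.forName(name).build();
// ... exercise stubs bound to `channel`, then shut both down.
channel.shutdownNow();
server.shutdownNow();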

Aggregations

Test (org.junit.Test): 76
Server (io.grpc.Server): 68
IOException (java.io.IOException): 27
ByteString (org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.ByteString): 24
ArrayList (java.util.ArrayList): 21
StreamObserver (org.apache.beam.vendor.grpc.v1p43p2.io.grpc.stub.StreamObserver): 21
ExecutionException (java.util.concurrent.ExecutionException): 20
TimeoutException (java.util.concurrent.TimeoutException): 20
CountDownLatch (java.util.concurrent.CountDownLatch): 19
ManagedChannel (org.apache.beam.vendor.grpc.v1p43p2.io.grpc.ManagedChannel): 19
Server (org.apache.beam.vendor.grpc.v1p43p2.io.grpc.Server): 18
StatusException (io.grpc.StatusException): 17
Metadata (io.grpc.Metadata): 12
List (java.util.List): 12
BeamFnApi (org.apache.beam.model.fnexecution.v1.BeamFnApi): 12
FilterChain (io.grpc.xds.EnvoyServerProtoData.FilterChain): 11
ExecutorService (java.util.concurrent.ExecutorService): 11
ParallelInstruction (com.google.api.services.dataflow.model.ParallelInstruction): 10
ServerCall (io.grpc.ServerCall): 10
StreamObserver (io.grpc.stub.StreamObserver): 10