
Example 41 with ManagedChannel

Use of org.apache.beam.vendor.grpc.v1p43p2.io.grpc.ManagedChannel in project beam by apache.

From the class BeamFnDataGrpcServiceTest, method testMessageReceivedBySingleClientWhenThereAreMultipleClients.

@Test
public void testMessageReceivedBySingleClientWhenThereAreMultipleClients() throws Exception {
    BlockingQueue<Elements> clientInboundElements = new LinkedBlockingQueue<>();
    ExecutorService executorService = Executors.newCachedThreadPool();
    CountDownLatch waitForInboundElements = new CountDownLatch(1);
    int numberOfClients = 3;
    for (int client = 0; client < numberOfClients; ++client) {
        executorService.submit(() -> {
            // Every client opens its own channel to the data service, tagging its calls with
            // the shared worker id header so the server treats them all as the same harness.
            ManagedChannel channel =
                ManagedChannelFactory.createDefault()
                    .withInterceptors(Arrays.asList(AddHarnessIdInterceptor.create(WORKER_ID)))
                    .forDescriptor(service.getApiServiceDescriptor());
            StreamObserver<BeamFnApi.Elements> outboundObserver =
                BeamFnDataGrpc.newStub(channel)
                    .data(TestStreams.withOnNext(clientInboundElements::add).build());
            waitForInboundElements.await();
            outboundObserver.onCompleted();
            return null;
        });
    }
    for (int i = 0; i < 3; ++i) {
        CloseableFnDataReceiver<WindowedValue<String>> consumer =
            service
                .getDataService(WORKER_ID)
                .send(LogicalEndpoint.data(Integer.toString(i), TRANSFORM_ID), CODER);
        consumer.accept(valueInGlobalWindow("A" + i));
        consumer.accept(valueInGlobalWindow("B" + i));
        consumer.accept(valueInGlobalWindow("C" + i));
        consumer.close();
    }
    // Specifically copy the elements to a new list so we perform blocking calls on the queue
    // to ensure the elements arrive.
    List<Elements> copy = new ArrayList<>();
    for (int i = 0; i < numberOfClients; ++i) {
        copy.add(clientInboundElements.take());
    }
    assertThat(
        copy,
        containsInAnyOrder(elementsWithData("0"), elementsWithData("1"), elementsWithData("2")));
    waitForInboundElements.countDown();
}
Also used : ArrayList(java.util.ArrayList) Elements(org.apache.beam.model.fnexecution.v1.BeamFnApi.Elements) LinkedBlockingQueue(java.util.concurrent.LinkedBlockingQueue) CountDownLatch(java.util.concurrent.CountDownLatch) LogicalEndpoint(org.apache.beam.sdk.fn.data.LogicalEndpoint) WindowedValue(org.apache.beam.sdk.util.WindowedValue) ExecutorService(java.util.concurrent.ExecutorService) ManagedChannel(org.apache.beam.vendor.grpc.v1p43p2.io.grpc.ManagedChannel) Test(org.junit.Test)
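Every client in this test builds its channel the same way: a vendored ManagedChannel derived from the service's ApiServiceDescriptor, with an interceptor that attaches the worker id header. The snippet below is a minimal sketch, not part of the Beam test: it isolates that channel/stub setup and adds the explicit shutdown the test leaves to teardown, reusing the service and WORKER_ID fixtures from BeamFnDataGrpcServiceTest.

// Sketch only: `service` and WORKER_ID stand for the fixtures of the test class above.
BlockingQueue<BeamFnApi.Elements> inbound = new LinkedBlockingQueue<>();
ManagedChannel channel =
    ManagedChannelFactory.createDefault()
        .withInterceptors(Arrays.asList(AddHarnessIdInterceptor.create(WORKER_ID)))
        .forDescriptor(service.getApiServiceDescriptor());
try {
    // data(...) opens a bidirectional Elements stream; the observer handed to it sees
    // everything the server pushes to this client.
    StreamObserver<BeamFnApi.Elements> outbound =
        BeamFnDataGrpc.newStub(channel).data(TestStreams.withOnNext(inbound::add).build());
    outbound.onCompleted();
} finally {
    // Closing the channel explicitly avoids leaking gRPC resources outside of tests.
    channel.shutdown();
}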

Example 42 with ManagedChannel

Use of org.apache.beam.vendor.grpc.v1p43p2.io.grpc.ManagedChannel in project beam by apache.

From the class BeamFnDataGrpcServiceTest, method testMultipleClientsSendMessagesAreDirectedToProperConsumers.

@Test
public void testMultipleClientsSendMessagesAreDirectedToProperConsumers() throws Exception {
    LinkedBlockingQueue<BeamFnApi.Elements> clientInboundElements = new LinkedBlockingQueue<>();
    ExecutorService executorService = Executors.newCachedThreadPool();
    CountDownLatch waitForInboundElements = new CountDownLatch(1);
    for (int i = 0; i < 3; ++i) {
        String instructionId = Integer.toString(i);
        executorService.submit(() -> {
            ManagedChannel channel =
                ManagedChannelFactory.createDefault()
                    .withInterceptors(Arrays.asList(AddHarnessIdInterceptor.create(WORKER_ID)))
                    .forDescriptor(service.getApiServiceDescriptor());
            StreamObserver<BeamFnApi.Elements> outboundObserver =
                BeamFnDataGrpc.newStub(channel)
                    .data(TestStreams.withOnNext(clientInboundElements::add).build());
            // Each client sends elements tagged with its own instruction id; the service
            // must route them to the receiver registered for that id on the server side.
            outboundObserver.onNext(elementsWithData(instructionId));
            waitForInboundElements.await();
            outboundObserver.onCompleted();
            return null;
        });
    }
    List<Collection<WindowedValue<String>>> serverInboundValues = new ArrayList<>();
    Collection<InboundDataClient> inboundDataClients = new ArrayList<>();
    for (int i = 0; i < 3; ++i) {
        BlockingQueue<WindowedValue<String>> serverInboundValue = new LinkedBlockingQueue<>();
        serverInboundValues.add(serverInboundValue);
        inboundDataClients.add(
            service
                .getDataService(WORKER_ID)
                .receive(
                    LogicalEndpoint.data(Integer.toString(i), TRANSFORM_ID),
                    CODER,
                    serverInboundValue::add));
    }
    // Waiting for the client provides the necessary synchronization for the elements to arrive.
    for (InboundDataClient inboundDataClient : inboundDataClients) {
        inboundDataClient.awaitCompletion();
    }
    waitForInboundElements.countDown();
    for (int i = 0; i < 3; ++i) {
        assertThat(
            serverInboundValues.get(i),
            contains(
                valueInGlobalWindow("A" + i),
                valueInGlobalWindow("B" + i),
                valueInGlobalWindow("C" + i)));
    }
    assertThat(clientInboundElements, empty());
}
Also used : ArrayList(java.util.ArrayList) ByteString(org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.ByteString) Elements(org.apache.beam.model.fnexecution.v1.BeamFnApi.Elements) LinkedBlockingQueue(java.util.concurrent.LinkedBlockingQueue) CountDownLatch(java.util.concurrent.CountDownLatch) LogicalEndpoint(org.apache.beam.sdk.fn.data.LogicalEndpoint) InboundDataClient(org.apache.beam.sdk.fn.data.InboundDataClient) WindowedValue(org.apache.beam.sdk.util.WindowedValue) ExecutorService(java.util.concurrent.ExecutorService) ManagedChannel(org.apache.beam.vendor.grpc.v1p43p2.io.grpc.ManagedChannel) Collection(java.util.Collection) Test(org.junit.Test)
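The server half of this round trip is the receive(...) registration: a receiver is installed for each instruction id before the clients send anything, and the returned InboundDataClient is the handle used to wait for that endpoint's stream to finish. A condensed sketch of the handshake for a single instruction id, again reusing the test's service, WORKER_ID, TRANSFORM_ID and CODER fixtures and assuming it runs where checked exceptions may propagate:

// Sketch only: register a receiver for one logical endpoint and wait for its data.
BlockingQueue<WindowedValue<String>> received = new LinkedBlockingQueue<>();
InboundDataClient inbound =
    service
        .getDataService(WORKER_ID)
        .receive(LogicalEndpoint.data("0", TRANSFORM_ID), CODER, received::add);
// Returns only after the client owning instruction id "0" has finished its stream, so
// by this point every element for that endpoint has been handed to received::add.
inbound.awaitCompletion();

That awaitCompletion() call is what gives the test its ordering guarantee before it asserts on serverInboundValues.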

Example 43 with ManagedChannel

Use of org.apache.beam.vendor.grpc.v1p43p2.io.grpc.ManagedChannel in project beam by apache.

From the class BeamFnLoggingServiceTest, method testMultipleClientsFailingIsHandledGracefullyByServer.

@Test(timeout = 5000)
public void testMultipleClientsFailingIsHandledGracefullyByServer() throws Exception {
    Collection<Callable<Void>> tasks = new ArrayList<>();
    ConcurrentLinkedQueue<BeamFnApi.LogEntry> logs = new ConcurrentLinkedQueue<>();
    try (BeamFnLoggingService service =
        new BeamFnLoggingService(
            findOpenPort(),
            logs::add,
            ServerStreamObserverFactory.fromOptions(PipelineOptionsFactory.create())::from,
            GrpcContextHeaderAccessorProvider.getHeaderAccessor())) {
        server = ServerFactory.createDefault().create(Arrays.asList(service), service.getApiServiceDescriptor());
        CountDownLatch waitForTermination = new CountDownLatch(3);
        final BlockingQueue<StreamObserver<List>> outboundObservers = new LinkedBlockingQueue<>();
        for (int i = 1; i <= 3; ++i) {
            int instructionId = i;
            tasks.add(() -> {
                // Unlike the data-service tests, every logging client presents a distinct
                // worker id (WORKER_ID + instructionId), so the server tracks each one separately.
                ManagedChannel channel =
                    ManagedChannelFactory.createDefault()
                        .withInterceptors(
                            Arrays.asList(AddHarnessIdInterceptor.create(WORKER_ID + instructionId)))
                        .forDescriptor(service.getApiServiceDescriptor());
                StreamObserver<BeamFnApi.LogEntry.List> outboundObserver =
                    BeamFnLoggingGrpc.newStub(channel)
                        .logging(
                            TestStreams.withOnNext(BeamFnLoggingServiceTest::discardMessage)
                                .withOnError(waitForTermination::countDown)
                                .build());
                outboundObserver.onNext(createLogsWithIds(instructionId, -instructionId));
                outboundObservers.add(outboundObserver);
                return null;
            });
        }
        ExecutorService executorService = Executors.newCachedThreadPool();
        executorService.invokeAll(tasks);
        for (int i = 1; i <= 3; ++i) {
            outboundObservers.take().onError(new RuntimeException("Client " + i));
        }
        waitForTermination.await();
    }
}
Also used : StreamObserver(org.apache.beam.vendor.grpc.v1p43p2.io.grpc.stub.StreamObserver) BeamFnApi(org.apache.beam.model.fnexecution.v1.BeamFnApi) ArrayList(java.util.ArrayList) CountDownLatch(java.util.concurrent.CountDownLatch) LinkedBlockingQueue(java.util.concurrent.LinkedBlockingQueue) Callable(java.util.concurrent.Callable) ExecutorService(java.util.concurrent.ExecutorService) ManagedChannel(org.apache.beam.vendor.grpc.v1p43p2.io.grpc.ManagedChannel) ArrayList(java.util.ArrayList) List(org.apache.beam.model.fnexecution.v1.BeamFnApi.LogEntry.List) ConcurrentLinkedQueue(java.util.concurrent.ConcurrentLinkedQueue) Test(org.junit.Test)
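The failure path this test exercises is worth spelling out: each client aborts its call by invoking onError on the outbound observer, gRPC cancels the whole stream, the client's response observer (wired to waitForTermination::countDown above) sees the error, and the server has to absorb the cancellation without disturbing the other clients. A minimal sketch of one such failing client under the same assumptions (test fixtures service, WORKER_ID, the discardMessage helper, and a context that may throw checked exceptions):

// Sketch only: a single logging client that deliberately aborts its stream.
CountDownLatch clientSawError = new CountDownLatch(1);
ManagedChannel channel =
    ManagedChannelFactory.createDefault()
        .withInterceptors(Arrays.asList(AddHarnessIdInterceptor.create(WORKER_ID + "1")))
        .forDescriptor(service.getApiServiceDescriptor());
StreamObserver<BeamFnApi.LogEntry.List> outboundObserver =
    BeamFnLoggingGrpc.newStub(channel)
        .logging(
            TestStreams.withOnNext(BeamFnLoggingServiceTest::discardMessage)
                .withOnError(clientSawError::countDown)
                .build());
// Aborting the outbound stream cancels the RPC; a well-behaved server logs and moves on.
outboundObserver.onError(new RuntimeException("simulated client failure"));
// The cancellation propagates back to this client's response observer.
clientSawError.await();
channel.shutdown();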

Example 44 with ManagedChannel

Use of org.apache.beam.vendor.grpc.v1p43p2.io.grpc.ManagedChannel in project beam by apache.

From the class PortableRunner, method run.

@Override
public PipelineResult run(Pipeline pipeline) {
    Runnable cleanup;
    if (Environments.ENVIRONMENT_LOOPBACK.equals(options.as(PortablePipelineOptions.class).getDefaultEnvironmentType())) {
        GrpcFnServer<ExternalWorkerService> workerService;
        try {
            workerService = new ExternalWorkerService(options).start();
        } catch (Exception exn) {
            throw new RuntimeException("Failed to start GrpcFnServer for ExternalWorkerService", exn);
        }
        LOG.info("Starting worker service at {}", workerService.getApiServiceDescriptor().getUrl());
        options.as(PortablePipelineOptions.class).setDefaultEnvironmentConfig(workerService.getApiServiceDescriptor().getUrl());
        cleanup = () -> {
            try {
                LOG.warn("closing worker service {}", workerService);
                workerService.close();
            } catch (Exception exn) {
                throw new RuntimeException(exn);
            }
        };
    } else {
        cleanup = null;
    }
    ImmutableList.Builder<String> filesToStageBuilder = ImmutableList.builder();
    List<String> stagingFiles = options.as(PortablePipelineOptions.class).getFilesToStage();
    if (stagingFiles == null) {
        List<String> classpathResources = detectClassPathResourcesToStage(Environments.class.getClassLoader(), options);
        if (classpathResources.isEmpty()) {
            throw new IllegalArgumentException("No classpath elements found.");
        }
        LOG.debug("PortablePipelineOptions.filesToStage was not specified. " + "Defaulting to files from the classpath: {}", classpathResources.size());
        filesToStageBuilder.addAll(classpathResources);
    } else {
        filesToStageBuilder.addAll(stagingFiles);
    }
    // TODO(heejong): remove jar_packages experimental flag when cross-language dependency
    // management is implemented for all runners.
    List<String> experiments = options.as(ExperimentalOptions.class).getExperiments();
    if (experiments != null) {
        Optional<String> jarPackages = experiments.stream().filter((String flag) -> flag.startsWith("jar_packages=")).findFirst();
        jarPackages.ifPresent(s -> filesToStageBuilder.addAll(Arrays.asList(s.replaceFirst("jar_packages=", "").split(","))));
    }
    options.as(PortablePipelineOptions.class).setFilesToStage(filesToStageBuilder.build());
    RunnerApi.Pipeline pipelineProto = PipelineTranslation.toProto(pipeline, SdkComponents.create(options));
    pipelineProto = DefaultArtifactResolver.INSTANCE.resolveArtifacts(pipelineProto);
    PrepareJobRequest prepareJobRequest =
        PrepareJobRequest.newBuilder()
            .setJobName(options.getJobName())
            .setPipeline(pipelineProto)
            .setPipelineOptions(PipelineOptionsTranslation.toProto(options))
            .build();
    LOG.info("Using job server endpoint: {}", endpoint);
    ManagedChannel jobServiceChannel =
        channelFactory.forDescriptor(ApiServiceDescriptor.newBuilder().setUrl(endpoint).build());
    JobServiceBlockingStub jobService = JobServiceGrpc.newBlockingStub(jobServiceChannel);
    try (CloseableResource<JobServiceBlockingStub> wrappedJobService = CloseableResource.of(jobService, unused -> jobServiceChannel.shutdown())) {
        final int jobServerTimeout = options.as(PortablePipelineOptions.class).getJobServerTimeout();
        PrepareJobResponse prepareJobResponse =
            jobService
                .withDeadlineAfter(jobServerTimeout, TimeUnit.SECONDS)
                .withWaitForReady()
                .prepare(prepareJobRequest);
        LOG.info("PrepareJobResponse: {}", prepareJobResponse);
        ApiServiceDescriptor artifactStagingEndpoint = prepareJobResponse.getArtifactStagingEndpoint();
        String stagingSessionToken = prepareJobResponse.getStagingSessionToken();
        try (CloseableResource<ManagedChannel> artifactChannel =
            CloseableResource.of(
                channelFactory.forDescriptor(artifactStagingEndpoint), ManagedChannel::shutdown)) {
            ArtifactStagingService.offer(
                new ArtifactRetrievalService(),
                ArtifactStagingServiceGrpc.newStub(artifactChannel.get()),
                stagingSessionToken);
        } catch (CloseableResource.CloseException e) {
            LOG.warn("Error closing artifact staging channel", e);
        // CloseExceptions should only be thrown while closing the channel.
        } catch (Exception e) {
            throw new RuntimeException("Error staging files.", e);
        }
        RunJobRequest runJobRequest = RunJobRequest.newBuilder().setPreparationId(prepareJobResponse.getPreparationId()).build();
        // Run the job and wait for a result. We don't set a timeout here because it may
        // take a long time for a job to complete, and streaming jobs never return a response.
        RunJobResponse runJobResponse = jobService.run(runJobRequest);
        LOG.info("RunJobResponse: {}", runJobResponse);
        ByteString jobId = runJobResponse.getJobIdBytes();
        return new JobServicePipelineResult(jobId, jobServerTimeout, wrappedJobService.transfer(), cleanup);
    } catch (CloseException e) {
        throw new RuntimeException(e);
    }
}
Also used : JobServiceBlockingStub(org.apache.beam.model.jobmanagement.v1.JobServiceGrpc.JobServiceBlockingStub) PrepareJobResponse(org.apache.beam.model.jobmanagement.v1.JobApi.PrepareJobResponse) ApiServiceDescriptor(org.apache.beam.model.pipeline.v1.Endpoints.ApiServiceDescriptor) ImmutableList(org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.ImmutableList) ByteString(org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.ByteString) CloseException(org.apache.beam.runners.portability.CloseableResource.CloseException) ExperimentalOptions(org.apache.beam.sdk.options.ExperimentalOptions) ArtifactRetrievalService(org.apache.beam.runners.fnexecution.artifact.ArtifactRetrievalService) RunJobResponse(org.apache.beam.model.jobmanagement.v1.JobApi.RunJobResponse) ByteString(org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.ByteString) RunnerApi(org.apache.beam.model.pipeline.v1.RunnerApi) RunJobRequest(org.apache.beam.model.jobmanagement.v1.JobApi.RunJobRequest) ExternalWorkerService(org.apache.beam.fn.harness.ExternalWorkerService) PrepareJobRequest(org.apache.beam.model.jobmanagement.v1.JobApi.PrepareJobRequest) ManagedChannel(org.apache.beam.vendor.grpc.v1p43p2.io.grpc.ManagedChannel) CloseException(org.apache.beam.runners.portability.CloseableResource.CloseException) CloseException(org.apache.beam.runners.portability.CloseableResource.CloseException) Environments(org.apache.beam.runners.core.construction.Environments) PortablePipelineOptions(org.apache.beam.sdk.options.PortablePipelineOptions)
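Two ManagedChannel lifecycles appear in run(). The artifact-staging channel is short-lived: it is wrapped in a CloseableResource with ManagedChannel::shutdown as the closer, so it is shut down as soon as staging finishes. The job-service channel outlives the method, because wrappedJobService.transfer() hands ownership to the JobServicePipelineResult, so that channel stays open for the returned result to use. The sketch below restates only the short-lived pattern; channelFactory and artifactStagingEndpoint are stand-ins for the values computed earlier in the method, not new API.

// Sketch only: open a channel for a bounded piece of work and guarantee shutdown().
try (CloseableResource<ManagedChannel> artifactChannel =
    CloseableResource.of(
        channelFactory.forDescriptor(artifactStagingEndpoint), ManagedChannel::shutdown)) {
    // Work against artifactChannel.get() goes here; when the block exits,
    // CloseableResource invokes the ManagedChannel::shutdown closer.
    ArtifactStagingServiceGrpc.newStub(artifactChannel.get());
} catch (CloseableResource.CloseException e) {
    // Raised only while closing the channel, i.e. after the staging work itself succeeded.
}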

Example 45 with ManagedChannel

Use of org.apache.beam.vendor.grpc.v1p43p2.io.grpc.ManagedChannel in project beam by apache.

From the class GrpcLoggingServiceTest, method testMultipleClientsSuccessfullyProcessed.

@Test
public void testMultipleClientsSuccessfullyProcessed() throws Exception {
    ConcurrentLinkedQueue<BeamFnApi.LogEntry> logs = new ConcurrentLinkedQueue<>();
    GrpcLoggingService service = GrpcLoggingService.forWriter(new CollectionAppendingLogWriter(logs));
    try (GrpcFnServer<GrpcLoggingService> server = GrpcFnServer.allocatePortAndCreateFor(service, InProcessServerFactory.create())) {
        Collection<Callable<Void>> tasks = new ArrayList<>();
        for (int i = 1; i <= 3; ++i) {
            final int instructionId = i;
            tasks.add(() -> {
                CountDownLatch waitForServerHangup = new CountDownLatch(1);
                String url = server.getApiServiceDescriptor().getUrl();
                // The logging service is reached over an in-process channel here, so there is
                // no TCP port and no harness-id interceptor; the descriptor URL is the channel name.
                ManagedChannel channel = InProcessChannelBuilder.forName(url).build();
                StreamObserver<LogEntry.List> outboundObserver =
                    BeamFnLoggingGrpc.newStub(channel)
                        .logging(
                            TestStreams.withOnNext(messageDiscarder)
                                .withOnCompleted(new CountDown(waitForServerHangup))
                                .build());
                outboundObserver.onNext(createLogsWithIds(instructionId, -instructionId));
                outboundObserver.onCompleted();
                waitForServerHangup.await();
                return null;
            });
        }
        ExecutorService executorService = Executors.newCachedThreadPool();
        executorService.invokeAll(tasks);
        assertThat(
            logs,
            containsInAnyOrder(
                createLogWithId(1L),
                createLogWithId(2L),
                createLogWithId(3L),
                createLogWithId(-1L),
                createLogWithId(-2L),
                createLogWithId(-3L)));
    }
}
Also used : ArrayList(java.util.ArrayList) CountDownLatch(java.util.concurrent.CountDownLatch) Callable(java.util.concurrent.Callable) ExecutorService(java.util.concurrent.ExecutorService) ManagedChannel(org.apache.beam.vendor.grpc.v1p43p2.io.grpc.ManagedChannel) ArrayList(java.util.ArrayList) ConcurrentLinkedQueue(java.util.concurrent.ConcurrentLinkedQueue) LogEntry(org.apache.beam.model.fnexecution.v1.BeamFnApi.LogEntry) Test(org.junit.Test)
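This example differs from 41–43 in how the channel is built: the server is created with InProcessServerFactory, so clients reach it with InProcessChannelBuilder.forName(url) instead of a network channel, and no harness-id interceptor is involved. Below is a minimal sketch of that in-process pairing with an explicit channel shutdown added; as in the test, assume it runs inside a method that declares throws Exception.

// Sketch only: pair an in-process logging server with an in-process channel.
ConcurrentLinkedQueue<BeamFnApi.LogEntry> logs = new ConcurrentLinkedQueue<>();
GrpcLoggingService service = GrpcLoggingService.forWriter(new CollectionAppendingLogWriter(logs));
try (GrpcFnServer<GrpcLoggingService> server =
    GrpcFnServer.allocatePortAndCreateFor(service, InProcessServerFactory.create())) {
    // For in-process servers the descriptor URL doubles as the in-process channel name.
    ManagedChannel channel =
        InProcessChannelBuilder.forName(server.getApiServiceDescriptor().getUrl()).build();
    try {
        // ... open the Logging stream and send entries here, as the test above does ...
        BeamFnLoggingGrpc.newStub(channel);
    } finally {
        // In-process channels still hold resources; shut them down when finished.
        channel.shutdownNow();
    }
}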

Aggregations

ManagedChannel (io.grpc.ManagedChannel): 163
Test (org.junit.Test): 92
CountDownLatch (java.util.concurrent.CountDownLatch): 26
ManagedChannel (org.apache.beam.vendor.grpc.v1p43p2.io.grpc.ManagedChannel): 26
ArrayList (java.util.ArrayList): 20
Metadata (io.grpc.Metadata): 18
EquivalentAddressGroup (io.grpc.EquivalentAddressGroup): 15
ExecutorService (java.util.concurrent.ExecutorService): 13
ByteString (com.google.protobuf.ByteString): 12
Status (io.grpc.Status): 12
StreamObserver (org.apache.beam.vendor.grpc.v1p43p2.io.grpc.stub.StreamObserver): 11
StreamObserver (io.grpc.stub.StreamObserver): 10
ConcurrentLinkedQueue (java.util.concurrent.ConcurrentLinkedQueue): 10
AtomicReference (java.util.concurrent.atomic.AtomicReference): 10
BeamFnApi (org.apache.beam.model.fnexecution.v1.BeamFnApi): 10
CallOptions (io.grpc.CallOptions): 9
Subchannel (io.grpc.LoadBalancer.Subchannel): 9
SubchannelPicker (io.grpc.LoadBalancer.SubchannelPicker): 9
LinkedBlockingQueue (java.util.concurrent.LinkedBlockingQueue): 9
Endpoints (org.apache.beam.model.pipeline.v1.Endpoints): 9