Use of org.apache.beam.runners.fnexecution.artifact.ArtifactRetrievalService in project beam by apache.
The class DefaultJobBundleFactory, method createServerInfo:
private ServerInfo createServerInfo(JobInfo jobInfo, ServerFactory serverFactory) throws IOException {
  Preconditions.checkNotNull(serverFactory, "serverFactory can not be null");
  PortablePipelineOptions portableOptions =
      PipelineOptionsTranslation.fromProto(jobInfo.pipelineOptions()).as(PortablePipelineOptions.class);
  GrpcFnServer<FnApiControlClientPoolService> controlServer =
      GrpcFnServer.allocatePortAndCreateFor(
          FnApiControlClientPoolService.offeringClientsToPool(
              clientPool.getSink(), GrpcContextHeaderAccessorProvider.getHeaderAccessor()),
          serverFactory);
  GrpcFnServer<GrpcLoggingService> loggingServer =
      GrpcFnServer.allocatePortAndCreateFor(
          GrpcLoggingService.forWriter(Slf4jLogWriter.getDefault()), serverFactory);
  // Serves staged artifacts to SDK harness environments.
  GrpcFnServer<ArtifactRetrievalService> retrievalServer =
      GrpcFnServer.allocatePortAndCreateFor(new ArtifactRetrievalService(), serverFactory);
  // Advertise the control, logging, and artifact endpoints to booting environments.
  ProvisionApi.ProvisionInfo.Builder provisionInfo = jobInfo.toProvisionInfo().toBuilder();
  provisionInfo.setLoggingEndpoint(loggingServer.getApiServiceDescriptor());
  provisionInfo.setArtifactEndpoint(retrievalServer.getApiServiceDescriptor());
  provisionInfo.setControlEndpoint(controlServer.getApiServiceDescriptor());
  GrpcFnServer<StaticGrpcProvisionService> provisioningServer =
      GrpcFnServer.allocatePortAndCreateFor(
          StaticGrpcProvisionService.create(
              provisionInfo.build(), GrpcContextHeaderAccessorProvider.getHeaderAccessor()),
          serverFactory);
  GrpcFnServer<GrpcDataService> dataServer =
      GrpcFnServer.allocatePortAndCreateFor(
          GrpcDataService.create(portableOptions, executor, OutboundObserverFactory.serverDirect()),
          serverFactory);
  GrpcFnServer<GrpcStateService> stateServer =
      GrpcFnServer.allocatePortAndCreateFor(GrpcStateService.create(), serverFactory);
  ServerInfo serverInfo = new AutoValue_DefaultJobBundleFactory_ServerInfo.Builder()
      .setControlServer(controlServer).setLoggingServer(loggingServer)
      .setRetrievalServer(retrievalServer).setProvisioningServer(provisioningServer)
      .setDataServer(dataServer).setStateServer(stateServer).build();
  return serverInfo;
}
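
For context, a minimal sketch of the same allocate-and-serve pattern, hosting only the ArtifactRetrievalService. The RetrievalServerSketch class name is illustrative, and the GrpcFnServer/ServerFactory package locations vary across Beam versions (they moved to org.apache.beam.sdk.fn.server in later releases):

import org.apache.beam.runners.fnexecution.GrpcFnServer;
import org.apache.beam.runners.fnexecution.ServerFactory;
import org.apache.beam.runners.fnexecution.artifact.ArtifactRetrievalService;

public class RetrievalServerSketch {
  public static void main(String[] args) throws Exception {
    // Bind to a dynamically allocated port, as createServerInfo does for each service.
    ServerFactory serverFactory = ServerFactory.createDefault();
    try (GrpcFnServer<ArtifactRetrievalService> retrievalServer =
        GrpcFnServer.allocatePortAndCreateFor(new ArtifactRetrievalService(), serverFactory)) {
      // This URL is what createServerInfo advertises via ProvisionInfo.setArtifactEndpoint.
      System.out.println(
          "Artifact endpoint: " + retrievalServer.getApiServiceDescriptor().getUrl());
    } // Closing the GrpcFnServer shuts down the underlying gRPC server.
  }
}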
Use of org.apache.beam.runners.fnexecution.artifact.ArtifactRetrievalService in project beam by apache.
The class PortableRunner, method run:
@Override
public PipelineResult run(Pipeline pipeline) {
  Runnable cleanup;
  if (Environments.ENVIRONMENT_LOOPBACK.equals(
      options.as(PortablePipelineOptions.class).getDefaultEnvironmentType())) {
    // Loopback mode: run the SDK harness in this process via an ExternalWorkerService.
    GrpcFnServer<ExternalWorkerService> workerService;
    try {
      workerService = new ExternalWorkerService(options).start();
    } catch (Exception exn) {
      throw new RuntimeException("Failed to start GrpcFnServer for ExternalWorkerService", exn);
    }
    LOG.info("Starting worker service at {}", workerService.getApiServiceDescriptor().getUrl());
    options
        .as(PortablePipelineOptions.class)
        .setDefaultEnvironmentConfig(workerService.getApiServiceDescriptor().getUrl());
    cleanup =
        () -> {
          try {
            LOG.warn("closing worker service {}", workerService);
            workerService.close();
          } catch (Exception exn) {
            throw new RuntimeException(exn);
          }
        };
  } else {
    cleanup = null;
  }
  ImmutableList.Builder<String> filesToStageBuilder = ImmutableList.builder();
  List<String> stagingFiles = options.as(PortablePipelineOptions.class).getFilesToStage();
  if (stagingFiles == null) {
    List<String> classpathResources =
        detectClassPathResourcesToStage(Environments.class.getClassLoader(), options);
    if (classpathResources.isEmpty()) {
      throw new IllegalArgumentException("No classpath elements found.");
    }
    LOG.debug(
        "PortablePipelineOptions.filesToStage was not specified. "
            + "Defaulting to files from the classpath: {}",
        classpathResources.size());
    filesToStageBuilder.addAll(classpathResources);
  } else {
    filesToStageBuilder.addAll(stagingFiles);
  }
  // TODO(heejong): remove jar_packages experimental flag when cross-language dependency
  // management is implemented for all runners.
  List<String> experiments = options.as(ExperimentalOptions.class).getExperiments();
  if (experiments != null) {
    Optional<String> jarPackages =
        experiments.stream().filter((String flag) -> flag.startsWith("jar_packages=")).findFirst();
    jarPackages.ifPresent(
        s -> filesToStageBuilder.addAll(Arrays.asList(s.replaceFirst("jar_packages=", "").split(","))));
  }
  options.as(PortablePipelineOptions.class).setFilesToStage(filesToStageBuilder.build());
  RunnerApi.Pipeline pipelineProto =
      PipelineTranslation.toProto(pipeline, SdkComponents.create(options));
  pipelineProto = DefaultArtifactResolver.INSTANCE.resolveArtifacts(pipelineProto);
  PrepareJobRequest prepareJobRequest =
      PrepareJobRequest.newBuilder()
          .setJobName(options.getJobName())
          .setPipeline(pipelineProto)
          .setPipelineOptions(PipelineOptionsTranslation.toProto(options))
          .build();
  LOG.info("Using job server endpoint: {}", endpoint);
  ManagedChannel jobServiceChannel =
      channelFactory.forDescriptor(ApiServiceDescriptor.newBuilder().setUrl(endpoint).build());
  JobServiceBlockingStub jobService = JobServiceGrpc.newBlockingStub(jobServiceChannel);
  try (CloseableResource<JobServiceBlockingStub> wrappedJobService =
      CloseableResource.of(jobService, unused -> jobServiceChannel.shutdown())) {
    final int jobServerTimeout = options.as(PortablePipelineOptions.class).getJobServerTimeout();
    PrepareJobResponse prepareJobResponse =
        jobService
            .withDeadlineAfter(jobServerTimeout, TimeUnit.SECONDS)
            .withWaitForReady()
            .prepare(prepareJobRequest);
    LOG.info("PrepareJobResponse: {}", prepareJobResponse);
    ApiServiceDescriptor artifactStagingEndpoint = prepareJobResponse.getArtifactStagingEndpoint();
    String stagingSessionToken = prepareJobResponse.getStagingSessionToken();
    try (CloseableResource<ManagedChannel> artifactChannel =
        CloseableResource.of(
            channelFactory.forDescriptor(artifactStagingEndpoint), ManagedChannel::shutdown)) {
      // The local ArtifactRetrievalService acts as the client-side artifact source:
      // the job server's staging service pulls the staged files back through it.
      ArtifactStagingService.offer(
          new ArtifactRetrievalService(),
          ArtifactStagingServiceGrpc.newStub(artifactChannel.get()),
          stagingSessionToken);
    } catch (CloseableResource.CloseException e) {
      // CloseExceptions should only be thrown while closing the channel.
      LOG.warn("Error closing artifact staging channel", e);
    } catch (Exception e) {
      throw new RuntimeException("Error staging files.", e);
    }
    RunJobRequest runJobRequest =
        RunJobRequest.newBuilder().setPreparationId(prepareJobResponse.getPreparationId()).build();
    // Run the job and wait for a result. We don't set a timeout here because
    // it may take a long time for a job to complete, and streaming jobs never
    // return a response.
    RunJobResponse runJobResponse = jobService.run(runJobRequest);
    LOG.info("RunJobResponse: {}", runJobResponse);
    ByteString jobId = runJobResponse.getJobIdBytes();
    return new JobServicePipelineResult(jobId, jobServerTimeout, wrappedJobService.transfer(), cleanup);
  } catch (CloseException e) {
    throw new RuntimeException(e);
  }
}
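
To see the staging handshake in isolation, here is a minimal sketch. It assumes artifactStagingUrl and stagingSessionToken were already obtained from a PrepareJobResponse, uses ManagedChannelFactory.createDefault() in place of the runner's channelFactory field, and elides Beam's vendored gRPC package prefixes:

// Sketch only: artifactStagingUrl and stagingSessionToken are assumed to come
// from a PrepareJobResponse, as in the run method above.
Endpoints.ApiServiceDescriptor stagingEndpoint =
    Endpoints.ApiServiceDescriptor.newBuilder().setUrl(artifactStagingUrl).build();
ManagedChannel channel = ManagedChannelFactory.createDefault().forDescriptor(stagingEndpoint);
try {
  // The ArtifactRetrievalService serves the locally staged files back to the
  // job server's staging service under the given session token.
  ArtifactStagingService.offer(
      new ArtifactRetrievalService(),
      ArtifactStagingServiceGrpc.newStub(channel),
      stagingSessionToken);
} finally {
  channel.shutdown();
}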
Use of org.apache.beam.runners.fnexecution.artifact.ArtifactRetrievalService in project beam by apache.
The class ExpansionService, method main:
public static void main(String[] args) throws Exception {
  int port = Integer.parseInt(args[0]);
  System.out.println("Starting expansion service at localhost:" + port);
  // Register the options class used by the expansion service.
  PipelineOptionsFactory.register(ExpansionServiceOptions.class);
  @SuppressWarnings("nullness")
  ExpansionService service = new ExpansionService(Arrays.copyOfRange(args, 1, args.length));
  for (Map.Entry<String, TransformProvider> entry : service.getRegisteredTransforms().entrySet()) {
    System.out.println("\t" + entry.getKey() + ": " + entry.getValue());
  }
  // Co-host an ArtifactRetrievalService so dependencies of expanded transforms
  // can be retrieved from the same endpoint.
  Server server =
      ServerBuilder.forPort(port)
          .addService(service)
          .addService(new ArtifactRetrievalService())
          .build();
  server.start();
  server.awaitTermination();
}
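
A hypothetical client for the co-hosted retrieval endpoint might look as follows. The host/port and the artifact variable (a RunnerApi.ArtifactInformation) are placeholders, and since Beam vendors gRPC, the channel builder and the generated ArtifactRetrievalServiceGrpc stub must come from the same vendored package prefix:

// Hypothetical client: resolve an artifact reference against the expansion
// service's co-hosted ArtifactRetrievalService (placeholder host and port).
ManagedChannel channel =
    ManagedChannelBuilder.forAddress("localhost", 8097).usePlaintext().build();
try {
  ArtifactRetrievalServiceGrpc.ArtifactRetrievalServiceBlockingStub stub =
      ArtifactRetrievalServiceGrpc.newBlockingStub(channel);
  ArtifactApi.ResolveArtifactsResponse response =
      stub.resolveArtifacts(
          ArtifactApi.ResolveArtifactsRequest.newBuilder()
              .addArtifacts(artifact) // the ArtifactInformation to resolve
              .build());
  System.out.println("Resolved " + response.getReplacementsCount() + " artifact(s)");
} finally {
  channel.shutdown();
}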