Use of org.apache.beam.runners.dataflow.worker.graph.Nodes.OutputReceiverNode in project beam by apache.
The class BeamFnMapTaskExecutorFactory, method createOperationTransformForFetchAndFilterStreamingSideInputNodes.
private Function<Node, Node> createOperationTransformForFetchAndFilterStreamingSideInputNodes(
    MutableNetwork<Node, Edge> network, IdGenerator idGenerator,
    InstructionRequestHandler instructionRequestHandler, FnDataService beamFnDataService,
    Endpoints.ApiServiceDescriptor dataApiServiceDescriptor,
    DataflowExecutionContext executionContext, String stageName) {
  return new TypeSafeNodeFunction<FetchAndFilterStreamingSideInputsNode>(
      FetchAndFilterStreamingSideInputsNode.class) {
    @Override
    public Node typedApply(FetchAndFilterStreamingSideInputsNode input) {
      // The fetch-and-filter node feeds exactly one OutputReceiverNode.
      OutputReceiverNode output =
          (OutputReceiverNode) Iterables.getOnlyElement(network.successors(input));
      // Scope the node's partial NameContext to this stage.
      DataflowOperationContext operationContext = executionContext.createOperationContext(
          NameContext.create(stageName, input.getNameContext().originalName(),
              input.getNameContext().systemName(), input.getNameContext().userName()));
      return OperationNode.create(new FetchAndFilterStreamingSideInputsOperation<>(
          new OutputReceiver[] {output.getOutputReceiver()}, operationContext,
          instructionRequestHandler, beamFnDataService, dataApiServiceDescriptor, idGenerator,
          (Coder<WindowedValue<Object>>) output.getCoder(),
          (WindowingStrategy<?, BoundedWindow>) input.getWindowingStrategy(),
          executionContext.getStepContext(operationContext),
          input.getPCollectionViewsToWindowMappingFns()));
    }
  };
}
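This and the following factory methods all return a TypeSafeNodeFunction: a node-rewriting function that is the identity on every node except instances of one Node subtype, which are routed to typedApply. A minimal sketch of that dispatch pattern, using hypothetical names and a generic base type rather than Beam's actual internals:

import java.util.function.Function;

// Sketch of the TypeSafeNodeFunction pattern (names are illustrative, not Beam's).
abstract class TypedRewrite<N, T extends N> implements Function<N, N> {
  private final Class<T> type;

  protected TypedRewrite(Class<T> type) {
    this.type = type;
  }

  @Override
  public N apply(N node) {
    // Rewrite only nodes of the requested subtype; leave everything else untouched.
    return type.isInstance(node) ? typedApply(type.cast(node)) : node;
  }

  protected abstract N typedApply(T node);
}

Because every other node passes through unchanged, the factory can chain several independent replacement passes over the same graph, as the create method below does.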
Use of org.apache.beam.runners.dataflow.worker.graph.Nodes.OutputReceiverNode in project beam by apache.
The class BeamFnMapTaskExecutorFactory, method createOperationTransformForExecutableStageNode.
private Function<Node, Node> createOperationTransformForExecutableStageNode(
    final Network<Node, Edge> network, final String stageName,
    final DataflowExecutionContext<?> executionContext, final JobBundleFactory jobBundleFactory) {
  return new TypeSafeNodeFunction<ExecutableStageNode>(ExecutableStageNode.class) {
    @Override
    public Node typedApply(ExecutableStageNode input) {
      StageBundleFactory stageBundleFactory = jobBundleFactory.forStage(input.getExecutableStage());
      // Index every successor OutputReceiverNode by the PCollection it receives.
      Iterable<OutputReceiverNode> outputReceiverNodes =
          Iterables.filter(network.successors(input), OutputReceiverNode.class);
      Map<String, OutputReceiver> outputReceiverMap = new HashMap<>();
      for (OutputReceiverNode outputReceiverNode : outputReceiverNodes) {
        outputReceiverMap.put(
            outputReceiverNode.getPcollectionId(), outputReceiverNode.getOutputReceiver());
      }
      // Expand each PTransform's partial NameContext into a full one scoped to this stage.
      ImmutableMap.Builder<String, DataflowOperationContext> ptransformIdToOperationContextBuilder =
          ImmutableMap.builder();
      for (Map.Entry<String, NameContext> entry :
          input.getPTransformIdToPartialNameContextMap().entrySet()) {
        NameContext fullNameContext = NameContext.create(stageName,
            entry.getValue().originalName(), entry.getValue().systemName(),
            entry.getValue().userName());
        DataflowOperationContext operationContext =
            executionContext.createOperationContext(fullNameContext);
        ptransformIdToOperationContextBuilder.put(entry.getKey(), operationContext);
      }
      ImmutableMap<String, DataflowOperationContext> ptransformIdToOperationContexts =
          ptransformIdToOperationContextBuilder.build();
      ImmutableMap<String, SideInputReader> ptransformIdToSideInputReaders =
          buildPTransformIdToSideInputReadersMap(
              executionContext, input, ptransformIdToOperationContexts);
      Map<RunnerApi.ExecutableStagePayload.SideInputId, PCollectionView<?>>
          ptransformIdToSideInputIdToPCollectionView = buildSideInputIdToPCollectionView(input);
      return OperationNode.create(new ProcessRemoteBundleOperation(
          input.getExecutableStage(),
          executionContext.createOperationContext(
              NameContext.create(stageName, stageName, stageName, stageName)),
          stageBundleFactory, outputReceiverMap, ptransformIdToSideInputReaders,
          ptransformIdToSideInputIdToPCollectionView));
    }
  };
}
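The first half of typedApply is a recurring idiom in this factory: filter a node's successors down to OutputReceiverNode instances, then index them by PCollection id. A self-contained sketch of the same shape on a plain Guava network, with hypothetical stand-in node types:

import com.google.common.collect.Iterables;
import com.google.common.graph.MutableNetwork;
import com.google.common.graph.NetworkBuilder;
import java.util.HashMap;
import java.util.Map;

public class SuccessorIndexDemo {
  // Hypothetical stand-ins for Beam's node types, just to make the sketch compile.
  interface GraphNode {}
  static final class ReceiverNode implements GraphNode {
    final String pcollectionId;
    ReceiverNode(String pcollectionId) { this.pcollectionId = pcollectionId; }
  }
  static final class StageNode implements GraphNode {}

  public static void main(String[] args) {
    MutableNetwork<GraphNode, Integer> network = NetworkBuilder.directed().build();
    StageNode stage = new StageNode();
    network.addEdge(stage, new ReceiverNode("pc-1"), 1); // addEdge also adds the nodes
    network.addEdge(stage, new ReceiverNode("pc-2"), 2);

    // Same shape as the loop above: filter successors by type, index by id.
    Map<String, ReceiverNode> byPCollection = new HashMap<>();
    for (ReceiverNode r : Iterables.filter(network.successors(stage), ReceiverNode.class)) {
      byPCollection.put(r.pcollectionId, r);
    }
    System.out.println(byPCollection.keySet()); // [pc-1, pc-2] (order may vary)
  }
}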
Use of org.apache.beam.runners.dataflow.worker.graph.Nodes.OutputReceiverNode in project beam by apache.
The class BeamFnMapTaskExecutorFactory, method createOperationTransformForGrpcPortNodes.
private Function<Node, Node> createOperationTransformForGrpcPortNodes(
    final Network<Node, Edge> network, final FnDataService beamFnDataService,
    final OperationContext context) {
  return new TypeSafeNodeFunction<RemoteGrpcPortNode>(RemoteGrpcPortNode.class) {
    @Override
    public Node typedApply(RemoteGrpcPortNode input) {
      RegisterAndProcessBundleOperation registerFnOperation =
          (RegisterAndProcessBundleOperation)
              Iterables.getOnlyElement(
                      Iterables.filter(network.adjacentNodes(input), OperationNode.class))
                  .getOperation();
      // The coder comes from the one and only adjacent output node.
      Coder<?> coder =
          Iterables.getOnlyElement(
                  Iterables.filter(network.adjacentNodes(input), OutputReceiverNode.class))
              .getCoder();
      // The port produces output only if an OutputReceiverNode is among its
      // successors; otherwise it writes to the SDK harness.
      Iterable<OutputReceiverNode> outputReceiverNodes =
          Iterables.filter(network.successors(input), OutputReceiverNode.class);
      Operation operation;
      if (outputReceiverNodes.iterator().hasNext()) {
        OutputReceiver[] outputReceivers = new OutputReceiver[] {
            Iterables.getOnlyElement(outputReceiverNodes).getOutputReceiver()};
        operation = new RemoteGrpcPortReadOperation<>(
            beamFnDataService, input.getPrimitiveTransformId(),
            registerFnOperation::getProcessBundleInstructionId, (Coder) coder,
            outputReceivers, context);
      } else {
        operation = new RemoteGrpcPortWriteOperation<>(
            beamFnDataService, input.getPrimitiveTransformId(),
            registerFnOperation::getProcessBundleInstructionId, (Coder) coder, context);
      }
      return OperationNode.create(operation);
    }
  };
}
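Note how the port's direction is inferred purely from graph shape: getOnlyElement over adjacentNodes finds the single paired operation and coder, and the presence of an OutputReceiverNode among the successors distinguishes a read port from a write port. A small sketch of that shape test on a plain Guava network (stand-in types, not Beam's):

import com.google.common.collect.Iterables;
import com.google.common.graph.MutableNetwork;
import com.google.common.graph.NetworkBuilder;

public class PortDirectionDemo {
  interface N {}
  static final class Port implements N {}
  static final class Receiver implements N {}
  static final class Op implements N {}

  // Mirrors the check above: a port that feeds a receiver downstream is a read port.
  static boolean isReadPort(MutableNetwork<N, Integer> g, Port p) {
    return Iterables.filter(g.successors(p), Receiver.class).iterator().hasNext();
  }

  public static void main(String[] args) {
    MutableNetwork<N, Integer> g = NetworkBuilder.directed().build();
    Port read = new Port();
    Port write = new Port();
    g.addEdge(read, new Receiver(), 1); // read port: a receiver is downstream
    g.addEdge(new Op(), write, 2);      // write port: only an operation upstream
    System.out.println(isReadPort(g, read));  // true
    System.out.println(isReadPort(g, write)); // false
  }
}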
Use of org.apache.beam.runners.dataflow.worker.graph.Nodes.OutputReceiverNode in project beam by apache.
The class BeamFnMapTaskExecutorFactory, method create.
/**
* Creates a new {@link DataflowMapTaskExecutor} from the given {@link MapTask} definition using
* the provided {@link ReaderFactory}.
*/
@Override
public DataflowMapTaskExecutor create(
    InstructionRequestHandler instructionRequestHandler,
    GrpcFnServer<GrpcDataService> grpcDataFnServer,
    Endpoints.ApiServiceDescriptor dataApiServiceDescriptor,
    GrpcFnServer<GrpcStateService> grpcStateFnServer, MutableNetwork<Node, Edge> network,
    PipelineOptions options, String stageName, ReaderFactory readerFactory,
    SinkFactory sinkFactory, DataflowExecutionContext<?> executionContext,
    CounterSet counterSet, IdGenerator idGenerator) {
  // TODO: remove this once we trust the code paths
  checkArgument(
      DataflowRunner.hasExperiment(options.as(DataflowPipelineDebugOptions.class), "beam_fn_api"),
      "%s should only be used when beam_fn_api is enabled", getClass().getSimpleName());
  // Swap out all the InstructionOutput nodes with OutputReceiver nodes.
  Networks.replaceDirectedNetworkNodes(
      network, createOutputReceiversTransform(stageName, counterSet));
  if (DataflowRunner.hasExperiment(
      options.as(DataflowPipelineDebugOptions.class), "use_executable_stage_bundle_execution")) {
    // When use_executable_stage_bundle_execution is enabled, swap out the
    // ExecutableStage nodes instead of the individual Fn API nodes.
    LOG.debug("Using SingleEnvironmentInstanceJobBundleFactory");
    JobBundleFactory jobBundleFactory = SingleEnvironmentInstanceJobBundleFactory.create(
        StaticRemoteEnvironmentFactory.forService(instructionRequestHandler),
        grpcDataFnServer, grpcStateFnServer, idGenerator);
    Networks.replaceDirectedNetworkNodes(
        network,
        createOperationTransformForExecutableStageNode(
            network, stageName, executionContext, jobBundleFactory));
  } else {
    // Swap out all the RegisterFnRequest nodes with Operation nodes.
    Networks.replaceDirectedNetworkNodes(
        network,
        createOperationTransformForRegisterFnNodes(
            idGenerator, instructionRequestHandler, grpcStateFnServer.getService(),
            stageName, executionContext));
    // Swap out all the RemoteGrpcPort nodes with Operation nodes. Note that the
    // RegisterFnRequest nodes are expected to have already been replaced.
    Networks.replaceDirectedNetworkNodes(
        network,
        createOperationTransformForGrpcPortNodes(
            network, grpcDataFnServer.getService(),
            // TODO: Set NameContext properly for these operations.
            executionContext.createOperationContext(
                NameContext.create(stageName, stageName, stageName, stageName))));
  }
  // Swap out all the FetchAndFilterStreamingSideInput nodes with Operation nodes.
  Networks.replaceDirectedNetworkNodes(
      network,
      createOperationTransformForFetchAndFilterStreamingSideInputNodes(
          network, idGenerator, instructionRequestHandler, grpcDataFnServer.getService(),
          dataApiServiceDescriptor, executionContext, stageName));
  // Swap out all the ParallelInstruction nodes with Operation nodes.
  Networks.replaceDirectedNetworkNodes(
      network,
      createOperationTransformForParallelInstructionNodes(
          stageName, network, options, readerFactory, sinkFactory, executionContext));
  // Collect all the operations within the network and attach each operation as a
  // receiver to its preceding output receivers.
  List<Operation> topoSortedOperations = new ArrayList<>();
  for (OperationNode node :
      Iterables.filter(Networks.topologicalOrder(network), OperationNode.class)) {
    topoSortedOperations.add(node.getOperation());
    for (Node predecessor :
        Iterables.filter(network.predecessors(node), OutputReceiverNode.class)) {
      ((OutputReceiverNode) predecessor)
          .getOutputReceiver()
          .addOutput((Receiver) node.getOperation());
    }
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Map task network: {}", Networks.toDot(network));
  }
  return BeamFnMapTaskExecutor.withSharedCounterSet(
      topoSortedOperations, counterSet, executionContext.getExecutionStateTracker());
}
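The final loop relies on Networks.topologicalOrder so that every operation is registered with its upstream receivers before anything downstream of it. That helper is internal to the Dataflow worker, but for a DAG an equivalent order can be sketched with Guava's Traverser, since a depth-first post-order reversed is a topological order. Stand-in string nodes, not Beam's types:

import com.google.common.collect.Lists;
import com.google.common.graph.MutableNetwork;
import com.google.common.graph.NetworkBuilder;
import com.google.common.graph.Traverser;

public class TopoWireDemo {
  public static void main(String[] args) {
    MutableNetwork<String, Integer> g = NetworkBuilder.directed().build();
    g.addEdge("readOp", "receiver", 1);
    g.addEdge("receiver", "writeOp", 2);

    // Reverse depth-first post-order of a DAG is a topological order.
    for (String node : Lists.reverse(
        Lists.newArrayList(Traverser.forGraph(g).depthFirstPostOrder(g.nodes())))) {
      // Analogous to the wiring pass above: each node sees its predecessors first.
      System.out.println(node + " <- " + g.predecessors(node));
    }
    // Prints: readOp <- [], receiver <- [readOp], writeOp <- [receiver]
  }
}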
Use of org.apache.beam.runners.dataflow.worker.graph.Nodes.OutputReceiverNode in project beam by apache.
The class IntrinsicMapTaskExecutorFactory, method create.
/**
* Creates a new {@link DataflowMapTaskExecutor} from the given {@link MapTask} definition using
* the provided {@link ReaderFactory}.
*/
@Override
public DataflowMapTaskExecutor create(
    InstructionRequestHandler instructionRequestHandler,
    GrpcFnServer<GrpcDataService> grpcDataFnServer,
    Endpoints.ApiServiceDescriptor dataApiServiceDescriptor,
    GrpcFnServer<GrpcStateService> grpcStateFnServer, MutableNetwork<Node, Edge> network,
    PipelineOptions options, String stageName, ReaderFactory readerFactory,
    SinkFactory sinkFactory, DataflowExecutionContext<?> executionContext,
    CounterSet counterSet, IdGenerator idGenerator) {
  // TODO: remove this once we trust the code paths
  checkArgument(
      !DataflowRunner.hasExperiment(options.as(DataflowPipelineDebugOptions.class), "beam_fn_api"),
      "experiment beam_fn_api turned on but non-Fn API MapTaskExecutorFactory invoked");
  // Swap out all the InstructionOutput nodes with OutputReceiver nodes
  Networks.replaceDirectedNetworkNodes(
      network, createOutputReceiversTransform(stageName, counterSet));
  // Swap out all the ParallelInstruction nodes with Operation nodes
  Networks.replaceDirectedNetworkNodes(
      network,
      createOperationTransformForParallelInstructionNodes(
          stageName, network, options, readerFactory, sinkFactory, executionContext));
  // Collect all the operations within the network and attach all the operations as
  // receivers to preceding output receivers.
  List<Operation> topoSortedOperations = new ArrayList<>();
  for (OperationNode node :
      Iterables.filter(Networks.topologicalOrder(network), OperationNode.class)) {
    topoSortedOperations.add(node.getOperation());
    for (Node predecessor :
        Iterables.filter(network.predecessors(node), OutputReceiverNode.class)) {
      ((OutputReceiverNode) predecessor)
          .getOutputReceiver()
          .addOutput((Receiver) node.getOperation());
    }
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Map task network: {}", Networks.toDot(network));
  }
  return IntrinsicMapTaskExecutor.withSharedCounterSet(
      topoSortedOperations, counterSet, executionContext.getExecutionStateTracker());
}
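The two create methods are mirror images: the guard in BeamFnMapTaskExecutorFactory requires the beam_fn_api experiment that IntrinsicMapTaskExecutorFactory rejects. A hedged sketch of how a caller could route on that flag; hasExperiment is the real Beam helper used in both guards, while the selector method and the direct constructor calls are illustrative assumptions:

import org.apache.beam.runners.dataflow.DataflowRunner;
import org.apache.beam.runners.dataflow.options.DataflowPipelineDebugOptions;
import org.apache.beam.sdk.options.PipelineOptions;

final class ExecutorFactorySelector {
  // Hypothetical selector mirroring the checkArgument guards above: the Fn API
  // factory only when beam_fn_api is set, the intrinsic factory otherwise.
  static DataflowMapTaskExecutorFactory selectFactory(PipelineOptions options) {
    boolean fnApi = DataflowRunner.hasExperiment(
        options.as(DataflowPipelineDebugOptions.class), "beam_fn_api");
    return fnApi ? new BeamFnMapTaskExecutorFactory() : new IntrinsicMapTaskExecutorFactory();
  }
}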