Example 11 with InstructionOutputNode

Use of org.apache.beam.runners.dataflow.worker.graph.Nodes.InstructionOutputNode in project beam by apache.

The class RegisterNodeFunction, method apply.

@Override
public Node apply(MutableNetwork<Node, Edge> input) {
    for (Node node : input.nodes()) {
        if (node instanceof RemoteGrpcPortNode || node instanceof ParallelInstructionNode || node instanceof InstructionOutputNode) {
            continue;
        }
        throw new IllegalArgumentException(String.format("Network contains unknown type of node: %s", node));
    }
    // Fix all non-output nodes to have named edges.
    for (Node node : input.nodes()) {
        if (node instanceof InstructionOutputNode) {
            continue;
        }
        // Copy the live successor and edge views before mutating the network mid-iteration.
        for (Node successor : ImmutableList.copyOf(input.successors(node))) {
            for (Edge edge : ImmutableList.copyOf(input.edgesConnecting(node, successor))) {
                if (edge instanceof DefaultEdge) {
                    input.removeEdge(edge);
                    input.addEdge(node, successor, MultiOutputInfoEdge.create(new MultiOutputInfo().setTag(idGenerator.getId())));
                }
            }
        }
    }
    // We start off by replacing all edges within the graph with edges that have the named
    // outputs from the predecessor step. For ParallelInstruction Source nodes and RemoteGrpcPort
    // nodes this is a generated port id. All ParDoInstructions will have already been given
    // named outputs through their MultiOutputInfos.
    ProcessBundleDescriptor.Builder processBundleDescriptor = ProcessBundleDescriptor.newBuilder().setId(idGenerator.getId()).setStateApiServiceDescriptor(stateApiServiceDescriptor);
    // For intermediate PCollections we fabricate, we make a bogus WindowingStrategy
    // TODO: create a correct windowing strategy, including coders and environment
    SdkComponents sdkComponents = SdkComponents.create(pipeline.getComponents(), null);
    // Default to the Java environment if the pipeline doesn't have an environment specified.
    if (pipeline.getComponents().getEnvironmentsMap().isEmpty()) {
        sdkComponents.registerEnvironment(Environments.JAVA_SDK_HARNESS_ENVIRONMENT);
    }
    String fakeWindowingStrategyId = "fakeWindowingStrategy" + idGenerator.getId();
    try {
        RunnerApi.MessageWithComponents fakeWindowingStrategyProto = WindowingStrategyTranslation.toMessageProto(WindowingStrategy.globalDefault(), sdkComponents);
        processBundleDescriptor.putWindowingStrategies(fakeWindowingStrategyId, fakeWindowingStrategyProto.getWindowingStrategy()).putAllCoders(fakeWindowingStrategyProto.getComponents().getCodersMap()).putAllEnvironments(fakeWindowingStrategyProto.getComponents().getEnvironmentsMap());
    } catch (IOException exc) {
        throw new RuntimeException("Could not convert default windowing strategy to proto", exc);
    }
    Map<Node, String> nodesToPCollections = new HashMap<>();
    ImmutableMap.Builder<String, NameContext> ptransformIdToNameContexts = ImmutableMap.builder();
    ImmutableMap.Builder<String, Iterable<SideInputInfo>> ptransformIdToSideInputInfos = ImmutableMap.builder();
    ImmutableMap.Builder<String, Iterable<PCollectionView<?>>> ptransformIdToPCollectionViews = ImmutableMap.builder();
    ImmutableMap.Builder<String, NameContext> pcollectionIdToNameContexts = ImmutableMap.builder();
    ImmutableMap.Builder<InstructionOutputNode, String> instructionOutputNodeToCoderIdBuilder = ImmutableMap.builder();
    // For each instruction output node:
    // 1. Generate a new coder id and register the coder with the ProcessBundleDescriptor.
    // 2. Generate a new PCollection id and register it with the ProcessBundleDescriptor.
    for (InstructionOutputNode node : Iterables.filter(input.nodes(), InstructionOutputNode.class)) {
        InstructionOutput instructionOutput = node.getInstructionOutput();
        String coderId = "generatedCoder" + idGenerator.getId();
        instructionOutputNodeToCoderIdBuilder.put(node, coderId);
        try (ByteString.Output output = ByteString.newOutput()) {
            try {
                Coder<?> javaCoder = CloudObjects.coderFromCloudObject(CloudObject.fromSpec(instructionOutput.getCodec()));
                sdkComponents.registerCoder(javaCoder);
                RunnerApi.Coder coderProto = CoderTranslation.toProto(javaCoder, sdkComponents);
                processBundleDescriptor.putCoders(coderId, coderProto);
            } catch (IOException e) {
                throw new IllegalArgumentException(String.format("Unable to encode coder %s for output %s", instructionOutput.getCodec(), instructionOutput), e);
            } catch (Exception e) {
                // Coder probably wasn't a Java coder.
                OBJECT_MAPPER.writeValue(output, instructionOutput.getCodec());
                processBundleDescriptor.putCoders(coderId, RunnerApi.Coder.newBuilder().setSpec(RunnerApi.FunctionSpec.newBuilder().setPayload(output.toByteString())).build());
            }
        } catch (IOException e) {
            throw new IllegalArgumentException(String.format("Unable to encode coder %s for output %s", instructionOutput.getCodec(), instructionOutput), e);
        }
        // Generate new PCollection ID and map it to relevant node.
        // Will later be used to fill PTransform inputs/outputs information.
        String pcollectionId = "generatedPcollection" + idGenerator.getId();
        processBundleDescriptor.putPcollections(pcollectionId, RunnerApi.PCollection.newBuilder().setCoderId(coderId).setWindowingStrategyId(fakeWindowingStrategyId).build());
        nodesToPCollections.put(node, pcollectionId);
        pcollectionIdToNameContexts.put(pcollectionId, NameContext.create(null, instructionOutput.getOriginalName(), instructionOutput.getSystemName(), instructionOutput.getName()));
    }
    processBundleDescriptor.putAllCoders(sdkComponents.toComponents().getCodersMap());
    Map<InstructionOutputNode, String> instructionOutputNodeToCoderIdMap = instructionOutputNodeToCoderIdBuilder.build();
    for (ParallelInstructionNode node : Iterables.filter(input.nodes(), ParallelInstructionNode.class)) {
        ParallelInstruction parallelInstruction = node.getParallelInstruction();
        String ptransformId = "generatedPtransform" + idGenerator.getId();
        ptransformIdToNameContexts.put(ptransformId, NameContext.create(null, parallelInstruction.getOriginalName(), parallelInstruction.getSystemName(), parallelInstruction.getName()));
        RunnerApi.PTransform.Builder pTransform = RunnerApi.PTransform.newBuilder();
        RunnerApi.FunctionSpec.Builder transformSpec = RunnerApi.FunctionSpec.newBuilder();
        if (parallelInstruction.getParDo() != null) {
            ParDoInstruction parDoInstruction = parallelInstruction.getParDo();
            CloudObject userFnSpec = CloudObject.fromSpec(parDoInstruction.getUserFn());
            String userFnClassName = userFnSpec.getClassName();
            if ("CombineValuesFn".equals(userFnClassName) || "KeyedCombineFn".equals(userFnClassName)) {
                transformSpec = transformCombineValuesFnToFunctionSpec(userFnSpec);
                ptransformIdToPCollectionViews.put(ptransformId, Collections.emptyList());
            } else {
                String parDoPTransformId = getString(userFnSpec, PropertyNames.SERIALIZED_FN);
                RunnerApi.PTransform parDoPTransform = pipeline.getComponents().getTransformsOrDefault(parDoPTransformId, null);
                // TODO: only the non-null branch should exist; for migration ease only
                if (parDoPTransform != null) {
                    checkArgument(parDoPTransform.getSpec().getUrn().equals(PTransformTranslation.PAR_DO_TRANSFORM_URN), "Found transform \"%s\" for ParallelDo instruction, " + "but that transform had unexpected URN \"%s\" (expected \"%s\")", parDoPTransformId, parDoPTransform.getSpec().getUrn(), PTransformTranslation.PAR_DO_TRANSFORM_URN);
                    RunnerApi.ParDoPayload parDoPayload;
                    try {
                        parDoPayload = RunnerApi.ParDoPayload.parseFrom(parDoPTransform.getSpec().getPayload());
                    } catch (InvalidProtocolBufferException exc) {
                        throw new RuntimeException("ParDo did not have a ParDoPayload", exc);
                    }
                    ImmutableList.Builder<PCollectionView<?>> pcollectionViews = ImmutableList.builder();
                    for (Map.Entry<String, SideInput> sideInputEntry : parDoPayload.getSideInputsMap().entrySet()) {
                        pcollectionViews.add(transformSideInputForRunner(pipeline, parDoPTransform, sideInputEntry.getKey(), sideInputEntry.getValue()));
                        transformSideInputForSdk(pipeline, parDoPTransform, sideInputEntry.getKey(), processBundleDescriptor, pTransform);
                    }
                    ptransformIdToPCollectionViews.put(ptransformId, pcollectionViews.build());
                    transformSpec.setUrn(PTransformTranslation.PAR_DO_TRANSFORM_URN).setPayload(parDoPayload.toByteString());
                } else {
                    // Legacy path - the bytes are effectively the FunctionSpec's payload field,
                    // and SDKs expect them in the PTransform's payload field.
                    byte[] userFnBytes = getBytes(userFnSpec, PropertyNames.SERIALIZED_FN);
                    transformSpec.setUrn(ParDoTranslation.CUSTOM_JAVA_DO_FN_URN).setPayload(ByteString.copyFrom(userFnBytes));
                }
                // Add side input information for batch pipelines
                if (parDoInstruction.getSideInputs() != null) {
                    ptransformIdToSideInputInfos.put(ptransformId, forSideInputInfos(parDoInstruction.getSideInputs(), true));
                }
            }
        } else if (parallelInstruction.getRead() != null) {
            ReadInstruction readInstruction = parallelInstruction.getRead();
            CloudObject sourceSpec = CloudObject.fromSpec(CloudSourceUtils.flattenBaseSpecs(readInstruction.getSource()).getSpec());
            // TODO: Need to plumb through the SDK-specific function spec.
            transformSpec.setUrn(JAVA_SOURCE_URN);
            try {
                byte[] serializedSource = Base64.getDecoder().decode(getString(sourceSpec, SERIALIZED_SOURCE));
                ByteString sourceByteString = ByteString.copyFrom(serializedSource);
                transformSpec.setPayload(sourceByteString);
            } catch (Exception e) {
                throw new IllegalArgumentException(String.format("Unable to process Read %s", parallelInstruction), e);
            }
        } else if (parallelInstruction.getFlatten() != null) {
            transformSpec.setUrn(PTransformTranslation.FLATTEN_TRANSFORM_URN);
        } else {
            throw new IllegalArgumentException(String.format("Unknown type of ParallelInstruction %s", parallelInstruction));
        }
        for (Node predecessorOutput : input.predecessors(node)) {
            pTransform.putInputs("generatedInput" + idGenerator.getId(), nodesToPCollections.get(predecessorOutput));
        }
        for (Edge edge : input.outEdges(node)) {
            Node nodeOutput = input.incidentNodes(edge).target();
            MultiOutputInfoEdge edge2 = (MultiOutputInfoEdge) edge;
            pTransform.putOutputs(edge2.getMultiOutputInfo().getTag(), nodesToPCollections.get(nodeOutput));
        }
        pTransform.setSpec(transformSpec);
        processBundleDescriptor.putTransforms(ptransformId, pTransform.build());
    }
    // Add the PTransforms representing the remote gRPC nodes
    for (RemoteGrpcPortNode node : Iterables.filter(input.nodes(), RemoteGrpcPortNode.class)) {
        RunnerApi.PTransform.Builder pTransform = RunnerApi.PTransform.newBuilder();
        Set<Node> predecessors = input.predecessors(node);
        Set<Node> successors = input.successors(node);
        if (predecessors.isEmpty() && !successors.isEmpty()) {
            Node instructionOutputNode = Iterables.getOnlyElement(successors);
            pTransform.putOutputs("generatedOutput" + idGenerator.getId(), nodesToPCollections.get(instructionOutputNode));
            pTransform.setSpec(RunnerApi.FunctionSpec.newBuilder().setUrn(DATA_INPUT_URN).setPayload(node.getRemoteGrpcPort().toBuilder().setCoderId(instructionOutputNodeToCoderIdMap.get(instructionOutputNode)).build().toByteString()).build());
        } else if (!predecessors.isEmpty() && successors.isEmpty()) {
            Node instructionOutputNode = Iterables.getOnlyElement(predecessors);
            pTransform.putInputs("generatedInput" + idGenerator.getId(), nodesToPCollections.get(instructionOutputNode));
            pTransform.setSpec(RunnerApi.FunctionSpec.newBuilder().setUrn(DATA_OUTPUT_URN).setPayload(node.getRemoteGrpcPort().toBuilder().setCoderId(instructionOutputNodeToCoderIdMap.get(instructionOutputNode)).build().toByteString()).build());
        } else {
            throw new IllegalStateException("Expected either one input OR one output " + "InstructionOutputNode for this RemoteGrpcPortNode");
        }
        processBundleDescriptor.putTransforms(node.getPrimitiveTransformId(), pTransform.build());
    }
    return RegisterRequestNode.create(RegisterRequest.newBuilder().addProcessBundleDescriptor(processBundleDescriptor).build(), ptransformIdToNameContexts.build(), ptransformIdToSideInputInfos.build(), ptransformIdToPCollectionViews.build(), pcollectionIdToNameContexts.build());
}
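
The first loop above, which rewrites anonymous DefaultEdges into named MultiOutputInfoEdges, is a reusable idiom on its own. Below is a minimal, self-contained sketch of the same rewrite against a plain Guava MutableNetwork; the String node and edge types, the "default-"/"tagged-" naming, and the main() harness are hypothetical stand-ins for the worker's Node and Edge classes.

import com.google.common.collect.ImmutableList;
import com.google.common.graph.MutableNetwork;
import com.google.common.graph.NetworkBuilder;

public class EdgeRenameSketch {
    public static void main(String[] args) {
        // A directed network that allows parallel edges, as the Dataflow worker graphs do.
        MutableNetwork<String, String> network =
                NetworkBuilder.directed().allowsParallelEdges(true).build();
        network.addEdge("read", "read.out", "default-0");
        int nextTag = 0;
        for (String node : network.nodes()) {
            // Copy the live views before mutating the network mid-iteration.
            for (String successor : ImmutableList.copyOf(network.successors(node))) {
                for (String edge : ImmutableList.copyOf(network.edgesConnecting(node, successor))) {
                    if (edge.startsWith("default-")) {
                        network.removeEdge(edge);
                        network.addEdge(node, successor, "tagged-" + nextTag++);
                    }
                }
            }
        }
        System.out.println(network.edges()); // prints [tagged-0]
    }
}
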
Also used : HashMap(java.util.HashMap) MultiOutputInfo(com.google.api.services.dataflow.model.MultiOutputInfo) ByteString(org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.ByteString) RegisterRequestNode(org.apache.beam.runners.dataflow.worker.graph.Nodes.RegisterRequestNode) InstructionOutputNode(org.apache.beam.runners.dataflow.worker.graph.Nodes.InstructionOutputNode) ParallelInstructionNode(org.apache.beam.runners.dataflow.worker.graph.Nodes.ParallelInstructionNode) Node(org.apache.beam.runners.dataflow.worker.graph.Nodes.Node) PipelineNode(org.apache.beam.runners.core.construction.graph.PipelineNode) RemoteGrpcPortNode(org.apache.beam.runners.dataflow.worker.graph.Nodes.RemoteGrpcPortNode) InstructionOutput(com.google.api.services.dataflow.model.InstructionOutput) Structs.getString(org.apache.beam.runners.dataflow.util.Structs.getString) RunnerApi(org.apache.beam.model.pipeline.v1.RunnerApi) SideInput(org.apache.beam.model.pipeline.v1.RunnerApi.SideInput) DefaultEdge(org.apache.beam.runners.dataflow.worker.graph.Edges.DefaultEdge) MultiOutputInfoEdge(org.apache.beam.runners.dataflow.worker.graph.Edges.MultiOutputInfoEdge) ImmutableMap(org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.ImmutableMap) ParDoInstruction(com.google.api.services.dataflow.model.ParDoInstruction) DataflowPortabilityPCollectionView(org.apache.beam.runners.dataflow.worker.DataflowPortabilityPCollectionView) PCollectionView(org.apache.beam.sdk.values.PCollectionView) Map(java.util.Map) ProcessBundleDescriptor(org.apache.beam.model.fnexecution.v1.BeamFnApi.ProcessBundleDescriptor) ImmutableList(org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.ImmutableList) SdkComponents(org.apache.beam.runners.core.construction.SdkComponents) ReadInstruction(com.google.api.services.dataflow.model.ReadInstruction) NameContext(org.apache.beam.runners.dataflow.worker.counters.NameContext) InvalidProtocolBufferException(org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.InvalidProtocolBufferException) IOException(java.io.IOException) ParallelInstruction(com.google.api.services.dataflow.model.ParallelInstruction) CloudObject(org.apache.beam.runners.dataflow.util.CloudObject) Edge(org.apache.beam.runners.dataflow.worker.graph.Edges.Edge)

Example 12 with InstructionOutputNode

Use of org.apache.beam.runners.dataflow.worker.graph.Nodes.InstructionOutputNode in project beam by apache.

The class CloneAmbiguousFlattensFunction, method cloneFlatten.

/**
 * A helper function which performs the actual cloning procedure, which means creating the runner
 * and SDK versions of both the ambiguous flatten and its PCollection, attaching the old flatten's
 * predecessors and successors properly, and then removing the ambiguous flatten from the network.
 */
private void cloneFlatten(Node flatten, MutableNetwork<Node, Edge> network) {
    // Start by creating the clones of the flatten and its PCollection.
    InstructionOutputNode flattenOut = (InstructionOutputNode) Iterables.getOnlyElement(network.successors(flatten));
    ParallelInstruction flattenInstruction = ((ParallelInstructionNode) flatten).getParallelInstruction();
    Node runnerFlatten = ParallelInstructionNode.create(flattenInstruction, ExecutionLocation.RUNNER_HARNESS);
    Node runnerFlattenOut = InstructionOutputNode.create(flattenOut.getInstructionOutput(), flattenOut.getPcollectionId());
    network.addNode(runnerFlatten);
    network.addNode(runnerFlattenOut);
    Node sdkFlatten = ParallelInstructionNode.create(flattenInstruction, ExecutionLocation.SDK_HARNESS);
    Node sdkFlattenOut = InstructionOutputNode.create(flattenOut.getInstructionOutput(), flattenOut.getPcollectionId());
    network.addNode(sdkFlatten);
    network.addNode(sdkFlattenOut);
    for (Edge edge : ImmutableList.copyOf(network.edgesConnecting(flatten, flattenOut))) {
        network.addEdge(runnerFlatten, runnerFlattenOut, edge.clone());
        network.addEdge(sdkFlatten, sdkFlattenOut, edge.clone());
    }
    // Copy over predecessor edges to both cloned nodes.
    for (Node predecessor : network.predecessors(flatten)) {
        for (Edge edge : ImmutableList.copyOf(network.edgesConnecting(predecessor, flatten))) {
            network.addEdge(predecessor, runnerFlatten, edge.clone());
            network.addEdge(predecessor, sdkFlatten, edge.clone());
        }
    }
    // Copy over successor edges depending on execution locations of successors.
    for (Node successor : network.successors(flattenOut)) {
        // Connect the successor to the SDK clone only if it is certain to execute in the SDK harness.
        Node selectedOutput = executesInSdkHarness(successor) ? sdkFlattenOut : runnerFlattenOut;
        for (Edge edge : ImmutableList.copyOf(network.edgesConnecting(flattenOut, successor))) {
            network.addEdge(selectedOutput, successor, edge.clone());
        }
    }
    network.removeNode(flatten);
    network.removeNode(flattenOut);
}
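
cloneFlatten is invoked by the apply method of CloneAmbiguousFlattensFunction once per ambiguous flatten. As a hedged sketch of such a driver inside the same class, assuming ambiguous flattens are ParallelInstructionNodes carrying a flatten instruction with ExecutionLocation.AMBIGUOUS (the real apply may track and order these differently):

// A minimal driver sketch under the assumptions stated above; cloneFlatten
// mutates the network, so the node set is copied before iterating.
private void cloneAllAmbiguousFlattens(MutableNetwork<Node, Edge> network) {
    for (Node node : ImmutableList.copyOf(network.nodes())) {
        if (node instanceof ParallelInstructionNode
                && ((ParallelInstructionNode) node).getParallelInstruction().getFlatten() != null
                && ((ParallelInstructionNode) node).getExecutionLocation() == ExecutionLocation.AMBIGUOUS) {
            cloneFlatten(node, network);
        }
    }
}
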
Also used : InstructionOutputNode(org.apache.beam.runners.dataflow.worker.graph.Nodes.InstructionOutputNode) ParallelInstruction(com.google.api.services.dataflow.model.ParallelInstruction) Node(org.apache.beam.runners.dataflow.worker.graph.Nodes.Node) ParallelInstructionNode(org.apache.beam.runners.dataflow.worker.graph.Nodes.ParallelInstructionNode) Edge(org.apache.beam.runners.dataflow.worker.graph.Edges.Edge)

Example 13 with InstructionOutputNode

Use of org.apache.beam.runners.dataflow.worker.graph.Nodes.InstructionOutputNode in project beam by apache.

The class BeamFnMapTaskExecutorFactory, method createOutputReceiversTransform.

/**
 * Returns a function which can convert {@link InstructionOutput}s into {@link OutputReceiver}s.
 */
static Function<Node, Node> createOutputReceiversTransform(final String stageName, final CounterFactory counterFactory) {
    return new TypeSafeNodeFunction<InstructionOutputNode>(InstructionOutputNode.class) {

        @Override
        public Node typedApply(InstructionOutputNode input) {
            InstructionOutput cloudOutput = input.getInstructionOutput();
            OutputReceiver outputReceiver = new OutputReceiver();
            Coder<?> coder = CloudObjects.coderFromCloudObject(CloudObject.fromSpec(cloudOutput.getCodec()));
            @SuppressWarnings("unchecked") ElementCounter outputCounter = new DataflowOutputCounter(cloudOutput.getName(), new ElementByteSizeObservableCoder<>(coder), counterFactory, NameContext.create(stageName, cloudOutput.getOriginalName(), cloudOutput.getSystemName(), cloudOutput.getName()));
            outputReceiver.addOutputCounter(outputCounter);
            return OutputReceiverNode.create(outputReceiver, coder, input.getPcollectionId());
        }
    };
}
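
A typical use of this factory, as a sketch: hand the returned function to Networks.replaceDirectedNetworkNodes, which applies it to every node of the network; the TypeSafeNodeFunction wrapper transforms only InstructionOutputNodes and passes all other node types through unchanged. The network, stageName, and counterFactory variables below are assumed to come from the surrounding executor setup.

// Illustrative usage; replaces each InstructionOutputNode in the network
// with the OutputReceiverNode produced by typedApply above.
Networks.replaceDirectedNetworkNodes(
        network, createOutputReceiversTransform(stageName, counterFactory));
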
Also used : InstructionOutputNode(org.apache.beam.runners.dataflow.worker.graph.Nodes.InstructionOutputNode) InstructionOutput(com.google.api.services.dataflow.model.InstructionOutput) OutputReceiver(org.apache.beam.runners.dataflow.worker.util.common.worker.OutputReceiver) TypeSafeNodeFunction(org.apache.beam.runners.dataflow.worker.graph.Networks.TypeSafeNodeFunction) ElementCounter(org.apache.beam.runners.dataflow.worker.util.common.worker.ElementCounter)

Example 14 with InstructionOutputNode

Use of org.apache.beam.runners.dataflow.worker.graph.Nodes.InstructionOutputNode in project beam by apache.

The class StreamingDataflowWorker, method process.

private void process(final SdkWorkerHarness worker, final ComputationState computationState, final Instant inputDataWatermark, @Nullable final Instant outputDataWatermark, @Nullable final Instant synchronizedProcessingTime, final Work work) {
    final Windmill.WorkItem workItem = work.getWorkItem();
    final String computationId = computationState.getComputationId();
    final ByteString key = workItem.getKey();
    work.setState(State.PROCESSING);
    {
        StringBuilder workIdBuilder = new StringBuilder(33);
        workIdBuilder.append(Long.toHexString(workItem.getShardingKey()));
        workIdBuilder.append('-');
        workIdBuilder.append(Long.toHexString(workItem.getWorkToken()));
        DataflowWorkerLoggingMDC.setWorkId(workIdBuilder.toString());
    }
    DataflowWorkerLoggingMDC.setStageName(computationId);
    LOG.debug("Starting processing for {}:\n{}", computationId, work);
    Windmill.WorkItemCommitRequest.Builder outputBuilder = initializeOutputBuilder(key, workItem);
    // Before any processing starts, call any pending OnCommit callbacks.  Nothing that requires
    // cleanup should be done before this, since we might exit early here.
    callFinalizeCallbacks(workItem);
    if (workItem.getSourceState().getOnlyFinalize()) {
        outputBuilder.setSourceStateUpdates(Windmill.SourceState.newBuilder().setOnlyFinalize(true));
        work.setState(State.COMMIT_QUEUED);
        commitQueue.put(new Commit(outputBuilder.build(), computationState, work));
        return;
    }
    long processingStartTimeNanos = System.nanoTime();
    final MapTask mapTask = computationState.getMapTask();
    StageInfo stageInfo = stageInfoMap.computeIfAbsent(mapTask.getStageName(), s -> new StageInfo(s, mapTask.getSystemName(), this));
    ExecutionState executionState = null;
    try {
        executionState = computationState.getExecutionStateQueue(worker).poll();
        if (executionState == null) {
            MutableNetwork<Node, Edge> mapTaskNetwork = mapTaskToNetwork.apply(mapTask);
            if (LOG.isDebugEnabled()) {
                LOG.debug("Network as Graphviz .dot: {}", Networks.toDot(mapTaskNetwork));
            }
            ParallelInstructionNode readNode = (ParallelInstructionNode) Iterables.find(mapTaskNetwork.nodes(), node -> node instanceof ParallelInstructionNode && ((ParallelInstructionNode) node).getParallelInstruction().getRead() != null);
            InstructionOutputNode readOutputNode = (InstructionOutputNode) Iterables.getOnlyElement(mapTaskNetwork.successors(readNode));
            DataflowExecutionContext.DataflowExecutionStateTracker executionStateTracker = new DataflowExecutionContext.DataflowExecutionStateTracker(ExecutionStateSampler.instance(), stageInfo.executionStateRegistry.getState(NameContext.forStage(mapTask.getStageName()), "other", null, ScopedProfiler.INSTANCE.emptyScope()), stageInfo.deltaCounters, options, computationId);
            StreamingModeExecutionContext context = new StreamingModeExecutionContext(pendingDeltaCounters, computationId, readerCache, !computationState.getTransformUserNameToStateFamily().isEmpty() ? computationState.getTransformUserNameToStateFamily() : stateNameMap, stateCache.forComputation(computationId), stageInfo.metricsContainerRegistry, executionStateTracker, stageInfo.executionStateRegistry, maxSinkBytes);
            DataflowMapTaskExecutor mapTaskExecutor = mapTaskExecutorFactory.create(worker.getControlClientHandler(), worker.getGrpcDataFnServer(), sdkHarnessRegistry.beamFnDataApiServiceDescriptor(), worker.getGrpcStateFnServer(), mapTaskNetwork, options, mapTask.getStageName(), readerRegistry, sinkRegistry, context, pendingDeltaCounters, idGenerator);
            ReadOperation readOperation = mapTaskExecutor.getReadOperation();
            // Disable progress updates since the results are unused for streaming
            // and collecting them involves starting a thread.
            readOperation.setProgressUpdatePeriodMs(ReadOperation.DONT_UPDATE_PERIODICALLY);
            Preconditions.checkState(mapTaskExecutor.supportsRestart(), "Streaming runner requires all operations support restart.");
            Coder<?> readCoder = CloudObjects.coderFromCloudObject(CloudObject.fromSpec(readOutputNode.getInstructionOutput().getCodec()));
            Coder<?> keyCoder = extractKeyCoder(readCoder);
            // If using a custom source, count bytes read for autoscaling.
            if (CustomSources.class.getName().equals(readNode.getParallelInstruction().getRead().getSource().getSpec().get("@type"))) {
                NameContext nameContext = NameContext.create(mapTask.getStageName(), readNode.getParallelInstruction().getOriginalName(), readNode.getParallelInstruction().getSystemName(), readNode.getParallelInstruction().getName());
                readOperation.receivers[0].addOutputCounter(new OutputObjectAndByteCounter(new IntrinsicMapTaskExecutorFactory.ElementByteSizeObservableCoder<>(readCoder), mapTaskExecutor.getOutputCounters(), nameContext).setSamplingPeriod(100).countBytes("dataflow_input_size-" + mapTask.getSystemName()));
            }
            executionState = new ExecutionState(mapTaskExecutor, context, keyCoder, executionStateTracker);
        }
        WindmillStateReader stateReader = new WindmillStateReader(metricTrackingWindmillServer, computationId, key, workItem.getShardingKey(), workItem.getWorkToken());
        StateFetcher localStateFetcher = stateFetcher.byteTrackingView();
        // If the read outputs KVs, then we can decode Windmill's byte key into a userland
        // key object and provide it to the execution context for use with per-key state.
        // Otherwise, we pass null.
        // 
        // The coder type that will be present is:
        // WindowedValueCoder(TimerOrElementCoder(KvCoder))
        @Nullable Coder<?> keyCoder = executionState.getKeyCoder();
        @Nullable Object executionKey = keyCoder == null ? null : keyCoder.decode(key.newInput(), Coder.Context.OUTER);
        if (workItem.hasHotKeyInfo()) {
            Windmill.HotKeyInfo hotKeyInfo = workItem.getHotKeyInfo();
            Duration hotKeyAge = Duration.millis(hotKeyInfo.getHotKeyAgeUsec() / 1000);
            // The MapTask instructions are ordered by dependencies, such that the first element
            // is always going to be the shuffle task.
            String stepName = computationState.getMapTask().getInstructions().get(0).getName();
            if (options.isHotKeyLoggingEnabled() && keyCoder != null) {
                hotKeyLogger.logHotKeyDetection(stepName, hotKeyAge, executionKey);
            } else {
                hotKeyLogger.logHotKeyDetection(stepName, hotKeyAge);
            }
        }
        executionState.getContext().start(executionKey, workItem, inputDataWatermark, outputDataWatermark, synchronizedProcessingTime, stateReader, localStateFetcher, outputBuilder);
        // Blocks while executing work.
        executionState.getWorkExecutor().execute();
        Iterables.addAll(this.pendingMonitoringInfos, executionState.getWorkExecutor().extractMetricUpdates());
        commitCallbacks.putAll(executionState.getContext().flushState());
        // Release the execution state for another thread to use.
        computationState.getExecutionStateQueue(worker).offer(executionState);
        executionState = null;
        // Add the output to the commit queue.
        work.setState(State.COMMIT_QUEUED);
        WorkItemCommitRequest commitRequest = outputBuilder.build();
        int byteLimit = maxWorkItemCommitBytes;
        int commitSize = commitRequest.getSerializedSize();
        int estimatedCommitSize = commitSize < 0 ? Integer.MAX_VALUE : commitSize;
        // Detect overflow of integer serialized size or if the byte limit was exceeded.
        windmillMaxObservedWorkItemCommitBytes.addValue(estimatedCommitSize);
        if (commitSize < 0 || commitSize > byteLimit) {
            KeyCommitTooLargeException e = KeyCommitTooLargeException.causedBy(computationId, byteLimit, commitRequest);
            reportFailure(computationId, workItem, e);
            LOG.error(e.toString());
            // Drop the current request in favor of a new, minimal one requesting truncation.
            // Messages, timers, counters, and other commit content will not be used by the service
            // so we're purposefully dropping them here
            commitRequest = buildWorkItemTruncationRequest(key, workItem, estimatedCommitSize);
        }
        commitQueue.put(new Commit(commitRequest, computationState, work));
        // Compute shuffle and state byte statistics; these will be flushed asynchronously.
        long stateBytesWritten = outputBuilder.clearOutputMessages().build().getSerializedSize();
        long shuffleBytesRead = 0;
        for (Windmill.InputMessageBundle bundle : workItem.getMessageBundlesList()) {
            for (Windmill.Message message : bundle.getMessagesList()) {
                shuffleBytesRead += message.getSerializedSize();
            }
        }
        long stateBytesRead = stateReader.getBytesRead() + localStateFetcher.getBytesRead();
        windmillShuffleBytesRead.addValue(shuffleBytesRead);
        windmillStateBytesRead.addValue(stateBytesRead);
        windmillStateBytesWritten.addValue(stateBytesWritten);
        LOG.debug("Processing done for work token: {}", workItem.getWorkToken());
    } catch (Throwable t) {
        if (executionState != null) {
            try {
                executionState.getContext().invalidateCache();
                executionState.getWorkExecutor().close();
            } catch (Exception e) {
                LOG.warn("Failed to close map task executor: ", e);
            } finally {
                // Release references to potentially large objects early.
                executionState = null;
            }
        }
        t = t instanceof UserCodeException ? t.getCause() : t;
        boolean retryLocally = false;
        if (KeyTokenInvalidException.isKeyTokenInvalidException(t)) {
            LOG.debug("Execution of work for computation '{}' on key '{}' failed due to token expiration. " + "Work will not be retried locally.", computationId, key.toStringUtf8());
        } else {
            LastExceptionDataProvider.reportException(t);
            LOG.debug("Failed work: {}", work);
            Duration elapsedTimeSinceStart = new Duration(work.getStartTime(), Instant.now());
            if (!reportFailure(computationId, workItem, t)) {
                LOG.error("Execution of work for computation '{}' on key '{}' failed with uncaught exception, " + "and Windmill indicated not to retry locally.", computationId, key.toStringUtf8(), t);
            } else if (isOutOfMemoryError(t)) {
                File heapDump = memoryMonitor.tryToDumpHeap();
                LOG.error("Execution of work for computation '{}' for key '{}' failed with out-of-memory. " + "Work will not be retried locally. Heap dump {}.", computationId, key.toStringUtf8(), heapDump == null ? "not written" : ("written to '" + heapDump + "'"), t);
            } else if (elapsedTimeSinceStart.isLongerThan(MAX_LOCAL_PROCESSING_RETRY_DURATION)) {
                LOG.error("Execution of work for computation '{}' for key '{}' failed with uncaught exception, " + "and it will not be retried locally because the elapsed time since start {} " + "exceeds {}.", computationId, key.toStringUtf8(), elapsedTimeSinceStart, MAX_LOCAL_PROCESSING_RETRY_DURATION, t);
            } else {
                LOG.error("Execution of work for computation '{}' on key '{}' failed with uncaught exception. " + "Work will be retried locally.", computationId, key.toStringUtf8(), t);
                retryLocally = true;
            }
        }
        if (retryLocally) {
            // Try again after some delay and at the end of the queue to avoid a tight loop.
            sleep(retryLocallyDelayMs);
            workUnitExecutor.forceExecute(work, work.getWorkItem().getSerializedSize());
        } else {
            // Consider the item invalid. It will eventually be retried by Windmill if it still needs to
            // be processed.
            computationState.completeWork(ShardedKey.create(key, workItem.getShardingKey()), workItem.getWorkToken());
        }
    } finally {
        // Update total processing time counters. Updating in finally clause ensures that
        // work items causing exceptions are also accounted in time spent.
        long processingTimeMsecs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - processingStartTimeNanos);
        stageInfo.totalProcessingMsecs.addValue(processingTimeMsecs);
        // Timer processing time is reported either here or in the DFE.
        if (work.getWorkItem().hasTimers()) {
            stageInfo.timerProcessingMsecs.addValue(processingTimeMsecs);
        }
        DataflowWorkerLoggingMDC.setWorkId(null);
        DataflowWorkerLoggingMDC.setStageName(null);
    }
}
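
One detail worth isolating from the method above is the commit-size guard: protobuf's getSerializedSize() returns an int, so a commit larger than Integer.MAX_VALUE bytes surfaces as a negative value. A minimal sketch of that check, with a hypothetical method name:

// Returns true when the commit should be replaced by a truncation request:
// either the serialized size overflowed int (negative) or it exceeds the limit.
static boolean mustTruncateCommit(int serializedSize, int byteLimit) {
    int estimatedSize = serializedSize < 0 ? Integer.MAX_VALUE : serializedSize;
    return serializedSize < 0 || estimatedSize > byteLimit;
}
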
Also used : ByteString(org.apache.beam.vendor.grpc.v1p43p2.com.google.protobuf.ByteString) MapTask(com.google.api.services.dataflow.model.MapTask) Windmill(org.apache.beam.runners.dataflow.worker.windmill.Windmill) WorkItemCommitRequest(org.apache.beam.runners.dataflow.worker.windmill.Windmill.WorkItemCommitRequest) MutableNetwork(org.apache.beam.vendor.guava.v26_0_jre.com.google.common.graph.MutableNetwork) Node(org.apache.beam.runners.dataflow.worker.graph.Nodes.Node) InstructionOutputNode(org.apache.beam.runners.dataflow.worker.graph.Nodes.InstructionOutputNode) ParallelInstructionNode(org.apache.beam.runners.dataflow.worker.graph.Nodes.ParallelInstructionNode) RemoteGrpcPortNode(org.apache.beam.runners.dataflow.worker.graph.Nodes.RemoteGrpcPortNode) Edge(org.apache.beam.runners.dataflow.worker.graph.Edges.Edge) Networks(org.apache.beam.runners.dataflow.worker.graph.Networks) NameContext(org.apache.beam.runners.dataflow.worker.counters.NameContext) CloudObject(org.apache.beam.runners.dataflow.util.CloudObject) CloudObjects(org.apache.beam.runners.dataflow.util.CloudObjects) ReadOperation(org.apache.beam.runners.dataflow.worker.util.common.worker.ReadOperation) OutputObjectAndByteCounter(org.apache.beam.runners.dataflow.worker.util.common.worker.OutputObjectAndByteCounter) CustomSources(org.apache.beam.runners.dataflow.internal.CustomSources) Coder(org.apache.beam.sdk.coders.Coder) UserCodeException(org.apache.beam.sdk.util.UserCodeException) Duration(org.joda.time.Duration) Instant(org.joda.time.Instant) Nullable(org.checkerframework.checker.nullness.qual.Nullable) IOException(java.io.IOException) ExecutionException(java.util.concurrent.ExecutionException) File(java.io.File) TimeUnit(java.util.concurrent.TimeUnit)

Example 15 with InstructionOutputNode

Use of org.apache.beam.runners.dataflow.worker.graph.Nodes.InstructionOutputNode in project beam by apache.

The class MapTaskToNetworkFunctionTest, method testParDo.

@Test
public void testParDo() {
    InstructionOutput readOutput = createInstructionOutput("Read.out");
    ParallelInstruction read = createParallelInstruction("Read", readOutput);
    read.setRead(new ReadInstruction());
    MultiOutputInfo parDoMultiOutput = createMultiOutputInfo("output");
    ParDoInstruction parDoInstruction = new ParDoInstruction();
    // Read.out
    parDoInstruction.setInput(createInstructionInput(0, 0));
    parDoInstruction.setMultiOutputInfos(ImmutableList.of(parDoMultiOutput));
    InstructionOutput parDoOutput = createInstructionOutput("ParDo.out");
    ParallelInstruction parDo = createParallelInstruction("ParDo", parDoOutput);
    parDo.setParDo(parDoInstruction);
    MapTask mapTask = new MapTask();
    mapTask.setInstructions(ImmutableList.of(read, parDo));
    mapTask.setFactory(Transport.getJsonFactory());
    Network<Node, Edge> network = new MapTaskToNetworkFunction(IdGenerators.decrementingLongs()).apply(mapTask);
    assertNetworkProperties(network);
    assertEquals(4, network.nodes().size());
    assertEquals(3, network.edges().size());
    ParallelInstructionNode readNode = get(network, read);
    InstructionOutputNode readOutputNode = getOnlySuccessor(network, readNode);
    assertEquals(readOutput, readOutputNode.getInstructionOutput());
    ParallelInstructionNode parDoNode = getOnlySuccessor(network, readOutputNode);
    InstructionOutputNode parDoOutputNode = getOnlySuccessor(network, parDoNode);
    assertEquals(parDoOutput, parDoOutputNode.getInstructionOutput());
    assertEquals(parDoMultiOutput, ((MultiOutputInfoEdge) Iterables.getOnlyElement(network.edgesConnecting(parDoNode, parDoOutputNode))).getMultiOutputInfo());
}
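
The helpers referenced above (createInstructionOutput, createParallelInstruction, createMultiOutputInfo, createInstructionInput) are private to MapTaskToNetworkFunctionTest and not shown on this page. The following is a hedged reconstruction inferred only from how the test uses them; the real helpers may set additional fields such as codecs. InstructionInput is com.google.api.services.dataflow.model.InstructionInput, and Arrays is java.util.Arrays.

// Hypothetical reconstructions of the test helpers; field choices are
// inferred from the assertions and wiring in the test body.
private static InstructionOutput createInstructionOutput(String name) {
    InstructionOutput output = new InstructionOutput();
    output.setName(name);
    return output;
}

private static ParallelInstruction createParallelInstruction(String name, InstructionOutput... outputs) {
    ParallelInstruction instruction = new ParallelInstruction();
    instruction.setName(name);
    instruction.setOutputs(Arrays.asList(outputs));
    return instruction;
}

private static MultiOutputInfo createMultiOutputInfo(String tag) {
    return new MultiOutputInfo().setTag(tag);
}

private static InstructionInput createInstructionInput(int producerInstructionIndex, int outputNum) {
    InstructionInput input = new InstructionInput();
    input.setProducerInstructionIndex(producerInstructionIndex);
    input.setOutputNum(outputNum);
    return input;
}
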
Also used : ParallelInstruction(com.google.api.services.dataflow.model.ParallelInstruction) ParDoInstruction(com.google.api.services.dataflow.model.ParDoInstruction) InstructionOutputNode(org.apache.beam.runners.dataflow.worker.graph.Nodes.InstructionOutputNode) MultiOutputInfo(com.google.api.services.dataflow.model.MultiOutputInfo) MapTask(com.google.api.services.dataflow.model.MapTask) ParallelInstructionNode(org.apache.beam.runners.dataflow.worker.graph.Nodes.ParallelInstructionNode) Node(org.apache.beam.runners.dataflow.worker.graph.Nodes.Node) InstructionOutput(com.google.api.services.dataflow.model.InstructionOutput) ReadInstruction(com.google.api.services.dataflow.model.ReadInstruction) Edge(org.apache.beam.runners.dataflow.worker.graph.Edges.Edge) DefaultEdge(org.apache.beam.runners.dataflow.worker.graph.Edges.DefaultEdge) MultiOutputInfoEdge(org.apache.beam.runners.dataflow.worker.graph.Edges.MultiOutputInfoEdge) Test(org.junit.Test)

Aggregations

InstructionOutputNode (org.apache.beam.runners.dataflow.worker.graph.Nodes.InstructionOutputNode)21 Edge (org.apache.beam.runners.dataflow.worker.graph.Edges.Edge)19 Node (org.apache.beam.runners.dataflow.worker.graph.Nodes.Node)19 ParallelInstructionNode (org.apache.beam.runners.dataflow.worker.graph.Nodes.ParallelInstructionNode)19 DefaultEdge (org.apache.beam.runners.dataflow.worker.graph.Edges.DefaultEdge)14 InstructionOutput (com.google.api.services.dataflow.model.InstructionOutput)13 MultiOutputInfoEdge (org.apache.beam.runners.dataflow.worker.graph.Edges.MultiOutputInfoEdge)13 ParallelInstruction (com.google.api.services.dataflow.model.ParallelInstruction)11 Test (org.junit.Test)10 ReadInstruction (com.google.api.services.dataflow.model.ReadInstruction)9 MapTask (com.google.api.services.dataflow.model.MapTask)8 MultiOutputInfo (com.google.api.services.dataflow.model.MultiOutputInfo)5 ParDoInstruction (com.google.api.services.dataflow.model.ParDoInstruction)5 RunnerApi (org.apache.beam.model.pipeline.v1.RunnerApi)5 RemoteGrpcPortNode (org.apache.beam.runners.dataflow.worker.graph.Nodes.RemoteGrpcPortNode)5 CloudObject (org.apache.beam.runners.dataflow.util.CloudObject)4 ImmutableMap (org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.ImmutableMap)4 WriteInstruction (com.google.api.services.dataflow.model.WriteInstruction)3 IOException (java.io.IOException)3 ArrayList (java.util.ArrayList)3