use of org.apache.beam.runners.dataflow.worker.graph.Nodes.InstructionOutputNode in project beam by apache.
the class RegisterNodeFunction method apply.
@Override
public Node apply(MutableNetwork<Node, Edge> input) {
for (Node node : input.nodes()) {
if (node instanceof RemoteGrpcPortNode || node instanceof ParallelInstructionNode || node instanceof InstructionOutputNode) {
continue;
}
throw new IllegalArgumentException(String.format("Network contains unknown type of node: %s", input));
}
// Fix all non-output nodes to have named edges.
for (Node node : input.nodes()) {
if (node instanceof InstructionOutputNode) {
continue;
}
for (Node successor : input.successors(node)) {
for (Edge edge : input.edgesConnecting(node, successor)) {
if (edge instanceof DefaultEdge) {
input.removeEdge(edge);
input.addEdge(node, successor, MultiOutputInfoEdge.create(new MultiOutputInfo().setTag(idGenerator.getId())));
}
}
}
}
// We start off by replacing all edges within the graph with edges that have the named
// outputs from the predecessor step. For ParallelInstruction Source nodes and RemoteGrpcPort
// nodes this is a generated port id. All ParDoInstructions will have already had their
// outputs named via MultiOutputInfos.
ProcessBundleDescriptor.Builder processBundleDescriptor = ProcessBundleDescriptor.newBuilder().setId(idGenerator.getId()).setStateApiServiceDescriptor(stateApiServiceDescriptor);
// For the intermediate PCollections we fabricate, we make a bogus WindowingStrategy
// TODO: create a correct windowing strategy, including coders and environment
SdkComponents sdkComponents = SdkComponents.create(pipeline.getComponents(), null);
// Default to use the Java environment if pipeline doesn't have environment specified.
if (pipeline.getComponents().getEnvironmentsMap().isEmpty()) {
sdkComponents.registerEnvironment(Environments.JAVA_SDK_HARNESS_ENVIRONMENT);
}
String fakeWindowingStrategyId = "fakeWindowingStrategy" + idGenerator.getId();
try {
RunnerApi.MessageWithComponents fakeWindowingStrategyProto = WindowingStrategyTranslation.toMessageProto(WindowingStrategy.globalDefault(), sdkComponents);
processBundleDescriptor.putWindowingStrategies(fakeWindowingStrategyId, fakeWindowingStrategyProto.getWindowingStrategy()).putAllCoders(fakeWindowingStrategyProto.getComponents().getCodersMap()).putAllEnvironments(fakeWindowingStrategyProto.getComponents().getEnvironmentsMap());
} catch (IOException exc) {
throw new RuntimeException("Could not convert default windowing stratey to proto", exc);
}
Map<Node, String> nodesToPCollections = new HashMap<>();
ImmutableMap.Builder<String, NameContext> ptransformIdToNameContexts = ImmutableMap.builder();
ImmutableMap.Builder<String, Iterable<SideInputInfo>> ptransformIdToSideInputInfos = ImmutableMap.builder();
ImmutableMap.Builder<String, Iterable<PCollectionView<?>>> ptransformIdToPCollectionViews = ImmutableMap.builder();
ImmutableMap.Builder<String, NameContext> pcollectionIdToNameContexts = ImmutableMap.builder();
ImmutableMap.Builder<InstructionOutputNode, String> instructionOutputNodeToCoderIdBuilder = ImmutableMap.builder();
// Generate a new PCollection id for each InstructionOutputNode and register it with the ProcessBundleDescriptor.
for (InstructionOutputNode node : Iterables.filter(input.nodes(), InstructionOutputNode.class)) {
InstructionOutput instructionOutput = node.getInstructionOutput();
String coderId = "generatedCoder" + idGenerator.getId();
instructionOutputNodeToCoderIdBuilder.put(node, coderId);
try (ByteString.Output output = ByteString.newOutput()) {
try {
Coder<?> javaCoder = CloudObjects.coderFromCloudObject(CloudObject.fromSpec(instructionOutput.getCodec()));
sdkComponents.registerCoder(javaCoder);
RunnerApi.Coder coderProto = CoderTranslation.toProto(javaCoder, sdkComponents);
processBundleDescriptor.putCoders(coderId, coderProto);
} catch (IOException e) {
throw new IllegalArgumentException(String.format("Unable to encode coder %s for output %s", instructionOutput.getCodec(), instructionOutput), e);
} catch (Exception e) {
// The coder probably wasn't a Java coder; fall back to embedding the cloud object spec as the payload.
OBJECT_MAPPER.writeValue(output, instructionOutput.getCodec());
processBundleDescriptor.putCoders(coderId, RunnerApi.Coder.newBuilder().setSpec(RunnerApi.FunctionSpec.newBuilder().setPayload(output.toByteString())).build());
}
} catch (IOException e) {
throw new IllegalArgumentException(String.format("Unable to encode coder %s for output %s", instructionOutput.getCodec(), instructionOutput), e);
}
// Generate new PCollection ID and map it to relevant node.
// Will later be used to fill PTransform inputs/outputs information.
String pcollectionId = "generatedPcollection" + idGenerator.getId();
processBundleDescriptor.putPcollections(pcollectionId, RunnerApi.PCollection.newBuilder().setCoderId(coderId).setWindowingStrategyId(fakeWindowingStrategyId).build());
nodesToPCollections.put(node, pcollectionId);
pcollectionIdToNameContexts.put(pcollectionId, NameContext.create(null, instructionOutput.getOriginalName(), instructionOutput.getSystemName(), instructionOutput.getName()));
}
processBundleDescriptor.putAllCoders(sdkComponents.toComponents().getCodersMap());
Map<InstructionOutputNode, String> instructionOutputNodeToCoderIdMap = instructionOutputNodeToCoderIdBuilder.build();
for (ParallelInstructionNode node : Iterables.filter(input.nodes(), ParallelInstructionNode.class)) {
ParallelInstruction parallelInstruction = node.getParallelInstruction();
String ptransformId = "generatedPtransform" + idGenerator.getId();
ptransformIdToNameContexts.put(ptransformId, NameContext.create(null, parallelInstruction.getOriginalName(), parallelInstruction.getSystemName(), parallelInstruction.getName()));
RunnerApi.PTransform.Builder pTransform = RunnerApi.PTransform.newBuilder();
RunnerApi.FunctionSpec.Builder transformSpec = RunnerApi.FunctionSpec.newBuilder();
if (parallelInstruction.getParDo() != null) {
ParDoInstruction parDoInstruction = parallelInstruction.getParDo();
CloudObject userFnSpec = CloudObject.fromSpec(parDoInstruction.getUserFn());
String userFnClassName = userFnSpec.getClassName();
if ("CombineValuesFn".equals(userFnClassName) || "KeyedCombineFn".equals(userFnClassName)) {
transformSpec = transformCombineValuesFnToFunctionSpec(userFnSpec);
ptransformIdToPCollectionViews.put(ptransformId, Collections.emptyList());
} else {
String parDoPTransformId = getString(userFnSpec, PropertyNames.SERIALIZED_FN);
RunnerApi.PTransform parDoPTransform = pipeline.getComponents().getTransformsOrDefault(parDoPTransformId, null);
// TODO: only the non-null branch should exist; for migration ease only
if (parDoPTransform != null) {
checkArgument(parDoPTransform.getSpec().getUrn().equals(PTransformTranslation.PAR_DO_TRANSFORM_URN), "Found transform \"%s\" for ParallelDo instruction, " + " but that transform had unexpected URN \"%s\" (expected \"%s\")", parDoPTransformId, parDoPTransform.getSpec().getUrn(), PTransformTranslation.PAR_DO_TRANSFORM_URN);
RunnerApi.ParDoPayload parDoPayload;
try {
parDoPayload = RunnerApi.ParDoPayload.parseFrom(parDoPTransform.getSpec().getPayload());
} catch (InvalidProtocolBufferException exc) {
throw new RuntimeException("ParDo did not have a ParDoPayload", exc);
}
ImmutableList.Builder<PCollectionView<?>> pcollectionViews = ImmutableList.builder();
for (Map.Entry<String, SideInput> sideInputEntry : parDoPayload.getSideInputsMap().entrySet()) {
pcollectionViews.add(transformSideInputForRunner(pipeline, parDoPTransform, sideInputEntry.getKey(), sideInputEntry.getValue()));
transformSideInputForSdk(pipeline, parDoPTransform, sideInputEntry.getKey(), processBundleDescriptor, pTransform);
}
ptransformIdToPCollectionViews.put(ptransformId, pcollectionViews.build());
transformSpec.setUrn(PTransformTranslation.PAR_DO_TRANSFORM_URN).setPayload(parDoPayload.toByteString());
} else {
// legacy path - bytes are the FunctionSpec's payload field, basically, and
// SDKs expect it in the PTransform's payload field
byte[] userFnBytes = getBytes(userFnSpec, PropertyNames.SERIALIZED_FN);
transformSpec.setUrn(ParDoTranslation.CUSTOM_JAVA_DO_FN_URN).setPayload(ByteString.copyFrom(userFnBytes));
}
// Add side input information for batch pipelines
if (parDoInstruction.getSideInputs() != null) {
ptransformIdToSideInputInfos.put(ptransformId, forSideInputInfos(parDoInstruction.getSideInputs(), true));
}
}
} else if (parallelInstruction.getRead() != null) {
ReadInstruction readInstruction = parallelInstruction.getRead();
CloudObject sourceSpec = CloudObject.fromSpec(CloudSourceUtils.flattenBaseSpecs(readInstruction.getSource()).getSpec());
// TODO: Need to plumb through the SDK specific function spec.
transformSpec.setUrn(JAVA_SOURCE_URN);
try {
byte[] serializedSource = Base64.getDecoder().decode(getString(sourceSpec, SERIALIZED_SOURCE));
ByteString sourceByteString = ByteString.copyFrom(serializedSource);
transformSpec.setPayload(sourceByteString);
} catch (Exception e) {
throw new IllegalArgumentException(String.format("Unable to process Read %s", parallelInstruction), e);
}
} else if (parallelInstruction.getFlatten() != null) {
transformSpec.setUrn(PTransformTranslation.FLATTEN_TRANSFORM_URN);
} else {
throw new IllegalArgumentException(String.format("Unknown type of ParallelInstruction %s", parallelInstruction));
}
for (Node predecessorOutput : input.predecessors(node)) {
pTransform.putInputs("generatedInput" + idGenerator.getId(), nodesToPCollections.get(predecessorOutput));
}
for (Edge edge : input.outEdges(node)) {
Node nodeOutput = input.incidentNodes(edge).target();
MultiOutputInfoEdge edge2 = (MultiOutputInfoEdge) edge;
pTransform.putOutputs(edge2.getMultiOutputInfo().getTag(), nodesToPCollections.get(nodeOutput));
}
pTransform.setSpec(transformSpec);
processBundleDescriptor.putTransforms(ptransformId, pTransform.build());
}
// Add the PTransforms representing the remote gRPC nodes
for (RemoteGrpcPortNode node : Iterables.filter(input.nodes(), RemoteGrpcPortNode.class)) {
RunnerApi.PTransform.Builder pTransform = RunnerApi.PTransform.newBuilder();
Set<Node> predecessors = input.predecessors(node);
Set<Node> successors = input.successors(node);
if (predecessors.isEmpty() && !successors.isEmpty()) {
Node instructionOutputNode = Iterables.getOnlyElement(successors);
pTransform.putOutputs("generatedOutput" + idGenerator.getId(), nodesToPCollections.get(instructionOutputNode));
pTransform.setSpec(RunnerApi.FunctionSpec.newBuilder().setUrn(DATA_INPUT_URN).setPayload(node.getRemoteGrpcPort().toBuilder().setCoderId(instructionOutputNodeToCoderIdMap.get(instructionOutputNode)).build().toByteString()).build());
} else if (!predecessors.isEmpty() && successors.isEmpty()) {
Node instructionOutputNode = Iterables.getOnlyElement(predecessors);
pTransform.putInputs("generatedInput" + idGenerator.getId(), nodesToPCollections.get(instructionOutputNode));
pTransform.setSpec(RunnerApi.FunctionSpec.newBuilder().setUrn(DATA_OUTPUT_URN).setPayload(node.getRemoteGrpcPort().toBuilder().setCoderId(instructionOutputNodeToCoderIdMap.get(instructionOutputNode)).build().toByteString()).build());
} else {
throw new IllegalStateException("Expected either one input OR one output " + "InstructionOutputNode for this RemoteGrpcPortNode");
}
processBundleDescriptor.putTransforms(node.getPrimitiveTransformId(), pTransform.build());
}
return RegisterRequestNode.create(RegisterRequest.newBuilder().addProcessBundleDescriptor(processBundleDescriptor).build(), ptransformIdToNameContexts.build(), ptransformIdToSideInputInfos.build(), ptransformIdToPCollectionViews.build(), pcollectionIdToNameContexts.build());
}
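The apply method above does all of its graph surgery through Guava's MutableNetwork API (Beam uses a vendored copy of Guava internally). Below is a minimal, self-contained sketch of the first pass, the edge-relabeling loop, written against plain Guava with String stand-ins for Beam's Node and Edge classes; it illustrates the traversal pattern only and is not Beam's actual code or types.

import com.google.common.collect.ImmutableList;
import com.google.common.graph.MutableNetwork;
import com.google.common.graph.NetworkBuilder;

public class EdgeRelabelSketch {
  public static void main(String[] args) {
    // "Read" stands in for a ParallelInstructionNode, "Read.out" for an InstructionOutputNode,
    // and the String edge "default" for a DefaultEdge.
    MutableNetwork<String, String> network =
        NetworkBuilder.directed().allowsParallelEdges(true).build();
    network.addNode("Read");
    network.addNode("Read.out");
    network.addEdge("Read", "Read.out", "default");

    int nextTag = 0;
    for (String node : ImmutableList.copyOf(network.nodes())) {
      if (node.endsWith(".out")) {
        continue; // output nodes are skipped, exactly like InstructionOutputNode above
      }
      for (String successor : ImmutableList.copyOf(network.successors(node))) {
        for (String edge : ImmutableList.copyOf(network.edgesConnecting(node, successor))) {
          if (edge.equals("default")) {
            // Swap the untagged edge for one carrying a generated output tag, mirroring
            // MultiOutputInfoEdge.create(new MultiOutputInfo().setTag(idGenerator.getId())).
            network.removeEdge(edge);
            network.addEdge(node, successor, "tag-" + nextTag++);
          }
        }
      }
    }
    System.out.println(network.edges()); // prints [tag-0]
  }
}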
use of org.apache.beam.runners.dataflow.worker.graph.Nodes.InstructionOutputNode in project beam by apache.
the class CloneAmbiguousFlattensFunction method cloneFlatten.
/**
* A helper function which performs the actual cloning procedure, which means creating the runner
* and SDK versions of both the ambiguous flatten and its PCollection, attaching the old flatten's
* predecessors and successors properly, and then removing the ambiguous flatten from the network.
*/
private void cloneFlatten(Node flatten, MutableNetwork<Node, Edge> network) {
// Start by creating the clones of the flatten and its PCollection.
InstructionOutputNode flattenOut = (InstructionOutputNode) Iterables.getOnlyElement(network.successors(flatten));
ParallelInstruction flattenInstruction = ((ParallelInstructionNode) flatten).getParallelInstruction();
Node runnerFlatten = ParallelInstructionNode.create(flattenInstruction, ExecutionLocation.RUNNER_HARNESS);
Node runnerFlattenOut = InstructionOutputNode.create(flattenOut.getInstructionOutput(), flattenOut.getPcollectionId());
network.addNode(runnerFlatten);
network.addNode(runnerFlattenOut);
Node sdkFlatten = ParallelInstructionNode.create(flattenInstruction, ExecutionLocation.SDK_HARNESS);
Node sdkFlattenOut = InstructionOutputNode.create(flattenOut.getInstructionOutput(), flattenOut.getPcollectionId());
network.addNode(sdkFlatten);
network.addNode(sdkFlattenOut);
for (Edge edge : ImmutableList.copyOf(network.edgesConnecting(flatten, flattenOut))) {
network.addEdge(runnerFlatten, runnerFlattenOut, edge.clone());
network.addEdge(sdkFlatten, sdkFlattenOut, edge.clone());
}
// Copy over predecessor edges to both cloned nodes.
for (Node predecessor : network.predecessors(flatten)) {
for (Edge edge : ImmutableList.copyOf(network.edgesConnecting(predecessor, flatten))) {
network.addEdge(predecessor, runnerFlatten, edge.clone());
network.addEdge(predecessor, sdkFlatten, edge.clone());
}
}
// Copy over successor edges depending on execution locations of successors.
for (Node successor : network.successors(flattenOut)) {
// Connect successor to SDK harness only if sure it executes in SDK.
Node selectedOutput = executesInSdkHarness(successor) ? sdkFlattenOut : runnerFlattenOut;
for (Edge edge : ImmutableList.copyOf(network.edgesConnecting(flattenOut, successor))) {
network.addEdge(selectedOutput, successor, edge.clone());
}
}
network.removeNode(flatten);
network.removeNode(flattenOut);
}
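To make the rerouting logic easier to see in isolation, here is a small self-contained sketch of the same idea using plain Guava and String nodes: predecessors are wired to both clones, each successor is attached to exactly one clone based on where it executes, and the originals are then dropped. The executesInSdk map is a hypothetical stand-in for the executesInSdkHarness check; none of the types below are Beam's.

import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.graph.MutableNetwork;
import com.google.common.graph.NetworkBuilder;
import java.util.Map;

public class CloneFlattenSketch {
  public static void main(String[] args) {
    MutableNetwork<String, String> network =
        NetworkBuilder.directed().allowsParallelEdges(true).build();
    for (String n : new String[] {"A", "B", "Flatten", "Flatten.out", "RunnerParDo", "SdkParDo"}) {
      network.addNode(n);
    }
    network.addEdge("A", "Flatten", "eA");
    network.addEdge("B", "Flatten", "eB");
    network.addEdge("Flatten", "Flatten.out", "eOut");
    network.addEdge("Flatten.out", "RunnerParDo", "e1");
    network.addEdge("Flatten.out", "SdkParDo", "e2");

    // Hypothetical stand-in for executesInSdkHarness(successor).
    Map<String, Boolean> executesInSdk = ImmutableMap.of("RunnerParDo", false, "SdkParDo", true);

    // Create the runner and SDK clones of the flatten and its output PCollection.
    for (String clone : new String[] {"runnerFlatten", "runnerFlatten.out", "sdkFlatten", "sdkFlatten.out"}) {
      network.addNode(clone);
    }
    network.addEdge("runnerFlatten", "runnerFlatten.out", "eOut-runner");
    network.addEdge("sdkFlatten", "sdkFlatten.out", "eOut-sdk");

    // Predecessors feed both clones.
    for (String pred : ImmutableList.copyOf(network.predecessors("Flatten"))) {
      network.addEdge(pred, "runnerFlatten", pred + "->runner");
      network.addEdge(pred, "sdkFlatten", pred + "->sdk");
    }
    // Each successor hangs off exactly one clone, chosen by its execution location.
    for (String succ : ImmutableList.copyOf(network.successors("Flatten.out"))) {
      String selected = executesInSdk.get(succ) ? "sdkFlatten.out" : "runnerFlatten.out";
      network.addEdge(selected, succ, succ + "-rerouted");
    }
    // Finally drop the ambiguous originals; their incident edges are removed with them.
    network.removeNode("Flatten");
    network.removeNode("Flatten.out");

    System.out.println(network.successors("runnerFlatten.out")); // [RunnerParDo]
    System.out.println(network.successors("sdkFlatten.out"));    // [SdkParDo]
  }
}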
use of org.apache.beam.runners.dataflow.worker.graph.Nodes.InstructionOutputNode in project beam by apache.
the class BeamFnMapTaskExecutorFactory method createOutputReceiversTransform.
/**
* Returns a function which can convert {@link InstructionOutput}s into {@link OutputReceiver}s.
*/
static Function<Node, Node> createOutputReceiversTransform(final String stageName, final CounterFactory counterFactory) {
return new TypeSafeNodeFunction<InstructionOutputNode>(InstructionOutputNode.class) {
@Override
public Node typedApply(InstructionOutputNode input) {
InstructionOutput cloudOutput = input.getInstructionOutput();
OutputReceiver outputReceiver = new OutputReceiver();
Coder<?> coder = CloudObjects.coderFromCloudObject(CloudObject.fromSpec(cloudOutput.getCodec()));
@SuppressWarnings("unchecked") ElementCounter outputCounter = new DataflowOutputCounter(cloudOutput.getName(), new ElementByteSizeObservableCoder<>(coder), counterFactory, NameContext.create(stageName, cloudOutput.getOriginalName(), cloudOutput.getSystemName(), cloudOutput.getName()));
outputReceiver.addOutputCounter(outputCounter);
return OutputReceiverNode.create(outputReceiver, coder, input.getPcollectionId());
}
};
}
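The anonymous TypeSafeNodeFunction above follows a simple pattern: apply a typed transformation only to nodes of one concrete class and pass every other node through untouched. The sketch below reimplements that pattern with plain java.util.function types and hypothetical placeholder node classes, purely to illustrate the shape of the abstraction; it is not Beam's TypeSafeNodeFunction.

import java.util.function.Function;

public class TypedNodeFunctionSketch {
  interface Node {}

  static final class InstructionOutputNode implements Node {
    final String name;
    InstructionOutputNode(String name) { this.name = name; }
  }

  static final class OutputReceiverNode implements Node {
    final String source;
    OutputReceiverNode(String source) { this.source = source; }
    @Override public String toString() { return "OutputReceiver(" + source + ")"; }
  }

  // Applies typedApply only when the node is an instance of clazz; other nodes pass through.
  static <T extends Node> Function<Node, Node> typeSafe(Class<T> clazz, Function<T, Node> typedApply) {
    return node -> clazz.isInstance(node) ? typedApply.apply(clazz.cast(node)) : node;
  }

  public static void main(String[] args) {
    Function<Node, Node> fn =
        typeSafe(InstructionOutputNode.class, n -> new OutputReceiverNode(n.name));
    System.out.println(fn.apply(new InstructionOutputNode("Read.out"))); // OutputReceiver(Read.out)
    Node other = new Node() {};
    System.out.println(fn.apply(other) == other); // true: non-matching nodes are returned unchanged
  }
}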
use of org.apache.beam.runners.dataflow.worker.graph.Nodes.InstructionOutputNode in project beam by apache.
the class StreamingDataflowWorker method process.
private void process(final SdkWorkerHarness worker, final ComputationState computationState, final Instant inputDataWatermark, @Nullable final Instant outputDataWatermark, @Nullable final Instant synchronizedProcessingTime, final Work work) {
final Windmill.WorkItem workItem = work.getWorkItem();
final String computationId = computationState.getComputationId();
final ByteString key = workItem.getKey();
work.setState(State.PROCESSING);
{
StringBuilder workIdBuilder = new StringBuilder(33);
workIdBuilder.append(Long.toHexString(workItem.getShardingKey()));
workIdBuilder.append('-');
workIdBuilder.append(Long.toHexString(workItem.getWorkToken()));
DataflowWorkerLoggingMDC.setWorkId(workIdBuilder.toString());
}
DataflowWorkerLoggingMDC.setStageName(computationId);
LOG.debug("Starting processing for {}:\n{}", computationId, work);
Windmill.WorkItemCommitRequest.Builder outputBuilder = initializeOutputBuilder(key, workItem);
// Before any processing starts, call any pending OnCommit callbacks. Nothing that requires
// cleanup should be done before this, since we might exit early here.
callFinalizeCallbacks(workItem);
if (workItem.getSourceState().getOnlyFinalize()) {
outputBuilder.setSourceStateUpdates(Windmill.SourceState.newBuilder().setOnlyFinalize(true));
work.setState(State.COMMIT_QUEUED);
commitQueue.put(new Commit(outputBuilder.build(), computationState, work));
return;
}
long processingStartTimeNanos = System.nanoTime();
final MapTask mapTask = computationState.getMapTask();
StageInfo stageInfo = stageInfoMap.computeIfAbsent(mapTask.getStageName(), s -> new StageInfo(s, mapTask.getSystemName(), this));
ExecutionState executionState = null;
try {
executionState = computationState.getExecutionStateQueue(worker).poll();
if (executionState == null) {
MutableNetwork<Node, Edge> mapTaskNetwork = mapTaskToNetwork.apply(mapTask);
if (LOG.isDebugEnabled()) {
LOG.debug("Network as Graphviz .dot: {}", Networks.toDot(mapTaskNetwork));
}
ParallelInstructionNode readNode = (ParallelInstructionNode) Iterables.find(mapTaskNetwork.nodes(), node -> node instanceof ParallelInstructionNode && ((ParallelInstructionNode) node).getParallelInstruction().getRead() != null);
InstructionOutputNode readOutputNode = (InstructionOutputNode) Iterables.getOnlyElement(mapTaskNetwork.successors(readNode));
DataflowExecutionContext.DataflowExecutionStateTracker executionStateTracker = new DataflowExecutionContext.DataflowExecutionStateTracker(ExecutionStateSampler.instance(), stageInfo.executionStateRegistry.getState(NameContext.forStage(mapTask.getStageName()), "other", null, ScopedProfiler.INSTANCE.emptyScope()), stageInfo.deltaCounters, options, computationId);
StreamingModeExecutionContext context = new StreamingModeExecutionContext(pendingDeltaCounters, computationId, readerCache, !computationState.getTransformUserNameToStateFamily().isEmpty() ? computationState.getTransformUserNameToStateFamily() : stateNameMap, stateCache.forComputation(computationId), stageInfo.metricsContainerRegistry, executionStateTracker, stageInfo.executionStateRegistry, maxSinkBytes);
DataflowMapTaskExecutor mapTaskExecutor = mapTaskExecutorFactory.create(worker.getControlClientHandler(), worker.getGrpcDataFnServer(), sdkHarnessRegistry.beamFnDataApiServiceDescriptor(), worker.getGrpcStateFnServer(), mapTaskNetwork, options, mapTask.getStageName(), readerRegistry, sinkRegistry, context, pendingDeltaCounters, idGenerator);
ReadOperation readOperation = mapTaskExecutor.getReadOperation();
// Disable progress updates since their results are unused for streaming
// and computing them involves starting a thread.
readOperation.setProgressUpdatePeriodMs(ReadOperation.DONT_UPDATE_PERIODICALLY);
Preconditions.checkState(mapTaskExecutor.supportsRestart(), "Streaming runner requires all operations support restart.");
Coder<?> readCoder;
readCoder = CloudObjects.coderFromCloudObject(CloudObject.fromSpec(readOutputNode.getInstructionOutput().getCodec()));
Coder<?> keyCoder = extractKeyCoder(readCoder);
// If using a custom source, count bytes read for autoscaling.
if (CustomSources.class.getName().equals(readNode.getParallelInstruction().getRead().getSource().getSpec().get("@type"))) {
NameContext nameContext = NameContext.create(mapTask.getStageName(), readNode.getParallelInstruction().getOriginalName(), readNode.getParallelInstruction().getSystemName(), readNode.getParallelInstruction().getName());
readOperation.receivers[0].addOutputCounter(new OutputObjectAndByteCounter(new IntrinsicMapTaskExecutorFactory.ElementByteSizeObservableCoder<>(readCoder), mapTaskExecutor.getOutputCounters(), nameContext).setSamplingPeriod(100).countBytes("dataflow_input_size-" + mapTask.getSystemName()));
}
executionState = new ExecutionState(mapTaskExecutor, context, keyCoder, executionStateTracker);
}
WindmillStateReader stateReader = new WindmillStateReader(metricTrackingWindmillServer, computationId, key, workItem.getShardingKey(), workItem.getWorkToken());
StateFetcher localStateFetcher = stateFetcher.byteTrackingView();
// If the read outputs KVs, then we can decode Windmill's byte key into a userland
// key object and provide it to the execution context for use with per-key state.
// Otherwise, we pass null.
//
// The coder type that will be present is:
// WindowedValueCoder(TimerOrElementCoder(KvCoder))
@Nullable Coder<?> keyCoder = executionState.getKeyCoder();
@Nullable Object executionKey = keyCoder == null ? null : keyCoder.decode(key.newInput(), Coder.Context.OUTER);
if (workItem.hasHotKeyInfo()) {
Windmill.HotKeyInfo hotKeyInfo = workItem.getHotKeyInfo();
Duration hotKeyAge = Duration.millis(hotKeyInfo.getHotKeyAgeUsec() / 1000);
// The MapTask instructions are ordered by dependencies, such that the first element is
// always going to be the shuffle task.
String stepName = computationState.getMapTask().getInstructions().get(0).getName();
if (options.isHotKeyLoggingEnabled() && keyCoder != null) {
hotKeyLogger.logHotKeyDetection(stepName, hotKeyAge, executionKey);
} else {
hotKeyLogger.logHotKeyDetection(stepName, hotKeyAge);
}
}
executionState.getContext().start(executionKey, workItem, inputDataWatermark, outputDataWatermark, synchronizedProcessingTime, stateReader, localStateFetcher, outputBuilder);
// Blocks while executing work.
executionState.getWorkExecutor().execute();
Iterables.addAll(this.pendingMonitoringInfos, executionState.getWorkExecutor().extractMetricUpdates());
commitCallbacks.putAll(executionState.getContext().flushState());
// Release the execution state for another thread to use.
computationState.getExecutionStateQueue(worker).offer(executionState);
executionState = null;
// Add the output to the commit queue.
work.setState(State.COMMIT_QUEUED);
WorkItemCommitRequest commitRequest = outputBuilder.build();
int byteLimit = maxWorkItemCommitBytes;
int commitSize = commitRequest.getSerializedSize();
int estimatedCommitSize = commitSize < 0 ? Integer.MAX_VALUE : commitSize;
// Detect overflow of integer serialized size or if the byte limit was exceeded.
windmillMaxObservedWorkItemCommitBytes.addValue(estimatedCommitSize);
if (commitSize < 0 || commitSize > byteLimit) {
KeyCommitTooLargeException e = KeyCommitTooLargeException.causedBy(computationId, byteLimit, commitRequest);
reportFailure(computationId, workItem, e);
LOG.error(e.toString());
// Drop the current request in favor of a new, minimal one requesting truncation.
// Messages, timers, counters, and other commit content will not be used by the service
// so we're purposefully dropping them here
commitRequest = buildWorkItemTruncationRequest(key, workItem, estimatedCommitSize);
}
commitQueue.put(new Commit(commitRequest, computationState, work));
// Compute shuffle and state byte statistics; these will be flushed asynchronously.
long stateBytesWritten = outputBuilder.clearOutputMessages().build().getSerializedSize();
long shuffleBytesRead = 0;
for (Windmill.InputMessageBundle bundle : workItem.getMessageBundlesList()) {
for (Windmill.Message message : bundle.getMessagesList()) {
shuffleBytesRead += message.getSerializedSize();
}
}
long stateBytesRead = stateReader.getBytesRead() + localStateFetcher.getBytesRead();
windmillShuffleBytesRead.addValue(shuffleBytesRead);
windmillStateBytesRead.addValue(stateBytesRead);
windmillStateBytesWritten.addValue(stateBytesWritten);
LOG.debug("Processing done for work token: {}", workItem.getWorkToken());
} catch (Throwable t) {
if (executionState != null) {
try {
executionState.getContext().invalidateCache();
executionState.getWorkExecutor().close();
} catch (Exception e) {
LOG.warn("Failed to close map task executor: ", e);
} finally {
// Release references to potentially large objects early.
executionState = null;
}
}
t = t instanceof UserCodeException ? t.getCause() : t;
boolean retryLocally = false;
if (KeyTokenInvalidException.isKeyTokenInvalidException(t)) {
LOG.debug("Execution of work for computation '{}' on key '{}' failed due to token expiration. " + "Work will not be retried locally.", computationId, key.toStringUtf8());
} else {
LastExceptionDataProvider.reportException(t);
LOG.debug("Failed work: {}", work);
Duration elapsedTimeSinceStart = new Duration(work.getStartTime(), Instant.now());
if (!reportFailure(computationId, workItem, t)) {
LOG.error("Execution of work for computation '{}' on key '{}' failed with uncaught exception, " + "and Windmill indicated not to retry locally.", computationId, key.toStringUtf8(), t);
} else if (isOutOfMemoryError(t)) {
File heapDump = memoryMonitor.tryToDumpHeap();
LOG.error("Execution of work for computation '{}' for key '{}' failed with out-of-memory. " + "Work will not be retried locally. Heap dump {}.", computationId, key.toStringUtf8(), heapDump == null ? "not written" : ("written to '" + heapDump + "'"), t);
} else if (elapsedTimeSinceStart.isLongerThan(MAX_LOCAL_PROCESSING_RETRY_DURATION)) {
LOG.error("Execution of work for computation '{}' for key '{}' failed with uncaught exception, " + "and it will not be retried locally because the elapsed time since start {} " + "exceeds {}.", computationId, key.toStringUtf8(), elapsedTimeSinceStart, MAX_LOCAL_PROCESSING_RETRY_DURATION, t);
} else {
LOG.error("Execution of work for computation '{}' on key '{}' failed with uncaught exception. " + "Work will be retried locally.", computationId, key.toStringUtf8(), t);
retryLocally = true;
}
}
if (retryLocally) {
// Try again after some delay and at the end of the queue to avoid a tight loop.
sleep(retryLocallyDelayMs);
workUnitExecutor.forceExecute(work, work.getWorkItem().getSerializedSize());
} else {
// Consider the item invalid. It will eventually be retried by Windmill if it still needs to
// be processed.
computationState.completeWork(ShardedKey.create(key, workItem.getShardingKey()), workItem.getWorkToken());
}
} finally {
// Update total processing time counters. Updating in finally clause ensures that
// work items causing exceptions are also accounted in time spent.
long processingTimeMsecs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - processingStartTimeNanos);
stageInfo.totalProcessingMsecs.addValue(processingTimeMsecs);
// Attribute the processing time to timer handling as well when the work item contains timers.
if (work.getWorkItem().hasTimers()) {
stageInfo.timerProcessingMsecs.addValue(processingTimeMsecs);
}
DataflowWorkerLoggingMDC.setWorkId(null);
DataflowWorkerLoggingMDC.setStageName(null);
}
}
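One detail of process() worth isolating is the commit-size guard: protobuf's getSerializedSize() returns an int, so a commit larger than 2 GB overflows to a negative value, which is treated the same as exceeding the byte limit and leads to a truncated commit request instead. The standalone sketch below shows just that decision; the limit value and method names are illustrative stand-ins, not the Windmill or Dataflow worker API.

public class CommitSizeGuardSketch {
  static final int MAX_COMMIT_BYTES = 180 << 20; // illustrative 180 MB limit, an assumption

  // Maps an overflowed (negative) serialized size to "infinitely large" for reporting.
  static int estimatedCommitSize(int serializedSize) {
    return serializedSize < 0 ? Integer.MAX_VALUE : serializedSize;
  }

  // True when the commit must be replaced by a minimal truncation request.
  static boolean shouldTruncate(int serializedSize, int byteLimit) {
    return serializedSize < 0 || serializedSize > byteLimit;
  }

  public static void main(String[] args) {
    System.out.println(shouldTruncate(64 * 1024, MAX_COMMIT_BYTES));  // false: small commit
    System.out.println(shouldTruncate(250 << 20, MAX_COMMIT_BYTES));  // true: over the limit
    System.out.println(shouldTruncate(-1, MAX_COMMIT_BYTES));         // true: int size overflowed
    System.out.println(estimatedCommitSize(-1) == Integer.MAX_VALUE); // true
  }
}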
use of org.apache.beam.runners.dataflow.worker.graph.Nodes.InstructionOutputNode in project beam by apache.
the class MapTaskToNetworkFunctionTest method testParDo.
@Test
public void testParDo() {
InstructionOutput readOutput = createInstructionOutput("Read.out");
ParallelInstruction read = createParallelInstruction("Read", readOutput);
read.setRead(new ReadInstruction());
MultiOutputInfo parDoMultiOutput = createMultiOutputInfo("output");
ParDoInstruction parDoInstruction = new ParDoInstruction();
// Read.out
parDoInstruction.setInput(createInstructionInput(0, 0));
parDoInstruction.setMultiOutputInfos(ImmutableList.of(parDoMultiOutput));
InstructionOutput parDoOutput = createInstructionOutput("ParDo.out");
ParallelInstruction parDo = createParallelInstruction("ParDo", parDoOutput);
parDo.setParDo(parDoInstruction);
MapTask mapTask = new MapTask();
mapTask.setInstructions(ImmutableList.of(read, parDo));
mapTask.setFactory(Transport.getJsonFactory());
Network<Node, Edge> network = new MapTaskToNetworkFunction(IdGenerators.decrementingLongs()).apply(mapTask);
assertNetworkProperties(network);
assertEquals(4, network.nodes().size());
assertEquals(3, network.edges().size());
ParallelInstructionNode readNode = get(network, read);
InstructionOutputNode readOutputNode = getOnlySuccessor(network, readNode);
assertEquals(readOutput, readOutputNode.getInstructionOutput());
ParallelInstructionNode parDoNode = getOnlySuccessor(network, readOutputNode);
InstructionOutputNode parDoOutputNode = getOnlySuccessor(network, parDoNode);
assertEquals(parDoOutput, parDoOutputNode.getInstructionOutput());
assertEquals(parDoMultiOutput, ((MultiOutputInfoEdge) Iterables.getOnlyElement(network.edgesConnecting(parDoNode, parDoOutputNode))).getMultiOutputInfo());
}
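The test relies on private helper methods (createInstructionOutput, createParallelInstruction, createMultiOutputInfo, createInstructionInput) that are not part of this excerpt. The sketch below is a plausible reconstruction of what such helpers would need to do with the Dataflow v1b3 model classes; treat the exact bodies as assumptions rather than the actual test code.

import com.google.api.services.dataflow.model.InstructionInput;
import com.google.api.services.dataflow.model.InstructionOutput;
import com.google.api.services.dataflow.model.MultiOutputInfo;
import com.google.api.services.dataflow.model.ParallelInstruction;
import com.google.common.collect.ImmutableList;

final class MapTaskTestHelpers {
  // Assumed helper: an output named the same way for name, system name, and original name.
  static InstructionOutput createInstructionOutput(String name) {
    return new InstructionOutput().setName(name).setSystemName(name).setOriginalName(name);
  }

  // Assumed helper: an instruction with the given outputs attached.
  static ParallelInstruction createParallelInstruction(String name, InstructionOutput... outputs) {
    return new ParallelInstruction()
        .setName(name)
        .setSystemName(name)
        .setOriginalName(name)
        .setOutputs(ImmutableList.copyOf(outputs));
  }

  static MultiOutputInfo createMultiOutputInfo(String tag) {
    return new MultiOutputInfo().setTag(tag);
  }

  // Assumed helper: points at output outputNum of the instruction at producerInstructionIndex.
  static InstructionInput createInstructionInput(int producerInstructionIndex, int outputNum) {
    return new InstructionInput()
        .setProducerInstructionIndex(producerInstructionIndex)
        .setOutputNum(outputNum);
  }

  private MapTaskTestHelpers() {}
}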