Use of org.apache.beam.model.fnexecution.v1.BeamFnApi.Elements.Timers in project beam by apache.
The class ProcessBundleDescriptors, method fromExecutableStageInternal.
private static ExecutableProcessBundleDescriptor fromExecutableStageInternal(
    String id,
    ExecutableStage stage,
    ApiServiceDescriptor dataEndpoint,
    @Nullable ApiServiceDescriptor stateEndpoint)
    throws IOException {
  // Create with all of the processing transforms and all of the components.
  // TODO: Remove the unreachable subcomponents if the size of the descriptor matters.
  Map<String, PTransform> stageTransforms =
      stage.getTransforms().stream()
          .collect(Collectors.toMap(PTransformNode::getId, PTransformNode::getTransform));
  Components.Builder components =
      stage.getComponents().toBuilder().clearTransforms().putAllTransforms(stageTransforms);
  ImmutableList.Builder<RemoteInputDestination> inputDestinationsBuilder = ImmutableList.builder();
  ImmutableMap.Builder<String, Coder> remoteOutputCodersBuilder = ImmutableMap.builder();
  WireCoderSetting wireCoderSetting =
      stage.getWireCoderSettings().stream()
          .filter(ws -> ws.getInputOrOutputId().equals(stage.getInputPCollection().getId()))
          .findAny()
          .orElse(WireCoderSetting.getDefaultInstance());
  // The order of these does not matter.
  inputDestinationsBuilder.add(
      addStageInput(dataEndpoint, stage.getInputPCollection(), components, wireCoderSetting));
  remoteOutputCodersBuilder.putAll(
      addStageOutputs(
          dataEndpoint, stage.getOutputPCollections(), components, stage.getWireCoderSettings()));
  Map<String, Map<String, SideInputSpec>> sideInputSpecs = addSideInputs(stage, components);
  Map<String, Map<String, BagUserStateSpec>> bagUserStateSpecs =
      forBagUserStates(stage, components.build());
  Map<String, Map<String, TimerSpec>> timerSpecs = forTimerSpecs(stage, components);
  lengthPrefixAnyInputCoder(stage.getInputPCollection().getId(), components);
  // Copy data from components to ProcessBundleDescriptor.
  ProcessBundleDescriptor.Builder bundleDescriptorBuilder =
      ProcessBundleDescriptor.newBuilder().setId(id);
  if (stateEndpoint != null) {
    bundleDescriptorBuilder.setStateApiServiceDescriptor(stateEndpoint);
  }
  if (timerSpecs.size() > 0) {
    // By default, use the data endpoint for timers; in the future, consider allowing a
    // different ApiServiceDescriptor to be specified for timers.
    bundleDescriptorBuilder.setTimerApiServiceDescriptor(dataEndpoint);
  }
  bundleDescriptorBuilder
      .putAllCoders(components.getCodersMap())
      .putAllEnvironments(components.getEnvironmentsMap())
      .putAllPcollections(components.getPcollectionsMap())
      .putAllWindowingStrategies(components.getWindowingStrategiesMap())
      .putAllTransforms(components.getTransformsMap());
  return ExecutableProcessBundleDescriptor.of(
      bundleDescriptorBuilder.build(),
      inputDestinationsBuilder.build(),
      remoteOutputCodersBuilder.build(),
      sideInputSpecs,
      bagUserStateSpecs,
      timerSpecs);
}
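At runtime, the timer endpoints declared by this descriptor carry BeamFnApi.Elements.Timers messages. As a point of reference, here is a minimal sketch of building such a message directly against the generated proto API; the ids are placeholders, and the payload bytes are omitted, as they are when a stream is merely being closed:

import org.apache.beam.model.fnexecution.v1.BeamFnApi;
import org.apache.beam.model.fnexecution.v1.BeamFnApi.Elements;

public class TimersElementSketch {
  // Builds an Elements message whose single Timers entry marks end-of-stream
  // (isLast=true) for one (transform, timer family) endpoint.
  public static Elements lastTimersElement(
      String instructionId, String transformId, String timerFamilyId) {
    return Elements.newBuilder()
        .addTimers(
            BeamFnApi.Elements.Timers.newBuilder()
                .setInstructionId(instructionId)
                .setTransformId(transformId)
                .setTimerFamilyId(timerFamilyId)
                .setIsLast(true))
        .build();
  }
}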
Use of org.apache.beam.model.fnexecution.v1.BeamFnApi.Elements.Timers in project beam by apache.
The class BeamFnDataOutboundAggregator, method sendOrCollectBufferedDataAndFinishOutboundStreams.
/**
 * Closes the streams for all registered outbound endpoints. Should be called at the end of each
 * bundle. Returns the buffered Elements if the BeamFnDataOutboundAggregator was started with
 * collectElementsIfNoFlushes=true and there was no previous flush in this bundle; otherwise
 * returns null.
 */
public Elements sendOrCollectBufferedDataAndFinishOutboundStreams() {
  if (outputTimersReceivers.isEmpty() && outputDataReceivers.isEmpty()) {
    return null;
  }
  Elements.Builder bufferedElements;
  if (timeLimit > 0) {
    synchronized (flushLock) {
      bufferedElements = convertBufferForTransmission();
    }
  } else {
    bufferedElements = convertBufferForTransmission();
  }
  LOG.debug(
      "Closing streams for instruction {} and outbound data {} and timers {}.",
      processBundleRequestIdSupplier.get(),
      outputDataReceivers,
      outputTimersReceivers);
  for (Map.Entry<String, Receiver<?>> entry : outputDataReceivers.entrySet()) {
    String pTransformId = entry.getKey();
    bufferedElements
        .addDataBuilder()
        .setInstructionId(processBundleRequestIdSupplier.get())
        .setTransformId(pTransformId)
        .setIsLast(true);
    entry.getValue().resetStats();
  }
  for (Map.Entry<TimerEndpoint, Receiver<?>> entry : outputTimersReceivers.entrySet()) {
    TimerEndpoint timerKey = entry.getKey();
    bufferedElements
        .addTimersBuilder()
        .setInstructionId(processBundleRequestIdSupplier.get())
        .setTransformId(timerKey.pTransformId)
        .setTimerFamilyId(timerKey.timerFamilyId)
        .setIsLast(true);
    entry.getValue().resetStats();
  }
  if (collectElementsIfNoFlushes && !hasFlushedForBundle) {
    return bufferedElements.build();
  }
  outboundObserver.onNext(bufferedElements.build());
  // This is now at the end of a bundle, so we reset hasFlushedForBundle to prepare for new
  // bundles.
  hasFlushedForBundle = false;
  return null;
}
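A hedged sketch of how an SDK harness might consume this return value at the end of a bundle; the finishBundle wrapper below is hypothetical, and it assumes the ProcessBundleResponse `elements` embedding field:

import org.apache.beam.model.fnexecution.v1.BeamFnApi;
import org.apache.beam.sdk.fn.data.BeamFnDataOutboundAggregator;

public class BundleFinishSketch {
  // `aggregator` and `responseBuilder` are assumed to be provided by the harness.
  static void finishBundle(
      BeamFnDataOutboundAggregator aggregator,
      BeamFnApi.ProcessBundleResponse.Builder responseBuilder) {
    BeamFnApi.Elements buffered = aggregator.sendOrCollectBufferedDataAndFinishOutboundStreams();
    if (buffered != null) {
      // No flush happened during the bundle: embed the buffered elements in the
      // bundle response instead of sending them over the data channel.
      responseBuilder.setElements(buffered);
    }
    // Otherwise the data was already pushed via outboundObserver.onNext(...).
  }
}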
Use of org.apache.beam.model.fnexecution.v1.BeamFnApi.Elements.Timers in project beam by apache.
The class BeamFnDataInboundObserver2, method multiplexElements.
/**
 * Dispatches the data and timers from the elements to their corresponding receivers. Returns
 * true if all the endpoints are done after dispatching the elements.
 */
public boolean multiplexElements(Elements elements) throws Exception {
  for (BeamFnApi.Elements.Data data : elements.getDataList()) {
    EndpointStatus<DataEndpoint<?>> endpoint =
        transformIdToDataEndpoint.get(data.getTransformId());
    if (endpoint == null) {
      throw new IllegalStateException(
          String.format(
              "Unable to find inbound data receiver for instruction %s and transform %s.",
              data.getInstructionId(), data.getTransformId()));
    } else if (endpoint.isDone) {
      throw new IllegalStateException(
          String.format(
              "Received data after inbound data receiver is done for instruction %s and transform %s.",
              data.getInstructionId(), data.getTransformId()));
    }
    InputStream inputStream = data.getData().newInput();
    Coder<Object> coder = (Coder<Object>) endpoint.endpoint.getCoder();
    FnDataReceiver<Object> receiver = (FnDataReceiver<Object>) endpoint.endpoint.getReceiver();
    while (inputStream.available() > 0) {
      receiver.accept(coder.decode(inputStream));
    }
    if (data.getIsLast()) {
      endpoint.isDone = true;
      numEndpointsThatAreIncomplete -= 1;
    }
  }
  for (BeamFnApi.Elements.Timers timers : elements.getTimersList()) {
    Map<String, EndpointStatus<TimerEndpoint<?>>> timerFamilyIdToEndpoints =
        transformIdToTimerFamilyIdToTimerEndpoint.get(timers.getTransformId());
    if (timerFamilyIdToEndpoints == null) {
      throw new IllegalStateException(
          String.format(
              "Unable to find inbound timer receiver for instruction %s, transform %s, and timer family %s.",
              timers.getInstructionId(), timers.getTransformId(), timers.getTimerFamilyId()));
    }
    EndpointStatus<TimerEndpoint<?>> endpoint =
        timerFamilyIdToEndpoints.get(timers.getTimerFamilyId());
    if (endpoint == null) {
      throw new IllegalStateException(
          String.format(
              "Unable to find inbound timer receiver for instruction %s, transform %s, and timer family %s.",
              timers.getInstructionId(), timers.getTransformId(), timers.getTimerFamilyId()));
    } else if (endpoint.isDone) {
      throw new IllegalStateException(
          String.format(
              "Received timer after inbound timer receiver is done for instruction %s, transform %s, and timer family %s.",
              timers.getInstructionId(), timers.getTransformId(), timers.getTimerFamilyId()));
    }
    InputStream inputStream = timers.getTimers().newInput();
    Coder<Object> coder = (Coder<Object>) endpoint.endpoint.getCoder();
    FnDataReceiver<Object> receiver = (FnDataReceiver<Object>) endpoint.endpoint.getReceiver();
    while (inputStream.available() > 0) {
      receiver.accept(coder.decode(inputStream));
    }
    if (timers.getIsLast()) {
      endpoint.isDone = true;
      numEndpointsThatAreIncomplete -= 1;
    }
  }
  return numEndpointsThatAreIncomplete == 0;
}
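The `while (inputStream.available() > 0)` loops rely on the FnAPI convention that a `data`/`timers` payload is a simple concatenation of individually encoded elements. A minimal sketch of that convention in isolation, using a coder whose nested encoding supports concatenation (the class and values are illustrative):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.List;
import org.apache.beam.sdk.coders.StringUtf8Coder;

public class ConcatenatedPayloadSketch {
  public static void main(String[] args) throws Exception {
    StringUtf8Coder coder = StringUtf8Coder.of();
    // Encode several elements back-to-back, as a Data/Timers payload would be.
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    coder.encode("a", out);
    coder.encode("bc", out);
    coder.encode("def", out);
    // Decode until the stream is exhausted, mirroring multiplexElements.
    InputStream in = new ByteArrayInputStream(out.toByteArray());
    List<String> decoded = new ArrayList<>();
    while (in.available() > 0) {
      decoded.add(coder.decode(in));
    }
    System.out.println(decoded); // [a, bc, def]
  }
}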
Use of org.apache.beam.model.fnexecution.v1.BeamFnApi.Elements.Timers in project beam by apache.
The class ExecutableStageDoFnOperatorTest, method testWatermarkHandling.
@Test
public void testWatermarkHandling() throws Exception {
  TupleTag<Integer> mainOutput = new TupleTag<>("main-output");
  DoFnOperator.MultiOutputOutputManagerFactory<Integer> outputManagerFactory =
      new DoFnOperator.MultiOutputOutputManagerFactory(
          mainOutput,
          VoidCoder.of(),
          new SerializablePipelineOptions(FlinkPipelineOptions.defaults()));
  ExecutableStageDoFnOperator<KV<String, Integer>, Integer> operator =
      getOperator(
          mainOutput,
          Collections.emptyList(),
          outputManagerFactory,
          WindowingStrategy.of(FixedWindows.of(Duration.millis(10))),
          StringUtf8Coder.of(),
          WindowedValue.getFullCoder(
              KvCoder.of(StringUtf8Coder.of(), VarIntCoder.of()), IntervalWindow.getCoder()));
  KeyedOneInputStreamOperatorTestHarness<String, WindowedValue<KV<String, Integer>>, WindowedValue<Integer>>
      testHarness =
          new KeyedOneInputStreamOperatorTestHarness<>(
              operator,
              val -> val.getValue().getKey(),
              new CoderTypeInformation<>(StringUtf8Coder.of(), FlinkPipelineOptions.defaults()));
  RemoteBundle bundle = Mockito.mock(RemoteBundle.class);
  when(bundle.getInputReceivers())
      .thenReturn(
          ImmutableMap.<String, FnDataReceiver<WindowedValue>>builder()
              .put("input", Mockito.mock(FnDataReceiver.class))
              .build());
  when(bundle.getTimerReceivers())
      .thenReturn(
          ImmutableMap.<KV<String, String>, FnDataReceiver<WindowedValue>>builder()
              .put(KV.of("transform", "timer"), Mockito.mock(FnDataReceiver.class))
              .put(KV.of("transform", "timer2"), Mockito.mock(FnDataReceiver.class))
              .put(KV.of("transform", "timer3"), Mockito.mock(FnDataReceiver.class))
              .build());
  when(stageBundleFactory.getBundle(any(), any(), any(), any(), any(), any())).thenReturn(bundle);
  testHarness.open();
  assertThat(
      operator.getCurrentOutputWatermark(), is(BoundedWindow.TIMESTAMP_MIN_VALUE.getMillis()));
  // No bundle has been started, so the watermark can be freely advanced.
  testHarness.processWatermark(0);
  assertThat(operator.getCurrentOutputWatermark(), is(0L));
  // Trigger a new bundle.
  IntervalWindow intervalWindow = new IntervalWindow(new Instant(0), new Instant(9));
  WindowedValue<KV<String, Integer>> windowedValue =
      WindowedValue.of(KV.of("one", 1), Instant.now(), intervalWindow, PaneInfo.NO_FIRING);
  testHarness.processElement(new StreamRecord<>(windowedValue));
  // The output watermark should be held back during the bundle.
  testHarness.processWatermark(1);
  assertThat(operator.getEffectiveInputWatermark(), is(1L));
  assertThat(operator.getCurrentOutputWatermark(), is(0L));
  // After the bundle has finished, the watermark should be advanced.
  operator.invokeFinishBundle();
  assertThat(operator.getCurrentOutputWatermark(), is(1L));
  // Bundle finished; the watermark can be freely advanced again.
  testHarness.processWatermark(2);
  assertThat(operator.getEffectiveInputWatermark(), is(2L));
  assertThat(operator.getCurrentOutputWatermark(), is(2L));
  // Trigger a new bundle.
  testHarness.processElement(new StreamRecord<>(windowedValue));
  // Cleanup timer.
  assertThat(testHarness.numEventTimeTimers(), is(1));
  // Set a timer.
  Instant timerTarget = new Instant(5);
  Instant timerTarget2 = new Instant(6);
  operator.getLockToAcquireForStateAccessDuringBundles().lock();
  BiConsumer<String, Instant> timerConsumer =
      (timerId, timestamp) ->
          operator.setTimer(
              Timer.of(
                  windowedValue.getValue().getKey(),
                  "",
                  windowedValue.getWindows(),
                  timestamp,
                  timestamp,
                  PaneInfo.NO_FIRING),
              TimerInternals.TimerData.of(
                  "",
                  TimerReceiverFactory.encodeToTimerDataTimerId("transform", timerId),
                  StateNamespaces.window(IntervalWindow.getCoder(), intervalWindow),
                  timestamp,
                  timestamp,
                  TimeDomain.EVENT_TIME));
  timerConsumer.accept("timer", timerTarget);
  timerConsumer.accept("timer2", timerTarget2);
  assertThat(testHarness.numEventTimeTimers(), is(3));
  // Advance the input watermark past the timer and check that the output watermark is held back.
  long targetWatermark = timerTarget.getMillis() + 100;
  testHarness.processWatermark(targetWatermark);
  // Do not yet advance the output watermark because we are still processing a bundle.
  assertThat(testHarness.numEventTimeTimers(), is(3));
  assertThat(operator.getCurrentOutputWatermark(), is(2L));
  // Check that the timers are fired but the output watermark is advanced no further than
  // the minimum timer timestamp of the previous bundle, because we are still processing a
  // bundle which might contain more timers.
  // Timers can create loops if they keep rescheduling themselves when firing.
  // Thus, we advance the watermark asynchronously to allow checkpointing to run.
  operator.invokeFinishBundle();
  assertThat(testHarness.numEventTimeTimers(), is(3));
  testHarness.setProcessingTime(testHarness.getProcessingTime() + 1);
  assertThat(testHarness.numEventTimeTimers(), is(0));
  assertThat(operator.getCurrentOutputWatermark(), is(5L));
  // The output watermark is advanced synchronously when the bundle finishes;
  // no more timers are scheduled.
  operator.invokeFinishBundle();
  assertThat(operator.getCurrentOutputWatermark(), is(targetWatermark));
  assertThat(testHarness.numEventTimeTimers(), is(0));
  // The watermark is advanced in a blocking fashion on close, not via timers.
  // Create a bundle with a pending timer to simulate that.
  testHarness.processElement(new StreamRecord<>(windowedValue));
  timerConsumer.accept("timer3", new Instant(targetWatermark));
  assertThat(testHarness.numEventTimeTimers(), is(1));
  // This should block until the watermark reaches Long.MAX_VALUE.
  testHarness.close();
  assertThat(testHarness.numEventTimeTimers(), is(0));
  assertThat(operator.getCurrentOutputWatermark(), is(Long.MAX_VALUE));
}
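The test routes timers through string ids built by TimerReceiverFactory.encodeToTimerDataTimerId. A small sketch of that round-trip, assuming the matching decode helper (decodeTimerDataTimerId) that pairs with it in TimerReceiverFactory:

import org.apache.beam.runners.fnexecution.control.TimerReceiverFactory;
import org.apache.beam.sdk.values.KV;

public class TimerIdRoundTripSketch {
  public static void main(String[] args) {
    // Pack a (transform id, timer family id) pair into the single string id used
    // for TimerData, then recover the pair on the receiving side.
    String timerDataId = TimerReceiverFactory.encodeToTimerDataTimerId("transform", "timer");
    KV<String, String> decoded = TimerReceiverFactory.decodeTimerDataTimerId(timerDataId);
    System.out.println(decoded.getKey() + " / " + decoded.getValue()); // transform / timer
  }
}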
Use of org.apache.beam.model.fnexecution.v1.BeamFnApi.Elements.Timers in project beam by apache.
The class StreamingDataflowWorker, method process.
private void process(
    final SdkWorkerHarness worker,
    final ComputationState computationState,
    final Instant inputDataWatermark,
    @Nullable final Instant outputDataWatermark,
    @Nullable final Instant synchronizedProcessingTime,
    final Work work) {
  final Windmill.WorkItem workItem = work.getWorkItem();
  final String computationId = computationState.getComputationId();
  final ByteString key = workItem.getKey();
  work.setState(State.PROCESSING);
  {
    StringBuilder workIdBuilder = new StringBuilder(33);
    workIdBuilder.append(Long.toHexString(workItem.getShardingKey()));
    workIdBuilder.append('-');
    workIdBuilder.append(Long.toHexString(workItem.getWorkToken()));
    DataflowWorkerLoggingMDC.setWorkId(workIdBuilder.toString());
  }
  DataflowWorkerLoggingMDC.setStageName(computationId);
  LOG.debug("Starting processing for {}:\n{}", computationId, work);
  Windmill.WorkItemCommitRequest.Builder outputBuilder = initializeOutputBuilder(key, workItem);
  // Before any processing starts, call any pending OnCommit callbacks. Nothing that requires
  // cleanup should be done before this, since we might exit early here.
  callFinalizeCallbacks(workItem);
  if (workItem.getSourceState().getOnlyFinalize()) {
    outputBuilder.setSourceStateUpdates(Windmill.SourceState.newBuilder().setOnlyFinalize(true));
    work.setState(State.COMMIT_QUEUED);
    commitQueue.put(new Commit(outputBuilder.build(), computationState, work));
    return;
  }
  long processingStartTimeNanos = System.nanoTime();
  final MapTask mapTask = computationState.getMapTask();
  StageInfo stageInfo =
      stageInfoMap.computeIfAbsent(
          mapTask.getStageName(), s -> new StageInfo(s, mapTask.getSystemName(), this));
  ExecutionState executionState = null;
  try {
    executionState = computationState.getExecutionStateQueue(worker).poll();
    if (executionState == null) {
      MutableNetwork<Node, Edge> mapTaskNetwork = mapTaskToNetwork.apply(mapTask);
      if (LOG.isDebugEnabled()) {
        LOG.debug("Network as Graphviz .dot: {}", Networks.toDot(mapTaskNetwork));
      }
      ParallelInstructionNode readNode =
          (ParallelInstructionNode)
              Iterables.find(
                  mapTaskNetwork.nodes(),
                  node ->
                      node instanceof ParallelInstructionNode
                          && ((ParallelInstructionNode) node).getParallelInstruction().getRead()
                              != null);
      InstructionOutputNode readOutputNode =
          (InstructionOutputNode) Iterables.getOnlyElement(mapTaskNetwork.successors(readNode));
      DataflowExecutionContext.DataflowExecutionStateTracker executionStateTracker =
          new DataflowExecutionContext.DataflowExecutionStateTracker(
              ExecutionStateSampler.instance(),
              stageInfo.executionStateRegistry.getState(
                  NameContext.forStage(mapTask.getStageName()),
                  "other",
                  null,
                  ScopedProfiler.INSTANCE.emptyScope()),
              stageInfo.deltaCounters,
              options,
              computationId);
      StreamingModeExecutionContext context =
          new StreamingModeExecutionContext(
              pendingDeltaCounters,
              computationId,
              readerCache,
              !computationState.getTransformUserNameToStateFamily().isEmpty()
                  ? computationState.getTransformUserNameToStateFamily()
                  : stateNameMap,
              stateCache.forComputation(computationId),
              stageInfo.metricsContainerRegistry,
              executionStateTracker,
              stageInfo.executionStateRegistry,
              maxSinkBytes);
      DataflowMapTaskExecutor mapTaskExecutor =
          mapTaskExecutorFactory.create(
              worker.getControlClientHandler(),
              worker.getGrpcDataFnServer(),
              sdkHarnessRegistry.beamFnDataApiServiceDescriptor(),
              worker.getGrpcStateFnServer(),
              mapTaskNetwork,
              options,
              mapTask.getStageName(),
              readerRegistry,
              sinkRegistry,
              context,
              pendingDeltaCounters,
              idGenerator);
      ReadOperation readOperation = mapTaskExecutor.getReadOperation();
      // Disable progress updates, since their results are unused for streaming
      // and enabling them involves starting a thread.
      readOperation.setProgressUpdatePeriodMs(ReadOperation.DONT_UPDATE_PERIODICALLY);
      Preconditions.checkState(
          mapTaskExecutor.supportsRestart(),
          "Streaming runner requires all operations support restart.");
      Coder<?> readCoder =
          CloudObjects.coderFromCloudObject(
              CloudObject.fromSpec(readOutputNode.getInstructionOutput().getCodec()));
      Coder<?> keyCoder = extractKeyCoder(readCoder);
      // If using a custom source, count bytes read for autoscaling.
      if (CustomSources.class
          .getName()
          .equals(
              readNode.getParallelInstruction().getRead().getSource().getSpec().get("@type"))) {
        NameContext nameContext =
            NameContext.create(
                mapTask.getStageName(),
                readNode.getParallelInstruction().getOriginalName(),
                readNode.getParallelInstruction().getSystemName(),
                readNode.getParallelInstruction().getName());
        readOperation.receivers[0].addOutputCounter(
            new OutputObjectAndByteCounter(
                    new IntrinsicMapTaskExecutorFactory.ElementByteSizeObservableCoder<>(
                        readCoder),
                    mapTaskExecutor.getOutputCounters(),
                    nameContext)
                .setSamplingPeriod(100)
                .countBytes("dataflow_input_size-" + mapTask.getSystemName()));
      }
      executionState =
          new ExecutionState(mapTaskExecutor, context, keyCoder, executionStateTracker);
    }
    WindmillStateReader stateReader =
        new WindmillStateReader(
            metricTrackingWindmillServer,
            computationId,
            key,
            workItem.getShardingKey(),
            workItem.getWorkToken());
    StateFetcher localStateFetcher = stateFetcher.byteTrackingView();
    // If the read outputs KVs, then we can decode Windmill's byte key into a userland
    // key object and provide it to the execution context for use with per-key state.
    // Otherwise, we pass null.
    //
    // The coder type that will be present is:
    // WindowedValueCoder(TimerOrElementCoder(KvCoder))
    @Nullable Coder<?> keyCoder = executionState.getKeyCoder();
    @Nullable
    Object executionKey =
        keyCoder == null ? null : keyCoder.decode(key.newInput(), Coder.Context.OUTER);
    if (workItem.hasHotKeyInfo()) {
      Windmill.HotKeyInfo hotKeyInfo = workItem.getHotKeyInfo();
      Duration hotKeyAge = Duration.millis(hotKeyInfo.getHotKeyAgeUsec() / 1000);
      // The MapTask instructions are ordered by dependencies, such that the first element is
      // always going to be the shuffle task.
      String stepName = computationState.getMapTask().getInstructions().get(0).getName();
      if (options.isHotKeyLoggingEnabled() && keyCoder != null) {
        hotKeyLogger.logHotKeyDetection(stepName, hotKeyAge, executionKey);
      } else {
        hotKeyLogger.logHotKeyDetection(stepName, hotKeyAge);
      }
    }
    executionState
        .getContext()
        .start(
            executionKey,
            workItem,
            inputDataWatermark,
            outputDataWatermark,
            synchronizedProcessingTime,
            stateReader,
            localStateFetcher,
            outputBuilder);
    // Blocks while executing work.
    executionState.getWorkExecutor().execute();
    Iterables.addAll(
        this.pendingMonitoringInfos, executionState.getWorkExecutor().extractMetricUpdates());
    commitCallbacks.putAll(executionState.getContext().flushState());
    // Release the execution state for another thread to use.
    computationState.getExecutionStateQueue(worker).offer(executionState);
    executionState = null;
    // Add the output to the commit queue.
    work.setState(State.COMMIT_QUEUED);
    WorkItemCommitRequest commitRequest = outputBuilder.build();
    int byteLimit = maxWorkItemCommitBytes;
    int commitSize = commitRequest.getSerializedSize();
    int estimatedCommitSize = commitSize < 0 ? Integer.MAX_VALUE : commitSize;
    // Detect overflow of the integer serialized size, or whether the byte limit was exceeded.
    windmillMaxObservedWorkItemCommitBytes.addValue(estimatedCommitSize);
    if (commitSize < 0 || commitSize > byteLimit) {
      KeyCommitTooLargeException e =
          KeyCommitTooLargeException.causedBy(computationId, byteLimit, commitRequest);
      reportFailure(computationId, workItem, e);
      LOG.error(e.toString());
      // Drop the current request in favor of a new, minimal one requesting truncation.
      // Messages, timers, counters, and other commit content will not be used by the service,
      // so we're purposefully dropping them here.
      commitRequest = buildWorkItemTruncationRequest(key, workItem, estimatedCommitSize);
    }
    commitQueue.put(new Commit(commitRequest, computationState, work));
    // Compute shuffle and state byte statistics; these will be flushed asynchronously.
    long stateBytesWritten = outputBuilder.clearOutputMessages().build().getSerializedSize();
    long shuffleBytesRead = 0;
    for (Windmill.InputMessageBundle bundle : workItem.getMessageBundlesList()) {
      for (Windmill.Message message : bundle.getMessagesList()) {
        shuffleBytesRead += message.getSerializedSize();
      }
    }
    long stateBytesRead = stateReader.getBytesRead() + localStateFetcher.getBytesRead();
    windmillShuffleBytesRead.addValue(shuffleBytesRead);
    windmillStateBytesRead.addValue(stateBytesRead);
    windmillStateBytesWritten.addValue(stateBytesWritten);
    LOG.debug("Processing done for work token: {}", workItem.getWorkToken());
  } catch (Throwable t) {
    if (executionState != null) {
      try {
        executionState.getContext().invalidateCache();
        executionState.getWorkExecutor().close();
      } catch (Exception e) {
        LOG.warn("Failed to close map task executor: ", e);
      } finally {
        // Release references to potentially large objects early.
        executionState = null;
      }
    }
    t = t instanceof UserCodeException ? t.getCause() : t;
    boolean retryLocally = false;
    if (KeyTokenInvalidException.isKeyTokenInvalidException(t)) {
      LOG.debug(
          "Execution of work for computation '{}' on key '{}' failed due to token expiration. "
              + "Work will not be retried locally.",
          computationId,
          key.toStringUtf8());
    } else {
      LastExceptionDataProvider.reportException(t);
      LOG.debug("Failed work: {}", work);
      Duration elapsedTimeSinceStart = new Duration(work.getStartTime(), Instant.now());
      if (!reportFailure(computationId, workItem, t)) {
        LOG.error(
            "Execution of work for computation '{}' on key '{}' failed with uncaught exception, "
                + "and Windmill indicated not to retry locally.",
            computationId,
            key.toStringUtf8(),
            t);
      } else if (isOutOfMemoryError(t)) {
        File heapDump = memoryMonitor.tryToDumpHeap();
        LOG.error(
            "Execution of work for computation '{}' for key '{}' failed with out-of-memory. "
                + "Work will not be retried locally. Heap dump {}.",
            computationId,
            key.toStringUtf8(),
            heapDump == null ? "not written" : ("written to '" + heapDump + "'"),
            t);
      } else if (elapsedTimeSinceStart.isLongerThan(MAX_LOCAL_PROCESSING_RETRY_DURATION)) {
        LOG.error(
            "Execution of work for computation '{}' for key '{}' failed with uncaught exception, "
                + "and it will not be retried locally because the elapsed time since start {} "
                + "exceeds {}.",
            computationId,
            key.toStringUtf8(),
            elapsedTimeSinceStart,
            MAX_LOCAL_PROCESSING_RETRY_DURATION,
            t);
      } else {
        LOG.error(
            "Execution of work for computation '{}' on key '{}' failed with uncaught exception. "
                + "Work will be retried locally.",
            computationId,
            key.toStringUtf8(),
            t);
        retryLocally = true;
      }
    }
    if (retryLocally) {
      // Try again after some delay and at the end of the queue to avoid a tight loop.
      sleep(retryLocallyDelayMs);
      workUnitExecutor.forceExecute(work, work.getWorkItem().getSerializedSize());
    } else {
      // Consider the item invalid. It will eventually be retried by Windmill if it still needs
      // to be processed.
      computationState.completeWork(
          ShardedKey.create(key, workItem.getShardingKey()), workItem.getWorkToken());
    }
  } finally {
    // Update total processing time counters. Updating in the finally clause ensures that
    // work items causing exceptions are also accounted for in time spent.
    long processingTimeMsecs =
        TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - processingStartTimeNanos);
    stageInfo.totalProcessingMsecs.addValue(processingTimeMsecs);
    // Attribute all the processing time to timers if the work item contains any timers; work
    // items rarely contain both timers and message bundles, so this is a close approximation.
    // A finer split between messages and timers could be derived either here or in the DFE.
    if (work.getWorkItem().hasTimers()) {
      stageInfo.timerProcessingMsecs.addValue(processingTimeMsecs);
    }
    DataflowWorkerLoggingMDC.setWorkId(null);
    DataflowWorkerLoggingMDC.setStageName(null);
  }
}
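One detail worth noting from the catch block: Joda's two-instant Duration constructor is (start, end), so reversing the arguments silently yields a negative duration and the retry-timeout check never trips. A minimal illustration:

import org.joda.time.Duration;
import org.joda.time.Instant;

public class ElapsedTimeSketch {
  public static void main(String[] args) {
    Instant start = Instant.now().minus(Duration.standardMinutes(5));
    // Correct order: end minus start gives a positive elapsed duration.
    Duration elapsed = new Duration(start, Instant.now());
    // Reversed arguments would produce roughly -5 minutes, defeating any
    // isLongerThan(...) threshold check.
    System.out.println(elapsed.isLongerThan(Duration.standardMinutes(1))); // true
  }
}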