Use of com.google.api.services.dataflow.model.WorkItem in project beam by apache.
The class DataflowWorkUnitClient, method getWorkItemInternal.
private Optional<WorkItem> getWorkItemInternal(List<String> workItemTypes, List<String> capabilities) throws IOException {
LeaseWorkItemRequest request = new LeaseWorkItemRequest();
request.setFactory(Transport.getJsonFactory());
request.setWorkItemTypes(workItemTypes);
request.setWorkerCapabilities(capabilities);
request.setWorkerId(options.getWorkerId());
request.setCurrentWorkerTime(toCloudTime(DateTime.now()));
// This shouldn't be necessary, but a valid cloud duration string is
// required by the Google API parsing framework. TODO: Fix the framework
// so that an empty or not-present string can be used as a default value.
request.setRequestedLeaseDuration(toCloudDuration(Duration.millis(WorkProgressUpdater.DEFAULT_LEASE_DURATION_MILLIS)));
logger.debug("Leasing work: {}", request);
LeaseWorkItemResponse response =
    dataflow
        .projects()
        .locations()
        .jobs()
        .workItems()
        .lease(options.getProject(), options.getRegion(), options.getJobId(), request)
        .execute();
logger.debug("Lease work response: {}", response);
List<WorkItem> workItems = response.getWorkItems();
if (workItems == null || workItems.isEmpty()) {
// We didn't lease any work.
return Optional.absent();
} else if (workItems.size() > 1) {
throw new IOException("This version of the SDK expects no more than one work item from the service: " + response);
}
WorkItem work = workItems.get(0);
// Exactly one work item was leased; return it.
return Optional.of(work);
}
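For context, a minimal caller sketch, not Beam source: it shows one way the returned Optional might be consumed; the interface, the work item types, and the back-off handling are all illustrative assumptions.
// Hypothetical caller sketch; only the method under discussion is modeled.
import com.google.api.services.dataflow.model.WorkItem;
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableList;
import java.io.IOException;
import java.util.List;

class LeaseCallerSketch {
  /** Stand-in for the client above; names and shapes are assumptions for illustration. */
  interface LeaseSource {
    Optional<WorkItem> getWorkItemInternal(List<String> workItemTypes, List<String> capabilities)
        throws IOException;
  }

  static Optional<WorkItem> tryLease(LeaseSource source, String workerId) throws IOException {
    // "map_task" and "seq_map_task" are illustrative work item types, not a definitive list.
    Optional<WorkItem> leased =
        source.getWorkItemInternal(
            ImmutableList.of("map_task", "seq_map_task"), ImmutableList.of(workerId));
    if (!leased.isPresent()) {
      // No work was leased; callers typically back off and poll again.
      return Optional.absent();
    }
    return leased;
  }
}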
Use of com.google.api.services.dataflow.model.WorkItem in project beam by apache.
The class StreamingDataflowWorker, method sendWorkerUpdatesToDataflowService.
/**
* Sends counter updates to Dataflow backend.
*/
private void sendWorkerUpdatesToDataflowService(CounterSet deltaCounters, CounterSet cumulativeCounters) throws IOException {
// Throttle time is tracked by the windmillServer but is reported to DFE here.
windmillQuotaThrottling.addValue(windmillServer.getAndResetThrottleTime());
if (memoryMonitor.isThrashing()) {
memoryThrashing.addValue(1);
}
List<CounterUpdate> counterUpdates = new ArrayList<>(128);
if (publishCounters) {
stageInfoMap.values().forEach(s -> counterUpdates.addAll(s.extractCounterUpdates()));
counterUpdates.addAll(cumulativeCounters.extractUpdates(false, DataflowCounterUpdateExtractor.INSTANCE));
counterUpdates.addAll(deltaCounters.extractModifiedDeltaUpdates(DataflowCounterUpdateExtractor.INSTANCE));
if (hasExperiment(options, "beam_fn_api")) {
Map<Object, List<CounterUpdate>> fnApiCounters = new HashMap<>();
while (!this.pendingMonitoringInfos.isEmpty()) {
final CounterUpdate item = this.pendingMonitoringInfos.poll();
// FnAPI counters arrive marked as cumulative; re-flag them as deltas and group them by key
// before they are attached to the WorkItem status below.
if (item.getCumulative()) {
item.setCumulative(false);
// Group counterUpdates by counterUpdateKey so they can be aggregated before sending to
// dataflow service.
fnApiCounters.computeIfAbsent(getCounterUpdateKey(item), k -> new ArrayList<>()).add(item);
} else {
// This is a safety check in case new counter type appears in FnAPI.
throw new UnsupportedOperationException("FnApi counters are expected to provide cumulative values." + " Please, update conversion to delta logic" + " if non-cumulative counter type is required.");
}
}
// Aggregate the grouped FnAPI counter updates before reporting,
// so we can avoid excessive I/Os for reporting to dataflow service.
for (List<CounterUpdate> counterUpdateList : fnApiCounters.values()) {
if (counterUpdateList.isEmpty()) {
continue;
}
List<CounterUpdate> aggregatedCounterUpdateList = CounterUpdateAggregators.aggregate(counterUpdateList);
// If more than a handful of updates remain after aggregation, the counters were effectively
// not aggregated; warn (with the throttling below) about the excess updates.
if (aggregatedCounterUpdateList.size() > 10) {
CounterUpdate head = aggregatedCounterUpdateList.get(0);
this.counterAggregationErrorCount.getAndIncrement();
// log warning message only when error count is the power of 2 to avoid spamming.
if (this.counterAggregationErrorCount.get() > 10 && Long.bitCount(this.counterAggregationErrorCount.get()) == 1) {
LOG.warn("Found non-aggregated counter updates of size {} with kind {}, this will likely " + "cause performance degradation and excessive GC if size is large.", counterUpdateList.size(), MoreObjects.firstNonNull(head.getNameAndKind(), head.getStructuredNameAndMetadata()));
}
}
counterUpdates.addAll(aggregatedCounterUpdateList);
}
}
}
// Handle duplicate counters from different stages. Store all the counters in a multi-map and
// send the counters that appear multiple times in separate RPCs. Same logical counter could
// appear in multiple stages if a step runs in multiple stages (as with flatten-unzipped stages)
// especially if the counter definition does not set execution_step_name.
ListMultimap<Object, CounterUpdate> counterMultimap = MultimapBuilder.hashKeys(counterUpdates.size()).linkedListValues().build();
boolean hasDuplicates = false;
for (CounterUpdate c : counterUpdates) {
Object key = getCounterUpdateKey(c);
if (counterMultimap.containsKey(key)) {
hasDuplicates = true;
}
counterMultimap.put(key, c);
}
// Clears counterUpdates and enqueues unique counters from counterMultimap. If a counter
// appears more than once, one of them is extracted leaving the remaining in the map.
Runnable extractUniqueCounters = () -> {
counterUpdates.clear();
for (Iterator<Object> iter = counterMultimap.keySet().iterator(); iter.hasNext(); ) {
List<CounterUpdate> counters = counterMultimap.get(iter.next());
counterUpdates.add(counters.get(0));
if (counters.size() == 1) {
// There is a single value. Remove the entry through the iterator.
iter.remove();
} else {
// Otherwise remove the first value.
counters.remove(0);
}
}
};
if (hasDuplicates) {
extractUniqueCounters.run();
} else {
// Common case: no duplicates. We can just send counterUpdates, empty the multimap.
counterMultimap.clear();
}
List<Status> errors;
synchronized (pendingFailuresToReport) {
errors = new ArrayList<>(pendingFailuresToReport.size());
for (String stackTrace : pendingFailuresToReport) {
errors.add(
    new Status()
        .setCode(2) // rpc.Code.UNKNOWN
        .setMessage(stackTrace));
}
// Best effort only, no need to wait till successfully sent.
pendingFailuresToReport.clear();
}
WorkItemStatus workItemStatus =
    new WorkItemStatus()
        .setWorkItemId(WINDMILL_COUNTER_UPDATE_WORK_ID)
        .setErrors(errors)
        .setCounterUpdates(counterUpdates);
workUnitClient.reportWorkItemStatus(workItemStatus);
// Send any counters appearing more than once in subsequent RPCs:
while (!counterMultimap.isEmpty()) {
extractUniqueCounters.run();
workUnitClient.reportWorkItemStatus(
    new WorkItemStatus()
        .setWorkItemId(WINDMILL_COUNTER_UPDATE_WORK_ID)
        .setCounterUpdates(counterUpdates));
}
}
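The duplicate-counter handling above is easier to follow in isolation. Below is a minimal, standalone sketch of the same extract-one-per-key pattern; plain strings stand in for CounterUpdate, and all names and values are illustrative.
// Standalone sketch of the duplicate-counter handling above, not Beam source.
import com.google.common.collect.ListMultimap;
import com.google.common.collect.MultimapBuilder;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

class CounterDedupSketch {
  public static void main(String[] args) {
    ListMultimap<String, String> byKey = MultimapBuilder.hashKeys().linkedListValues().build();
    byKey.put("bytes_read", "update-from-stage-1");
    byKey.put("bytes_read", "update-from-stage-2"); // same logical counter in two stages
    byKey.put("elements_processed", "update-from-stage-1");

    // Each pass takes at most one update per key, mirroring extractUniqueCounters above,
    // so duplicates end up in separate reports instead of one report with conflicting keys.
    while (!byKey.isEmpty()) {
      List<String> batch = new ArrayList<>();
      for (Iterator<String> iter = byKey.keySet().iterator(); iter.hasNext(); ) {
        List<String> values = byKey.get(iter.next());
        batch.add(values.get(0));
        if (values.size() == 1) {
          iter.remove(); // last value for this key; remove via the iterator
        } else {
          values.remove(0); // keep the remaining values for a later pass
        }
      }
      System.out.println("report: " + batch); // stands in for reportWorkItemStatus
    }
  }
}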
Use of com.google.api.services.dataflow.model.WorkItem in project beam by apache.
The class StreamingDataflowWorker, method process.
private void process(
    final SdkWorkerHarness worker,
    final ComputationState computationState,
    final Instant inputDataWatermark,
    @Nullable final Instant outputDataWatermark,
    @Nullable final Instant synchronizedProcessingTime,
    final Work work) {
final Windmill.WorkItem workItem = work.getWorkItem();
final String computationId = computationState.getComputationId();
final ByteString key = workItem.getKey();
work.setState(State.PROCESSING);
{
StringBuilder workIdBuilder = new StringBuilder(33);
workIdBuilder.append(Long.toHexString(workItem.getShardingKey()));
workIdBuilder.append('-');
workIdBuilder.append(Long.toHexString(workItem.getWorkToken()));
DataflowWorkerLoggingMDC.setWorkId(workIdBuilder.toString());
}
DataflowWorkerLoggingMDC.setStageName(computationId);
LOG.debug("Starting processing for {}:\n{}", computationId, work);
Windmill.WorkItemCommitRequest.Builder outputBuilder = initializeOutputBuilder(key, workItem);
// Before any processing starts, call any pending OnCommit callbacks. Nothing that requires
// cleanup should be done before this, since we might exit early here.
callFinalizeCallbacks(workItem);
if (workItem.getSourceState().getOnlyFinalize()) {
outputBuilder.setSourceStateUpdates(Windmill.SourceState.newBuilder().setOnlyFinalize(true));
work.setState(State.COMMIT_QUEUED);
commitQueue.put(new Commit(outputBuilder.build(), computationState, work));
return;
}
long processingStartTimeNanos = System.nanoTime();
final MapTask mapTask = computationState.getMapTask();
StageInfo stageInfo = stageInfoMap.computeIfAbsent(mapTask.getStageName(), s -> new StageInfo(s, mapTask.getSystemName(), this));
ExecutionState executionState = null;
try {
executionState = computationState.getExecutionStateQueue(worker).poll();
if (executionState == null) {
MutableNetwork<Node, Edge> mapTaskNetwork = mapTaskToNetwork.apply(mapTask);
if (LOG.isDebugEnabled()) {
LOG.debug("Network as Graphviz .dot: {}", Networks.toDot(mapTaskNetwork));
}
ParallelInstructionNode readNode =
    (ParallelInstructionNode)
        Iterables.find(
            mapTaskNetwork.nodes(),
            node ->
                node instanceof ParallelInstructionNode
                    && ((ParallelInstructionNode) node).getParallelInstruction().getRead() != null);
InstructionOutputNode readOutputNode =
    (InstructionOutputNode) Iterables.getOnlyElement(mapTaskNetwork.successors(readNode));
DataflowExecutionContext.DataflowExecutionStateTracker executionStateTracker =
    new DataflowExecutionContext.DataflowExecutionStateTracker(
        ExecutionStateSampler.instance(),
        stageInfo.executionStateRegistry.getState(
            NameContext.forStage(mapTask.getStageName()),
            "other",
            null,
            ScopedProfiler.INSTANCE.emptyScope()),
        stageInfo.deltaCounters,
        options,
        computationId);
StreamingModeExecutionContext context =
    new StreamingModeExecutionContext(
        pendingDeltaCounters,
        computationId,
        readerCache,
        !computationState.getTransformUserNameToStateFamily().isEmpty()
            ? computationState.getTransformUserNameToStateFamily()
            : stateNameMap,
        stateCache.forComputation(computationId),
        stageInfo.metricsContainerRegistry,
        executionStateTracker,
        stageInfo.executionStateRegistry,
        maxSinkBytes);
DataflowMapTaskExecutor mapTaskExecutor =
    mapTaskExecutorFactory.create(
        worker.getControlClientHandler(),
        worker.getGrpcDataFnServer(),
        sdkHarnessRegistry.beamFnDataApiServiceDescriptor(),
        worker.getGrpcStateFnServer(),
        mapTaskNetwork,
        options,
        mapTask.getStageName(),
        readerRegistry,
        sinkRegistry,
        context,
        pendingDeltaCounters,
        idGenerator);
ReadOperation readOperation = mapTaskExecutor.getReadOperation();
// Disable progress updates since their results are unused for streaming
// and updating involves starting a thread.
readOperation.setProgressUpdatePeriodMs(ReadOperation.DONT_UPDATE_PERIODICALLY);
Preconditions.checkState(mapTaskExecutor.supportsRestart(), "Streaming runner requires all operations support restart.");
Coder<?> readCoder =
    CloudObjects.coderFromCloudObject(
        CloudObject.fromSpec(readOutputNode.getInstructionOutput().getCodec()));
Coder<?> keyCoder = extractKeyCoder(readCoder);
// If using a custom source, count bytes read for autoscaling.
if (CustomSources.class.getName().equals(readNode.getParallelInstruction().getRead().getSource().getSpec().get("@type"))) {
NameContext nameContext =
    NameContext.create(
        mapTask.getStageName(),
        readNode.getParallelInstruction().getOriginalName(),
        readNode.getParallelInstruction().getSystemName(),
        readNode.getParallelInstruction().getName());
readOperation.receivers[0].addOutputCounter(
    new OutputObjectAndByteCounter(
            new IntrinsicMapTaskExecutorFactory.ElementByteSizeObservableCoder<>(readCoder),
            mapTaskExecutor.getOutputCounters(),
            nameContext)
        .setSamplingPeriod(100)
        .countBytes("dataflow_input_size-" + mapTask.getSystemName()));
}
executionState = new ExecutionState(mapTaskExecutor, context, keyCoder, executionStateTracker);
}
WindmillStateReader stateReader = new WindmillStateReader(metricTrackingWindmillServer, computationId, key, workItem.getShardingKey(), workItem.getWorkToken());
StateFetcher localStateFetcher = stateFetcher.byteTrackingView();
// If the read outputs KVs, then we can decode Windmill's byte key into a userland
// key object and provide it to the execution context for use with per-key state.
// Otherwise, we pass null.
//
// The coder type that will be present is:
// WindowedValueCoder(TimerOrElementCoder(KvCoder))
@Nullable Coder<?> keyCoder = executionState.getKeyCoder();
@Nullable Object executionKey = keyCoder == null ? null : keyCoder.decode(key.newInput(), Coder.Context.OUTER);
if (workItem.hasHotKeyInfo()) {
Windmill.HotKeyInfo hotKeyInfo = workItem.getHotKeyInfo();
Duration hotKeyAge = Duration.millis(hotKeyInfo.getHotKeyAgeUsec() / 1000);
// The MapTask instructions are ordered by dependencies, such that the first element is
// always going to be the shuffle task.
String stepName = computationState.getMapTask().getInstructions().get(0).getName();
if (options.isHotKeyLoggingEnabled() && keyCoder != null) {
hotKeyLogger.logHotKeyDetection(stepName, hotKeyAge, executionKey);
} else {
hotKeyLogger.logHotKeyDetection(stepName, hotKeyAge);
}
}
executionState.getContext().start(executionKey, workItem, inputDataWatermark, outputDataWatermark, synchronizedProcessingTime, stateReader, localStateFetcher, outputBuilder);
// Blocks while executing work.
executionState.getWorkExecutor().execute();
Iterables.addAll(this.pendingMonitoringInfos, executionState.getWorkExecutor().extractMetricUpdates());
commitCallbacks.putAll(executionState.getContext().flushState());
// Release the execution state for another thread to use.
computationState.getExecutionStateQueue(worker).offer(executionState);
executionState = null;
// Add the output to the commit queue.
work.setState(State.COMMIT_QUEUED);
WorkItemCommitRequest commitRequest = outputBuilder.build();
int byteLimit = maxWorkItemCommitBytes;
int commitSize = commitRequest.getSerializedSize();
int estimatedCommitSize = commitSize < 0 ? Integer.MAX_VALUE : commitSize;
// Detect overflow of the integer serialized size, or whether the byte limit was exceeded.
windmillMaxObservedWorkItemCommitBytes.addValue(estimatedCommitSize);
if (commitSize < 0 || commitSize > byteLimit) {
KeyCommitTooLargeException e = KeyCommitTooLargeException.causedBy(computationId, byteLimit, commitRequest);
reportFailure(computationId, workItem, e);
LOG.error(e.toString());
// Drop the current request in favor of a new, minimal one requesting truncation.
// Messages, timers, counters, and other commit content will not be used by the service,
// so we're purposefully dropping them here.
commitRequest = buildWorkItemTruncationRequest(key, workItem, estimatedCommitSize);
}
commitQueue.put(new Commit(commitRequest, computationState, work));
// Compute shuffle and state byte statistics; these will be flushed asynchronously.
long stateBytesWritten = outputBuilder.clearOutputMessages().build().getSerializedSize();
long shuffleBytesRead = 0;
for (Windmill.InputMessageBundle bundle : workItem.getMessageBundlesList()) {
for (Windmill.Message message : bundle.getMessagesList()) {
shuffleBytesRead += message.getSerializedSize();
}
}
long stateBytesRead = stateReader.getBytesRead() + localStateFetcher.getBytesRead();
windmillShuffleBytesRead.addValue(shuffleBytesRead);
windmillStateBytesRead.addValue(stateBytesRead);
windmillStateBytesWritten.addValue(stateBytesWritten);
LOG.debug("Processing done for work token: {}", workItem.getWorkToken());
} catch (Throwable t) {
if (executionState != null) {
try {
executionState.getContext().invalidateCache();
executionState.getWorkExecutor().close();
} catch (Exception e) {
LOG.warn("Failed to close map task executor: ", e);
} finally {
// Release references to potentially large objects early.
executionState = null;
}
}
t = t instanceof UserCodeException ? t.getCause() : t;
boolean retryLocally = false;
if (KeyTokenInvalidException.isKeyTokenInvalidException(t)) {
LOG.debug("Execution of work for computation '{}' on key '{}' failed due to token expiration. " + "Work will not be retried locally.", computationId, key.toStringUtf8());
} else {
LastExceptionDataProvider.reportException(t);
LOG.debug("Failed work: {}", work);
Duration elapsedTimeSinceStart = new Duration(work.getStartTime(), Instant.now());
if (!reportFailure(computationId, workItem, t)) {
LOG.error("Execution of work for computation '{}' on key '{}' failed with uncaught exception, " + "and Windmill indicated not to retry locally.", computationId, key.toStringUtf8(), t);
} else if (isOutOfMemoryError(t)) {
File heapDump = memoryMonitor.tryToDumpHeap();
LOG.error("Execution of work for computation '{}' for key '{}' failed with out-of-memory. " + "Work will not be retried locally. Heap dump {}.", computationId, key.toStringUtf8(), heapDump == null ? "not written" : ("written to '" + heapDump + "'"), t);
} else if (elapsedTimeSinceStart.isLongerThan(MAX_LOCAL_PROCESSING_RETRY_DURATION)) {
LOG.error("Execution of work for computation '{}' for key '{}' failed with uncaught exception, " + "and it will not be retried locally because the elapsed time since start {} " + "exceeds {}.", computationId, key.toStringUtf8(), elapsedTimeSinceStart, MAX_LOCAL_PROCESSING_RETRY_DURATION, t);
} else {
LOG.error("Execution of work for computation '{}' on key '{}' failed with uncaught exception. " + "Work will be retried locally.", computationId, key.toStringUtf8(), t);
retryLocally = true;
}
}
if (retryLocally) {
// Try again after some delay and at the end of the queue to avoid a tight loop.
sleep(retryLocallyDelayMs);
workUnitExecutor.forceExecute(work, work.getWorkItem().getSerializedSize());
} else {
// Consider the item invalid. It will eventually be retried by Windmill if it still needs to
// be processed.
computationState.completeWork(ShardedKey.create(key, workItem.getShardingKey()), workItem.getWorkToken());
}
} finally {
// Update total processing time counters. Updating in finally clause ensures that
// work items causing exceptions are also accounted in time spent.
long processingTimeMsecs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - processingStartTimeNanos);
stageInfo.totalProcessingMsecs.addValue(processingTimeMsecs);
// Attribute all of the processing time to timers when the work item contained any;
// the split between timer and message processing could be refined either here or in DFE.
if (work.getWorkItem().hasTimers()) {
stageInfo.timerProcessingMsecs.addValue(processingTimeMsecs);
}
DataflowWorkerLoggingMDC.setWorkId(null);
DataflowWorkerLoggingMDC.setStageName(null);
}
}
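The commit-size guard in the middle of process() is a self-contained pattern. The sketch below mirrors that check; the class, method, and byte limit are illustrative assumptions (the worker actually uses maxWorkItemCommitBytes), not the worker's real configuration.
// Sketch only: mirrors the overflow/limit check above.
import com.google.protobuf.MessageLite;

class CommitSizeGuardSketch {
  // Illustrative limit; the real limit comes from maxWorkItemCommitBytes.
  static final int MAX_COMMIT_BYTES = 32 << 20;

  static boolean exceedsLimit(MessageLite commit) {
    int size = commit.getSerializedSize();
    // A negative value is treated as overflow of the int-typed serialized size.
    int estimated = size < 0 ? Integer.MAX_VALUE : size;
    return estimated > MAX_COMMIT_BYTES;
  }
}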
Use of com.google.api.services.dataflow.model.WorkItem in project beam by apache.
The class StreamingDataflowWorker, method streamingDispatchLoop.
void streamingDispatchLoop() {
while (running.get()) {
GetWorkStream stream =
    windmillServer.getWorkStream(
        Windmill.GetWorkRequest.newBuilder()
            .setClientId(clientId)
            .setMaxItems(chooseMaximumBundlesOutstanding())
            .setMaxBytes(MAX_GET_WORK_FETCH_BYTES)
            .build(),
        (String computation,
            Instant inputDataWatermark,
            Instant synchronizedProcessingTime,
            Windmill.WorkItem workItem) -> {
          memoryMonitor.waitForResources("GetWork");
          scheduleWorkItem(
              getComputationState(computation),
              inputDataWatermark,
              synchronizedProcessingTime,
              workItem);
        });
try {
// If the server has not already terminated the stream on its own,
// we half-close the stream after some time and create a new one.
if (!stream.awaitTermination(GET_WORK_STREAM_TIMEOUT_MINUTES, TimeUnit.MINUTES)) {
stream.close();
}
} catch (InterruptedException e) {
// Continue processing until !running.get()
}
}
}
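The dispatch loop's shape generalizes to any renewable stream. Below is a minimal sketch of the same pattern; the WorkStream interface, the Supplier, and the three-minute window are assumptions for illustration, not the Windmill API.
// Generic sketch of the reconnect pattern above, not Beam source.
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Supplier;

class DispatchLoopSketch {
  interface WorkStream {
    boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException;

    void close();
  }

  static void dispatchLoop(AtomicBoolean running, Supplier<WorkStream> openStream) {
    while (running.get()) {
      WorkStream stream = openStream.get();
      try {
        // Half-close after a bounded window unless the server already terminated the stream.
        if (!stream.awaitTermination(3, TimeUnit.MINUTES)) {
          stream.close();
        }
      } catch (InterruptedException e) {
        // Preserve interrupt status; the loop exits once running is set to false.
        Thread.currentThread().interrupt();
      }
    }
  }
}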
Use of com.google.api.services.dataflow.model.WorkItem in project beam by apache.
The class DataflowWorkUnitClientTest, method testCloudServiceCallMapTaskStagePropagation.
@Test
public void testCloudServiceCallMapTaskStagePropagation() throws Exception {
WorkUnitClient client = new DataflowWorkUnitClient(pipelineOptions, LOG);
// Publish and acquire a map task work item, and verify we're now processing that stage.
final String stageName = "test_stage_name";
MapTask mapTask = new MapTask();
mapTask.setStageName(stageName);
WorkItem workItem = createWorkItem(PROJECT_ID, JOB_ID);
workItem.setMapTask(mapTask);
when(request.execute()).thenReturn(generateMockResponse(workItem));
assertEquals(Optional.of(workItem), client.getWorkItem());
assertEquals(stageName, DataflowWorkerLoggingMDC.getStageName());
}
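For reference, a hedged sketch of the kind of LeaseWorkItemResponse the stubbed request must ultimately yield. The id value and the use of java.util.Collections are illustrative assumptions, and the test's own helpers (createWorkItem, generateMockResponse) are not reproduced here.
// Illustrative only: a lease response carrying a single MapTask work item.
LeaseWorkItemResponse response = new LeaseWorkItemResponse();
WorkItem workItem =
    new WorkItem()
        .setId(1234L) // illustrative id
        .setProjectId(PROJECT_ID)
        .setJobId(JOB_ID)
        .setMapTask(new MapTask().setStageName("test_stage_name"));
response.setWorkItems(Collections.singletonList(workItem));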