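For orientation, here is a minimal, self-contained sketch of the CounterCell API that all of the examples below rely on. The class name, step name, and metric names are placeholders of mine, assuming only that the Beam runners-core and SDK jars are on the classpath.

import org.apache.beam.runners.core.metrics.CounterCell;
import org.apache.beam.runners.core.metrics.MetricsContainerImpl;
import org.apache.beam.sdk.metrics.MetricName;

public class CounterCellPrimer {

    public static void main(String[] args) {
        // a MetricsContainerImpl owns the metric cells for one step.
        MetricsContainerImpl container = new MetricsContainerImpl("step");
        // getCounter creates the cell on first access and returns the same cell afterwards.
        CounterCell counter = container.getCounter(MetricName.named("my.namespace", "my-counter"));
        // deltas may be any long, including negative values (see Example 1's reset idiom).
        counter.inc();
        counter.inc(41);
        counter.inc(-2);
        // getCumulative returns the sum of all deltas so far: 1 + 41 - 2 = 40.
        System.out.println(counter.getCumulative());
    }
}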

Example 1 with CounterCell

Use of org.apache.beam.runners.core.metrics.CounterCell in project beam by apache.

From class SparkGroupAlsoByWindowViaWindowSet, method groupAlsoByWindow:

public static <K, InputT, W extends BoundedWindow> JavaDStream<WindowedValue<KV<K, Iterable<InputT>>>> groupAlsoByWindow(JavaDStream<WindowedValue<KV<K, Iterable<WindowedValue<InputT>>>>> inputDStream, final Coder<K> keyCoder, final Coder<WindowedValue<InputT>> wvCoder, final WindowingStrategy<?, W> windowingStrategy, final SparkRuntimeContext runtimeContext, final List<Integer> sourceIds) {
    final IterableCoder<WindowedValue<InputT>> itrWvCoder = IterableCoder.of(wvCoder);
    final Coder<InputT> iCoder = ((FullWindowedValueCoder<InputT>) wvCoder).getValueCoder();
    final Coder<? extends BoundedWindow> wCoder = ((FullWindowedValueCoder<InputT>) wvCoder).getWindowCoder();
    final Coder<WindowedValue<KV<K, Iterable<InputT>>>> wvKvIterCoder = FullWindowedValueCoder.of(KvCoder.of(keyCoder, IterableCoder.of(iCoder)), wCoder);
    final TimerInternals.TimerDataCoder timerDataCoder = TimerInternals.TimerDataCoder.of(windowingStrategy.getWindowFn().windowCoder());
    long checkpointDurationMillis = runtimeContext.getPipelineOptions().as(SparkPipelineOptions.class).getCheckpointDurationMillis();
    // we have to switch to the Scala API to avoid Optional in the Java API; see SPARK-4819.
    // the Scala API is also broader (it exposes the actual key and the entire iterator).
    // we use coders to convert objects in the PCollection to byte arrays, so they
    // can be transferred over the network for the shuffle and be in serialized form
    // for checkpointing.
    // for readability, we add comments with actual type next to byte[].
    // to shorten line length, we use the following abbreviations:
    //---- WV: WindowedValue
    //---- Itr: Iterable
    //---- A: AccumT
    //---- I: InputT
    DStream<Tuple2<ByteArray, byte[]>> /*Itr<WV<I>>*/
    pairDStream = inputDStream.transformToPair(new Function<JavaRDD<WindowedValue<KV<K, Iterable<WindowedValue<InputT>>>>>, JavaPairRDD<ByteArray, byte[]>>() {

        // we use mapPartitions with the RDD API because it's the only available API
        // that allows preserving the partitioning.
        @Override
        public JavaPairRDD<ByteArray, byte[]> call(JavaRDD<WindowedValue<KV<K, Iterable<WindowedValue<InputT>>>>> rdd) throws Exception {
            return rdd.mapPartitions(TranslationUtils.functionToFlatMapFunction(WindowingHelpers.<KV<K, Iterable<WindowedValue<InputT>>>>unwindowFunction()), true).mapPartitionsToPair(TranslationUtils.<K, Iterable<WindowedValue<InputT>>>toPairFlatMapFunction(), true).mapPartitionsToPair(TranslationUtils.pairFunctionToPairFlatMapFunction(CoderHelpers.toByteFunction(keyCoder, itrWvCoder)), true);
        }
    }).dstream();
    PairDStreamFunctions<ByteArray, byte[]> pairDStreamFunctions = DStream.toPairDStreamFunctions(pairDStream, JavaSparkContext$.MODULE$.<ByteArray>fakeClassTag(), JavaSparkContext$.MODULE$.<byte[]>fakeClassTag(), null);
    int defaultNumPartitions = pairDStreamFunctions.defaultPartitioner$default$1();
    Partitioner partitioner = pairDStreamFunctions.defaultPartitioner(defaultNumPartitions);
    // use updateStateByKey to scan through the state and update elements and timers.
    DStream<Tuple2<ByteArray, Tuple2<StateAndTimers, List<byte[]>>>> /*WV<KV<K, Itr<I>>>*/
    firedStream = pairDStreamFunctions.updateStateByKey(new SerializableFunction1<scala.collection.Iterator<Tuple3</*K*/
    ByteArray, Seq<byte[]>, Option<Tuple2<StateAndTimers, /*WV<KV<K, Itr<I>>>*/
    List<byte[]>>>>>, scala.collection.Iterator<Tuple2</*K*/
    ByteArray, Tuple2<StateAndTimers, /*WV<KV<K, Itr<I>>>*/
    List<byte[]>>>>>() {

        @Override
        public scala.collection.Iterator<Tuple2</*K*/
        ByteArray, Tuple2<StateAndTimers, /*WV<KV<K, Itr<I>>>*/
        List<byte[]>>>> apply(final scala.collection.Iterator<Tuple3</*K*/
        ByteArray, Seq<byte[]>, Option<Tuple2<StateAndTimers, /*WV<KV<K, Itr<I>>>*/
        List<byte[]>>>>> iter) {
            //--- ACTUAL STATEFUL OPERATION:
            //
            // Input Iterator: the partition (~bundle) of a cogrouping of the input
            // and the previous state (if exists).
            //
            // Output Iterator: the output key, and the updated state.
            //
            // possible input scenarios for (K, Seq, Option<S>):
            // (1) Option<S>.isEmpty: new data with no previous state.
            // (2) Seq.isEmpty: no new data, but evaluating previous state (timer-like behaviour).
            // (3) Seq.nonEmpty && Option<S>.isDefined: new data with previous state.
            final SystemReduceFn<K, InputT, Iterable<InputT>, Iterable<InputT>, W> reduceFn = SystemReduceFn.buffering(((FullWindowedValueCoder<InputT>) wvCoder).getValueCoder());
            final OutputWindowedValueHolder<K, InputT> outputHolder = new OutputWindowedValueHolder<>();
            // use in-memory Aggregators, since Spark Accumulators are not resilient
            // in stateful operators; these are reported once done with this partition.
            final MetricsContainerImpl cellProvider = new MetricsContainerImpl("cellProvider");
            final CounterCell droppedDueToClosedWindow = cellProvider.getCounter(MetricName.named(SparkGroupAlsoByWindowViaWindowSet.class, GroupAlsoByWindowsAggregators.DROPPED_DUE_TO_CLOSED_WINDOW_COUNTER));
            final CounterCell droppedDueToLateness = cellProvider.getCounter(MetricName.named(SparkGroupAlsoByWindowViaWindowSet.class, GroupAlsoByWindowsAggregators.DROPPED_DUE_TO_LATENESS_COUNTER));
            AbstractIterator<Tuple2<ByteArray, Tuple2<StateAndTimers, List<byte[]>>>> /*WV<KV<K, Itr<I>>>*/
            outIter = new AbstractIterator<Tuple2</*K*/
            ByteArray, Tuple2<StateAndTimers, /*WV<KV<K, Itr<I>>>*/
            List<byte[]>>>>() {

                @Override
                protected Tuple2</*K*/
                ByteArray, Tuple2<StateAndTimers, /*WV<KV<K, Itr<I>>>*/
                List<byte[]>>> computeNext() {
                    // (possibly) previous-state and (possibly) new data.
                    while (iter.hasNext()) {
                        // for each element in the partition:
                        Tuple3<ByteArray, Seq<byte[]>, Option<Tuple2<StateAndTimers, List<byte[]>>>> next = iter.next();
                        ByteArray encodedKey = next._1();
                        K key = CoderHelpers.fromByteArray(encodedKey.getValue(), keyCoder);
                        Seq<byte[]> seq = next._2();
                        Option<Tuple2<StateAndTimers, List<byte[]>>> prevStateAndTimersOpt = next._3();
                        SparkStateInternals<K> stateInternals;
                        SparkTimerInternals timerInternals = SparkTimerInternals.forStreamFromSources(sourceIds, GlobalWatermarkHolder.get());
                        // get state(internals) per key.
                        if (prevStateAndTimersOpt.isEmpty()) {
                            // no previous state.
                            stateInternals = SparkStateInternals.forKey(key);
                        } else {
                            // with pre-existing state.
                            StateAndTimers prevStateAndTimers = prevStateAndTimersOpt.get()._1();
                            stateInternals = SparkStateInternals.forKeyAndState(key, prevStateAndTimers.getState());
                            Collection<byte[]> serTimers = prevStateAndTimers.getTimers();
                            timerInternals.addTimers(SparkTimerInternals.deserializeTimers(serTimers, timerDataCoder));
                        }
                        ReduceFnRunner<K, InputT, Iterable<InputT>, W> reduceFnRunner = new ReduceFnRunner<>(key, windowingStrategy, ExecutableTriggerStateMachine.create(TriggerStateMachines.stateMachineForTrigger(TriggerTranslation.toProto(windowingStrategy.getTrigger()))), stateInternals, timerInternals, outputHolder, new UnsupportedSideInputReader("GroupAlsoByWindow"), reduceFn, runtimeContext.getPipelineOptions());
                        // clear before potential use.
                        outputHolder.clear();
                        if (!seq.isEmpty()) {
                            // new input for key.
                            try {
                                Iterable<WindowedValue<InputT>> elementsIterable = CoderHelpers.fromByteArray(seq.head(), itrWvCoder);
                                Iterable<WindowedValue<InputT>> validElements = LateDataUtils.dropExpiredWindows(key, elementsIterable, timerInternals, windowingStrategy, droppedDueToLateness);
                                reduceFnRunner.processElements(validElements);
                            } catch (Exception e) {
                                throw new RuntimeException("Failed to process element with ReduceFnRunner", e);
                            }
                        } else if (stateInternals.getState().isEmpty()) {
                            // no input and no state -> GC evict now.
                            continue;
                        }
                        try {
                            // advance the watermark to the high watermark (HWM) so ready timers can fire.
                            timerInternals.advanceWatermark();
                            // call on timers that are ready.
                            reduceFnRunner.onTimers(timerInternals.getTimersReadyToProcess());
                        } catch (Exception e) {
                            throw new RuntimeException("Failed to process ReduceFnRunner onTimer.", e);
                        }
                        // this is mostly symbolic since actual persist is done by emitting output.
                        reduceFnRunner.persist();
                        // obtain output, if fired.
                        List<WindowedValue<KV<K, Iterable<InputT>>>> outputs = outputHolder.get();
                        if (!outputs.isEmpty() || !stateInternals.getState().isEmpty()) {
                            StateAndTimers updated = new StateAndTimers(stateInternals.getState(), SparkTimerInternals.serializeTimers(timerInternals.getTimers(), timerDataCoder));
                            // persist Spark's state by outputting.
                            List<byte[]> serOutput = CoderHelpers.toByteArrays(outputs, wvKvIterCoder);
                            return new Tuple2<>(encodedKey, new Tuple2<>(updated, serOutput));
                        }
                        // an empty state with no output can be evicted completely - do nothing.
                    }
                    return endOfData();
                }
            };
            // log if there's something to log.
            long lateDropped = droppedDueToLateness.getCumulative();
            if (lateDropped > 0) {
                LOG.info(String.format("Dropped %d elements due to lateness.", lateDropped));
                droppedDueToLateness.inc(-droppedDueToLateness.getCumulative());
            }
            long closedWindowDropped = droppedDueToClosedWindow.getCumulative();
            if (closedWindowDropped > 0) {
                LOG.info(String.format("Dropped %d elements due to closed window.", closedWindowDropped));
                droppedDueToClosedWindow.inc(-droppedDueToClosedWindow.getCumulative());
            }
            return scala.collection.JavaConversions.asScalaIterator(outIter);
        }
    }, partitioner, true, JavaSparkContext$.MODULE$.<Tuple2<StateAndTimers, List<byte[]>>>fakeClassTag());
    if (checkpointDurationMillis > 0) {
        firedStream.checkpoint(new Duration(checkpointDurationMillis));
    }
    // go back to Java now.
    JavaPairDStream<ByteArray, Tuple2<StateAndTimers, List<byte[]>>> /*WV<KV<K, Itr<I>>>*/
    javaFiredStream = JavaPairDStream.fromPairDStream(firedStream, JavaSparkContext$.MODULE$.<ByteArray>fakeClassTag(), JavaSparkContext$.MODULE$.<Tuple2<StateAndTimers, List<byte[]>>>fakeClassTag());
    // filter state-only output (nothing to fire) and remove the state from the output.
    return javaFiredStream.filter(new Function<Tuple2</*K*/
    ByteArray, Tuple2<StateAndTimers, /*WV<KV<K, Itr<I>>>*/
    List<byte[]>>>, Boolean>() {

        @Override
        public Boolean call(Tuple2</*K*/
        ByteArray, Tuple2<StateAndTimers, /*WV<KV<K, Itr<I>>>*/
        List<byte[]>>> t2) throws Exception {
            // keep only tuples that actually fired output.
            return !t2._2()._2().isEmpty();
        }
    }).flatMap(new FlatMapFunction<Tuple2</*K*/
    ByteArray, Tuple2<StateAndTimers, /*WV<KV<K, Itr<I>>>*/
    List<byte[]>>>, WindowedValue<KV<K, Iterable<InputT>>>>() {

        @Override
        public Iterable<WindowedValue<KV<K, Iterable<InputT>>>> call(Tuple2</*K*/
        ByteArray, Tuple2<StateAndTimers, /*WV<KV<K, Itr<I>>>*/
        List<byte[]>>> t2) throws Exception {
            // decode the fired output from its serialized form.
            return CoderHelpers.fromByteArrays(t2._2()._2(), wvKvIterCoder);
        }
    });
}
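Note the reset idiom in the logging block near the end of the stateful function: a cell is zeroed by incrementing it with the negation of its own cumulative value, so the next batch starts counting from scratch. A minimal sketch of the idiom in isolation (the names are placeholders):

import org.apache.beam.runners.core.metrics.CounterCell;
import org.apache.beam.runners.core.metrics.MetricsContainerImpl;
import org.apache.beam.sdk.metrics.MetricName;

public class CounterResetSketch {

    public static void main(String[] args) {
        MetricsContainerImpl cellProvider = new MetricsContainerImpl("cellProvider");
        CounterCell dropped = cellProvider.getCounter(MetricName.named("example", "dropped"));
        dropped.inc(3);
        long seen = dropped.getCumulative();
        if (seen > 0) {
            // log the value here, then zero the cell by incrementing with the negated total.
            dropped.inc(-seen);
        }
        System.out.println(dropped.getCumulative()); // prints 0
    }
}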
Also used : MetricsContainerImpl(org.apache.beam.runners.core.metrics.MetricsContainerImpl) CounterCell(org.apache.beam.runners.core.metrics.CounterCell) WindowedValue(org.apache.beam.sdk.util.WindowedValue) OutputWindowedValue(org.apache.beam.runners.core.OutputWindowedValue) ByteArray(org.apache.beam.runners.spark.util.ByteArray) List(java.util.List) ArrayList(java.util.ArrayList) ReduceFnRunner(org.apache.beam.runners.core.ReduceFnRunner) SystemReduceFn(org.apache.beam.runners.core.SystemReduceFn) Duration(org.apache.spark.streaming.Duration) TimerInternals(org.apache.beam.runners.core.TimerInternals) Collection(java.util.Collection) Option(scala.Option) Seq(scala.collection.Seq) FlatMapFunction(org.apache.spark.api.java.function.FlatMapFunction) Function(org.apache.spark.api.java.function.Function) UnsupportedSideInputReader(org.apache.beam.runners.core.UnsupportedSideInputReader) AbstractIterator(com.google.common.collect.AbstractIterator) Partitioner(org.apache.spark.Partitioner) FullWindowedValueCoder(org.apache.beam.sdk.util.WindowedValue.FullWindowedValueCoder) KV(org.apache.beam.sdk.values.KV) SparkPipelineOptions(org.apache.beam.runners.spark.SparkPipelineOptions) JavaRDD(org.apache.spark.api.java.JavaRDD) Tuple2(scala.Tuple2) Tuple3(scala.Tuple3)

Example 2 with CounterCell

Use of org.apache.beam.runners.core.metrics.CounterCell in project beam by apache.

From class WorkItemStatusClientTest, method populateCounterUpdatesWithMetricsAndCounters:

/**
 * Validates that Beam Metrics and "internal" Counters are merged in the update.
 */
@Test
public void populateCounterUpdatesWithMetricsAndCounters() throws Exception {
    final CounterUpdate expectedCounter = new CounterUpdate().setNameAndKind(new NameAndKind().setName("some-counter").setKind(Kind.SUM.toString())).setCumulative(true).setInteger(DataflowCounterUpdateExtractor.longToSplitInt(42));
    CounterSet counterSet = new CounterSet();
    counterSet.intSum(CounterName.named("some-counter")).addValue(42);
    final CounterUpdate expectedMetric = new CounterUpdate().setStructuredNameAndMetadata(new CounterStructuredNameAndMetadata().setName(new CounterStructuredName().setOrigin("USER").setOriginNamespace("namespace").setName("some-counter").setOriginalStepName("step")).setMetadata(new CounterMetadata().setKind(Kind.SUM.toString()))).setCumulative(true).setInteger(DataflowCounterUpdateExtractor.longToSplitInt(42));
    MetricsContainerImpl metricsContainer = new MetricsContainerImpl("step");
    BatchModeExecutionContext context = mock(BatchModeExecutionContext.class);
    when(context.extractMetricUpdates(anyBoolean())).thenReturn(ImmutableList.of(expectedMetric));
    when(context.extractMsecCounters(anyBoolean())).thenReturn(Collections.emptyList());
    CounterCell counter = metricsContainer.getCounter(MetricName.named("namespace", "some-counter"));
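    // These increments net to 1 + 41 + 1 - 1 = 42, matching expectedMetric above.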
    counter.inc(1);
    counter.inc(41);
    counter.inc(1);
    counter.inc(-1);
    WorkItemStatus status = new WorkItemStatus();
    when(worker.getOutputCounters()).thenReturn(counterSet);
    when(worker.extractMetricUpdates()).thenReturn(Collections.emptyList());
    statusClient.setWorker(worker, context);
    statusClient.populateCounterUpdates(status);
    assertThat(status.getCounterUpdates(), containsInAnyOrder(expectedCounter, expectedMetric));
}
Also used : CounterMetadata(com.google.api.services.dataflow.model.CounterMetadata) MetricsContainerImpl(org.apache.beam.runners.core.metrics.MetricsContainerImpl) CounterCell(org.apache.beam.runners.core.metrics.CounterCell) WorkItemStatus(com.google.api.services.dataflow.model.WorkItemStatus) CounterSet(org.apache.beam.runners.dataflow.worker.counters.CounterSet) CounterStructuredName(com.google.api.services.dataflow.model.CounterStructuredName) NameAndKind(com.google.api.services.dataflow.model.NameAndKind) CounterStructuredNameAndMetadata(com.google.api.services.dataflow.model.CounterStructuredNameAndMetadata) CounterUpdate(com.google.api.services.dataflow.model.CounterUpdate) Test(org.junit.Test)

Example 3 with CounterCell

Use of org.apache.beam.runners.core.metrics.CounterCell in project beam by apache.

From class LateDataUtils, method dropExpiredWindows:

/**
 * Returns an {@code Iterable<WindowedValue<InputT>>} that only contains non-late input elements.
 */
public static <K, V> Iterable<WindowedValue<V>> dropExpiredWindows(final K key, Iterable<WindowedValue<V>> elements, final TimerInternals timerInternals, final WindowingStrategy<?, ?> windowingStrategy, final CounterCell droppedDueToLateness) {
    // Explode windows to filter out expired ones.
    return FluentIterable.from(elements).transformAndConcat(input -> {
        if (input == null) {
            return null;
        }
        return input.explodeWindows();
    }).filter(input -> {
        if (input == null) {
            // drop null elements.
            return false;
        }
        BoundedWindow window = Iterables.getOnlyElement(input.getWindows());
        boolean expired = window.maxTimestamp().plus(windowingStrategy.getAllowedLateness()).isBefore(timerInternals.currentInputWatermarkTime());
        if (expired) {
            // The element is too late for this window.
            droppedDueToLateness.inc();
            WindowTracing.debug("GroupAlsoByWindow: Dropping element at {} for key: {}; " + "window: {} since it is too far behind inputWatermark: {}", input.getTimestamp(), key, window, timerInternals.currentInputWatermarkTime());
        }
        // Keep the element if the window is not expired.
        return !expired;
    });
}
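The expiry check itself is plain timestamp arithmetic on Joda-Time values. A minimal sketch with arbitrary timestamps (the class name and values are placeholders):

import org.joda.time.Duration;
import org.joda.time.Instant;

public class ExpiryCheckSketch {

    public static void main(String[] args) {
        Instant windowMaxTimestamp = new Instant(1000L);
        Duration allowedLateness = Duration.millis(500);
        Instant inputWatermark = new Instant(2000L);
        // mirrors the condition above: a window is expired once its max timestamp
        // plus the allowed lateness falls behind the input watermark.
        boolean expired = windowMaxTimestamp.plus(allowedLateness).isBefore(inputWatermark);
        System.out.println(expired); // prints true, since 1500 < 2000
    }
}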
Also used : WindowedValue(org.apache.beam.sdk.util.WindowedValue) BoundedWindow(org.apache.beam.sdk.transforms.windowing.BoundedWindow) Iterables(org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.Iterables) Instant(org.joda.time.Instant) Duration(org.joda.time.Duration) GlobalWindow(org.apache.beam.sdk.transforms.windowing.GlobalWindow) FluentIterable(org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.FluentIterable) CounterCell(org.apache.beam.runners.core.metrics.CounterCell) WindowTracing(org.apache.beam.sdk.util.WindowTracing) WindowingStrategy(org.apache.beam.sdk.values.WindowingStrategy)

Example 4 with CounterCell

Use of org.apache.beam.runners.core.metrics.CounterCell in project beam by apache.

From class FlinkMetricContainerTest, method testDropUnexpectedMonitoringInfoTypes:

@Test
public void testDropUnexpectedMonitoringInfoTypes() {
    MetricsContainerImpl step = container.getMetricsContainer("step");
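    // Four user metrics: int- and double-typed counters and distributions; only the int-typed ones should survive.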
    MonitoringInfo intCounter = new SimpleMonitoringInfoBuilder().setUrn(MonitoringInfoConstants.Urns.USER_SUM_INT64).setLabel(MonitoringInfoConstants.Labels.NAMESPACE, "ns1").setLabel(MonitoringInfoConstants.Labels.NAME, "int_counter").setLabel(MonitoringInfoConstants.Labels.PTRANSFORM, "step").setInt64SumValue(111).build();
    MonitoringInfo doubleCounter = new SimpleMonitoringInfoBuilder().setUrn(MonitoringInfoConstants.Urns.USER_SUM_DOUBLE).setLabel(MonitoringInfoConstants.Labels.NAMESPACE, "ns2").setLabel(MonitoringInfoConstants.Labels.NAME, "double_counter").setLabel(MonitoringInfoConstants.Labels.PTRANSFORM, "step").setDoubleSumValue(222).build();
    MonitoringInfo intDistribution = new SimpleMonitoringInfoBuilder().setUrn(MonitoringInfoConstants.Urns.USER_DISTRIBUTION_INT64).setLabel(MonitoringInfoConstants.Labels.NAMESPACE, "ns3").setLabel(MonitoringInfoConstants.Labels.NAME, "int_distribution").setLabel(MonitoringInfoConstants.Labels.PTRANSFORM, "step").setInt64DistributionValue(DistributionData.create(30, 10, 1, 5)).build();
    MonitoringInfo doubleDistribution = new SimpleMonitoringInfoBuilder().setUrn(MonitoringInfoConstants.Urns.USER_DISTRIBUTION_DOUBLE).setLabel(MonitoringInfoConstants.Labels.NAMESPACE, "ns4").setLabel(MonitoringInfoConstants.Labels.NAME, "double_distribution").setLabel(MonitoringInfoConstants.Labels.PTRANSFORM, "step").setDoubleDistributionValue(10, 30, 1, 5).build();
    // Mock out the counter that Flink returns; the distribution gets created by
    // FlinkMetricContainer, not by Flink itself, so we verify it in a different way below
    SimpleCounter counter = new SimpleCounter();
    when(metricGroup.counter("ns1.int_counter")).thenReturn(counter);
    container.updateMetrics("step", ImmutableList.of(intCounter, doubleCounter, intDistribution, doubleDistribution));
    // Flink's MetricGroup should only have asked for one counter (the integer-typed one) to be
    // created (the double-typed one is dropped currently)
    verify(metricGroup).counter(eq("ns1.int_counter"));
    // Verify that the counter injected into flink has the right value
    assertThat(counter.getCount(), is(111L));
    // Verify the counter in the java SDK MetricsContainer
    long count = ((CounterCell) step.tryGetCounter(MonitoringInfoMetricName.of(intCounter))).getCumulative();
    assertThat(count, is(111L));
    // The one Flink distribution that gets created is a FlinkDistributionGauge; here we verify its
    // initial (and in this test, final) value
    verify(metricGroup).gauge(eq("ns3.int_distribution"), argThat(new ArgumentMatcher<FlinkDistributionGauge>() {

        @Override
        public boolean matches(FlinkDistributionGauge argument) {
            DistributionResult actual = ((FlinkDistributionGauge) argument).getValue();
            DistributionResult expected = DistributionResult.create(30, 10, 1, 5);
            return actual.equals(expected);
        }
    }));
    // Verify that the Java SDK MetricsContainer holds the same information
    DistributionData distributionData = ((DistributionCell) step.getDistribution(MonitoringInfoMetricName.of(intDistribution))).getCumulative();
    assertThat(distributionData, is(DistributionData.create(30, 10, 1, 5)));
}
Also used : MetricsContainerImpl(org.apache.beam.runners.core.metrics.MetricsContainerImpl) CounterCell(org.apache.beam.runners.core.metrics.CounterCell) DistributionResult(org.apache.beam.sdk.metrics.DistributionResult) MonitoringInfo(org.apache.beam.model.pipeline.v1.MetricsApi.MonitoringInfo) SimpleMonitoringInfoBuilder(org.apache.beam.runners.core.metrics.SimpleMonitoringInfoBuilder) SimpleCounter(org.apache.flink.metrics.SimpleCounter) DistributionData(org.apache.beam.runners.core.metrics.DistributionData) ArgumentMatcher(org.mockito.ArgumentMatcher) FlinkDistributionGauge(org.apache.beam.runners.flink.metrics.FlinkMetricContainer.FlinkDistributionGauge) DistributionCell(org.apache.beam.runners.core.metrics.DistributionCell) Test(org.junit.Test)

Example 5 with CounterCell

Use of org.apache.beam.runners.core.metrics.CounterCell in project beam by apache.

From class BatchModeExecutionContext, method extractThrottleTime:

public Long extractThrottleTime() {
    long totalThrottleMsecs = 0L;
    for (MetricsContainerImpl container : containerRegistry.getContainers()) {
        // TODO(BEAM-7863): Update throttling counters to use generic throttling-msecs metric.
        CounterCell dataStoreThrottlingTime = container.tryGetCounter(MetricName.named(DATASTORE_THROTTLE_TIME_NAMESPACE, THROTTLE_TIME_COUNTER_NAME));
        if (dataStoreThrottlingTime != null) {
            totalThrottleMsecs += dataStoreThrottlingTime.getCumulative();
        }
        CounterCell httpClientApiThrottlingTime = container.tryGetCounter(MetricName.named(HTTP_CLIENT_API_THROTTLE_TIME_NAMESPACE, THROTTLE_TIME_COUNTER_NAME));
        if (httpClientApiThrottlingTime != null) {
            totalThrottleMsecs += httpClientApiThrottlingTime.getCumulative();
        }
        CounterCell bigqueryStreamingInsertThrottleTime = container.tryGetCounter(MetricName.named(BIGQUERY_STREAMING_INSERT_THROTTLE_TIME_NAMESPACE, THROTTLE_TIME_COUNTER_NAME));
        if (bigqueryStreamingInsertThrottleTime != null) {
            totalThrottleMsecs += bigqueryStreamingInsertThrottleTime.getCumulative();
        }
        CounterCell bigqueryReadThrottleTime = container.tryGetCounter(MetricName.named(BIGQUERY_READ_THROTTLE_TIME_NAMESPACE, THROTTLE_TIME_COUNTER_NAME));
        if (bigqueryReadThrottleTime != null) {
            totalThrottleMsecs += bigqueryReadThrottleTime.getCumulative();
        }
        CounterCell throttlingMsecs = container.tryGetCounter(DataflowSystemMetrics.THROTTLING_MSECS_METRIC_NAME);
        if (throttlingMsecs != null) {
            totalThrottleMsecs += throttlingMsecs.getCumulative();
        }
    }
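    // the cells accumulate milliseconds; convert the total to seconds before returning.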
    return TimeUnit.MILLISECONDS.toSeconds(totalThrottleMsecs);
}
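The repeated null-check-and-accumulate blocks could be folded into a small helper. A hypothetical sketch, not part of the Beam codebase, assuming the same imports as above:

// hypothetical helper: returns a counter's cumulative value, or 0 if the cell does not exist.
private static long cumulativeOrZero(MetricsContainerImpl container, MetricName name) {
    CounterCell cell = container.tryGetCounter(name);
    return cell == null ? 0L : cell.getCumulative();
}

Each block in the loop above would then collapse to a single line of the form totalThrottleMsecs += cumulativeOrZero(container, name).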
Also used : MetricsContainerImpl(org.apache.beam.runners.core.metrics.MetricsContainerImpl) CounterCell(org.apache.beam.runners.core.metrics.CounterCell)

Aggregations

CounterCell (org.apache.beam.runners.core.metrics.CounterCell): 5 uses
MetricsContainerImpl (org.apache.beam.runners.core.metrics.MetricsContainerImpl): 4 uses
WindowedValue (org.apache.beam.sdk.util.WindowedValue): 2 uses
Test (org.junit.Test): 2 uses
CounterMetadata (com.google.api.services.dataflow.model.CounterMetadata): 1 use
CounterStructuredName (com.google.api.services.dataflow.model.CounterStructuredName): 1 use
CounterStructuredNameAndMetadata (com.google.api.services.dataflow.model.CounterStructuredNameAndMetadata): 1 use
CounterUpdate (com.google.api.services.dataflow.model.CounterUpdate): 1 use
NameAndKind (com.google.api.services.dataflow.model.NameAndKind): 1 use
WorkItemStatus (com.google.api.services.dataflow.model.WorkItemStatus): 1 use
AbstractIterator (com.google.common.collect.AbstractIterator): 1 use
ArrayList (java.util.ArrayList): 1 use
Collection (java.util.Collection): 1 use
List (java.util.List): 1 use
MonitoringInfo (org.apache.beam.model.pipeline.v1.MetricsApi.MonitoringInfo): 1 use
OutputWindowedValue (org.apache.beam.runners.core.OutputWindowedValue): 1 use
ReduceFnRunner (org.apache.beam.runners.core.ReduceFnRunner): 1 use
SystemReduceFn (org.apache.beam.runners.core.SystemReduceFn): 1 use
TimerInternals (org.apache.beam.runners.core.TimerInternals): 1 use
UnsupportedSideInputReader (org.apache.beam.runners.core.UnsupportedSideInputReader): 1 use