
Example 1 with Counter

Use of org.apache.beam.sdk.metrics.Counter in project beam by Apache.

From the class BatchModeExecutionContextTest, method extractMetricUpdatesCounter.

@Test
public void extractMetricUpdatesCounter() {
    BatchModeExecutionContext executionContext = BatchModeExecutionContext.forTesting(PipelineOptionsFactory.create(), "testStage");
    DataflowOperationContext operationContext = executionContext.createOperationContext(NameContextsForTests.nameContextForTest());
    Counter counter = operationContext.metricsContainer().getCounter(MetricName.named("namespace", "some-counter"));
    counter.inc(1);
    counter.inc(41);
    counter.inc(1);
    counter.inc(-1);
    final CounterUpdate expected =
        new CounterUpdate()
            .setStructuredNameAndMetadata(
                new CounterStructuredNameAndMetadata()
                    .setName(
                        new CounterStructuredName()
                            .setOrigin("USER")
                            .setOriginNamespace("namespace")
                            .setName("some-counter")
                            .setOriginalStepName("originalName"))
                    .setMetadata(new CounterMetadata().setKind(Kind.SUM.toString())))
            .setCumulative(true)
            .setInteger(longToSplitInt(42));
    assertThat(executionContext.extractMetricUpdates(false), containsInAnyOrder(expected));
    executionContext.commitMetricUpdates();
    Counter counterUncommitted = operationContext.metricsContainer().getCounter(MetricName.named("namespace", "uncommitted-counter"));
    counterUncommitted.inc(64);
    final CounterUpdate expectedUncommitted =
        new CounterUpdate()
            .setStructuredNameAndMetadata(
                new CounterStructuredNameAndMetadata()
                    .setName(
                        new CounterStructuredName()
                            .setOrigin("USER")
                            .setOriginNamespace("namespace")
                            .setName("uncommitted-counter")
                            .setOriginalStepName("originalName"))
                    .setMetadata(new CounterMetadata().setKind(Kind.SUM.toString())))
            .setCumulative(true)
            .setInteger(longToSplitInt(64));
    // Expect to get only the uncommitted metric, unless final update.
    assertThat(executionContext.extractMetricUpdates(false), containsInAnyOrder(expectedUncommitted));
    assertThat(executionContext.extractMetricUpdates(true), containsInAnyOrder(expected, expectedUncommitted));
    executionContext.commitMetricUpdates();
    // All Metrics are committed, expect none unless final update.
    assertThat(executionContext.extractMetricUpdates(false), emptyIterable());
    assertThat(executionContext.extractMetricUpdates(true), containsInAnyOrder(expected, expectedUncommitted));
}
Also used: CounterMetadata (com.google.api.services.dataflow.model.CounterMetadata), Counter (org.apache.beam.sdk.metrics.Counter), CounterStructuredName (com.google.api.services.dataflow.model.CounterStructuredName), CounterStructuredNameAndMetadata (com.google.api.services.dataflow.model.CounterStructuredNameAndMetadata), CounterUpdate (com.google.api.services.dataflow.model.CounterUpdate), Test (org.junit.Test)
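
For the same accumulation behavior without the Dataflow worker classes, here is a minimal sketch using the runner-core MetricsContainerImpl directly; the step name "someStep", the class name, and the standalone main method are illustrative assumptions, not part of the test above.

import org.apache.beam.runners.core.metrics.MetricsContainerImpl;
import org.apache.beam.sdk.metrics.Counter;
import org.apache.beam.sdk.metrics.MetricName;

public class CounterAccumulationSketch {
    public static void main(String[] args) {
        // A runner-side metrics container, keyed by an illustrative step name.
        MetricsContainerImpl container = new MetricsContainerImpl("someStep");

        // Requests for the same MetricName return the same underlying cell,
        // so the four increments below accumulate into a single value.
        Counter counter = container.getCounter(MetricName.named("namespace", "some-counter"));
        counter.inc(1);
        counter.inc(41);
        counter.inc(1);
        counter.inc(-1);
        // Net cumulative value: 1 + 41 + 1 - 1 = 42, the value the test above
        // expects to see reported as a cumulative CounterUpdate.
    }
}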

Example 2 with Counter

Use of org.apache.beam.sdk.metrics.Counter in project beam by Apache.

From the class BatchModeExecutionContextTest, method extractThrottleTimeCounters.

@Test
public void extractThrottleTimeCounters() {
    BatchModeExecutionContext executionContext = BatchModeExecutionContext.forTesting(PipelineOptionsFactory.create(), "testStage");
    DataflowOperationContext operationContext = executionContext.createOperationContext(NameContextsForTests.nameContextForTest());
    Counter counter =
        operationContext
            .metricsContainer()
            .getCounter(
                MetricName.named(
                    BatchModeExecutionContext.DATASTORE_THROTTLE_TIME_NAMESPACE,
                    BatchModeExecutionContext.THROTTLE_TIME_COUNTER_NAME));
    counter.inc(12000);
    counter.inc(17000);
    counter.inc(1000);
    assertEquals(30L, (long) executionContext.extractThrottleTime());
}
Also used: Counter (org.apache.beam.sdk.metrics.Counter), Test (org.junit.Test)
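
Note that the increments are made in milliseconds while the extracted throttle time is asserted in whole seconds (12000 + 17000 + 1000 ms yields the expected 30). Below is a hedged sketch of how pipeline code might feed such a counter; the namespace and counter-name strings are placeholders, since real code would reference the constants on BatchModeExecutionContext used in the test.

import org.apache.beam.sdk.metrics.Counter;
import org.apache.beam.sdk.metrics.Metrics;
import org.apache.beam.sdk.transforms.DoFn;

// Hypothetical IO-style DoFn that reports throttling time in milliseconds,
// the same way the test above simulates it with counter.inc(...).
class ThrottleReportingFn extends DoFn<String, String> {

    // Placeholder strings; real code would use
    // BatchModeExecutionContext.DATASTORE_THROTTLE_TIME_NAMESPACE and
    // BatchModeExecutionContext.THROTTLE_TIME_COUNTER_NAME instead.
    private final Counter throttlingMsecs =
        Metrics.counter("datastore-throttle-namespace", "throttling-msecs");

    @ProcessElement
    public void processElement(ProcessContext c) {
        long observedBackoffMillis = 12000L; // illustrative value only
        throttlingMsecs.inc(observedBackoffMillis);
        c.output(c.element());
    }
}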

Example 3 with Counter

Use of org.apache.beam.sdk.metrics.Counter in project beam by Apache.

From the class HarnessMonitoringInfosInstructionHandlerTest, method testReturnsProcessWideMonitoringInfos.

@Test
public void testReturnsProcessWideMonitoringInfos() {
    MetricsEnvironment.setProcessWideContainer(MetricsContainerImpl.createProcessWideContainer());
    HashMap<String, String> labels = new HashMap<String, String>();
    labels.put(MonitoringInfoConstants.Labels.SERVICE, "service");
    labels.put(MonitoringInfoConstants.Labels.METHOD, "method");
    labels.put(MonitoringInfoConstants.Labels.RESOURCE, "resource");
    labels.put(MonitoringInfoConstants.Labels.PTRANSFORM, "transform");
    labels.put(MonitoringInfoConstants.Labels.STATUS, "ok");
    MonitoringInfoMetricName name = MonitoringInfoMetricName.named(MonitoringInfoConstants.Urns.API_REQUEST_COUNT, labels);
    Counter counter = LabeledMetrics.counter(name, true);
    counter.inc(7);
    ShortIdMap metricsShortIds = new ShortIdMap();
    HarnessMonitoringInfosInstructionHandler testObject = new HarnessMonitoringInfosInstructionHandler(metricsShortIds);
    BeamFnApi.InstructionRequest.Builder builder = BeamFnApi.InstructionRequest.newBuilder();
    BeamFnApi.InstructionResponse.Builder responseBuilder = testObject.harnessMonitoringInfos(builder.build());
    BeamFnApi.InstructionResponse response = responseBuilder.build();
    assertEquals(1, response.getHarnessMonitoringInfos().getMonitoringDataMap().size());
    // Expect a payload to be set for "metric0".
    assertTrue(!response.getHarnessMonitoringInfos().getMonitoringDataMap().get("metric0").isEmpty());
}
Also used: HashMap (java.util.HashMap), BeamFnApi (org.apache.beam.model.fnexecution.v1.BeamFnApi), ShortIdMap (org.apache.beam.runners.core.metrics.ShortIdMap), MonitoringInfoMetricName (org.apache.beam.runners.core.metrics.MonitoringInfoMetricName), Counter (org.apache.beam.sdk.metrics.Counter), Test (org.junit.Test)
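
A condensed sketch of the same flow from the metrics side: register a labeled, process-wide counter and then drain the process-wide container with a ShortIdMap, which is roughly what the handler does internally. The class name, the direct cast, and the getMonitoringData call are illustrative assumptions rather than the handler's exact code path.

import java.util.HashMap;
import org.apache.beam.runners.core.metrics.LabeledMetrics;
import org.apache.beam.runners.core.metrics.MetricsContainerImpl;
import org.apache.beam.runners.core.metrics.MonitoringInfoConstants;
import org.apache.beam.runners.core.metrics.MonitoringInfoMetricName;
import org.apache.beam.runners.core.metrics.ShortIdMap;
import org.apache.beam.sdk.metrics.Counter;
import org.apache.beam.sdk.metrics.MetricsEnvironment;

public class ProcessWideCounterSketch {
    public static void main(String[] args) {
        MetricsEnvironment.setProcessWideContainer(MetricsContainerImpl.createProcessWideContainer());

        // API_REQUEST_COUNT expects the full label set; the values are illustrative.
        HashMap<String, String> labels = new HashMap<>();
        labels.put(MonitoringInfoConstants.Labels.SERVICE, "service");
        labels.put(MonitoringInfoConstants.Labels.METHOD, "method");
        labels.put(MonitoringInfoConstants.Labels.RESOURCE, "resource");
        labels.put(MonitoringInfoConstants.Labels.PTRANSFORM, "transform");
        labels.put(MonitoringInfoConstants.Labels.STATUS, "ok");
        MonitoringInfoMetricName name =
            MonitoringInfoMetricName.named(MonitoringInfoConstants.Urns.API_REQUEST_COUNT, labels);

        // 'true' routes the counter to the process-wide container rather than a
        // per-bundle container, matching the test above.
        Counter requests = LabeledMetrics.counter(name, true);
        requests.inc(7);

        // Draining the container assigns short ids such as "metric0", the key the
        // test asserts on.
        ShortIdMap shortIds = new ShortIdMap();
        MetricsContainerImpl container =
            (MetricsContainerImpl) MetricsEnvironment.getProcessWideContainer();
        System.out.println(container.getMonitoringData(shortIds).keySet());
    }
}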

Example 4 with Counter

Use of org.apache.beam.sdk.metrics.Counter in project beam by Apache.

From the class WatchKafkaTopicPartitionDoFn, method onTimer.

@OnTimer(TIMER_ID)
public void onTimer(@TimerId(TIMER_ID) Timer timer, @StateId(STATE_ID) BagState<TopicPartition> existingTopicPartitions, OutputReceiver<KafkaSourceDescriptor> outputReceiver) {
    Set<TopicPartition> readingTopicPartitions = new HashSet<>();
    existingTopicPartitions.read().forEach(topicPartition -> {
        readingTopicPartitions.add(topicPartition);
    });
    existingTopicPartitions.clear();
    Set<TopicPartition> currentAll = this.getAllTopicPartitions();
    // Emit new added TopicPartitions.
    Set<TopicPartition> newAdded = Sets.difference(currentAll, readingTopicPartitions);
    newAdded.forEach(topicPartition -> {
        if (checkStopReadingFn == null || !checkStopReadingFn.apply(topicPartition)) {
            Counter foundedTopicPartition = Metrics.counter(COUNTER_NAMESPACE, topicPartition.toString());
            foundedTopicPartition.inc();
            outputReceiver.output(KafkaSourceDescriptor.of(topicPartition, null, startReadTime, null, stopReadTime, null));
        }
    });
    // Update the State.
    currentAll.forEach(topicPartition -> {
        if (checkStopReadingFn == null || !checkStopReadingFn.apply(topicPartition)) {
            existingTopicPartitions.add(topicPartition);
        }
    });
    // Reset the timer.
    timer.set(Instant.now().plus(Duration.millis(checkDuration.getMillis())));
}
Also used: Counter (org.apache.beam.sdk.metrics.Counter), TopicPartition (org.apache.kafka.common.TopicPartition), HashSet (java.util.HashSet)
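
The notable pattern here is a dynamically named counter per discovered partition, Metrics.counter(COUNTER_NAMESPACE, topicPartition.toString()). A small standalone sketch of that idea follows; the namespace string, class name, and helper method are assumptions, since the real COUNTER_NAMESPACE constant lives on WatchKafkaTopicPartitionDoFn.

import java.util.HashSet;
import java.util.Set;
import org.apache.beam.sdk.metrics.Counter;
import org.apache.beam.sdk.metrics.Metrics;
import org.apache.kafka.common.TopicPartition;

public class NewPartitionCounterSketch {

    // Assumed namespace string; the DoFn above uses its own COUNTER_NAMESPACE constant.
    private static final String COUNTER_NAMESPACE = "watch_kafka_topic_partition";

    /** Returns the partitions in currentAll that are not yet being read, counting each one. */
    static Set<TopicPartition> emitNewPartitions(
        Set<TopicPartition> currentAll, Set<TopicPartition> readingTopicPartitions) {
        Set<TopicPartition> newAdded = new HashSet<>(currentAll);
        newAdded.removeAll(readingTopicPartitions);
        for (TopicPartition topicPartition : newAdded) {
            // One counter per partition: the counter name is the partition's
            // "topic-partition" string, mirroring the DoFn above. The increment
            // only takes effect when a metrics container is in scope, i.e. inside
            // a running pipeline.
            Counter foundTopicPartition = Metrics.counter(COUNTER_NAMESPACE, topicPartition.toString());
            foundTopicPartition.inc();
        }
        return newAdded;
    }
}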

Example 5 with Counter

Use of org.apache.beam.sdk.metrics.Counter in project beam by Apache.

From the class Query13, method expand.

@Override
public PCollection<Event> expand(PCollection<Event> events) {
    final Coder<Event> coder = events.getCoder();
    return events
        .apply("Pair with random key", ParDo.of(new AssignShardFn<>(configuration.numKeyBuckets)))
        .apply(GroupByKey.create())
        .apply("ExpandIterable", ParDo.of(new DoFn<KV<Integer, Iterable<Event>>, Event>() {

        @ProcessElement
        public void processElement(@Element KV<Integer, Iterable<Event>> element, OutputReceiver<Event> r) {
            for (Event value : element.getValue()) {
                r.output(value);
            }
        }
    })).apply(name + ".Serialize", ParDo.of(new DoFn<Event, Event>() {

        private final Counter bytesMetric = Metrics.counter(name, "serde-bytes");

        private final Random random = new Random();

        private double pardoCPUFactor = (configuration.pardoCPUFactor >= 0.0 && configuration.pardoCPUFactor <= 1.0) ? configuration.pardoCPUFactor : 1.0;

        @ProcessElement
        public void processElement(ProcessContext c) throws CoderException, IOException {
            Event event;
            if (random.nextDouble() <= pardoCPUFactor) {
                event = encodeDecode(coder, c.element(), bytesMetric);
            } else {
                event = c.element();
            }
            c.output(event);
        }
    }));
}
Also used: KV (org.apache.beam.sdk.values.KV), DoFn (org.apache.beam.sdk.transforms.DoFn), Counter (org.apache.beam.sdk.metrics.Counter), Random (java.util.Random), Event (org.apache.beam.sdk.nexmark.model.Event)
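
The encodeDecode helper is not shown in this excerpt. Based on the coder and stream imports listed in the aggregations below, a plausible sketch (an assumption, not the project's exact implementation) is a coder round trip that counts the serialized bytes:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import org.apache.beam.sdk.coders.Coder;
import org.apache.beam.sdk.coders.CoderException;
import org.apache.beam.sdk.metrics.Counter;

public class SerdeSketch {
    /** Round-trips value through coder, counting the serialized bytes on bytesMetric. */
    static <T> T encodeDecode(Coder<T> coder, T value, Counter bytesMetric)
        throws CoderException, IOException {
        ByteArrayOutputStream outStream = new ByteArrayOutputStream();
        coder.encode(value, outStream);
        byte[] bytes = outStream.toByteArray();
        // Record how many bytes the serde cycle touched.
        bytesMetric.inc(bytes.length);
        return coder.decode(new ByteArrayInputStream(bytes));
    }
}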

Aggregations

Counter (org.apache.beam.sdk.metrics.Counter): 14
Test (org.junit.Test): 7
DoFn (org.apache.beam.sdk.transforms.DoFn): 4
HashMap (java.util.HashMap): 3
Event (org.apache.beam.sdk.nexmark.model.Event): 3
KV (org.apache.beam.sdk.values.KV): 3
MonitoringInfoMetricName (org.apache.beam.runners.core.metrics.MonitoringInfoMetricName): 2
MetricsContainer (org.apache.beam.sdk.metrics.MetricsContainer): 2
TopicPartition (org.apache.kafka.common.TopicPartition): 2
Nullable (org.checkerframework.checker.nullness.qual.Nullable): 2
CounterMetadata (com.google.api.services.dataflow.model.CounterMetadata): 1
CounterStructuredName (com.google.api.services.dataflow.model.CounterStructuredName): 1
CounterStructuredNameAndMetadata (com.google.api.services.dataflow.model.CounterStructuredNameAndMetadata): 1
CounterUpdate (com.google.api.services.dataflow.model.CounterUpdate): 1
ByteArrayInputStream (java.io.ByteArrayInputStream): 1
ByteArrayOutputStream (java.io.ByteArrayOutputStream): 1
IOException (java.io.IOException): 1
OutputStream (java.io.OutputStream): 1
HashSet (java.util.HashSet): 1
Random (java.util.Random): 1