
Example 21 with Counter

use of org.apache.flink.metrics.Counter in project flink by apache.

the class NoOpDriver method run.

@Override
public void run() throws Exception {
    // cache references on the stack
    final Counter numRecordsIn = this.taskContext.getMetricGroup().getIOMetricGroup().getNumRecordsInCounter();
    final Counter numRecordsOut = this.taskContext.getMetricGroup().getIOMetricGroup().getNumRecordsOutCounter();
    final MutableObjectIterator<T> input = this.taskContext.getInput(0);
    final Collector<T> output = new CountingCollector<>(this.taskContext.getOutputCollector(), numRecordsOut);
    if (objectReuseEnabled) {
        T record = this.taskContext.<T>getInputSerializer(0).getSerializer().createInstance();
        while (this.running && ((record = input.next(record)) != null)) {
            numRecordsIn.inc();
            output.collect(record);
        }
    } else {
        T record;
        while (this.running && ((record = input.next()) != null)) {
            numRecordsIn.inc();
            output.collect(record);
        }
    }
}
Also used : CountingCollector(org.apache.flink.runtime.operators.util.metrics.CountingCollector) Counter(org.apache.flink.metrics.Counter)
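
Both counters above come from the task's I/O metric group. For comparison, this is a minimal sketch of how user code typically registers and increments its own Counter; the class name CountingMapper and the metric name "numProcessed" are illustrative, not taken from the Flink sources.

import org.apache.flink.api.common.functions.RichMapFunction;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.metrics.Counter;

public class CountingMapper extends RichMapFunction<String, String> {

    private transient Counter numProcessed;

    @Override
    public void open(Configuration parameters) {
        // register the counter on this operator's metric group
        this.numProcessed = getRuntimeContext().getMetricGroup().counter("numProcessed");
    }

    @Override
    public String map(String value) {
        // one increment per processed record, mirroring numRecordsIn.inc() above
        numProcessed.inc();
        return value;
    }
}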

Example 22 with Counter

use of org.apache.flink.metrics.Counter in project flink by apache.

the class ReduceCombineDriver method prepare.

@Override
public void prepare() throws Exception {
    final Counter numRecordsOut = taskContext.getMetricGroup().getIOMetricGroup().getNumRecordsOutCounter();
    strategy = taskContext.getTaskConfig().getDriverStrategy();
    // instantiate the serializer / comparator
    final TypeSerializerFactory<T> serializerFactory = taskContext.getInputSerializer(0);
    comparator = taskContext.getDriverComparator(0);
    serializer = serializerFactory.getSerializer();
    reducer = taskContext.getStub();
    output = new CountingCollector<>(this.taskContext.getOutputCollector(), numRecordsOut);
    MemoryManager memManager = taskContext.getMemoryManager();
    final int numMemoryPages = memManager.computeNumberOfPages(taskContext.getTaskConfig().getRelativeMemoryDriver());
    memory = memManager.allocatePages(taskContext.getContainingTask(), numMemoryPages);
    ExecutionConfig executionConfig = taskContext.getExecutionConfig();
    objectReuseEnabled = executionConfig.isObjectReuseEnabled();
    if (LOG.isDebugEnabled()) {
        LOG.debug("ReduceCombineDriver object reuse: " + (objectReuseEnabled ? "ENABLED" : "DISABLED") + ".");
    }
    switch(strategy) {
        case SORTED_PARTIAL_REDUCE:
            // instantiate a fix-length in-place sorter, if possible, otherwise the out-of-place sorter
            if (comparator.supportsSerializationWithKeyNormalization() && serializer.getLength() > 0 && serializer.getLength() <= THRESHOLD_FOR_IN_PLACE_SORTING) {
                sorter = new FixedLengthRecordSorter<T>(serializer, comparator.duplicate(), memory);
            } else {
                sorter = new NormalizedKeySorter<T>(serializer, comparator.duplicate(), memory);
            }
            break;
        case HASHED_PARTIAL_REDUCE:
            table = new InPlaceMutableHashTable<T>(serializer, comparator, memory);
            reduceFacade = table.new ReduceFacade(reducer, output, objectReuseEnabled);
            break;
        default:
            throw new Exception("Invalid strategy " + taskContext.getTaskConfig().getDriverStrategy() + " for reduce combiner.");
    }
}
Also used : Counter(org.apache.flink.metrics.Counter) ExecutionConfig(org.apache.flink.api.common.ExecutionConfig) MemoryManager(org.apache.flink.runtime.memory.MemoryManager) IOException(java.io.IOException) EOFException(java.io.EOFException)
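
The CountingCollector created in prepare() wraps the task's output collector so that every emitted record also bumps numRecordsOut. A rough sketch of that wrapping pattern, with an illustrative class name rather than Flink's actual implementation:

import org.apache.flink.metrics.Counter;
import org.apache.flink.util.Collector;

public class CounterWrappingCollector<T> implements Collector<T> {

    private final Collector<T> delegate;
    private final Counter numRecordsOut;

    public CounterWrappingCollector(Collector<T> delegate, Counter numRecordsOut) {
        this.delegate = delegate;
        this.numRecordsOut = numRecordsOut;
    }

    @Override
    public void collect(T record) {
        // count the record, then forward it unchanged
        numRecordsOut.inc();
        delegate.collect(record);
    }

    @Override
    public void close() {
        delegate.close();
    }
}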

Example 23 with Counter

use of org.apache.flink.metrics.Counter in project flink by apache.

the class ReduceCombineDriver method run.

@Override
public void run() throws Exception {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Combiner starting.");
    }
    final Counter numRecordsIn = taskContext.getMetricGroup().getIOMetricGroup().getNumRecordsInCounter();
    final MutableObjectIterator<T> in = taskContext.getInput(0);
    final TypeSerializer<T> serializer = this.serializer;
    switch(strategy) {
        case SORTED_PARTIAL_REDUCE:
            if (objectReuseEnabled) {
                T value = serializer.createInstance();
                while (running && (value = in.next(value)) != null) {
                    numRecordsIn.inc();
                    // try writing to the sorter first
                    if (sorter.write(value)) {
                        continue;
                    }
                    // do the actual sorting, combining, and data writing
                    sortAndCombine();
                    sorter.reset();
                    // write the value again
                    if (!sorter.write(value)) {
                        throw new IOException("Cannot write record to fresh sort buffer. Record too large.");
                    }
                }
            } else {
                T value;
                while (running && (value = in.next()) != null) {
                    numRecordsIn.inc();
                    // try writing to the sorter first
                    if (sorter.write(value)) {
                        continue;
                    }
                    // do the actual sorting, combining, and data writing
                    sortAndCombine();
                    sorter.reset();
                    // write the value again
                    if (!sorter.write(value)) {
                        throw new IOException("Cannot write record to fresh sort buffer. Record too large.");
                    }
                }
            }
            // sort, combine, and send the final batch
            sortAndCombine();
            break;
        case HASHED_PARTIAL_REDUCE:
            table.open();
            if (objectReuseEnabled) {
                T value = serializer.createInstance();
                while (running && (value = in.next(value)) != null) {
                    numRecordsIn.inc();
                    try {
                        reduceFacade.updateTableEntryWithReduce(value);
                    } catch (EOFException ex) {
                        // the table has run out of memory
                        reduceFacade.emitAndReset();
                        // try again
                        reduceFacade.updateTableEntryWithReduce(value);
                    }
                }
            } else {
                T value;
                while (running && (value = in.next()) != null) {
                    numRecordsIn.inc();
                    try {
                        reduceFacade.updateTableEntryWithReduce(value);
                    } catch (EOFException ex) {
                        // the table has run out of memory
                        reduceFacade.emitAndReset();
                        // try again
                        reduceFacade.updateTableEntryWithReduce(value);
                    }
                }
            }
            // send the final batch
            reduceFacade.emit();
            table.close();
            break;
        default:
            throw new Exception("Invalid strategy " + taskContext.getTaskConfig().getDriverStrategy() + " for reduce combiner.");
    }
}
Also used : Counter(org.apache.flink.metrics.Counter) EOFException(java.io.EOFException) IOException(java.io.IOException)
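
The read loops above increment numRecordsIn by hand. An alternative used elsewhere in the Flink runtime (compare CountingMutableObjectIterator in the aggregation list at the end of this page) is to wrap the input iterator itself; the sketch below illustrates that idea with an assumed class name, not the actual Flink class.

import java.io.IOException;

import org.apache.flink.metrics.Counter;
import org.apache.flink.util.MutableObjectIterator;

public class RecordCountingIterator<T> implements MutableObjectIterator<T> {

    private final MutableObjectIterator<T> source;
    private final Counter numRecordsIn;

    public RecordCountingIterator(MutableObjectIterator<T> source, Counter numRecordsIn) {
        this.source = source;
        this.numRecordsIn = numRecordsIn;
    }

    @Override
    public T next(T reuse) throws IOException {
        // object-reuse variant
        T record = source.next(reuse);
        if (record != null) {
            numRecordsIn.inc();
        }
        return record;
    }

    @Override
    public T next() throws IOException {
        T record = source.next();
        if (record != null) {
            numRecordsIn.inc();
        }
        return record;
    }
}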

Example 24 with Counter

use of org.apache.flink.metrics.Counter in project beam by apache.

the class FlinkMetricContainer method updateCounters.

private void updateCounters(Iterable<MetricResult<Long>> counters) {
    for (MetricResult<Long> metricResult : counters) {
        String flinkMetricName = getFlinkMetricNameString(COUNTER_PREFIX, metricResult);
        Long update = metricResult.attempted();
        // update flink metric
        Counter counter = flinkCounterCache.get(flinkMetricName);
        if (counter == null) {
            counter = runtimeContext.getMetricGroup().counter(flinkMetricName);
            flinkCounterCache.put(flinkMetricName, counter);
        }
        counter.dec(counter.getCount());
        counter.inc(update);
    }
}
Also used : Counter(org.apache.flink.metrics.Counter)
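
Flink's Counter interface exposes inc(), dec(), and getCount() but no setter, which is why the Beam container above first subtracts the current count and then adds the attempted value. The same idiom written as a small helper; the class and method names are illustrative.

import org.apache.flink.metrics.Counter;

public final class CounterUtil {

    private CounterUtil() {
    }

    /** Overwrites the counter's current value with {@code newValue}. */
    public static void set(Counter counter, long newValue) {
        // not atomic: a metric reporter may observe the intermediate zero value
        counter.dec(counter.getCount());
        counter.inc(newValue);
    }
}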

Example 25 with Counter

use of org.apache.flink.metrics.Counter in project flink by apache.

the class MetricQueryServiceTest method testCreateDump.

@Test
public void testCreateDump() throws Exception {
    ActorSystem s = AkkaUtils.createLocalActorSystem(new Configuration());
    ActorRef serviceActor = MetricQueryService.startMetricQueryService(s, null);
    TestActorRef testActorRef = TestActorRef.create(s, Props.create(TestActor.class));
    TestActor testActor = (TestActor) testActorRef.underlyingActor();
    final Counter c = new SimpleCounter();
    final Gauge<String> g = new Gauge<String>() {

        @Override
        public String getValue() {
            return "Hello";
        }
    };
    final Histogram h = new TestingHistogram();
    final Meter m = new Meter() {

        @Override
        public void markEvent() {
        }

        @Override
        public void markEvent(long n) {
        }

        @Override
        public double getRate() {
            return 5;
        }

        @Override
        public long getCount() {
            return 10;
        }
    };
    MetricRegistry registry = new MetricRegistry(MetricRegistryConfiguration.defaultMetricRegistryConfiguration());
    final TaskManagerMetricGroup tm = new TaskManagerMetricGroup(registry, "host", "id");
    MetricQueryService.notifyOfAddedMetric(serviceActor, c, "counter", tm);
    MetricQueryService.notifyOfAddedMetric(serviceActor, g, "gauge", tm);
    MetricQueryService.notifyOfAddedMetric(serviceActor, h, "histogram", tm);
    MetricQueryService.notifyOfAddedMetric(serviceActor, m, "meter", tm);
    serviceActor.tell(MetricQueryService.getCreateDump(), testActorRef);
    synchronized (testActor.lock) {
        if (testActor.message == null) {
            testActor.lock.wait();
        }
    }
    MetricDumpSerialization.MetricSerializationResult dump = (MetricDumpSerialization.MetricSerializationResult) testActor.message;
    testActor.message = null;
    assertTrue(dump.serializedMetrics.length > 0);
    MetricQueryService.notifyOfRemovedMetric(serviceActor, c);
    MetricQueryService.notifyOfRemovedMetric(serviceActor, g);
    MetricQueryService.notifyOfRemovedMetric(serviceActor, h);
    MetricQueryService.notifyOfRemovedMetric(serviceActor, m);
    serviceActor.tell(MetricQueryService.getCreateDump(), testActorRef);
    synchronized (testActor.lock) {
        if (testActor.message == null) {
            testActor.lock.wait();
        }
    }
    MetricDumpSerialization.MetricSerializationResult emptyDump = (MetricDumpSerialization.MetricSerializationResult) testActor.message;
    testActor.message = null;
    assertEquals(0, emptyDump.serializedMetrics.length);
    s.shutdown();
}
Also used : ActorSystem(akka.actor.ActorSystem) TestingHistogram(org.apache.flink.runtime.metrics.util.TestingHistogram) Histogram(org.apache.flink.metrics.Histogram) Configuration(org.apache.flink.configuration.Configuration) MetricRegistryConfiguration(org.apache.flink.runtime.metrics.MetricRegistryConfiguration) Meter(org.apache.flink.metrics.Meter) TestActorRef(akka.testkit.TestActorRef) ActorRef(akka.actor.ActorRef) MetricRegistry(org.apache.flink.runtime.metrics.MetricRegistry) TaskManagerMetricGroup(org.apache.flink.runtime.metrics.groups.TaskManagerMetricGroup) Gauge(org.apache.flink.metrics.Gauge) SimpleCounter(org.apache.flink.metrics.SimpleCounter) Counter(org.apache.flink.metrics.Counter) Test(org.junit.Test)
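
The test registers a SimpleCounter, Flink's stock Counter implementation. As a minimal sketch, a custom variant backed by an AtomicLong only needs to implement the five methods of the org.apache.flink.metrics.Counter interface; the class name below is illustrative.

import java.util.concurrent.atomic.AtomicLong;

import org.apache.flink.metrics.Counter;

public class AtomicCounter implements Counter {

    private final AtomicLong count = new AtomicLong();

    @Override
    public void inc() {
        count.incrementAndGet();
    }

    @Override
    public void inc(long n) {
        count.addAndGet(n);
    }

    @Override
    public void dec() {
        count.decrementAndGet();
    }

    @Override
    public void dec(long n) {
        count.addAndGet(-n);
    }

    @Override
    public long getCount() {
        return count.get();
    }
}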

Aggregations

Counter (org.apache.flink.metrics.Counter): 39 uses
CountingCollector (org.apache.flink.runtime.operators.util.metrics.CountingCollector): 18 uses
CountingMutableObjectIterator (org.apache.flink.runtime.operators.util.metrics.CountingMutableObjectIterator): 10 uses
ExecutionConfig (org.apache.flink.api.common.ExecutionConfig): 8 uses
Histogram (org.apache.flink.metrics.Histogram): 8 uses
Meter (org.apache.flink.metrics.Meter): 8 uses
Gauge (org.apache.flink.metrics.Gauge): 7 uses
SimpleCounter (org.apache.flink.metrics.SimpleCounter): 7 uses
Test (org.junit.Test): 7 uses
TaskConfig (org.apache.flink.runtime.operators.util.TaskConfig): 5 uses
Configuration (org.apache.flink.configuration.Configuration): 4 uses
MetricRegistry (org.apache.flink.runtime.metrics.MetricRegistry): 4 uses
MetricRegistryConfiguration (org.apache.flink.runtime.metrics.MetricRegistryConfiguration): 4 uses
TaskManagerMetricGroup (org.apache.flink.runtime.metrics.groups.TaskManagerMetricGroup): 4 uses
SpillingResettableMutableObjectIterator (org.apache.flink.runtime.operators.resettable.SpillingResettableMutableObjectIterator): 4 uses
IOException (java.io.IOException): 3 uses
Tuple2 (org.apache.flink.api.java.tuple.Tuple2): 3 uses
TaskManagerJobMetricGroup (org.apache.flink.runtime.metrics.groups.TaskManagerJobMetricGroup): 3 uses
TaskMetricGroup (org.apache.flink.runtime.metrics.groups.TaskMetricGroup): 3 uses
TestingHistogram (org.apache.flink.runtime.metrics.util.TestingHistogram): 3 uses