Use of org.apache.flink.metrics.Counter in project beam by apache.
The class FlinkMetricContainer, method updateCounters:
private void updateCounters(Iterable<MetricResult<Long>> counters) {
    for (MetricResult<Long> metricResult : counters) {
        String flinkMetricName = getFlinkMetricNameString(metricResult.getKey());
        Long update = metricResult.getAttempted();
        // update the corresponding Flink metric
        Counter counter =
                flinkCounterCache.computeIfAbsent(
                        flinkMetricName, n -> runtimeContext.getMetricGroup().counter(n));
        // Beam counters are already pre-aggregated, just update with the current value here
        counter.inc(update - counter.getCount());
    }
}
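The delta-update pattern above is worth isolating: Flink's Counter is cumulative and only exposes inc/dec, so the pre-aggregated Beam total is reconciled by incrementing with the difference from the current count. A minimal self-contained sketch of that pattern (the attemptedTotals values are invented for illustration):

import org.apache.flink.metrics.Counter;
import org.apache.flink.metrics.SimpleCounter;

public class DeltaUpdateSketch {
    public static void main(String[] args) {
        Counter counter = new SimpleCounter();
        // pretend these are successive pre-aggregated totals reported by Beam
        long[] attemptedTotals = {3L, 7L, 7L, 12L};
        for (long attempted : attemptedTotals) {
            // increment by the delta so the cumulative counter converges on the total
            counter.inc(attempted - counter.getCount());
        }
        System.out.println(counter.getCount()); // 12
    }
}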
Use of org.apache.flink.metrics.Counter in project flink by apache.
The class ReduceCombineDriver, method run:
@Override
public void run() throws Exception {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Combiner starting.");
    }
    final Counter numRecordsIn =
            taskContext.getMetricGroup().getIOMetricGroup().getNumRecordsInCounter();
    final MutableObjectIterator<T> in = taskContext.getInput(0);
    final TypeSerializer<T> serializer = this.serializer;
    switch (strategy) {
        case SORTED_PARTIAL_REDUCE:
            if (objectReuseEnabled) {
                T value = serializer.createInstance();
                while (running && (value = in.next(value)) != null) {
                    numRecordsIn.inc();
                    // try writing to the sorter first
                    if (sorter.write(value)) {
                        continue;
                    }
                    // do the actual sorting, combining, and data writing
                    sortAndCombine();
                    sorter.reset();
                    // write the value again
                    if (!sorter.write(value)) {
                        throw new IOException("Cannot write record to fresh sort buffer. Record too large.");
                    }
                }
            } else {
                T value;
                while (running && (value = in.next()) != null) {
                    numRecordsIn.inc();
                    // try writing to the sorter first
                    if (sorter.write(value)) {
                        continue;
                    }
                    // do the actual sorting, combining, and data writing
                    sortAndCombine();
                    sorter.reset();
                    // write the value again
                    if (!sorter.write(value)) {
                        throw new IOException("Cannot write record to fresh sort buffer. Record too large.");
                    }
                }
            }
            // sort, combine, and send the final batch
            sortAndCombine();
            break;
        case HASHED_PARTIAL_REDUCE:
            table.open();
            if (objectReuseEnabled) {
                T value = serializer.createInstance();
                while (running && (value = in.next(value)) != null) {
                    numRecordsIn.inc();
                    try {
                        reduceFacade.updateTableEntryWithReduce(value);
                    } catch (EOFException ex) {
                        // the table has run out of memory
                        reduceFacade.emitAndReset();
                        // try again
                        reduceFacade.updateTableEntryWithReduce(value);
                    }
                }
            } else {
                T value;
                while (running && (value = in.next()) != null) {
                    numRecordsIn.inc();
                    try {
                        reduceFacade.updateTableEntryWithReduce(value);
                    } catch (EOFException ex) {
                        // the table has run out of memory
                        reduceFacade.emitAndReset();
                        // try again
                        reduceFacade.updateTableEntryWithReduce(value);
                    }
                }
            }
            // send the final batch
            reduceFacade.emit();
            table.close();
            break;
        default:
            throw new Exception("Invalid strategy " + taskContext.getTaskConfig().getDriverStrategy() + " for reduce combiner.");
    }
}
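The driver obtains the built-in numRecordsIn counter from the task's I/O metric group; user functions register their own counters through the runtime context instead. A minimal sketch of that documented pattern (the class name and metric name are illustrative):

import org.apache.flink.api.common.functions.RichMapFunction;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.metrics.Counter;

public class CountingMapper extends RichMapFunction<String, String> {
    private transient Counter recordsSeen;

    @Override
    public void open(Configuration parameters) {
        // register a custom counter on this operator's metric group
        this.recordsSeen = getRuntimeContext().getMetricGroup().counter("recordsSeen");
    }

    @Override
    public String map(String value) {
        recordsSeen.inc();
        return value;
    }
}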
Use of org.apache.flink.metrics.Counter in project flink by apache.
The class ReduceCombineDriver, method prepare:
@Override
public void prepare() throws Exception {
    final Counter numRecordsOut =
            taskContext.getMetricGroup().getIOMetricGroup().getNumRecordsOutCounter();
    strategy = taskContext.getTaskConfig().getDriverStrategy();
    // instantiate the serializer / comparator
    final TypeSerializerFactory<T> serializerFactory = taskContext.getInputSerializer(0);
    comparator = taskContext.getDriverComparator(0);
    serializer = serializerFactory.getSerializer();
    reducer = taskContext.getStub();
    output = new CountingCollector<>(this.taskContext.getOutputCollector(), numRecordsOut);
    MemoryManager memManager = taskContext.getMemoryManager();
    final int numMemoryPages =
            memManager.computeNumberOfPages(taskContext.getTaskConfig().getRelativeMemoryDriver());
    memory = memManager.allocatePages(taskContext.getContainingTask(), numMemoryPages);
    ExecutionConfig executionConfig = taskContext.getExecutionConfig();
    objectReuseEnabled = executionConfig.isObjectReuseEnabled();
    if (LOG.isDebugEnabled()) {
        LOG.debug("ReduceCombineDriver object reuse: " + (objectReuseEnabled ? "ENABLED" : "DISABLED") + ".");
    }
    switch (strategy) {
        case SORTED_PARTIAL_REDUCE:
            // sorter: in-place sorting for short fixed-length records with normalized keys
            if (comparator.supportsSerializationWithKeyNormalization()
                    && serializer.getLength() > 0
                    && serializer.getLength() <= THRESHOLD_FOR_IN_PLACE_SORTING) {
                sorter = new FixedLengthRecordSorter<T>(serializer, comparator.duplicate(), memory);
            } else {
                sorter = new NormalizedKeySorter<T>(serializer, comparator.duplicate(), memory);
            }
            break;
        case HASHED_PARTIAL_REDUCE:
            table = new InPlaceMutableHashTable<T>(serializer, comparator, memory);
            reduceFacade = table.new ReduceFacade(reducer, output, objectReuseEnabled);
            break;
        default:
            throw new Exception("Invalid strategy " + taskContext.getTaskConfig().getDriverStrategy() + " for reduce combiner.");
    }
}
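prepare() wires numRecordsOut into the output path through CountingCollector, so the driver never calls inc() explicitly for outgoing records. A simplified sketch of the idea behind such a wrapper, not the actual Flink class:

import org.apache.flink.metrics.Counter;
import org.apache.flink.util.Collector;

// simplified sketch: delegate to the wrapped collector and count each emitted record
public class CountingCollectorSketch<T> implements Collector<T> {
    private final Collector<T> delegate;
    private final Counter numRecordsOut;

    public CountingCollectorSketch(Collector<T> delegate, Counter numRecordsOut) {
        this.delegate = delegate;
        this.numRecordsOut = numRecordsOut;
    }

    @Override
    public void collect(T record) {
        numRecordsOut.inc();
        delegate.collect(record);
    }

    @Override
    public void close() {
        delegate.close();
    }
}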
Use of org.apache.flink.metrics.Counter in project flink by apache.
The class MapPartitionDriver, method run:
@Override
public void run() throws Exception {
    final Counter numRecordsIn =
            this.taskContext.getMetricGroup().getIOMetricGroup().getNumRecordsInCounter();
    final Counter numRecordsOut =
            this.taskContext.getMetricGroup().getIOMetricGroup().getNumRecordsOutCounter();
    // cache references on the stack
    final MutableObjectIterator<IT> input =
            new CountingMutableObjectIterator<>(this.taskContext.<IT>getInput(0), numRecordsIn);
    final MapPartitionFunction<IT, OT> function = this.taskContext.getStub();
    final Collector<OT> output =
            new CountingCollector<>(this.taskContext.getOutputCollector(), numRecordsOut);
    if (objectReuseEnabled) {
        final ReusingMutableToRegularIteratorWrapper<IT> inIter =
                new ReusingMutableToRegularIteratorWrapper<IT>(
                        input, this.taskContext.<IT>getInputSerializer(0).getSerializer());
        function.mapPartition(inIter, output);
    } else {
        final NonReusingMutableToRegularIteratorWrapper<IT> inIter =
                new NonReusingMutableToRegularIteratorWrapper<IT>(
                        input, this.taskContext.<IT>getInputSerializer(0).getSerializer());
        function.mapPartition(inIter, output);
    }
}
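The input side is symmetric: here CountingMutableObjectIterator wraps the record iterator so numRecordsIn is incremented as records are pulled, which is why no explicit inc() appears in the method body. A simplified sketch of that wrapper idea, assuming only the MutableObjectIterator contract:

import java.io.IOException;
import org.apache.flink.metrics.Counter;
import org.apache.flink.util.MutableObjectIterator;

// simplified sketch: count every non-null record pulled from the wrapped iterator
public class CountingIteratorSketch<T> implements MutableObjectIterator<T> {
    private final Counter numRecordsIn;
    private final MutableObjectIterator<T> delegate;

    public CountingIteratorSketch(MutableObjectIterator<T> delegate, Counter numRecordsIn) {
        this.delegate = delegate;
        this.numRecordsIn = numRecordsIn;
    }

    @Override
    public T next(T reuse) throws IOException {
        T record = delegate.next(reuse);
        if (record != null) {
            numRecordsIn.inc();
        }
        return record;
    }

    @Override
    public T next() throws IOException {
        T record = delegate.next();
        if (record != null) {
            numRecordsIn.inc();
        }
        return record;
    }
}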
Use of org.apache.flink.metrics.Counter in project flink by apache.
The class NoOpDriver, method run:
@Override
public void run() throws Exception {
    // cache references on the stack
    final Counter numRecordsIn =
            this.taskContext.getMetricGroup().getIOMetricGroup().getNumRecordsInCounter();
    final Counter numRecordsOut =
            this.taskContext.getMetricGroup().getIOMetricGroup().getNumRecordsOutCounter();
    final MutableObjectIterator<T> input = this.taskContext.getInput(0);
    final Collector<T> output =
            new CountingCollector<>(this.taskContext.getOutputCollector(), numRecordsOut);
    if (objectReuseEnabled) {
        T record = this.taskContext.<T>getInputSerializer(0).getSerializer().createInstance();
        while (this.running && ((record = input.next(record)) != null)) {
            numRecordsIn.inc();
            output.collect(record);
        }
    } else {
        T record;
        while (this.running && ((record = input.next()) != null)) {
            numRecordsIn.inc();
            output.collect(record);
        }
    }
}
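The two loops differ only in object reuse: with reuse enabled the driver hands the same instance back to next(record); otherwise the iterator allocates a fresh object per record. A runnable sketch of the non-reuse counting loop against a trivial in-memory iterator (the data and names are invented):

import java.io.IOException;
import org.apache.flink.metrics.Counter;
import org.apache.flink.metrics.SimpleCounter;
import org.apache.flink.util.MutableObjectIterator;

public class LoopSketch {
    public static void main(String[] args) throws IOException {
        MutableObjectIterator<String> in = new MutableObjectIterator<String>() {
            private final String[] data = {"a", "b", "c"};
            private int pos;

            @Override
            public String next(String reuse) {
                // strings are immutable, so the reuse object is ignored here
                return next();
            }

            @Override
            public String next() {
                return pos < data.length ? data[pos++] : null;
            }
        };
        Counter numRecordsIn = new SimpleCounter();
        String record;
        while ((record = in.next()) != null) {
            numRecordsIn.inc();
        }
        System.out.println(numRecordsIn.getCount()); // 3
    }
}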