Use of org.apache.flink.metrics.SimpleCounter in project beam by apache, from the class FlinkMetricContainerTest, method testCounter:
@Test
public void testCounter() {
    SimpleCounter flinkCounter = new SimpleCounter();
    when(metricGroup.counter("namespace.name")).thenReturn(flinkCounter);
    MetricsContainer step = container.getMetricsContainer("step");
    MetricName metricName = MetricName.named("namespace", "name");
    Counter counter = step.getCounter(metricName);
    counter.inc();
    counter.inc();
    // Updates are buffered in the Beam container until updateMetrics() pushes them to Flink.
    assertThat(flinkCounter.getCount(), is(0L));
    container.updateMetrics("step");
    assertThat(flinkCounter.getCount(), is(2L));
}
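The assertions hinge on the Beam container buffering counter updates and only pushing them into the Flink counter when updateMetrics is called. The Flink side of that handshake is just the plain SimpleCounter accumulator; as a point of reference, here is a minimal, self-contained sketch of its API (inc, dec, and getCount come from Flink's Counter interface; the wrapping class exists only for this illustration):

import org.apache.flink.metrics.SimpleCounter;

public class SimpleCounterDemo {
    public static void main(String[] args) {
        SimpleCounter counter = new SimpleCounter();
        counter.inc();     // +1
        counter.inc(5L);   // +5
        counter.dec(2L);   // -2
        // Prints 4: SimpleCounter is a plain, non-thread-safe long accumulator.
        System.out.println(counter.getCount());
    }
}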
Use of org.apache.flink.metrics.SimpleCounter in project beam by apache, from the class FlinkMetricContainerTest, method testMonitoringInfoUpdate:
@Test
public void testMonitoringInfoUpdate() {
    SimpleCounter userCounter = new SimpleCounter();
    when(metricGroup.counter("ns1.metric1")).thenReturn(userCounter);
    SimpleCounter pCollectionCounter = new SimpleCounter();
    when(metricGroup.counter("pcoll.metric:element_count:v1")).thenReturn(pCollectionCounter);
    SimpleCounter pTransformCounter = new SimpleCounter();
    when(metricGroup.counter("anyPTransform.myMetric")).thenReturn(pTransformCounter);
    MonitoringInfo userCountMonitoringInfo =
            new SimpleMonitoringInfoBuilder()
                    .setUrn(MonitoringInfoConstants.Urns.USER_SUM_INT64)
                    .setLabel(MonitoringInfoConstants.Labels.NAMESPACE, "ns1")
                    .setLabel(MonitoringInfoConstants.Labels.NAME, "metric1")
                    .setLabel(MonitoringInfoConstants.Labels.PTRANSFORM, "anyPTransform")
                    .setInt64SumValue(111)
                    .build();
    assertNotNull(userCountMonitoringInfo);
    MonitoringInfo pCollectionScoped =
            new SimpleMonitoringInfoBuilder()
                    .setUrn(MonitoringInfoConstants.Urns.ELEMENT_COUNT)
                    .setInt64SumValue(222)
                    .setLabel(MonitoringInfoConstants.Labels.PCOLLECTION, "pcoll")
                    .setLabel(MonitoringInfoConstants.Labels.PTRANSFORM, "anyPTransform")
                    .build();
    assertNotNull(pCollectionScoped);
    MonitoringInfo transformScoped =
            new SimpleMonitoringInfoBuilder()
                    .setUrn(MonitoringInfoConstants.Urns.START_BUNDLE_MSECS)
                    .setInt64SumValue(333)
                    .setLabel(MonitoringInfoConstants.Labels.NAME, "myMetric")
                    .setLabel(MonitoringInfoConstants.Labels.PTRANSFORM, "anyPTransform")
                    .build();
    assertNotNull(transformScoped);
    // Nothing reaches the Flink counters until the container processes the MonitoringInfos.
    assertThat(userCounter.getCount(), is(0L));
    assertThat(pCollectionCounter.getCount(), is(0L));
    assertThat(pTransformCounter.getCount(), is(0L));
    container.updateMetrics(
            "step", ImmutableList.of(userCountMonitoringInfo, pCollectionScoped, transformScoped));
    assertThat(userCounter.getCount(), is(111L));
    assertThat(pCollectionCounter.getCount(), is(222L));
    assertThat(pTransformCounter.getCount(), is(333L));
}
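The three mocked metric names spell out the naming scheme the container is expected to use when registering Beam metrics with Flink: user metrics become namespace.name, PCollection-scoped element counts become <pcollection>.metric:element_count:v1, and transform-scoped metrics become <ptransform>.<name>. The following helpers are hypothetical (they are not part of Beam's API) and merely restate the pattern visible in the when(...) stubs above:

// Hypothetical helpers, not Beam API: they restate the names that the
// mocked metricGroup.counter(...) calls above expect to see.
static String userMetricName(String namespace, String name) {
    return namespace + "." + name;                   // "ns1.metric1"
}

static String elementCountMetricName(String pCollection) {
    return pCollection + ".metric:element_count:v1"; // "pcoll.metric:element_count:v1"
}

static String transformMetricName(String pTransform, String name) {
    return pTransform + "." + name;                  // "anyPTransform.myMetric"
}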
Use of org.apache.flink.metrics.SimpleCounter in project flink by apache, from the class MetricQueryServiceTest, method testCreateDump:
@Test
public void testCreateDump() throws Exception {
    MetricQueryService queryService =
            MetricQueryService.createMetricQueryService(
                    rpcService, ResourceID.generate(), Long.MAX_VALUE);
    queryService.start();
    final Counter c = new SimpleCounter();
    final Gauge<String> g = () -> "Hello";
    final Histogram h = new TestHistogram();
    final Meter m = new TestMeter();
    final TaskManagerMetricGroup tm =
            UnregisteredMetricGroups.createUnregisteredTaskManagerMetricGroup();
    queryService.addMetric("counter", c, tm);
    queryService.addMetric("gauge", g, tm);
    queryService.addMetric("histogram", h, tm);
    queryService.addMetric("meter", m, tm);
    MetricDumpSerialization.MetricSerializationResult dump =
            queryService.queryMetrics(TIMEOUT).get();
    assertTrue(dump.serializedCounters.length > 0);
    assertTrue(dump.serializedGauges.length > 0);
    assertTrue(dump.serializedHistograms.length > 0);
    assertTrue(dump.serializedMeters.length > 0);
    queryService.removeMetric(c);
    queryService.removeMetric(g);
    queryService.removeMetric(h);
    queryService.removeMetric(m);
    MetricDumpSerialization.MetricSerializationResult emptyDump =
            queryService.queryMetrics(TIMEOUT).get();
    assertEquals(0, emptyDump.serializedCounters.length);
    assertEquals(0, emptyDump.serializedGauges.length);
    assertEquals(0, emptyDump.serializedHistograms.length);
    assertEquals(0, emptyDump.serializedMeters.length);
}
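The test registers metrics directly on the query service, which is unusual outside of tests. In application code the same metric types are normally created through a MetricGroup; a minimal sketch, under the assumption that group is whatever MetricGroup the runtime hands you (for example from getRuntimeContext().getMetricGroup() in a rich function):

import org.apache.flink.metrics.Counter;
import org.apache.flink.metrics.Gauge;
import org.apache.flink.metrics.MetricGroup;

// Sketch only: 'group' stands in for a MetricGroup obtained from the runtime.
void registerMetrics(MetricGroup group) {
    Counter counter = group.counter("myCounter"); // backed by a SimpleCounter by default
    Gauge<String> gauge = () -> "Hello";
    group.gauge("myGauge", gauge);
    counter.inc();
}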
Use of org.apache.flink.metrics.SimpleCounter in project flink by apache, from the class StreamNetworkBenchmarkEnvironment, method createInputGateWithMetrics:
private IndexedInputGate createInputGateWithMetrics(
        SingleInputGateFactory gateFactory,
        InputGateDeploymentDescriptor gateDescriptor,
        int gateIndex) {
    final TaskMetricGroup taskMetricGroup =
            UnregisteredMetricGroups.createUnregisteredTaskMetricGroup();
    final SingleInputGate singleGate =
            gateFactory.create(
                    receiverEnv.createShuffleIOOwnerContext(
                            "receiving task[" + gateIndex + "]",
                            taskMetricGroup.executionId(),
                            taskMetricGroup),
                    gateIndex,
                    gateDescriptor,
                    SingleInputGateBuilder.NO_OP_PRODUCER_CHECKER);
    return new InputGateWithMetrics(singleGate, new SimpleCounter());
}
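InputGateWithMetrics is a thin decorator: calls are forwarded to the wrapped gate, and the SimpleCounter passed in here is incremented for the data that flows through, which lets the benchmark count received elements without touching the gate implementation. A minimal sketch of the same counting-decorator idea applied to a plain Iterator (CountingIterator is a hypothetical class for this illustration, not the Flink one):

import java.util.Iterator;
import org.apache.flink.metrics.Counter;

// Hypothetical illustration of the counting-decorator idea used by
// InputGateWithMetrics: wrap a source of elements and count what flows through.
final class CountingIterator<T> implements Iterator<T> {
    private final Iterator<T> delegate;
    private final Counter counter;

    CountingIterator(Iterator<T> delegate, Counter counter) {
        this.delegate = delegate;
        this.counter = counter;
    }

    @Override
    public boolean hasNext() {
        return delegate.hasNext();
    }

    @Override
    public T next() {
        T element = delegate.next();
        counter.inc(); // one increment per element handed out
        return element;
    }
}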
Use of org.apache.flink.metrics.SimpleCounter in project flink by apache, from the class DataSourceTask, method invoke:
@Override
public void invoke() throws Exception {
    // --------------------------------------------------------------------
    // Initialize
    // --------------------------------------------------------------------
    initInputFormat();
    LOG.debug(getLogString("Start registering input and output"));
    try {
        initOutputs(getEnvironment().getUserCodeClassLoader());
    } catch (Exception ex) {
        throw new RuntimeException(
                "The initialization of the DataSource's outputs caused an error: " + ex.getMessage(), ex);
    }
    LOG.debug(getLogString("Finished registering input and output"));
    // --------------------------------------------------------------------
    // Invoke
    // --------------------------------------------------------------------
    LOG.debug(getLogString("Starting data source operator"));
    RuntimeContext ctx = createRuntimeContext();
    final Counter numRecordsOut;
    {
        Counter tmpNumRecordsOut;
        try {
            InternalOperatorIOMetricGroup ioMetricGroup =
                    ((InternalOperatorMetricGroup) ctx.getMetricGroup()).getIOMetricGroup();
            ioMetricGroup.reuseInputMetricsForTask();
            if (this.config.getNumberOfChainedStubs() == 0) {
                ioMetricGroup.reuseOutputMetricsForTask();
            }
            tmpNumRecordsOut = ioMetricGroup.getNumRecordsOutCounter();
        } catch (Exception e) {
            // fall back to a standalone SimpleCounter if the metric setup fails
            LOG.warn("An exception occurred during the metrics setup.", e);
            tmpNumRecordsOut = new SimpleCounter();
        }
        numRecordsOut = tmpNumRecordsOut;
    }
    Counter completedSplitsCounter = ctx.getMetricGroup().counter("numSplitsProcessed");
    if (RichInputFormat.class.isAssignableFrom(this.format.getClass())) {
        ((RichInputFormat) this.format).setRuntimeContext(ctx);
        LOG.debug(getLogString("Rich Source detected. Initializing runtime context."));
        ((RichInputFormat) this.format).openInputFormat();
        LOG.debug(getLogString("Rich Source detected. Opening the InputFormat."));
    }
    ExecutionConfig executionConfig = getExecutionConfig();
    boolean objectReuseEnabled = executionConfig.isObjectReuseEnabled();
    LOG.debug("DataSourceTask object reuse: " + (objectReuseEnabled ? "ENABLED" : "DISABLED") + ".");
    final TypeSerializer<OT> serializer = this.serializerFactory.getSerializer();
    try {
        // start all chained tasks
        BatchTask.openChainedTasks(this.chainedTasks, this);
        // get input splits to read
        final Iterator<InputSplit> splitIterator = getInputSplits();
        // for each assigned input split
        while (!this.taskCanceled && splitIterator.hasNext()) {
            // get start and end
            final InputSplit split = splitIterator.next();
            LOG.debug(getLogString("Opening input split " + split.toString()));
            final InputFormat<OT, InputSplit> format = this.format;
            // open input format
            format.open(split);
            LOG.debug(getLogString("Starting to read input from split " + split.toString()));
            try {
                final Collector<OT> output = new CountingCollector<>(this.output, numRecordsOut);
                if (objectReuseEnabled) {
                    OT reuse = serializer.createInstance();
                    // as long as there is data to read
                    while (!this.taskCanceled && !format.reachedEnd()) {
                        OT returned;
                        if ((returned = format.nextRecord(reuse)) != null) {
                            output.collect(returned);
                        }
                    }
                } else {
                    // as long as there is data to read
                    while (!this.taskCanceled && !format.reachedEnd()) {
                        OT returned;
                        if ((returned = format.nextRecord(serializer.createInstance())) != null) {
                            output.collect(returned);
                        }
                    }
                }
                if (LOG.isDebugEnabled() && !this.taskCanceled) {
                    LOG.debug(getLogString("Closing input split " + split.toString()));
                }
            } finally {
                // close. We close here such that a regular close throwing an exception marks a
                // task as failed.
                format.close();
            }
            completedSplitsCounter.inc();
        }
        // end for all input splits
        // close all chained tasks letting them report failure
        BatchTask.closeChainedTasks(this.chainedTasks, this);
        // close the output collector
        this.output.close();
    } catch (Exception ex) {
        // close the input, but do not report any exceptions, since we already have another root
        // cause
        try {
            this.format.close();
        } catch (Throwable ignored) {
        }
        BatchTask.cancelChainedTasks(this.chainedTasks);
        ex = ExceptionInChainedStubException.exceptionUnwrap(ex);
        if (ex instanceof CancelTaskException) {
            // forward canceling exception
            throw ex;
        } else if (!this.taskCanceled) {
            // drop exception, if the task was canceled
            BatchTask.logAndThrowException(ex, this);
        }
    } finally {
        BatchTask.clearWriters(eventualOutputs);
        // --------------------------------------------------------------------
        if (this.format != null && RichInputFormat.class.isAssignableFrom(this.format.getClass())) {
            ((RichInputFormat) this.format).closeInputFormat();
            LOG.debug(getLogString("Rich Source detected. Closing the InputFormat."));
        }
    }
    if (!this.taskCanceled) {
        LOG.debug(getLogString("Finished data source operator"));
    } else {
        LOG.debug(getLogString("Data source operator cancelled"));
    }
}
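Two SimpleCounter idioms stand out in this method. First, when the metric setup fails, numRecordsOut falls back to a throwaway SimpleCounter, so the read loop can increment it unconditionally instead of null-checking a counter on the hot path. Second, the counting itself is pushed into CountingCollector, which wraps the real output collector. A minimal sketch of such a wrapper (SketchCountingCollector is a hypothetical stand-in for this illustration; Flink's actual CountingCollector lives in the runtime and may differ in detail):

import org.apache.flink.metrics.Counter;
import org.apache.flink.util.Collector;

// Hypothetical stand-in for Flink's CountingCollector: forwards records to a
// delegate and counts them, so callers never branch on whether metrics exist.
final class SketchCountingCollector<T> implements Collector<T> {
    private final Collector<T> delegate;
    private final Counter numRecordsOut;

    SketchCountingCollector(Collector<T> delegate, Counter numRecordsOut) {
        this.delegate = delegate;
        this.numRecordsOut = numRecordsOut;
    }

    @Override
    public void collect(T record) {
        numRecordsOut.inc(); // count every record, then forward it
        delegate.collect(record);
    }

    @Override
    public void close() {
        delegate.close();
    }
}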