Use of org.apache.flink.annotation.VisibleForTesting in project flink by apache.
The class MetricUtils, method instantiateMetaspaceMemoryMetrics.
@VisibleForTesting
static void instantiateMetaspaceMemoryMetrics(final MetricGroup parentMetricGroup) {
    final List<MemoryPoolMXBean> memoryPoolMXBeans =
            ManagementFactory.getMemoryPoolMXBeans().stream()
                    .filter(bean -> "Metaspace".equals(bean.getName()))
                    .collect(Collectors.toList());

    if (memoryPoolMXBeans.isEmpty()) {
        LOG.info(
                "The '{}' metrics will not be exposed because no pool named 'Metaspace' could be found. This might be caused by the used JVM.",
                METRIC_GROUP_METASPACE_NAME);
        return;
    }

    final MetricGroup metricGroup = parentMetricGroup.addGroup(METRIC_GROUP_METASPACE_NAME);
    final Iterator<MemoryPoolMXBean> beanIterator = memoryPoolMXBeans.iterator();

    final MemoryPoolMXBean firstPool = beanIterator.next();
    instantiateMemoryUsageMetrics(metricGroup, firstPool::getUsage);

    if (beanIterator.hasNext()) {
        LOG.debug(
                "More than one memory pool named 'Metaspace' is present. Only the first pool was used for instantiating the '{}' metrics.",
                METRIC_GROUP_METASPACE_NAME);
    }
}
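The interesting part of this method is the pool lookup: a JVM is not guaranteed to expose a memory pool named "Metaspace", which is why the empty-list case is handled explicitly. The following standalone sketch runs the same filter outside of Flink; the class name MetaspacePoolProbe is invented for illustration, and only standard java.lang.management APIs are used (Java 9+ for ifPresentOrElse).

import java.lang.management.ManagementFactory;
import java.lang.management.MemoryUsage;

public class MetaspacePoolProbe {
    public static void main(String[] args) {
        // Same filter as instantiateMetaspaceMemoryMetrics: find the pool named "Metaspace".
        ManagementFactory.getMemoryPoolMXBeans().stream()
                .filter(bean -> "Metaspace".equals(bean.getName()))
                .findFirst()
                .ifPresentOrElse(
                        pool -> {
                            MemoryUsage usage = pool.getUsage();
                            System.out.println("Metaspace used:      " + usage.getUsed());
                            System.out.println("Metaspace committed: " + usage.getCommitted());
                            System.out.println("Metaspace max:       " + usage.getMax());
                        },
                        // Mirrors the LOG.info branch above: some JVMs have no such pool.
                        () -> System.out.println("No memory pool named 'Metaspace' on this JVM."));
    }
}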
Use of org.apache.flink.annotation.VisibleForTesting in project flink by apache.
The class Execution, method registerProducedPartitions.
@VisibleForTesting
static CompletableFuture<Map<IntermediateResultPartitionID, ResultPartitionDeploymentDescriptor>>
        registerProducedPartitions(
                ExecutionVertex vertex,
                TaskManagerLocation location,
                ExecutionAttemptID attemptId,
                boolean notifyPartitionDataAvailable) {

    ProducerDescriptor producerDescriptor = ProducerDescriptor.create(location, attemptId);

    Collection<IntermediateResultPartition> partitions = vertex.getProducedPartitions().values();
    Collection<CompletableFuture<ResultPartitionDeploymentDescriptor>> partitionRegistrations =
            new ArrayList<>(partitions.size());

    for (IntermediateResultPartition partition : partitions) {
        PartitionDescriptor partitionDescriptor = PartitionDescriptor.from(partition);
        int maxParallelism = getPartitionMaxParallelism(partition);
        CompletableFuture<? extends ShuffleDescriptor> shuffleDescriptorFuture =
                vertex.getExecutionGraphAccessor()
                        .getShuffleMaster()
                        .registerPartitionWithProducer(
                                vertex.getJobId(), partitionDescriptor, producerDescriptor);

        CompletableFuture<ResultPartitionDeploymentDescriptor> partitionRegistration =
                shuffleDescriptorFuture.thenApply(
                        shuffleDescriptor ->
                                new ResultPartitionDeploymentDescriptor(
                                        partitionDescriptor,
                                        shuffleDescriptor,
                                        maxParallelism,
                                        notifyPartitionDataAvailable));
        partitionRegistrations.add(partitionRegistration);
    }

    return FutureUtils.combineAll(partitionRegistrations)
            .thenApply(
                    rpdds -> {
                        Map<IntermediateResultPartitionID, ResultPartitionDeploymentDescriptor>
                                producedPartitions = new LinkedHashMap<>(partitions.size());
                        rpdds.forEach(rpdd -> producedPartitions.put(rpdd.getPartitionId(), rpdd));
                        return producedPartitions;
                    });
}
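FutureUtils.combineAll is a Flink-internal helper that collapses a collection of futures into a single future of all results. As a rough, hedged equivalent, the same combine-then-index pattern can be sketched with plain CompletableFuture.allOf; CombineAllSketch and its string "partitions" are made up for illustration and are not Flink APIs.

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.stream.Collectors;

public class CombineAllSketch {

    /** Turns a list of futures into one future of all results, preserving order. */
    static <T> CompletableFuture<List<T>> combineAll(List<CompletableFuture<T>> futures) {
        return CompletableFuture.allOf(futures.toArray(new CompletableFuture<?>[0]))
                .thenApply(
                        ignored ->
                                futures.stream()
                                        .map(CompletableFuture::join) // safe: allOf completed
                                        .collect(Collectors.toList()));
    }

    public static void main(String[] args) {
        // Stand-ins for the per-partition registration futures.
        List<CompletableFuture<String>> registrations = new ArrayList<>();
        registrations.add(CompletableFuture.completedFuture("partition-1"));
        registrations.add(CompletableFuture.completedFuture("partition-2"));

        // Index the combined results by a key, as the method above does with
        // ResultPartitionDeploymentDescriptor.getPartitionId().
        Map<String, Integer> byId =
                combineAll(registrations)
                        .thenApply(
                                results -> {
                                    Map<String, Integer> indexed = new LinkedHashMap<>();
                                    for (String r : results) {
                                        indexed.put(r, r.length());
                                    }
                                    return indexed;
                                })
                        .join();
        System.out.println(byId); // {partition-1=11, partition-2=11}
    }
}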
Use of org.apache.flink.annotation.VisibleForTesting in project flink by apache.
The class MetricStore, method add.
@VisibleForTesting
public void add(MetricDump metric) {
    try {
        QueryScopeInfo info = metric.scopeInfo;
        TaskManagerMetricStore tm;
        JobMetricStore job;
        TaskMetricStore task;
        ComponentMetricStore subtask;

        String name = info.scope.isEmpty() ? metric.name : info.scope + "." + metric.name;

        if (name.isEmpty()) {
            // malformed transmission
            return;
        }

        switch (info.getCategory()) {
            case INFO_CATEGORY_JM:
                addMetric(jobManager.metrics, name, metric);
                break;
            case INFO_CATEGORY_TM:
                String tmID = ((QueryScopeInfo.TaskManagerQueryScopeInfo) info).taskManagerID;
                tm = taskManagers.computeIfAbsent(tmID, k -> new TaskManagerMetricStore());
                if (name.contains("GarbageCollector")) {
                    String gcName =
                            name.substring(
                                    "Status.JVM.GarbageCollector.".length(),
                                    name.lastIndexOf('.'));
                    tm.addGarbageCollectorName(gcName);
                }
                addMetric(tm.metrics, name, metric);
                break;
            case INFO_CATEGORY_JOB:
                QueryScopeInfo.JobQueryScopeInfo jobInfo = (QueryScopeInfo.JobQueryScopeInfo) info;
                job = jobs.computeIfAbsent(jobInfo.jobID, k -> new JobMetricStore());
                addMetric(job.metrics, name, metric);
                break;
            case INFO_CATEGORY_TASK:
                QueryScopeInfo.TaskQueryScopeInfo taskInfo = (QueryScopeInfo.TaskQueryScopeInfo) info;
                job = jobs.computeIfAbsent(taskInfo.jobID, k -> new JobMetricStore());
                task = job.tasks.computeIfAbsent(taskInfo.vertexID, k -> new TaskMetricStore());
                subtask =
                        task.subtasks.computeIfAbsent(
                                taskInfo.subtaskIndex, k -> new ComponentMetricStore());
                /*
                 * The duplication is intended. Metrics scoped by subtask are useful for several
                 * job/task handlers, while the WebInterface task metric queries currently do
                 * not account for subtasks, so we don't divide by subtask and instead use the
                 * concatenation of subtask index and metric name as the name for those.
                 */
                addMetric(subtask.metrics, name, metric);
                addMetric(task.metrics, taskInfo.subtaskIndex + "." + name, metric);
                break;
            case INFO_CATEGORY_OPERATOR:
                QueryScopeInfo.OperatorQueryScopeInfo operatorInfo =
                        (QueryScopeInfo.OperatorQueryScopeInfo) info;
                job = jobs.computeIfAbsent(operatorInfo.jobID, k -> new JobMetricStore());
                task = job.tasks.computeIfAbsent(operatorInfo.vertexID, k -> new TaskMetricStore());
                subtask =
                        task.subtasks.computeIfAbsent(
                                operatorInfo.subtaskIndex, k -> new ComponentMetricStore());
                /*
                 * As the WebInterface does not account for operators (because it can't) we
                 * don't divide by operator and instead use the concatenation of subtask index,
                 * operator name and metric name as the name.
                 */
                addMetric(subtask.metrics, operatorInfo.operatorName + "." + name, metric);
                addMetric(
                        task.metrics,
                        operatorInfo.subtaskIndex + "." + operatorInfo.operatorName + "." + name,
                        metric);
                break;
            default:
                LOG.debug("Invalid metric dump category: " + info.getCategory());
        }
    } catch (Exception e) {
        LOG.debug("Malformed metric dump.", e);
    }
}
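Two patterns carry this method: nested Map.computeIfAbsent calls that create each store level lazily, and name concatenation (subtask index, operator name, metric name) instead of extra map levels. A minimal, hypothetical sketch of both patterns, with plain String values standing in for MetricDump:

import java.util.HashMap;
import java.util.Map;

public class NestedStoreSketch {

    // job -> vertex -> metricName -> value, each level created lazily.
    private final Map<String, Map<String, Map<String, String>>> jobs = new HashMap<>();

    void add(String jobId, String vertexId, int subtaskIndex, String name, String value) {
        Map<String, String> taskMetrics =
                jobs.computeIfAbsent(jobId, k -> new HashMap<>())
                        .computeIfAbsent(vertexId, k -> new HashMap<>());
        // Same convention as MetricStore: prefix the metric name with the
        // subtask index instead of adding another map level.
        taskMetrics.put(subtaskIndex + "." + name, value);
    }

    public static void main(String[] args) {
        NestedStoreSketch store = new NestedStoreSketch();
        store.add("job-1", "vertex-a", 0, "numRecordsIn", "42");
        store.add("job-1", "vertex-a", 1, "numRecordsIn", "17");
        System.out.println(store.jobs);
        // e.g. {job-1={vertex-a={0.numRecordsIn=42, 1.numRecordsIn=17}}}
    }
}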
Use of org.apache.flink.annotation.VisibleForTesting in project flink by apache.
The class ThreadDumpInfo, method stringifyThreadInfo.
/**
* Custom stringify format of JVM thread info to bypass the MAX_FRAMES = 8 limitation.
*
* <p>This method is based on
* https://github.com/openjdk/jdk/blob/master/src/java.management/share/classes/java/lang/management/ThreadInfo.java#L597
*/
@VisibleForTesting
protected static String stringifyThreadInfo(
        java.lang.management.ThreadInfo threadInfo, int maxDepth) {
    StringBuilder sb =
            new StringBuilder(
                    "\"" + threadInfo.getThreadName() + "\"" + " Id=" + threadInfo.getThreadId()
                            + " " + threadInfo.getThreadState());
    if (threadInfo.getLockName() != null) {
        sb.append(" on " + threadInfo.getLockName());
    }
    if (threadInfo.getLockOwnerName() != null) {
        sb.append(
                " owned by \"" + threadInfo.getLockOwnerName() + "\" Id="
                        + threadInfo.getLockOwnerId());
    }
    if (threadInfo.isSuspended()) {
        sb.append(" (suspended)");
    }
    if (threadInfo.isInNative()) {
        sb.append(" (in native)");
    }
    sb.append('\n');

    int i = 0;
    StackTraceElement[] stackTraceElements = threadInfo.getStackTrace();
    for (; i < stackTraceElements.length && i < maxDepth; i++) {
        StackTraceElement ste = stackTraceElements[i];
        sb.append("\tat " + ste.toString());
        sb.append('\n');
        if (i == 0 && threadInfo.getLockInfo() != null) {
            Thread.State ts = threadInfo.getThreadState();
            switch (ts) {
                case BLOCKED:
                    sb.append("\t- blocked on " + threadInfo.getLockInfo());
                    sb.append('\n');
                    break;
                case WAITING:
                case TIMED_WAITING:
                    sb.append("\t- waiting on " + threadInfo.getLockInfo());
                    sb.append('\n');
                    break;
                default:
            }
        }

        for (MonitorInfo mi : threadInfo.getLockedMonitors()) {
            if (mi.getLockedStackDepth() == i) {
                sb.append("\t- locked " + mi);
                sb.append('\n');
            }
        }
    }
    if (i < threadInfo.getStackTrace().length) {
        sb.append("\t...");
        sb.append('\n');
    }

    LockInfo[] locks = threadInfo.getLockedSynchronizers();
    if (locks.length > 0) {
        sb.append("\n\tNumber of locked synchronizers = " + locks.length);
        sb.append('\n');
        for (LockInfo li : locks) {
            sb.append("\t- " + li);
            sb.append('\n');
        }
    }
    sb.append('\n');
    return sb.toString();
}
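Note that the 8-frame cap lives in ThreadInfo.toString(), not in the data itself: ThreadMXBean already hands back full stack traces, and it is only the default string form that truncates. A small probe showing that the raw depth is available, using only standard java.lang.management APIs (the class name ThreadDumpProbe is invented):

import java.lang.management.ManagementFactory;
import java.lang.management.ThreadInfo;
import java.lang.management.ThreadMXBean;

public class ThreadDumpProbe {
    public static void main(String[] args) {
        ThreadMXBean threadMXBean = ManagementFactory.getThreadMXBean();
        // dumpAllThreads returns full stack traces; the custom stringifyThreadInfo
        // above exists so that printing them is not capped at 8 frames.
        ThreadInfo[] infos = threadMXBean.dumpAllThreads(true, true);
        for (ThreadInfo info : infos) {
            System.out.println(
                    "\"" + info.getThreadName() + "\" depth=" + info.getStackTrace().length);
        }
    }
}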
Use of org.apache.flink.annotation.VisibleForTesting in project flink by apache.
The class SourceCoordinator, method announceCombinedWatermark.
@VisibleForTesting
void announceCombinedWatermark() {
    checkState(
            watermarkAlignmentParams != WatermarkAlignmentParams.WATERMARK_ALIGNMENT_DISABLED);

    Watermark globalCombinedWatermark =
            coordinatorStore.apply(
                    watermarkAlignmentParams.getWatermarkGroup(),
                    (value) -> {
                        WatermarkAggregator aggregator = (WatermarkAggregator) value;
                        return new Watermark(aggregator.getAggregatedWatermark().getTimestamp());
                    });

    long maxAllowedWatermark =
            globalCombinedWatermark.getTimestamp()
                    + watermarkAlignmentParams.getMaxAllowedWatermarkDrift();

    Set<Integer> subTaskIds = combinedWatermark.keySet();
    LOG.info(
            "Distributing maxAllowedWatermark={} to subTaskIds={}",
            maxAllowedWatermark,
            subTaskIds);
    for (Integer subtaskId : subTaskIds) {
        context.sendEventToSourceOperator(
                subtaskId, new WatermarkAlignmentEvent(maxAllowedWatermark));
    }
}
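The arithmetic is the core of watermark alignment: the announced limit is the group-aggregated watermark plus the configured drift, and subtasks that have advanced beyond that limit pause until the group catches up. A minimal numeric sketch, assuming the group aggregate is the minimum of the reported subtask watermarks (WatermarkAlignmentSketch and its values are hypothetical):

import java.util.Arrays;
import java.util.List;

public class WatermarkAlignmentSketch {
    public static void main(String[] args) {
        // Hypothetical current watermarks reported by three source subtasks.
        List<Long> subtaskWatermarks = Arrays.asList(1_000L, 1_250L, 980L);

        // The aggregator tracks the minimum watermark across the group.
        long aggregated =
                subtaskWatermarks.stream().mapToLong(Long::longValue).min().getAsLong();

        // Corresponds to watermarkAlignmentParams.getMaxAllowedWatermarkDrift().
        long maxAllowedDrift = 500L;

        // Every subtask may read up to the aggregated watermark plus the drift;
        // the fast subtask at 1250 would pause once it reaches the limit.
        long maxAllowedWatermark = aggregated + maxAllowedDrift;
        System.out.println("maxAllowedWatermark = " + maxAllowedWatermark); // 1480
    }
}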