Use of org.apache.flink.runtime.rest.messages.job.metrics.IOMetricsInfo in project flink by apache.
Class JobVertexTaskManagersHandler, method createJobVertexTaskManagersInfo:
private static JobVertexTaskManagersInfo createJobVertexTaskManagersInfo(
        AccessExecutionJobVertex jobVertex, JobID jobID, @Nullable MetricFetcher metricFetcher) {
    // Build a map that groups tasks by TaskManager
    Map<String, String> taskManagerId2Host = new HashMap<>();
    Map<String, List<AccessExecutionVertex>> taskManagerVertices = new HashMap<>();
    for (AccessExecutionVertex vertex : jobVertex.getTaskVertices()) {
        TaskManagerLocation location = vertex.getCurrentAssignedResourceLocation();
        String taskManagerHost = location == null ? "(unassigned)" : location.getHostname() + ':' + location.dataPort();
        String taskmanagerId = location == null ? "(unassigned)" : location.getResourceID().toString();
        taskManagerId2Host.put(taskmanagerId, taskManagerHost);
        List<AccessExecutionVertex> vertices = taskManagerVertices.computeIfAbsent(taskmanagerId, ignored -> new ArrayList<>(4));
        vertices.add(vertex);
    }

    final long now = System.currentTimeMillis();
    List<JobVertexTaskManagersInfo.TaskManagersInfo> taskManagersInfoList = new ArrayList<>(4);
    for (Map.Entry<String, List<AccessExecutionVertex>> entry : taskManagerVertices.entrySet()) {
        String taskmanagerId = entry.getKey();
        String host = taskManagerId2Host.get(taskmanagerId);
        List<AccessExecutionVertex> taskVertices = entry.getValue();

        // Aggregate subtask states, timing, and I/O metrics for this TaskManager
        int[] tasksPerState = new int[ExecutionState.values().length];
        long startTime = Long.MAX_VALUE;
        long endTime = 0;
        boolean allFinished = true;
        MutableIOMetrics counts = new MutableIOMetrics();
        for (AccessExecutionVertex vertex : taskVertices) {
            final ExecutionState state = vertex.getExecutionState();
            tasksPerState[state.ordinal()]++;
            // take the earliest start time
            long started = vertex.getStateTimestamp(ExecutionState.DEPLOYING);
            if (started > 0) {
                startTime = Math.min(startTime, started);
            }
            allFinished &= state.isTerminal();
            endTime = Math.max(endTime, vertex.getStateTimestamp(state));
            counts.addIOMetrics(vertex.getCurrentExecutionAttempt(), metricFetcher, jobID.toString(), jobVertex.getJobVertexId().toString());
        }

        // End time and duration are only exact once all subtasks are in a terminal state
        long duration;
        if (startTime < Long.MAX_VALUE) {
            if (allFinished) {
                duration = endTime - startTime;
            } else {
                endTime = -1L;
                duration = now - startTime;
            }
        } else {
            startTime = -1L;
            endTime = -1L;
            duration = -1L;
        }

        ExecutionState jobVertexState = ExecutionJobVertex.getAggregateJobVertexState(tasksPerState, taskVertices.size());
        final IOMetricsInfo jobVertexMetrics = new IOMetricsInfo(
                counts.getNumBytesIn(), counts.isNumBytesInComplete(),
                counts.getNumBytesOut(), counts.isNumBytesOutComplete(),
                counts.getNumRecordsIn(), counts.isNumRecordsInComplete(),
                counts.getNumRecordsOut(), counts.isNumRecordsOutComplete());

        Map<ExecutionState, Integer> statusCounts = new HashMap<>(ExecutionState.values().length);
        for (ExecutionState state : ExecutionState.values()) {
            statusCounts.put(state, tasksPerState[state.ordinal()]);
        }
        taskManagersInfoList.add(new JobVertexTaskManagersInfo.TaskManagersInfo(
                host, jobVertexState, startTime, endTime, duration, jobVertexMetrics, statusCounts, taskmanagerId));
    }

    return new JobVertexTaskManagersInfo(jobVertex.getJobVertexId(), jobVertex.getName(), now, taskManagersInfoList);
}
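The IOMetricsInfo constructor pairs every counter with a boolean flag; judging by the accessor names used above (isNumBytesInComplete and so on), the flag presumably records whether the counter is complete, i.e. whether every subtask contributed a value. A minimal sketch with made-up numbers, not taken from the Flink sources:

    IOMetricsInfo metrics = new IOMetricsInfo(
            1024L, true,   // bytes in, counter complete
            2048L, true,   // bytes out, counter complete
            10L, false,    // records in, counter still partial
            20L, false);   // records out, counter still partial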
Use of org.apache.flink.runtime.rest.messages.job.metrics.IOMetricsInfo in project flink by apache.
Class JobDetailsHandler, method createJobVertexDetailsInfo:
private static JobDetailsInfo.JobVertexDetailsInfo createJobVertexDetailsInfo(
        AccessExecutionJobVertex ejv, long now, JobID jobId, MetricFetcher metricFetcher) {
    // Count subtasks per execution state and track the earliest start / latest end timestamp
    int[] tasksPerState = new int[ExecutionState.values().length];
    long startTime = Long.MAX_VALUE;
    long endTime = 0;
    boolean allFinished = true;
    for (AccessExecutionVertex vertex : ejv.getTaskVertices()) {
        final ExecutionState state = vertex.getExecutionState();
        tasksPerState[state.ordinal()]++;
        // take the earliest start time
        long started = vertex.getStateTimestamp(ExecutionState.DEPLOYING);
        if (started > 0L) {
            startTime = Math.min(startTime, started);
        }
        allFinished &= state.isTerminal();
        endTime = Math.max(endTime, vertex.getStateTimestamp(state));
    }

    // End time and duration are only exact once all subtasks are in a terminal state
    long duration;
    if (startTime < Long.MAX_VALUE) {
        if (allFinished) {
            duration = endTime - startTime;
        } else {
            endTime = -1L;
            duration = now - startTime;
        }
    } else {
        startTime = -1L;
        endTime = -1L;
        duration = -1L;
    }

    ExecutionState jobVertexState = ExecutionJobVertex.getAggregateJobVertexState(tasksPerState, ejv.getParallelism());
    Map<ExecutionState, Integer> tasksPerStateMap = new HashMap<>(tasksPerState.length);
    for (ExecutionState executionState : ExecutionState.values()) {
        tasksPerStateMap.put(executionState, tasksPerState[executionState.ordinal()]);
    }

    // Accumulate the I/O metrics of the current execution attempts of all subtasks
    MutableIOMetrics counts = new MutableIOMetrics();
    for (AccessExecutionVertex vertex : ejv.getTaskVertices()) {
        counts.addIOMetrics(vertex.getCurrentExecutionAttempt(), metricFetcher, jobId.toString(), ejv.getJobVertexId().toString());
    }
    final IOMetricsInfo jobVertexMetrics = new IOMetricsInfo(
            counts.getNumBytesIn(), counts.isNumBytesInComplete(),
            counts.getNumBytesOut(), counts.isNumBytesOutComplete(),
            counts.getNumRecordsIn(), counts.isNumRecordsInComplete(),
            counts.getNumRecordsOut(), counts.isNumRecordsOutComplete());

    return new JobDetailsInfo.JobVertexDetailsInfo(
            ejv.getJobVertexId(), ejv.getName(), ejv.getMaxParallelism(), ejv.getParallelism(),
            jobVertexState, startTime, endTime, duration, tasksPerStateMap, jobVertexMetrics);
}
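Both handlers apply the same timing rule: the start time is the earliest DEPLOYING timestamp across subtasks, the end time only counts once every subtask is in a terminal state, and values that are not yet known are reported as -1. A self-contained restatement of that rule as a hypothetical helper (not part of Flink), returning {start, end, duration}:

    static long[] aggregateTimes(long earliestStart, long latestEnd, boolean allFinished, long now) {
        if (earliestStart == Long.MAX_VALUE) {
            // no subtask has reached DEPLOYING yet
            return new long[] {-1L, -1L, -1L};
        }
        if (allFinished) {
            return new long[] {earliestStart, latestEnd, latestEnd - earliestStart};
        }
        // still running: duration is measured against "now"
        return new long[] {earliestStart, -1L, now - earliestStart};
    }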
Use of org.apache.flink.runtime.rest.messages.job.metrics.IOMetricsInfo in project flink by apache.
Class SubtaskExecutionAttemptDetailsInfoTest, method getTestResponseInstance:
@Override
protected SubtaskExecutionAttemptDetailsInfo getTestResponseInstance() throws Exception {
    final Random random = new Random();
    final IOMetricsInfo ioMetricsInfo = new IOMetricsInfo(
            Math.abs(random.nextLong()), random.nextBoolean(), Math.abs(random.nextLong()), random.nextBoolean(),
            Math.abs(random.nextLong()), random.nextBoolean(), Math.abs(random.nextLong()), random.nextBoolean());
    return new SubtaskExecutionAttemptDetailsInfo(
            Math.abs(random.nextInt()), ExecutionState.values()[random.nextInt(ExecutionState.values().length)],
            Math.abs(random.nextInt()), "localhost:" + random.nextInt(65536),
            Math.abs(random.nextLong()), Math.abs(random.nextLong()), Math.abs(random.nextLong()),
            ioMetricsInfo, "taskmanagerId");
}
Use of org.apache.flink.runtime.rest.messages.job.metrics.IOMetricsInfo in project flink by apache.
Class JobDetailsInfoTest, method createJobVertexDetailsInfo:
private JobDetailsInfo.JobVertexDetailsInfo createJobVertexDetailsInfo(Random random) {
    final Map<ExecutionState, Integer> tasksPerState = new HashMap<>(ExecutionState.values().length);
    final IOMetricsInfo jobVertexMetrics = new IOMetricsInfo(
            random.nextLong(), random.nextBoolean(), random.nextLong(), random.nextBoolean(),
            random.nextLong(), random.nextBoolean(), random.nextLong(), random.nextBoolean());
    for (ExecutionState executionState : ExecutionState.values()) {
        tasksPerState.put(executionState, random.nextInt());
    }
    int parallelism = 1 + (random.nextInt() / 3);
    return new JobDetailsInfo.JobVertexDetailsInfo(
            new JobVertexID(), "jobVertex" + random.nextLong(), 2 * parallelism, parallelism,
            ExecutionState.values()[random.nextInt(ExecutionState.values().length)],
            random.nextLong(), random.nextLong(), random.nextLong(), tasksPerState, jobVertexMetrics);
}
Use of org.apache.flink.runtime.rest.messages.job.metrics.IOMetricsInfo in project flink by apache.
Class JobVertexDetailsInfoTest, method getTestResponseInstance:
@Override
protected JobVertexDetailsInfo getTestResponseInstance() throws Exception {
    final Random random = new Random();
    final IOMetricsInfo jobVertexMetrics = new IOMetricsInfo(
            random.nextLong(), random.nextBoolean(), random.nextLong(), random.nextBoolean(),
            random.nextLong(), random.nextBoolean(), random.nextLong(), random.nextBoolean());
    List<SubtaskExecutionAttemptDetailsInfo> vertexTaskDetailList = new ArrayList<>();
    vertexTaskDetailList.add(new SubtaskExecutionAttemptDetailsInfo(
            0, ExecutionState.CREATED, random.nextInt(), "local1", System.currentTimeMillis(), System.currentTimeMillis(), 1L, jobVertexMetrics, "taskmanagerId1"));
    vertexTaskDetailList.add(new SubtaskExecutionAttemptDetailsInfo(
            1, ExecutionState.FAILED, random.nextInt(), "local2", System.currentTimeMillis(), System.currentTimeMillis(), 1L, jobVertexMetrics, "taskmanagerId2"));
    vertexTaskDetailList.add(new SubtaskExecutionAttemptDetailsInfo(
            2, ExecutionState.FINISHED, random.nextInt(), "local3", System.currentTimeMillis(), System.currentTimeMillis(), 1L, jobVertexMetrics, "taskmanagerId3"));
    int parallelism = 1 + (random.nextInt() / 3);
    return new JobVertexDetailsInfo(
            new JobVertexID(), "jobVertex" + random.nextLong(), parallelism, 2 * parallelism,
            System.currentTimeMillis(), vertexTaskDetailList);
}