Use of org.apache.flink.runtime.executiongraph.AccessExecutionJobVertex in project flink by apache.
The class AbstractJobVertexRequestHandler, method handleRequest:
@Override
public final String handleRequest(AccessExecutionGraph graph, Map<String, String> params) throws Exception {
    final JobVertexID vid = parseJobVertexId(params);
    final AccessExecutionJobVertex jobVertex = graph.getJobVertex(vid);
    if (jobVertex == null) {
        throw new IllegalArgumentException("No vertex with ID '" + vid + "' exists.");
    }
    return handleRequest(jobVertex, params);
}
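This final method is a template: it resolves the JobVertexID from the request parameters, looks the vertex up on the graph, and delegates to an abstract overload that subclasses implement against AccessExecutionJobVertex. As a hypothetical sketch (the handler name and JSON payload are invented here, and constructor wiring to the superclass is omitted), a subclass only needs the vertex-scoped overload:

// Hypothetical subclass, for illustration only; superclass constructor
// wiring is omitted. Only the vertex-scoped overload must be implemented,
// because the template method above already performs the lookup.
public class JobVertexNameHandler extends AbstractJobVertexRequestHandler {
    @Override
    public String handleRequest(AccessExecutionJobVertex jobVertex, Map<String, String> params) throws Exception {
        return "{\"name\":\"" + jobVertex.getName() + "\"}";
    }
}

JobVertexBackPressureHandler below follows exactly this pattern.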
Use of org.apache.flink.runtime.executiongraph.AccessExecutionJobVertex in project flink by apache.
The class JobVertexBackPressureHandler, method handleRequest:
@Override
public String handleRequest(AccessExecutionJobVertex accessJobVertex, Map<String, String> params) throws Exception {
    if (accessJobVertex instanceof ArchivedExecutionJobVertex) {
        // Archived vertices belong to finished jobs; there are no live back pressure stats to report.
        return "";
    }
    ExecutionJobVertex jobVertex = (ExecutionJobVertex) accessJobVertex;
    try (StringWriter writer = new StringWriter();
            JsonGenerator gen = JsonFactory.jacksonFactory.createGenerator(writer)) {
        gen.writeStartObject();
        Option<OperatorBackPressureStats> statsOption = backPressureStatsTracker.getOperatorBackPressureStats(jobVertex);
        if (statsOption.isDefined()) {
            OperatorBackPressureStats stats = statsOption.get();
            // Check whether we need to refresh
            if (refreshInterval <= System.currentTimeMillis() - stats.getEndTimestamp()) {
                backPressureStatsTracker.triggerStackTraceSample(jobVertex);
                gen.writeStringField("status", "deprecated");
            } else {
                gen.writeStringField("status", "ok");
            }
            gen.writeStringField("backpressure-level", getBackPressureLevel(stats.getMaxBackPressureRatio()));
            gen.writeNumberField("end-timestamp", stats.getEndTimestamp());
            // Sub tasks
            gen.writeArrayFieldStart("subtasks");
            int numSubTasks = stats.getNumberOfSubTasks();
            for (int i = 0; i < numSubTasks; i++) {
                double ratio = stats.getBackPressureRatio(i);
                gen.writeStartObject();
                gen.writeNumberField("subtask", i);
                gen.writeStringField("backpressure-level", getBackPressureLevel(ratio));
                gen.writeNumberField("ratio", ratio);
                gen.writeEndObject();
            }
            gen.writeEndArray();
        } else {
            // No stats available yet: trigger a sample so a later request can return data.
            backPressureStatsTracker.triggerStackTraceSample(jobVertex);
            gen.writeStringField("status", "deprecated");
        }
        gen.writeEndObject();
        // Close the generator explicitly so buffered output is flushed into the writer
        // before it is read; try-with-resources would only close it after the return.
        gen.close();
        return writer.toString();
    }
}
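Both writes of "backpressure-level" go through getBackPressureLevel, which buckets a sampled ratio into a coarse label. A minimal sketch of such a mapping, with the 0.10 and 0.50 cut-offs assumed for illustration rather than taken from the snippet above:

// Sketch only: the threshold values are assumptions, not read from the
// handler above. The ratio is the fraction of sampled stack traces in
// which the subtask was blocked waiting for output buffers.
static String getBackPressureLevel(double backPressureRatio) {
    if (backPressureRatio <= 0.10) {
        return "ok";
    } else if (backPressureRatio <= 0.50) {
        return "low";
    } else {
        return "high";
    }
}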
Use of org.apache.flink.runtime.executiongraph.AccessExecutionJobVertex in project flink by apache.
The class JobDetailsHandlerTest, method compareJobDetails:
private static void compareJobDetails(AccessExecutionGraph originalJob, String json) throws IOException {
    JsonNode result = ArchivedJobGenerationUtils.mapper.readTree(json);
    Assert.assertEquals(originalJob.getJobID().toString(), result.get("jid").asText());
    Assert.assertEquals(originalJob.getJobName(), result.get("name").asText());
    Assert.assertEquals(originalJob.isStoppable(), result.get("isStoppable").asBoolean());
    Assert.assertEquals(originalJob.getState().name(), result.get("state").asText());
    Assert.assertEquals(originalJob.getStatusTimestamp(JobStatus.CREATED), result.get("start-time").asLong());
    Assert.assertEquals(originalJob.getStatusTimestamp(originalJob.getState()), result.get("end-time").asLong());
    Assert.assertEquals(originalJob.getStatusTimestamp(originalJob.getState()) - originalJob.getStatusTimestamp(JobStatus.CREATED), result.get("duration").asLong());
    JsonNode timestamps = result.get("timestamps");
    for (JobStatus status : JobStatus.values()) {
        Assert.assertEquals(originalJob.getStatusTimestamp(status), timestamps.get(status.name()).asLong());
    }
    ArrayNode tasks = (ArrayNode) result.get("vertices");
    int x = 0;
    for (AccessExecutionJobVertex expectedTask : originalJob.getVerticesTopologically()) {
        JsonNode task = tasks.get(x);
        Assert.assertEquals(expectedTask.getJobVertexId().toString(), task.get("id").asText());
        Assert.assertEquals(expectedTask.getName(), task.get("name").asText());
        Assert.assertEquals(expectedTask.getParallelism(), task.get("parallelism").asInt());
        Assert.assertEquals(expectedTask.getAggregateState().name(), task.get("status").asText());
        Assert.assertEquals(3, task.get("start-time").asLong());
        Assert.assertEquals(5, task.get("end-time").asLong());
        Assert.assertEquals(2, task.get("duration").asLong());
        JsonNode subtasksPerState = task.get("tasks");
        Assert.assertEquals(0, subtasksPerState.get(ExecutionState.CREATED.name()).asInt());
        Assert.assertEquals(0, subtasksPerState.get(ExecutionState.SCHEDULED.name()).asInt());
        Assert.assertEquals(0, subtasksPerState.get(ExecutionState.DEPLOYING.name()).asInt());
        Assert.assertEquals(0, subtasksPerState.get(ExecutionState.RUNNING.name()).asInt());
        Assert.assertEquals(1, subtasksPerState.get(ExecutionState.FINISHED.name()).asInt());
        Assert.assertEquals(0, subtasksPerState.get(ExecutionState.CANCELING.name()).asInt());
        Assert.assertEquals(0, subtasksPerState.get(ExecutionState.CANCELED.name()).asInt());
        Assert.assertEquals(0, subtasksPerState.get(ExecutionState.FAILED.name()).asInt());
        long expectedNumBytesIn = 0;
        long expectedNumBytesOut = 0;
        long expectedNumRecordsIn = 0;
        long expectedNumRecordsOut = 0;
        for (AccessExecutionVertex vertex : expectedTask.getTaskVertices()) {
            IOMetrics ioMetrics = vertex.getCurrentExecutionAttempt().getIOMetrics();
            expectedNumBytesIn += ioMetrics.getNumBytesInLocal() + ioMetrics.getNumBytesInRemote();
            expectedNumBytesOut += ioMetrics.getNumBytesOut();
            expectedNumRecordsIn += ioMetrics.getNumRecordsIn();
            expectedNumRecordsOut += ioMetrics.getNumRecordsOut();
        }
        JsonNode metrics = task.get("metrics");
        Assert.assertEquals(expectedNumBytesIn, metrics.get("read-bytes").asLong());
        Assert.assertEquals(expectedNumBytesOut, metrics.get("write-bytes").asLong());
        Assert.assertEquals(expectedNumRecordsIn, metrics.get("read-records").asLong());
        Assert.assertEquals(expectedNumRecordsOut, metrics.get("write-records").asLong());
        x++;
    }
    Assert.assertEquals(1, tasks.size());
    JsonNode statusCounts = result.get("status-counts");
    Assert.assertEquals(0, statusCounts.get(ExecutionState.CREATED.name()).asInt());
    Assert.assertEquals(0, statusCounts.get(ExecutionState.SCHEDULED.name()).asInt());
    Assert.assertEquals(0, statusCounts.get(ExecutionState.DEPLOYING.name()).asInt());
    Assert.assertEquals(1, statusCounts.get(ExecutionState.RUNNING.name()).asInt());
    Assert.assertEquals(0, statusCounts.get(ExecutionState.FINISHED.name()).asInt());
    Assert.assertEquals(0, statusCounts.get(ExecutionState.CANCELING.name()).asInt());
    Assert.assertEquals(0, statusCounts.get(ExecutionState.CANCELED.name()).asInt());
    Assert.assertEquals(0, statusCounts.get(ExecutionState.FAILED.name()).asInt());
    Assert.assertEquals(ArchivedJobGenerationUtils.mapper.readTree(originalJob.getJsonPlan()), result.get("plan"));
}
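The metrics assertions above fix both the field names and the aggregation rule: per-vertex I/O is summed over all subtasks' current execution attempts, with bytes-in combining the local and remote counters. A small helper showing that object shape with Jackson (illustrative only: the name vertexMetrics is made up, and the real handler streams the object with a JsonGenerator instead):

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ObjectNode;

// Illustrative helper (vertexMetrics is not Flink code): builds the
// per-vertex "metrics" object with the field names asserted in the test.
static ObjectNode vertexMetrics(long bytesIn, long bytesOut, long recordsIn, long recordsOut) {
    ObjectNode metrics = new ObjectMapper().createObjectNode();
    metrics.put("read-bytes", bytesIn);
    metrics.put("write-bytes", bytesOut);
    metrics.put("read-records", recordsIn);
    metrics.put("write-records", recordsOut);
    return metrics;
}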
Use of org.apache.flink.runtime.executiongraph.AccessExecutionJobVertex in project flink by apache.
The class JobVertexAccumulatorsHandlerTest, method testArchiver:
@Test
public void testArchiver() throws Exception {
    JsonArchivist archivist = new JobVertexAccumulatorsHandler.JobVertexAccumulatorsJsonArchivist();
    AccessExecutionGraph originalJob = ArchivedJobGenerationUtils.getTestJob();
    AccessExecutionJobVertex originalTask = ArchivedJobGenerationUtils.getTestTask();
    Collection<ArchivedJson> archives = archivist.archiveJsonWithPath(originalJob);
    Assert.assertEquals(1, archives.size());
    ArchivedJson archive = archives.iterator().next();
    Assert.assertEquals("/jobs/" + originalJob.getJobID() + "/vertices/" + originalTask.getJobVertexId() + "/accumulators", archive.getPath());
    compareAccumulators(originalTask, archive.getJson());
}
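The archivist contract exercised here pairs a REST path with pre-rendered JSON, so an archived job can later be served from static files. A hypothetical implementation shape, assuming JsonArchivist exposes only the archiveJsonWithPath method used above and that ArchivedJson pairs a path with a JSON string:

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;

// Hypothetical archivist, shaped after the test above; the class name and
// the empty JSON payload are illustrative, not Flink code.
public class MyAccumulatorsJsonArchivist implements JsonArchivist {
    @Override
    public Collection<ArchivedJson> archiveJsonWithPath(AccessExecutionGraph graph) throws IOException {
        List<ArchivedJson> archives = new ArrayList<>();
        for (AccessExecutionJobVertex vertex : graph.getVerticesTopologically()) {
            String path = "/jobs/" + graph.getJobID() + "/vertices/" + vertex.getJobVertexId() + "/accumulators";
            archives.add(new ArchivedJson(path, "{}")); // real code would render the vertex accumulators here
        }
        return archives;
    }
}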
Use of org.apache.flink.runtime.executiongraph.AccessExecutionJobVertex in project flink by apache.
The class JobVertexDetailsHandlerTest, method testJsonGeneration:
@Test
public void testJsonGeneration() throws Exception {
    AccessExecutionJobVertex originalTask = ArchivedJobGenerationUtils.getTestTask();
    String json = JobVertexDetailsHandler.createVertexDetailsJson(
        originalTask, ArchivedJobGenerationUtils.getTestJob().getJobID().toString(), null);
    compareVertexDetails(originalTask, json);
}