Example usage of org.apache.flink.runtime.executiongraph.AccessExecutionVertex in the apache/flink project, taken from the class SubtasksTimesHandler, method createSubtaskTimesJson:
public static String createSubtaskTimesJson(AccessExecutionJobVertex jobVertex) throws IOException {
final long now = System.currentTimeMillis();
StringWriter writer = new StringWriter();
JsonGenerator gen = JsonFactory.jacksonFactory.createGenerator(writer);
gen.writeStartObject();
gen.writeStringField("id", jobVertex.getJobVertexId().toString());
gen.writeStringField("name", jobVertex.getName());
gen.writeNumberField("now", now);
gen.writeArrayFieldStart("subtasks");
int num = 0;
for (AccessExecutionVertex vertex : jobVertex.getTaskVertices()) {
long[] timestamps = vertex.getCurrentExecutionAttempt().getStateTimestamps();
ExecutionState status = vertex.getExecutionState();
long scheduledTime = timestamps[ExecutionState.SCHEDULED.ordinal()];
long start = scheduledTime > 0 ? scheduledTime : -1;
long end = status.isTerminal() ? timestamps[status.ordinal()] : now;
long duration = start >= 0 ? end - start : -1L;
gen.writeStartObject();
gen.writeNumberField("subtask", num++);
TaskManagerLocation location = vertex.getCurrentAssignedResourceLocation();
String locationString = location == null ? "(unassigned)" : location.getHostname();
gen.writeStringField("host", locationString);
gen.writeNumberField("duration", duration);
gen.writeObjectFieldStart("timestamps");
for (ExecutionState state : ExecutionState.values()) {
gen.writeNumberField(state.name(), timestamps[state.ordinal()]);
}
gen.writeEndObject();
gen.writeEndObject();
}
gen.writeEndArray();
gen.writeEndObject();
gen.close();
return writer.toString();
}
Example usage of org.apache.flink.runtime.executiongraph.AccessExecutionVertex in the apache/flink project, taken from the class JobDetailsHandlerTest, method compareJobDetails:
private static void compareJobDetails(AccessExecutionGraph originalJob, String json) throws IOException {
JsonNode result = ArchivedJobGenerationUtils.mapper.readTree(json);
Assert.assertEquals(originalJob.getJobID().toString(), result.get("jid").asText());
Assert.assertEquals(originalJob.getJobName(), result.get("name").asText());
Assert.assertEquals(originalJob.isStoppable(), result.get("isStoppable").asBoolean());
Assert.assertEquals(originalJob.getState().name(), result.get("state").asText());
Assert.assertEquals(originalJob.getStatusTimestamp(JobStatus.CREATED), result.get("start-time").asLong());
Assert.assertEquals(originalJob.getStatusTimestamp(originalJob.getState()), result.get("end-time").asLong());
Assert.assertEquals(originalJob.getStatusTimestamp(originalJob.getState()) - originalJob.getStatusTimestamp(JobStatus.CREATED), result.get("duration").asLong());
JsonNode timestamps = result.get("timestamps");
for (JobStatus status : JobStatus.values()) {
Assert.assertEquals(originalJob.getStatusTimestamp(status), timestamps.get(status.name()).asLong());
}
ArrayNode tasks = (ArrayNode) result.get("vertices");
int x = 0;
for (AccessExecutionJobVertex expectedTask : originalJob.getVerticesTopologically()) {
JsonNode task = tasks.get(x);
Assert.assertEquals(expectedTask.getJobVertexId().toString(), task.get("id").asText());
Assert.assertEquals(expectedTask.getName(), task.get("name").asText());
Assert.assertEquals(expectedTask.getParallelism(), task.get("parallelism").asInt());
Assert.assertEquals(expectedTask.getAggregateState().name(), task.get("status").asText());
Assert.assertEquals(3, task.get("start-time").asLong());
Assert.assertEquals(5, task.get("end-time").asLong());
Assert.assertEquals(2, task.get("duration").asLong());
JsonNode subtasksPerState = task.get("tasks");
Assert.assertEquals(0, subtasksPerState.get(ExecutionState.CREATED.name()).asInt());
Assert.assertEquals(0, subtasksPerState.get(ExecutionState.SCHEDULED.name()).asInt());
Assert.assertEquals(0, subtasksPerState.get(ExecutionState.DEPLOYING.name()).asInt());
Assert.assertEquals(0, subtasksPerState.get(ExecutionState.RUNNING.name()).asInt());
Assert.assertEquals(1, subtasksPerState.get(ExecutionState.FINISHED.name()).asInt());
Assert.assertEquals(0, subtasksPerState.get(ExecutionState.CANCELING.name()).asInt());
Assert.assertEquals(0, subtasksPerState.get(ExecutionState.CANCELED.name()).asInt());
Assert.assertEquals(0, subtasksPerState.get(ExecutionState.FAILED.name()).asInt());
long expectedNumBytesIn = 0;
long expectedNumBytesOut = 0;
long expectedNumRecordsIn = 0;
long expectedNumRecordsOut = 0;
for (AccessExecutionVertex vertex : expectedTask.getTaskVertices()) {
IOMetrics ioMetrics = vertex.getCurrentExecutionAttempt().getIOMetrics();
expectedNumBytesIn += ioMetrics.getNumBytesInLocal() + ioMetrics.getNumBytesInRemote();
expectedNumBytesOut += ioMetrics.getNumBytesOut();
expectedNumRecordsIn += ioMetrics.getNumRecordsIn();
expectedNumRecordsOut += ioMetrics.getNumRecordsOut();
}
JsonNode metrics = task.get("metrics");
Assert.assertEquals(expectedNumBytesIn, metrics.get("read-bytes").asLong());
Assert.assertEquals(expectedNumBytesOut, metrics.get("write-bytes").asLong());
Assert.assertEquals(expectedNumRecordsIn, metrics.get("read-records").asLong());
Assert.assertEquals(expectedNumRecordsOut, metrics.get("write-records").asLong());
x++;
}
Assert.assertEquals(1, tasks.size());
JsonNode statusCounts = result.get("status-counts");
Assert.assertEquals(0, statusCounts.get(ExecutionState.CREATED.name()).asInt());
Assert.assertEquals(0, statusCounts.get(ExecutionState.SCHEDULED.name()).asInt());
Assert.assertEquals(0, statusCounts.get(ExecutionState.DEPLOYING.name()).asInt());
Assert.assertEquals(1, statusCounts.get(ExecutionState.RUNNING.name()).asInt());
Assert.assertEquals(0, statusCounts.get(ExecutionState.FINISHED.name()).asInt());
Assert.assertEquals(0, statusCounts.get(ExecutionState.CANCELING.name()).asInt());
Assert.assertEquals(0, statusCounts.get(ExecutionState.CANCELED.name()).asInt());
Assert.assertEquals(0, statusCounts.get(ExecutionState.FAILED.name()).asInt());
Assert.assertEquals(ArchivedJobGenerationUtils.mapper.readTree(originalJob.getJsonPlan()), result.get("plan"));
}
Example usage of org.apache.flink.runtime.executiongraph.AccessExecutionVertex in the apache/flink project, taken from the class JobExceptionsHandlerTest, method compareExceptions:
private static void compareExceptions(AccessExecutionGraph originalJob, String json) throws IOException {
JsonNode result = ArchivedJobGenerationUtils.mapper.readTree(json);
Assert.assertEquals(originalJob.getFailureCauseAsString(), result.get("root-exception").asText());
ArrayNode exceptions = (ArrayNode) result.get("all-exceptions");
int x = 0;
for (AccessExecutionVertex expectedSubtask : originalJob.getAllExecutionVertices()) {
if (!expectedSubtask.getFailureCauseAsString().equals(ExceptionUtils.STRINGIFIED_NULL_EXCEPTION)) {
JsonNode exception = exceptions.get(x);
Assert.assertEquals(expectedSubtask.getFailureCauseAsString(), exception.get("exception").asText());
Assert.assertEquals(expectedSubtask.getTaskNameWithSubtaskIndex(), exception.get("task").asText());
TaskManagerLocation location = expectedSubtask.getCurrentAssignedResourceLocation();
String expectedLocationString = location.getFQDNHostname() + ':' + location.dataPort();
Assert.assertEquals(expectedLocationString, exception.get("location").asText());
}
x++;
}
Assert.assertEquals(x > JobExceptionsHandler.MAX_NUMBER_EXCEPTION_TO_REPORT, result.get("truncated").asBoolean());
}
Example usage of org.apache.flink.runtime.executiongraph.AccessExecutionVertex in the apache/flink project, taken from the class JobVertexTaskManagersHandlerTest, method testJsonGeneration:
@Test
public void testJsonGeneration() throws Exception {
AccessExecutionJobVertex originalTask = ArchivedJobGenerationUtils.getTestTask();
AccessExecutionVertex originalSubtask = ArchivedJobGenerationUtils.getTestSubtask();
String json = JobVertexTaskManagersHandler.createVertexDetailsByTaskManagerJson(originalTask, ArchivedJobGenerationUtils.getTestJob().getJobID().toString(), null);
compareVertexTaskManagers(originalTask, originalSubtask, json);
}
Example usage of org.apache.flink.runtime.executiongraph.AccessExecutionVertex in the apache/flink project, taken from the class JobVertexTaskManagersHandlerTest, method compareVertexTaskManagers:
private static void compareVertexTaskManagers(AccessExecutionJobVertex originalTask, AccessExecutionVertex originalSubtask, String json) throws IOException {
JsonNode result = ArchivedJobGenerationUtils.mapper.readTree(json);
Assert.assertEquals(originalTask.getJobVertexId().toString(), result.get("id").asText());
Assert.assertEquals(originalTask.getName(), result.get("name").asText());
Assert.assertTrue(result.get("now").asLong() > 0);
ArrayNode taskmanagers = (ArrayNode) result.get("taskmanagers");
JsonNode taskManager = taskmanagers.get(0);
TaskManagerLocation location = originalSubtask.getCurrentAssignedResourceLocation();
String expectedLocationString = location.getHostname() + ':' + location.dataPort();
Assert.assertEquals(expectedLocationString, taskManager.get("host").asText());
Assert.assertEquals(ExecutionState.FINISHED.name(), taskManager.get("status").asText());
Assert.assertEquals(3, taskManager.get("start-time").asLong());
Assert.assertEquals(5, taskManager.get("end-time").asLong());
Assert.assertEquals(2, taskManager.get("duration").asLong());
JsonNode statusCounts = taskManager.get("status-counts");
Assert.assertEquals(0, statusCounts.get(ExecutionState.CREATED.name()).asInt());
Assert.assertEquals(0, statusCounts.get(ExecutionState.SCHEDULED.name()).asInt());
Assert.assertEquals(0, statusCounts.get(ExecutionState.DEPLOYING.name()).asInt());
Assert.assertEquals(0, statusCounts.get(ExecutionState.RUNNING.name()).asInt());
Assert.assertEquals(1, statusCounts.get(ExecutionState.FINISHED.name()).asInt());
Assert.assertEquals(0, statusCounts.get(ExecutionState.CANCELING.name()).asInt());
Assert.assertEquals(0, statusCounts.get(ExecutionState.CANCELED.name()).asInt());
Assert.assertEquals(0, statusCounts.get(ExecutionState.FAILED.name()).asInt());
long expectedNumBytesIn = 0;
long expectedNumBytesOut = 0;
long expectedNumRecordsIn = 0;
long expectedNumRecordsOut = 0;
for (AccessExecutionVertex vertex : originalTask.getTaskVertices()) {
IOMetrics ioMetrics = vertex.getCurrentExecutionAttempt().getIOMetrics();
expectedNumBytesIn += ioMetrics.getNumBytesInLocal() + ioMetrics.getNumBytesInRemote();
expectedNumBytesOut += ioMetrics.getNumBytesOut();
expectedNumRecordsIn += ioMetrics.getNumRecordsIn();
expectedNumRecordsOut += ioMetrics.getNumRecordsOut();
}
JsonNode metrics = taskManager.get("metrics");
Assert.assertEquals(expectedNumBytesIn, metrics.get("read-bytes").asLong());
Assert.assertEquals(expectedNumBytesOut, metrics.get("write-bytes").asLong());
Assert.assertEquals(expectedNumRecordsIn, metrics.get("read-records").asLong());
Assert.assertEquals(expectedNumRecordsOut, metrics.get("write-records").asLong());
}
Aggregations