Example 46 with TaskManagerLocation

Use of org.apache.flink.runtime.taskmanager.TaskManagerLocation in project flink by apache.

From the class JobVertexTaskManagersHandler, method createVertexDetailsByTaskManagerJson:

public static String createVertexDetailsByTaskManagerJson(AccessExecutionJobVertex jobVertex, String jobID, @Nullable MetricFetcher fetcher) throws IOException {
    StringWriter writer = new StringWriter();
    JsonGenerator gen = JsonFactory.jacksonFactory.createGenerator(writer);
    // Build a map that groups tasks by TaskManager
    Map<String, List<AccessExecutionVertex>> taskManagerVertices = new HashMap<>();
    for (AccessExecutionVertex vertex : jobVertex.getTaskVertices()) {
        TaskManagerLocation location = vertex.getCurrentAssignedResourceLocation();
        String taskManager = location == null ? "(unassigned)" : location.getHostname() + ":" + location.dataPort();
        List<AccessExecutionVertex> vertices = taskManagerVertices.get(taskManager);
        if (vertices == null) {
            vertices = new ArrayList<>();
            taskManagerVertices.put(taskManager, vertices);
        }
        vertices.add(vertex);
    }
    // Build JSON response
    final long now = System.currentTimeMillis();
    gen.writeStartObject();
    gen.writeStringField("id", jobVertex.getJobVertexId().toString());
    gen.writeStringField("name", jobVertex.getName());
    gen.writeNumberField("now", now);
    gen.writeArrayFieldStart("taskmanagers");
    for (Map.Entry<String, List<AccessExecutionVertex>> entry : taskManagerVertices.entrySet()) {
        String host = entry.getKey();
        List<AccessExecutionVertex> taskVertices = entry.getValue();
        int[] tasksPerState = new int[ExecutionState.values().length];
        long startTime = Long.MAX_VALUE;
        long endTime = 0;
        boolean allFinished = true;
        MutableIOMetrics counts = new MutableIOMetrics();
        for (AccessExecutionVertex vertex : taskVertices) {
            final ExecutionState state = vertex.getExecutionState();
            tasksPerState[state.ordinal()]++;
            // take the earliest start time
            long started = vertex.getStateTimestamp(ExecutionState.DEPLOYING);
            if (started > 0) {
                startTime = Math.min(startTime, started);
            }
            allFinished &= state.isTerminal();
            endTime = Math.max(endTime, vertex.getStateTimestamp(state));
            counts.addIOMetrics(vertex.getCurrentExecutionAttempt(), fetcher, jobID, jobVertex.getJobVertexId().toString());
        }
        long duration;
        if (startTime < Long.MAX_VALUE) {
            if (allFinished) {
                duration = endTime - startTime;
            } else {
                endTime = -1L;
                duration = now - startTime;
            }
        } else {
            startTime = -1L;
            endTime = -1L;
            duration = -1L;
        }
        ExecutionState jobVertexState = ExecutionJobVertex.getAggregateJobVertexState(tasksPerState, taskVertices.size());
        gen.writeStartObject();
        gen.writeStringField("host", host);
        gen.writeStringField("status", jobVertexState.name());
        gen.writeNumberField("start-time", startTime);
        gen.writeNumberField("end-time", endTime);
        gen.writeNumberField("duration", duration);
        counts.writeIOMetricsAsJson(gen);
        gen.writeObjectFieldStart("status-counts");
        for (ExecutionState state : ExecutionState.values()) {
            gen.writeNumberField(state.name(), tasksPerState[state.ordinal()]);
        }
        gen.writeEndObject();
        gen.writeEndObject();
    }
    gen.writeEndArray();
    gen.writeEndObject();
    gen.close();
    return writer.toString();
}
Also used: ExecutionState (org.apache.flink.runtime.execution.ExecutionState), MutableIOMetrics (org.apache.flink.runtime.webmonitor.utils.MutableIOMetrics), HashMap (java.util.HashMap), TaskManagerLocation (org.apache.flink.runtime.taskmanager.TaskManagerLocation), StringWriter (java.io.StringWriter), JsonGenerator (com.fasterxml.jackson.core.JsonGenerator), ArrayList (java.util.ArrayList), List (java.util.List), Map (java.util.Map), AccessExecutionVertex (org.apache.flink.runtime.executiongraph.AccessExecutionVertex)
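
The JSON written above contains one entry per "host:dataPort" key under the "taskmanagers" array. As a minimal consumption sketch (an assumption, not taken from the Flink sources), the output could be parsed back with Jackson; the helper name and the jobVertex/jobId arguments are illustrative, the sketch is assumed to sit next to the handler class, and the @Nullable MetricFetcher is simply passed as null:

import java.io.IOException;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.flink.runtime.executiongraph.AccessExecutionJobVertex;

// Hypothetical helper: print one summary line per TaskManager entry in the handler output.
static void printVertexDetailsByTaskManager(AccessExecutionJobVertex jobVertex, String jobId) throws IOException {
    // passing null is permitted because the MetricFetcher parameter is @Nullable
    String json = JobVertexTaskManagersHandler.createVertexDetailsByTaskManagerJson(jobVertex, jobId, null);
    JsonNode root = new ObjectMapper().readTree(json);
    for (JsonNode tm : root.get("taskmanagers")) {
        // "host" is hostname:dataPort from the TaskManagerLocation, or "(unassigned)"
        System.out.println(tm.get("host").asText() + " -> " + tm.get("status").asText()
                + ", duration " + tm.get("duration").asLong() + " ms");
    }
}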

Example 47 with TaskManagerLocation

Use of org.apache.flink.runtime.taskmanager.TaskManagerLocation in project flink by apache.

From the class SubtasksAllAccumulatorsHandler, method createSubtasksAccumulatorsJson:

public static String createSubtasksAccumulatorsJson(AccessExecutionJobVertex jobVertex) throws IOException {
    StringWriter writer = new StringWriter();
    JsonGenerator gen = JsonFactory.jacksonFactory.createGenerator(writer);
    gen.writeStartObject();
    gen.writeStringField("id", jobVertex.getJobVertexId().toString());
    gen.writeNumberField("parallelism", jobVertex.getParallelism());
    gen.writeArrayFieldStart("subtasks");
    int num = 0;
    for (AccessExecutionVertex vertex : jobVertex.getTaskVertices()) {
        TaskManagerLocation location = vertex.getCurrentAssignedResourceLocation();
        String locationString = location == null ? "(unassigned)" : location.getHostname();
        gen.writeStartObject();
        gen.writeNumberField("subtask", num++);
        gen.writeNumberField("attempt", vertex.getCurrentExecutionAttempt().getAttemptNumber());
        gen.writeStringField("host", locationString);
        StringifiedAccumulatorResult[] accs = vertex.getCurrentExecutionAttempt().getUserAccumulatorsStringified();
        gen.writeArrayFieldStart("user-accumulators");
        for (StringifiedAccumulatorResult acc : accs) {
            gen.writeStartObject();
            gen.writeStringField("name", acc.getName());
            gen.writeStringField("type", acc.getType());
            gen.writeStringField("value", acc.getValue());
            gen.writeEndObject();
        }
        gen.writeEndArray();
        gen.writeEndObject();
    }
    gen.writeEndArray();
    gen.writeEndObject();
    gen.close();
    return writer.toString();
}
Also used: StringWriter (java.io.StringWriter), TaskManagerLocation (org.apache.flink.runtime.taskmanager.TaskManagerLocation), StringifiedAccumulatorResult (org.apache.flink.runtime.accumulators.StringifiedAccumulatorResult), JsonGenerator (com.fasterxml.jackson.core.JsonGenerator), AccessExecutionVertex (org.apache.flink.runtime.executiongraph.AccessExecutionVertex)
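
Similarly, a hedged sketch (not from the Flink sources) of reading the per-subtask user accumulators back out of the JSON built above; jobVertex is again an illustrative AccessExecutionJobVertex reference, and the field names are exactly those written by the handler:

import java.io.IOException;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.flink.runtime.executiongraph.AccessExecutionJobVertex;

// Hypothetical helper: list every user accumulator per subtask and host.
static void printSubtaskAccumulators(AccessExecutionJobVertex jobVertex) throws IOException {
    JsonNode root = new ObjectMapper().readTree(
            SubtasksAllAccumulatorsHandler.createSubtasksAccumulatorsJson(jobVertex));
    for (JsonNode subtask : root.get("subtasks")) {
        for (JsonNode acc : subtask.get("user-accumulators")) {
            System.out.println("subtask " + subtask.get("subtask").asInt()
                    + " on " + subtask.get("host").asText()
                    + ": " + acc.get("name").asText() + " = " + acc.get("value").asText());
        }
    }
}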

Example 48 with TaskManagerLocation

Use of org.apache.flink.runtime.taskmanager.TaskManagerLocation in project flink by apache.

From the class ArchivedJobGenerationUtils, method generateArchivedJob:

private static void generateArchivedJob() throws Exception {
    // Attempt
    StringifiedAccumulatorResult acc1 = new StringifiedAccumulatorResult("name1", "type1", "value1");
    StringifiedAccumulatorResult acc2 = new StringifiedAccumulatorResult("name2", "type2", "value2");
    TaskManagerLocation location = new TaskManagerLocation(new ResourceID("hello"), InetAddress.getLocalHost(), 1234);
    originalAttempt = new ArchivedExecutionBuilder()
            .setStateTimestamps(new long[] { 1, 2, 3, 4, 5, 6, 7, 8, 9 })
            .setParallelSubtaskIndex(1)
            .setAttemptNumber(0)
            .setAssignedResourceLocation(location)
            .setUserAccumulators(new StringifiedAccumulatorResult[] { acc1, acc2 })
            .setState(ExecutionState.FINISHED)
            .setFailureCause("attemptException")
            .build();
    // Subtask
    originalSubtask = new ArchivedExecutionVertexBuilder()
            .setSubtaskIndex(originalAttempt.getParallelSubtaskIndex())
            .setTaskNameWithSubtask("hello(1/1)")
            .setCurrentExecution(originalAttempt)
            .build();
    // Task
    originalTask = new ArchivedExecutionJobVertexBuilder()
            .setTaskVertices(new ArchivedExecutionVertex[] { originalSubtask })
            .build();
    // Job
    Map<JobVertexID, ArchivedExecutionJobVertex> tasks = new HashMap<>();
    tasks.put(originalTask.getJobVertexId(), originalTask);
    originalJob = new ArchivedExecutionGraphBuilder()
            .setJobID(new JobID())
            .setTasks(tasks)
            .setFailureCause("jobException")
            .setState(JobStatus.FINISHED)
            .setStateTimestamps(new long[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 })
            .setArchivedUserAccumulators(new StringifiedAccumulatorResult[] { acc1, acc2 })
            .build();
}
Also used: ArchivedExecutionJobVertex (org.apache.flink.runtime.executiongraph.ArchivedExecutionJobVertex), ResourceID (org.apache.flink.runtime.clusterframework.types.ResourceID), HashMap (java.util.HashMap), TaskManagerLocation (org.apache.flink.runtime.taskmanager.TaskManagerLocation), JobVertexID (org.apache.flink.runtime.jobgraph.JobVertexID), StringifiedAccumulatorResult (org.apache.flink.runtime.accumulators.StringifiedAccumulatorResult), JobID (org.apache.flink.api.common.JobID)
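
As a small standalone sketch (an assumption, not part of the generator above), the TaskManagerLocation constructed for this fixture exposes exactly the pieces the REST handlers read:

import java.net.InetAddress;
import java.net.UnknownHostException;
import org.apache.flink.runtime.clusterframework.types.ResourceID;
import org.apache.flink.runtime.taskmanager.TaskManagerLocation;

// Hypothetical helper using the same constructor as generateArchivedJob(): resource id, address, data port.
static String exampleHostString() throws UnknownHostException {
    TaskManagerLocation location = new TaskManagerLocation(
            new ResourceID("hello"), InetAddress.getLocalHost(), 1234);
    // the "host" fields in the handler JSON are built from these two accessors
    return location.getHostname() + ":" + location.dataPort();
}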

Example 49 with TaskManagerLocation

Use of org.apache.flink.runtime.taskmanager.TaskManagerLocation in project flink by apache.

From the class JobVertexDetailsHandlerTest, method compareVertexDetails:

private static void compareVertexDetails(AccessExecutionJobVertex originalTask, String json) throws IOException {
    JsonNode result = ArchivedJobGenerationUtils.mapper.readTree(json);
    Assert.assertEquals(originalTask.getJobVertexId().toString(), result.get("id").asText());
    Assert.assertEquals(originalTask.getName(), result.get("name").asText());
    Assert.assertEquals(originalTask.getParallelism(), result.get("parallelism").asInt());
    Assert.assertTrue(result.get("now").asLong() > 0);
    ArrayNode subtasks = (ArrayNode) result.get("subtasks");
    Assert.assertEquals(originalTask.getTaskVertices().length, subtasks.size());
    for (int x = 0; x < originalTask.getTaskVertices().length; x++) {
        AccessExecutionVertex expectedSubtask = originalTask.getTaskVertices()[x];
        JsonNode subtask = subtasks.get(x);
        Assert.assertEquals(x, subtask.get("subtask").asInt());
        Assert.assertEquals(expectedSubtask.getExecutionState().name(), subtask.get("status").asText());
        Assert.assertEquals(expectedSubtask.getCurrentExecutionAttempt().getAttemptNumber(), subtask.get("attempt").asInt());
        TaskManagerLocation location = expectedSubtask.getCurrentAssignedResourceLocation();
        String expectedLocationString = location.getHostname() + ":" + location.dataPort();
        Assert.assertEquals(expectedLocationString, subtask.get("host").asText());
        long start = expectedSubtask.getStateTimestamp(ExecutionState.DEPLOYING);
        Assert.assertEquals(start, subtask.get("start-time").asLong());
        long end = expectedSubtask.getStateTimestamp(ExecutionState.FINISHED);
        Assert.assertEquals(end, subtask.get("end-time").asLong());
        Assert.assertEquals(end - start, subtask.get("duration").asLong());
        ArchivedJobGenerationUtils.compareIoMetrics(expectedSubtask.getCurrentExecutionAttempt().getIOMetrics(), subtask.get("metrics"));
    }
}
Also used: TaskManagerLocation (org.apache.flink.runtime.taskmanager.TaskManagerLocation), JsonNode (com.fasterxml.jackson.databind.JsonNode), ArrayNode (com.fasterxml.jackson.databind.node.ArrayNode), AccessExecutionVertex (org.apache.flink.runtime.executiongraph.AccessExecutionVertex)

Example 50 with TaskManagerLocation

Use of org.apache.flink.runtime.taskmanager.TaskManagerLocation in project flink by apache.

From the class ExecutionVertex, method getPreferredLocationsBasedOnInputs:

/**
 * Gets the location preferences of the vertex's current task execution, as determined by the locations
 * of the predecessors from which it receives input data.
 * If there are more than MAX_DISTINCT_LOCATIONS_TO_CONSIDER different locations of source data, this
 * method returns an empty collection to indicate no location preference.
 *
 * @return The preferred locations based on input streams, or an empty iterable
 *         if there is no input-based preference.
 */
public Iterable<TaskManagerLocation> getPreferredLocationsBasedOnInputs() {
    // otherwise, base the preferred locations on the input connections
    if (inputEdges == null) {
        return Collections.emptySet();
    } else {
        Set<TaskManagerLocation> locations = new HashSet<>();
        Set<TaskManagerLocation> inputLocations = new HashSet<>();
        // go over all inputs
        for (int i = 0; i < inputEdges.length; i++) {
            inputLocations.clear();
            ExecutionEdge[] sources = inputEdges[i];
            if (sources != null) {
                // go over all input sources
                for (int k = 0; k < sources.length; k++) {
                    // look-up assigned slot of input source
                    SimpleSlot sourceSlot = sources[k].getSource().getProducer().getCurrentAssignedResource();
                    if (sourceSlot != null) {
                        // add input location
                        inputLocations.add(sourceSlot.getTaskManagerLocation());
                        // inputs which have too many distinct sources are not considered
                        if (inputLocations.size() > MAX_DISTINCT_LOCATIONS_TO_CONSIDER) {
                            inputLocations.clear();
                            break;
                        }
                    }
                }
            }
            // keep the locations of the input with the least preferred locations
            if (locations.isEmpty() || // nothing assigned yet
                    (!inputLocations.isEmpty() && inputLocations.size() < locations.size())) {
                // current input has fewer preferred locations
                locations.clear();
                locations.addAll(inputLocations);
            }
        }
        return locations.isEmpty() ? Collections.<TaskManagerLocation>emptyList() : locations;
    }
}
Also used: TaskManagerLocation (org.apache.flink.runtime.taskmanager.TaskManagerLocation), SimpleSlot (org.apache.flink.runtime.instance.SimpleSlot), CoLocationConstraint (org.apache.flink.runtime.jobmanager.scheduler.CoLocationConstraint), HashSet (java.util.HashSet)
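
A caller-side sketch (an assumption, not Flink scheduler code) of consuming the returned preferences; vertex stands in for an ExecutionVertex obtained from the execution graph:

import org.apache.flink.runtime.executiongraph.ExecutionVertex;
import org.apache.flink.runtime.taskmanager.TaskManagerLocation;

// Hypothetical consumer: print the preferred TaskManagers for one vertex.
static void printPreferredLocations(ExecutionVertex vertex) {
    for (TaskManagerLocation loc : vertex.getPreferredLocationsBasedOnInputs()) {
        // an empty iterable means "no input-based preference"; otherwise these are the
        // TaskManagers hosting the producers of the input with the fewest distinct locations
        System.out.println("prefer " + loc.getHostname() + ":" + loc.dataPort());
    }
}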

Aggregations

TaskManagerLocation (org.apache.flink.runtime.taskmanager.TaskManagerLocation): 84
Test (org.junit.Test): 42
ResourceID (org.apache.flink.runtime.clusterframework.types.ResourceID): 25
JobVertexID (org.apache.flink.runtime.jobgraph.JobVertexID): 18
AccessExecutionVertex (org.apache.flink.runtime.executiongraph.AccessExecutionVertex): 15
SimpleSlot (org.apache.flink.runtime.instance.SimpleSlot): 15
ArrayList (java.util.ArrayList): 14
JobID (org.apache.flink.api.common.JobID): 13
InetAddress (java.net.InetAddress): 12
ExecutionException (java.util.concurrent.ExecutionException): 12
AllocationID (org.apache.flink.runtime.clusterframework.types.AllocationID): 12
ExecutionState (org.apache.flink.runtime.execution.ExecutionState): 12
Instance (org.apache.flink.runtime.instance.Instance): 12
LocalTaskManagerLocation (org.apache.flink.runtime.taskmanager.LocalTaskManagerLocation): 11
JobGraph (org.apache.flink.runtime.jobgraph.JobGraph): 10
HashMap (java.util.HashMap): 9
ActorTaskManagerGateway (org.apache.flink.runtime.jobmanager.slots.ActorTaskManagerGateway): 9
Collection (java.util.Collection): 8
SchedulerTestUtils.getRandomInstance (org.apache.flink.runtime.jobmanager.scheduler.SchedulerTestUtils.getRandomInstance): 8
List (java.util.List): 7