Example 16 with PartitionId

use of org.apache.hyracks.api.partitions.PartitionId in project asterixdb by apache.

the class JobExecutor method assignRunnabilityRank.

/*
     * Runnability rank has the following semantics
     * Runnability(Runnable TaskCluster depending on completed TaskClusters) = {RUNNABLE, 0}
     * Runnability(Runnable TaskCluster) = max(Rank(Dependent TaskClusters)) + 1
     * Runnability(Non-schedulable TaskCluster) = {NOT_RUNNABLE, _}
     */
private Runnability assignRunnabilityRank(TaskCluster goal, Map<TaskCluster, Runnability> runnabilityMap) {
    if (LOGGER.isLoggable(Level.FINE)) {
        LOGGER.fine("Computing runnability: " + goal);
    }
    if (runnabilityMap.containsKey(goal)) {
        return runnabilityMap.get(goal);
    }
    TaskClusterAttempt lastAttempt = findLastTaskClusterAttempt(goal);
    if (lastAttempt != null) {
        if (LOGGER.isLoggable(Level.FINE)) {
            LOGGER.fine("Last Attempt Status: " + lastAttempt.getStatus());
        }
        if (lastAttempt.getStatus() == TaskClusterAttempt.TaskClusterStatus.COMPLETED) {
            Runnability runnability = new Runnability(Runnability.Tag.COMPLETED, Integer.MIN_VALUE);
            runnabilityMap.put(goal, runnability);
            return runnability;
        }
        if (lastAttempt.getStatus() == TaskClusterAttempt.TaskClusterStatus.RUNNING) {
            Runnability runnability = new Runnability(Runnability.Tag.RUNNING, Integer.MIN_VALUE);
            runnabilityMap.put(goal, runnability);
            return runnability;
        }
    }
    Map<ConnectorDescriptorId, IConnectorPolicy> connectorPolicyMap = jobRun.getConnectorPolicyMap();
    PartitionMatchMaker pmm = jobRun.getPartitionMatchMaker();
    Runnability aggregateRunnability = new Runnability(Runnability.Tag.RUNNABLE, 0);
    for (PartitionId pid : goal.getRequiredPartitions()) {
        if (LOGGER.isLoggable(Level.FINE)) {
            LOGGER.fine("Inspecting required partition: " + pid);
        }
        Runnability runnability;
        ConnectorDescriptorId cdId = pid.getConnectorDescriptorId();
        IConnectorPolicy cPolicy = connectorPolicyMap.get(cdId);
        PartitionState maxState = pmm.getMaximumAvailableState(pid);
        if (LOGGER.isLoggable(Level.FINE)) {
            LOGGER.fine("Policy: " + cPolicy + " maxState: " + maxState);
        }
        if (PartitionState.COMMITTED.equals(maxState)) {
            runnability = new Runnability(Runnability.Tag.RUNNABLE, 0);
        } else if (PartitionState.STARTED.equals(maxState) && !cPolicy.consumerWaitsForProducerToFinish()) {
            runnability = new Runnability(Runnability.Tag.RUNNABLE, 1);
        } else {
            runnability = assignRunnabilityRank(partitionProducingTaskClusterMap.get(pid), runnabilityMap);
            switch(runnability.getTag()) {
                case RUNNABLE:
                    if (cPolicy.consumerWaitsForProducerToFinish()) {
                        runnability = new Runnability(Runnability.Tag.NOT_RUNNABLE, Integer.MAX_VALUE);
                    } else {
                        runnability = new Runnability(Runnability.Tag.RUNNABLE, runnability.getPriority() + 1);
                    }
                    break;
                case NOT_RUNNABLE:
                    break;
                case RUNNING:
                    if (cPolicy.consumerWaitsForProducerToFinish()) {
                        runnability = new Runnability(Runnability.Tag.NOT_RUNNABLE, Integer.MAX_VALUE);
                    } else {
                        runnability = new Runnability(Runnability.Tag.RUNNABLE, 1);
                    }
                    break;
                default:
                    break;
            }
        }
        aggregateRunnability = Runnability.getWorstCase(aggregateRunnability, runnability);
        if (aggregateRunnability.getTag() == Runnability.Tag.NOT_RUNNABLE) {
            // already not runnable -- cannot get better. bail.
            break;
        }
        if (LOGGER.isLoggable(Level.FINE)) {
            LOGGER.fine("aggregateRunnability: " + aggregateRunnability);
        }
    }
    runnabilityMap.put(goal, aggregateRunnability);
    return aggregateRunnability;
}
Also used: TaskClusterAttempt (org.apache.hyracks.control.cc.job.TaskClusterAttempt), IConnectorPolicy (org.apache.hyracks.api.dataflow.connectors.IConnectorPolicy), ConnectorDescriptorId (org.apache.hyracks.api.dataflow.ConnectorDescriptorId), PartitionState (org.apache.hyracks.control.common.job.PartitionState), PartitionMatchMaker (org.apache.hyracks.control.cc.partitions.PartitionMatchMaker), PartitionId (org.apache.hyracks.api.partitions.PartitionId)
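The ranking semantics in the method's comment combine through Runnability.getWorstCase. Below is a minimal, hypothetical sketch of such a worst-case combinator, written for illustration only; the tag ordering and tie-breaking shown here are assumptions, not the verified implementation of org.apache.hyracks.control.cc.executor.Runnability.

// Hypothetical sketch of a worst-case combinator for runnability ranks.
// Assumption: tags are ordered best-to-worst, and among two RUNNABLE ranks
// the higher priority number (more producer hops away) is worse.
public final class RunnabilitySketch {
    public enum Tag { COMPLETED, RUNNING, RUNNABLE, NOT_RUNNABLE }

    private final Tag tag;
    private final int priority;

    public RunnabilitySketch(Tag tag, int priority) {
        this.tag = tag;
        this.priority = priority;
    }

    public Tag getTag() { return tag; }
    public int getPriority() { return priority; }

    public static RunnabilitySketch getWorstCase(RunnabilitySketch r1, RunnabilitySketch r2) {
        if (r1.tag != r2.tag) {
            // Enum declaration order doubles as a badness order in this sketch.
            return r1.tag.ordinal() > r2.tag.ordinal() ? r1 : r2;
        }
        // Same tag: the larger priority dominates, since it means more
        // producer TaskClusters must run before this one can.
        return r1.priority >= r2.priority ? r1 : r2;
    }
}

Under this sketch, combining {RUNNABLE, 0} with {NOT_RUNNABLE, MAX_VALUE} yields NOT_RUNNABLE, which is why the aggregation loop above can bail out early once that tag appears.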

Example 17 with PartitionId

use of org.apache.hyracks.api.partitions.PartitionId in project asterixdb by apache.

the class JobRun method toJSON.

public ObjectNode toJSON() {
    ObjectMapper om = new ObjectMapper();
    ObjectNode result = om.createObjectNode();
    result.put("job-id", jobId.toString());
    result.putPOJO("status", getStatus());
    result.put("create-time", getCreateTime());
    result.put("start-time", getStartTime());
    result.put("end-time", getEndTime());
    ArrayNode aClusters = om.createArrayNode();
    for (ActivityCluster ac : acg.getActivityClusterMap().values()) {
        ObjectNode acJSON = om.createObjectNode();
        acJSON.put("activity-cluster-id", String.valueOf(ac.getId()));
        ArrayNode activitiesJSON = om.createArrayNode();
        for (ActivityId aid : ac.getActivityMap().keySet()) {
            activitiesJSON.addPOJO(aid);
        }
        acJSON.set("activities", activitiesJSON);
        ArrayNode dependenciesJSON = om.createArrayNode();
        for (ActivityCluster dependency : ac.getDependencies()) {
            dependenciesJSON.add(String.valueOf(dependency.getId()));
        }
        acJSON.set("dependencies", dependenciesJSON);
        ActivityClusterPlan acp = activityClusterPlanMap.get(ac.getId());
        if (acp == null) {
            acJSON.putNull("plan");
        } else {
            ObjectNode planJSON = om.createObjectNode();
            ArrayNode acTasks = om.createArrayNode();
            for (Map.Entry<ActivityId, ActivityPlan> e : acp.getActivityPlanMap().entrySet()) {
                ActivityPlan acPlan = e.getValue();
                ObjectNode entry = om.createObjectNode();
                entry.put("activity-id", e.getKey().toString());
                ActivityPartitionDetails apd = acPlan.getActivityPartitionDetails();
                entry.put("partition-count", apd.getPartitionCount());
                ArrayNode inPartCountsJSON = om.createArrayNode();
                int[] inPartCounts = apd.getInputPartitionCounts();
                if (inPartCounts != null) {
                    for (int i : inPartCounts) {
                        inPartCountsJSON.add(i);
                    }
                }
                entry.set("input-partition-counts", inPartCountsJSON);
                ArrayNode outPartCountsJSON = om.createArrayNode();
                int[] outPartCounts = apd.getOutputPartitionCounts();
                if (outPartCounts != null) {
                    for (int o : outPartCounts) {
                        outPartCountsJSON.add(o);
                    }
                }
                entry.set("output-partition-counts", outPartCountsJSON);
                ArrayNode tasks = om.createArrayNode();
                for (Task t : acPlan.getTasks()) {
                    ObjectNode task = om.createObjectNode();
                    task.put("task-id", t.getTaskId().toString());
                    ArrayNode dependentTasksJSON = om.createArrayNode();
                    for (TaskId dependent : t.getDependents()) {
                        dependentTasksJSON.add(dependent.toString());
                    }
                    task.set("dependents", dependentTasksJSON);
                    ArrayNode dependencyTasksJSON = om.createArrayNode();
                    for (TaskId dependency : t.getDependencies()) {
                        dependencyTasksJSON.add(dependency.toString());
                    }
                    task.set("dependencies", dependencyTasksJSON);
                    tasks.add(task);
                }
                entry.set("tasks", tasks);
                acTasks.add(entry);
            }
            planJSON.set("activities", acTasks);
            ArrayNode tClusters = om.createArrayNode();
            for (TaskCluster tc : acp.getTaskClusters()) {
                ObjectNode c = om.createObjectNode();
                c.put("task-cluster-id", String.valueOf(tc.getTaskClusterId()));
                ArrayNode tasksAry = om.createArrayNode();
                for (Task t : tc.getTasks()) {
                    tasksAry.add(t.getTaskId().toString());
                }
                c.set("tasks", tasksAry);
                ArrayNode prodParts = om.createArrayNode();
                for (PartitionId p : tc.getProducedPartitions()) {
                    prodParts.add(p.toString());
                }
                c.set("produced-partitions", prodParts);
                ArrayNode reqdParts = om.createArrayNode();
                for (PartitionId p : tc.getRequiredPartitions()) {
                    reqdParts.add(p.toString());
                }
                c.set("required-partitions", reqdParts);
                ArrayNode attempts = om.createArrayNode();
                List<TaskClusterAttempt> tcAttempts = tc.getAttempts();
                if (tcAttempts != null) {
                    for (TaskClusterAttempt tca : tcAttempts) {
                        ObjectNode attempt = om.createObjectNode();
                        attempt.put("attempt", tca.getAttempt());
                        attempt.putPOJO("status", tca.getStatus());
                        attempt.put("start-time", tca.getStartTime());
                        attempt.put("end-time", tca.getEndTime());
                        ArrayNode taskAttempts = om.createArrayNode();
                        for (TaskAttempt ta : tca.getTaskAttempts().values()) {
                            ObjectNode taskAttempt = om.createObjectNode();
                            taskAttempt.putPOJO("task-id", ta.getTaskAttemptId().getTaskId());
                            taskAttempt.putPOJO("task-attempt-id", ta.getTaskAttemptId());
                            taskAttempt.putPOJO("status", ta.getStatus());
                            taskAttempt.put("node-id", ta.getNodeId());
                            taskAttempt.put("start-time", ta.getStartTime());
                            taskAttempt.put("end-time", ta.getEndTime());
                            List<Exception> exceptions = ta.getExceptions();
                            if (exceptions != null && !exceptions.isEmpty()) {
                                List<Exception> filteredExceptions = ExceptionUtils.getActualExceptions(exceptions);
                                for (Exception exception : filteredExceptions) {
                                    StringWriter exceptionWriter = new StringWriter();
                                    exception.printStackTrace(new PrintWriter(exceptionWriter));
                                    taskAttempt.put("failure-details", exceptionWriter.toString());
                                }
                            }
                            taskAttempts.add(taskAttempt);
                        }
                        attempt.set("task-attempts", taskAttempts);
                        attempts.add(attempt);
                    }
                }
                c.set("attempts", attempts);
                tClusters.add(c);
            }
            planJSON.set("task-clusters", tClusters);
            acJSON.set("plan", planJSON);
        }
        aClusters.add(acJSON);
    }
    result.set("activity-clusters", aClusters);
    result.set("profile", profile.toJSON());
    return result;
}
Also used: TaskId (org.apache.hyracks.api.dataflow.TaskId), ObjectNode (com.fasterxml.jackson.databind.node.ObjectNode), ActivityId (org.apache.hyracks.api.dataflow.ActivityId), PartitionId (org.apache.hyracks.api.partitions.PartitionId), Constraint (org.apache.hyracks.api.constraints.Constraint), HyracksException (org.apache.hyracks.api.exceptions.HyracksException), ActivityCluster (org.apache.hyracks.api.job.ActivityCluster), StringWriter (java.io.StringWriter), ActivityPartitionDetails (org.apache.hyracks.control.cc.executor.ActivityPartitionDetails), ArrayNode (com.fasterxml.jackson.databind.node.ArrayNode), HashMap (java.util.HashMap), Map (java.util.Map), ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper), PrintWriter (java.io.PrintWriter)
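toJSON only builds the tree; rendering it as text is plain Jackson. A small usage sketch follows (run is an assumed JobRun reference; the Jackson calls are standard public API):

import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ObjectNode;
import org.apache.hyracks.control.cc.job.JobRun;

public final class JobRunJsonPrinter {
    // Pretty-print the summary tree built by JobRun.toJSON().
    public static String describe(JobRun run) throws JsonProcessingException {
        ObjectNode summary = run.toJSON();
        return new ObjectMapper().writerWithDefaultPrettyPrinter().writeValueAsString(summary);
    }
}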

Example 18 with PartitionId

use of org.apache.hyracks.api.partitions.PartitionId in project asterixdb by apache.

the class PartitionMatchMaker method matchPartitionRequest.

public Pair<PartitionDescriptor, PartitionRequest> matchPartitionRequest(PartitionRequest partitionRequest) {
    Pair<PartitionDescriptor, PartitionRequest> match = null;
    PartitionId pid = partitionRequest.getPartitionId();
    List<PartitionDescriptor> descriptors = partitionDescriptors.get(pid);
    if (descriptors != null) {
        Iterator<PartitionDescriptor> i = descriptors.iterator();
        while (i.hasNext()) {
            PartitionDescriptor descriptor = i.next();
            if (descriptor.getState().isAtLeast(partitionRequest.getMinimumState())) {
                match = Pair.<PartitionDescriptor, PartitionRequest>of(descriptor, partitionRequest);
                if (!descriptor.isReusable()) {
                    i.remove();
                }
                break;
            }
        }
        if (descriptors.isEmpty()) {
            partitionDescriptors.remove(pid);
        }
    }
    if (match == null) {
        List<PartitionRequest> requests = partitionRequests.get(pid);
        if (requests == null) {
            requests = new ArrayList<PartitionRequest>();
            partitionRequests.put(pid, requests);
        }
        requests.add(partitionRequest);
    }
    return match;
}
Also used: PartitionRequest (org.apache.hyracks.control.common.job.PartitionRequest), PartitionDescriptor (org.apache.hyracks.control.common.job.PartitionDescriptor), PartitionId (org.apache.hyracks.api.partitions.PartitionId)
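The match above hinges on PartitionState.isAtLeast. Here is a hedged sketch of that comparison, assuming the only states are STARTED and COMMITTED (the two states these examples use) and that declaration order encodes progress; the real enum lives in org.apache.hyracks.control.common.job.

// Hypothetical sketch of the state lattice behind matchPartitionRequest.
// Assumption: a partition moves STARTED -> COMMITTED, and a descriptor
// satisfies a request once its state has reached the requested minimum.
public enum PartitionStateSketch {
    STARTED,
    COMMITTED;

    public boolean isAtLeast(PartitionStateSketch minState) {
        // Declaration order encodes progress, so ordinal comparison suffices.
        return ordinal() >= minState.ordinal();
    }
}

Under this reading, a COMMITTED descriptor satisfies any request, while a STARTED descriptor only satisfies requests whose minimum state is STARTED.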

Example 19 with PartitionId

use of org.apache.hyracks.api.partitions.PartitionId in project asterixdb by apache.

the class RegisterPartitionRequestWork method run.

@Override
public void run() {
    PartitionId pid = partitionRequest.getPartitionId();
    IJobManager jobManager = ccs.getJobManager();
    JobRun run = jobManager.get(pid.getJobId());
    if (run == null) {
        return;
    }
    PartitionMatchMaker pmm = run.getPartitionMatchMaker();
    Pair<PartitionDescriptor, PartitionRequest> match = pmm.matchPartitionRequest(partitionRequest);
    if (match != null) {
        try {
            PartitionUtils.reportPartitionMatch(ccs, pid, match);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
Also used: PartitionRequest (org.apache.hyracks.control.common.job.PartitionRequest), PartitionDescriptor (org.apache.hyracks.control.common.job.PartitionDescriptor), IJobManager (org.apache.hyracks.control.cc.job.IJobManager), PartitionMatchMaker (org.apache.hyracks.control.cc.partitions.PartitionMatchMaker), PartitionId (org.apache.hyracks.api.partitions.PartitionId), JobRun (org.apache.hyracks.control.cc.job.JobRun)
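run() can route the request because a PartitionId carries its owning job. A sketch of constructing one follows; the four-argument constructor mirrors the accessors seen in these examples (getJobId(), getConnectorDescriptorId()), but treat the exact signature as an assumption.

import org.apache.hyracks.api.dataflow.ConnectorDescriptorId;
import org.apache.hyracks.api.job.JobId;
import org.apache.hyracks.api.partitions.PartitionId;

public final class PartitionIdExample {
    // A PartitionId names one sender->receiver edge of one connector within
    // one job, which is why pid.getJobId() above is enough to find the JobRun.
    static PartitionId examplePid(JobId jobId, ConnectorDescriptorId cdId) {
        int senderIndex = 0;   // producing task's partition index (illustrative)
        int receiverIndex = 0; // consuming task's partition index (illustrative)
        return new PartitionId(jobId, cdId, senderIndex, receiverIndex);
    }
}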

Example 20 with PartitionId

use of org.apache.hyracks.api.partitions.PartitionId in project asterixdb by apache.

the class Joblet method advertisePartitionRequest.

public synchronized void advertisePartitionRequest(TaskAttemptId taId, Collection<PartitionId> pids, IPartitionCollector collector, PartitionState minState) throws Exception {
    for (PartitionId pid : pids) {
        partitionRequestMap.put(pid, collector);
        PartitionRequest req = new PartitionRequest(pid, nodeController.getId(), taId, minState);
        nodeController.getClusterController().registerPartitionRequest(req);
    }
}
Also used: PartitionRequest (org.apache.hyracks.control.common.job.PartitionRequest), PartitionId (org.apache.hyracks.api.partitions.PartitionId)
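A caller-side sketch of the advertisement above, for a consumer that needs every sender partition of one connector. Everything here except the advertisePartitionRequest call itself (the joblet, taId, collector, jobId, cdId, and nSenders parameters) is a hypothetical stand-in.

import java.util.ArrayList;
import java.util.List;
import org.apache.hyracks.api.comm.IPartitionCollector;
import org.apache.hyracks.api.dataflow.ConnectorDescriptorId;
import org.apache.hyracks.api.dataflow.TaskAttemptId;
import org.apache.hyracks.api.job.JobId;
import org.apache.hyracks.api.partitions.PartitionId;
import org.apache.hyracks.control.common.job.PartitionState;
import org.apache.hyracks.control.nc.Joblet;

public final class AdvertiseExample {
    // Request every sender partition feeding receiver 0. With STARTED as the
    // minimum state, data may flow before the producer commits, subject to
    // the connector policy checks seen in Example 16.
    static void requestAllSenders(Joblet joblet, TaskAttemptId taId, IPartitionCollector collector,
            JobId jobId, ConnectorDescriptorId cdId, int nSenders) throws Exception {
        List<PartitionId> pids = new ArrayList<>();
        for (int sender = 0; sender < nSenders; sender++) {
            pids.add(new PartitionId(jobId, cdId, sender, 0));
        }
        joblet.advertisePartitionRequest(taId, pids, collector, PartitionState.STARTED);
    }
}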

Aggregations

PartitionId (org.apache.hyracks.api.partitions.PartitionId): 22 uses
PartitionRequest (org.apache.hyracks.control.common.job.PartitionRequest): 6 uses
ConnectorDescriptorId (org.apache.hyracks.api.dataflow.ConnectorDescriptorId): 5 uses
PartitionDescriptor (org.apache.hyracks.control.common.job.PartitionDescriptor): 5 uses
ArrayList (java.util.ArrayList): 4 uses
PartitionMatchMaker (org.apache.hyracks.control.cc.partitions.PartitionMatchMaker): 4 uses
List (java.util.List): 3 uses
Pair (org.apache.commons.lang3.tuple.Pair): 3 uses
ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper): 2 uses
ArrayNode (com.fasterxml.jackson.databind.node.ArrayNode): 2 uses
ObjectNode (com.fasterxml.jackson.databind.node.ObjectNode): 2 uses
HashMap (java.util.HashMap): 2 uses
Map (java.util.Map): 2 uses
PartitionChannel (org.apache.hyracks.api.comm.PartitionChannel): 2 uses
ActivityId (org.apache.hyracks.api.dataflow.ActivityId): 2 uses
TaskId (org.apache.hyracks.api.dataflow.TaskId): 2 uses
IConnectorPolicy (org.apache.hyracks.api.dataflow.connectors.IConnectorPolicy): 2 uses
JobId (org.apache.hyracks.api.job.JobId): 2 uses
IJobManager (org.apache.hyracks.control.cc.job.IJobManager): 2 uses
JobRun (org.apache.hyracks.control.cc.job.JobRun): 2 uses