Example 11 with ConnectorDescriptorId

Use of org.apache.hyracks.api.dataflow.ConnectorDescriptorId in project asterixdb by apache.

From class ActivityClusterPlanner, the method buildConnectorPolicyAwareTaskClusters:

private TaskCluster[] buildConnectorPolicyAwareTaskClusters(ActivityCluster ac, Map<ActivityId, ActivityPlan> activityPlanMap, Map<TaskId, List<Pair<TaskId, ConnectorDescriptorId>>> taskConnectivity) {
    Map<TaskId, Set<TaskId>> taskClusterMap = new HashMap<>();
    for (ActivityId anId : ac.getActivityMap().keySet()) {
        ActivityPlan ap = activityPlanMap.get(anId);
        Task[] tasks = ap.getTasks();
        for (Task t : tasks) {
            Set<TaskId> cluster = new HashSet<>();
            TaskId tid = t.getTaskId();
            cluster.add(tid);
            taskClusterMap.put(tid, cluster);
        }
    }
    JobRun jobRun = executor.getJobRun();
    Map<ConnectorDescriptorId, IConnectorPolicy> connectorPolicies = jobRun.getConnectorPolicyMap();
    for (Map.Entry<TaskId, List<Pair<TaskId, ConnectorDescriptorId>>> e : taskConnectivity.entrySet()) {
        Set<TaskId> cluster = taskClusterMap.get(e.getKey());
        for (Pair<TaskId, ConnectorDescriptorId> p : e.getValue()) {
            IConnectorPolicy cPolicy = connectorPolicies.get(p.getRight());
            if (cPolicy.requiresProducerConsumerCoscheduling()) {
                cluster.add(p.getLeft());
            }
        }
    }
    /*
     * We compute the transitive closure of this (producer-consumer) relation to find the largest set of
     * tasks that need to be co-scheduled.
     */
    int counter = 0;
    TaskId[] ordinalList = new TaskId[taskClusterMap.size()];
    Map<TaskId, Integer> ordinalMap = new HashMap<>();
    for (TaskId tid : taskClusterMap.keySet()) {
        ordinalList[counter] = tid;
        ordinalMap.put(tid, counter);
        ++counter;
    }
    int n = ordinalList.length;
    BitSet[] paths = new BitSet[n];
    for (Map.Entry<TaskId, Set<TaskId>> e : taskClusterMap.entrySet()) {
        int i = ordinalMap.get(e.getKey());
        BitSet bsi = paths[i];
        if (bsi == null) {
            bsi = new BitSet(n);
            paths[i] = bsi;
        }
        for (TaskId ttid : e.getValue()) {
            int j = ordinalMap.get(ttid);
            paths[i].set(j);
            BitSet bsj = paths[j];
            if (bsj == null) {
                bsj = new BitSet(n);
                paths[j] = bsj;
            }
            bsj.set(i);
        }
    }
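    // Warshall-style pass over the symmetric relation built above: afterwards
    // paths[i] holds every task transitively co-scheduled with task i.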
    for (int k = 0; k < n; ++k) {
        for (int i = paths[k].nextSetBit(0); i >= 0; i = paths[k].nextSetBit(i + 1)) {
            for (int j = paths[i].nextClearBit(0); j < n && j >= 0; j = paths[i].nextClearBit(j + 1)) {
                paths[i].set(j, paths[k].get(j));
                paths[j].set(i, paths[i].get(j));
            }
        }
    }
    BitSet pending = new BitSet(n);
    pending.set(0, n);
    List<List<TaskId>> clusters = new ArrayList<>();
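    // Note: the advance below uses nextSetBit(i), not nextSetBit(i + 1). This is
    // safe because paths[i] always contains i, so bit i is cleared by the inner loop.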
    for (int i = pending.nextSetBit(0); i >= 0; i = pending.nextSetBit(i)) {
        List<TaskId> cluster = new ArrayList<>();
        for (int j = paths[i].nextSetBit(0); j >= 0; j = paths[i].nextSetBit(j + 1)) {
            cluster.add(ordinalList[j]);
            pending.clear(j);
        }
        clusters.add(cluster);
    }
    List<TaskCluster> tcSet = new ArrayList<>();
    counter = 0;
    for (List<TaskId> cluster : clusters) {
        List<Task> taskStates = new ArrayList<>();
        for (TaskId tid : cluster) {
            taskStates.add(activityPlanMap.get(tid.getActivityId()).getTasks()[tid.getPartition()]);
        }
        TaskCluster tc = new TaskCluster(new TaskClusterId(ac.getId(), counter++), ac, taskStates.toArray(new Task[taskStates.size()]));
        tcSet.add(tc);
        for (TaskId tid : cluster) {
            activityPlanMap.get(tid.getActivityId()).getTasks()[tid.getPartition()].setTaskCluster(tc);
        }
    }
    return tcSet.toArray(new TaskCluster[tcSet.size()]);
}
Also used: Task(org.apache.hyracks.control.cc.job.Task) TaskId(org.apache.hyracks.api.dataflow.TaskId) HashSet(java.util.HashSet) Set(java.util.Set) BitSet(java.util.BitSet) HashMap(java.util.HashMap) ActivityId(org.apache.hyracks.api.dataflow.ActivityId) ConnectorDescriptorId(org.apache.hyracks.api.dataflow.ConnectorDescriptorId) ArrayList(java.util.ArrayList) List(java.util.List) IConnectorPolicy(org.apache.hyracks.api.dataflow.connectors.IConnectorPolicy) ActivityPlan(org.apache.hyracks.control.cc.job.ActivityPlan) TaskCluster(org.apache.hyracks.control.cc.job.TaskCluster) Map(java.util.Map) TaskClusterId(org.apache.hyracks.control.cc.job.TaskClusterId) JobRun(org.apache.hyracks.control.cc.job.JobRun)
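
For readers new to the BitSet idiom in buildConnectorPolicyAwareTaskClusters, the following is a minimal, self-contained sketch of the same technique: close a symmetric co-scheduling relation transitively (here with the textbook Warshall formulation rather than the exact loop above) and then emit each closed cluster exactly once. The class name, the edges, and the item count are invented for illustration; this is not Hyracks code.

import java.util.BitSet;

// Minimal illustrative sketch (not Hyracks code): close a symmetric relation
// transitively with Warshall's algorithm over BitSets, then emit each cluster once.
public class BitSetClosureSketch {

    static void relate(BitSet[] paths, int a, int b) {
        paths[a].set(b);
        paths[b].set(a);
    }

    public static void main(String[] args) {
        int n = 5;
        BitSet[] paths = new BitSet[n];
        for (int i = 0; i < n; i++) {
            paths[i] = new BitSet(n);
            paths[i].set(i); // every item is related to itself
        }
        relate(paths, 0, 1); // items 0-1-2 form one cluster,
        relate(paths, 1, 2);
        relate(paths, 3, 4); // items 3-4 another
        // Warshall's transitive closure: if i reaches k, i reaches all of paths[k].
        for (int k = 0; k < n; k++) {
            for (int i = 0; i < n; i++) {
                if (paths[i].get(k)) {
                    paths[i].or(paths[k]);
                }
            }
        }
        // Emit each cluster once: pick a pending item, print its closed set,
        // and mark the whole set as no longer pending.
        BitSet pending = new BitSet(n);
        pending.set(0, n);
        for (int i = pending.nextSetBit(0); i >= 0; i = pending.nextSetBit(0)) {
            System.out.println("cluster: " + paths[i]); // {0, 1, 2} then {3, 4}
            pending.andNot(paths[i]);
        }
    }
}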

Example 12 with ConnectorDescriptorId

Use of org.apache.hyracks.api.dataflow.ConnectorDescriptorId in project asterixdb by apache.

From class ActivityClusterPlanner, the method computeTaskClusters:

private TaskCluster[] computeTaskClusters(ActivityCluster ac, JobRun jobRun, Map<ActivityId, ActivityPlan> activityPlanMap) {
    Set<ActivityId> activities = ac.getActivityMap().keySet();
    Map<TaskId, List<Pair<TaskId, ConnectorDescriptorId>>> taskConnectivity = computeTaskConnectivity(jobRun, activityPlanMap, activities);
    TaskCluster[] taskClusters = ac.getActivityClusterGraph().isUseConnectorPolicyForScheduling() ? buildConnectorPolicyAwareTaskClusters(ac, activityPlanMap, taskConnectivity) : buildConnectorPolicyUnawareTaskClusters(ac, activityPlanMap);
    for (TaskCluster tc : taskClusters) {
        Set<TaskCluster> tcDependencyTaskClusters = tc.getDependencyTaskClusters();
        for (Task ts : tc.getTasks()) {
            TaskId tid = ts.getTaskId();
            List<Pair<TaskId, ConnectorDescriptorId>> cInfoList = taskConnectivity.get(tid);
            if (cInfoList != null) {
                for (Pair<TaskId, ConnectorDescriptorId> p : cInfoList) {
                    Task targetTS = activityPlanMap.get(p.getLeft().getActivityId()).getTasks()[p.getLeft().getPartition()];
                    TaskCluster targetTC = targetTS.getTaskCluster();
                    if (targetTC != tc) {
                        ConnectorDescriptorId cdId = p.getRight();
                        PartitionId pid = new PartitionId(jobRun.getJobId(), cdId, tid.getPartition(), p.getLeft().getPartition());
                        tc.getProducedPartitions().add(pid);
                        targetTC.getRequiredPartitions().add(pid);
                        partitionProducingTaskClusterMap.put(pid, tc);
                    }
                }
            }
            for (TaskId dTid : ts.getDependencies()) {
                TaskCluster dTC = getTaskCluster(dTid);
                dTC.getDependentTaskClusters().add(tc);
                tcDependencyTaskClusters.add(dTC);
            }
        }
    }
    return taskClusters;
}
Also used: Task(org.apache.hyracks.control.cc.job.Task) TaskId(org.apache.hyracks.api.dataflow.TaskId) ActivityId(org.apache.hyracks.api.dataflow.ActivityId) ConnectorDescriptorId(org.apache.hyracks.api.dataflow.ConnectorDescriptorId) PartitionId(org.apache.hyracks.api.partitions.PartitionId) TaskCluster(org.apache.hyracks.control.cc.job.TaskCluster) ArrayList(java.util.ArrayList) List(java.util.List) Pair(org.apache.commons.lang3.tuple.Pair)
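
The inner loop of computeTaskClusters registers each cross-cluster partition on both endpoints: the producing TaskCluster records it as produced, the consuming one as required, and partitionProducingTaskClusterMap serves as a reverse index from partition to producer. Below is a stripped-down sketch of that double-ended bookkeeping using simplified stand-in types of our own (a Java 16+ record instead of Hyracks' PartitionId, a bare class instead of TaskCluster):

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

// Hypothetical, simplified stand-ins for Hyracks' PartitionId and TaskCluster,
// only to illustrate the double-ended bookkeeping in computeTaskClusters above.
record PartitionKey(long jobId, int connectorId, int senderIndex, int receiverIndex) {}

class ClusterSketch {
    final Set<PartitionKey> producedPartitions = new HashSet<>();
    final Set<PartitionKey> requiredPartitions = new HashSet<>();
}

public class PartitionWiringSketch {
    public static void main(String[] args) {
        ClusterSketch producer = new ClusterSketch();
        ClusterSketch consumer = new ClusterSketch();
        Map<PartitionKey, ClusterSketch> producingClusterByPartition = new HashMap<>();

        // One partition flowing from sender partition 0 to receiver partition 1
        // over connector 42 of job 7 (all ids invented).
        PartitionKey pid = new PartitionKey(7L, 42, 0, 1);

        // The same id is registered on both ends, plus a reverse index so a
        // scheduler can find the producing cluster from a required partition.
        producer.producedPartitions.add(pid);
        consumer.requiredPartitions.add(pid);
        producingClusterByPartition.put(pid, producer);

        System.out.println(producingClusterByPartition.get(pid) == producer); // prints: true
        System.out.println(consumer.requiredPartitions.contains(pid)); // prints: true
    }
}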

Example 13 with ConnectorDescriptorId

Use of org.apache.hyracks.api.dataflow.ConnectorDescriptorId in project asterixdb by apache.

From class ActivityClusterPlanner, the method computePartitionCounts:

private Map<ActivityId, ActivityPartitionDetails> computePartitionCounts(ActivityCluster ac) throws HyracksException {
    PartitionConstraintSolver solver = executor.getSolver();
    Set<LValueConstraintExpression> lValues = new HashSet<>();
    for (ActivityId anId : ac.getActivityMap().keySet()) {
        lValues.add(new PartitionCountExpression(anId.getOperatorDescriptorId()));
    }
    solver.solve(lValues);
    Map<OperatorDescriptorId, Integer> nPartMap = new HashMap<>();
    for (LValueConstraintExpression lv : lValues) {
        Object value = solver.getValue(lv);
        if (value == null) {
            throw new HyracksException("No value found for " + lv);
        }
        if (!(value instanceof Number)) {
            throw new HyracksException("Unexpected type of value bound to " + lv + ": " + value.getClass() + "(" + value + ")");
        }
        int nParts = ((Number) value).intValue();
        if (nParts <= 0) {
            throw new HyracksException("Unsatisfiable number of partitions for " + lv + ": " + nParts);
        }
        nPartMap.put(((PartitionCountExpression) lv).getOperatorDescriptorId(), nParts);
    }
    Map<ActivityId, ActivityPartitionDetails> activityPartsMap = new HashMap<>();
    for (ActivityId anId : ac.getActivityMap().keySet()) {
        int nParts = nPartMap.get(anId.getOperatorDescriptorId());
        int[] nInputPartitions = null;
        List<IConnectorDescriptor> inputs = ac.getActivityInputMap().get(anId);
        if (inputs != null) {
            nInputPartitions = new int[inputs.size()];
            for (int i = 0; i < nInputPartitions.length; ++i) {
                ConnectorDescriptorId cdId = inputs.get(i).getConnectorId();
                ActivityId aid = ac.getProducerActivity(cdId);
                Integer nPartInt = nPartMap.get(aid.getOperatorDescriptorId());
                nInputPartitions[i] = nPartInt;
            }
        }
        int[] nOutputPartitions = null;
        List<IConnectorDescriptor> outputs = ac.getActivityOutputMap().get(anId);
        if (outputs != null) {
            nOutputPartitions = new int[outputs.size()];
            for (int i = 0; i < nOutputPartitions.length; ++i) {
                ConnectorDescriptorId cdId = outputs.get(i).getConnectorId();
                ActivityId aid = ac.getConsumerActivity(cdId);
                Integer nPartInt = nPartMap.get(aid.getOperatorDescriptorId());
                nOutputPartitions[i] = nPartInt;
            }
        }
        ActivityPartitionDetails apd = new ActivityPartitionDetails(nParts, nInputPartitions, nOutputPartitions);
        activityPartsMap.put(anId, apd);
    }
    return activityPartsMap;
}
Also used: IConnectorDescriptor(org.apache.hyracks.api.dataflow.IConnectorDescriptor) OperatorDescriptorId(org.apache.hyracks.api.dataflow.OperatorDescriptorId) HashMap(java.util.HashMap) ActivityId(org.apache.hyracks.api.dataflow.ActivityId) ConnectorDescriptorId(org.apache.hyracks.api.dataflow.ConnectorDescriptorId) HyracksException(org.apache.hyracks.api.exceptions.HyracksException) LValueConstraintExpression(org.apache.hyracks.api.constraints.expressions.LValueConstraintExpression) PartitionCountExpression(org.apache.hyracks.api.constraints.expressions.PartitionCountExpression) HashSet(java.util.HashSet)
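
A detail worth keeping from computePartitionCounts is its three-stage validation of solver output: a missing binding, a binding of the wrong type, and a non-positive partition count each fail fast with a message naming the offending expression. Here is the same pattern extracted into a standalone helper; the helper name and the use of IllegalStateException in place of HyracksException are our own choices for a self-contained sketch:

// Standalone sketch of the validation ladder used in computePartitionCounts:
// reject missing values, values of the wrong type, and out-of-range values,
// each with a message naming the offending binding. Helper name is illustrative.
public class SolverValueCheck {
    static int requirePositiveCount(String binding, Object value) {
        if (value == null) {
            throw new IllegalStateException("No value found for " + binding);
        }
        if (!(value instanceof Number)) {
            throw new IllegalStateException("Unexpected type of value bound to " + binding
                    + ": " + value.getClass() + " (" + value + ")");
        }
        int n = ((Number) value).intValue();
        if (n <= 0) {
            throw new IllegalStateException("Unsatisfiable number of partitions for " + binding + ": " + n);
        }
        return n;
    }

    public static void main(String[] args) {
        System.out.println(requirePositiveCount("ODID:0", 4)); // prints: 4
    }
}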

Example 14 with ConnectorDescriptorId

Use of org.apache.hyracks.api.dataflow.ConnectorDescriptorId in project asterixdb by apache.

From class ActivityClusterPlanner, the method computeTaskConnectivity:

private Map<TaskId, List<Pair<TaskId, ConnectorDescriptorId>>> computeTaskConnectivity(JobRun jobRun, Map<ActivityId, ActivityPlan> activityPlanMap, Set<ActivityId> activities) {
    Map<TaskId, List<Pair<TaskId, ConnectorDescriptorId>>> taskConnectivity = new HashMap<>();
    ActivityClusterGraph acg = jobRun.getActivityClusterGraph();
    BitSet targetBitmap = new BitSet();
    for (ActivityId ac1 : activities) {
        ActivityCluster ac = acg.getActivityMap().get(ac1);
        Task[] ac1TaskStates = activityPlanMap.get(ac1).getTasks();
        int nProducers = ac1TaskStates.length;
        List<IConnectorDescriptor> outputConns = ac.getActivityOutputMap().get(ac1);
        if (outputConns == null) {
            continue;
        }
        for (IConnectorDescriptor c : outputConns) {
            ConnectorDescriptorId cdId = c.getConnectorId();
            ActivityId ac2 = ac.getConsumerActivity(cdId);
            Task[] ac2TaskStates = activityPlanMap.get(ac2).getTasks();
            int nConsumers = ac2TaskStates.length;
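            // Fast path: an all-to-all connector feeds every consumer from every
            // producer, so one shared consumer list serves all producer tasks.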
            if (c.allProducersToAllConsumers()) {
                List<Pair<TaskId, ConnectorDescriptorId>> cInfoList = new ArrayList<>();
                for (int j = 0; j < nConsumers; j++) {
                    TaskId targetTID = ac2TaskStates[j].getTaskId();
                    cInfoList.add(Pair.of(targetTID, cdId));
                }
                for (int i = 0; i < nProducers; ++i) {
                    taskConnectivity.put(ac1TaskStates[i].getTaskId(), cInfoList);
                }
                continue;
            }
            for (int i = 0; i < nProducers; ++i) {
                c.indicateTargetPartitions(nProducers, nConsumers, i, targetBitmap);
                List<Pair<TaskId, ConnectorDescriptorId>> cInfoList = taskConnectivity.get(ac1TaskStates[i].getTaskId());
                if (cInfoList == null) {
                    cInfoList = new ArrayList<>();
                    taskConnectivity.put(ac1TaskStates[i].getTaskId(), cInfoList);
                }
                for (int j = targetBitmap.nextSetBit(0); j >= 0; j = targetBitmap.nextSetBit(j + 1)) {
                    TaskId targetTID = ac2TaskStates[j].getTaskId();
                    cInfoList.add(Pair.of(targetTID, cdId));
                }
            }
        }
    }
    return taskConnectivity;
}
Also used: IConnectorDescriptor(org.apache.hyracks.api.dataflow.IConnectorDescriptor) Task(org.apache.hyracks.control.cc.job.Task) TaskId(org.apache.hyracks.api.dataflow.TaskId) HashMap(java.util.HashMap) ActivityId(org.apache.hyracks.api.dataflow.ActivityId) ConnectorDescriptorId(org.apache.hyracks.api.dataflow.ConnectorDescriptorId) BitSet(java.util.BitSet) ArrayList(java.util.ArrayList) ActivityCluster(org.apache.hyracks.api.job.ActivityCluster) ActivityClusterGraph(org.apache.hyracks.api.job.ActivityClusterGraph) List(java.util.List) Pair(org.apache.commons.lang3.tuple.Pair)
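
For connectors that are not all-to-all, computeTaskConnectivity asks the connector itself, via indicateTargetPartitions, to mark the consumer partitions fed by producer i in a caller-provided BitSet that is reused across producers. The sketch below shows what such a targeting function might look like for a simple one-to-one routing scheme; it is illustrative only, since real connectors implement this as a method of IConnectorDescriptor:

import java.util.BitSet;

// Illustrative sketch of the indicateTargetPartitions contract used above:
// given producer index i, mark the consumer partitions it feeds in the
// caller-provided BitSet. Real connectors implement IConnectorDescriptor.
public class TargetPartitionsSketch {
    // One-to-one routing: producer i feeds only consumer i.
    static void oneToOneTargets(int nProducers, int nConsumers, int producerIndex, BitSet targets) {
        targets.clear(); // the BitSet is reused across producers, so reset it first
        if (producerIndex < nConsumers) {
            targets.set(producerIndex);
        }
    }

    public static void main(String[] args) {
        BitSet targets = new BitSet();
        oneToOneTargets(4, 4, 2, targets);
        System.out.println(targets); // prints: {2}
    }
}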

Example 15 with ConnectorDescriptorId

Use of org.apache.hyracks.api.dataflow.ConnectorDescriptorId in project asterixdb by apache.

From class JobExecutor, the method assignRunnabilityRank:

/*
 * Runnability rank has the following semantics:
 * Runnability(Runnable TaskCluster depending on completed TaskClusters) = {RUNNABLE, 0}
 * Runnability(Runnable TaskCluster) = max(Rank(Dependent TaskClusters)) + 1
 * Runnability(Non-schedulable TaskCluster) = {NOT_RUNNABLE, _}
 */
private Runnability assignRunnabilityRank(TaskCluster goal, Map<TaskCluster, Runnability> runnabilityMap) {
    if (LOGGER.isLoggable(Level.FINE)) {
        LOGGER.fine("Computing runnability: " + goal);
    }
    if (runnabilityMap.containsKey(goal)) {
        return runnabilityMap.get(goal);
    }
    TaskClusterAttempt lastAttempt = findLastTaskClusterAttempt(goal);
    if (lastAttempt != null) {
        if (LOGGER.isLoggable(Level.FINE)) {
            LOGGER.fine("Last Attempt Status: " + lastAttempt.getStatus());
        }
        if (lastAttempt.getStatus() == TaskClusterAttempt.TaskClusterStatus.COMPLETED) {
            Runnability runnability = new Runnability(Runnability.Tag.COMPLETED, Integer.MIN_VALUE);
            runnabilityMap.put(goal, runnability);
            return runnability;
        }
        if (lastAttempt.getStatus() == TaskClusterAttempt.TaskClusterStatus.RUNNING) {
            Runnability runnability = new Runnability(Runnability.Tag.RUNNING, Integer.MIN_VALUE);
            runnabilityMap.put(goal, runnability);
            return runnability;
        }
    }
    Map<ConnectorDescriptorId, IConnectorPolicy> connectorPolicyMap = jobRun.getConnectorPolicyMap();
    PartitionMatchMaker pmm = jobRun.getPartitionMatchMaker();
    Runnability aggregateRunnability = new Runnability(Runnability.Tag.RUNNABLE, 0);
    for (PartitionId pid : goal.getRequiredPartitions()) {
        if (LOGGER.isLoggable(Level.FINE)) {
            LOGGER.fine("Inspecting required partition: " + pid);
        }
        Runnability runnability;
        ConnectorDescriptorId cdId = pid.getConnectorDescriptorId();
        IConnectorPolicy cPolicy = connectorPolicyMap.get(cdId);
        PartitionState maxState = pmm.getMaximumAvailableState(pid);
        if (LOGGER.isLoggable(Level.FINE)) {
            LOGGER.fine("Policy: " + cPolicy + " maxState: " + maxState);
        }
        if (PartitionState.COMMITTED.equals(maxState)) {
            runnability = new Runnability(Runnability.Tag.RUNNABLE, 0);
        } else if (PartitionState.STARTED.equals(maxState) && !cPolicy.consumerWaitsForProducerToFinish()) {
            runnability = new Runnability(Runnability.Tag.RUNNABLE, 1);
        } else {
            runnability = assignRunnabilityRank(partitionProducingTaskClusterMap.get(pid), runnabilityMap);
            switch(runnability.getTag()) {
                case RUNNABLE:
                    if (cPolicy.consumerWaitsForProducerToFinish()) {
                        runnability = new Runnability(Runnability.Tag.NOT_RUNNABLE, Integer.MAX_VALUE);
                    } else {
                        runnability = new Runnability(Runnability.Tag.RUNNABLE, runnability.getPriority() + 1);
                    }
                    break;
                case NOT_RUNNABLE:
                    break;
                case RUNNING:
                    if (cPolicy.consumerWaitsForProducerToFinish()) {
                        runnability = new Runnability(Runnability.Tag.NOT_RUNNABLE, Integer.MAX_VALUE);
                    } else {
                        runnability = new Runnability(Runnability.Tag.RUNNABLE, 1);
                    }
                    break;
                default:
                    break;
            }
        }
        aggregateRunnability = Runnability.getWorstCase(aggregateRunnability, runnability);
        if (aggregateRunnability.getTag() == Runnability.Tag.NOT_RUNNABLE) {
            // already not runnable -- cannot get better. bail.
            break;
        }
        if (LOGGER.isLoggable(Level.FINE)) {
            LOGGER.fine("aggregateRunnability: " + aggregateRunnability);
        }
    }
    runnabilityMap.put(goal, aggregateRunnability);
    return aggregateRunnability;
}
Also used: TaskClusterAttempt(org.apache.hyracks.control.cc.job.TaskClusterAttempt) IConnectorPolicy(org.apache.hyracks.api.dataflow.connectors.IConnectorPolicy) ConnectorDescriptorId(org.apache.hyracks.api.dataflow.ConnectorDescriptorId) PartitionState(org.apache.hyracks.control.common.job.PartitionState) PartitionMatchMaker(org.apache.hyracks.control.cc.partitions.PartitionMatchMaker) PartitionId(org.apache.hyracks.api.partitions.PartitionId)
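
Structurally, assignRunnabilityRank is a memoized recursion over the task-cluster dependency graph: runnabilityMap caches each cluster's result so it is evaluated once, and a cluster's rank is driven by the worst case among the producers of its required partitions. The following stripped-down sketch shows the same memoization shape with invented node names and plain integer ranks in place of the Runnability tag/priority pairs:

import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Stripped-down sketch of the memoized ranking recursion in assignRunnabilityRank:
// a node's rank is one more than the worst (maximum) rank among its producers,
// and results are cached so each node is evaluated once. Types are illustrative.
public class RankSketch {
    static int rank(String node, Map<String, List<String>> producers, Map<String, Integer> memo) {
        Integer cached = memo.get(node);
        if (cached != null) {
            return cached; // already computed -- the analogue of runnabilityMap
        }
        int worst = 0; // no producers: immediately runnable, rank 0
        for (String p : producers.getOrDefault(node, List.of())) {
            worst = Math.max(worst, rank(p, producers, memo) + 1);
        }
        memo.put(node, worst);
        return worst;
    }

    public static void main(String[] args) {
        Map<String, List<String>> producers = Map.of(
                "sink", List.of("join"),
                "join", List.of("scanA", "scanB"));
        System.out.println(rank("sink", producers, new HashMap<>())); // prints: 2
    }
}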

Aggregations

ConnectorDescriptorId (org.apache.hyracks.api.dataflow.ConnectorDescriptorId): 18 usages
HashMap (java.util.HashMap): 10 usages
IConnectorDescriptor (org.apache.hyracks.api.dataflow.IConnectorDescriptor): 9 usages
List (java.util.List): 7 usages
ActivityId (org.apache.hyracks.api.dataflow.ActivityId): 7 usages
ArrayList (java.util.ArrayList): 6 usages
Map (java.util.Map): 6 usages
Pair (org.apache.commons.lang3.tuple.Pair): 6 usages
IConnectorPolicy (org.apache.hyracks.api.dataflow.connectors.IConnectorPolicy): 6 usages
PartitionId (org.apache.hyracks.api.partitions.PartitionId): 5 usages
IOperatorDescriptor (org.apache.hyracks.api.dataflow.IOperatorDescriptor): 4 usages
OperatorDescriptorId (org.apache.hyracks.api.dataflow.OperatorDescriptorId): 4 usages
TaskId (org.apache.hyracks.api.dataflow.TaskId): 4 usages
ActivityClusterGraph (org.apache.hyracks.api.job.ActivityClusterGraph): 4 usages
Task (org.apache.hyracks.control.cc.job.Task): 4 usages
BitSet (java.util.BitSet): 3 usages
AlgebricksPartitionConstraint (org.apache.hyracks.algebricks.common.constraints.AlgebricksPartitionConstraint): 3 usages
Constraint (org.apache.hyracks.api.constraints.Constraint): 3 usages
LValueConstraintExpression (org.apache.hyracks.api.constraints.expressions.LValueConstraintExpression): 3 usages
JobId (org.apache.hyracks.api.job.JobId): 3 usages