Search in sources :

Example 1 with ActivityClusterGraph

use of org.apache.hyracks.api.job.ActivityClusterGraph in project asterixdb by apache.

the class ActivityClusterGraphRewriter method rewriteIntraActivityCluster.

/**
 * Rewrite an activity cluster internally.
 *
 * @param ac
 *            the activity cluster to be rewritten
 * @param invertedActivitySuperActivityMap
 *            the map from each activity to the super activity that currently contains it
 */
private void rewriteIntraActivityCluster(ActivityCluster ac, Map<IActivity, SuperActivity> invertedActivitySuperActivityMap) {
    Map<ActivityId, IActivity> activities = ac.getActivityMap();
    Map<ActivityId, List<IConnectorDescriptor>> activityInputMap = ac.getActivityInputMap();
    Map<ActivityId, List<IConnectorDescriptor>> activityOutputMap = ac.getActivityOutputMap();
    Map<ConnectorDescriptorId, Pair<Pair<IActivity, Integer>, Pair<IActivity, Integer>>> connectorActivityMap = ac.getConnectorActivityMap();
    ActivityClusterGraph acg = ac.getActivityClusterGraph();
    Map<ActivityId, IActivity> startActivities = new HashMap<ActivityId, IActivity>();
    Map<ActivityId, SuperActivity> superActivities = new HashMap<ActivityId, SuperActivity>();
    Map<ActivityId, Queue<IActivity>> toBeExpendedMap = new HashMap<ActivityId, Queue<IActivity>>();
    /*
     * Build the initial super activities.
     */
    for (Entry<ActivityId, IActivity> entry : activities.entrySet()) {
        ActivityId activityId = entry.getKey();
        IActivity activity = entry.getValue();
        if (activityInputMap.get(activityId) == null) {
            startActivities.put(activityId, activity);
            /*
             * Use the start activity's id as the id of the super activity.
             */
            createNewSuperActivity(ac, superActivities, toBeExpendedMap, invertedActivitySuperActivityMap, activityId, activity);
        }
    }
    /*
     * Expand one-to-one connected activity clusters in BFS order. After the
     * while-loop, the original activities are partitioned into equivalence
     * classes, one per super activity.
     */
    Map<ActivityId, SuperActivity> clonedSuperActivities = new HashMap<ActivityId, SuperActivity>();
    while (toBeExpendedMap.size() > 0) {
        clonedSuperActivities.clear();
        clonedSuperActivities.putAll(superActivities);
        for (Entry<ActivityId, SuperActivity> entry : clonedSuperActivities.entrySet()) {
            ActivityId superActivityId = entry.getKey();
            SuperActivity superActivity = entry.getValue();
            /*
             * Skip the super activity if it has already been swallowed.
             */
            if (superActivities.get(superActivityId) == null) {
                continue;
            }
            /*
             * Expand the super activity.
             */
            Queue<IActivity> toBeExpended = toBeExpendedMap.get(superActivityId);
            if (toBeExpended == null) {
                /*
                 * Nothing to expand.
                 */
                continue;
            }
            IActivity expendingActivity = toBeExpended.poll();
            List<IConnectorDescriptor> outputConnectors = activityOutputMap.get(expendingActivity.getActivityId());
            if (outputConnectors != null) {
                for (IConnectorDescriptor outputConn : outputConnectors) {
                    Pair<Pair<IActivity, Integer>, Pair<IActivity, Integer>> endPoints = connectorActivityMap.get(outputConn.getConnectorId());
                    IActivity newActivity = endPoints.getRight().getLeft();
                    SuperActivity existingSuperActivity = invertedActivitySuperActivityMap.get(newActivity);
                    if (outputConn.getClass().getName().contains(ONE_TO_ONE_CONNECTOR)) {
                        /*
                         * Expand the super activity along a one-to-one outbound connection.
                         */
                        if (existingSuperActivity == null) {
                            superActivity.addActivity(newActivity);
                            toBeExpended.add(newActivity);
                            invertedActivitySuperActivityMap.put(newActivity, superActivity);
                        } else {
                            /*
                             * The two activities are already in the same super activity.
                             */
                            if (existingSuperActivity == superActivity) {
                                continue;
                            }
                            /*
                             * Swallow the existing super activity into this one.
                             */
                            swallowExistingSuperActivity(superActivities, toBeExpendedMap, invertedActivitySuperActivityMap, superActivity, superActivityId, existingSuperActivity);
                        }
                    } else {
                        if (existingSuperActivity == null) {
                            /*
                             * Create a new super activity for the consumer activity.
                             */
                            createNewSuperActivity(ac, superActivities, toBeExpendedMap, invertedActivitySuperActivityMap, newActivity.getActivityId(), newActivity);
                        }
                    }
                }
            }
            /*
             * Remove the to-be-expanded queue if it is empty.
             */
            if (toBeExpended.size() == 0) {
                toBeExpendedMap.remove(superActivityId);
            }
        }
    }
    Map<ConnectorDescriptorId, IConnectorDescriptor> connMap = ac.getConnectorMap();
    Map<ConnectorDescriptorId, RecordDescriptor> connRecordDesc = ac.getConnectorRecordDescriptorMap();
    Map<SuperActivity, Integer> superActivityProducerPort = new HashMap<SuperActivity, Integer>();
    Map<SuperActivity, Integer> superActivityConsumerPort = new HashMap<SuperActivity, Integer>();
    for (Entry<ActivityId, SuperActivity> entry : superActivities.entrySet()) {
        superActivityProducerPort.put(entry.getValue(), 0);
        superActivityConsumerPort.put(entry.getValue(), 0);
    }
    /*
     * Create a new activity cluster to replace the old one.
     */
    ActivityCluster newActivityCluster = new ActivityCluster(acg, ac.getId());
    newActivityCluster.setConnectorPolicyAssignmentPolicy(ac.getConnectorPolicyAssignmentPolicy());
    for (Entry<ActivityId, SuperActivity> entry : superActivities.entrySet()) {
        newActivityCluster.addActivity(entry.getValue());
        acg.getActivityMap().put(entry.getKey(), newActivityCluster);
    }
    /*
     * Set up connectors, either inside a super activity or between super activities.
     */
    for (Entry<ConnectorDescriptorId, Pair<Pair<IActivity, Integer>, Pair<IActivity, Integer>>> entry : connectorActivityMap.entrySet()) {
        ConnectorDescriptorId connectorId = entry.getKey();
        Pair<Pair<IActivity, Integer>, Pair<IActivity, Integer>> endPoints = entry.getValue();
        IActivity producerActivity = endPoints.getLeft().getLeft();
        IActivity consumerActivity = endPoints.getRight().getLeft();
        int producerPort = endPoints.getLeft().getRight();
        int consumerPort = endPoints.getRight().getRight();
        RecordDescriptor recordDescriptor = connRecordDesc.get(connectorId);
        IConnectorDescriptor conn = connMap.get(connectorId);
        if (conn.getClass().getName().contains(ONE_TO_ONE_CONNECTOR)) {
            /*
             * Connection edge between activities inside the same super activity.
             */
            SuperActivity residingSuperActivity = invertedActivitySuperActivityMap.get(producerActivity);
            residingSuperActivity.connect(conn, producerActivity, producerPort, consumerActivity, consumerPort, recordDescriptor);
        } else {
            /*
             * Connection edge between super activities.
             */
            SuperActivity producerSuperActivity = invertedActivitySuperActivityMap.get(producerActivity);
            SuperActivity consumerSuperActivity = invertedActivitySuperActivityMap.get(consumerActivity);
            int producerSAPort = superActivityProducerPort.get(producerSuperActivity);
            int consumerSAPort = superActivityConsumerPort.get(consumerSuperActivity);
            newActivityCluster.addConnector(conn);
            newActivityCluster.connect(conn, producerSuperActivity, producerSAPort, consumerSuperActivity, consumerSAPort, recordDescriptor);
            /*
             * Bridge the super activity ports to the inner activity ports.
             */
            producerSuperActivity.setClusterOutputIndex(producerSAPort, producerActivity.getActivityId(), producerPort);
            consumerSuperActivity.setClusterInputIndex(consumerSAPort, consumerActivity.getActivityId(), consumerPort);
            acg.getConnectorMap().put(connectorId, newActivityCluster);
            /*
             * Advance the port numbers for the producer and consumer.
             */
            superActivityProducerPort.put(producerSuperActivity, ++producerSAPort);
            superActivityConsumerPort.put(consumerSuperActivity, ++consumerSAPort);
        }
    }
    /*
     * Set up the roots of the new activity cluster.
     */
    for (Entry<ActivityId, SuperActivity> entry : superActivities.entrySet()) {
        List<IConnectorDescriptor> connIds = newActivityCluster.getActivityOutputMap().get(entry.getKey());
        if (connIds == null || connIds.size() == 0) {
            newActivityCluster.addRoot(entry.getValue());
        }
    }
    /*
     * Set up the blocked2Blocker mapping, which will be updated in the
     * rewriteInterActivityCluster call.
     */
    newActivityCluster.getBlocked2BlockerMap().putAll(ac.getBlocked2BlockerMap());
    /*
     * Replace the old activity cluster with the new one.
     */
    acg.getActivityClusterMap().put(ac.getId(), newActivityCluster);
}
Also used : HashMap(java.util.HashMap) RecordDescriptor(org.apache.hyracks.api.dataflow.value.RecordDescriptor) ActivityId(org.apache.hyracks.api.dataflow.ActivityId) ConnectorDescriptorId(org.apache.hyracks.api.dataflow.ConnectorDescriptorId) IActivity(org.apache.hyracks.api.dataflow.IActivity) SuperActivity(org.apache.hyracks.api.rewriter.runtime.SuperActivity) List(java.util.List) LinkedList(java.util.LinkedList) Queue(java.util.Queue) Pair(org.apache.commons.lang3.tuple.Pair) IConnectorDescriptor(org.apache.hyracks.api.dataflow.IConnectorDescriptor) ActivityCluster(org.apache.hyracks.api.job.ActivityCluster) ActivityClusterGraph(org.apache.hyracks.api.job.ActivityClusterGraph)
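
In short, the pass greedily merges activities joined by one-to-one connectors into SuperActivity nodes and rebuilds the cluster around them. A minimal, illustrative driver (the rewrite entry point is the one invoked in Example 2 below; the acg variable is assumed from context):

// Illustrative driver: collapse one-to-one activity chains into super activities.
// rewrite(acg) visits every activity cluster of the graph and applies
// rewriteIntraActivityCluster to each, as Example 2's initialize() shows.
ActivityClusterGraphRewriter rewriter = new ActivityClusterGraphRewriter();
rewriter.rewrite(acg);
// Afterwards, acg.getActivityClusterMap() holds the rewritten clusters, whose
// activities are SuperActivity instances.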

Example 2 with ActivityClusterGraph

use of org.apache.hyracks.api.job.ActivityClusterGraph in project asterixdb by apache.

the class JobSpecificationActivityClusterGraphGeneratorFactory method createActivityClusterGraphGenerator.

@Override
public IActivityClusterGraphGenerator createActivityClusterGraphGenerator(JobId jobId, final ICCServiceContext ccServiceCtx, Set<JobFlag> jobFlags) throws HyracksException {
    final JobActivityGraphBuilder builder = new JobActivityGraphBuilder(spec, jobFlags);
    PlanUtils.visit(spec, new IConnectorDescriptorVisitor() {

        @Override
        public void visit(IConnectorDescriptor conn) throws HyracksException {
            builder.addConnector(conn);
        }
    });
    PlanUtils.visit(spec, new IOperatorDescriptorVisitor() {

        @Override
        public void visit(IOperatorDescriptor op) {
            op.contributeActivities(builder);
        }
    });
    builder.finish();
    final JobActivityGraph jag = builder.getActivityGraph();
    ActivityClusterGraphBuilder acgb = new ActivityClusterGraphBuilder();
    final ActivityClusterGraph acg = acgb.inferActivityClusters(jobId, jag);
    acg.setFrameSize(spec.getFrameSize());
    acg.setMaxReattempts(spec.getMaxReattempts());
    acg.setJobletEventListenerFactory(spec.getJobletEventListenerFactory());
    acg.setGlobalJobDataFactory(spec.getGlobalJobDataFactory());
    acg.setConnectorPolicyAssignmentPolicy(spec.getConnectorPolicyAssignmentPolicy());
    acg.setUseConnectorPolicyForScheduling(spec.isUseConnectorPolicyForScheduling());
    final Set<Constraint> constraints = new HashSet<>();
    final IConstraintAcceptor acceptor = new IConstraintAcceptor() {

        @Override
        public void addConstraint(Constraint constraint) {
            constraints.add(constraint);
        }
    };
    PlanUtils.visit(spec, new IOperatorDescriptorVisitor() {

        @Override
        public void visit(IOperatorDescriptor op) {
            op.contributeSchedulingConstraints(acceptor, ccServiceCtx);
        }
    });
    PlanUtils.visit(spec, new IConnectorDescriptorVisitor() {

        @Override
        public void visit(IConnectorDescriptor conn) {
            conn.contributeSchedulingConstraints(acceptor, acg.getConnectorMap().get(conn.getConnectorId()), ccServiceCtx);
        }
    });
    constraints.addAll(spec.getUserConstraints());
    return new IActivityClusterGraphGenerator() {

        @Override
        public ActivityClusterGraph initialize() {
            ActivityClusterGraphRewriter rewriter = new ActivityClusterGraphRewriter();
            rewriter.rewrite(acg);
            return acg;
        }

        @Override
        public Set<Constraint> getConstraints() {
            return constraints;
        }
    };
}
Also used : IConnectorDescriptor(org.apache.hyracks.api.dataflow.IConnectorDescriptor) Constraint(org.apache.hyracks.api.constraints.Constraint) IConstraintAcceptor(org.apache.hyracks.api.constraints.IConstraintAcceptor) HyracksException(org.apache.hyracks.api.exceptions.HyracksException) ActivityClusterGraph(org.apache.hyracks.api.job.ActivityClusterGraph) IOperatorDescriptor(org.apache.hyracks.api.dataflow.IOperatorDescriptor) IActivityClusterGraphGenerator(org.apache.hyracks.api.job.IActivityClusterGraphGenerator) JobActivityGraph(org.apache.hyracks.api.job.JobActivityGraph) ActivityClusterGraphRewriter(org.apache.hyracks.api.rewriter.ActivityClusterGraphRewriter) HashSet(java.util.HashSet)
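
A hedged sketch of how this factory is consumed; apart from the factory and generator APIs shown above, the call-site names (factory, jobId, ccServiceCtx) are assumptions:

// Hypothetical call site for the factory defined above.
IActivityClusterGraphGenerator gen =
        factory.createActivityClusterGraphGenerator(jobId, ccServiceCtx, EnumSet.noneOf(JobFlag.class));
Set<Constraint> constraints = gen.getConstraints(); // constraints contributed by operators, connectors, and the user
ActivityClusterGraph acg = gen.initialize(); // applies the super-activity rewrite of Example 1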

Example 3 with ActivityClusterGraph

use of org.apache.hyracks.api.job.ActivityClusterGraph in project asterixdb by apache.

the class ActivityClusterGraphBuilder method inferActivityClusters.

public ActivityClusterGraph inferActivityClusters(JobId jobId, JobActivityGraph jag) {
    /*
     * Build the initial equivalence-set map: for each activity id t, map t -> { t }.
     */
    Map<ActivityId, Set<ActivityId>> stageMap = new HashMap<ActivityId, Set<ActivityId>>();
    Set<Set<ActivityId>> stages = new HashSet<Set<ActivityId>>();
    for (ActivityId taskId : jag.getActivityMap().keySet()) {
        Set<ActivityId> eqSet = new HashSet<ActivityId>();
        eqSet.add(taskId);
        stageMap.put(taskId, eqSet);
        stages.add(eqSet);
    }
    boolean changed = true;
    while (changed) {
        changed = false;
        Pair<ActivityId, ActivityId> pair = findMergePair(jag, stages);
        if (pair != null) {
            merge(stageMap, stages, pair.getLeft(), pair.getRight());
            changed = true;
        }
    }
    ActivityClusterGraph acg = new ActivityClusterGraph();
    Map<ActivityId, ActivityCluster> acMap = new HashMap<ActivityId, ActivityCluster>();
    int acCounter = 0;
    Map<ActivityId, IActivity> activityNodeMap = jag.getActivityMap();
    List<ActivityCluster> acList = new ArrayList<ActivityCluster>();
    for (Set<ActivityId> stage : stages) {
        ActivityCluster ac = new ActivityCluster(acg, new ActivityClusterId(jobId, acCounter++));
        acList.add(ac);
        for (ActivityId aid : stage) {
            IActivity activity = activityNodeMap.get(aid);
            ac.addActivity(activity);
            acMap.put(aid, ac);
        }
    }
    for (Set<ActivityId> stage : stages) {
        for (ActivityId aid : stage) {
            IActivity activity = activityNodeMap.get(aid);
            ActivityCluster ac = acMap.get(aid);
            List<IConnectorDescriptor> aOutputs = jag.getActivityOutputMap().get(aid);
            if (aOutputs == null || aOutputs.isEmpty()) {
                ac.addRoot(activity);
            } else {
                int nActivityOutputs = aOutputs.size();
                for (int i = 0; i < nActivityOutputs; ++i) {
                    IConnectorDescriptor conn = aOutputs.get(i);
                    ac.addConnector(conn);
                    Pair<Pair<IActivity, Integer>, Pair<IActivity, Integer>> pcPair = jag.getConnectorActivityMap().get(conn.getConnectorId());
                    ac.connect(conn, activity, i, pcPair.getRight().getLeft(), pcPair.getRight().getRight(), jag.getConnectorRecordDescriptorMap().get(conn.getConnectorId()));
                }
            }
        }
    }
    Map<ActivityId, Set<ActivityId>> blocked2BlockerMap = jag.getBlocked2BlockerMap();
    for (ActivityCluster s : acList) {
        Map<ActivityId, Set<ActivityId>> acBlocked2BlockerMap = s.getBlocked2BlockerMap();
        Set<ActivityCluster> blockerStages = new HashSet<ActivityCluster>();
        for (ActivityId t : s.getActivityMap().keySet()) {
            Set<ActivityId> blockerTasks = blocked2BlockerMap.get(t);
            acBlocked2BlockerMap.put(t, blockerTasks);
            if (blockerTasks != null) {
                for (ActivityId bt : blockerTasks) {
                    blockerStages.add(acMap.get(bt));
                }
            }
        }
        for (ActivityCluster bs : blockerStages) {
            s.getDependencies().add(bs);
        }
    }
    acg.addActivityClusters(acList);
    if (LOGGER.isLoggable(Level.FINE)) {
        LOGGER.fine(acg.toJSON().asText());
    }
    return acg;
}
Also used : IConnectorDescriptor(org.apache.hyracks.api.dataflow.IConnectorDescriptor) Set(java.util.Set) HashSet(java.util.HashSet) HashMap(java.util.HashMap) ActivityId(org.apache.hyracks.api.dataflow.ActivityId) ArrayList(java.util.ArrayList) ActivityCluster(org.apache.hyracks.api.job.ActivityCluster) IActivity(org.apache.hyracks.api.dataflow.IActivity) ActivityClusterId(org.apache.hyracks.api.job.ActivityClusterId) ActivityClusterGraph(org.apache.hyracks.api.job.ActivityClusterGraph) HashSet(java.util.HashSet) Pair(org.apache.commons.lang3.tuple.Pair)
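
Example 2 shows where this method sits in the pipeline: the JobActivityGraph is built first, and the clusters are inferred from it. A condensed usage sketch (jobId and jag are assumed to come from that example):

// Usage sketch: infer activity clusters from a previously built JobActivityGraph.
ActivityClusterGraphBuilder acgb = new ActivityClusterGraphBuilder();
ActivityClusterGraph acg = acgb.inferActivityClusters(jobId, jag);
// acg now partitions the job's activities into ActivityClusters, with
// inter-cluster dependencies derived from the blocked-to-blocker map.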

Example 4 with ActivityClusterGraph

use of org.apache.hyracks.api.job.ActivityClusterGraph in project asterixdb by apache.

the class StartTasksWork method run.

@Override
public void run() {
    Task task = null;
    try {
        NCServiceContext serviceCtx = ncs.getContext();
        Joblet joblet = getOrCreateLocalJoblet(deploymentId, jobId, serviceCtx, acgBytes);
        final ActivityClusterGraph acg = joblet.getActivityClusterGraph();
        IRecordDescriptorProvider rdp = new IRecordDescriptorProvider() {

            @Override
            public RecordDescriptor getOutputRecordDescriptor(ActivityId aid, int outputIndex) {
                ActivityCluster ac = acg.getActivityMap().get(aid);
                IConnectorDescriptor conn = ac.getActivityOutputMap().get(aid).get(outputIndex);
                return ac.getConnectorRecordDescriptorMap().get(conn.getConnectorId());
            }

            @Override
            public RecordDescriptor getInputRecordDescriptor(ActivityId aid, int inputIndex) {
                ActivityCluster ac = acg.getActivityMap().get(aid);
                IConnectorDescriptor conn = ac.getActivityInputMap().get(aid).get(inputIndex);
                return ac.getConnectorRecordDescriptorMap().get(conn.getConnectorId());
            }
        };
        for (TaskAttemptDescriptor td : taskDescriptors) {
            TaskAttemptId taId = td.getTaskAttemptId();
            TaskId tid = taId.getTaskId();
            ActivityId aid = tid.getActivityId();
            ActivityCluster ac = acg.getActivityMap().get(aid);
            IActivity han = ac.getActivityMap().get(aid);
            if (LOGGER.isLoggable(Level.INFO)) {
                LOGGER.info("Initializing " + taId + " -> " + han);
            }
            final int partition = tid.getPartition();
            List<IConnectorDescriptor> inputs = ac.getActivityInputMap().get(aid);
            task = new Task(joblet, taId, han.getClass().getName(), ncs.getExecutor(), ncs, createInputChannels(td, inputs));
            IOperatorNodePushable operator = han.createPushRuntime(task, rdp, partition, td.getPartitionCount());
            List<IPartitionCollector> collectors = new ArrayList<>();
            if (inputs != null) {
                for (int i = 0; i < inputs.size(); ++i) {
                    IConnectorDescriptor conn = inputs.get(i);
                    IConnectorPolicy cPolicy = connectorPoliciesMap.get(conn.getConnectorId());
                    if (LOGGER.isLoggable(Level.INFO)) {
                        LOGGER.info("input: " + i + ": " + conn.getConnectorId());
                    }
                    RecordDescriptor recordDesc = ac.getConnectorRecordDescriptorMap().get(conn.getConnectorId());
                    IPartitionCollector collector = createPartitionCollector(td, partition, task, i, conn, recordDesc, cPolicy);
                    collectors.add(collector);
                }
            }
            List<IConnectorDescriptor> outputs = ac.getActivityOutputMap().get(aid);
            if (outputs != null) {
                for (int i = 0; i < outputs.size(); ++i) {
                    final IConnectorDescriptor conn = outputs.get(i);
                    RecordDescriptor recordDesc = ac.getConnectorRecordDescriptorMap().get(conn.getConnectorId());
                    IConnectorPolicy cPolicy = connectorPoliciesMap.get(conn.getConnectorId());
                    IPartitionWriterFactory pwFactory = createPartitionWriterFactory(task, cPolicy, jobId, conn, partition, taId, flags);
                    if (LOGGER.isLoggable(Level.INFO)) {
                        LOGGER.info("output: " + i + ": " + conn.getConnectorId());
                    }
                    IFrameWriter writer = conn.createPartitioner(task, recordDesc, pwFactory, partition, td.getPartitionCount(), td.getOutputPartitionCounts()[i]);
                    operator.setOutputFrameWriter(i, writer, recordDesc);
                }
            }
            task.setTaskRuntime(collectors.toArray(new IPartitionCollector[collectors.size()]), operator);
            joblet.addTask(task);
            task.start();
        }
    } catch (Exception e) {
        LOGGER.log(Level.WARNING, "Failure starting a task", e);
        // notify cc of start task failure
        List<Exception> exceptions = new ArrayList<>();
        // include the caught exception so the CC receives the actual failure cause
        exceptions.add(e);
        ExceptionUtils.setNodeIds(exceptions, ncs.getId());
        ncs.getWorkQueue().schedule(new NotifyTaskFailureWork(ncs, task, exceptions));
    }
}
Also used : IFrameWriter(org.apache.hyracks.api.comm.IFrameWriter) Task(org.apache.hyracks.control.nc.Task) TaskId(org.apache.hyracks.api.dataflow.TaskId) RecordDescriptor(org.apache.hyracks.api.dataflow.value.RecordDescriptor) ActivityId(org.apache.hyracks.api.dataflow.ActivityId) ArrayList(java.util.ArrayList) IRecordDescriptorProvider(org.apache.hyracks.api.dataflow.value.IRecordDescriptorProvider) Joblet(org.apache.hyracks.control.nc.Joblet) IActivity(org.apache.hyracks.api.dataflow.IActivity) NCServiceContext(org.apache.hyracks.control.nc.application.NCServiceContext) INCServiceContext(org.apache.hyracks.api.application.INCServiceContext) List(java.util.List) ArrayList(java.util.ArrayList) IConnectorDescriptor(org.apache.hyracks.api.dataflow.IConnectorDescriptor) IPartitionCollector(org.apache.hyracks.api.comm.IPartitionCollector) TaskAttemptId(org.apache.hyracks.api.dataflow.TaskAttemptId) IConnectorPolicy(org.apache.hyracks.api.dataflow.connectors.IConnectorPolicy) IPartitionWriterFactory(org.apache.hyracks.api.comm.IPartitionWriterFactory) HyracksDataException(org.apache.hyracks.api.exceptions.HyracksDataException) UnknownHostException(java.net.UnknownHostException) HyracksException(org.apache.hyracks.api.exceptions.HyracksException) ActivityCluster(org.apache.hyracks.api.job.ActivityCluster) TaskAttemptDescriptor(org.apache.hyracks.control.common.job.TaskAttemptDescriptor) ActivityClusterGraph(org.apache.hyracks.api.job.ActivityClusterGraph) IOperatorNodePushable(org.apache.hyracks.api.dataflow.IOperatorNodePushable)
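
Both record-descriptor callbacks above perform the same two-hop lookup: activity id to activity cluster, then connector id to record descriptor. A condensed restatement (the helper name and its boolean flag are illustrative, not part of the source):

// Hypothetical helper equivalent to the two anonymous callbacks above.
static RecordDescriptor lookupRecordDescriptor(ActivityClusterGraph acg, ActivityId aid,
        int index, boolean output) {
    ActivityCluster ac = acg.getActivityMap().get(aid);
    List<IConnectorDescriptor> conns =
            output ? ac.getActivityOutputMap().get(aid) : ac.getActivityInputMap().get(aid);
    IConnectorDescriptor conn = conns.get(index);
    return ac.getConnectorRecordDescriptorMap().get(conn.getConnectorId());
}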

Example 5 with ActivityClusterGraph

use of org.apache.hyracks.api.job.ActivityClusterGraph in project asterixdb by apache.

the class StartTasksWork method getOrCreateLocalJoblet.

private Joblet getOrCreateLocalJoblet(DeploymentId deploymentId, JobId jobId, INCServiceContext appCtx, byte[] acgBytes) throws HyracksException {
    Map<JobId, Joblet> jobletMap = ncs.getJobletMap();
    Joblet ji = jobletMap.get(jobId);
    if (ji == null) {
        ActivityClusterGraph acg = ncs.getActivityClusterGraph(jobId);
        if (acg == null) {
            if (acgBytes == null) {
                throw HyracksException.create(ErrorCode.ERROR_FINDING_DISTRIBUTED_JOB, jobId);
            }
            acg = (ActivityClusterGraph) DeploymentUtils.deserialize(acgBytes, deploymentId, appCtx);
        }
        ji = new Joblet(ncs, deploymentId, jobId, appCtx, acg);
        jobletMap.put(jobId, ji);
    }
    return ji;
}
Also used : ActivityClusterGraph(org.apache.hyracks.api.job.ActivityClusterGraph) JobId(org.apache.hyracks.api.job.JobId) Joblet(org.apache.hyracks.control.nc.Joblet)
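
The lookup order is: an existing joblet for the job, then an ActivityClusterGraph already distributed to the node, and finally the serialized ACG bytes shipped with the request. A hedged sketch of that last path (the serializing helper on the producer side is an assumption; only DeploymentUtils.deserialize appears in the method above):

// Illustrative round trip for the acgBytes fallback path.
byte[] acgBytes = JavaSerializationUtils.serialize(acg); // hypothetical producer side
ActivityClusterGraph restored =
        (ActivityClusterGraph) DeploymentUtils.deserialize(acgBytes, deploymentId, appCtx);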

Aggregations

ActivityClusterGraph (org.apache.hyracks.api.job.ActivityClusterGraph)12 IConnectorDescriptor (org.apache.hyracks.api.dataflow.IConnectorDescriptor)7 HashMap (java.util.HashMap)6 ActivityId (org.apache.hyracks.api.dataflow.ActivityId)6 ArrayList (java.util.ArrayList)5 List (java.util.List)5 ActivityCluster (org.apache.hyracks.api.job.ActivityCluster)5 ConnectorDescriptorId (org.apache.hyracks.api.dataflow.ConnectorDescriptorId)4 IActivity (org.apache.hyracks.api.dataflow.IActivity)4 HyracksException (org.apache.hyracks.api.exceptions.HyracksException)4 Pair (org.apache.commons.lang3.tuple.Pair)3 TaskId (org.apache.hyracks.api.dataflow.TaskId)3 IConnectorPolicy (org.apache.hyracks.api.dataflow.connectors.IConnectorPolicy)3 INodeManager (org.apache.hyracks.control.cc.cluster.INodeManager)3 TaskAttemptDescriptor (org.apache.hyracks.control.common.job.TaskAttemptDescriptor)3 HashSet (java.util.HashSet)2 Map (java.util.Map)2 Constraint (org.apache.hyracks.api.constraints.Constraint)2 TaskAttemptId (org.apache.hyracks.api.dataflow.TaskAttemptId)2 IRecordDescriptorProvider (org.apache.hyracks.api.dataflow.value.IRecordDescriptorProvider)2