Use of org.apache.hyracks.control.cc.job.JobRun in project asterixdb by apache.
The class FIFOJobQueue, method pull().
@Override
public List<JobRun> pull() {
    List<JobRun> jobRuns = new ArrayList<>();
    Iterator<JobRun> runIterator = jobListMap.values().iterator();
    while (runIterator.hasNext()) {
        JobRun run = runIterator.next();
        JobSpecification job = run.getJobSpecification();
        // Re-checks whether the job should be rejected or not, since the cluster capacity can change over time.
        try {
            IJobCapacityController.JobSubmissionStatus status = jobCapacityController.allocate(job);
            // Checks if the job can be executed immediately.
            if (status == IJobCapacityController.JobSubmissionStatus.EXECUTE) {
                jobRuns.add(run);
                // Removes the selected job.
                runIterator.remove();
            }
        } catch (HyracksException exception) {
            // The required capacity exceeds the maximum capacity.
            List<Exception> exceptions = new ArrayList<>();
            exceptions.add(exception);
            // Removes the job from the queue.
            runIterator.remove();
            try {
                // Fails the job.
                jobManager.prepareComplete(run, JobStatus.FAILURE_BEFORE_EXECUTION, exceptions);
            } catch (HyracksException e) {
                LOGGER.log(Level.SEVERE, e.getMessage(), e);
            }
        }
    }
    return jobRuns;
}
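The body of pull() boils down to a FIFO admission loop: walk the queue in arrival order, ask the capacity controller about each job, dispatch the job if it fits now, drop and fail it if it can never fit, and otherwise leave it queued for a later pull. The following is a minimal, self-contained sketch of that loop; Decision, CapacityController, and FifoAdmission are hypothetical stand-ins rather than Hyracks types, and rejection is modeled as an enum value instead of the HyracksException used above.

import java.util.ArrayList;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

// Hypothetical stand-ins for the types used in pull() above.
enum Decision { EXECUTE, QUEUE, REJECT }

interface CapacityController {
    Decision allocate(String jobId, int requiredCores);
}

final class FifoAdmission {
    // An insertion-ordered map preserves FIFO order, like jobListMap above.
    private final Map<String, Integer> queued = new LinkedHashMap<>();
    private final CapacityController controller;

    FifoAdmission(CapacityController controller) {
        this.controller = controller;
    }

    void submit(String jobId, int requiredCores) {
        queued.put(jobId, requiredCores);
    }

    /** Returns the jobs that can start now; jobs that can never fit are dropped from the queue. */
    List<String> pull() {
        List<String> runnable = new ArrayList<>();
        Iterator<Map.Entry<String, Integer>> it = queued.entrySet().iterator();
        while (it.hasNext()) {
            Map.Entry<String, Integer> e = it.next();
            Decision d = controller.allocate(e.getKey(), e.getValue());
            if (d == Decision.EXECUTE) {
                runnable.add(e.getKey());
                it.remove();          // selected for execution
            } else if (d == Decision.REJECT) {
                it.remove();          // can never fit; corresponds to the HyracksException branch above
            }                         // QUEUE: leave it for a later pull()
        }
        return runnable;
    }
}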
Use of org.apache.hyracks.control.cc.job.JobRun in project asterixdb by apache.
The class ActivityClusterPlanner, method buildConnectorPolicyAwareTaskClusters().
private TaskCluster[] buildConnectorPolicyAwareTaskClusters(ActivityCluster ac,
        Map<ActivityId, ActivityPlan> activityPlanMap,
        Map<TaskId, List<Pair<TaskId, ConnectorDescriptorId>>> taskConnectivity) {
    // Start with a singleton cluster per task.
    Map<TaskId, Set<TaskId>> taskClusterMap = new HashMap<>();
    for (ActivityId anId : ac.getActivityMap().keySet()) {
        ActivityPlan ap = activityPlanMap.get(anId);
        Task[] tasks = ap.getTasks();
        for (Task t : tasks) {
            Set<TaskId> cluster = new HashSet<>();
            TaskId tid = t.getTaskId();
            cluster.add(tid);
            taskClusterMap.put(tid, cluster);
        }
    }
    // Merge tasks connected by connectors whose policy requires producer-consumer co-scheduling.
    JobRun jobRun = executor.getJobRun();
    Map<ConnectorDescriptorId, IConnectorPolicy> connectorPolicies = jobRun.getConnectorPolicyMap();
    for (Map.Entry<TaskId, List<Pair<TaskId, ConnectorDescriptorId>>> e : taskConnectivity.entrySet()) {
        Set<TaskId> cluster = taskClusterMap.get(e.getKey());
        for (Pair<TaskId, ConnectorDescriptorId> p : e.getValue()) {
            IConnectorPolicy cPolicy = connectorPolicies.get(p.getRight());
            if (cPolicy.requiresProducerConsumerCoscheduling()) {
                cluster.add(p.getLeft());
            }
        }
    }
    /*
     * We compute the transitive closure of this (producer-consumer) relation to find the largest set of
     * tasks that need to be co-scheduled.
     */
    // Assign each task an ordinal so the relation can be kept in BitSets.
    int counter = 0;
    TaskId[] ordinalList = new TaskId[taskClusterMap.size()];
    Map<TaskId, Integer> ordinalMap = new HashMap<>();
    for (TaskId tid : taskClusterMap.keySet()) {
        ordinalList[counter] = tid;
        ordinalMap.put(tid, counter);
        ++counter;
    }
    int n = ordinalList.length;
    BitSet[] paths = new BitSet[n];
    for (Map.Entry<TaskId, Set<TaskId>> e : taskClusterMap.entrySet()) {
        int i = ordinalMap.get(e.getKey());
        BitSet bsi = paths[i];
        if (bsi == null) {
            bsi = new BitSet(n);
            paths[i] = bsi;
        }
        for (TaskId ttid : e.getValue()) {
            int j = ordinalMap.get(ttid);
            paths[i].set(j);
            BitSet bsj = paths[j];
            if (bsj == null) {
                bsj = new BitSet(n);
                paths[j] = bsj;
            }
            bsj.set(i);
        }
    }
    for (int k = 0; k < n; ++k) {
        for (int i = paths[k].nextSetBit(0); i >= 0; i = paths[k].nextSetBit(i + 1)) {
            for (int j = paths[i].nextClearBit(0); j < n && j >= 0; j = paths[i].nextClearBit(j + 1)) {
                paths[i].set(j, paths[k].get(j));
                paths[j].set(i, paths[i].get(j));
            }
        }
    }
    // Each distinct closed row becomes one cluster of co-scheduled tasks.
    BitSet pending = new BitSet(n);
    pending.set(0, n);
    List<List<TaskId>> clusters = new ArrayList<>();
    for (int i = pending.nextSetBit(0); i >= 0; i = pending.nextSetBit(i)) {
        List<TaskId> cluster = new ArrayList<>();
        for (int j = paths[i].nextSetBit(0); j >= 0; j = paths[i].nextSetBit(j + 1)) {
            cluster.add(ordinalList[j]);
            pending.clear(j);
        }
        clusters.add(cluster);
    }
    // Materialize the TaskCluster objects and link every task back to its cluster.
    List<TaskCluster> tcSet = new ArrayList<>();
    counter = 0;
    for (List<TaskId> cluster : clusters) {
        List<Task> taskStates = new ArrayList<>();
        for (TaskId tid : cluster) {
            taskStates.add(activityPlanMap.get(tid.getActivityId()).getTasks()[tid.getPartition()]);
        }
        TaskCluster tc = new TaskCluster(new TaskClusterId(ac.getId(), counter++), ac,
                taskStates.toArray(new Task[taskStates.size()]));
        tcSet.add(tc);
        for (TaskId tid : cluster) {
            activityPlanMap.get(tid.getActivityId()).getTasks()[tid.getPartition()].setTaskCluster(tc);
        }
    }
    return tcSet.toArray(new TaskCluster[tcSet.size()]);
}
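The BitSet manipulation in the middle of this method is the core idea: each task owns one row of a boolean reachability matrix, co-scheduling edges are added symmetrically, and a Warshall-style pass closes the relation so that every connected group of tasks ends up in the same cluster. A stripped-down, self-contained sketch of the same idea over plain integer ids (no Hyracks types, and with the simpler row-OR form of the closure) is shown below.

import java.util.ArrayList;
import java.util.BitSet;
import java.util.List;

final class CoScheduleGrouping {
    /**
     * edges[k] = {i, j} means tasks i and j must be co-scheduled.
     * Returns the groups of task ids that end up in the same cluster.
     */
    static List<List<Integer>> group(int n, int[][] edges) {
        // One reachability row per task; every task reaches itself.
        BitSet[] paths = new BitSet[n];
        for (int i = 0; i < n; i++) {
            paths[i] = new BitSet(n);
            paths[i].set(i);
        }
        // Add co-scheduling edges symmetrically.
        for (int[] e : edges) {
            paths[e[0]].set(e[1]);
            paths[e[1]].set(e[0]);
        }
        // Transitive closure (Warshall over booleans): if i reaches k and k reaches j, then i reaches j.
        for (int k = 0; k < n; k++) {
            for (int i = 0; i < n; i++) {
                if (paths[i].get(k)) {
                    paths[i].or(paths[k]);
                }
            }
        }
        // Each distinct closed row is one cluster; 'pending' tracks tasks not yet assigned.
        BitSet pending = new BitSet(n);
        pending.set(0, n);
        List<List<Integer>> clusters = new ArrayList<>();
        for (int i = pending.nextSetBit(0); i >= 0; i = pending.nextSetBit(i + 1)) {
            List<Integer> cluster = new ArrayList<>();
            for (int j = paths[i].nextSetBit(0); j >= 0; j = paths[i].nextSetBit(j + 1)) {
                cluster.add(j);
                pending.clear(j);
            }
            clusters.add(cluster);
        }
        return clusters;
    }

    public static void main(String[] args) {
        // Tasks 0-1-2 are chained through co-scheduling edges, task 3 stands alone: two clusters.
        System.out.println(group(4, new int[][] { { 0, 1 }, { 1, 2 } }));  // [[0, 1, 2], [3]]
    }
}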
Use of org.apache.hyracks.control.cc.job.JobRun in project asterixdb by apache.
The class AbstractTaskLifecycleWork, method runWork().
@Override
public final void runWork() {
    IJobManager jobManager = ccs.getJobManager();
    JobRun run = jobManager.get(jobId);
    if (run != null) {
        TaskId tid = taId.getTaskId();
        Map<ActivityId, ActivityCluster> activityClusterMap = run.getActivityClusterGraph().getActivityMap();
        ActivityCluster ac = activityClusterMap.get(tid.getActivityId());
        if (ac != null) {
            Map<ActivityId, ActivityPlan> taskStateMap =
                    run.getActivityClusterPlanMap().get(ac.getId()).getActivityPlanMap();
            Task[] taskStates = taskStateMap.get(tid.getActivityId()).getTasks();
            if (taskStates != null && taskStates.length > tid.getPartition()) {
                Task ts = taskStates[tid.getPartition()];
                TaskCluster tc = ts.getTaskCluster();
                List<TaskClusterAttempt> taskClusterAttempts = tc.getAttempts();
                if (taskClusterAttempts != null && taskClusterAttempts.size() > taId.getAttempt()) {
                    TaskClusterAttempt tca = taskClusterAttempts.get(taId.getAttempt());
                    TaskAttempt ta = tca.getTaskAttempts().get(tid);
                    if (ta != null) {
                        performEvent(ta);
                    }
                }
            }
        }
    }
}
Use of org.apache.hyracks.control.cc.job.JobRun in project asterixdb by apache.
The class GetActivityClusterGraphJSONWork, method doRun().
@Override
protected void doRun() throws Exception {
    IJobManager jobManager = ccs.getJobManager();
    ObjectMapper om = new ObjectMapper();
    JobRun run = jobManager.get(jobId);
    if (run == null) {
        json = om.createObjectNode();
        return;
    }
    json = run.getActivityClusterGraph().toJSON();
}
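Note that doRun() answers an unknown job id with an empty JSON object rather than null, which keeps the handler that consumes this work item free of null checks. A small, self-contained sketch of the same pattern with Jackson follows; JobJsonLookup and its in-memory registry are hypothetical, while ObjectMapper.createObjectNode() and valueToTree() are ordinary Jackson calls.

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;

import java.util.Map;

final class JobJsonLookup {
    private final ObjectMapper om = new ObjectMapper();
    private final Map<Long, Map<String, Object>> jobs; // hypothetical in-memory registry of job descriptions

    JobJsonLookup(Map<Long, Map<String, Object>> jobs) {
        this.jobs = jobs;
    }

    /** Mirrors doRun(): unknown ids yield an empty object rather than null or an error. */
    JsonNode describe(long jobId) {
        Map<String, Object> job = jobs.get(jobId);
        if (job == null) {
            return om.createObjectNode();       // empty {} keeps callers simple
        }
        return om.valueToTree(job);             // serialize whatever the registry holds
    }
}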
Use of org.apache.hyracks.control.cc.job.JobRun in project asterixdb by apache.
The class RegisterPartitionRequestWork, method run().
@Override
public void run() {
    PartitionId pid = partitionRequest.getPartitionId();
    IJobManager jobManager = ccs.getJobManager();
    JobRun run = jobManager.get(pid.getJobId());
    if (run == null) {
        return;
    }
    PartitionMatchMaker pmm = run.getPartitionMatchMaker();
    Pair<PartitionDescriptor, PartitionRequest> match = pmm.matchPartitionRequest(partitionRequest);
    if (match != null) {
        try {
            PartitionUtils.reportPartitionMatch(ccs, pid, match);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
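This work item handles only the consumer side of a rendezvous: the request is handed to the job's PartitionMatchMaker, and a match is reported only if a producer has already registered the corresponding partition descriptor; otherwise the request waits until the producer side arrives. A generic, self-contained sketch of that rendezvous, with hypothetical names and no Hyracks types, looks roughly like this.

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.HashMap;
import java.util.Map;

// Minimal rendezvous between partition producers (descriptors) and consumers (requests),
// keyed by a partition id; a hypothetical stand-in for the match-maker role, not the Hyracks class.
final class MatchMaker<K, D, R> {
    private final Map<K, Deque<D>> descriptors = new HashMap<>();
    private final Map<K, Deque<R>> requests = new HashMap<>();

    /** A producer announces a partition; returns a waiting request if one exists, else null. */
    synchronized R registerDescriptor(K key, D descriptor) {
        Deque<R> waiting = requests.get(key);
        if (waiting != null && !waiting.isEmpty()) {
            return waiting.poll();                                      // matched immediately
        }
        descriptors.computeIfAbsent(key, k -> new ArrayDeque<>()).add(descriptor);
        return null;                                                    // parked until a request arrives
    }

    /** A consumer asks for a partition; returns an available descriptor if one exists, else null. */
    synchronized D matchRequest(K key, R request) {
        Deque<D> available = descriptors.get(key);
        if (available != null && !available.isEmpty()) {
            return available.poll();                                    // matched immediately
        }
        requests.computeIfAbsent(key, k -> new ArrayDeque<>()).add(request);
        return null;                                                    // parked until a descriptor arrives
    }
}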