Use of org.apache.hyracks.api.dataflow.ActivityId in project asterixdb by apache.
The class StartTasksWork, method run().
@Override
public void run() {
    Task task = null;
    try {
        NCServiceContext serviceCtx = ncs.getContext();
        Joblet joblet = getOrCreateLocalJoblet(deploymentId, jobId, serviceCtx, acgBytes);
        final ActivityClusterGraph acg = joblet.getActivityClusterGraph();
        // Resolves record descriptors on demand by looking the ActivityId up in the ACG.
        IRecordDescriptorProvider rdp = new IRecordDescriptorProvider() {

            @Override
            public RecordDescriptor getOutputRecordDescriptor(ActivityId aid, int outputIndex) {
                ActivityCluster ac = acg.getActivityMap().get(aid);
                IConnectorDescriptor conn = ac.getActivityOutputMap().get(aid).get(outputIndex);
                return ac.getConnectorRecordDescriptorMap().get(conn.getConnectorId());
            }

            @Override
            public RecordDescriptor getInputRecordDescriptor(ActivityId aid, int inputIndex) {
                ActivityCluster ac = acg.getActivityMap().get(aid);
                IConnectorDescriptor conn = ac.getActivityInputMap().get(aid).get(inputIndex);
                return ac.getConnectorRecordDescriptorMap().get(conn.getConnectorId());
            }
        };
        for (TaskAttemptDescriptor td : taskDescriptors) {
            TaskAttemptId taId = td.getTaskAttemptId();
            TaskId tid = taId.getTaskId();
            ActivityId aid = tid.getActivityId();
            ActivityCluster ac = acg.getActivityMap().get(aid);
            IActivity han = ac.getActivityMap().get(aid);
            if (LOGGER.isLoggable(Level.INFO)) {
                LOGGER.info("Initializing " + taId + " -> " + han);
            }
            final int partition = tid.getPartition();
            List<IConnectorDescriptor> inputs = ac.getActivityInputMap().get(aid);
            task = new Task(joblet, taId, han.getClass().getName(), ncs.getExecutor(), ncs,
                    createInputChannels(td, inputs));
            IOperatorNodePushable operator = han.createPushRuntime(task, rdp, partition, td.getPartitionCount());
            // Create one partition collector per input connector.
            List<IPartitionCollector> collectors = new ArrayList<>();
            if (inputs != null) {
                for (int i = 0; i < inputs.size(); ++i) {
                    IConnectorDescriptor conn = inputs.get(i);
                    IConnectorPolicy cPolicy = connectorPoliciesMap.get(conn.getConnectorId());
                    if (LOGGER.isLoggable(Level.INFO)) {
                        LOGGER.info("input: " + i + ": " + conn.getConnectorId());
                    }
                    RecordDescriptor recordDesc = ac.getConnectorRecordDescriptorMap().get(conn.getConnectorId());
                    IPartitionCollector collector =
                            createPartitionCollector(td, partition, task, i, conn, recordDesc, cPolicy);
                    collectors.add(collector);
                }
            }
            // Create one partitioning frame writer per output connector.
            List<IConnectorDescriptor> outputs = ac.getActivityOutputMap().get(aid);
            if (outputs != null) {
                for (int i = 0; i < outputs.size(); ++i) {
                    final IConnectorDescriptor conn = outputs.get(i);
                    RecordDescriptor recordDesc = ac.getConnectorRecordDescriptorMap().get(conn.getConnectorId());
                    IConnectorPolicy cPolicy = connectorPoliciesMap.get(conn.getConnectorId());
                    IPartitionWriterFactory pwFactory =
                            createPartitionWriterFactory(task, cPolicy, jobId, conn, partition, taId, flags);
                    if (LOGGER.isLoggable(Level.INFO)) {
                        LOGGER.info("output: " + i + ": " + conn.getConnectorId());
                    }
                    IFrameWriter writer = conn.createPartitioner(task, recordDesc, pwFactory, partition,
                            td.getPartitionCount(), td.getOutputPartitionCounts()[i]);
                    operator.setOutputFrameWriter(i, writer, recordDesc);
                }
            }
            task.setTaskRuntime(collectors.toArray(new IPartitionCollector[collectors.size()]), operator);
            joblet.addTask(task);
            task.start();
        }
    } catch (Exception e) {
        LOGGER.log(Level.WARNING, "Failure starting a task", e);
        // notify cc of start task failure
        List<Exception> exceptions = new ArrayList<>();
        exceptions.add(e); // carry the cause; setNodeIds then tags it with this node's id
        ExceptionUtils.setNodeIds(exceptions, ncs.getId());
        ncs.getWorkQueue().schedule(new NotifyTaskFailureWork(ncs, task, exceptions));
    }
}
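ActivityId is a value object (an OperatorDescriptorId plus a local activity index) with value-based equals and hashCode, which is what lets run() above use it as the key into the ActivityClusterGraph maps. A minimal standalone sketch of that property (the map content is illustrative, not from the AsterixDB sources):

import java.util.HashMap;
import java.util.Map;

import org.apache.hyracks.api.dataflow.ActivityId;
import org.apache.hyracks.api.dataflow.OperatorDescriptorId;

public class ActivityIdLookupSketch {
    public static void main(String[] args) {
        Map<ActivityId, String> activityMap = new HashMap<>();
        // Operator 0, activity 0 -- the same shape of id that StartTasksWork resolves.
        activityMap.put(new ActivityId(new OperatorDescriptorId(0), 0), "scanner");
        // A structurally equal id finds the same entry, as acg.getActivityMap().get(aid) relies on.
        System.out.println(activityMap.get(new ActivityId(new OperatorDescriptorId(0), 0)));
    }
}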
Use of org.apache.hyracks.api.dataflow.ActivityId in project asterixdb by apache.
The class ThreadCountingOperatorDescriptor, method contributeActivities().
@Override
public void contributeActivities(IActivityGraphBuilder builder) {
    ThreadCountingActivityNode tca = new ThreadCountingActivityNode(new ActivityId(getOperatorId(), 0));
    builder.addActivity(this, tca);
    for (int i = 0; i < inputArity; ++i) {
        builder.addSourceEdge(i, tca, i);
    }
}
Use of org.apache.hyracks.api.dataflow.ActivityId in project asterixdb by apache.
The class ExceptionRaisingOperatorDescriptor, method contributeActivities().
@Override
public void contributeActivities(IActivityGraphBuilder builder) {
    ExceptionRaisingActivityNode tca = new ExceptionRaisingActivityNode(new ActivityId(getOperatorId(), 0));
    builder.addActivity(this, tca);
    for (int i = 0; i < inputArity; ++i) {
        builder.addSourceEdge(i, tca, i);
    }
}
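ThreadCountingOperatorDescriptor and ExceptionRaisingOperatorDescriptor both follow the same single-activity pattern. A hedged generic sketch of that pattern (MyActivityNode, inputArity, and outputArity are placeholder names; the addTargetEdge half applies only to operators that also produce output, which the two test operators above do not wire):

@Override
public void contributeActivities(IActivityGraphBuilder builder) {
    // One activity, numbered 0 within this operator, owns all inputs and outputs.
    MyActivityNode node = new MyActivityNode(new ActivityId(getOperatorId(), 0));
    builder.addActivity(this, node);
    for (int i = 0; i < inputArity; ++i) {
        builder.addSourceEdge(i, node, i); // operator input i feeds activity input i
    }
    for (int i = 0; i < outputArity; ++i) {
        builder.addTargetEdge(i, node, i); // activity output i becomes operator output i
    }
}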
Use of org.apache.hyracks.api.dataflow.ActivityId in project asterixdb by apache.
The class TestUtils, method create().
public static IHyracksTaskContext create(int frameSize) {
    try {
        IOManager ioManager = createIoManager();
        INCServiceContext serviceCtx = new TestNCServiceContext(ioManager, null);
        TestJobletContext jobletCtx = new TestJobletContext(frameSize, serviceCtx, new JobId(0));
        TaskAttemptId tid = new TaskAttemptId(new TaskId(new ActivityId(new OperatorDescriptorId(0), 0), 0), 0);
        IHyracksTaskContext taskCtx = new TestTaskContext(jobletCtx, tid);
        return taskCtx;
    } catch (HyracksException e) {
        throw new RuntimeException(e);
    }
}
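A hedged usage sketch (the 32768-byte frame size is an arbitrary illustration, not a value taken from the tests):

// One call produces a task context ready for unit-testing operator runtimes.
IHyracksTaskContext ctx = TestUtils.create(32768);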
Use of org.apache.hyracks.api.dataflow.ActivityId in project asterixdb by apache.
The class ActivityClusterPlanner, method buildConnectorPolicyAwareTaskClusters().
private TaskCluster[] buildConnectorPolicyAwareTaskClusters(ActivityCluster ac,
        Map<ActivityId, ActivityPlan> activityPlanMap,
        Map<TaskId, List<Pair<TaskId, ConnectorDescriptorId>>> taskConnectivity) {
    // Start with a singleton cluster per task.
    Map<TaskId, Set<TaskId>> taskClusterMap = new HashMap<>();
    for (ActivityId anId : ac.getActivityMap().keySet()) {
        ActivityPlan ap = activityPlanMap.get(anId);
        Task[] tasks = ap.getTasks();
        for (Task t : tasks) {
            Set<TaskId> cluster = new HashSet<>();
            TaskId tid = t.getTaskId();
            cluster.add(tid);
            taskClusterMap.put(tid, cluster);
        }
    }
    // Grow a task's cluster with every neighbor whose connector requires co-scheduling.
    JobRun jobRun = executor.getJobRun();
    Map<ConnectorDescriptorId, IConnectorPolicy> connectorPolicies = jobRun.getConnectorPolicyMap();
    for (Map.Entry<TaskId, List<Pair<TaskId, ConnectorDescriptorId>>> e : taskConnectivity.entrySet()) {
        Set<TaskId> cluster = taskClusterMap.get(e.getKey());
        for (Pair<TaskId, ConnectorDescriptorId> p : e.getValue()) {
            IConnectorPolicy cPolicy = connectorPolicies.get(p.getRight());
            if (cPolicy.requiresProducerConsumerCoscheduling()) {
                cluster.add(p.getLeft());
            }
        }
    }
    /*
     * We compute the transitive closure of this (producer-consumer) relation to find the largest set of
     * tasks that need to be co-scheduled.
     */
    // Assign each task an ordinal so the relation can be held in BitSets.
    int counter = 0;
    TaskId[] ordinalList = new TaskId[taskClusterMap.size()];
    Map<TaskId, Integer> ordinalMap = new HashMap<>();
    for (TaskId tid : taskClusterMap.keySet()) {
        ordinalList[counter] = tid;
        ordinalMap.put(tid, counter);
        ++counter;
    }
    int n = ordinalList.length;
    // paths[i] holds the set of ordinals reachable from i, seeded symmetrically.
    BitSet[] paths = new BitSet[n];
    for (Map.Entry<TaskId, Set<TaskId>> e : taskClusterMap.entrySet()) {
        int i = ordinalMap.get(e.getKey());
        BitSet bsi = paths[i];
        if (bsi == null) {
            bsi = new BitSet(n);
            paths[i] = bsi;
        }
        for (TaskId ttid : e.getValue()) {
            int j = ordinalMap.get(ttid);
            paths[i].set(j);
            BitSet bsj = paths[j];
            if (bsj == null) {
                bsj = new BitSet(n);
                paths[j] = bsj;
            }
            bsj.set(i);
        }
    }
    // Close the relation transitively: every row reachable from k inherits k's reachability.
    for (int k = 0; k < n; ++k) {
        for (int i = paths[k].nextSetBit(0); i >= 0; i = paths[k].nextSetBit(i + 1)) {
            for (int j = paths[i].nextClearBit(0); j < n && j >= 0; j = paths[i].nextClearBit(j + 1)) {
                paths[i].set(j, paths[k].get(j));
                paths[j].set(i, paths[i].get(j));
            }
        }
    }
    // Read each connected component off the closed relation exactly once.
    BitSet pending = new BitSet(n);
    pending.set(0, n);
    List<List<TaskId>> clusters = new ArrayList<>();
    for (int i = pending.nextSetBit(0); i >= 0; i = pending.nextSetBit(i)) {
        List<TaskId> cluster = new ArrayList<>();
        for (int j = paths[i].nextSetBit(0); j >= 0; j = paths[i].nextSetBit(j + 1)) {
            cluster.add(ordinalList[j]);
            pending.clear(j);
        }
        clusters.add(cluster);
    }
    // Materialize a TaskCluster per component and link each task back to it.
    List<TaskCluster> tcSet = new ArrayList<>();
    counter = 0;
    for (List<TaskId> cluster : clusters) {
        List<Task> taskStates = new ArrayList<>();
        for (TaskId tid : cluster) {
            taskStates.add(activityPlanMap.get(tid.getActivityId()).getTasks()[tid.getPartition()]);
        }
        TaskCluster tc = new TaskCluster(new TaskClusterId(ac.getId(), counter++), ac,
                taskStates.toArray(new Task[taskStates.size()]));
        tcSet.add(tc);
        for (TaskId tid : cluster) {
            activityPlanMap.get(tid.getActivityId()).getTasks()[tid.getPartition()].setTaskCluster(tc);
        }
    }
    return tcSet.toArray(new TaskCluster[tcSet.size()]);
}
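The closure and read-off above are easier to see in isolation. The following self-contained sketch (plain Java; the edge list is invented for illustration) mirrors the pending/paths loops, but replaces the nextClearBit update with the simpler, equivalent BitSet.or row merge:

import java.util.ArrayList;
import java.util.BitSet;
import java.util.List;

public class CoScheduleClosureSketch {
    public static void main(String[] args) {
        int n = 5;
        int[][] edges = { { 0, 1 }, { 1, 2 }, { 3, 4 } }; // assumed co-scheduling pairs
        BitSet[] paths = new BitSet[n];
        for (int i = 0; i < n; i++) {
            paths[i] = new BitSet(n);
            paths[i].set(i); // every task starts in its own cluster
        }
        for (int[] e : edges) { // seed symmetrically, like the producer-consumer pairs
            paths[e[0]].set(e[1]);
            paths[e[1]].set(e[0]);
        }
        // Transitive closure: any row that reaches k absorbs k's reachability set.
        for (int k = 0; k < n; k++) {
            for (int i = 0; i < n; i++) {
                if (paths[i].get(k)) {
                    paths[i].or(paths[k]);
                }
            }
        }
        // Read off each component once, clearing its members from the pending set.
        BitSet pending = new BitSet(n);
        pending.set(0, n);
        List<List<Integer>> clusters = new ArrayList<>();
        for (int i = pending.nextSetBit(0); i >= 0; i = pending.nextSetBit(i)) {
            List<Integer> cluster = new ArrayList<>();
            for (int j = paths[i].nextSetBit(0); j >= 0; j = paths[i].nextSetBit(j + 1)) {
                cluster.add(j);
                pending.clear(j);
            }
            clusters.add(cluster);
        }
        System.out.println(clusters); // prints [[0, 1, 2], [3, 4]]
    }
}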