Use of org.apache.hyracks.api.dataflow.ConnectorDescriptorId in project asterixdb by apache.
From the class JobExecutor, method startTasks.
private void startTasks(Map<String, List<TaskAttemptDescriptor>> taskAttemptMap) throws HyracksException {
    final DeploymentId deploymentId = jobRun.getDeploymentId();
    final JobId jobId = jobRun.getJobId();
    final ActivityClusterGraph acg = jobRun.getActivityClusterGraph();
    final Map<ConnectorDescriptorId, IConnectorPolicy> connectorPolicies =
            new HashMap<>(jobRun.getConnectorPolicyMap());
    INodeManager nodeManager = ccs.getNodeManager();
    try {
        // Pre-distributed jobs already have the graph on the nodes; otherwise serialize it once here.
        byte[] acgBytes = predistributed ? null : JavaSerializationUtils.serialize(acg);
        for (Map.Entry<String, List<TaskAttemptDescriptor>> entry : taskAttemptMap.entrySet()) {
            String nodeId = entry.getKey();
            final List<TaskAttemptDescriptor> taskDescriptors = entry.getValue();
            final NodeControllerState node = nodeManager.getNodeControllerState(nodeId);
            if (node != null) {
                node.getActiveJobIds().add(jobId);
                boolean changed = jobRun.getParticipatingNodeIds().add(nodeId);
                if (LOGGER.isLoggable(Level.FINE)) {
                    LOGGER.fine("Starting: " + taskDescriptors + " at " + nodeId);
                }
                // Ship the serialized graph only the first time this node joins the job.
                byte[] jagBytes = changed ? acgBytes : null;
                node.getNodeController().startTasks(deploymentId, jobId, jagBytes, taskDescriptors,
                        connectorPolicies, jobRun.getFlags());
            }
        }
    } catch (Exception e) {
        throw new HyracksException(e);
    }
}
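The "changed ? acgBytes : null" pattern above avoids re-sending the serialized activity cluster graph to a node that already received it. Below is a minimal, self-contained sketch of that idea; FirstContactDispatcher and sendStartTasks are hypothetical names for illustration, not Hyracks API.

import java.util.HashSet;
import java.util.Set;

public class FirstContactDispatcher {
    private final Set<String> participatingNodeIds = new HashSet<>();

    // Send the serialized graph only on a node's first dispatch; later dispatches send null.
    public void dispatch(String nodeId, byte[] acgBytes) {
        boolean firstContact = participatingNodeIds.add(nodeId); // true only on first insertion
        byte[] graphBytes = firstContact ? acgBytes : null;
        sendStartTasks(nodeId, graphBytes);
    }

    // Stand-in for node.getNodeController().startTasks(...).
    private void sendStartTasks(String nodeId, byte[] graphBytes) {
        System.out.println("startTasks -> " + nodeId
                + (graphBytes == null ? " (graph already shipped)" : " (+" + graphBytes.length + " graph bytes)"));
    }

    public static void main(String[] args) {
        FirstContactDispatcher d = new FirstContactDispatcher();
        byte[] acg = new byte[] { 1, 2, 3 };
        d.dispatch("nc1", acg); // ships the graph
        d.dispatch("nc1", acg); // graph omitted on the second dispatch
    }
}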
Use of org.apache.hyracks.api.dataflow.ConnectorDescriptorId in project asterixdb by apache.
From the class JobExecutor, method assignTaskLocations.
private void assignTaskLocations(TaskCluster tc, Map<String, List<TaskAttemptDescriptor>> taskAttemptMap)
        throws HyracksException {
    ActivityClusterGraph acg = jobRun.getActivityClusterGraph();
    Task[] tasks = tc.getTasks();
    List<TaskClusterAttempt> tcAttempts = tc.getAttempts();
    int attempts = tcAttempts.size();
    TaskClusterAttempt tcAttempt = new TaskClusterAttempt(tc, attempts);
    Map<TaskId, TaskAttempt> taskAttempts = new HashMap<>();
    Map<TaskId, LValueConstraintExpression> locationMap = new HashMap<>();
    // Create one attempt per task and collect a location constraint for each.
    for (int i = 0; i < tasks.length; ++i) {
        Task ts = tasks[i];
        TaskId tid = ts.getTaskId();
        TaskAttempt taskAttempt = new TaskAttempt(tcAttempt,
                new TaskAttemptId(new TaskId(tid.getActivityId(), tid.getPartition()), attempts), ts);
        taskAttempt.setStatus(TaskAttempt.TaskStatus.INITIALIZED, null);
        locationMap.put(tid,
                new PartitionLocationExpression(tid.getActivityId().getOperatorDescriptorId(), tid.getPartition()));
        taskAttempts.put(tid, taskAttempt);
    }
    tcAttempt.setTaskAttempts(taskAttempts);
    // Resolve the collected location constraints to concrete node ids.
    solver.solve(locationMap.values());
    for (int i = 0; i < tasks.length; ++i) {
        Task ts = tasks[i];
        TaskId tid = ts.getTaskId();
        TaskAttempt taskAttempt = taskAttempts.get(tid);
        String nodeId = assignLocation(acg, locationMap, tid, taskAttempt);
        taskAttempt.setNodeId(nodeId);
        taskAttempt.setStatus(TaskAttempt.TaskStatus.RUNNING, null);
        taskAttempt.setStartTime(System.currentTimeMillis());
        List<TaskAttemptDescriptor> tads = taskAttemptMap.computeIfAbsent(nodeId, k -> new ArrayList<>());
        OperatorDescriptorId opId = tid.getActivityId().getOperatorDescriptorId();
        jobRun.registerOperatorLocation(opId, tid.getPartition(), nodeId);
        ActivityPartitionDetails apd = ts.getActivityPlan().getActivityPartitionDetails();
        TaskAttemptDescriptor tad = new TaskAttemptDescriptor(taskAttempt.getTaskAttemptId(),
                apd.getPartitionCount(), apd.getInputPartitionCounts(), apd.getOutputPartitionCounts());
        tads.add(tad);
    }
    tcAttempt.initializePendingTaskCounter();
    tcAttempts.add(tcAttempt);
    /*
     * To reduce master/slave message traffic, each TaskAttemptDescriptor carries a
     * NetworkAddress[][] partitionLocations in which each row corresponds to an incoming
     * connector descriptor and each column to an input channel of that connector.
     */
    INodeManager nodeManager = ccs.getNodeManager();
    for (Map.Entry<String, List<TaskAttemptDescriptor>> e : taskAttemptMap.entrySet()) {
        List<TaskAttemptDescriptor> tads = e.getValue();
        for (TaskAttemptDescriptor tad : tads) {
            TaskAttemptId taid = tad.getTaskAttemptId();
            int attempt = taid.getAttempt();
            TaskId tid = taid.getTaskId();
            ActivityId aid = tid.getActivityId();
            List<IConnectorDescriptor> inConnectors = acg.getActivityInputs(aid);
            int[] inPartitionCounts = tad.getInputPartitionCounts();
            if (inPartitionCounts == null) {
                continue;
            }
            NetworkAddress[][] partitionLocations = new NetworkAddress[inPartitionCounts.length][];
            for (int i = 0; i < inPartitionCounts.length; ++i) {
                ConnectorDescriptorId cdId = inConnectors.get(i).getConnectorId();
                IConnectorPolicy policy = jobRun.getConnectorPolicyMap().get(cdId);
                /*
                 * Carry sender location information into a task, except when this is a
                 * re-attempt and the send side is materialized and blocking.
                 */
                if (attempt > 0 && policy.materializeOnSendSide() && policy.consumerWaitsForProducerToFinish()) {
                    continue;
                }
                ActivityId producerAid = acg.getProducerActivity(cdId);
                partitionLocations[i] = new NetworkAddress[inPartitionCounts[i]];
                for (int j = 0; j < inPartitionCounts[i]; ++j) {
                    TaskId producerTaskId = new TaskId(producerAid, j);
                    String nodeId = findTaskLocation(producerTaskId);
                    partitionLocations[i][j] = nodeManager.getNodeControllerState(nodeId).getDataPort();
                }
            }
            tad.setInputPartitionLocations(partitionLocations);
        }
    }
    tcAttempt.setStatus(TaskClusterAttempt.TaskClusterStatus.RUNNING);
    tcAttempt.setStartTime(System.currentTimeMillis());
    inProgressTaskClusters.add(tc);
}
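The layout of partitionLocations is easy to get wrong: row i corresponds to the task's i-th incoming connector, and column j to that connector's j-th input channel (i.e. sender partition). The following self-contained sketch shows just that structure, using plain strings in place of Hyracks' NetworkAddress; the class name and node-id strings are illustrative only.

import java.util.Arrays;

public class PartitionLocationsLayout {
    public static void main(String[] args) {
        // Two incoming connectors, with 2 and 3 sender partitions respectively.
        int[] inPartitionCounts = { 2, 3 };
        String[][] partitionLocations = new String[inPartitionCounts.length][];
        for (int i = 0; i < inPartitionCounts.length; ++i) {
            partitionLocations[i] = new String[inPartitionCounts[i]];
            for (int j = 0; j < inPartitionCounts[i]; ++j) {
                // Stand-in for nodeManager.getNodeControllerState(nodeId).getDataPort().
                partitionLocations[i][j] = "nc" + j + ":dataPort";
            }
        }
        // Row i answers: where do the senders feeding my i-th input connector live?
        for (int i = 0; i < partitionLocations.length; ++i) {
            System.out.println("connector " + i + " -> " + Arrays.toString(partitionLocations[i]));
        }
    }
}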
Use of org.apache.hyracks.api.dataflow.ConnectorDescriptorId in project asterixdb by apache.
From the class JobSpecification, method toJSON.
@SuppressWarnings("incomplete-switch")
public ObjectNode toJSON() throws IOException {
    ObjectMapper om = new ObjectMapper();
    ObjectNode jjob = om.createObjectNode();
    ArrayNode jopArray = om.createArrayNode();
    for (Map.Entry<OperatorDescriptorId, IOperatorDescriptor> e : opMap.entrySet()) {
        ObjectNode op = e.getValue().toJSON();
        if (!userConstraints.isEmpty()) {
            // Add operator partition constraints to each JSON operator.
            ObjectNode pcObject = om.createObjectNode();
            ObjectNode pleObject = om.createObjectNode();
            for (Constraint constraint : userConstraints) {
                switch (constraint.getLValue().getTag()) {
                    case PARTITION_COUNT:
                        PartitionCountExpression pce = (PartitionCountExpression) constraint.getLValue();
                        if (e.getKey() == pce.getOperatorDescriptorId()) {
                            pcObject.put("count", getConstraintExpressionRValue(constraint));
                        }
                        break;
                    case PARTITION_LOCATION:
                        PartitionLocationExpression ple = (PartitionLocationExpression) constraint.getLValue();
                        if (e.getKey() == ple.getOperatorDescriptorId()) {
                            pleObject.put(Integer.toString(ple.getPartition()),
                                    getConstraintExpressionRValue(constraint));
                        }
                        break;
                }
            }
            if (pleObject.size() > 0) {
                pcObject.set("location", pleObject);
            }
            if (pcObject.size() > 0) {
                op.set("partition-constraints", pcObject);
            }
        }
        jopArray.add(op);
    }
    jjob.set("operators", jopArray);
    ArrayNode jcArray = om.createArrayNode();
    for (Map.Entry<ConnectorDescriptorId, IConnectorDescriptor> e : connMap.entrySet()) {
        ObjectNode conn = om.createObjectNode();
        Pair<Pair<IOperatorDescriptor, Integer>, Pair<IOperatorDescriptor, Integer>> connection =
                connectorOpMap.get(e.getKey());
        if (connection != null) {
            conn.put("in-operator-id", connection.getLeft().getLeft().getOperatorId().toString());
            conn.put("in-operator-port", connection.getLeft().getRight().intValue());
            conn.put("out-operator-id", connection.getRight().getLeft().getOperatorId().toString());
            conn.put("out-operator-port", connection.getRight().getRight().intValue());
        }
        conn.set("connector", e.getValue().toJSON());
        jcArray.add(conn);
    }
    jjob.set("connectors", jcArray);
    return jjob;
}
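A possible way to exercise toJSON, assuming spec is an already-assembled JobSpecification (construction elided); it prints the "operators" and "connectors" arrays built above using Jackson's default pretty printer. JobSpecJsonDump and printJobSpec are hypothetical names for this sketch.

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ObjectNode;

import org.apache.hyracks.api.job.JobSpecification;

public class JobSpecJsonDump {
    public static void printJobSpec(JobSpecification spec) throws Exception {
        ObjectNode json = spec.toJSON();
        // The returned object contains the "operators" and "connectors" arrays.
        String pretty = new ObjectMapper().writerWithDefaultPrettyPrinter().writeValueAsString(json);
        System.out.println(pretty);
    }
}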