Use of org.apache.hyracks.control.cc.cluster.INodeManager in project asterixdb by apache.
In the class PartitionUtils, method reportPartitionMatch:
public static void reportPartitionMatch(ClusterControllerService ccs, final PartitionId pid, Pair<PartitionDescriptor, PartitionRequest> match) throws Exception {
    PartitionDescriptor desc = match.getLeft();
    PartitionRequest req = match.getRight();
    INodeManager nodeManager = ccs.getNodeManager();
    NodeControllerState producerNCS = nodeManager.getNodeControllerState(desc.getNodeId());
    NodeControllerState requestorNCS = nodeManager.getNodeControllerState(req.getNodeId());
    final NetworkAddress dataport = producerNCS.getDataPort();
    final INodeController requestorNC = requestorNCS.getNodeController();
    requestorNC.reportPartitionAvailability(pid, dataport);
}
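Note that, unlike the heartbeat handler below, this method does not guard against getNodeControllerState returning null for a node that has dropped out of the cluster. A minimal sketch of the same lookup with an explicit guard, using only the calls shown above (the helper name and the exception choice are illustrative, not from the source):

private static NetworkAddress lookupDataPort(ClusterControllerService ccs, String nodeId) {
    INodeManager nodeManager = ccs.getNodeManager();
    NodeControllerState ncs = nodeManager.getNodeControllerState(nodeId);
    if (ncs == null) {
        // The node may have failed or been removed since the partition was registered.
        throw new IllegalStateException("Unknown node: " + nodeId);
    }
    // Data port a requestor can use to fetch the partition from the producer.
    return ncs.getDataPort();
}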
Use of org.apache.hyracks.control.cc.cluster.INodeManager in project asterixdb by apache.
In the class AbstractHeartbeatWork, method doRun:
@Override
public void doRun() {
    INodeManager nodeManager = ccs.getNodeManager();
    NodeControllerState state = nodeManager.getNodeControllerState(nodeId);
    if (state != null) {
        state.notifyHeartbeat(hbData);
    }
    runWork();
}
Use of org.apache.hyracks.control.cc.cluster.INodeManager in project asterixdb by apache.
In the class JobExecutor, method startTasks:
private void startTasks(Map<String, List<TaskAttemptDescriptor>> taskAttemptMap) throws HyracksException {
    final DeploymentId deploymentId = jobRun.getDeploymentId();
    final JobId jobId = jobRun.getJobId();
    final ActivityClusterGraph acg = jobRun.getActivityClusterGraph();
    final Map<ConnectorDescriptorId, IConnectorPolicy> connectorPolicies = new HashMap<>(jobRun.getConnectorPolicyMap());
    INodeManager nodeManager = ccs.getNodeManager();
    try {
        byte[] acgBytes = predistributed ? null : JavaSerializationUtils.serialize(acg);
        for (Map.Entry<String, List<TaskAttemptDescriptor>> entry : taskAttemptMap.entrySet()) {
            String nodeId = entry.getKey();
            final List<TaskAttemptDescriptor> taskDescriptors = entry.getValue();
            final NodeControllerState node = nodeManager.getNodeControllerState(nodeId);
            if (node != null) {
                node.getActiveJobIds().add(jobRun.getJobId());
                boolean changed = jobRun.getParticipatingNodeIds().add(nodeId);
                if (LOGGER.isLoggable(Level.FINE)) {
                    LOGGER.fine("Starting: " + taskDescriptors + " at " + entry.getKey());
                }
                byte[] jagBytes = changed ? acgBytes : null;
                node.getNodeController().startTasks(deploymentId, jobId, jagBytes, taskDescriptors, connectorPolicies, jobRun.getFlags());
            }
        }
    } catch (Exception e) {
        throw new HyracksException(e);
    }
}
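If a node named in taskAttemptMap has left the cluster, getNodeControllerState returns null and the loop above silently skips it. A minimal sketch that pulls the same guard into a helper and makes the skip visible in the log (the helper name and log message are illustrative, not part of the source; LOGGER follows the java.util.logging style used above):

private NodeControllerState liveNodeOrNull(INodeManager nodeManager, String nodeId) {
    NodeControllerState node = nodeManager.getNodeControllerState(nodeId);
    if (node == null && LOGGER.isLoggable(Level.WARNING)) {
        // The node was assigned tasks but is no longer registered; its tasks are not started here.
        LOGGER.warning("Node " + nodeId + " is not registered; skipping task start");
    }
    return node;
}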
Use of org.apache.hyracks.control.cc.cluster.INodeManager in project asterixdb by apache.
In the class JobExecutor, method assignTaskLocations:
private void assignTaskLocations(TaskCluster tc, Map<String, List<TaskAttemptDescriptor>> taskAttemptMap) throws HyracksException {
    ActivityClusterGraph acg = jobRun.getActivityClusterGraph();
    Task[] tasks = tc.getTasks();
    List<TaskClusterAttempt> tcAttempts = tc.getAttempts();
    int attempts = tcAttempts.size();
    TaskClusterAttempt tcAttempt = new TaskClusterAttempt(tc, attempts);
    Map<TaskId, TaskAttempt> taskAttempts = new HashMap<>();
    Map<TaskId, LValueConstraintExpression> locationMap = new HashMap<>();
    for (int i = 0; i < tasks.length; ++i) {
        Task ts = tasks[i];
        TaskId tid = ts.getTaskId();
        TaskAttempt taskAttempt = new TaskAttempt(tcAttempt, new TaskAttemptId(new TaskId(tid.getActivityId(), tid.getPartition()), attempts), ts);
        taskAttempt.setStatus(TaskAttempt.TaskStatus.INITIALIZED, null);
        locationMap.put(tid, new PartitionLocationExpression(tid.getActivityId().getOperatorDescriptorId(), tid.getPartition()));
        taskAttempts.put(tid, taskAttempt);
    }
    tcAttempt.setTaskAttempts(taskAttempts);
    solver.solve(locationMap.values());
    for (int i = 0; i < tasks.length; ++i) {
        Task ts = tasks[i];
        TaskId tid = ts.getTaskId();
        TaskAttempt taskAttempt = taskAttempts.get(tid);
        String nodeId = assignLocation(acg, locationMap, tid, taskAttempt);
        taskAttempt.setNodeId(nodeId);
        taskAttempt.setStatus(TaskAttempt.TaskStatus.RUNNING, null);
        taskAttempt.setStartTime(System.currentTimeMillis());
        List<TaskAttemptDescriptor> tads = taskAttemptMap.get(nodeId);
        if (tads == null) {
            tads = new ArrayList<>();
            taskAttemptMap.put(nodeId, tads);
        }
        OperatorDescriptorId opId = tid.getActivityId().getOperatorDescriptorId();
        jobRun.registerOperatorLocation(opId, tid.getPartition(), nodeId);
        ActivityPartitionDetails apd = ts.getActivityPlan().getActivityPartitionDetails();
        TaskAttemptDescriptor tad = new TaskAttemptDescriptor(taskAttempt.getTaskAttemptId(), apd.getPartitionCount(), apd.getInputPartitionCounts(), apd.getOutputPartitionCounts());
        tads.add(tad);
    }
    tcAttempt.initializePendingTaskCounter();
    tcAttempts.add(tcAttempt);
    /**
     * Improvement for reducing master/slave message communications: for each TaskAttemptDescriptor,
     * we set the NetworkAddress[][] partitionLocations, in which each row is for an incoming connector
     * descriptor and each column is for an input channel of the connector.
     */
    INodeManager nodeManager = ccs.getNodeManager();
    for (Map.Entry<String, List<TaskAttemptDescriptor>> e : taskAttemptMap.entrySet()) {
        List<TaskAttemptDescriptor> tads = e.getValue();
        for (TaskAttemptDescriptor tad : tads) {
            TaskAttemptId taid = tad.getTaskAttemptId();
            int attempt = taid.getAttempt();
            TaskId tid = taid.getTaskId();
            ActivityId aid = tid.getActivityId();
            List<IConnectorDescriptor> inConnectors = acg.getActivityInputs(aid);
            int[] inPartitionCounts = tad.getInputPartitionCounts();
            if (inPartitionCounts == null) {
                continue;
            }
            NetworkAddress[][] partitionLocations = new NetworkAddress[inPartitionCounts.length][];
            for (int i = 0; i < inPartitionCounts.length; ++i) {
                ConnectorDescriptorId cdId = inConnectors.get(i).getConnectorId();
                IConnectorPolicy policy = jobRun.getConnectorPolicyMap().get(cdId);
                /**
                 * Carry sender location information into a task, except when this is a re-attempt
                 * and the send side is materialized blocking.
                 */
                if (attempt > 0 && policy.materializeOnSendSide() && policy.consumerWaitsForProducerToFinish()) {
                    continue;
                }
                ActivityId producerAid = acg.getProducerActivity(cdId);
                partitionLocations[i] = new NetworkAddress[inPartitionCounts[i]];
                for (int j = 0; j < inPartitionCounts[i]; ++j) {
                    TaskId producerTaskId = new TaskId(producerAid, j);
                    String nodeId = findTaskLocation(producerTaskId);
                    partitionLocations[i][j] = nodeManager.getNodeControllerState(nodeId).getDataPort();
                }
            }
            tad.setInputPartitionLocations(partitionLocations);
        }
    }
    tcAttempt.setStatus(TaskClusterAttempt.TaskClusterStatus.RUNNING);
    tcAttempt.setStartTime(System.currentTimeMillis());
    inProgressTaskClusters.add(tc);
}
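The NetworkAddress[][] built in the last loop is indexed by incoming connector (row) and by sender partition of that connector (column), as the comment above describes. A purely illustrative shape, assuming a hypothetical task with two input connectors that have three and two sender partitions respectively:

NetworkAddress[][] partitionLocations = new NetworkAddress[2][];
// Connector 0 has 3 sender partitions; column j will hold the data port of the node
// that runs partition j of that connector's producer activity.
partitionLocations[0] = new NetworkAddress[3];
// Connector 1 has 2 sender partitions.
partitionLocations[1] = new NetworkAddress[2];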
Use of org.apache.hyracks.control.cc.cluster.INodeManager in project asterixdb by apache.
In the class JobManager, method prepareComplete:
@Override
public void prepareComplete(JobRun run, JobStatus status, List<Exception> exceptions) throws HyracksException {
    checkJob(run);
    if (status == JobStatus.FAILURE_BEFORE_EXECUTION) {
        run.setPendingStatus(JobStatus.FAILURE, exceptions);
        finalComplete(run);
        return;
    }
    JobId jobId = run.getJobId();
    HyracksException caughtException = null;
    if (run.getPendingStatus() != null && run.getCleanupPendingNodeIds().isEmpty()) {
        finalComplete(run);
        return;
    }
    if (run.getPendingStatus() != null) {
        LOGGER.warning("Ignoring duplicate cleanup for JobRun with id: " + jobId);
        return;
    }
    Set<String> targetNodes = run.getParticipatingNodeIds();
    run.getCleanupPendingNodeIds().addAll(targetNodes);
    if (run.getPendingStatus() != JobStatus.FAILURE && run.getPendingStatus() != JobStatus.TERMINATED) {
        run.setPendingStatus(status, exceptions);
    }
    if (targetNodes != null && !targetNodes.isEmpty()) {
        INodeManager nodeManager = ccs.getNodeManager();
        Set<String> toDelete = new HashSet<>();
        for (String n : targetNodes) {
            NodeControllerState ncs = nodeManager.getNodeControllerState(n);
            try {
                if (ncs == null) {
                    toDelete.add(n);
                } else {
                    ncs.getNodeController().cleanUpJoblet(jobId, status);
                }
            } catch (Exception e) {
                LOGGER.log(Level.SEVERE, e.getMessage(), e);
                if (caughtException == null) {
                    caughtException = new HyracksException(e);
                } else {
                    caughtException.addSuppressed(e);
                }
            }
        }
        targetNodes.removeAll(toDelete);
        run.getCleanupPendingNodeIds().removeAll(toDelete);
        if (run.getCleanupPendingNodeIds().isEmpty()) {
            finalComplete(run);
        }
    } else {
        finalComplete(run);
    }
    // throws caught exceptions if any
    if (caughtException != null) {
        throw caughtException;
    }
}
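The cleanup loop keeps going after a failed cleanUpJoblet call: the first failure is wrapped in the HyracksException that is eventually thrown, and later failures are attached to it with addSuppressed so that none is lost. A minimal sketch of that aggregation pattern in isolation (the helper name is illustrative, not from the source):

private static HyracksException accumulate(HyracksException current, Exception e) {
    if (current == null) {
        // First failure: wrap it so it can be rethrown after the loop finishes.
        return new HyracksException(e);
    }
    // Later failures are recorded on the first one rather than replacing it.
    current.addSuppressed(e);
    return current;
}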