Usage example of org.apache.hyracks.api.job.JobId in the Apache AsterixDB project:
class DatasetPartitionManager, method createDatasetPartitionWriter.
/**
 * Creates a partition writer for one partition of a job's result set and
 * registers its {@code ResultState} so readers can later locate the data.
 *
 * @param ctx           task context; supplies the owning job's {@link JobId}
 * @param rsId          result set the writer belongs to
 * @param orderedResult whether result partitions must be read in order
 * @param asyncMode     whether the writer operates asynchronously
 * @param partition     index of this partition (0-based)
 * @param nPartitions   total number of partitions in the result set
 * @return the newly created writer
 * @throws HyracksException on failure to set up the writer
 */
@Override
public IFrameWriter createDatasetPartitionWriter(IHyracksTaskContext ctx, ResultSetId rsId, boolean orderedResult,
        boolean asyncMode, int partition, int nPartitions) throws HyracksException {
    DatasetPartitionWriter dpw;
    JobId jobId = ctx.getJobletContext().getJobId();
    // Creation of the writer and registration of its result state must be atomic
    // with respect to other threads using partitionResultStateMap.
    synchronized (this) {
        dpw = new DatasetPartitionWriter(ctx, this, jobId, rsId, asyncMode, orderedResult, partition, nPartitions,
                datasetMemoryManager, fileFactory);
        // Lazily create the per-job result-set map on first use.
        ResultSetMap rsIdMap = (ResultSetMap) partitionResultStateMap.computeIfAbsent(jobId, k -> new ResultSetMap());
        ResultState[] resultStates = rsIdMap.createOrGetResultStates(rsId, nPartitions);
        resultStates[partition] = dpw.getResultState();
    }
    // Guard the FINE-level log so the message string is not built when logging is
    // disabled — consistent with logging style elsewhere in the project.
    if (LOGGER.isLoggable(java.util.logging.Level.FINE)) {
        LOGGER.fine("Initialized partition writer: JobId: " + jobId + ":partition: " + partition);
    }
    return dpw;
}
Usage example of org.apache.hyracks.api.job.JobId in the Apache AsterixDB project:
class NetworkManager, method readInitialMessage.
/**
 * Decodes the initial handshake message of a network channel into the
 * {@link PartitionId} it targets. The buffer layout is: jobId (long),
 * connector id (int), sender index (int), receiver index (int).
 */
private static PartitionId readInitialMessage(ByteBuffer buffer) {
    final JobId job = new JobId(buffer.getLong());
    final ConnectorDescriptorId connector = new ConnectorDescriptorId(buffer.getInt());
    final int sender = buffer.getInt();
    final int receiver = buffer.getInt();
    return new PartitionId(job, connector, sender, receiver);
}
Usage example of org.apache.hyracks.api.job.JobId in the Apache AsterixDB project:
class Waiter, method print.
/**
 * Writes one line per known job — "&lt;jobId&gt; - &lt;dataset job record&gt;" —
 * to the given writer, flushes it, and returns it to allow chaining.
 */
public PrintWriter print(PrintWriter pw) {
    for (JobId jobId : getJobIds()) {
        // String.valueOf guards against a null record (prints "null").
        pw.append(jobId.toString()).append(" - ").println(String.valueOf(getDatasetJobRecord(jobId)));
    }
    pw.flush();
    return pw;
}
Usage example of org.apache.hyracks.api.job.JobId in the Apache AsterixDB project:
class JobExecutor, method startTasks.
/**
 * Dispatches the given task attempts to their assigned node controllers.
 * The serialized activity cluster graph is shipped to a node only the first
 * time that node joins this job (and never for pre-distributed jobs).
 *
 * @param taskAttemptMap node id -> task attempts to start on that node
 * @throws HyracksException if serialization or task dispatch fails
 */
private void startTasks(Map<String, List<TaskAttemptDescriptor>> taskAttemptMap) throws HyracksException {
    final DeploymentId deploymentId = jobRun.getDeploymentId();
    final JobId jobId = jobRun.getJobId();
    final ActivityClusterGraph acg = jobRun.getActivityClusterGraph();
    final Map<ConnectorDescriptorId, IConnectorPolicy> connectorPolicies =
            new HashMap<>(jobRun.getConnectorPolicyMap());
    INodeManager nodeManager = ccs.getNodeManager();
    try {
        // Pre-distributed graphs already exist on the nodes; skip the serialization cost.
        byte[] acgBytes = predistributed ? null : JavaSerializationUtils.serialize(acg);
        for (Map.Entry<String, List<TaskAttemptDescriptor>> entry : taskAttemptMap.entrySet()) {
            String nodeId = entry.getKey();
            final List<TaskAttemptDescriptor> taskDescriptors = entry.getValue();
            final NodeControllerState node = nodeManager.getNodeControllerState(nodeId);
            // Nodes that have dropped out of the cluster are silently skipped here;
            // failure handling for their tasks happens elsewhere.
            if (node != null) {
                // Reuse the jobId local computed above rather than re-fetching it.
                node.getActiveJobIds().add(jobId);
                // add() returns true only on the node's first participation in this job.
                boolean changed = jobRun.getParticipatingNodeIds().add(nodeId);
                if (LOGGER.isLoggable(Level.FINE)) {
                    LOGGER.fine("Starting: " + taskDescriptors + " at " + nodeId);
                }
                // Ship the graph bytes only on first participation; null means "already have it".
                byte[] jagBytes = changed ? acgBytes : null;
                node.getNodeController().startTasks(deploymentId, jobId, jagBytes, taskDescriptors,
                        connectorPolicies, jobRun.getFlags());
            }
        }
    } catch (Exception e) {
        // Preserve the original failure as the cause.
        throw new HyracksException(e);
    }
}
Usage example of org.apache.hyracks.api.job.JobId in the Apache AsterixDB project:
class JobManager, method prepareComplete.
/**
 * Transitions a job run toward completion: records its pending status and
 * asks every participating node to clean up its joblet. The run is finally
 * completed either immediately (failure-before-execution, no participants,
 * or all participants already gone) or later, as cleanup acknowledgements
 * drain {@code cleanupPendingNodeIds}.
 */
@Override
public void prepareComplete(JobRun run, JobStatus status, List<Exception> exceptions) throws HyracksException {
    checkJob(run);
    // Jobs that failed before execution never started tasks anywhere,
    // so there is nothing to clean up — complete immediately.
    if (status == JobStatus.FAILURE_BEFORE_EXECUTION) {
        run.setPendingStatus(JobStatus.FAILURE, exceptions);
        finalComplete(run);
        return;
    }
    JobId jobId = run.getJobId();
    HyracksException caughtException = null;
    // Cleanup already requested and every node has acknowledged: finish now.
    if (run.getPendingStatus() != null && run.getCleanupPendingNodeIds().isEmpty()) {
        finalComplete(run);
        return;
    }
    // Cleanup already requested but acknowledgements are still outstanding:
    // this is a duplicate request — ignore it.
    if (run.getPendingStatus() != null) {
        LOGGER.warning("Ignoring duplicate cleanup for JobRun with id: " + jobId);
        return;
    }
    Set<String> targetNodes = run.getParticipatingNodeIds();
    run.getCleanupPendingNodeIds().addAll(targetNodes);
    // NOTE(review): the early returns above guarantee getPendingStatus() is null
    // here, so this condition appears to be always true — confirm intent.
    if (run.getPendingStatus() != JobStatus.FAILURE && run.getPendingStatus() != JobStatus.TERMINATED) {
        run.setPendingStatus(status, exceptions);
    }
    if (targetNodes != null && !targetNodes.isEmpty()) {
        INodeManager nodeManager = ccs.getNodeManager();
        // Nodes that have since left the cluster; pruned after the loop to
        // avoid mutating targetNodes while iterating it.
        Set<String> toDelete = new HashSet<>();
        for (String n : targetNodes) {
            NodeControllerState ncs = nodeManager.getNodeControllerState(n);
            try {
                if (ncs == null) {
                    // Node is gone — no cleanup needed, and no ack will ever come.
                    toDelete.add(n);
                } else {
                    ncs.getNodeController().cleanUpJoblet(jobId, status);
                }
            } catch (Exception e) {
                LOGGER.log(Level.SEVERE, e.getMessage(), e);
                // Keep asking the remaining nodes to clean up; collect failures
                // (first as the thrown exception, the rest suppressed).
                if (caughtException == null) {
                    caughtException = new HyracksException(e);
                } else {
                    caughtException.addSuppressed(e);
                }
            }
        }
        targetNodes.removeAll(toDelete);
        run.getCleanupPendingNodeIds().removeAll(toDelete);
        // If every participant had already departed, no acks are pending — finish now.
        if (run.getCleanupPendingNodeIds().isEmpty()) {
            finalComplete(run);
        }
    } else {
        // No participating nodes at all: nothing to wait for.
        finalComplete(run);
    }
    // throws caught exceptions if any
    if (caughtException != null) {
        throw caughtException;
    }
}
End of aggregated JobId usage examples.