Use of org.apache.ignite.internal.processors.hadoop.HadoopTaskType in project ignite by apache.
The class HadoopExternalTaskExecutor, method run. The method inspects the type of the first task in the batch and, for the SETUP, ABORT and COMMIT maintenance tasks, lazily (re)starts the external child process before dispatching the execution request.
/** {@inheritDoc} */
@SuppressWarnings("ConstantConditions")
@Override public void run(final HadoopJobEx job, final Collection<HadoopTaskInfo> tasks) throws IgniteCheckedException {
    if (!busyLock.tryReadLock()) {
        if (log.isDebugEnabled())
            log.debug("Failed to start hadoop tasks (grid is stopping, will ignore).");

        return;
    }

    try {
        HadoopProcess proc = runningProcsByJobId.get(job.id());

        HadoopTaskType taskType = F.first(tasks).type();

        if (taskType == HadoopTaskType.SETUP || taskType == HadoopTaskType.ABORT ||
            taskType == HadoopTaskType.COMMIT) {
            if (proc == null || proc.terminated()) {
                runningProcsByJobId.remove(job.id(), proc);

                // Start new process for ABORT task since previous processes were killed.
                proc = startProcess(job, jobTracker.plan(job.id()));

                if (log.isDebugEnabled())
                    log.debug("Starting new process for maintenance task [jobId=" + job.id() +
                        ", proc=" + proc + ", taskType=" + taskType + ']');
            }
        }
        else
            assert proc != null : "Missing started process for task execution request: " + job.id() +
                ", tasks=" + tasks;

        final HadoopProcess proc0 = proc;

        proc.initFut.listen(new CI1<IgniteInternalFuture<IgniteBiTuple<Process, HadoopProcessDescriptor>>>() {
            @Override public void apply(IgniteInternalFuture<IgniteBiTuple<Process, HadoopProcessDescriptor>> f) {
                if (!busyLock.tryReadLock())
                    return;

                try {
                    f.get();

                    proc0.addTasks(tasks);

                    if (log.isDebugEnabled())
                        log.debug("Sending task execution request to child process [jobId=" + job.id() +
                            ", proc=" + proc0 + ", tasks=" + tasks + ']');

                    sendExecutionRequest(proc0, job, tasks);
                }
                catch (IgniteCheckedException e) {
                    notifyTasksFailed(tasks, FAILED, e);
                }
                finally {
                    busyLock.readUnlock();
                }
            }
        });
    }
    finally {
        busyLock.readUnlock();
    }
}
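The run method above follows a deferred-dispatch pattern: take a read lock that guards against grid shutdown, look up (or lazily start) the external worker process, then register a listener on the process's initialization future so the execution request is sent only after the child process is ready, with the shutdown guard re-checked on the callback thread. Below is a minimal, self-contained sketch of that pattern using plain java.util.concurrent primitives; DeferredDispatchSketch, WorkerProcess, procsByJobId and send are hypothetical stand-ins for illustration, not part of the Ignite API.

import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

/** Hypothetical sketch of the deferred-dispatch pattern used by HadoopExternalTaskExecutor.run. */
public class DeferredDispatchSketch {
    /** Guards against dispatching while the component is shutting down (busyLock analogue). */
    private final ReadWriteLock busyLock = new ReentrantReadWriteLock();

    /** One worker per job id; the future completes once the external process is initialized. */
    private final ConcurrentMap<String, CompletableFuture<WorkerProcess>> procsByJobId =
        new ConcurrentHashMap<>();

    /** Placeholder for a started external process. */
    static class WorkerProcess {
        void send(List<String> tasks) {
            System.out.println("Sending tasks: " + tasks);
        }
    }

    public void run(String jobId, List<String> tasks) {
        if (!busyLock.readLock().tryLock())
            return; // Component is stopping; ignore the request, as the Ignite code does.

        try {
            // Start the worker lazily; computeIfAbsent plays the role of startProcess().
            CompletableFuture<WorkerProcess> initFut = procsByJobId.computeIfAbsent(
                jobId, id -> CompletableFuture.supplyAsync(WorkerProcess::new));

            // Defer the actual request until initialization completes,
            // mirroring proc.initFut.listen(...) above.
            initFut.whenComplete((proc, err) -> {
                if (!busyLock.readLock().tryLock())
                    return; // Re-check on the callback thread, as the Ignite listener does.

                try {
                    if (err != null)
                        System.err.println("Tasks failed: " + err); // notifyTasksFailed analogue.
                    else
                        proc.send(tasks);
                }
                finally {
                    busyLock.readLock().unlock();
                }
            });
        }
        finally {
            busyLock.readLock().unlock();
        }
    }

    public static void main(String[] args) throws InterruptedException {
        DeferredDispatchSketch sketch = new DeferredDispatchSketch();
        sketch.run("job-1", List.of("MAP-0", "MAP-1"));
        Thread.sleep(200); // Give the async completion a moment to run.
    }
}

Note the design point visible in both the sketch and the Ignite code: the lock is held only while registering the callback, and the callback re-acquires the guard itself, so a stopping node is never blocked waiting for a slow child process.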
Use of org.apache.ignite.internal.processors.hadoop.HadoopTaskType in project ignite by apache.
The class HadoopV2Job, method getTaskContext. The task type is part of the key under which a per-task context future is memoized, so each (type, task number) pair gets exactly one HadoopTaskContext instance.
/** {@inheritDoc} */
@SuppressWarnings({"unchecked", "MismatchedQueryAndUpdateOfCollection"})
@Override public HadoopTaskContext getTaskContext(HadoopTaskInfo info) throws IgniteCheckedException {
    T2<HadoopTaskType, Integer> locTaskId = new T2<>(info.type(), info.taskNumber());

    GridFutureAdapter<HadoopTaskContext> fut = ctxs.get(locTaskId);

    if (fut != null)
        return fut.get();

    GridFutureAdapter<HadoopTaskContext> old = ctxs.putIfAbsent(locTaskId, fut = new GridFutureAdapter<>());

    if (old != null)
        return old.get();

    Class<? extends HadoopTaskContext> cls = taskCtxClsPool.poll();

    try {
        if (cls == null) {
            // If there is no pooled class, then load a new one.
            // Note that the classloader is identified by the task it was initially created for,
            // but later it may be reused for other tasks.
            HadoopClassLoader ldr = sharedClsLdr != null ? sharedClsLdr :
                createClassLoader(HadoopClassLoader.nameForTask(info, false));

            cls = (Class<? extends HadoopTaskContext>)ldr.loadClass(HadoopV2TaskContext.class.getName());

            fullCtxClsQueue.add(cls);
        }

        Constructor<?> ctr = cls.getConstructor(HadoopTaskInfo.class, HadoopJobEx.class, HadoopJobId.class,
            UUID.class, DataInput.class);

        // Serialize the job configuration once, under double-checked locking, and reuse the bytes.
        if (jobConfData == null)
            synchronized (jobConf) {
                if (jobConfData == null) {
                    ByteArrayOutputStream buf = new ByteArrayOutputStream();

                    jobConf.write(new DataOutputStream(buf));

                    jobConfData = buf.toByteArray();
                }
            }

        HadoopTaskContext res = (HadoopTaskContext)ctr.newInstance(info, this, jobId, locNodeId,
            new DataInputStream(new ByteArrayInputStream(jobConfData)));

        fut.onDone(res);

        return res;
    }
    catch (Throwable e) {
        IgniteCheckedException te = transformException(e);

        fut.onDone(te);

        if (e instanceof Error)
            throw (Error)e;

        throw te;
    }
}
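The ctxs map above implements future-based memoization: each caller either finds an existing future for the (type, taskNumber) key or installs a fresh one via putIfAbsent, and only the thread that wins the race performs the expensive construction; losers block on the winner's future, and an exceptional completion propagates the failure to every waiter. Below is a minimal sketch of that idiom with standard JDK types; FutureMemoSketch, TaskKey and buildContext are hypothetical stand-ins, not Ignite classes.

import java.util.Map;
import java.util.Objects;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;

/** Hypothetical sketch of the future-based memoization used by getTaskContext. */
public class FutureMemoSketch {
    /** Composite key, playing the role of T2<HadoopTaskType, Integer>. */
    record TaskKey(String type, int taskNumber) {}

    private final Map<TaskKey, CompletableFuture<String>> ctxs = new ConcurrentHashMap<>();

    /** Returns the memoized context, creating it exactly once per key. */
    public String getTaskContext(TaskKey key) {
        CompletableFuture<String> fut = ctxs.get(key);

        if (fut != null)
            return fut.join(); // Someone already created (or is creating) it.

        fut = new CompletableFuture<>();

        CompletableFuture<String> old = ctxs.putIfAbsent(key, fut);

        if (old != null)
            return old.join(); // Lost the race; wait on the winner's future.

        try {
            String ctx = buildContext(key); // Expensive construction, done once per key.

            fut.complete(ctx);

            return ctx;
        }
        catch (Throwable e) {
            fut.completeExceptionally(e); // Waiting threads see the failure instead of hanging.

            throw e;
        }
    }

    /** Hypothetical stand-in for the reflective HadoopV2TaskContext construction. */
    private String buildContext(TaskKey key) {
        return "ctx[" + key.type() + '#' + key.taskNumber() + ']';
    }

    public static void main(String[] args) {
        FutureMemoSketch memo = new FutureMemoSketch();
        TaskKey k = new TaskKey("MAP", 0);

        System.out.println(memo.getTaskContext(k));
        System.out.println(Objects.equals(memo.getTaskContext(k), memo.getTaskContext(k))); // true
    }
}

The Ignite method has the same shape: GridFutureAdapter doubles as the memoized value holder and the synchronization point, and fut.onDone(te) in the catch block plays the role of completeExceptionally, ensuring threads blocked in old.get() observe the failure.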