Use of org.apache.ignite.internal.processors.hadoop.HadoopJobId in project ignite by apache:
the class HadoopJobTracker, method onKernalStart.
/**
 * {@inheritDoc}
 * <p>
 * Kernal-start hook: subscribes this job tracker to (1) updates of job metadata entries
 * via an internal continuous query on the job metadata cache, and (2) node-failure /
 * node-left discovery events. Both callbacks hand actual processing off to
 * {@code evtProcSvc} so that cache and discovery threads are never blocked.
 *
 * @throws IgniteCheckedException If the superclass start-up fails.
 */
@SuppressWarnings("deprecation")
@Override
public void onKernalStart() throws IgniteCheckedException {
super.onKernalStart();
// Continuous query: fires for every job metadata change (i.e. every job state transition).
jobMetaCache().context().continuousQueries().executeInternalQuery(new CacheEntryUpdatedListener<HadoopJobId, HadoopJobMetadata>() {
@Override
public void onUpdated(final Iterable<CacheEntryEvent<? extends HadoopJobId, ? extends HadoopJobMetadata>> evts) {
// Busy lock unavailable — presumably the component is stopping; drop the update silently.
if (!busyLock.tryReadLock())
return;
try {
// Must process query callback in a separate thread to avoid deadlocks.
evtProcSvc.execute(new EventHandler() {
@Override
protected void body() throws IgniteCheckedException {
processJobMetadataUpdates(evts);
}
});
} finally {
busyLock.readUnlock();
}
}
// NOTE(review): trailing flags are executeInternalQuery options — confirm their
// meaning (local/notify-existing/etc.) against the CacheContinuousQueryManager signature.
}, null, true, true, false);
// Discovery listener: react to nodes leaving or failing so their jobs can be re-handled.
ctx.kernalContext().event().addLocalEventListener(new GridLocalEventListener() {
@Override
public void onEvent(final Event evt) {
// Same stop-guard as above: skip if the component is shutting down.
if (!busyLock.tryReadLock())
return;
try {
// Must process discovery callback in a separate thread to avoid deadlock.
evtProcSvc.execute(new EventHandler() {
@Override
protected void body() {
processNodeLeft((DiscoveryEvent) evt);
}
});
} finally {
busyLock.readUnlock();
}
}
}, EventType.EVT_NODE_FAILED, EventType.EVT_NODE_LEFT);
}
Use of org.apache.ignite.internal.processors.hadoop.HadoopJobId in project ignite by apache:
the class HadoopProtocolJobCountersTask, method run.
/**
 * {@inheritDoc}
 * <p>
 * Reconstructs the job ID from the task arguments and returns the counters
 * reported by the Hadoop facade for that job.
 */
@Override
public HadoopCounters run(ComputeJobContext jobCtx, Hadoop hadoop, HadoopProtocolTaskArguments args)
    throws IgniteCheckedException {
    // Arguments: [0] submitter node ID (string form), [1] local job counter.
    UUID srcNodeId = UUID.fromString(args.<String>get(0));
    Integer locId = args.get(1);

    assert srcNodeId != null;
    assert locId != null;

    HadoopJobId jobId = new HadoopJobId(srcNodeId, locId);

    return hadoop.counters(jobId);
}
Use of org.apache.ignite.internal.processors.hadoop.HadoopJobId in project ignite by apache:
the class HadoopProtocolJobStatusTask, method run.
/**
 * {@inheritDoc}
 * <p>
 * Returns the status of the identified job. When a positive poll delay applies and the
 * job is still running, the compute job is suspended ({@code holdcc}) and resumed
 * ({@code callcc}) once the job's finish future completes — continuation-style polling
 * rather than busy-waiting.
 */
@Override
public HadoopJobStatus run(final ComputeJobContext jobCtx, Hadoop hadoop, HadoopProtocolTaskArguments args) throws IgniteCheckedException {
// Arguments: [0] submitter node ID (string form), [1] local job counter, [2] optional poll delay.
UUID nodeId = UUID.fromString(args.<String>get(0));
Integer id = args.get(1);
Long pollDelay = args.get(2);
assert nodeId != null;
assert id != null;
HadoopJobId jobId = new HadoopJobId(nodeId, id);
// Fall back to the default delay when the caller did not supply one.
if (pollDelay == null)
pollDelay = DFLT_POLL_DELAY;
if (pollDelay > 0) {
IgniteInternalFuture<?> fut = hadoop.finishFuture(jobId);
if (fut != null) {
// Either the job already finished, or this is the resumed continuation
// (ATTR_HELD was set before holding) — report current status.
if (fut.isDone() || F.eq(jobCtx.getAttribute(ATTR_HELD), true))
return hadoop.status(jobId);
else {
// Resume this compute job as soon as the Hadoop job completes.
fut.listen(new IgniteInClosure<IgniteInternalFuture<?>>() {
@Override
public void apply(IgniteInternalFuture<?> fut0) {
jobCtx.callcc();
}
});
// Mark as held BEFORE suspending so the resumed run takes the status branch above.
jobCtx.setAttribute(ATTR_HELD, true);
return jobCtx.holdcc(pollDelay);
}
} else
// No finish future — presumably the job is unknown to this node; report nothing.
return null;
} else
// Polling disabled: return the current status immediately.
return hadoop.status(jobId);
}
Use of org.apache.ignite.internal.processors.hadoop.HadoopJobId in project ignite by apache:
the class HadoopProtocolKillJobTask, method run.
/**
 * {@inheritDoc}
 * <p>
 * Rebuilds the job ID from the task arguments and forwards the kill request
 * to the Hadoop facade.
 */
@Override
public Boolean run(ComputeJobContext jobCtx, Hadoop hadoop, HadoopProtocolTaskArguments args)
    throws IgniteCheckedException {
    // Arguments: [0] submitter node ID (string form), [1] local job counter.
    UUID srcNodeId = UUID.fromString(args.<String>get(0));
    Integer locId = args.get(1);

    assert srcNodeId != null;
    assert locId != null;

    // Delegate the kill for the reconstructed job ID.
    return hadoop.kill(new HadoopJobId(srcNodeId, locId));
}
Use of org.apache.ignite.internal.processors.hadoop.HadoopJobId in project ignite by apache:
the class HadoopProtocolSubmitJobTask, method run.
/**
 * {@inheritDoc}
 * <p>
 * Submits the job described by the supplied job info under a reconstructed job ID,
 * then reports its initial status. A {@code null} status from the facade means the
 * submission failed, in which case a synthetic terminal status (cancelling phase,
 * failed flag set) is returned instead.
 */
@Override
public HadoopJobStatus run(ComputeJobContext jobCtx, Hadoop hadoop, HadoopProtocolTaskArguments args)
    throws IgniteCheckedException {
    // Arguments: [0] submitter node ID (string form), [1] local job counter, [2] job info.
    UUID srcNodeId = UUID.fromString(args.<String>get(0));
    Integer locId = args.get(1);
    HadoopDefaultJobInfo jobInfo = args.get(2);

    assert srcNodeId != null;
    assert locId != null;
    assert jobInfo != null;

    HadoopJobId jobId = new HadoopJobId(srcNodeId, locId);

    hadoop.submit(jobId, jobInfo);

    HadoopJobStatus status = hadoop.status(jobId);

    // Submission failed: fabricate a terminal status so the caller sees a failure.
    if (status == null)
        status = new HadoopJobStatus(jobId, jobInfo.jobName(), jobInfo.user(), 0, 0, 0, 0, PHASE_CANCELLING, true, 1);

    return status;
}
Aggregations