Use of org.apache.ignite.compute.ComputeJob in project ignite by apache.
The class IgniteKillTask, method map.
/** {@inheritDoc} */
@Override
public Map<? extends ComputeJob, ClusterNode> map(List<ClusterNode> subgrid, Boolean restart) {
    assert restart != null;

    this.restart = restart;

    Map<ComputeJob, ClusterNode> jobs = U.newHashMap(subgrid.size());

    for (ClusterNode n : subgrid)
        if (!daemon(n))
            jobs.put(new IgniteKillJob(), n);

    return jobs;
}
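The pattern above (filter the subgrid, then map one job per surviving node) is the basic shape of a ComputeTask.map() implementation. Below is a minimal, self-contained sketch of the same shape; the task and job names are illustrative and not part of the Ignite codebase. It sends a trivial job to every non-daemon node and counts the replies in reduce().

import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.ignite.cluster.ClusterNode;
import org.apache.ignite.compute.ComputeJob;
import org.apache.ignite.compute.ComputeJobAdapter;
import org.apache.ignite.compute.ComputeJobResult;
import org.apache.ignite.compute.ComputeTaskAdapter;

public class CountAliveNodesTask extends ComputeTaskAdapter<Void, Integer> {
    /** {@inheritDoc} */
    @Override
    public Map<? extends ComputeJob, ClusterNode> map(List<ClusterNode> subgrid, Void arg) {
        Map<ComputeJob, ClusterNode> jobs = new HashMap<>(subgrid.size());

        // Same shape as IgniteKillTask.map(): skip daemon nodes, one job per remaining node.
        for (ClusterNode n : subgrid)
            if (!n.isDaemon())
                jobs.put(new PingJob(), n);

        return jobs;
    }

    /** {@inheritDoc} */
    @Override
    public Integer reduce(List<ComputeJobResult> results) {
        // One result per mapped node.
        return results.size();
    }

    /** Trivial job that just reports back. */
    private static class PingJob extends ComputeJobAdapter {
        /** {@inheritDoc} */
        @Override
        public Object execute() {
            return Boolean.TRUE;
        }
    }
}

A caller would then run it with ignite.compute().execute(CountAliveNodesTask.class, null).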
Use of org.apache.ignite.compute.ComputeJob in project ignite by apache.
The class GridContinuousMapperTask2, method map.
/** {@inheritDoc} */
@Override
public Map<? extends ComputeJob, ClusterNode> map(List<ClusterNode> subgrid, @Nullable int[] jobIds) {
    Map<ComputeJob, ClusterNode> mappings = new HashMap<>(jobIds.length);

    Iterator<ClusterNode> nodeIter = g.cluster().forRemotes().nodes().iterator();

    for (int jobId : jobIds) {
        ComputeJob job = new ComputeJobAdapter(jobId) {
            /** Auto-injected Ignite instance. */
            @IgniteInstanceResource
            private Ignite g;

            @Override
            public Object execute() {
                Integer jobId = argument(0);

                X.println(">>> Received job for ID: " + jobId);

                return g.cache("replicated").localPeek(jobId, CachePeekMode.ONHEAP);
            }
        };

        // If the local node is the only node in the grid, map the job to it.
        if (g.cluster().nodes().size() == 1)
            mappings.put(job, g.cluster().localNode());
        else {
            // Round-robin over remote nodes: restart the iterator once it is exhausted.
            ClusterNode n = nodeIter.hasNext() ? nodeIter.next()
                : (nodeIter = g.cluster().forRemotes().nodes().iterator()).next();

            mappings.put(job, n);
        }
    }

    return mappings;
}
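The wrap-around iterator above amounts to round-robin assignment of jobs to remote nodes. The sketch below expresses the same round-robin idea with an index cycling over the subgrid instead of re-creating the iterator; the class and job names are illustrative, and mapping over the whole subgrid (rather than only remote nodes) is an assumption made to keep the example self-contained.

import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.ignite.cluster.ClusterNode;
import org.apache.ignite.compute.ComputeJob;
import org.apache.ignite.compute.ComputeJobAdapter;
import org.apache.ignite.compute.ComputeJobResult;
import org.apache.ignite.compute.ComputeTaskAdapter;

public class RoundRobinMapperTask extends ComputeTaskAdapter<int[], Void> {
    /** {@inheritDoc} */
    @Override
    public Map<? extends ComputeJob, ClusterNode> map(List<ClusterNode> subgrid, int[] jobIds) {
        Map<ComputeJob, ClusterNode> mappings = new HashMap<>(jobIds.length);

        for (int i = 0; i < jobIds.length; i++) {
            // Cycle over the subgrid so every job gets a node even when there
            // are more jobs than nodes.
            ClusterNode node = subgrid.get(i % subgrid.size());

            mappings.put(new EchoJob(jobIds[i]), node);
        }

        return mappings;
    }

    /** {@inheritDoc} */
    @Override
    public Void reduce(List<ComputeJobResult> results) {
        return null;
    }

    /** Job that simply returns the ID it was created with. */
    private static class EchoJob extends ComputeJobAdapter {
        EchoJob(int jobId) {
            super(jobId);
        }

        /** {@inheritDoc} */
        @Override
        public Object execute() {
            return argument(0);
        }
    }
}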
Use of org.apache.ignite.compute.ComputeJob in project ignite by apache.
The class GridP2PTestTask, method map.
/** {@inheritDoc} */
@Override
public Map<? extends ComputeJob, ClusterNode> map(List<ClusterNode> subgrid, Object arg) {
    assert subgrid != null;
    assert !subgrid.isEmpty();

    Integer arg1 = null;

    if (arg instanceof GridifyArgument)
        arg1 = (Integer)((GridifyArgument)arg).getMethodParameters()[0];
    else if (arg instanceof Integer)
        arg1 = (Integer)arg;
    else
        assert false : "Failed to map task (unknown argument type) [type=" + arg.getClass() + ", val=" + arg + ']';

    Map<ComputeJob, ClusterNode> map = new HashMap<>(subgrid.size());

    UUID nodeId = ignite != null ? ignite.configuration().getNodeId() : null;

    for (ClusterNode node : subgrid)
        if (!node.id().equals(nodeId))
            map.put(new GridP2PTestJob(arg1), node);

    return map;
}
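The nodeId lookup above relies on an injected `ignite` field whose declaration is not shown; in Ignite tasks such a field is typically injected with @IgniteInstanceResource, as in the previous snippet. A hedged sketch of how a caller might launch this task follows; the runner class is illustrative, the result type is an assumption (the task's reduce step is not shown here), and the task's package/import is omitted.

import org.apache.ignite.Ignite;
import org.apache.ignite.Ignition;
import org.apache.ignite.configuration.IgniteConfiguration;

public class GridP2PTestTaskRunner {
    public static void main(String[] args) {
        IgniteConfiguration cfg = new IgniteConfiguration();

        // The P2P tests deploy task classes to remote nodes, which normally
        // requires peer class loading to be enabled.
        cfg.setPeerClassLoadingEnabled(true);

        try (Ignite ignite = Ignition.start(cfg)) {
            // Result type is an assumption; the argument is any Integer per the map() above.
            Object res = ignite.compute().execute(GridP2PTestTask.class, 42);

            System.out.println("Task result: " + res);
        }
    }
}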
Use of org.apache.ignite.compute.ComputeJob in project ignite by apache.
The class GarHelloWorldTask, method split.
/** {@inheritDoc} */
@Override
public Collection<? extends ComputeJob> split(int gridSize, String arg) throws IgniteException {
    // Create Spring context.
    AbstractBeanFactory fac = new XmlBeanFactory(new ClassPathResource(
        "org/apache/ignite/spi/deployment/uri/tasks/gar-spring-bean.xml", getClass().getClassLoader()));

    fac.setBeanClassLoader(getClass().getClassLoader());

    // Load imported bean from GAR/lib folder.
    GarHelloWorldBean bean = (GarHelloWorldBean)fac.getBean("example.bean");

    String msg = bean.getMessage(arg);

    assert msg != null;

    // Split the passed-in phrase into multiple words separated by spaces.
    List<String> words = Arrays.asList(msg.split(" "));

    Collection<ComputeJob> jobs = new ArrayList<>(words.size());

    // Use imperative OOP APIs.
    for (String word : words) {
        // Every job gets its own word as an argument.
        jobs.add(new ComputeJobAdapter(word) {
            /** Simply prints the job's argument. */
            @Nullable
            @Override
            public Serializable execute() {
                System.out.println(">>>");
                System.out.println(">>> Printing '" + argument(0) + "' on this node from grid job.");
                System.out.println(">>>");

                // This job does not return any result.
                return null;
            }
        });
    }

    return jobs;
}
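GarHelloWorldTask.split() above shows only the fan-out half of a split/reduce task. Below is a minimal, self-contained sketch of the full pattern; the class and job names are illustrative, and the per-word work (counting characters) is chosen only so that reduce() has something to aggregate.

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;

import org.apache.ignite.compute.ComputeJob;
import org.apache.ignite.compute.ComputeJobAdapter;
import org.apache.ignite.compute.ComputeJobResult;
import org.apache.ignite.compute.ComputeTaskSplitAdapter;

public class WordLengthTask extends ComputeTaskSplitAdapter<String, Integer> {
    /** {@inheritDoc} */
    @Override
    protected Collection<? extends ComputeJob> split(int gridSize, String phrase) {
        Collection<ComputeJob> jobs = new ArrayList<>();

        // One job per word, mirroring the GAR example above.
        for (String word : phrase.split(" "))
            jobs.add(new WordJob(word));

        return jobs;
    }

    /** {@inheritDoc} */
    @Override
    public Integer reduce(List<ComputeJobResult> results) {
        int total = 0;

        for (ComputeJobResult res : results)
            total += res.<Integer>getData();

        return total;
    }

    /** Returns the length of the word it was created with. */
    private static class WordJob extends ComputeJobAdapter {
        WordJob(String word) {
            super(word);
        }

        /** {@inheritDoc} */
        @Override
        public Object execute() {
            String word = argument(0);

            return word.length();
        }
    }
}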
Use of org.apache.ignite.compute.ComputeJob in project ignite by apache.
The class PlatformFullTask, method read.
/**
* Read map result.
*
* @param reader Reader.
* @param nodes Current topology nodes.
* @return Map result.
*/
private Map<ComputeJob, ClusterNode> read(BinaryRawReaderEx reader, Collection<ClusterNode> nodes) {
    if (reader.readBoolean()) {
        if (!reader.readBoolean())
            return null;

        int size = reader.readInt();

        Map<ComputeJob, ClusterNode> map = U.newHashMap(size);

        for (int i = 0; i < size; i++) {
            long ptr = reader.readLong();

            Object nativeJob = reader.readBoolean() ? reader.readObjectDetached() : null;

            PlatformJob job = ctx.createJob(this, ptr, nativeJob);

            UUID jobNodeId = reader.readUuid();

            assert jobNodeId != null;

            ClusterNode jobNode = ctx.kernalContext().discovery().node(jobNodeId);

            if (jobNode == null) {
                // Node has left the live topology; fall back to the snapshot passed in.
                // We expect task processor to perform necessary failover.
                for (ClusterNode node : nodes) {
                    if (node.id().equals(jobNodeId)) {
                        jobNode = node;

                        break;
                    }
                }

                assert jobNode != null;
            }

            map.put(job, jobNode);
        }

        return map;
    }
    else
        throw new IgniteException(reader.readString());
}
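The read loop above decodes, per entry, a native job pointer, an optional detached job object, and the UUID of the node the job was mapped to; when the node is no longer in the live topology, it falls back to the topology snapshot passed into the method. A small sketch of that fallback in isolation follows; the helper class is hypothetical and not part of PlatformFullTask.

import java.util.Collection;
import java.util.UUID;

import org.apache.ignite.cluster.ClusterNode;

final class NodeLookup {
    private NodeLookup() {
        // No instances.
    }

    /** Resolves a node ID against a topology snapshot, or returns null if it is absent. */
    static ClusterNode fromSnapshot(UUID nodeId, Collection<ClusterNode> snapshot) {
        for (ClusterNode node : snapshot) {
            if (node.id().equals(nodeId))
                return node;
        }

        return null;
    }
}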