Use of org.apache.ignite.cluster.ClusterNode in project ignite by apache.
From the class RendezvousAffinityFunction, method assignPartition:
/**
* Returns collection of nodes (primary first) for specified partition.
*
* @param part Partition.
* @param nodes Nodes.
* @param backups Number of backups.
* @param neighborhoodCache Neighborhood.
* @return Assignment.
*/
public List<ClusterNode> assignPartition(int part, List<ClusterNode> nodes, int backups,
    @Nullable Map<UUID, Collection<ClusterNode>> neighborhoodCache) {
    if (nodes.size() <= 1)
        return nodes;

    IgniteBiTuple<Long, ClusterNode>[] hashArr =
        (IgniteBiTuple<Long, ClusterNode>[])new IgniteBiTuple[nodes.size()];

    // Score every node against this partition (rendezvous hashing).
    for (int i = 0; i < nodes.size(); i++) {
        ClusterNode node = nodes.get(i);

        Object nodeHash = resolveNodeHash(node);

        long hash = hash(nodeHash.hashCode(), part);

        hashArr[i] = F.t(hash, node);
    }

    final int primaryAndBackups = backups == Integer.MAX_VALUE ? nodes.size() :
        Math.min(backups + 1, nodes.size());

    Iterable<ClusterNode> sortedNodes = new LazyLinearSortedContainer(hashArr, primaryAndBackups);

    // REPLICATED cache case.
    if (backups == Integer.MAX_VALUE)
        return replicatedAssign(nodes, sortedNodes);

    Iterator<ClusterNode> it = sortedNodes.iterator();

    List<ClusterNode> res = new ArrayList<>(primaryAndBackups);

    Collection<ClusterNode> allNeighbors = new HashSet<>();

    ClusterNode primary = it.next();

    res.add(primary);

    // neighborhoodCache is expected to be non-null whenever exclNeighbors is set.
    if (exclNeighbors)
        allNeighbors.addAll(neighborhoodCache.get(primary.id()));

    // Select backups.
    if (backups > 0) {
        while (it.hasNext() && res.size() < primaryAndBackups) {
            ClusterNode node = it.next();

            if (exclNeighbors) {
                if (!allNeighbors.contains(node)) {
                    res.add(node);

                    allNeighbors.addAll(neighborhoodCache.get(node.id()));
                }
            }
            else if ((backupFilter != null && backupFilter.apply(primary, node))
                || (affinityBackupFilter != null && affinityBackupFilter.apply(node, res))
                || (affinityBackupFilter == null && backupFilter == null)) {
                res.add(node);

                if (exclNeighbors)
                    allNeighbors.addAll(neighborhoodCache.get(node.id()));
            }
        }
    }

    if (res.size() < primaryAndBackups && nodes.size() >= primaryAndBackups && exclNeighbors) {
        // Iterate again in case no nodes passed the exclude-neighbors backup criteria.
        it = sortedNodes.iterator();

        it.next();

        while (it.hasNext() && res.size() < primaryAndBackups) {
            ClusterNode node = it.next();

            if (!res.contains(node))
                res.add(node);
        }

        if (!exclNeighborsWarn) {
            LT.warn(log, "Affinity function excludeNeighbors property is ignored " +
                "because topology does not have enough nodes to assign backups.",
                "Affinity function excludeNeighbors property is ignored " +
                "because topology does not have enough nodes to assign backups.");

            exclNeighborsWarn = true;
        }
    }

    assert res.size() <= primaryAndBackups;

    return res;
}
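The core idea here is rendezvous (highest-random-weight) hashing: every node is scored against the partition, and the top-scoring nodes become the primary and backups. Below is a minimal standalone sketch of that selection, with a hypothetical score() mixer standing in for Ignite's hash(nodeHash.hashCode(), part) and plain strings standing in for ClusterNode:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;

/** Minimal rendezvous-hashing sketch; score() is an illustrative stand-in, not Ignite's hash. */
public class RendezvousSketch {
    /** Deterministically combines a node hash and a partition into a score (murmur-style mixer). */
    static long score(int nodeHash, int part) {
        long x = ((long)nodeHash << 32) | (part & 0xFFFFFFFFL);
        x ^= x >>> 33;
        x *= 0xFF51AFD7ED558CCDL;
        x ^= x >>> 33;
        return x;
    }

    /** Returns owners (primary first) for the partition: the top (backups + 1) nodes by score. */
    static List<String> assign(int part, List<String> nodes, int backups) {
        List<String> sorted = new ArrayList<>(nodes);
        sorted.sort(Comparator.comparingLong((String n) -> score(n.hashCode(), part)).reversed());
        return sorted.subList(0, Math.min(backups + 1, sorted.size()));
    }

    public static void main(String[] args) {
        List<String> nodes = Arrays.asList("nodeA", "nodeB", "nodeC", "nodeD");

        // Same inputs always yield the same assignment; removing a node only remaps
        // the partitions that node owned, which is the property Ignite relies on.
        System.out.println(assign(42, nodes, 1));
    }
}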
Use of org.apache.ignite.cluster.ClusterNode in project ignite by apache.
From the class GridEventConsumeHandler, method register:
/** {@inheritDoc} */
@Override
public RegisterStatus register(final UUID nodeId, final UUID routineId, final GridKernalContext ctx)
    throws IgniteCheckedException {
    assert nodeId != null;
    assert routineId != null;
    assert ctx != null;

    if (cb != null)
        ctx.resource().injectGeneric(cb);

    if (filter != null)
        ctx.resource().injectGeneric(filter);

    if (filter instanceof PlatformEventFilterListener)
        ((PlatformEventFilterListener)filter).initialize(ctx);

    final boolean loc = nodeId.equals(ctx.localNodeId());

    lsnr = new GridLocalEventListener() {
        /** Queue entries: node ID, routine ID, event. */
        private final Queue<T3<UUID, UUID, Event>> notificationQueue = new LinkedList<>();

        private boolean notificationInProgress;

        @Override public void onEvent(Event evt) {
            if (filter != null && !filter.apply(evt))
                return;

            if (loc) {
                if (!cb.apply(nodeId, evt))
                    ctx.continuous().stopRoutine(routineId);
            }
            else {
                if (ctx.discovery().node(nodeId) == null)
                    return;

                synchronized (notificationQueue) {
                    notificationQueue.add(new T3<>(nodeId, routineId, evt));

                    // Only the first enqueuer schedules a drain task; later events
                    // are picked up by the task already running.
                    if (!notificationInProgress) {
                        ctx.getSystemExecutorService().execute(new Runnable() {
                            @Override public void run() {
                                if (!ctx.continuous().lockStopping())
                                    return;

                                try {
                                    while (true) {
                                        T3<UUID, UUID, Event> t3;

                                        synchronized (notificationQueue) {
                                            t3 = notificationQueue.poll();

                                            if (t3 == null) {
                                                notificationInProgress = false;

                                                return;
                                            }
                                        }

                                        try {
                                            Event evt = t3.get3();

                                            EventWrapper wrapper = new EventWrapper(evt);

                                            if (evt instanceof CacheEvent) {
                                                String cacheName = ((CacheEvent)evt).cacheName();

                                                ClusterNode node = ctx.discovery().node(t3.get1());

                                                if (node == null)
                                                    continue;

                                                if (ctx.config().isPeerClassLoadingEnabled()) {
                                                    GridCacheContext cctx =
                                                        ctx.cache().internalCache(cacheName).context();

                                                    if (cctx.deploymentEnabled() &&
                                                        ctx.discovery().cacheNode(node, cacheName)) {
                                                        wrapper.p2pMarshal(ctx.config().getMarshaller());

                                                        wrapper.cacheName = cacheName;

                                                        cctx.deploy().prepare(wrapper);
                                                    }
                                                }
                                            }

                                            ctx.continuous().addNotification(t3.get1(), t3.get2(), wrapper,
                                                null, false, false);
                                        }
                                        catch (ClusterTopologyCheckedException ignored) {
                                            // No-op.
                                        }
                                        catch (Throwable e) {
                                            U.error(ctx.log(GridEventConsumeHandler.class),
                                                "Failed to send event notification to node: " + nodeId, e);
                                        }
                                    }
                                }
                                finally {
                                    ctx.continuous().unlockStopping();
                                }
                            }
                        });

                        notificationInProgress = true;
                    }
                }
            }
        }
    };

    if (F.isEmpty(types))
        types = EVTS_ALL;

    ctx.event().addLocalEventListener(lsnr, types);

    return RegisterStatus.REGISTERED;
}
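The anonymous listener above uses a common single-drainer pattern: events are appended to a queue under a lock, and only the thread that finds no drain in progress schedules one; the drain task polls until the queue is empty and then clears the flag, so at most one task processes notifications at a time while enqueuers never block on the processing itself. A minimal standalone sketch of just that pattern (names are illustrative, not Ignite API):

import java.util.ArrayDeque;
import java.util.Queue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

/** Single-drainer queue sketch: at most one task drains pending items at a time. */
public class SingleDrainer<T> {
    private final Queue<T> queue = new ArrayDeque<>();
    private final ExecutorService exec = Executors.newSingleThreadExecutor();
    private boolean draining;   // Guarded by 'queue'.

    public void submit(T item) {
        synchronized (queue) {
            queue.add(item);

            if (!draining) {
                draining = true;
                exec.execute(this::drain);
            }
        }
    }

    private void drain() {
        while (true) {
            T item;

            synchronized (queue) {
                item = queue.poll();

                if (item == null) {
                    draining = false;   // Let the next submit() schedule a new drain.
                    return;
                }
            }

            // Stand-in for sending the notification; done outside the lock.
            System.out.println("processing " + item);
        }
    }
}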
Use of org.apache.ignite.cluster.ClusterNode in project ignite by apache.
From the class IgniteKernal, method checkPhysicalRam:
/**
 * Checks whether the total heap configured for all nodes on this physical host
 * exceeds the host's physical RAM, and warns if it does.
 */
@SuppressWarnings("ConstantConditions")
private void checkPhysicalRam() {
    long ram = ctx.discovery().localNode().attribute(ATTR_PHY_RAM);

    if (ram != -1) {
        String macs = ctx.discovery().localNode().attribute(ATTR_MACS);

        long totalHeap = 0;

        // Nodes with the same MAC addresses are assumed to run on the same physical host.
        for (ClusterNode node : ctx.discovery().allNodes()) {
            if (macs.equals(node.attribute(ATTR_MACS))) {
                long heap = node.metrics().getHeapMemoryMaximum();

                if (heap != -1)
                    totalHeap += heap;
            }
        }

        if (totalHeap > ram) {
            U.quietAndWarn(log, "Attempting to start more nodes than physical RAM " +
                "available on current host (this can cause significant slowdown)");
        }
    }
}
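The same kind of sanity check can be expressed for a single JVM in plain Java. A minimal sketch comparing the configured max heap against OS-reported physical memory, assuming the HotSpot-specific com.sun.management.OperatingSystemMXBean extension is available (it is JDK-internal and deprecated in recent JDKs in favor of getTotalMemorySize()):

import java.lang.management.ManagementFactory;

public class RamCheck {
    public static void main(String[] args) {
        long maxHeap = Runtime.getRuntime().maxMemory();

        // Cast to the com.sun.management extension to read physical memory (HotSpot-specific).
        com.sun.management.OperatingSystemMXBean os =
            (com.sun.management.OperatingSystemMXBean)ManagementFactory.getOperatingSystemMXBean();

        long physicalRam = os.getTotalPhysicalMemorySize();

        if (maxHeap > physicalRam)
            System.err.println("Configured heap (" + maxHeap + " bytes) exceeds physical RAM (" +
                physicalRam + " bytes); expect significant slowdown.");
    }
}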
Use of org.apache.ignite.cluster.ClusterNode in project ignite by apache.
From the class IgfsTask, method map:
/** {@inheritDoc} */
@Nullable
@Override
public final Map<? extends ComputeJob, ClusterNode> map(List<ClusterNode> subgrid,
    @Nullable IgfsTaskArgs<T> args) {
    assert ignite != null;
    assert args != null;

    IgniteFileSystem fs = ignite.fileSystem(args.igfsName());

    IgfsProcessorAdapter igfsProc = ((IgniteKernal)ignite).context().igfs();

    Map<ComputeJob, ClusterNode> splitMap = new HashMap<>();

    Map<UUID, ClusterNode> nodes = mapSubgrid(subgrid);

    for (IgfsPath path : args.paths()) {
        IgfsFile file = fs.info(path);

        if (file == null) {
            if (args.skipNonExistentFiles())
                continue;
            else
                throw new IgniteException("Failed to process IGFS file because it doesn't exist: " + path);
        }

        Collection<IgfsBlockLocation> aff = fs.affinity(path, 0, file.length(), args.maxRangeLength());

        long totalLen = 0;

        for (IgfsBlockLocation loc : aff) {
            ClusterNode node = null;

            // Pick the first affinity node for this block that is present in the subgrid.
            for (UUID nodeId : loc.nodeIds()) {
                node = nodes.get(nodeId);

                if (node != null)
                    break;
            }

            if (node == null)
                throw new IgniteException("Failed to find any of block affinity nodes in subgrid [loc=" + loc +
                    ", subgrid=" + subgrid + ']');

            IgfsJob job = createJob(path, new IgfsFileRange(file.path(), loc.start(), loc.length()), args);

            if (job != null) {
                ComputeJob jobImpl = igfsProc.createJob(job, fs.name(), file.path(), loc.start(), loc.length(),
                    args.recordResolver());

                splitMap.put(jobImpl, node);
            }

            totalLen += loc.length();
        }

        assert totalLen == file.length();
    }

    return splitMap;
}
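The mapSubgrid helper is not shown in this snippet; its job is simply to index the subgrid by node ID so the per-block affinity lookups above are constant-time. A plausible sketch under that assumption (not the actual Ignite implementation):

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import org.apache.ignite.cluster.ClusterNode;

/** Illustrative helper: indexes subgrid nodes by their IDs for O(1) lookups. */
final class SubgridIndex {
    static Map<UUID, ClusterNode> mapSubgrid(List<ClusterNode> subgrid) {
        Map<UUID, ClusterNode> res = new HashMap<>(subgrid.size());

        for (ClusterNode node : subgrid)
            res.put(node.id(), node);

        return res;
    }
}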
Use of org.apache.ignite.cluster.ClusterNode in project ignite by apache.
From the class LocalIgfsSecondaryFileSystem, method affinity:
/** {@inheritDoc} */
@Override
public Collection<IgfsBlockLocation> affinity(IgfsPath path, long start, long len, long maxLen)
    throws IgniteException {
    File f = fileForPath(path);

    if (!f.exists())
        throw new IgfsPathNotFoundException("File not found: " + path);

    // Create fake blocks & fake affinity for them.
    long blockSize = igfs.configuration().getBlockSize();

    if (maxLen <= 0)
        maxLen = Long.MAX_VALUE;

    assert maxLen > 0 : "maxLen : " + maxLen;

    long end = start + len;

    Collection<IgfsBlockLocation> blocks = new ArrayList<>((int)(len / maxLen));

    IgfsDataManager data = igfs.context().data();

    Collection<ClusterNode> lastNodes = null;

    long lastBlockIdx = -1;

    IgfsBlockLocationImpl lastBlock = null;

    for (long offset = start; offset < end; ) {
        long blockIdx = offset / blockSize;

        // Each step is the min of maxLen and the distance to the end of the current block.
        long lenStep = Math.min(maxLen - (lastBlock != null ? lastBlock.length() : 0),
            (blockIdx + 1) * blockSize - offset);

        lenStep = Math.min(lenStep, end - offset);

        // Create a fake affinity key to map blocks of the secondary file system to nodes.
        LocalFileSystemBlockKey affKey = new LocalFileSystemBlockKey(path, blockIdx);

        if (blockIdx != lastBlockIdx) {
            Collection<ClusterNode> nodes = data.affinityNodes(affKey);

            // Flush the accumulated block when the node set changes.
            if (!nodes.equals(lastNodes) && lastNodes != null && lastBlock != null) {
                blocks.add(lastBlock);

                lastBlock = null;
            }

            lastNodes = nodes;

            lastBlockIdx = blockIdx;
        }

        if (lastBlock == null)
            lastBlock = new IgfsBlockLocationImpl(offset, lenStep, lastNodes);
        else
            lastBlock.increaseLength(lenStep);

        if (lastBlock.length() == maxLen || lastBlock.start() + lastBlock.length() == end) {
            blocks.add(lastBlock);

            lastBlock = null;
        }

        offset += lenStep;
    }

    return blocks;
}
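The loop above is essentially a range splitter: it walks [start, start + len) and cuts a chunk at whichever comes first, the block boundary or the maxLen cap, while also merging adjacent chunks that map to the same nodes. A stripped-down sketch of just the splitting arithmetic, with no affinity lookup or merging (illustrative only):

import java.util.ArrayList;
import java.util.List;

public class RangeSplitter {
    /** Splits [start, start + len) into chunks that never cross a block boundary or exceed maxLen. */
    static List<long[]> split(long start, long len, long blockSize, long maxLen) {
        List<long[]> chunks = new ArrayList<>();
        long end = start + len;

        for (long offset = start; offset < end; ) {
            long blockEnd = (offset / blockSize + 1) * blockSize;   // End of the current block.
            long step = Math.min(Math.min(maxLen, blockEnd - offset), end - offset);

            chunks.add(new long[] {offset, step});
            offset += step;
        }

        return chunks;
    }

    public static void main(String[] args) {
        // A 10-byte range starting at offset 5, with 8-byte blocks and a 4-byte cap:
        // expected chunks are [5,3] (cut at the block boundary), [8,4], [12,3].
        for (long[] c : split(5, 10, 8, 4))
            System.out.println("offset=" + c[0] + ", len=" + c[1]);
    }
}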