Use of org.apache.ignite.cluster.ClusterNode in project ignite by apache.
The class GridClientPartitionTopology, method nodes.
/** {@inheritDoc} */
@Override public List<ClusterNode> nodes(int p, AffinityTopologyVersion topVer) {
    lock.readLock().lock();

    try {
        assert node2part != null && node2part.valid() : "Invalid node-to-partitions map [topVer=" + topVer +
            ", node2part=" + node2part + ']';

        List<ClusterNode> nodes = null;

        Collection<UUID> nodeIds = part2node.get(p);

        if (!F.isEmpty(nodeIds)) {
            for (UUID nodeId : nodeIds) {
                ClusterNode n = discoCache.node(nodeId);

                if (n != null && (topVer.topologyVersion() < 0 || n.order() <= topVer.topologyVersion())) {
                    if (nodes == null)
                        nodes = new ArrayList<>(nodeIds.size());

                    nodes.add(n);
                }
            }
        }

        return nodes;
    }
    finally {
        lock.readLock().unlock();
    }
}
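Application code rarely needs this internal topology class; the public Affinity API exposes an equivalent partition-to-nodes lookup. Below is a minimal sketch under assumptions not taken from the snippet above: a locally started node and a cache named "myCache", both hypothetical.

import java.util.Collection;
import org.apache.ignite.Ignite;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.affinity.Affinity;
import org.apache.ignite.cluster.ClusterNode;

public class PartitionNodesExample {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            ignite.getOrCreateCache("myCache");

            Affinity<Object> aff = ignite.affinity("myCache");

            // Primary and backup nodes currently mapped to partition 0.
            Collection<ClusterNode> nodes = aff.mapPartitionToPrimaryAndBackups(0);

            for (ClusterNode n : nodes)
                System.out.println("Node " + n.id() + ", order=" + n.order());
        }
    }
}

Unlike the internal method, the public call never returns null; an unmapped partition yields an empty collection.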
Use of org.apache.ignite.cluster.ClusterNode in project ignite by apache.
The class GridClientPartitionTopology, method nodes (private, state-filtered overload).
/**
 * @param p Partition.
 * @param topVer Topology version ({@code -1} for all nodes).
 * @param state Partition state.
 * @param states Additional partition states.
 * @return List of nodes for the partition.
 */
private List<ClusterNode> nodes(int p,
    AffinityTopologyVersion topVer,
    GridDhtPartitionState state,
    GridDhtPartitionState... states) {
    Collection<UUID> allIds = F.nodeIds(discoCache.cacheGroupAffinityNodes(grpId));

    lock.readLock().lock();

    try {
        assert node2part != null && node2part.valid() : "Invalid node-to-partitions map [topVer=" + topVer +
            ", allIds=" + allIds + ", node2part=" + node2part + ']';

        Collection<UUID> nodeIds = part2node.get(p);

        // Node IDs can be null if both primary and backup nodes disappear.
        int size = nodeIds == null ? 0 : nodeIds.size();

        if (size == 0)
            return Collections.emptyList();

        List<ClusterNode> nodes = new ArrayList<>(size);

        for (UUID id : nodeIds) {
            if (topVer.topologyVersion() > 0 && !F.contains(allIds, id))
                continue;

            if (hasState(p, id, state, states)) {
                ClusterNode n = discoCache.node(id);

                if (n != null && (topVer.topologyVersion() < 0 || n.order() <= topVer.topologyVersion()))
                    nodes.add(n);
            }
        }

        return nodes;
    }
    finally {
        lock.readLock().unlock();
    }
}
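The state filter above separates owning nodes from ones still rebalancing, a distinction the public API largely abstracts away. As a rough analogue, the public Affinity interface can at least classify each node's role for a key. A hedged sketch; the cache name and key below are hypothetical placeholders, not taken from the snippet.

import org.apache.ignite.Ignite;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.affinity.Affinity;
import org.apache.ignite.cluster.ClusterNode;

public class NodeRoleExample {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            ignite.getOrCreateCache("myCache");

            Affinity<Integer> aff = ignite.affinity("myCache");

            Integer key = 42;

            for (ClusterNode n : ignite.cluster().forServers().nodes()) {
                // Affinity classifies each node's role for a given key.
                if (aff.isPrimary(n, key))
                    System.out.println(n.id() + " is primary for " + key);
                else if (aff.isBackup(n, key))
                    System.out.println(n.id() + " is backup for " + key);
            }
        }
    }
}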
Use of org.apache.ignite.cluster.ClusterNode in project ignite by apache.
The class GridServiceProcessorProxySelfTest, method testRemoteStickyProxyInvocation.
/**
 * @throws Exception If failed.
 */
public void testRemoteStickyProxyInvocation() throws Exception {
    final String name = "testRemoteStickyProxyInvocation";

    final Ignite ignite = grid(0);

    ignite.services().deployNodeSingleton(name, new MapServiceImpl<String, Integer>());

    // Get remote proxy.
    MapService<Integer, String> svc = ignite.services(ignite.cluster().forRemotes()).
        serviceProxy(name, MapService.class, true);

    // Make sure the returned object is a proxy, not a local service instance.
    assertFalse(svc instanceof Service);

    for (int i = 0; i < nodeCount(); i++)
        svc.put(i, Integer.toString(i));

    int size = 0;

    for (ClusterNode n : ignite.cluster().forRemotes().nodes()) {
        MapService<Integer, String> map = ignite.services(ignite.cluster().forNode(n)).
            serviceProxy(name, MapService.class, false);

        // Make sure the returned object is a proxy, not a local service instance.
        assertFalse(map instanceof Service);

        if (map.size() != 0)
            size += map.size();
    }

    assertEquals(nodeCount(), size);
}
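The same sticky-proxy pattern works outside the test harness. A minimal sketch, assuming a single started node; the CounterService interface, its implementation, and the service name "counter" are all hypothetical and not part of the test above.

import org.apache.ignite.Ignite;
import org.apache.ignite.Ignition;
import org.apache.ignite.services.Service;
import org.apache.ignite.services.ServiceContext;

public class StickyProxyExample {
    /** Hypothetical service API. */
    public interface CounterService {
        int increment();
    }

    /** Hypothetical node-singleton implementation. */
    public static class CounterServiceImpl implements Service, CounterService {
        private int cnt;

        @Override public void init(ServiceContext ctx) { /* No-op. */ }
        @Override public void execute(ServiceContext ctx) { /* No-op. */ }
        @Override public void cancel(ServiceContext ctx) { /* No-op. */ }

        @Override public synchronized int increment() {
            return ++cnt;
        }
    }

    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            ignite.services().deployNodeSingleton("counter", new CounterServiceImpl());

            // Sticky proxy: all calls are routed to the same service instance.
            CounterService svc = ignite.services().serviceProxy("counter", CounterService.class, true);

            System.out.println("Count: " + svc.increment());
        }
    }
}

With sticky set to false, as in the per-node loop of the test, each invocation may be load-balanced to a different instance.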
Use of org.apache.ignite.cluster.ClusterNode in project ignite by apache.
The class GridServiceProcessorBatchDeploySelfTest, method _testDeployAllTopologyChangeFail.
/**
 * TODO: enable when IGNITE-6259 is fixed.
 *
 * @throws Exception If failed.
 */
public void _testDeployAllTopologyChangeFail() throws Exception {
    final Ignite client = grid(CLIENT_NODE_NAME);

    final AtomicBoolean finished = new AtomicBoolean();

    IgniteInternalFuture<Object> topChangeFut = runTopChanger(finished);

    try {
        int numServices = 500;
        int batchSize = 5;

        CountDownLatch latch = new CountDownLatch(numServices);

        IgnitePredicate<ClusterNode> depPred = client.cluster().forServers().forPredicate(
            new IgnitePredicate<ClusterNode>() {
                @Override public boolean apply(ClusterNode node) {
                    String gridName = node.attribute(IgniteNodeAttributes.ATTR_IGNITE_INSTANCE_NAME);

                    assert gridName != null;

                    return gridName.startsWith(getTestIgniteInstanceName());
                }
            }).predicate();

        List<ServiceConfiguration> cfgs = getConfigs(depPred, numServices);

        List<ServiceConfiguration> failingCfgs = new ArrayList<>();

        subscribeExeLatch(cfgs, latch);

        int from = 0;

        while (from < numServices) {
            int to = Math.min(numServices, from + batchSize);

            List<ServiceConfiguration> cfgsBatch = cfgs.subList(from, to);

            ServiceConfiguration failingCfg = cfgsBatch.get(0);

            failingCfg.setName(null);

            failingCfgs.add(failingCfg);

            try {
                client.services().deployAllAsync(cfgsBatch).get(5000);

                fail("Should never reach here.");
            }
            catch (ServiceDeploymentException e) {
                assertEquals(1, e.getFailedConfigurations().size());

                ServiceConfiguration actFailedCfg = copyService(e.getFailedConfigurations().iterator().next());

                assertEquals(failingCfg, actFailedCfg);

                latch.countDown();
            }

            from = to;
        }

        assertTrue(latch.await(30, TimeUnit.SECONDS));

        cfgs.removeAll(failingCfgs);

        assertDeployedServices(client, cfgs);
    }
    finally {
        finished.set(true);
    }

    topChangeFut.get();
}
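The batch API this test exercises is public: IgniteServices.deployAllAsync returns a future whose failure carries a ServiceDeploymentException listing exactly the configurations that were rejected, while the rest of the batch is still deployed. A minimal sketch, assuming a started node; the service names and the no-op service are hypothetical.

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import org.apache.ignite.Ignite;
import org.apache.ignite.Ignition;
import org.apache.ignite.services.Service;
import org.apache.ignite.services.ServiceConfiguration;
import org.apache.ignite.services.ServiceContext;
import org.apache.ignite.services.ServiceDeploymentException;

public class BatchDeployExample {
    /** Minimal no-op service used only for the demo. */
    public static class NoopService implements Service {
        @Override public void init(ServiceContext ctx) { /* No-op. */ }
        @Override public void execute(ServiceContext ctx) { /* No-op. */ }
        @Override public void cancel(ServiceContext ctx) { /* No-op. */ }
    }

    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            List<ServiceConfiguration> cfgs = new ArrayList<>();

            for (int i = 0; i < 3; i++) {
                ServiceConfiguration cfg = new ServiceConfiguration();

                cfg.setName("svc-" + i);
                cfg.setService(new NoopService());
                cfg.setTotalCount(1);

                cfgs.add(cfg);
            }

            try {
                ignite.services().deployAllAsync(cfgs).get();
            }
            catch (ServiceDeploymentException e) {
                // Only the failed subset is reported; the remaining services were deployed.
                Collection<ServiceConfiguration> failed = e.getFailedConfigurations();

                System.err.println("Failed to deploy " + failed.size() + " service(s).");
            }
        }
    }
}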
Use of org.apache.ignite.cluster.ClusterNode in project ignite by apache.
The class GridDhtAtomicAbstractUpdateFuture, method addWriteEntry.
/**
 * @param affAssignment Affinity assignment.
 * @param entry Entry to map.
 * @param val Value to write.
 * @param entryProcessor Entry processor.
 * @param ttl TTL (optional).
 * @param conflictExpireTime Conflict expire time (optional).
 * @param conflictVer Conflict version (optional).
 * @param addPrevVal If {@code true}, sends the previous value to backups.
 * @param prevVal Previous value.
 * @param updateCntr Partition update counter.
 */
@SuppressWarnings("ForLoopReplaceableByForEach")
final void addWriteEntry(AffinityAssignment affAssignment,
    GridDhtCacheEntry entry,
    @Nullable CacheObject val,
    EntryProcessor<Object, Object, Object> entryProcessor,
    long ttl,
    long conflictExpireTime,
    @Nullable GridCacheVersion conflictVer,
    boolean addPrevVal,
    @Nullable CacheObject prevVal,
    long updateCntr) {
    AffinityTopologyVersion topVer = updateReq.topologyVersion();

    List<ClusterNode> affNodes = affAssignment.get(entry.partition());

    // Client has seen that rebalancing finished, so it is safe to use the affinity mapping.
    List<ClusterNode> dhtNodes = updateReq.affinityMapping() ?
        affNodes : cctx.dht().topology().nodes(entry.partition(), affAssignment, affNodes);

    if (dhtNodes == null)
        dhtNodes = affNodes;

    if (log.isDebugEnabled())
        log.debug("Mapping entry to DHT nodes [nodes=" + U.nodeIds(dhtNodes) + ", entry=" + entry + ']');

    CacheWriteSynchronizationMode syncMode = updateReq.writeSynchronizationMode();

    addDhtKey(entry.key(), dhtNodes);

    for (int i = 0; i < dhtNodes.size(); i++) {
        ClusterNode node = dhtNodes.get(i);

        UUID nodeId = node.id();

        if (!nodeId.equals(cctx.localNodeId())) {
            GridDhtAtomicAbstractUpdateRequest updateReq = mappings.get(nodeId);

            if (updateReq == null) {
                updateReq = createRequest(node.id(), futId, writeVer, syncMode, topVer, ttl, conflictExpireTime,
                    conflictVer);

                mappings.put(nodeId, updateReq);
            }

            updateReq.addWriteValue(entry.key(), val, entryProcessor, ttl, conflictExpireTime, conflictVer,
                addPrevVal, prevVal, updateCntr);
        }
    }
}
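The write synchronization mode consulted above is a per-cache public setting: it decides whether a write waits for the backup DHT nodes this method maps update requests to. A short sketch with a hypothetical cache name, showing FULL_SYNC with one backup.

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.CacheWriteSynchronizationMode;
import org.apache.ignite.configuration.CacheConfiguration;

public class SyncModeExample {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            CacheConfiguration<Integer, String> ccfg = new CacheConfiguration<>("myCache");

            // One backup copy per partition; writes wait for both primary and backup.
            ccfg.setBackups(1);
            ccfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);

            IgniteCache<Integer, String> cache = ignite.getOrCreateCache(ccfg);

            // This put completes only after the mapped DHT nodes acknowledge the update.
            cache.put(1, "value");
        }
    }
}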