Use of org.apache.ignite.internal.util.typedef.T2 in project ignite by apache.
From class GridDhtPartitionsExchangeFuture, method assignPartitionStates:
/**
* @param top Topology to assign.
*/
private void assignPartitionStates(GridDhtPartitionTopology top) {
    Map<Integer, CounterWithNodes> maxCntrs = new HashMap<>();

    // Collect, for every owning partition, the highest update counter reported in the received
    // messages and the set of nodes that reported it.
    for (Map.Entry<UUID, GridDhtPartitionsAbstractMessage> e : msgs.entrySet()) {
        assert e.getValue().partitionUpdateCounters(top.cacheId()) != null;

        for (Map.Entry<Integer, T2<Long, Long>> e0 : e.getValue().partitionUpdateCounters(top.cacheId()).entrySet()) {
            int p = e0.getKey();

            UUID uuid = e.getKey();

            GridDhtPartitionState state = top.partitionState(uuid, p);

            if (state != GridDhtPartitionState.OWNING)
                continue;

            Long cntr = e0.getValue().get1();

            if (cntr == null)
                continue;

            CounterWithNodes maxCntr = maxCntrs.get(p);

            if (maxCntr == null || cntr > maxCntr.cnt)
                maxCntrs.put(p, new CounterWithNodes(cntr, uuid));
            else if (cntr == maxCntr.cnt)
                maxCntr.nodes.add(uuid);
        }
    }

    // Also must process counters from the local node.
    for (GridDhtLocalPartition part : top.currentLocalPartitions()) {
        GridDhtPartitionState state = top.partitionState(cctx.localNodeId(), part.id());

        if (state != GridDhtPartitionState.OWNING)
            continue;

        CounterWithNodes maxCntr = maxCntrs.get(part.id());

        if (maxCntr == null || part.initialUpdateCounter() > maxCntr.cnt)
            maxCntrs.put(part.id(), new CounterWithNodes(part.updateCounter(), cctx.localNodeId()));
        else if (part.initialUpdateCounter() == maxCntr.cnt)
            maxCntr.nodes.add(cctx.localNodeId());
    }

    // Assign the collected owners; partitions with a zero maximum counter are skipped,
    // except for the last processed entry.
    int entryLeft = maxCntrs.size();

    for (Map.Entry<Integer, CounterWithNodes> e : maxCntrs.entrySet()) {
        int p = e.getKey();
        long maxCntr = e.getValue().cnt;

        entryLeft--;

        if (entryLeft != 0 && maxCntr == 0)
            continue;

        top.setOwners(p, e.getValue().nodes, entryLeft == 0);
    }
}
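
The owner selection above keys on the first element of each T2<Long, Long> pair taken from the exchange messages. The following standalone sketch (hypothetical class name, plain collections in place of the exchange future's internals, assuming only that ignite-core is on the classpath for T2) reproduces that max-counter selection, with a second T2 standing in for the CounterWithNodes helper:

import java.util.*;

import org.apache.ignite.internal.util.typedef.T2;

/** Sketch: pick the node(s) with the highest update counter for each partition. */
public class MaxCounterOwnersSketch {
    public static void main(String[] args) {
        // nodeId -> (partition -> counter pair), shaped like partitionUpdateCounters() above.
        Map<UUID, Map<Integer, T2<Long, Long>>> cntrsByNode = new HashMap<>();

        UUID n1 = UUID.randomUUID();
        UUID n2 = UUID.randomUUID();

        cntrsByNode.put(n1, Map.of(0, new T2<Long, Long>(10L, 12L), 1, new T2<Long, Long>(5L, 5L)));
        cntrsByNode.put(n2, Map.of(0, new T2<Long, Long>(10L, 12L), 1, new T2<Long, Long>(7L, 7L)));

        // partition -> (max counter, nodes that reported it), replacing CounterWithNodes.
        Map<Integer, T2<Long, Set<UUID>>> maxCntrs = new HashMap<>();

        for (Map.Entry<UUID, Map<Integer, T2<Long, Long>>> e : cntrsByNode.entrySet()) {
            for (Map.Entry<Integer, T2<Long, Long>> e0 : e.getValue().entrySet()) {
                int p = e0.getKey();
                long cntr = e0.getValue().get1(); // First element of the pair, as in the method above.

                T2<Long, Set<UUID>> max = maxCntrs.get(p);

                if (max == null || cntr > max.get1())
                    maxCntrs.put(p, new T2<>(cntr, new HashSet<>(Set.of(e.getKey()))));
                else if (cntr == max.get1())
                    max.get2().add(e.getKey());
            }
        }

        // Partition 0 ends up with both nodes (equal counters), partition 1 with n2 only.
        System.out.println(maxCntrs);
    }
}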
Use of org.apache.ignite.internal.util.typedef.T2 in project ignite by apache.
From class GridDhtPartitionTopologyImpl, method updateCounters:
/** {@inheritDoc} */
@Override
public Map<Integer, T2<Long, Long>> updateCounters(boolean skipZeros) {
    lock.readLock().lock();

    try {
        Map<Integer, T2<Long, Long>> res;

        if (skipZeros) {
            res = U.newHashMap(cntrMap.size());

            for (Map.Entry<Integer, T2<Long, Long>> e : cntrMap.entrySet()) {
                Long cntr = e.getValue().get2();

                // Skip partitions whose current update counter is still zero.
                if (ZERO.equals(cntr))
                    continue;

                res.put(e.getKey(), e.getValue());
            }
        }
        else
            res = new HashMap<>(cntrMap);

        // Overlay counters of local partitions: (initial update counter, current update counter).
        for (int i = 0; i < locParts.length(); i++) {
            GridDhtLocalPartition part = locParts.get(i);

            if (part == null)
                continue;

            T2<Long, Long> cntr0 = res.get(part.id());
            Long initCntr = part.initialUpdateCounter();

            if (cntr0 == null || initCntr >= cntr0.get1()) {
                if (skipZeros && initCntr == 0L && part.updateCounter() == 0L)
                    continue;

                res.put(part.id(), new T2<>(initCntr, part.updateCounter()));
            }
        }

        return res;
    }
    finally {
        lock.readLock().unlock();
    }
}
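
For a caller, the returned map pairs every partition with a T2 whose first element is the initial update counter and whose second is the current one, as built in the loop above. A minimal consumer might look like this sketch (hypothetical class name, assuming ignite-core on the classpath):

import java.util.HashMap;
import java.util.Map;

import org.apache.ignite.internal.util.typedef.T2;

/** Sketch: reading a counters map shaped like the one updateCounters() returns. */
public class CountersMapSketch {
    public static void main(String[] args) {
        // partition -> (initial update counter, current update counter).
        Map<Integer, T2<Long, Long>> cntrs = new HashMap<>();

        cntrs.put(0, new T2<>(100L, 150L));
        cntrs.put(1, new T2<>(0L, 0L));

        for (Map.Entry<Integer, T2<Long, Long>> e : cntrs.entrySet()) {
            long initCntr = e.getValue().get1();
            long curCntr = e.getValue().get2();

            // Mirror the skipZeros behaviour: ignore partitions that never received updates.
            if (initCntr == 0L && curCntr == 0L)
                continue;

            System.out.println("Partition " + e.getKey() + ": init=" + initCntr + ", current=" + curCntr);
        }
    }
}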
Use of org.apache.ignite.internal.util.typedef.T2 in project ignite by apache.
From class JettyRestProcessorAbstractSelfTest, method testTuple:
/**
* @throws Exception If failed.
*/
public void testTuple() throws Exception {
    T2 t = new T2("key", "value");

    jcache().put("tupleKey", t);

    String ret = content(F.asMap("cacheName", DEFAULT_CACHE_NAME, "cmd", GridRestCommand.CACHE_GET.key(), "key", "tupleKey"));

    info("Get command result: " + ret);

    JsonNode res = jsonCacheOperationResponse(ret, false);

    assertEquals(t.getKey(), res.get("key").asText());
    assertEquals(t.getValue(), res.get("value").asText());
}
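
The assertions rely on T2 exposing its two values both through get1()/get2() and through the Map.Entry accessors getKey()/getValue(). A minimal sketch of that behaviour (hypothetical class name, run with assertions enabled via -ea):

import org.apache.ignite.internal.util.typedef.T2;

/** Sketch: T2 is a typed 2-tuple whose accessors double as a Map.Entry view. */
public class TupleAccessorsSketch {
    public static void main(String[] args) {
        T2<String, String> t = new T2<>("key", "value");

        // getKey()/getValue() read the same slots as get1()/get2().
        assert t.get1().equals(t.getKey());
        assert t.get2().equals(t.getValue());

        System.out.println(t.getKey() + " -> " + t.getValue());
    }
}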
Use of org.apache.ignite.internal.util.typedef.T2 in project ignite by apache.
From class GridCachePartitionExchangeManager, method addFullPartitionsMap:
/**
* @param m Message.
* @param dupData Duplicated data map.
* @param compress {@code True} if need check for duplicated partition state data.
* @param cacheId Cache ID.
* @param map Map to add.
* @param affKey Cache affinity key.
*/
private void addFullPartitionsMap(GridDhtPartitionsFullMessage m,
    Map<Object, T2<Integer, GridDhtPartitionFullMap>> dupData,
    boolean compress,
    Integer cacheId,
    GridDhtPartitionFullMap map,
    Object affKey) {
    Integer dupDataCache = null;

    if (compress && affKey != null && !m.containsCache(cacheId)) {
        T2<Integer, GridDhtPartitionFullMap> state0 = dupData.get(affKey);

        if (state0 != null && state0.get2().partitionStateEquals(map)) {
            // Another cache with the same affinity already contributed an identical map:
            // send an empty copy and reference that cache's data instead.
            GridDhtPartitionFullMap map0 = new GridDhtPartitionFullMap(map.nodeId(),
                map.nodeOrder(),
                map.updateSequence());

            for (Map.Entry<UUID, GridDhtPartitionMap> e : map.entrySet())
                map0.put(e.getKey(), e.getValue().emptyCopy());

            map = map0;

            dupDataCache = state0.get1();
        }
        else
            dupData.put(affKey, new T2<>(cacheId, map));
    }

    m.addFullPartitionsMap(cacheId, map, dupDataCache);
}
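
Here T2<Integer, GridDhtPartitionFullMap> remembers, per affinity key, which cache first supplied a given partition map, so later caches with an identical map can send an empty copy plus a back-reference. The sketch below (hypothetical class name, with List<String> as a stand-in for GridDhtPartitionFullMap, assuming ignite-core on the classpath) isolates that dedup pattern:

import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.ignite.internal.util.typedef.T2;

/** Sketch: remember which cache first supplied a given partition map, keyed by affinity key. */
public class DupDataSketch {
    public static void main(String[] args) {
        // affinityKey -> (cacheId that first supplied the data, the data itself).
        Map<Object, T2<Integer, List<String>>> dupData = new HashMap<>();

        Object affKey = "sameAffinityFunction";

        Integer firstDup = register(dupData, affKey, 1, List.of("p0=OWNING", "p1=OWNING"));
        Integer secondDup = register(dupData, affKey, 2, List.of("p0=OWNING", "p1=OWNING"));

        System.out.println(firstDup);  // null: cache 1 must carry the full data.
        System.out.println(secondDup); // 1: cache 2 can just reference cache 1's data.
    }

    /** @return Cache ID whose data can be reused, or {@code null} if this cache sends its own. */
    private static Integer register(Map<Object, T2<Integer, List<String>>> dupData, Object affKey, int cacheId, List<String> map) {
        T2<Integer, List<String>> state0 = dupData.get(affKey);

        if (state0 != null && state0.get2().equals(map))
            return state0.get1();

        dupData.put(affKey, new T2<>(cacheId, map));

        return null;
    }
}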
Use of org.apache.ignite.internal.util.typedef.T2 in project ignite by apache.
From class GridCachePartitionExchangeManager, method createPartitionsFullMessage:
/**
* @param nodes Target nodes.
* @param exchId Non-null exchange ID if message is created for exchange.
* @param lastVer Last version.
* @param compress {@code True} if it is possible to use compression for message.
* @return Message.
*/
public GridDhtPartitionsFullMessage createPartitionsFullMessage(Collection<ClusterNode> nodes,
    @Nullable final GridDhtPartitionExchangeId exchId,
    @Nullable GridCacheVersion lastVer,
    final boolean compress) {
    final GridDhtPartitionsFullMessage m = new GridDhtPartitionsFullMessage(exchId,
        lastVer,
        exchId != null ? exchId.topologyVersion() : AffinityTopologyVersion.NONE);

    m.compress(compress);

    final Map<Object, T2<Integer, GridDhtPartitionFullMap>> dupData = new HashMap<>();

    cctx.forAllCaches(new IgniteInClosure<GridCacheContext>() {
        @Override public void apply(GridCacheContext cacheCtx) {
            if (!cacheCtx.isLocal()) {
                boolean ready;

                if (exchId != null) {
                    AffinityTopologyVersion startTopVer = cacheCtx.startTopologyVersion();

                    ready = startTopVer.compareTo(exchId.topologyVersion()) <= 0;
                }
                else
                    ready = cacheCtx.started();

                if (ready) {
                    GridAffinityAssignmentCache affCache = cacheCtx.affinity().affinityCache();

                    GridDhtPartitionFullMap locMap = cacheCtx.topology().partitionMap(true);

                    addFullPartitionsMap(m, dupData, compress, cacheCtx.cacheId(), locMap, affCache.similarAffinityKey());

                    if (exchId != null)
                        m.addPartitionUpdateCounters(cacheCtx.cacheId(), cacheCtx.topology().updateCounters(true));
                }
            }
        }
    });

    // It is important that client topologies be added after contexts.
    for (GridClientPartitionTopology top : cctx.exchange().clientTopologies()) {
        GridDhtPartitionFullMap map = top.partitionMap(true);

        addFullPartitionsMap(m, dupData, compress, top.cacheId(), map, top.similarAffinityKey());

        if (exchId != null)
            m.addPartitionUpdateCounters(top.cacheId(), top.updateCounters(true));
    }

    return m;
}