Usage of org.apache.ignite.internal.processors.cache.distributed.dht.GridClientPartitionTopology in the Apache Ignite project: class GridCachePartitionExchangeManager, method createPartitionsSingleMessage.
/**
 * Builds a single-partitions message describing the local partition state of every
 * non-local cache, plus any client-only topologies not already covered.
 *
 * @param targetNode Target node.
 * @param exchangeId Exchange ID, or {@code null} if not tied to a particular exchange.
 * @param clientOnlyExchange Client exchange flag.
 * @param sndCounters {@code True} if need send partition update counters.
 * @return Message.
 */
public GridDhtPartitionsSingleMessage createPartitionsSingleMessage(ClusterNode targetNode, @Nullable GridDhtPartitionExchangeId exchangeId, boolean clientOnlyExchange, boolean sndCounters) {
    GridDhtPartitionsSingleMessage msg = new GridDhtPartitionsSingleMessage(exchangeId, clientOnlyExchange, cctx.versions().last(), true);

    // Dedup cache: caches with an equal "similar affinity" key share one serialized map.
    Map<Object, T2<Integer, Map<Integer, GridDhtPartitionState>>> dedup = new HashMap<>();

    for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
        if (cacheCtx.isLocal())
            continue;

        GridDhtPartitionMap localMap = cacheCtx.topology().localPartitionMap();

        addPartitionMap(msg, dedup, true, cacheCtx.cacheId(), localMap, cacheCtx.affinity().affinityCache().similarAffinityKey());

        if (sndCounters)
            msg.partitionUpdateCounters(cacheCtx.cacheId(), cacheCtx.topology().updateCounters(true));
    }

    // Append client-only topologies, skipping caches already present in the message.
    for (GridClientPartitionTopology clientTop : clientTops.values()) {
        if (msg.partitions() != null && msg.partitions().containsKey(clientTop.cacheId()))
            continue;

        GridDhtPartitionMap localMap = clientTop.localPartitionMap();

        addPartitionMap(msg, dedup, true, clientTop.cacheId(), localMap, clientTop.similarAffinityKey());

        if (sndCounters)
            msg.partitionUpdateCounters(clientTop.cacheId(), clientTop.updateCounters(true));
    }

    return msg;
}
Usage of org.apache.ignite.internal.processors.cache.distributed.dht.GridClientPartitionTopology in the Apache Ignite project: class GridCachePartitionExchangeManager, method clientTopology.
/**
 * Returns the client partition topology for the given cache, creating and registering
 * one on first access.
 *
 * @param cacheId Cache ID.
 * @param exchFut Exchange future.
 * @return Topology.
 */
public GridDhtPartitionTopology clientTopology(int cacheId, GridDhtPartitionsExchangeFuture exchFut) {
    GridClientPartitionTopology existing = clientTops.get(cacheId);

    if (existing != null)
        return existing;

    // Compute the "similar affinity" key when the descriptor is available, so topologies
    // with identical affinity can be deduplicated in exchange messages.
    Object affKey = null;

    DynamicCacheDescriptor desc = cctx.cache().cacheDescriptor(cacheId);

    if (desc != null) {
        CacheConfiguration ccfg = desc.cacheConfiguration();

        AffinityFunction aff = ccfg.getAffinity();

        // NOTE: 'similaryAffinityKey' (sic) is the actual API method name.
        affKey = cctx.kernalContext().affinity().similaryAffinityKey(aff, ccfg.getNodeFilter(), ccfg.getBackups(), aff.partitions());
    }

    GridClientPartitionTopology created = new GridClientPartitionTopology(cctx, cacheId, exchFut, affKey);

    // Resolve a concurrent-creation race: keep whichever instance got registered first.
    GridClientPartitionTopology raceWinner = clientTops.putIfAbsent(cacheId, created);

    return raceWinner != null ? raceWinner : created;
}
Usage of org.apache.ignite.internal.processors.cache.distributed.dht.GridClientPartitionTopology in the Apache Ignite project: class GridCachePartitionExchangeManager, method createPartitionsFullMessage.
/**
 * Builds a full-partitions message covering every started non-local cache and all
 * client topologies known to the exchange manager.
 *
 * @param nodes Target nodes.
 * @param exchId Non-null exchange ID if message is created for exchange.
 * @param lastVer Last version.
 * @param compress {@code True} if it is possible to use compression for message.
 * @return Message.
 */
public GridDhtPartitionsFullMessage createPartitionsFullMessage(Collection<ClusterNode> nodes, @Nullable final GridDhtPartitionExchangeId exchId, @Nullable GridCacheVersion lastVer, final boolean compress) {
    AffinityTopologyVersion msgTopVer = exchId != null ? exchId.topologyVersion() : AffinityTopologyVersion.NONE;

    final GridDhtPartitionsFullMessage msg = new GridDhtPartitionsFullMessage(exchId, lastVer, msgTopVer);

    msg.compress(compress);

    // Dedup cache: caches with an equal "similar affinity" key share one serialized full map.
    final Map<Object, T2<Integer, GridDhtPartitionFullMap>> dedup = new HashMap<>();

    cctx.forAllCaches(new IgniteInClosure<GridCacheContext>() {
        @Override
        public void apply(GridCacheContext cacheCtx) {
            if (cacheCtx.isLocal())
                return;

            // Skip caches that are not ready for the exchange's topology version.
            boolean ready = exchId != null
                ? cacheCtx.startTopologyVersion().compareTo(exchId.topologyVersion()) <= 0
                : cacheCtx.started();

            if (!ready)
                return;

            GridAffinityAssignmentCache affCache = cacheCtx.affinity().affinityCache();

            GridDhtPartitionFullMap localFullMap = cacheCtx.topology().partitionMap(true);

            addFullPartitionsMap(msg, dedup, compress, cacheCtx.cacheId(), localFullMap, affCache.similarAffinityKey());

            if (exchId != null)
                msg.addPartitionUpdateCounters(cacheCtx.cacheId(), cacheCtx.topology().updateCounters(true));
        }
    });

    // It is important that client topologies be added after contexts.
    for (GridClientPartitionTopology clientTop : cctx.exchange().clientTopologies()) {
        GridDhtPartitionFullMap fullMap = clientTop.partitionMap(true);

        addFullPartitionsMap(msg, dedup, compress, clientTop.cacheId(), fullMap, clientTop.similarAffinityKey());

        if (exchId != null)
            msg.addPartitionUpdateCounters(clientTop.cacheId(), clientTop.updateCounters(true));
    }

    return msg;
}
Usage of org.apache.ignite.internal.processors.cache.distributed.dht.GridClientPartitionTopology in the Apache Ignite project: class GridCachePartitionExchangeManager, an overload of method createPartitionsSingleMessage.
/**
 * Builds a single-partitions message over cache groups, excluding groups that are
 * stopping in the supplied exchange actions, then appends client-only topologies.
 *
 * @param exchangeId Exchange ID.
 * @param clientOnlyExchange Client exchange flag.
 * @param sndCounters {@code True} if need send partition update counters.
 * @param newCntrMap {@code True} if possible to use {@link CachePartitionPartialCountersMap}.
 * @param exchActions Exchange actions, or {@code null} if none.
 * @return Message.
 */
public GridDhtPartitionsSingleMessage createPartitionsSingleMessage(@Nullable GridDhtPartitionExchangeId exchangeId, boolean clientOnlyExchange, boolean sndCounters, boolean newCntrMap, ExchangeActions exchActions) {
    GridDhtPartitionsSingleMessage msg = new GridDhtPartitionsSingleMessage(exchangeId, clientOnlyExchange, cctx.versions().last(), true);

    // Dedup cache: groups with an equal "similar affinity" key share one serialized state map.
    Map<Object, T2<Integer, GridPartitionStateMap>> dedup = new HashMap<>();

    for (CacheGroupContext grp : cctx.cache().cacheGroups()) {
        if (grp.isLocal())
            continue;

        // Groups being stopped by this exchange must not be reported.
        if (exchActions != null && exchActions.cacheGroupStopping(grp.groupId()))
            continue;

        GridDhtPartitionMap localMap = grp.topology().localPartitionMap();

        addPartitionMap(msg, dedup, true, grp.groupId(), localMap, grp.affinity().similarAffinityKey());

        if (sndCounters) {
            CachePartitionPartialCountersMap cntrs = grp.topology().localUpdateCounters(true);

            // Fall back to the legacy counters map for older protocol versions.
            msg.addPartitionUpdateCounters(grp.groupId(), newCntrMap ? cntrs : CachePartitionPartialCountersMap.toCountersMap(cntrs));
        }
    }

    // Append client-only topologies, skipping groups already present in the message.
    for (GridClientPartitionTopology clientTop : clientTops.values()) {
        if (msg.partitions() != null && msg.partitions().containsKey(clientTop.groupId()))
            continue;

        GridDhtPartitionMap localMap = clientTop.localPartitionMap();

        addPartitionMap(msg, dedup, true, clientTop.groupId(), localMap, clientTop.similarAffinityKey());

        if (sndCounters) {
            CachePartitionPartialCountersMap cntrs = clientTop.localUpdateCounters(true);

            msg.addPartitionUpdateCounters(clientTop.groupId(), newCntrMap ? cntrs : CachePartitionPartialCountersMap.toCountersMap(cntrs));
        }
    }

    return msg;
}
Usage of org.apache.ignite.internal.processors.cache.distributed.dht.GridClientPartitionTopology in the Apache Ignite project: class GridCachePartitionExchangeManager, an overload of method createPartitionsFullMessage.
/**
 * Builds a full-partitions message over cache groups started for the exchange's
 * topology version, then appends all known client topologies.
 *
 * @param compress {@code True} if possible to compress message (properly work only if prepareMarshall/
 *     finishUnmarshall methods are called).
 * @param newCntrMap {@code True} if possible to use {@link CachePartitionFullCountersMap}.
 * @param exchId Non-null exchange ID if message is created for exchange.
 * @param lastVer Last version.
 * @param partHistSuppliers Partition history suppliers map.
 * @param partsToReload Partitions to reload map.
 * @return Message.
 */
public GridDhtPartitionsFullMessage createPartitionsFullMessage(boolean compress, boolean newCntrMap, @Nullable final GridDhtPartitionExchangeId exchId, @Nullable GridCacheVersion lastVer, @Nullable IgniteDhtPartitionHistorySuppliersMap partHistSuppliers, @Nullable IgniteDhtPartitionsToReloadMap partsToReload) {
    AffinityTopologyVersion msgTopVer = exchId != null ? exchId.topologyVersion() : AffinityTopologyVersion.NONE;

    final GridDhtPartitionsFullMessage msg = new GridDhtPartitionsFullMessage(exchId, lastVer, msgTopVer, partHistSuppliers, partsToReload);

    msg.compress(compress);

    // Dedup cache: groups with an equal "similar affinity" key share one serialized full map.
    final Map<Object, T2<Integer, GridDhtPartitionFullMap>> dedup = new HashMap<>();

    for (CacheGroupContext grp : cctx.cache().cacheGroups()) {
        if (grp.isLocal())
            continue;

        // Skip groups that started after the exchange's topology version.
        if (exchId != null && grp.localStartVersion().compareTo(exchId.topologyVersion()) > 0)
            continue;

        GridDhtPartitionFullMap localFullMap = grp.topology().partitionMap(true);

        if (localFullMap != null)
            addFullPartitionsMap(msg, dedup, compress, grp.groupId(), localFullMap, grp.affinity().similarAffinityKey());

        if (exchId != null) {
            CachePartitionFullCountersMap cntrs = grp.topology().fullUpdateCounters();

            // Fall back to the legacy counters map for older protocol versions.
            msg.addPartitionUpdateCounters(grp.groupId(), newCntrMap ? cntrs : CachePartitionFullCountersMap.toCountersMap(cntrs));
        }
    }

    // It is important that client topologies be added after contexts.
    for (GridClientPartitionTopology clientTop : cctx.exchange().clientTopologies()) {
        GridDhtPartitionFullMap fullMap = clientTop.partitionMap(true);

        if (fullMap != null)
            addFullPartitionsMap(msg, dedup, compress, clientTop.groupId(), fullMap, clientTop.similarAffinityKey());

        if (exchId != null) {
            CachePartitionFullCountersMap cntrs = clientTop.fullUpdateCounters();

            msg.addPartitionUpdateCounters(clientTop.groupId(), newCntrMap ? cntrs : CachePartitionFullCountersMap.toCountersMap(cntrs));
        }
    }

    return msg;
}
Aggregations