Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridClientPartitionTopology in project ignite by apache.
The class GridDhtPartitionsExchangeFuture, method updateTopologies.
/**
 * Updates topology versions and discovery caches on all topologies.
 *
 * @param crd Coordinator flag.
 * @throws IgniteCheckedException If failed.
 */
private void updateTopologies(boolean crd) throws IgniteCheckedException {
    for (CacheGroupContext grp : cctx.cache().cacheGroups()) {
        if (grp.isLocal())
            continue;

        GridClientPartitionTopology clientTop = cctx.exchange().clearClientTopology(grp.groupId());

        long updSeq = clientTop == null ? -1 : clientTop.lastUpdateSequence();

        GridDhtPartitionTopology top = grp.topology();

        if (crd) {
            boolean updateTop = exchId.topologyVersion().equals(grp.localStartVersion());

            if (updateTop && clientTop != null) {
                cctx.exchange().exchangerBlockingSectionBegin();

                try {
                    top.update(null, clientTop.partitionMap(true), clientTop.fullUpdateCounters(),
                        emptySet(), null, null, null, clientTop.lostPartitions());
                }
                finally {
                    cctx.exchange().exchangerBlockingSectionEnd();
                }
            }
        }

        cctx.exchange().exchangerBlockingSectionBegin();

        try {
            top.updateTopologyVersion(this, events().discoveryCache(), updSeq, cacheGroupStopping(grp.groupId()));
        }
        finally {
            cctx.exchange().exchangerBlockingSectionEnd();
        }
    }

    cctx.exchange().exchangerBlockingSectionBegin();

    try {
        for (GridClientPartitionTopology top : cctx.exchange().clientTopologies()) {
            top.updateTopologyVersion(this, events().discoveryCache(), -1, cacheGroupStopping(top.groupId()));
        }
    }
    finally {
        cctx.exchange().exchangerBlockingSectionEnd();
    }
}
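The try/finally bracketing of exchangerBlockingSectionBegin()/exchangerBlockingSectionEnd() around every topology update is the key pattern here. Below is a minimal, self-contained sketch of that pattern; the Exchanger stand-in and its counter are assumptions for illustration only, not the real GridCachePartitionExchangeManager API.

// Hedged sketch: Exchanger is a hypothetical stand-in that just counts open blocking sections.
public final class BlockingSectionSketch {
    /** Minimal stand-in that counts how many blocking sections are currently open. */
    static final class Exchanger {
        private int openSections;

        void exchangerBlockingSectionBegin() { openSections++; }

        void exchangerBlockingSectionEnd() { openSections--; }

        int openSections() { return openSections; }
    }

    /** Runs {@code body} inside a blocking section, mirroring the try/finally pattern in updateTopologies. */
    static void inBlockingSection(Exchanger exchanger, Runnable body) {
        exchanger.exchangerBlockingSectionBegin();

        try {
            body.run();
        }
        finally {
            exchanger.exchangerBlockingSectionEnd();
        }
    }

    public static void main(String[] args) {
        Exchanger exchanger = new Exchanger();

        inBlockingSection(exchanger, () -> System.out.println("updating topology version..."));

        // The section is always closed, whether the body completes normally or throws.
        System.out.println(exchanger.openSections()); // 0
    }
}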
Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridClientPartitionTopology in project ignite by apache.
The class GridCachePartitionExchangeManager, method createPartitionsFullMessage.
/**
 * Creates a partitions full message for the selected cache groups.
 *
 * @param compress {@code True} if the message may be compressed (works properly only if the prepareMarshall/
 *     finishUnmarshall methods are called).
 * @param newCntrMap {@code True} if {@link CachePartitionFullCountersMap} can be used.
 * @param exchId Non-null exchange ID if the message is created for an exchange.
 * @param lastVer Last version.
 * @param partHistSuppliers Partition history suppliers map.
 * @param partsToReload Partitions to reload map.
 * @param grps Selected cache groups.
 * @return Message.
 */
public GridDhtPartitionsFullMessage createPartitionsFullMessage(
    boolean compress,
    boolean newCntrMap,
    @Nullable final GridDhtPartitionExchangeId exchId,
    @Nullable GridCacheVersion lastVer,
    @Nullable IgniteDhtPartitionHistorySuppliersMap partHistSuppliers,
    @Nullable IgniteDhtPartitionsToReloadMap partsToReload,
    Collection<CacheGroupContext> grps
) {
    AffinityTopologyVersion ver = exchId != null ? exchId.topologyVersion() : AffinityTopologyVersion.NONE;

    final GridDhtPartitionsFullMessage m = new GridDhtPartitionsFullMessage(exchId, lastVer, ver, partHistSuppliers, partsToReload);

    m.compressed(compress);

    final Map<Object, T2<Integer, GridDhtPartitionFullMap>> dupData = new HashMap<>();

    Map<Integer, Map<Integer, Long>> partsSizes = new HashMap<>();

    for (CacheGroupContext grp : grps) {
        if (!grp.isLocal()) {
            if (exchId != null) {
                AffinityTopologyVersion startTopVer = grp.localStartVersion();

                if (startTopVer.compareTo(exchId.topologyVersion()) > 0)
                    continue;
            }

            GridAffinityAssignmentCache affCache = grp.affinity();

            GridDhtPartitionFullMap locMap = grp.topology().partitionMap(true);

            if (locMap != null)
                addFullPartitionsMap(m, dupData, compress, grp.groupId(), locMap, affCache.similarAffinityKey());

            Map<Integer, Long> partSizesMap = grp.topology().globalPartSizes();

            if (!partSizesMap.isEmpty())
                partsSizes.put(grp.groupId(), partSizesMap);

            if (exchId != null) {
                CachePartitionFullCountersMap cntrsMap = grp.topology().fullUpdateCounters();

                if (newCntrMap)
                    m.addPartitionUpdateCounters(grp.groupId(), cntrsMap);
                else
                    m.addPartitionUpdateCounters(grp.groupId(), CachePartitionFullCountersMap.toCountersMap(cntrsMap));

                // Lost partitions can be skipped on node left or activation.
                m.addLostPartitions(grp.groupId(), grp.topology().lostPartitions());
            }
        }
    }

    // It is important that client topologies be added after contexts.
    for (GridClientPartitionTopology top : cctx.exchange().clientTopologies()) {
        GridDhtPartitionFullMap map = top.partitionMap(true);

        if (map != null)
            addFullPartitionsMap(m, dupData, compress, top.groupId(), map, top.similarAffinityKey());

        if (exchId != null) {
            CachePartitionFullCountersMap cntrsMap = top.fullUpdateCounters();

            if (newCntrMap)
                m.addPartitionUpdateCounters(top.groupId(), cntrsMap);
            else
                m.addPartitionUpdateCounters(top.groupId(), CachePartitionFullCountersMap.toCountersMap(cntrsMap));

            Map<Integer, Long> partSizesMap = top.globalPartSizes();

            if (!partSizesMap.isEmpty())
                partsSizes.put(top.groupId(), partSizesMap);

            m.addLostPartitions(top.groupId(), top.lostPartitions());
        }
    }

    if (!partsSizes.isEmpty())
        m.partitionSizes(cctx, partsSizes);

    return m;
}
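The dupData accumulator above is handed to addFullPartitionsMap, whose body is not shown here. The sketch below is a hedged, simplified guess at the deduplication idea it supports: cache groups that report the same similar-affinity key and an identical partition map can reference an already-added map instead of carrying their own copy. Every name in the sketch (payload, dupPartsData, DupDataSketch) is an illustrative stand-in, not the real Ignite implementation.

import java.util.HashMap;
import java.util.Map;

// Hedged sketch of partition-map deduplication keyed on a similar-affinity key.
public final class DupDataSketch {
    /** groupId -> partition map (stands in for GridDhtPartitionFullMap). */
    static final Map<Integer, Map<Integer, String>> payload = new HashMap<>();

    /** groupId -> groupId whose identical map should be reused instead. */
    static final Map<Integer, Integer> dupPartsData = new HashMap<>();

    /** similarAffinityKey -> groupId that already contributed a map for that key. */
    static final Map<Object, Integer> dupData = new HashMap<>();

    static void addFullPartitionsMap(int grpId, Map<Integer, String> map, Object affKey) {
        Integer prevGrpId = affKey == null ? null : dupData.get(affKey);

        if (prevGrpId != null && map.equals(payload.get(prevGrpId))) {
            // Reuse the previously added map instead of duplicating it in the message.
            dupPartsData.put(grpId, prevGrpId);

            return;
        }

        payload.put(grpId, map);

        if (affKey != null)
            dupData.putIfAbsent(affKey, grpId);
    }

    public static void main(String[] args) {
        Map<Integer, String> map = Map.of(0, "OWNING", 1, "MOVING");

        addFullPartitionsMap(101, new HashMap<>(map), "affKey-1");
        addFullPartitionsMap(202, new HashMap<>(map), "affKey-1");

        System.out.println(payload.keySet()); // [101]
        System.out.println(dupPartsData);     // {202=101}
    }
}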
Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridClientPartitionTopology in project ignite by apache.
The class GridCachePartitionExchangeManager, method createPartitionsSingleMessage.
/**
 * Creates a partitions single message for the selected cache groups.
 *
 * @param exchangeId Exchange ID.
 * @param clientOnlyExchange Client exchange flag.
 * @param sndCounters {@code True} if partition update counters need to be sent.
 * @param newCntrMap {@code True} if {@link CachePartitionPartialCountersMap} can be used.
 * @param exchActions Cache change actions for the current exchange (used to skip cache groups that are being stopped).
 * @param grps Selected cache groups.
 * @return Message.
 */
public GridDhtPartitionsSingleMessage createPartitionsSingleMessage(
    @Nullable GridDhtPartitionExchangeId exchangeId,
    boolean clientOnlyExchange,
    boolean sndCounters,
    boolean newCntrMap,
    ExchangeActions exchActions,
    Collection<CacheGroupContext> grps
) {
    GridDhtPartitionsSingleMessage m = new GridDhtPartitionsSingleMessage(exchangeId, clientOnlyExchange, cctx.versions().last(), true);

    Map<Object, T2<Integer, GridPartitionStateMap>> dupData = new HashMap<>();

    for (CacheGroupContext grp : grps) {
        if (!grp.isLocal() && (exchActions == null || !exchActions.cacheGroupStopping(grp.groupId()))) {
            GridDhtPartitionMap locMap = grp.topology().localPartitionMap();

            addPartitionMap(m, dupData, true, grp.groupId(), locMap, grp.affinity().similarAffinityKey());

            if (sndCounters) {
                CachePartitionPartialCountersMap cntrsMap = grp.topology().localUpdateCounters(true);

                m.addPartitionUpdateCounters(grp.groupId(),
                    newCntrMap ? cntrsMap : CachePartitionPartialCountersMap.toCountersMap(cntrsMap));
            }

            m.addPartitionSizes(grp.groupId(), grp.topology().partitionSizes());
        }
    }

    for (GridClientPartitionTopology top : clientTops.values()) {
        if (m.partitions() != null && m.partitions().containsKey(top.groupId()))
            continue;

        GridDhtPartitionMap locMap = top.localPartitionMap();

        addPartitionMap(m, dupData, true, top.groupId(), locMap, top.similarAffinityKey());

        if (sndCounters) {
            CachePartitionPartialCountersMap cntrsMap = top.localUpdateCounters(true);

            m.addPartitionUpdateCounters(top.groupId(),
                newCntrMap ? cntrsMap : CachePartitionPartialCountersMap.toCountersMap(cntrsMap));
        }

        m.addPartitionSizes(top.groupId(), top.partitionSizes());
    }

    return m;
}
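The second loop above adds client-only topologies, but only for cache groups that the first loop has not already put into the message. A minimal sketch of that merge rule, with the message modeled as a plain map and all names assumed for illustration:

import java.util.LinkedHashMap;
import java.util.Map;

// Hedged sketch: client topologies are merged in only for groups not yet covered.
public final class SingleMessageMergeSketch {
    public static void main(String[] args) {
        // Partition maps already added for locally started cache groups (first loop).
        Map<Integer, String> partitions = new LinkedHashMap<>();
        partitions.put(101, "local map for group 101");

        // Client-only topologies known to the exchange manager (second loop).
        Map<Integer, String> clientTops = new LinkedHashMap<>();
        clientTops.put(101, "client map for group 101"); // already covered -> skipped
        clientTops.put(303, "client map for group 303"); // not covered -> added

        for (Map.Entry<Integer, String> top : clientTops.entrySet()) {
            if (partitions.containsKey(top.getKey()))
                continue;

            partitions.put(top.getKey(), top.getValue());
        }

        System.out.println(partitions.keySet()); // [101, 303]
    }
}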
Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridClientPartitionTopology in project ignite by apache.
The class GridCachePartitionExchangeManager, method clientTopology.
/**
 * @param grpId Cache group ID.
 * @param discoCache Discovery data cache.
 * @return Topology.
 */
public GridDhtPartitionTopology clientTopology(int grpId, DiscoCache discoCache) {
    GridClientPartitionTopology top = clientTops.get(grpId);

    if (top != null)
        return top;

    CacheGroupDescriptor grpDesc = cctx.affinity().cacheGroups().get(grpId);

    assert grpDesc != null : "grpId=" + grpId;

    CacheConfiguration<?, ?> ccfg = grpDesc.config();

    AffinityFunction aff = ccfg.getAffinity();

    Object affKey = cctx.kernalContext().affinity().similaryAffinityKey(aff,
        ccfg.getNodeFilter(),
        ccfg.getBackups(),
        aff.partitions());

    GridClientPartitionTopology old = clientTops.putIfAbsent(grpId,
        top = new GridClientPartitionTopology(cctx, discoCache, grpId, aff.partitions(), affKey,
            ccfg.getPartitionLossPolicy()));

    return old != null ? old : top;
}
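clientTopology uses the classic get-then-putIfAbsent idiom on a concurrent map, so concurrent callers asking for the same group ID all end up with a single GridClientPartitionTopology instance. A self-contained sketch of the same idiom with simplified stand-in types (not the real Ignite classes):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

// Hedged sketch of lazy get-or-create on a ConcurrentMap.
public final class GetOrCreateSketch {
    static final class ClientTopology {
        final int grpId;

        ClientTopology(int grpId) { this.grpId = grpId; }
    }

    static final ConcurrentMap<Integer, ClientTopology> clientTops = new ConcurrentHashMap<>();

    static ClientTopology clientTopology(int grpId) {
        ClientTopology top = clientTops.get(grpId);

        if (top != null)
            return top;

        // Build the candidate outside the map, then publish it atomically.
        ClientTopology old = clientTops.putIfAbsent(grpId, top = new ClientTopology(grpId));

        // If another thread won the race, its instance is the one every caller sees.
        return old != null ? old : top;
    }

    public static void main(String[] args) {
        ClientTopology a = clientTopology(42);
        ClientTopology b = clientTopology(42);

        System.out.println(a == b); // true
    }
}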
Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridClientPartitionTopology in project ignite by apache.
The class CacheAffinitySharedManager, method processClientCacheStartRequests.
/**
 * @param crd Coordinator flag.
 * @param msg Change request.
 * @param topVer Current topology version.
 * @param discoCache Discovery data cache.
 * @return Map of started caches (cache ID to near enabled flag).
 */
@Nullable private Map<Integer, Boolean> processClientCacheStartRequests(
    boolean crd,
    ClientCacheChangeDummyDiscoveryMessage msg,
    AffinityTopologyVersion topVer,
    DiscoCache discoCache
) {
    Map<String, DynamicCacheChangeRequest> startReqs = msg.startRequests();

    List<DynamicCacheDescriptor> startDescs = clientCachesToStart(msg.requestId(), startReqs);

    if (startDescs == null || startDescs.isEmpty()) {
        cctx.cache().completeClientCacheChangeFuture(msg.requestId(), null);

        return null;
    }

    Map<Integer, GridDhtAssignmentFetchFuture> fetchFuts = U.newHashMap(startDescs.size());

    Map<Integer, Boolean> startedInfos = U.newHashMap(startDescs.size());

    List<StartCacheInfo> startCacheInfos = startDescs.stream()
        .map(desc -> {
            DynamicCacheChangeRequest changeReq = startReqs.get(desc.cacheName());

            startedInfos.put(desc.cacheId(), changeReq.nearCacheConfiguration() != null);

            return new StartCacheInfo(
                desc.cacheConfiguration(),
                desc,
                changeReq.nearCacheConfiguration(),
                topVer,
                changeReq.disabledAfterStart(),
                true
            );
        }).collect(Collectors.toList());

    Set<String> startedCaches = startCacheInfos.stream()
        .map(info -> info.getCacheDescriptor().cacheName())
        .collect(Collectors.toSet());

    try {
        cctx.cache().prepareStartCaches(startCacheInfos);
    }
    catch (IgniteCheckedException e) {
        cctx.cache().closeCaches(startedCaches, false);

        cctx.cache().completeClientCacheChangeFuture(msg.requestId(), e);

        return null;
    }

    Set<CacheGroupDescriptor> groupDescs = startDescs.stream()
        .map(DynamicCacheDescriptor::groupDescriptor)
        .collect(Collectors.toSet());

    for (CacheGroupDescriptor grpDesc : groupDescs) {
        try {
            CacheGroupContext grp = cctx.cache().cacheGroup(grpDesc.groupId());

            assert grp != null : grpDesc.groupId();
            assert !grp.affinityNode() || grp.isLocal() : grp.cacheOrGroupName();

            // Skip for local caches.
            if (grp.isLocal())
                continue;

            CacheGroupHolder grpHolder = grpHolders.get(grp.groupId());

            assert !crd || (grpHolder != null && grpHolder.affinity().idealAssignmentRaw() != null);

            if (grpHolder == null)
                grpHolder = getOrCreateGroupHolder(topVer, grpDesc);

            // If the group holder was created for a non-affinity node and the current node is not a client.
            if (grpHolder.nonAffNode() && !cctx.localNode().isClient()) {
                GridDhtPartitionsExchangeFuture excFut = context().exchange().lastFinishedFuture();

                grp.topology().updateTopologyVersion(excFut, discoCache, -1, false);

                // Exchange-free cache creation: just replace the client topology with a DHT one.
                // The topology should be initialized before use.
                grp.topology().beforeExchange(excFut, true, false);

                grpHolder = new CacheGroupAffNodeHolder(grp, grpHolder.affinity());

                grpHolders.put(grp.groupId(), grpHolder);

                GridClientPartitionTopology clientTop = cctx.exchange().clearClientTopology(grp.groupId());

                if (clientTop != null) {
                    grp.topology().update(
                        grpHolder.affinity().lastVersion(),
                        clientTop.partitionMap(true),
                        clientTop.fullUpdateCounters(),
                        Collections.<Integer>emptySet(),
                        null,
                        null,
                        null,
                        clientTop.lostPartitions());

                    excFut.validate(grp);
                }

                assert grpHolder.affinity().lastVersion().equals(grp.affinity().lastVersion());
            }
            else if (!crd && !fetchFuts.containsKey(grp.groupId())) {
                boolean topVerLessOrNotInitialized = !grp.topology().initialized() ||
                    grp.topology().readyTopologyVersion().compareTo(topVer) < 0;

                if (grp.affinity().lastVersion().compareTo(topVer) < 0 || topVerLessOrNotInitialized) {
                    GridDhtAssignmentFetchFuture fetchFut = new GridDhtAssignmentFetchFuture(cctx, grp.groupId(), topVer, discoCache);

                    fetchFut.init(true);

                    fetchFuts.put(grp.groupId(), fetchFut);
                }
            }
        }
        catch (IgniteCheckedException e) {
            cctx.cache().closeCaches(startedCaches, false);

            cctx.cache().completeClientCacheChangeFuture(msg.requestId(), e);

            return null;
        }
    }

    for (GridDhtAssignmentFetchFuture fetchFut : fetchFuts.values()) {
        try {
            CacheGroupContext grp = cctx.cache().cacheGroup(fetchFut.groupId());

            assert grp != null;

            GridDhtAffinityAssignmentResponse res = fetchAffinity(topVer, null, discoCache, grp.affinity(), fetchFut);

            GridDhtPartitionFullMap partMap;

            if (res != null) {
                partMap = res.partitionMap();

                assert partMap != null : res;
            }
            else
                partMap = new GridDhtPartitionFullMap(cctx.localNodeId(), cctx.localNode().order(), 1);

            GridDhtPartitionsExchangeFuture exchFut = context().exchange().lastFinishedFuture();

            grp.topology().updateTopologyVersion(exchFut, discoCache, -1, false);

            GridClientPartitionTopology clientTop = cctx.exchange().clearClientTopology(grp.groupId());

            Set<Integer> lostParts = clientTop == null ? null : clientTop.lostPartitions();

            grp.topology().update(topVer, partMap, null, Collections.emptySet(), null, null, null, lostParts);

            if (clientTop == null)
                grp.topology().detectLostPartitions(topVer, exchFut);

            exchFut.validate(grp);
        }
        catch (IgniteCheckedException e) {
            cctx.cache().closeCaches(startedCaches, false);

            cctx.cache().completeClientCacheChangeFuture(msg.requestId(), e);

            return null;
        }
    }

    for (DynamicCacheDescriptor desc : startDescs) {
        if (desc.cacheConfiguration().getCacheMode() != LOCAL) {
            CacheGroupContext grp = cctx.cache().cacheGroup(desc.groupId());

            assert grp != null;

            grp.topology().onExchangeDone(null, grp.affinity().cachedAffinity(topVer), true);
        }
    }

    cctx.cache().initCacheProxies(topVer, null);

    startReqs.keySet().forEach(req -> cctx.cache().completeProxyInitialize(req));

    cctx.cache().completeClientCacheChangeFuture(msg.requestId(), null);

    return startedInfos;
}
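Each catch block above follows the same recovery pattern: close the caches that were being started, complete the client cache change future with the error, and bail out; on success the future is completed with null. A minimal sketch of that pattern, with CompletableFuture standing in for the Ignite future and all other names assumed for illustration:

import java.util.List;
import java.util.concurrent.CompletableFuture;

// Hedged sketch of the roll-back-and-complete-future error handling used above.
public final class ClientCacheStartSketch {
    static final CompletableFuture<Void> changeFut = new CompletableFuture<>();

    static void startClientCaches(List<String> cacheNames) {
        try {
            for (String name : cacheNames)
                prepareStart(name); // may throw

            changeFut.complete(null); // success: nothing to report
        }
        catch (RuntimeException e) {
            closeCaches(cacheNames);            // roll back partially started caches
            changeFut.completeExceptionally(e); // report the failure to the requester
        }
    }

    static void prepareStart(String name) {
        if (name.isEmpty())
            throw new RuntimeException("Bad cache name");
    }

    static void closeCaches(List<String> names) {
        System.out.println("Closing " + names);
    }

    public static void main(String[] args) {
        startClientCaches(List.of("cacheA", "cacheB"));

        System.out.println(changeFut.isCompletedExceptionally()); // false
    }
}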