Use of org.apache.ignite.cache.CacheMode.LOCAL in project ignite by apache.
Class IgnitePdsPartitionPreloadTest, method preloadPartition.
/**
 * @param execNodeFactory Factory for the node that executes the preload operation.
 * @param preloadMode Preload mode.
 * @throws Exception If failed.
 */
private void preloadPartition(Supplier<Ignite> execNodeFactory, PreloadMode preloadMode) throws Exception {
    Ignite crd = startGridsMultiThreaded(GRIDS_CNT);

    Ignite testNode = grid(1);

    Object consistentId = testNode.cluster().localNode().consistentId();

    assertEquals(PRIMARY_NODE, testNode.cluster().localNode().consistentId());

    boolean locCacheMode = testNode.cache(DEFAULT_CACHE_NAME)
        .getConfiguration(CacheConfiguration.class).getCacheMode() == LOCAL;

    Integer key = primaryKey(testNode.cache(DEFAULT_CACHE_NAME));

    int preloadPart = crd.affinity(DEFAULT_CACHE_NAME).partition(key);

    int cnt = 0;

    // Stream ENTRY_CNT entries that all map to the partition under test.
    try (IgniteDataStreamer<Integer, Integer> streamer = testNode.dataStreamer(DEFAULT_CACHE_NAME)) {
        int k = 0;

        while (cnt < ENTRY_CNT) {
            if (testNode.affinity(DEFAULT_CACHE_NAME).partition(k) == preloadPart) {
                streamer.addData(k, k);

                cnt++;
            }

            k++;
        }
    }

    // Persist the streamed data, then restart the cluster so the partition is cold.
    forceCheckpoint();

    stopAllGrids();

    startGridsMultiThreaded(GRIDS_CNT);

    testNode = G.allGrids().stream()
        .filter(ignite -> PRIMARY_NODE.equals(ignite.cluster().localNode().consistentId()))
        .findFirst().get();

    if (!locCacheMode)
        assertEquals(testNode, primaryNode(key, DEFAULT_CACHE_NAME));

    Ignite execNode = execNodeFactory.get();

    switch (preloadMode) {
        case SYNC:
            execNode.cache(DEFAULT_CACHE_NAME).preloadPartition(preloadPart);

            if (locCacheMode) {
                testNode = G.allGrids().stream()
                    .filter(ignite -> ignite.cluster().localNode().consistentId().equals(consistentId))
                    .findFirst().get();
            }

            break;

        case ASYNC:
            execNode.cache(DEFAULT_CACHE_NAME).preloadPartitionAsync(preloadPart).get();

            if (locCacheMode) {
                testNode = G.allGrids().stream()
                    .filter(ignite -> ignite.cluster().localNode().consistentId().equals(consistentId))
                    .findFirst().get();
            }

            break;

        case LOCAL:
            assertTrue(execNode.cache(DEFAULT_CACHE_NAME).localPreloadPartition(preloadPart));

            // For local preloading testNode == execNode.
            testNode = execNode;

            break;
    }

    long c0 = testNode.dataRegionMetrics(DEFAULT_REGION).getPagesRead();

    // After partition preloading, iterating over the partition should not read any pages from the store.
    GridIterator<CacheDataRow> cursor = ((IgniteEx)testNode).cachex(DEFAULT_CACHE_NAME).context().offheap()
        .cachePartitionIterator(CU.UNDEFINED_CACHE_ID, preloadPart, null, false);

    int realSize = 0;

    while (cursor.hasNext()) {
        realSize++;

        cursor.next();
    }

    assertEquals("Partition has missed some entries", ENTRY_CNT, realSize);

    assertEquals("Read pages count must be same", c0, testNode.dataRegionMetrics(DEFAULT_REGION).getPagesRead());
}
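The three branches of the switch above exercise the public partition-preloading API that Apache Ignite exposes on IgniteCache: preloadPartition(int), preloadPartitionAsync(int), and localPreloadPartition(int). Below is a minimal standalone sketch of the same calls, assuming a persistence-enabled default configuration; the cache name "myCache" and partition 0 are placeholders, not values from the test:

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.lang.IgniteFuture;

public class PreloadPartitionExample {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            IgniteCache<Integer, Integer> cache = ignite.getOrCreateCache("myCache");

            // Synchronous: returns once the partition is loaded into page memory.
            cache.preloadPartition(0);

            // Asynchronous: returns a future to wait on, as the ASYNC branch above does.
            IgniteFuture<Void> fut = cache.preloadPartitionAsync(0);
            fut.get();

            // Local: preloads only if the partition is present on this node and
            // returns false otherwise, which is why the test asserts on the result.
            boolean preloaded = cache.localPreloadPartition(0);
        }
    }
}

Preloading is only meaningful with Ignite native persistence: it warms page memory from the partition file so that subsequent scans avoid store reads, which is exactly what the getPagesRead() assertions at the end of the test verify.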
Use of org.apache.ignite.cache.CacheMode.LOCAL in project ignite by apache.
Class CacheAffinitySharedManager, method processClientCacheStartRequests.
/**
 * @param crd Coordinator flag.
 * @param msg Change request.
 * @param topVer Current topology version.
 * @param discoCache Discovery data cache.
 * @return Map of started caches (cache ID to near enabled flag).
 */
@Nullable private Map<Integer, Boolean> processClientCacheStartRequests(
    boolean crd,
    ClientCacheChangeDummyDiscoveryMessage msg,
    AffinityTopologyVersion topVer,
    DiscoCache discoCache
) {
    Map<String, DynamicCacheChangeRequest> startReqs = msg.startRequests();

    List<DynamicCacheDescriptor> startDescs = clientCachesToStart(msg.requestId(), startReqs);

    if (startDescs == null || startDescs.isEmpty()) {
        cctx.cache().completeClientCacheChangeFuture(msg.requestId(), null);

        return null;
    }

    Map<Integer, GridDhtAssignmentFetchFuture> fetchFuts = U.newHashMap(startDescs.size());

    Map<Integer, Boolean> startedInfos = U.newHashMap(startDescs.size());

    List<StartCacheInfo> startCacheInfos = startDescs.stream()
        .map(desc -> {
            DynamicCacheChangeRequest changeReq = startReqs.get(desc.cacheName());

            startedInfos.put(desc.cacheId(), changeReq.nearCacheConfiguration() != null);

            return new StartCacheInfo(
                desc.cacheConfiguration(),
                desc,
                changeReq.nearCacheConfiguration(),
                topVer,
                changeReq.disabledAfterStart(),
                true
            );
        })
        .collect(Collectors.toList());

    Set<String> startedCaches = startCacheInfos.stream()
        .map(info -> info.getCacheDescriptor().cacheName())
        .collect(Collectors.toSet());

    try {
        cctx.cache().prepareStartCaches(startCacheInfos);
    }
    catch (IgniteCheckedException e) {
        cctx.cache().closeCaches(startedCaches, false);

        cctx.cache().completeClientCacheChangeFuture(msg.requestId(), e);

        return null;
    }

    Set<CacheGroupDescriptor> groupDescs = startDescs.stream()
        .map(DynamicCacheDescriptor::groupDescriptor)
        .collect(Collectors.toSet());

    for (CacheGroupDescriptor grpDesc : groupDescs) {
        try {
            CacheGroupContext grp = cctx.cache().cacheGroup(grpDesc.groupId());

            assert grp != null : grpDesc.groupId();
            assert !grp.affinityNode() || grp.isLocal() : grp.cacheOrGroupName();

            // Skip for local caches.
            if (grp.isLocal())
                continue;

            CacheGroupHolder grpHolder = grpHolders.get(grp.groupId());

            assert !crd || (grpHolder != null && grpHolder.affinity().idealAssignmentRaw() != null);

            if (grpHolder == null)
                grpHolder = getOrCreateGroupHolder(topVer, grpDesc);

            // The current node is not a client, yet it has no affinity holder for the group.
            if (grpHolder.nonAffNode() && !cctx.localNode().isClient()) {
                GridDhtPartitionsExchangeFuture excFut = context().exchange().lastFinishedFuture();

                grp.topology().updateTopologyVersion(excFut, discoCache, -1, false);

                // Exchange-free cache creation: just replace the client topology with a DHT one.
                // The topology must be initialized before use.
                grp.topology().beforeExchange(excFut, true, false);

                grpHolder = new CacheGroupAffNodeHolder(grp, grpHolder.affinity());

                grpHolders.put(grp.groupId(), grpHolder);

                GridClientPartitionTopology clientTop = cctx.exchange().clearClientTopology(grp.groupId());

                if (clientTop != null) {
                    grp.topology().update(
                        grpHolder.affinity().lastVersion(),
                        clientTop.partitionMap(true),
                        clientTop.fullUpdateCounters(),
                        Collections.<Integer>emptySet(),
                        null,
                        null,
                        null,
                        clientTop.lostPartitions()
                    );

                    excFut.validate(grp);
                }

                assert grpHolder.affinity().lastVersion().equals(grp.affinity().lastVersion());
            }
            else if (!crd && !fetchFuts.containsKey(grp.groupId())) {
                boolean topVerLessOrNotInitialized = !grp.topology().initialized() ||
                    grp.topology().readyTopologyVersion().compareTo(topVer) < 0;

                // Fetch up-to-date affinity from the coordinator if the local copy is stale.
                if (grp.affinity().lastVersion().compareTo(topVer) < 0 || topVerLessOrNotInitialized) {
                    GridDhtAssignmentFetchFuture fetchFut =
                        new GridDhtAssignmentFetchFuture(cctx, grp.groupId(), topVer, discoCache);

                    fetchFut.init(true);

                    fetchFuts.put(grp.groupId(), fetchFut);
                }
            }
        }
        catch (IgniteCheckedException e) {
            cctx.cache().closeCaches(startedCaches, false);

            cctx.cache().completeClientCacheChangeFuture(msg.requestId(), e);

            return null;
        }
    }

    // Apply the fetched affinity assignments and initialize group topologies.
    for (GridDhtAssignmentFetchFuture fetchFut : fetchFuts.values()) {
        try {
            CacheGroupContext grp = cctx.cache().cacheGroup(fetchFut.groupId());

            assert grp != null;

            GridDhtAffinityAssignmentResponse res = fetchAffinity(topVer, null, discoCache, grp.affinity(), fetchFut);

            GridDhtPartitionFullMap partMap;

            if (res != null) {
                partMap = res.partitionMap();

                assert partMap != null : res;
            }
            else
                partMap = new GridDhtPartitionFullMap(cctx.localNodeId(), cctx.localNode().order(), 1);

            GridDhtPartitionsExchangeFuture exchFut = context().exchange().lastFinishedFuture();

            grp.topology().updateTopologyVersion(exchFut, discoCache, -1, false);

            GridClientPartitionTopology clientTop = cctx.exchange().clearClientTopology(grp.groupId());

            Set<Integer> lostParts = clientTop == null ? null : clientTop.lostPartitions();

            grp.topology().update(topVer, partMap, null, Collections.emptySet(), null, null, null, lostParts);

            if (clientTop == null)
                grp.topology().detectLostPartitions(topVer, exchFut);

            exchFut.validate(grp);
        }
        catch (IgniteCheckedException e) {
            cctx.cache().closeCaches(startedCaches, false);

            cctx.cache().completeClientCacheChangeFuture(msg.requestId(), e);

            return null;
        }
    }

    // Complete topology initialization for all started non-local caches.
    for (DynamicCacheDescriptor desc : startDescs) {
        if (desc.cacheConfiguration().getCacheMode() != LOCAL) {
            CacheGroupContext grp = cctx.cache().cacheGroup(desc.groupId());

            assert grp != null;

            grp.topology().onExchangeDone(null, grp.affinity().cachedAffinity(topVer), true);
        }
    }

    cctx.cache().initCacheProxies(topVer, null);

    startReqs.keySet().forEach(req -> cctx.cache().completeProxyInitialize(req));

    cctx.cache().completeClientCacheChangeFuture(msg.requestId(), null);

    return startedInfos;
}
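For context, this code path services dynamic cache starts requested by client nodes, which Ignite performs without a full partition map exchange (the "exchange-free cache creation" noted in the comments above). Below is a minimal, hypothetical client-side trigger for this path, assuming a server node is already running and reachable; the instance and cache names are placeholders:

import org.apache.ignite.Ignite;
import org.apache.ignite.Ignition;
import org.apache.ignite.configuration.IgniteConfiguration;

public class ClientCacheStartExample {
    public static void main(String[] args) {
        // Illustrative client configuration; discovery settings are omitted.
        IgniteConfiguration cfg = new IgniteConfiguration()
            .setIgniteInstanceName("client")
            .setClientMode(true);

        try (Ignite client = Ignition.start(cfg)) {
            // Starting a cache from a client node produces the start requests that
            // processClientCacheStartRequests() handles on the affinity manager.
            client.getOrCreateCache("clientStartedCache").put(1, "value");
        }
    }
}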