Use of org.apache.ignite.internal.processors.cache.CacheGroupContext in project ignite by apache.
The class SnapshotFutureTask, method onMarkCheckpointBegin.
/** {@inheritDoc} */
@Override public void onMarkCheckpointBegin(Context ctx) {
    // Write lock is held. Partition page counters have been collected under the write lock.
    if (stopping())
        return;

    try {
        for (Map.Entry<Integer, Set<Integer>> e : parts.entrySet()) {
            int grpId = e.getKey();
            Set<Integer> grpParts = e.getValue();

            CacheGroupContext gctx = cctx.cache().cacheGroup(grpId);

            Iterator<GridDhtLocalPartition> iter;

            if (grpParts == null)
                iter = gctx.topology().currentLocalPartitions().iterator();
            else {
                if (grpParts.contains(INDEX_PARTITION)) {
                    throw new IgniteCheckedException("Index partition cannot be included into snapshot if " +
                        " set of cache group partitions has been explicitly provided [grpId=" + grpId + ']');
                }

                iter = F.iterator(grpParts, gctx.topology()::localPartition, false);
            }

            Set<Integer> owning = new HashSet<>();
            Set<Integer> missed = new HashSet<>();

            // Iterate over partitions of the particular cache group.
            while (iter.hasNext()) {
                GridDhtLocalPartition part = iter.next();

                if (part.state() == GridDhtPartitionState.OWNING)
                    owning.add(part.id());
                else
                    // There is no data assigned to the partition, so it hasn't been created yet.
                    missed.add(part.id());
            }

            boolean affNode = gctx.nodeFilter() == null || gctx.nodeFilter().apply(cctx.localNode());

            if (grpParts != null) {
                // A set of partitions was explicitly requested, so exit with an error if any of them is missed.
                if (!missed.isEmpty()) {
                    throw new IgniteCheckedException("Snapshot operation cancelled due to " +
                        "not all of requested partitions has OWNING state on local node [grpId=" + grpId +
                        ", missed=" + S.compact(missed) + ']');
                }
            }
            else {
                // Partitions have not been provided explicitly, and all local partitions are in
                // OWNING state, so the index partition must be included into the snapshot.
                if (!missed.isEmpty()) {
                    log.warning("All local cache group partitions in OWNING state have been included into a snapshot. " +
                        "Partitions which have different states skipped. Index partitions has also been skipped " +
                        "[snpName=" + snpName + ", grpId=" + grpId + ", missed=" + S.compact(missed) + ']');
                }
                else if (affNode && missed.isEmpty() && cctx.kernalContext().query().moduleEnabled())
                    owning.add(INDEX_PARTITION);
            }

            processed.put(grpId, owning);
        }

        List<CacheConfiguration<?, ?>> ccfgs = new ArrayList<>();

        for (Map.Entry<Integer, Set<Integer>> e : processed.entrySet()) {
            int grpId = e.getKey();

            CacheGroupContext gctx = cctx.cache().cacheGroup(grpId);

            if (gctx == null)
                throw new IgniteCheckedException("Cache group is stopped : " + grpId);

            ccfgs.add(gctx.config());

            addPartitionWriters(grpId, e.getValue(), FilePageStoreManager.cacheDirName(gctx.config()));
        }

        if (withMetaStorage) {
            processed.put(MetaStorage.METASTORAGE_CACHE_ID, MetaStorage.METASTORAGE_PARTITIONS);

            addPartitionWriters(MetaStorage.METASTORAGE_CACHE_ID, MetaStorage.METASTORAGE_PARTITIONS,
                MetaStorage.METASTORAGE_DIR_NAME);
        }

        pageStore.readConfigurationFiles(ccfgs, (ccfg, ccfgFile) ->
            ccfgSndrs.add(new CacheConfigurationSender(ccfg.getName(),
                FilePageStoreManager.cacheDirName(ccfg), ccfgFile)));
    }
    catch (IgniteCheckedException e) {
        acceptException(e);
    }
}
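For context, the parts map consumed above maps a cache group ID to an optional explicit set of partition IDs, where a null value means "all local partitions of the group". The following is a minimal sketch of how a caller might populate such a map, assuming java.util and org.apache.ignite.internal.util.typedef.internal.CU are imported; the cache names and partition numbers are illustrative assumptions, not taken from the snapshot code:

    // Hypothetical setup of the grpId -> partitions mapping used by the task.
    Map<Integer, Set<Integer>> parts = new HashMap<>();

    // Explicit partitions 0 and 1 of one group (INDEX_PARTITION must not appear here,
    // or onMarkCheckpointBegin fails with IgniteCheckedException).
    parts.put(CU.cacheId("tx-cache"), new HashSet<>(Arrays.asList(0, 1)));

    // A null value requests every local partition of the group.
    parts.put(CU.cacheId("atomic-cache"), null);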
Use of org.apache.ignite.internal.processors.cache.CacheGroupContext in project ignite by apache.
The class PartitionsEvictManager, method showProgress.
/**
 * Shows progress of eviction.
 */
private void showProgress() {
    if (U.millisSinceNanos(lastShowProgressTimeNanos) >= evictionProgressFreqMs) {
        int size = executor.getQueue().size();

        if (log.isInfoEnabled()) {
            log.info("Eviction in progress [groups=" + evictionGroupsMap.keySet().size() +
                ", remainingPartsToEvict=" + size + ']');

            evictionGroupsMap.values().forEach(GroupEvictionContext::showProgress);

            if (!logEvictPartByGrps.isEmpty()) {
                StringJoiner evictPartJoiner = new StringJoiner(", ");

                logEvictPartByGrps.forEach((grpId, map) -> {
                    CacheGroupContext grpCtx = cctx.cache().cacheGroup(grpId);

                    String grpName = (nonNull(grpCtx) ? grpCtx.cacheOrGroupName() : null);

                    evictPartJoiner.add("[grpId=" + grpId + ", grpName=" + grpName + ", " + toString(map) + ']');
                });

                log.info("Partitions have been scheduled for eviction: " + evictPartJoiner);

                logEvictPartByGrps.clear();
            }
        }

        lastShowProgressTimeNanos = System.nanoTime();
    }
}
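The method above relies on a simple throttling pattern: remember the time of the last progress line in nanoseconds and emit a new one only after evictionProgressFreqMs milliseconds have elapsed. Below is a self-contained sketch of the same pattern using only JDK types; the class and method names are illustrative, not Ignite API:

    import java.util.concurrent.TimeUnit;

    /** Minimal throttled progress logger; the frequency handling mirrors showProgress() above. */
    class ProgressLogger {
        private final long freqMs;

        private long lastNanos = System.nanoTime();

        ProgressLogger(long freqMs) {
            this.freqMs = freqMs;
        }

        /** Prints the message at most once per {@code freqMs} milliseconds. */
        void maybeLog(String msg) {
            if (TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - lastNanos) >= freqMs) {
                System.out.println(msg);

                // Reset the window only when something was actually logged.
                lastNanos = System.nanoTime();
            }
        }
    }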
Use of org.apache.ignite.internal.processors.cache.CacheGroupContext in project ignite by apache.
The class CacheExchangeMergeTest, method checkExchanges.
/**
 * @param node Node.
 * @param vers Expected exchange versions.
 */
private void checkExchanges(Ignite node, long... vers) {
    IgniteKernal node0 = (IgniteKernal)node;

    List<AffinityTopologyVersion> expVers = new ArrayList<>();

    for (long ver : vers)
        expVers.add(new AffinityTopologyVersion(ver));

    List<AffinityTopologyVersion> doneVers = new ArrayList<>();

    List<GridDhtPartitionsExchangeFuture> futs = node0.context().cache().context().exchange().exchangeFutures();

    for (int i = futs.size() - 1; i >= 0; i--) {
        GridDhtPartitionsExchangeFuture fut = futs.get(i);

        if (!fut.isMerged() && fut.exchangeDone() && fut.firstEvent().type() != EVT_DISCOVERY_CUSTOM_EVT) {
            AffinityTopologyVersion resVer = fut.topologyVersion();

            Assert.assertNotNull(resVer);

            doneVers.add(resVer);
        }
    }

    assertEquals(expVers, doneVers);

    for (CacheGroupContext grpCtx : node0.context().cache().cacheGroups()) {
        for (AffinityTopologyVersion ver : grpCtx.affinity().cachedVersions()) {
            if (ver.minorTopologyVersion() > 0)
                continue;

            assertTrue("Unexpected version [ver=" + ver + ", exp=" + expVers + ']', expVers.contains(ver));
        }
    }
}
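A hypothetical call site for this helper: start several server nodes, wait for the partition map exchange to complete, and verify that the exchange history contains the expected major topology versions. The version numbers below assume that no exchanges were merged; with merging, some versions would be absent:

    // Hypothetical usage; startGrid() and awaitPartitionMapExchange()
    // come from Ignite's test framework (GridCommonAbstractTest).
    Ignite node = startGrid(0);

    startGrid(1);
    startGrid(2);

    awaitPartitionMapExchange();

    // Each node join produces one exchange: major versions 1, 2 and 3.
    checkExchanges(node, 1, 2, 3);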
Use of org.apache.ignite.internal.processors.cache.CacheGroupContext in project ignite by apache.
The class RebalanceStatisticsTest, method testRebalanceStatistics.
/**
 * Test statistics of a rebalance.
 *
 * Steps:
 * 1) Create and fill a cluster;
 * 2) Start a new node while listening for logs and supply messages;
 * 3) Check that the number of log messages equals the number of supply messages plus one;
 * 4) Find the corresponding log message for each supply message;
 * 5) Find the log message that follows all the groups and check its correctness.
 *
 * @throws Exception If any error occurs.
 */
@Test
public void testRebalanceStatistics() throws Exception {
    createCluster(3);

    ListeningTestLogger listeningTestLog = new ListeningTestLogger(log);

    IgniteConfiguration cfg = getConfiguration(getTestIgniteInstanceName(3)).setGridLogger(listeningTestLog);

    // Collect log messages with rebalance statistics.
    Collection<String> logMsgs = new ConcurrentLinkedQueue<>();

    listeningTestLog.registerListener(
        new CallbackExecutorLogListener("Completed( \\(final\\))? rebalanc(ing|e chain).*", logMsgs::add));

    Map<Ignite, Collection<T2<ClusterNode, Message>>> recordMsgs = new ConcurrentHashMap<>();

    G.allGrids().forEach(n -> TestRecordingCommunicationSpi.spi(n).record((node, msg) -> {
        if (GridDhtPartitionSupplyMessage.class.isInstance(msg))
            recordMsgs.computeIfAbsent(n, n1 -> new ConcurrentLinkedQueue<>()).add(new T2<>(node, msg));

        return false;
    }));

    IgniteEx node = startGrid(cfg);

    awaitPartitionMapExchange();

    // Collect supply messages only for the new node.
    Map<Ignite, List<GridDhtPartitionSupplyMessage>> supplyMsgs = G.allGrids().stream()
        .filter(n -> !n.equals(node))
        .collect(toMap(
            identity(),
            n -> recordMsgs.get(n).stream()
                .filter(t2 -> t2.get1().id().equals(node.localNode().id()))
                .map(IgniteBiTuple::get2)
                .map(GridDhtPartitionSupplyMessage.class::cast)
                .collect(toList())));

    // +1 because of the single message about the end of rebalance for all groups.
    assertEquals(supplyMsgs.values().stream().mapToInt(List::size).sum() + 1, logMsgs.size());

    IgniteClosure2X<GridCacheEntryInfo, CacheObjectContext, Long> getSize =
        new IgniteClosure2X<GridCacheEntryInfo, CacheObjectContext, Long>() {
            /** {@inheritDoc} */
            @Override public Long applyx(GridCacheEntryInfo info, CacheObjectContext ctx) throws IgniteCheckedException {
                return (long)info.marshalledSize(ctx);
            }
        };

    for (Map.Entry<Ignite, List<GridDhtPartitionSupplyMessage>> supplyMsg : supplyMsgs.entrySet()) {
        List<String> supplierMsgs = logMsgs.stream()
            .filter(s -> s.contains("supplier=" + supplyMsg.getKey().cluster().localNode().id()))
            .collect(toList());

        List<GridDhtPartitionSupplyMessage> msgs = supplyMsg.getValue();

        assertEquals(msgs.size(), supplierMsgs.size());

        for (GridDhtPartitionSupplyMessage msg : msgs) {
            Map<Integer, CacheEntryInfoCollection> infos = U.field(msg, "infos");

            CacheGroupContext grpCtx = node.context().cache().cacheGroup(msg.groupId());

            long bytes = 0;

            for (CacheEntryInfoCollection c : infos.values()) {
                for (GridCacheEntryInfo i : c.infos())
                    bytes += getSize.apply(i, grpCtx.cacheObjectContext());
            }

            String[] checVals = {
                "grp=" + grpCtx.cacheOrGroupName(),
                "partitions=" + infos.size(),
                "entries=" + infos.values().stream().mapToInt(i -> i.infos().size()).sum(),
                "topVer=" + msg.topologyVersion(),
                "rebalanceId=" + U.field(msg, "rebalanceId"),
                "bytesRcvd=" + U.humanReadableByteCount(bytes),
                "fullPartitions=" + infos.size(),
                "fullEntries=" + infos.values().stream().mapToInt(i -> i.infos().size()).sum(),
                "fullBytesRcvd=" + U.humanReadableByteCount(bytes),
                "histPartitions=0",
                "histEntries=0",
                "histBytesRcvd=0"
            };

            assertTrue(
                "msgs=" + supplierMsgs + ", checVals=" + asList(checVals),
                supplierMsgs.stream().anyMatch(s -> Stream.of(checVals).allMatch(s::contains)));
        }
    }

    String rebChainMsg = logMsgs.stream().filter(s -> s.startsWith("Completed rebalance chain")).findAny().get();

    long rebId = -1;
    int parts = 0;
    int entries = 0;
    long bytes = 0;

    for (List<GridDhtPartitionSupplyMessage> msgs : supplyMsgs.values()) {
        for (GridDhtPartitionSupplyMessage msg : msgs) {
            Map<Integer, CacheEntryInfoCollection> infos = U.field(msg, "infos");

            rebId = U.field(msg, "rebalanceId");
            parts += infos.size();
            entries += infos.values().stream().mapToInt(i -> i.infos().size()).sum();

            CacheObjectContext cacheObjCtx = node.context().cache().cacheGroup(msg.groupId()).cacheObjectContext();

            for (CacheEntryInfoCollection c : infos.values()) {
                for (GridCacheEntryInfo i : c.infos())
                    bytes += getSize.apply(i, cacheObjCtx);
            }
        }
    }

    String[] checVals = {
        "partitions=" + parts,
        "entries=" + entries,
        "rebalanceId=" + rebId,
        "bytesRcvd=" + U.humanReadableByteCount(bytes)
    };

    assertTrue(rebChainMsg, Stream.of(checVals).allMatch(rebChainMsg::contains));
}
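The test reads private message fields such as infos and rebalanceId reflectively through U.field. A simplified stand-in built on plain JDK reflection shows the idea; unlike IgniteUtils.field, this sketch does not walk superclasses, and the method name is illustrative:

    import java.lang.reflect.Field;

    /** Reads a private field by name; a simplified analogue of U.field() for illustration. */
    @SuppressWarnings("unchecked")
    static <T> T fieldValue(Object target, String fieldName) throws ReflectiveOperationException {
        Field f = target.getClass().getDeclaredField(fieldName);

        // Private fields require explicit access before reading.
        f.setAccessible(true);

        return (T)f.get(target);
    }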
Use of org.apache.ignite.internal.processors.cache.CacheGroupContext in project ignite by apache.
The class GridCacheRebalancingPartitionCountersTest, method checkUpdCounter.
/** */
private void checkUpdCounter(IgniteEx ignite, List<String> issues, HashMap<Integer, Long> partMap) {
    final CacheGroupContext grpCtx = ignite.context().cache().cacheGroup(CU.cacheId(CACHE_NAME));

    assertNotNull(grpCtx);

    GridDhtPartitionTopologyImpl top = (GridDhtPartitionTopologyImpl)grpCtx.topology();

    List<GridDhtLocalPartition> locParts = top.localPartitions();

    for (GridDhtLocalPartition part : locParts) {
        Long cnt = partMap.get(part.id());

        if (cnt == null)
            partMap.put(part.id(), part.updateCounter());

        if ((cnt != null && part.updateCounter() != cnt) || part.updateCounter() == 0)
            issues.add("Node name " + ignite.name() + ", part=" + part.id() + ", updCounter=" + part.updateCounter());
    }
}
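A hypothetical driver for this check: run it against every started node, accumulating discrepancies in a shared list, then fail the test once with the whole list. The assertion style is an assumption for illustration, not taken from the original test:

    // Hypothetical aggregation of update-counter issues across the cluster.
    List<String> issues = new ArrayList<>();
    HashMap<Integer, Long> partMap = new HashMap<>();

    for (Ignite ig : G.allGrids())
        checkUpdCounter((IgniteEx)ig, issues, partMap);

    assertTrue("Update counter issues: " + issues, issues.isEmpty());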