Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition in project ignite by apache.
The class IgniteCacheLockPartitionOnAffinityRunTest, method getPersonsCountFromPartitionMap.
/**
* @param ignite Ignite.
* @param orgId Organization id.
* @return Count of Person objects found with the specified orgId.
* @throws Exception If failed.
*/
private static int getPersonsCountFromPartitionMap(final IgniteEx ignite, int orgId) throws Exception {
int part = ignite.affinity(Organization.class.getSimpleName()).partition(orgId);
GridCacheAdapter<?, ?> cacheAdapterPers = ignite.context().cache().internalCache(Person.class.getSimpleName());
GridDhtLocalPartition pPers = cacheAdapterPers.context().topology().localPartition(part, AffinityTopologyVersion.NONE, false);
int cnt = 0;
GridCursor<? extends CacheDataRow> c = pPers.dataStore().cursor();
CacheObjectContext ctx = cacheAdapterPers.context().cacheObjectContext();
while (c.next()) {
CacheDataRow e = c.get();
Person.Key k = e.key().value(ctx, false);
Person p = e.value().value(ctx, false);
if (p != null && p.getOrgId() == orgId && k.orgId == orgId)
++cnt;
}
return cnt;
}
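A note on this pattern: the loop above walks the partition's row store directly via dataStore().cursor() without reserving the partition, which works in a controlled test but can be racy if rebalancing or eviction runs concurrently. Below is a minimal sketch of the same counting loop guarded by the reserve()/release() idiom used elsewhere on this page; the helper name countRowsInPartition and the plain row count are illustrative assumptions, not part of the original test, and the same imports as in the snippet above are assumed.
/** Counts rows in a local partition while holding a reservation (illustrative sketch). */
private static int countRowsInPartition(GridCacheAdapter<?, ?> cache, int part) throws IgniteCheckedException {
    GridDhtLocalPartition locPart = cache.context().topology().localPartition(part, AffinityTopologyVersion.NONE, false);
    if (locPart == null || !locPart.reserve())
        return 0; // Partition is not hosted locally or is already being evicted.
    try {
        if (locPart.state() != GridDhtPartitionState.OWNING)
            return 0; // Only an OWNING partition holds a consistent copy of the data.
        int cnt = 0;
        GridCursor<? extends CacheDataRow> cur = locPart.dataStore().cursor();
        while (cur.next())
            ++cnt;
        return cnt;
    }
    finally {
        locPart.release();
    }
}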
Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition in project ignite by apache.
The class RetryCauseMessageSelfTest, method testReplicatedCacheReserveFailureMessage.
/**
* Verifies the "Failed to reserve partitions for query (partition of REPLICATED cache is not in OWNING state)" failure message.
*/
@Ignore("https://issues.apache.org/jira/browse/IGNITE-7039")
@Test
public void testReplicatedCacheReserveFailureMessage() {
GridMapQueryExecutor mapQryExec = GridTestUtils.getFieldValue(h2Idx, IgniteH2Indexing.class, "mapQryExec");
final GridKernalContext ctx = GridTestUtils.getFieldValue(mapQryExec, GridMapQueryExecutor.class, "ctx");
GridTestUtils.setFieldValue(h2Idx, "mapQryExec", new MockGridMapQueryExecutor() {
@Override
public void onQueryRequest(ClusterNode node, GridH2QueryRequest qryReq) throws IgniteCheckedException {
GridCacheContext<?, ?> cctx = ctx.cache().context().cacheContext(qryReq.caches().get(0));
GridDhtLocalPartition part = cctx.topology().localPartition(0, NONE, false);
AtomicLong aState = GridTestUtils.getFieldValue(part, GridDhtLocalPartition.class, "state");
long stateVal = aState.getAndSet(2);
startedExecutor.onQueryRequest(node, qryReq);
aState.getAndSet(stateVal);
}
}.insertRealExecutor(mapQryExec));
SqlQuery<String, Organization> qry = new SqlQuery<>(Organization.class, ORG_SQL);
qry.setDistributedJoins(true);
try {
orgCache.query(qry).getAll();
} catch (CacheException e) {
assertTrue(e.getMessage().contains("Failed to reserve partitions for query (partition of REPLICATED cache is not in OWNING state) ["));
return;
} finally {
GridTestUtils.setFieldValue(h2Idx, "mapQryExec", mapQryExec);
}
fail();
}
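The test above simulates a non-OWNING replicated partition by swapping the private AtomicLong "state" field of GridDhtLocalPartition via GridTestUtils reflection, running the query through the mock executor, and restoring the original value afterwards. If the same trick is needed in several tests, it can be factored into a small helper along the lines of the sketch below; the helper name withRawPartitionState is a hypothetical illustration, and the raw long value is the same packed state encoding the test relies on.
/** Runs a task while the partition's raw state value is temporarily overridden (illustrative sketch). */
private static void withRawPartitionState(GridDhtLocalPartition part, long tmpState, Runnable task) {
    AtomicLong rawState = GridTestUtils.getFieldValue(part, GridDhtLocalPartition.class, "state");
    long prev = rawState.getAndSet(tmpState); // Same reflective access as in the test above.
    try {
        task.run();
    }
    finally {
        rawState.getAndSet(prev); // Always restore the original value so other tests see a healthy partition.
    }
}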
Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition in project ignite by apache.
The class GatherPartitionStatistics, method recollectPartition.
/**
* Collect statistics for the partition, update the existing statistics in the repository, and return the resulting partition statistics.
*
* @param cctx Cache context to get partition from.
* @param partStat Existing partition statistics to fix or use as a base.
* @param colsToCollect Columns to collect.
* @param colsToRemove Columns to remove.
* @return New partition statistics.
*/
private ObjectPartitionStatisticsImpl recollectPartition(GridCacheContext<?, ?> cctx, ObjectPartitionStatisticsImpl partStat, Map<String, StatisticsColumnConfiguration> colsToCollect, Set<String> colsToRemove) {
CacheGroupContext grp = cctx.group();
GridDhtPartitionTopology top = grp.topology();
AffinityTopologyVersion topVer = top.readyTopologyVersion();
GridDhtLocalPartition locPart = top.localPartition(partId, topVer, false);
if (locPart == null)
throw new GatherStatisticCancelException();
boolean reserved = locPart.reserve();
GridH2Table tbl = gathCtx.table();
ObjectPartitionStatisticsImpl res = null;
try {
if (!reserved || (locPart.state() != OWNING)) {
if (log.isDebugEnabled()) {
log.debug("Partition not owning. Need to retry [part=" + partId + ", tbl=" + tbl.identifier() + ']');
}
throw new GatherStatisticCancelException();
}
Column[] cols = IgniteStatisticsHelper.filterColumns(tbl.getColumns(), colsToCollect.keySet());
ColumnStatisticsCollector[] collectors = new ColumnStatisticsCollector[cols.length];
for (int i = 0; i < cols.length; ++i) {
long colCfgVer = colsToCollect.get(cols[i].getName()).version();
collectors[i] = new ColumnStatisticsCollector(cols[i], tbl::compareTypeSafe, colCfgVer);
}
GridH2RowDescriptor rowDesc = tbl.rowDescriptor();
GridQueryTypeDescriptor typeDesc = rowDesc.type();
try {
int checkInt = CANCELLED_CHECK_INTERVAL;
if (log.isDebugEnabled()) {
log.debug("Start partition scan [part=" + partId + ", tbl=" + gathCtx.table().identifier() + ']');
}
for (CacheDataRow row : grp.offheap().cachePartitionIterator(gathCtx.table().cacheId(), partId, null, false)) {
if (--checkInt == 0) {
if (gathCtx.future().isCancelled())
throw new GatherStatisticCancelException();
checkInt = CANCELLED_CHECK_INTERVAL;
}
if (!typeDesc.matchType(row.value()) || wasExpired(row))
continue;
H2Row h2row = rowDesc.createRow(row);
for (ColumnStatisticsCollector colStat : collectors) colStat.add(h2row.getValue(colStat.col().getColumnId()));
}
} catch (IgniteCheckedException e) {
log.warning(String.format("Unable to collect partition level statistics by %s.%s:%d due to %s", tbl.identifier().schema(), tbl.identifier().table(), partId, e.getMessage()));
throw new IgniteException("Unable to collect partition level statistics", e);
}
Map<String, ColumnStatistics> colStats = Arrays.stream(collectors).collect(Collectors.toMap(csc -> csc.col().getName(), ColumnStatisticsCollector::finish));
// Carry over old column statistics that were not recollected and not removed, so the new object fully replaces the existing statistics.
if (partStat != null) {
for (Map.Entry<String, ColumnStatistics> oldColStat : partStat.columnsStatistics().entrySet()) {
if (!colsToRemove.contains(oldColStat.getKey()))
colStats.putIfAbsent(oldColStat.getKey(), oldColStat.getValue());
}
}
res = new ObjectPartitionStatisticsImpl(partId, getRowCount(colStats), locPart.updateCounter(), colStats);
} finally {
if (reserved)
locPart.release();
}
statRepo.replaceLocalPartitionStatistics(gathCtx.configuration().key(), res);
if (gathCtx.configuration().columns().size() == colsToCollect.size())
statRepo.refreshObsolescence(gathCtx.configuration().key(), partId);
return res;
}
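The GridDhtLocalPartition-related part of this method boils down to a reservation guard: resolve the local partition for the ready topology version, reserve it, verify it is still OWNING, scan the rows (checking for cancellation every CANCELLED_CHECK_INTERVAL rows), and release the reservation in a finally block. A condensed sketch of that guard, with the statistics-specific logic elided, might look like the following.
// Condensed sketch of the reservation guard used above (statistics logic elided).
GridDhtLocalPartition locPart = top.localPartition(partId, top.readyTopologyVersion(), false);
if (locPart == null)
    throw new GatherStatisticCancelException(); // Partition is no longer local; the caller reschedules gathering.
boolean reserved = locPart.reserve();
try {
    if (!reserved || locPart.state() != OWNING)
        throw new GatherStatisticCancelException(); // Not safe to read: partition is rebalancing or evicting.
    // ... iterate grp.offheap().cachePartitionIterator(...) and feed rows to the column collectors ...
}
finally {
    if (reserved)
        locPart.release(); // Release even on cancellation so eviction/rebalancing can proceed.
}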
Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition in project ignite by apache.
The class GridCommonAbstractTest, method awaitPartitionMapExchange.
/**
* @param waitEvicts If {@code true}, waits until evictions are finished.
* @param waitNode2PartUpdate If {@code true}, waits until the node2part info update is finished on every node.
* @param nodes Optional nodes. If {@code null}, the method waits for all nodes; otherwise only the given nodes are checked.
* @param printPartState If {@code true}, prints the partition state if evictions did not happen in time.
* @param cacheNames Caches to wait for; {@code null} means all caches.
* @throws InterruptedException If interrupted.
*/
@SuppressWarnings("BusyWait")
protected void awaitPartitionMapExchange(boolean waitEvicts, boolean waitNode2PartUpdate, @Nullable Collection<ClusterNode> nodes, boolean printPartState, @Nullable Set<String> cacheNames) throws InterruptedException {
long timeout = getPartitionMapExchangeTimeout();
long startTime = -1;
Set<String> names = new HashSet<>();
Ignite crd = null;
for (Ignite g : G.allGrids()) {
ClusterNode node = g.cluster().localNode();
if (crd == null || node.order() < crd.cluster().localNode().order()) {
crd = g;
if (node.order() == 1)
break;
}
}
if (crd == null)
return;
AffinityTopologyVersion waitTopVer = ((IgniteKernal) crd).context().discovery().topologyVersionEx();
if (waitTopVer.topologyVersion() <= 0)
waitTopVer = new AffinityTopologyVersion(1, 0);
for (Ignite g : G.allGrids()) {
if (nodes != null && !nodes.contains(g.cluster().localNode()))
continue;
IgniteKernal g0 = (IgniteKernal) g;
names.add(g0.configuration().getIgniteInstanceName());
if (startTime != -1) {
if (startTime != g0.context().discovery().gridStartTime())
fail("Found nodes from different clusters, probable some test does not stop nodes " + "[allNodes=" + names + ']');
} else
startTime = g0.context().discovery().gridStartTime();
if (g.cluster().localNode().isDaemon())
continue;
IgniteInternalFuture<?> exchFut = g0.context().cache().context().exchange().affinityReadyFuture(waitTopVer);
if (exchFut != null && !exchFut.isDone()) {
try {
exchFut.get(timeout);
} catch (IgniteCheckedException e) {
log.error("Failed to wait for exchange [topVer=" + waitTopVer + ", node=" + g0.name() + ']', e);
}
}
for (IgniteCacheProxy<?, ?> c : g0.context().cache().jcaches()) {
CacheConfiguration cfg = c.context().config();
if (cfg == null || cacheNames != null && !cacheNames.contains(cfg.getName()))
continue;
if (cfg.getCacheMode() != LOCAL && cfg.getRebalanceMode() != NONE && g.cluster().nodes().size() > 1) {
AffinityFunction aff = cfg.getAffinity();
GridDhtCacheAdapter<?, ?> dht = dht(c);
GridDhtPartitionTopology top = dht.topology();
for (int p = 0; p < aff.partitions(); p++) {
long start = 0;
for (int i = 0; ; i++) {
boolean match = false;
GridCachePartitionExchangeManager<?, ?> exchMgr = dht.context().shared().exchange();
AffinityTopologyVersion readyVer = exchMgr.readyAffinityVersion();
// Wait for the ready affinity version below; otherwise there may be an assertion when printing top.readyTopologyVersion().
try {
IgniteInternalFuture<?> fut = exchMgr.affinityReadyFuture(readyVer);
if (fut != null)
fut.get();
} catch (IgniteCheckedException e) {
throw new IgniteException(e);
}
if (readyVer.topologyVersion() > 0 && c.context().started()) {
// Must map on updated version of topology.
List<ClusterNode> affNodes = dht.context().affinity().assignment(readyVer).idealAssignment().get(p);
int affNodesCnt = affNodes.size();
GridDhtTopologyFuture topFut = top.topologyVersionFuture();
Collection<ClusterNode> owners = (topFut != null && topFut.isDone()) ? top.owners(p, AffinityTopologyVersion.NONE) : Collections.<ClusterNode>emptyList();
int ownerNodesCnt = owners.size();
GridDhtLocalPartition loc = top.localPartition(p, readyVer, false);
boolean notPrimary = !affNodes.isEmpty() && !affNodes.get(0).equals(dht.context().affinity().primaryByPartition(p, readyVer));
if (affNodesCnt != ownerNodesCnt || !affNodes.containsAll(owners) || (waitEvicts && loc != null && loc.state() != OWNING) || notPrimary) {
if (i % 50 == 0)
LT.warn(log(), "Waiting for topology map update [" + "igniteInstanceName=" + g.name() + ", cache=" + cfg.getName() + ", cacheId=" + dht.context().cacheId() + ", topVer=" + top.readyTopologyVersion() + ", p=" + p + ", affNodesCnt=" + affNodesCnt + ", ownersCnt=" + ownerNodesCnt + ", affNodes=" + F.nodeIds(affNodes) + ", owners=" + F.nodeIds(owners) + ", topFut=" + topFut + ", locNode=" + g.cluster().localNode() + ']');
} else
match = true;
} else {
if (i % 50 == 0)
LT.warn(log(), "Waiting for topology map update [" + "igniteInstanceName=" + g.name() + ", cache=" + cfg.getName() + ", cacheId=" + dht.context().cacheId() + ", topVer=" + top.readyTopologyVersion() + ", started=" + dht.context().started() + ", p=" + p + ", readVer=" + readyVer + ", locNode=" + g.cluster().localNode() + ']');
}
if (!match) {
if (i == 0)
start = System.currentTimeMillis();
if (System.currentTimeMillis() - start > timeout) {
U.dumpThreads(log);
if (printPartState)
printPartitionState(c);
throw new IgniteException("Timeout of waiting for topology map update [" + "igniteInstanceName=" + g.name() + ", cache=" + cfg.getName() + ", cacheId=" + dht.context().cacheId() + ", topVer=" + top.readyTopologyVersion() + ", p=" + p + ", readVer=" + readyVer + ", locNode=" + g.cluster().localNode() + ']');
}
// Busy wait.
Thread.sleep(20);
continue;
}
if (i > 0)
log().warning("Finished waiting for topology map update [igniteInstanceName=" + g.name() + ", p=" + p + ", duration=" + (System.currentTimeMillis() - start) + "ms]");
break;
}
}
if (waitNode2PartUpdate) {
long start = System.currentTimeMillis();
boolean failed = true;
while (failed) {
failed = false;
for (GridDhtPartitionMap pMap : top.partitionMap(true).values()) {
if (failed)
break;
for (Map.Entry entry : pMap.entrySet()) {
if (System.currentTimeMillis() - start > timeout) {
U.dumpThreads(log);
throw new IgniteException("Timeout of waiting for partition state update [" + "igniteInstanceName=" + g.name() + ", cache=" + cfg.getName() + ", cacheId=" + dht.context().cacheId() + ", topVer=" + top.readyTopologyVersion() + ", locNode=" + g.cluster().localNode() + ']');
}
if (entry.getValue() != OWNING) {
LT.warn(log(), "Waiting for correct partition state part=" + entry.getKey() + ", should be OWNING [state=" + entry.getValue() + "], node=" + g.name() + ", cache=" + c.getName());
// Busy wait.
Thread.sleep(200);
failed = true;
break;
}
}
}
}
}
}
}
}
log.info("awaitPartitionMapExchange finished");
}
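awaitPartitionMapExchange is the standard way in Ignite's test framework to make sure assertions run against a settled partition map: it waits for the affinity-ready future on every node, then busy-waits until affinity owners, actual owners, primaries and (optionally) node2part maps agree, dumping threads and partition state on timeout. A typical call site looks roughly like the sketch below; the grid count and cache name are hypothetical.
// Typical usage inside a test method (illustrative; grid count and cache name are hypothetical).
startGrids(3); // GridAbstractTest helper that starts several nodes.
grid(0).createCache(new CacheConfiguration<Integer, String>("myCache").setBackups(1));
// Let rebalancing settle before asserting on partition ownership:
awaitPartitionMapExchange(true, true, null, true, Collections.singleton("myCache"));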
Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition in project ignite by apache.
The class GridCommonAbstractTest, method printPartitionState.
/**
* @param cacheName Cache name.
* @param firstParts Number of partitions to print (only the first {@code firstParts} partitions are printed; {@code 0} means all partitions).
* @param nodes Grid nodes.
*
* Prints partition state for the given cache.
*/
protected static void printPartitionState(String cacheName, int firstParts, List<? extends Ignite> nodes) {
StringBuilder sb = new StringBuilder();
sb.append("----preload sync futures----\n");
for (Ignite ig : nodes) {
IgniteKernal k = ((IgniteKernal) ig);
GridCacheAdapter<Object, Object> adapter = k.internalCache(cacheName);
if (adapter == null)
continue;
IgniteInternalFuture<?> syncFut = adapter.preloader().syncFuture();
sb.append("nodeId=").append(k.context().localNodeId()).append(" consistentId=").append(k.localNode().consistentId()).append(" isDone=").append(syncFut.isDone()).append("\n");
}
sb.append("----rebalance futures----\n");
for (Ignite ig : nodes) {
IgniteKernal k = ((IgniteKernal) ig);
GridCacheAdapter<Object, Object> adapter = k.internalCache(cacheName);
if (adapter == null)
continue;
IgniteInternalFuture<?> f = adapter.preloader().rebalanceFuture();
try {
sb.append("nodeId=").append(k.context().localNodeId()).append(" isDone=").append(f.isDone()).append(" res=").append(f.isDone() ? f.get() : "N/A").append(" topVer=").append((U.hasField(f, "topVer") ? String.valueOf(U.<Object>field(f, "topVer")) : "N/A")).append("\n");
Map<UUID, IgniteDhtDemandedPartitionsMap> remaining = U.field(f, "remaining");
sb.append("remaining: ");
sb.append(remaining.toString());
sb.append("\n");
} catch (Throwable e) {
log.error(e.getMessage());
}
}
sb.append("----partition state----\n");
for (Ignite g : nodes) {
IgniteKernal g0 = (IgniteKernal) g;
sb.append("localNodeId=").append(g0.localNode().id()).append(" grid=").append(g0.name()).append("\n");
IgniteCacheProxy<?, ?> cache = null;
try {
cache = g0.context().cache().jcache(cacheName);
} catch (IllegalArgumentException e) {
// Client topology.
continue;
}
GridDhtCacheAdapter<?, ?> dht = dht(cache);
GridDhtPartitionTopology top = dht.topology();
int parts = firstParts == 0 ? cache.context().config().getAffinity().partitions() : firstParts;
for (int p = 0; p < parts; p++) {
AffinityTopologyVersion readyVer = dht.context().shared().exchange().readyAffinityVersion();
Collection<UUID> affNodes = F.nodeIds(dht.context().affinity().assignment(readyVer).idealAssignment().get(p));
GridDhtLocalPartition part = top.localPartition(p, AffinityTopologyVersion.NONE, false);
sb.append("local part=");
if (part != null) {
sb.append(p).append(" counters=").append(part.dataStore().partUpdateCounter()).append(" fullSize=").append(part.fullSize()).append(" state=").append(part.state()).append(" reservations=").append(part.reservations());
} else
sb.append(p).append(" is null");
sb.append(" isAffNode=").append(affNodes.contains(g0.localNode().id())).append("\n");
for (UUID nodeId : F.nodeIds(g0.context().discovery().allNodes())) {
if (!nodeId.equals(g0.localNode().id())) {
top.readLock();
// Peek at the remote state directly to distinguish whether a partition is EVICTED or not yet initialized.
GridDhtPartitionFullMap map = U.field(top, "node2part");
try {
final GridDhtPartitionMap nodeMap = map.get(nodeId);
if (nodeMap == null)
// Skip client node.
continue;
final GridDhtPartitionState rmtState = nodeMap.get(p);
if (rmtState != null) {
sb.append(" nodeId=").append(nodeId).append(" part=").append(p).append(" state=").append(rmtState).append(" isAffNode=").append(affNodes.contains(nodeId)).append("\n");
} else {
sb.append(" nodeId=").append(nodeId).append(" part=").append(p).append(" is null").append("\n");
}
} finally {
top.readUnlock();
}
}
}
}
sb.append("\n");
}
log.info("dump partitions state for <" + cacheName + ">:\n" + sb.toString());
}
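A variant of this helper is invoked from awaitPartitionMapExchange (via printPartitionState(c)) when a topology wait times out, but the static version shown here can also be called directly while debugging rebalance issues. A couple of usage examples, with a hypothetical cache name:
// Dump the first 20 partitions of a cache across all started grids (cache name is hypothetical).
printPartitionState("myCache", 20, G.allGrids());
// Passing 0 as firstParts dumps every partition of the cache:
printPartitionState("myCache", 0, G.allGrids());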