Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology in project ignite by apache.
The class IdleVerifyUtility, method getUpdateCountersSnapshot.
/**
 * Gathers update counter info.
 * Holds a {@link org.apache.ignite.internal.processors.cache.PartitionUpdateCounter#copy} of each update counter.
 *
 * @param ign Ignite instance.
 * @param grpIds Group IDs.
 * @return Current group distribution with update counters per partition.
 */
public static Map<Integer, Map<Integer, PartitionUpdateCounter>> getUpdateCountersSnapshot(IgniteEx ign, Set<Integer> grpIds) {
    Map<Integer, Map<Integer, PartitionUpdateCounter>> partsWithCountersPerGrp = new HashMap<>();

    for (Integer grpId : grpIds) {
        CacheGroupContext grpCtx = ign.context().cache().cacheGroup(grpId);

        if (grpCtx == null)
            throw new GridNotIdleException("Group not found: " + grpId + "." +
                " Possible reasons: rebalance in progress or concurrent cache destroy.");

        GridDhtPartitionTopology top = grpCtx.topology();

        Map<Integer, PartitionUpdateCounter> partsWithCounters =
            partsWithCountersPerGrp.computeIfAbsent(grpId, k -> new HashMap<>());

        for (GridDhtLocalPartition part : top.currentLocalPartitions()) {
            if (part.state() != GridDhtPartitionState.OWNING)
                continue;

            @Nullable PartitionUpdateCounter updCntr = part.dataStore().partUpdateCounter();

            partsWithCounters.put(part.id(), updCntr == null ? null : updCntr.copy());
        }
    }

    return partsWithCountersPerGrp;
}
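For illustration, here is a minimal sketch of how such a snapshot can be used: take one snapshot before and one after some verification work, and fail if any OWNING partition's counter changed in between. The helper name assertCountersUnchanged and the comparison via equals are assumptions for this sketch, not part of IdleVerifyUtility.

// Hypothetical helper built on getUpdateCountersSnapshot (sketch only).
static void assertCountersUnchanged(IgniteEx ign, Set<Integer> grpIds, Runnable verification) {
    Map<Integer, Map<Integer, PartitionUpdateCounter>> before = getUpdateCountersSnapshot(ign, grpIds);

    verification.run();

    Map<Integer, Map<Integer, PartitionUpdateCounter>> after = getUpdateCountersSnapshot(ign, grpIds);

    for (Map.Entry<Integer, Map<Integer, PartitionUpdateCounter>> grp : before.entrySet()) {
        Map<Integer, PartitionUpdateCounter> afterParts = after.get(grp.getKey());

        for (Map.Entry<Integer, PartitionUpdateCounter> part : grp.getValue().entrySet()) {
            // Assumes PartitionUpdateCounter compares by state; null counters are treated as equal.
            if (!java.util.Objects.equals(part.getValue(), afterParts.get(part.getKey())))
                throw new GridNotIdleException("Update counter changed concurrently [grpId=" + grp.getKey() +
                    ", partId=" + part.getKey() + ']');
        }
    }
}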
Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology in project ignite by apache.
The class GridCacheAdapter, method localPreloadPartition.
/** {@inheritDoc} */
@Override public boolean localPreloadPartition(int part) throws IgniteCheckedException {
    // Only affinity (data) nodes can hold the partition locally.
    if (!ctx.affinityNode())
        return false;

    GridDhtPartitionTopology top = ctx.group().topology();

    @Nullable GridDhtLocalPartition p = top.localPartition(part, top.readyTopologyVersion(), false);

    if (p == null)
        return false;

    try {
        // Preload only partitions this node currently owns; reserve to protect the partition from eviction.
        if (!p.reserve() || p.state() != OWNING)
            return false;

        p.dataStore().preload();
    }
    finally {
        p.release();
    }

    return true;
}
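As a usage sketch, the public counterpart of this method can be driven from application code to warm up page memory for the local primary partitions of a cache. This assumes the IgniteCache#localPreloadPartition public API and the cache name "myCache"; treat both as illustrative.

// Sketch: preload every local primary partition of "myCache" into page memory.
Ignite ignite = Ignition.ignite();
IgniteCache<Object, Object> cache = ignite.cache("myCache");

int[] primaryParts = ignite.affinity("myCache").primaryPartitions(ignite.cluster().localNode());

for (int part : primaryParts) {
    // Returns false if this node does not currently own the partition (mirrors the adapter logic above).
    boolean preloaded = cache.localPreloadPartition(part);

    if (!preloaded)
        System.out.println("Partition not preloaded locally: " + part);
}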
Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology in project ignite by apache.
The class IgniteTxHandler, method prepareNearTx.
/**
 * @param originTx Transaction to copy.
 * @param nearNode Node that initiated the transaction.
 * @param req Near prepare request.
 * @return Prepare future, or {@code null} if the operation needs to be retried.
 */
@Nullable private IgniteInternalFuture<GridNearTxPrepareResponse> prepareNearTx(
    final GridNearTxLocal originTx,
    final ClusterNode nearNode,
    final GridNearTxPrepareRequest req
) {
    IgniteTxEntry firstEntry;

    try {
        IgniteTxEntry firstWrite = unmarshal(req.writes());
        IgniteTxEntry firstRead = unmarshal(req.reads());

        firstEntry = firstWrite != null ? firstWrite : firstRead;
    }
    catch (IgniteCheckedException e) {
        return new GridFinishedFuture<>(e);
    }

    GridDhtTxLocal tx = null;

    GridCacheVersion mappedVer = ctx.tm().mappedVersion(req.version());

    if (mappedVer != null) {
        tx = ctx.tm().tx(mappedVer);

        if (tx == null)
            U.warn(log, "Missing local transaction for mapped near version [nearVer=" + req.version() +
                ", mappedVer=" + mappedVer + ']');
        else {
            if (req.concurrency() == PESSIMISTIC)
                tx.nearFutureId(req.futureId());
        }
    }
    else {
        GridDhtPartitionTopology top = null;

        if (req.firstClientRequest()) {
            assert firstEntry != null : req;
            assert req.concurrency() == OPTIMISTIC : req;
            assert nearNode.isClient() : nearNode;

            top = firstEntry.context().topology();

            top.readLock();

            if (req.allowWaitTopologyFuture()) {
                GridDhtTopologyFuture topFut = top.topologyVersionFuture();

                if (!topFut.isDone()) {
                    top.readUnlock();

                    return null;
                }
            }
        }

        try {
            if (top != null) {
                boolean retry = false;

                GridDhtTopologyFuture topFut = top.topologyVersionFuture();

                if (!req.allowWaitTopologyFuture() && !topFut.isDone()) {
                    retry = true;

                    if (txPrepareMsgLog.isDebugEnabled()) {
                        txPrepareMsgLog.debug("Topology change is in progress, need remap transaction [" +
                            "txId=" + req.version() + ", node=" + nearNode.id() +
                            ", reqTopVer=" + req.topologyVersion() +
                            ", locTopVer=" + top.readyTopologyVersion() + ", req=" + req + ']');
                    }
                }

                if (!retry && needRemap(req.topologyVersion(), top.readyTopologyVersion(), req)) {
                    retry = true;

                    if (txPrepareMsgLog.isDebugEnabled()) {
                        txPrepareMsgLog.debug("Topology version mismatch for near prepare, need remap transaction [" +
                            "txId=" + req.version() + ", node=" + nearNode.id() +
                            ", reqTopVer=" + req.topologyVersion() +
                            ", locTopVer=" + top.readyTopologyVersion() + ", req=" + req + ']');
                    }
                }

                if (retry) {
                    GridNearTxPrepareResponse res = new GridNearTxPrepareResponse(
                        req.partition(), req.version(), req.futureId(), req.miniId(), req.version(), req.version(),
                        null, null, top.lastTopologyChangeVersion(), req.onePhaseCommit(), req.deployInfo() != null);

                    try {
                        ctx.io().send(nearNode, res, req.policy());

                        if (txPrepareMsgLog.isDebugEnabled()) {
                            txPrepareMsgLog.debug("Sent remap response for near prepare [txId=" + req.version() +
                                ", node=" + nearNode.id() + ']');
                        }
                    }
                    catch (ClusterTopologyCheckedException ignored) {
                        if (txPrepareMsgLog.isDebugEnabled()) {
                            txPrepareMsgLog.debug("Failed to send remap response for near prepare, node failed [" +
                                "txId=" + req.version() + ", node=" + nearNode.id() + ']');
                        }
                    }
                    catch (IgniteCheckedException e) {
                        U.error(txPrepareMsgLog, "Failed to send remap response for near prepare " +
                            "[txId=" + req.version() + ", node=" + nearNode.id() + ", req=" + req + ']', e);
                    }

                    return new GridFinishedFuture<>(res);
                }

                assert topFut.isDone();
            }

            tx = new GridDhtTxLocal(
                ctx, req.topologyVersion(), nearNode.id(), req.version(), req.futureId(), req.miniId(),
                req.threadId(), req.implicitSingle(), req.implicitSingle(), req.system(), req.explicitLock(),
                req.policy(), req.concurrency(), req.isolation(), req.timeout(), req.isInvalidate(), true,
                req.onePhaseCommit(), req.txSize(), req.transactionNodes(), securitySubjectId(ctx),
                req.taskNameHash(), req.txLabel(), originTx);

            tx = ctx.tm().onCreated(null, tx);

            if (tx != null)
                tx.topologyVersion(req.topologyVersion());
            else
                U.warn(log, "Failed to create local transaction (was transaction rolled back?) [xid=" +
                    req.version() + ", req=" + req + ']');
        }
        finally {
            if (tx != null)
                req.txState(tx.txState());

            if (top != null)
                top.readUnlock();
        }
    }

    if (tx != null) {
        req.txState(tx.txState());

        if (req.explicitLock())
            tx.explicitLock(true);

        tx.transactionNodes(req.transactionNodes());

        if (req.near())
            tx.nearOnOriginatingNode(true);

        if (req.onePhaseCommit()) {
            assert req.last() : req;

            tx.onePhaseCommit(true);
        }

        if (req.needReturnValue())
            tx.needReturnValue(true);

        IgniteInternalFuture<GridNearTxPrepareResponse> fut = tx.prepareAsync(req);

        if (tx.isRollbackOnly() && !tx.commitOnPrepare()) {
            if (tx.state() != TransactionState.ROLLED_BACK && tx.state() != TransactionState.ROLLING_BACK)
                tx.rollbackDhtLocalAsync();
        }

        final GridDhtTxLocal tx0 = tx;

        fut.listen(new CI1<IgniteInternalFuture<?>>() {
            @Override public void apply(IgniteInternalFuture<?> txFut) {
                try {
                    txFut.get();
                }
                catch (IgniteCheckedException e) {
                    // Just in case.
                    tx0.setRollbackOnly();

                    if (!X.hasCause(e, IgniteTxOptimisticCheckedException.class) &&
                        !X.hasCause(e, IgniteFutureCancelledException.class) &&
                        !ctx.kernalContext().isStopping())
                        U.error(log, "Failed to prepare DHT transaction: " + tx0, e);
                }
            }
        });

        return fut;
    }
    else
        return new GridFinishedFuture<>((GridNearTxPrepareResponse)null);
}
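The topology handling in the first-client-request branch follows a recurring pattern around GridDhtPartitionTopology: acquire the read lock, check the topology future, and answer with a remap instead of preparing on a stale topology. A condensed sketch of just that pattern is below; cacheCtx, reqTopVer, needRemap, remapResponse and prepareOnTopology are placeholders, not real API.

// Condensed read-lock / remap pattern (illustrative placeholders only).
GridDhtPartitionTopology top = cacheCtx.topology();

top.readLock();

try {
    GridDhtTopologyFuture topFut = top.topologyVersionFuture();

    // Exchange still in progress, or the near node mapped onto an older version:
    // tell it to remap rather than prepare on the wrong topology.
    if (!topFut.isDone() || needRemap(reqTopVer, top.readyTopologyVersion()))
        return remapResponse(top.lastTopologyChangeVersion());

    // Safe to prepare on top.readyTopologyVersion() while the read lock is held.
    return prepareOnTopology(top.readyTopologyVersion());
}
finally {
    top.readUnlock();
}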
Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology in project ignite by apache.
The class CacheAffinitySharedManager, method initAffinityOnNodeJoin.
/**
 * @param evts Discovery events processed during exchange.
 * @param addedOnExchnage {@code True} if the cache group was added during this exchange.
 * @param grpHolder Group holder.
 * @param rebalanceInfo Rebalance information on the coordinator, or {@code null} on other nodes.
 * @param latePrimary If {@code true}, delays switching to a new primary until it becomes an owner.
 */
private void initAffinityOnNodeJoin(
    ExchangeDiscoveryEvents evts,
    boolean addedOnExchnage,
    CacheGroupHolder grpHolder,
    @Nullable WaitRebalanceInfo rebalanceInfo,
    boolean latePrimary
) {
    GridAffinityAssignmentCache aff = grpHolder.affinity();

    if (addedOnExchnage) {
        if (!aff.lastVersion().equals(evts.topologyVersion()))
            calculateAndInit(evts, aff, evts.topologyVersion());

        return;
    }

    AffinityTopologyVersion affTopVer = aff.lastVersion();

    assert affTopVer.topologyVersion() > 0 : "Affinity is not initialized [grp=" + aff.cacheOrGroupName() +
        ", topVer=" + affTopVer + ", node=" + cctx.localNodeId() + ']';

    List<List<ClusterNode>> curAff = aff.assignments(affTopVer);

    assert aff.idealAssignment() != null : "Previous assignment is not available.";

    List<List<ClusterNode>> idealAssignment = aff.calculate(evts.topologyVersion(), evts, evts.discoveryCache()).assignment();
    List<List<ClusterNode>> newAssignment = null;

    if (latePrimary) {
        for (int p = 0; p < idealAssignment.size(); p++) {
            List<ClusterNode> newNodes = idealAssignment.get(p);
            List<ClusterNode> curNodes = curAff.get(p);

            ClusterNode curPrimary = !curNodes.isEmpty() ? curNodes.get(0) : null;
            ClusterNode newPrimary = !newNodes.isEmpty() ? newNodes.get(0) : null;

            if (curPrimary != null && newPrimary != null && !curPrimary.equals(newPrimary)) {
                assert cctx.discovery().node(evts.topologyVersion(), curPrimary.id()) != null : curPrimary;

                List<ClusterNode> nodes0 = latePrimaryAssignment(aff, p, curPrimary, newNodes, rebalanceInfo);

                if (newAssignment == null)
                    newAssignment = new ArrayList<>(idealAssignment);

                newAssignment.set(p, nodes0);
            }

            GridDhtPartitionTopology top = grpHolder.topology(evts.discoveryCache());

            if (rebalanceInfo != null) {
                List<ClusterNode> owners = top.owners(p, evts.topologyVersion());

                // A group with lost partitions never gets rebalanced so should not be added to waitInfo.
                if (!owners.isEmpty() && !owners.containsAll(idealAssignment.get(p)) && !top.lostPartitions().contains(p))
                    rebalanceInfo.add(aff.groupId(), p, newNodes);
            }
        }
    }

    if (newAssignment == null)
        newAssignment = idealAssignment;

    aff.initialize(evts.topologyVersion(), newAssignment);
}
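To make the late-primary branch easier to follow, a hedged sketch of the core comparison is given below: it walks the ideal assignment and reports every partition whose primary would change, which is exactly the set that latePrimaryAssignment() keeps on the old primary until rebalance finishes. The helper name and return shape are assumptions for illustration only.

// Sketch: partitions whose primary differs between the current and ideal assignment.
// Returns partition id -> {current primary, ideal primary}; illustrative only.
static Map<Integer, ClusterNode[]> changedPrimaries(List<List<ClusterNode>> curAff, List<List<ClusterNode>> idealAff) {
    Map<Integer, ClusterNode[]> res = new HashMap<>();

    for (int p = 0; p < idealAff.size(); p++) {
        ClusterNode curPrimary = curAff.get(p).isEmpty() ? null : curAff.get(p).get(0);
        ClusterNode newPrimary = idealAff.get(p).isEmpty() ? null : idealAff.get(p).get(0);

        if (curPrimary != null && newPrimary != null && !curPrimary.equals(newPrimary))
            res.put(p, new ClusterNode[] {curPrimary, newPrimary});
    }

    return res;
}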
Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology in project ignite by apache.
The class GridDhtPartitionsExchangeFuture, method resetLostPartitions.
/**
* @param cacheNames Cache names.
*/
private void resetLostPartitions(Collection<String> cacheNames) {
    assert !exchCtx.mergeExchanges();

    try {
        doInParallelUninterruptibly(
            U.availableThreadCount(cctx.kernalContext(), GridIoPolicy.SYSTEM_POOL, 2),
            cctx.kernalContext().pools().getSystemExecutorService(),
            cctx.affinity().caches().values(),
            desc -> {
                if (desc.cacheConfiguration().getCacheMode() == CacheMode.LOCAL)
                    return null;

                if (cacheNames.contains(desc.cacheName())) {
                    CacheGroupContext grp = cctx.cache().cacheGroup(desc.groupId());

                    GridDhtPartitionTopology top = grp != null
                        ? grp.topology()
                        : cctx.exchange().clientTopology(desc.groupId(), events().discoveryCache());

                    top.resetLostPartitions(initialVersion());
                }

                return null;
            });
    }
    catch (IgniteCheckedException e) {
        throw new IgniteException(e);
    }
}
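For context, this internal reset is normally reached through the public API once the nodes that owned the lost partitions are back in the cluster; a brief sketch, assuming two illustrative cache names:

// Sketch: reset lost partitions for specific caches after the owning nodes have rejoined.
Ignite ignite = Ignition.ignite();

ignite.resetLostPartitions(java.util.Arrays.asList("myCache", "myOtherCache"));

The control.sh script offers an equivalent command (--cache reset_lost_partitions) when the cluster is managed from the command line.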