Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition in project ignite by apache.
The class GridMapQueryExecutor, method reservePartitions.
/**
* @param cacheIds Cache IDs.
* @param topVer Topology version.
* @param explicitParts Explicit partitions list.
* @param reserved Reserved list.
* @return {@code true} If all the needed partitions successfully reserved.
* @throws IgniteCheckedException If failed.
*/
private boolean reservePartitions(@Nullable List<Integer> cacheIds, AffinityTopologyVersion topVer, final int[] explicitParts, List<GridReservable> reserved) throws IgniteCheckedException {
assert topVer != null;
if (F.isEmpty(cacheIds))
return true;
Collection<Integer> partIds = wrap(explicitParts);
for (int i = 0; i < cacheIds.size(); i++) {
GridCacheContext<?, ?> cctx = ctx.cache().context().cacheContext(cacheIds.get(i));
// Cache was not found, probably was not deployed yet.
if (cctx == null)
return false;
if (cctx.isLocal() || !cctx.rebalanceEnabled())
continue;
// For replicated cache topology version does not make sense.
final T2<String, AffinityTopologyVersion> grpKey = new T2<>(cctx.name(), cctx.isReplicated() ? null : topVer);
GridReservable r = reservations.get(grpKey);
if (explicitParts == null && r != null) {
// Try to reserve group partition if any and no explicits.
if (r != ReplicatedReservation.INSTANCE) {
if (!r.reserve())
// We need explicit partitions here -> retry.
return false;
reserved.add(r);
}
} else {
// Try to reserve partitions one by one.
int partsCnt = cctx.affinity().partitions();
if (cctx.isReplicated()) {
// Check all the partitions are in owning state for replicated cache.
if (r == null) {
// Check only once.
for (int p = 0; p < partsCnt; p++) {
GridDhtLocalPartition part = partition(cctx, p);
// We don't need to reserve partitions because they will not be evicted in replicated caches.
if (part == null || part.state() != OWNING)
return false;
}
// Mark that we checked this replicated cache.
reservations.putIfAbsent(grpKey, ReplicatedReservation.INSTANCE);
}
} else {
// Reserve primary partitions for partitioned cache (if no explicit given).
if (explicitParts == null)
partIds = cctx.affinity().primaryPartitions(ctx.localNodeId(), topVer);
for (int partId : partIds) {
GridDhtLocalPartition part = partition(cctx, partId);
if (part == null || part.state() != OWNING || !part.reserve())
return false;
reserved.add(part);
// Double check that we are still in owning state and partition contents are not cleared.
if (part.state() != OWNING)
return false;
}
if (explicitParts == null) {
// We reserved all the primary partitions for cache, attempt to add group reservation.
GridDhtPartitionsReservation grp = new GridDhtPartitionsReservation(topVer, cctx, "SQL");
if (grp.register(reserved.subList(reserved.size() - partIds.size(), reserved.size()))) {
if (reservations.putIfAbsent(grpKey, grp) != null)
throw new IllegalStateException("Reservation already exists.");
grp.onPublish(new CI1<GridDhtPartitionsReservation>() {
@Override
public void apply(GridDhtPartitionsReservation r) {
reservations.remove(grpKey, r);
}
});
}
}
}
}
}
return true;
}
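The method above follows a reserve-then-recheck discipline: a partition is trusted only after reserve() succeeds and its state is still OWNING, and everything reserved so far must be released on any failure. Below is a minimal sketch of that discipline in isolation, assuming the internal APIs shown in the snippet; the helper class, its method names, and the release-on-failure loop are illustrative, not part of GridMapQueryExecutor, and import locations may differ across Ignite versions.

import java.util.List;

import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
import org.apache.ignite.internal.processors.cache.GridCacheContext;
import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition;

import static org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionState.OWNING;

/** Illustrative helper (hypothetical), not part of Ignite. */
class PartitionReservationSketch {
    /**
     * Tries to reserve the given partitions; releases everything already reserved and returns
     * {@code false} if any partition is missing, not OWNING, or loses OWNING state after reservation.
     */
    static boolean reserveAll(GridCacheContext<?, ?> cctx, Iterable<Integer> partIds, List<GridDhtLocalPartition> reserved) {
        for (int partId : partIds) {
            // 'false': do not create the partition if it does not exist locally.
            GridDhtLocalPartition part = cctx.topology().localPartition(partId, AffinityTopologyVersion.NONE, false);

            // Reservation prevents eviction, but only makes sense while the partition is OWNING.
            if (part == null || part.state() != OWNING || !part.reserve()) {
                releaseAll(reserved);

                return false;
            }

            reserved.add(part);

            // Double check: the partition may have been moved out of OWNING concurrently.
            if (part.state() != OWNING) {
                releaseAll(reserved);

                return false;
            }
        }

        return true;
    }

    /** Releases all previously reserved partitions. */
    private static void releaseAll(List<GridDhtLocalPartition> reserved) {
        for (GridDhtLocalPartition p : reserved)
            p.release();
    }
}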
Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition in project ignite by apache.
The class GridCommonAbstractTest, method printPartitionState.
/**
* @param cacheName Cache name.
* @param firstParts Number of partitions to print (the first {@code firstParts} partitions are printed).
*
* Prints partition state for the cache.
*/
protected void printPartitionState(String cacheName, int firstParts) {
StringBuilder sb = new StringBuilder();
sb.append("----preload sync futures----\n");
for (Ignite ig : G.allGrids()) {
IgniteKernal k = ((IgniteKernal) ig);
IgniteInternalFuture<?> syncFut = k.internalCache(cacheName).preloader().syncFuture();
sb.append("nodeId=").append(k.context().localNodeId()).append(" isDone=").append(syncFut.isDone()).append("\n");
}
sb.append("----rebalance futures----\n");
for (Ignite ig : G.allGrids()) {
IgniteKernal k = ((IgniteKernal) ig);
IgniteInternalFuture<?> f = k.internalCache(cacheName).preloader().rebalanceFuture();
try {
sb.append("nodeId=").append(k.context().localNodeId()).append(" isDone=").append(f.isDone()).append(" res=").append(f.isDone() ? f.get() : "N/A").append(" topVer=").append((U.hasField(f, "topVer") ? U.field(f, "topVer") : "[unknown] may be it is finished future")).append("\n");
Map<UUID, T2<Long, Collection<Integer>>> remaining = U.field(f, "remaining");
sb.append("remaining:");
if (remaining.isEmpty())
sb.append("empty\n");
else
for (Map.Entry<UUID, T2<Long, Collection<Integer>>> e : remaining.entrySet())
sb.append("\nuuid=").append(e.getKey())
.append(" startTime=").append(e.getValue().getKey())
.append(" parts=").append(Arrays.toString(e.getValue().getValue().toArray()))
.append("\n");
} catch (Throwable e) {
log.error(e.getMessage());
}
}
sb.append("----partition state----\n");
for (Ignite g : G.allGrids()) {
IgniteKernal g0 = (IgniteKernal) g;
sb.append("localNodeId=").append(g0.localNode().id()).append(" grid=").append(g0.name()).append("\n");
IgniteCacheProxy<?, ?> cache = g0.context().cache().jcache(cacheName);
GridDhtCacheAdapter<?, ?> dht = dht(cache);
GridDhtPartitionTopology top = dht.topology();
int parts = firstParts == 0 ? cache.context().config().getAffinity().partitions() : firstParts;
for (int p = 0; p < parts; p++) {
AffinityTopologyVersion readyVer = dht.context().shared().exchange().readyAffinityVersion();
Collection<UUID> affNodes = F.nodeIds(dht.context().affinity().assignment(readyVer).idealAssignment().get(p));
GridDhtLocalPartition part = top.localPartition(p, AffinityTopologyVersion.NONE, false);
sb.append("local part=");
if (part != null)
sb.append(p).append(" state=").append(part.state());
else
sb.append(p).append(" is null");
sb.append(" isAffNode=").append(affNodes.contains(g0.localNode().id())).append("\n");
for (UUID nodeId : F.nodeIds(g0.context().discovery().allNodes())) {
if (!nodeId.equals(g0.localNode().id()))
sb.append(" nodeId=").append(nodeId).append(" part=").append(p).append(" state=").append(top.partitionState(nodeId, p)).append(" isAffNode=").append(affNodes.contains(nodeId)).append("\n");
}
}
sb.append("\n");
}
log.info("dump partitions state for <" + cacheName + ">:\n" + sb.toString());
}
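For quick diagnostics, the topology lookup used above can be reduced to a small helper that dumps the state of the first few local partitions of a cache. A sketch assuming the same internal calls as printPartitionState() (the class and method names are hypothetical; import locations may vary by Ignite version):

import org.apache.ignite.Ignite;
import org.apache.ignite.internal.IgniteKernal;
import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition;
import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionTopology;

/** Hypothetical diagnostic helper built from the same internal calls as printPartitionState(). */
class PartitionStateDump {
    /** Returns a dump of the state of the first {@code firstParts} local partitions of the cache. */
    static String dump(Ignite ignite, String cacheName, int firstParts) {
        IgniteKernal kernal = (IgniteKernal) ignite;

        GridDhtPartitionTopology top = kernal.context().cache().cache(cacheName).context().topology();

        StringBuilder sb = new StringBuilder("localNodeId=" + kernal.localNode().id() + "\n");

        for (int p = 0; p < firstParts; p++) {
            // 'false': do not create the partition if it is not present on this node.
            GridDhtLocalPartition part = top.localPartition(p, AffinityTopologyVersion.NONE, false);

            sb.append("part=").append(p)
                .append(" state=").append(part == null ? "null (not local)" : String.valueOf(part.state()))
                .append("\n");
        }

        return sb.toString();
    }
}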
Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition in project ignite by apache.
The class IgniteTxHandler, method startRemoteTx.
/**
* @param nodeId Node ID.
* @param req Request.
* @param res Response.
* @return Remote transaction.
* @throws IgniteCheckedException If failed.
*/
@Nullable
GridDhtTxRemote startRemoteTx(UUID nodeId, GridDhtTxPrepareRequest req, GridDhtTxPrepareResponse res) throws IgniteCheckedException {
if (!F.isEmpty(req.writes())) {
GridDhtTxRemote tx = ctx.tm().tx(req.version());
if (tx == null) {
boolean single = req.last() && req.writes().size() == 1;
tx = new GridDhtTxRemote(ctx, req.nearNodeId(), req.futureId(), nodeId,
req.topologyVersion(), req.version(), null, req.system(), req.policy(),
req.concurrency(), req.isolation(), req.isInvalidate(), req.timeout(),
req.writes() != null ? Math.max(req.writes().size(), req.txSize()) : req.txSize(),
req.nearXidVersion(), req.transactionNodes(), req.subjectId(), req.taskNameHash(), single);
tx.writeVersion(req.writeVersion());
tx = ctx.tm().onCreated(null, tx);
if (tx == null || !ctx.tm().onStarted(tx)) {
if (log.isDebugEnabled())
log.debug("Attempt to start a completed transaction (will ignore): " + tx);
return null;
}
if (ctx.discovery().node(nodeId) == null) {
tx.state(ROLLING_BACK);
tx.state(ROLLED_BACK);
ctx.tm().uncommitTx(tx);
return null;
}
} else {
tx.writeVersion(req.writeVersion());
tx.transactionNodes(req.transactionNodes());
}
if (!tx.isSystemInvalidate()) {
int idx = 0;
for (IgniteTxEntry entry : req.writes()) {
GridCacheContext cacheCtx = entry.context();
int part = cacheCtx.affinity().partition(entry.key());
GridDhtLocalPartition locPart = cacheCtx.topology().localPartition(part, req.topologyVersion(), false);
if (locPart != null && locPart.reserve()) {
try {
tx.addWrite(entry, ctx.deploy().globalLoader());
if (isNearEnabled(cacheCtx) && req.invalidateNearEntry(idx))
invalidateNearEntry(cacheCtx, entry.key(), req.version());
if (req.needPreloadKey(idx)) {
GridCacheEntryEx cached = entry.cached();
if (cached == null)
cached = cacheCtx.cache().entryEx(entry.key(), req.topologyVersion());
GridCacheEntryInfo info = cached.info();
if (info != null && !info.isNew() && !info.isDeleted())
res.addPreloadEntry(info);
}
if (cacheCtx.readThroughConfigured() && !entry.skipStore() && entry.op() == TRANSFORM && entry.oldValueOnPrimary() && !entry.hasValue()) {
while (true) {
try {
GridCacheEntryEx cached = entry.cached();
if (cached == null) {
cached = cacheCtx.cache().entryEx(entry.key(), req.topologyVersion());
entry.cached(cached);
}
CacheObject val = cached.innerGet(
/*ver*/null,
tx,
/*readThrough*/false,
/*updateMetrics*/false,
/*evt*/false,
tx.subjectId(),
/*transformClo*/null,
tx.resolveTaskName(),
/*expiryPlc*/null,
/*keepBinary*/true);
if (val == null)
val = cacheCtx.toCacheObject(cacheCtx.store().load(null, entry.key()));
if (val != null)
entry.readValue(val);
break;
} catch (GridCacheEntryRemovedException ignored) {
if (log.isDebugEnabled())
log.debug("Got entry removed exception, will retry: " + entry.txKey());
entry.cached(cacheCtx.cache().entryEx(entry.key(), req.topologyVersion()));
}
}
}
} catch (GridDhtInvalidPartitionException e) {
tx.addInvalidPartition(cacheCtx, e.partition());
tx.clearEntry(entry.txKey());
} finally {
locPart.release();
}
} else
tx.addInvalidPartition(cacheCtx, part);
idx++;
}
}
// Prepare prior to reordering, so the pending locks added
// in prepare phase will get properly ordered as well.
tx.prepareRemoteTx();
if (req.last()) {
assert !F.isEmpty(req.transactionNodes()) : "Received last prepare request with empty transaction nodes: " + req;
tx.state(PREPARED);
}
res.invalidPartitionsByCacheId(tx.invalidPartitions());
if (tx.empty() && req.last()) {
tx.rollbackRemoteTx();
return null;
}
return tx;
}
return null;
}
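The per-entry handling above relies on a reserve/try/finally guard: the key's partition is resolved and reserved before the write, and released in the finally block so an exception cannot leak the reservation; a failed reservation means the partition must be treated as invalid. A stripped-down sketch of that guard, assuming the same internal APIs (the helper and its Runnable-based signature are illustrative, not Ignite code):

import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
import org.apache.ignite.internal.processors.cache.GridCacheContext;
import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition;

/** Hypothetical sketch of the reserve/try/finally pattern used in startRemoteTx(). */
class PartitionGuardSketch {
    /**
     * Runs {@code work} with the key's partition reserved so it cannot be evicted mid-operation.
     * Returns {@code false} if the partition is absent or could not be reserved; the caller is then
     * expected to mark the partition invalid, as startRemoteTx() does via addInvalidPartition().
     */
    static boolean withReservedPartition(GridCacheContext<?, ?> cctx, Object key, AffinityTopologyVersion topVer, Runnable work) {
        int partId = cctx.affinity().partition(key);

        GridDhtLocalPartition locPart = cctx.topology().localPartition(partId, topVer, false);

        if (locPart == null || !locPart.reserve())
            return false;

        try {
            work.run();

            return true;
        }
        finally {
            // Always release in finally so a failed write cannot leak the reservation.
            locPart.release();
        }
    }
}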
Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition in project ignite by apache.
The class CacheDeferredDeleteQueueTest, method testQueue.
/**
* @param atomicityMode Cache atomicity mode.
* @param nearCache {@code true} if a near cache should be created.
*
* @throws Exception If failed.
*/
private void testQueue(CacheAtomicityMode atomicityMode, boolean nearCache) throws Exception {
CacheConfiguration<Integer, Integer> ccfg = new CacheConfiguration<>(DEFAULT_CACHE_NAME);
ccfg.setCacheMode(PARTITIONED);
ccfg.setAtomicityMode(atomicityMode);
ccfg.setWriteSynchronizationMode(FULL_SYNC);
ccfg.setBackups(1);
if (nearCache)
ccfg.setNearConfiguration(new NearCacheConfiguration<Integer, Integer>());
IgniteCache<Integer, Integer> cache = ignite(0).createCache(ccfg);
try {
final int KEYS = cache.getConfiguration(CacheConfiguration.class).getAffinity().partitions() * 3;
for (int i = 0; i < KEYS; i++) cache.put(i, i);
for (int i = 0; i < KEYS; i++) cache.remove(i);
boolean wait = GridTestUtils.waitForCondition(new GridAbsPredicate() {
@Override
public boolean apply() {
for (int i = 0; i < NODES; i++) {
final GridDhtPartitionTopology top = ((IgniteKernal) ignite(i)).context().cache().cache(DEFAULT_CACHE_NAME).context().topology();
for (GridDhtLocalPartition p : top.currentLocalPartitions()) {
Collection<Object> rmvQueue = GridTestUtils.getFieldValue(p, "rmvQueue");
if (!rmvQueue.isEmpty() || p.dataStore().size() != 0)
return false;
}
}
return true;
}
}, 5000);
assertTrue("Failed to wait for rmvQueue cleanup.", wait);
} finally {
ignite(0).destroyCache(ccfg.getName());
}
}
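The condition polled by waitForCondition() can be read as a standalone predicate: on every node, every local partition must have an empty deferred-delete queue and an empty data store. A sketch of that predicate for a single node, under the same assumptions as the test (the class name is hypothetical; rmvQueue is a private field, so it is still read reflectively via GridTestUtils):

import java.util.Collection;

import org.apache.ignite.Ignite;
import org.apache.ignite.internal.IgniteKernal;
import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition;
import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionTopology;
import org.apache.ignite.testframework.GridTestUtils;

/** Hypothetical predicate extracted from the waitForCondition() body above. */
class DeferredDeleteCheck {
    /** Returns {@code true} if every local partition of the cache has an empty removal queue and no data. */
    static boolean partitionsDrained(Ignite ignite, String cacheName) {
        GridDhtPartitionTopology top =
            ((IgniteKernal) ignite).context().cache().cache(cacheName).context().topology();

        for (GridDhtLocalPartition p : top.currentLocalPartitions()) {
            // rmvQueue is the deferred-delete queue; it has no public accessor, hence reflection.
            Collection<Object> rmvQueue = GridTestUtils.getFieldValue(p, "rmvQueue");

            if (!rmvQueue.isEmpty() || p.dataStore().size() != 0)
                return false;
        }

        return true;
    }
}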
Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition in project ignite by apache.
The class GridCacheRebalancingSyncSelfTest, method checkPartitionMapExchangeFinished.
/**
 * Checks that partition map exchange has finished: every local partition is OWNING and remote partition maps agree with the local view.
 */
protected void checkPartitionMapExchangeFinished() {
for (Ignite g : G.allGrids()) {
IgniteKernal g0 = (IgniteKernal) g;
for (IgniteCacheProxy<?, ?> c : g0.context().cache().jcaches()) {
CacheConfiguration cfg = c.context().config();
if (cfg.getCacheMode() != LOCAL && cfg.getRebalanceMode() != NONE) {
GridDhtCacheAdapter<?, ?> dht = dht(c);
GridDhtPartitionTopology top = dht.topology();
List<GridDhtLocalPartition> locs = top.localPartitions();
for (GridDhtLocalPartition loc : locs) {
GridDhtPartitionState actl = loc.state();
boolean res = GridDhtPartitionState.OWNING.equals(actl);
if (!res)
printPartitionState(c);
assertTrue("Wrong local partition state part=" + loc.id() + ", should be OWNING [state=" + actl + "], node=" + g0.name() + " cache=" + c.getName(), res);
Collection<ClusterNode> affNodes = g0.affinity(cfg.getName()).mapPartitionToPrimaryAndBackups(loc.id());
assertTrue(affNodes.contains(g0.localNode()));
}
for (Ignite remote : G.allGrids()) {
IgniteKernal remote0 = (IgniteKernal) remote;
IgniteCacheProxy<?, ?> remoteC = remote0.context().cache().jcache(cfg.getName());
GridDhtCacheAdapter<?, ?> remoteDht = dht(remoteC);
GridDhtPartitionTopology remoteTop = remoteDht.topology();
GridDhtPartitionMap pMap = remoteTop.partitionMap(true).get(((IgniteKernal) g).getLocalNodeId());
assertEquals(pMap.size(), locs.size());
for (Map.Entry entry : pMap.entrySet()) {
assertTrue("Wrong remote partition state part=" + entry.getKey() + ", should be OWNING [state=" + entry.getValue() + "], node=" + remote.name() + " cache=" + c.getName(), entry.getValue() == GridDhtPartitionState.OWNING);
}
for (GridDhtLocalPartition loc : locs) assertTrue(pMap.containsKey(loc.id()));
}
}
}
}
log.info("checkPartitionMapExchangeFinished finished");
}
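The assertions above compare two views of the same partition: the local GridDhtLocalPartition object and the partition map a remote node received during exchange. A minimal sketch of that cross-check, assuming the internal APIs used in the test (the helper class and method are illustrative; import locations may differ across Ignite versions):

import java.util.UUID;

import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition;
import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionState;
import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionTopology;

/** Hypothetical consistency check mirroring checkPartitionMapExchangeFinished(). */
class ExchangeConsistencyCheck {
    /**
     * Returns {@code true} if every local partition is OWNING and the remote node's partition map
     * also reports it as OWNING for {@code locNodeId}.
     */
    static boolean viewsAgree(GridDhtPartitionTopology locTop, GridDhtPartitionTopology remoteTop, UUID locNodeId) {
        for (GridDhtLocalPartition loc : locTop.localPartitions()) {
            if (loc.state() != GridDhtPartitionState.OWNING)
                return false;

            // The remote view comes from the exchanged partition map, not from local partition objects.
            if (remoteTop.partitionState(locNodeId, loc.id()) != GridDhtPartitionState.OWNING)
                return false;
        }

        return true;
    }
}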