use of org.apache.ignite.internal.processors.cache.GridCacheContext in project ignite by apache.
the class GridCacheAbstractQueueFailoverDataConsistencySelfTest method primaryQueueNode.
/**
 * @param queue Queue.
 * @return Index of the grid node that is primary for the queue's header key.
 * @throws Exception If failed.
 */
private int primaryQueueNode(IgniteQueue queue) throws Exception {
    GridCacheContext cctx = GridTestUtils.getFieldValue(queue, "cctx");
    GridCacheAffinityManager aff = cctx.affinity();
    CachePeekMode[] modes = new CachePeekMode[] { CachePeekMode.ALL };
    for (int i = 0; i < gridCount(); i++) {
        for (Cache.Entry e : grid(i).context().cache().internalCache(cctx.name()).localEntries(modes)) {
            Object key = e.getKey();
            if (aff.primaryByKey(grid(i).localNode(), key, AffinityTopologyVersion.NONE) && key instanceof GridCacheQueueHeaderKey)
                return i;
        }
    }
    fail("Failed to find primary node for queue header.");
    return -1;
}
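The helper above only answers which grid index currently holds the primary copy of the queue's header key. A failover test built on it would typically stop exactly that node and then assert that the surviving nodes still agree on the queue's contents. The sketch below illustrates that usage; it is not taken from the Ignite sources and assumes it sits in the same test class as primaryQueueNode, so the usual GridCommonAbstractTest helpers (stopGrid, awaitPartitionMapExchange) and an IgniteQueue created elsewhere in the test are available.

private void checkQueueSurvivesPrimaryFailure(IgniteQueue<Integer> queue) throws Exception {
    for (int i = 0; i < 100; i++)
        queue.add(i);

    // Grid index that is primary for the queue header key (see the method above).
    int primary = primaryQueueNode(queue);

    // Kill the primary and wait for the new affinity to settle.
    stopGrid(primary);

    awaitPartitionMapExchange();

    // A backup must now expose exactly the same elements, in the same order.
    for (int i = 0; i < 100; i++)
        assertEquals(Integer.valueOf(i), queue.poll());

    assertTrue(queue.isEmpty());
}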
use of org.apache.ignite.internal.processors.cache.GridCacheContext in project ignite by apache.
the class CacheLateAffinityAssignmentTest method checkAffinity.
/**
 * @param expNodes Expected number of nodes.
 * @param topVer Topology version.
 * @param expIdeal If {@code true} expect ideal affinity assignment.
 * @param checkPublicApi {@code True} to check {@link Affinity} API.
 * @throws Exception If failed.
 * @return Affinity assignments.
 */
@SuppressWarnings("unchecked")
private Map<String, List<List<ClusterNode>>> checkAffinity(int expNodes, AffinityTopologyVersion topVer, boolean expIdeal, boolean checkPublicApi) throws Exception {
    List<Ignite> nodes = G.allGrids();
    Map<String, List<List<ClusterNode>>> aff = new HashMap<>();
    for (Ignite node : nodes) {
        log.info("Check affinity [node=" + node.name() + ", topVer=" + topVer + ", expIdeal=" + expIdeal + ']');
        IgniteKernal node0 = (IgniteKernal) node;
        IgniteInternalFuture<?> fut = node0.context().cache().context().exchange().affinityReadyFuture(topVer);
        if (fut != null)
            fut.get();
        for (GridCacheContext cctx : node0.context().cache().context().cacheContexts()) {
            if (cctx.startTopologyVersion().compareTo(topVer) > 0)
                continue;
            List<List<ClusterNode>> aff1 = aff.get(cctx.name());
            List<List<ClusterNode>> aff2 = cctx.affinity().assignments(topVer);
            if (aff1 == null)
                aff.put(cctx.name(), aff2);
            else
                assertAffinity(aff1, aff2, node, cctx.name(), topVer);
            if (expIdeal) {
                List<List<ClusterNode>> ideal = idealAssignment(topVer, cctx.cacheId());
                assertAffinity(ideal, aff2, node, cctx.name(), topVer);
                if (checkPublicApi) {
                    Affinity<Object> cacheAff = node.affinity(cctx.name());
                    for (int i = 0; i < 10; i++) {
                        int part = cacheAff.partition(i);
                        List<ClusterNode> partNodes = ideal.get(part);
                        if (partNodes.isEmpty()) {
                            try {
                                cacheAff.mapKeyToNode(i);
                                fail();
                            } catch (IgniteException ignore) {
                                // No-op.
                            }
                        } else {
                            ClusterNode primary = cacheAff.mapKeyToNode(i);
                            assertEquals(primary, partNodes.get(0));
                        }
                    }
                    for (int p = 0; p < ideal.size(); p++) {
                        List<ClusterNode> exp = ideal.get(p);
                        Collection<ClusterNode> partNodes = cacheAff.mapPartitionToPrimaryAndBackups(p);
                        assertEqualsCollections(exp, partNodes);
                    }
                }
            }
        }
    }
    assertEquals(expNodes, nodes.size());
    if (!skipCheckOrder)
        checkOrderCounters(expNodes, topVer);
    return aff;
}
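checkAffinity is driven after every topology change in this test: it waits for the affinity-ready future on each node, compares the per-cache assignments across all nodes, and optionally validates them against the ideal assignment and the public Affinity API. A hedged sketch of a typical call sequence is shown below; it is illustrative only, and the AffinityTopologyVersion values (in particular the minor version expected once the late affinity exchange completes) are assumptions rather than values taken from the original test.

private void checkAffinityAfterJoin() throws Exception {
    startGrid(0);
    startGrid(1);

    // Right after the join only the current (possibly non-ideal) assignment can be verified.
    checkAffinity(2, new AffinityTopologyVersion(2, 0), false, true);

    // Once the late affinity assignment exchange finishes, the ideal assignment is expected.
    checkAffinity(2, new AffinityTopologyVersion(2, 1), true, true);
}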
use of org.apache.ignite.internal.processors.cache.GridCacheContext in project ignite by apache.
the class GridDistributedTxPrepareRequest method prepareMarshal.
/**
 * {@inheritDoc}
 * @param ctx Cache shared context.
 */
@Override
public void prepareMarshal(GridCacheSharedContext ctx) throws IgniteCheckedException {
    super.prepareMarshal(ctx);
    if (writes != null)
        marshalTx(writes, ctx);
    if (reads != null)
        marshalTx(reads, ctx);
    if (dhtVers != null && dhtVerKeys == null) {
        for (IgniteTxKey key : dhtVers.keySet()) {
            GridCacheContext cctx = ctx.cacheContext(key.cacheId());
            key.prepareMarshal(cctx);
        }
        dhtVerKeys = dhtVers.keySet();
        dhtVerVals = dhtVers.values();
    }
    if (txNodesMsg == null)
        txNodesMsg = F.viewReadOnly(txNodes, COL_TO_MSG);
}
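The unmarshal side of the same message resolves each key's GridCacheContext by cache id in exactly the same way before deserializing it. The fragment below is a simplified sketch of that mirror step, not the full GridDistributedTxPrepareRequest.finishUnmarshal implementation, which also has to handle the other fields marshalled above (reads, writes and the transaction-nodes map).

@Override
public void finishUnmarshal(GridCacheSharedContext ctx, ClassLoader ldr) throws IgniteCheckedException {
    super.finishUnmarshal(ctx, ldr);
    if (dhtVerKeys != null) {
        for (IgniteTxKey key : dhtVerKeys) {
            // Resolve the cache context by id, mirroring prepareMarshal() above.
            GridCacheContext cctx = ctx.cacheContext(key.cacheId());
            key.finishUnmarshal(cctx, ldr);
        }
    }
}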
use of org.apache.ignite.internal.processors.cache.GridCacheContext in project ignite by apache.
the class GridDistributedTxRemoteAdapter method commitIfLocked.
/**
 * @throws IgniteCheckedException If commit failed.
 */
@SuppressWarnings({ "CatchGenericClass" })
private void commitIfLocked() throws IgniteCheckedException {
    if (state() == COMMITTING) {
        for (IgniteTxEntry txEntry : writeEntries()) {
            assert txEntry != null : "Missing transaction entry for tx: " + this;
            while (true) {
                GridCacheEntryEx entry = txEntry.cached();
                assert entry != null : "Missing cached entry for transaction entry: " + txEntry;
                try {
                    GridCacheVersion ver = txEntry.explicitVersion() != null ? txEntry.explicitVersion() : xidVer;
                    // If locks haven't been acquired yet, keep waiting.
                    if (!entry.lockedBy(ver)) {
                        if (log.isDebugEnabled())
                            log.debug("Transaction does not own lock for entry (will wait) [entry=" + entry + ", tx=" + this + ']');
                        return;
                    }
                    // While.
                    break;
                } catch (GridCacheEntryRemovedException ignore) {
                    if (log.isDebugEnabled())
                        log.debug("Got removed entry while committing (will retry): " + txEntry);
                    txEntry.cached(txEntry.context().cache().entryEx(txEntry.key(), topologyVersion()));
                }
            }
        }
        // Only one thread gets to commit.
        if (COMMIT_ALLOWED_UPD.compareAndSet(this, 0, 1)) {
            IgniteCheckedException err = null;
            Map<IgniteTxKey, IgniteTxEntry> writeMap = txState.writeMap();
            GridCacheReturnCompletableWrapper wrapper = null;
            if (!F.isEmpty(writeMap)) {
                GridCacheReturn ret = null;
                if (!near() && !local() && onePhaseCommit()) {
                    if (needReturnValue()) {
                        ret = new GridCacheReturn(null, cctx.localNodeId().equals(otherNodeId()), true, null, true);
                        // Originating node.
                        UUID origNodeId = otherNodeId();
                        cctx.tm().addCommittedTxReturn(this, wrapper = new GridCacheReturnCompletableWrapper(!cctx.localNodeId().equals(origNodeId) ? origNodeId : null));
                    } else
                        cctx.tm().addCommittedTx(this, this.nearXidVersion(), null);
                }
                // Register this transaction as completed prior to write-phase to
                // ensure proper lock ordering for removed entries.
                cctx.tm().addCommittedTx(this);
                AffinityTopologyVersion topVer = topologyVersion();
                WALPointer ptr = null;
                cctx.database().checkpointReadLock();
                try {
                    Collection<IgniteTxEntry> entries = near() || cctx.snapshot().needTxReadLogging() ? allEntries() : writeEntries();
                    List<DataEntry> dataEntries = null;
                    batchStoreCommit(writeMap().values());
                    try {
                        // Note that for near transactions we grab all entries.
                        for (IgniteTxEntry txEntry : entries) {
                            GridCacheContext cacheCtx = txEntry.context();
                            boolean replicate = cacheCtx.isDrEnabled();
                            try {
                                while (true) {
                                    try {
                                        GridCacheEntryEx cached = txEntry.cached();
                                        if (cached == null)
                                            txEntry.cached(cached = cacheCtx.cache().entryEx(txEntry.key(), topologyVersion()));
                                        if (near() && cacheCtx.dr().receiveEnabled()) {
                                            cached.markObsolete(xidVer);
                                            break;
                                        }
                                        GridNearCacheEntry nearCached = null;
                                        if (updateNearCache(cacheCtx, txEntry.key(), topVer))
                                            nearCached = cacheCtx.dht().near().peekExx(txEntry.key());
                                        if (!F.isEmpty(txEntry.entryProcessors()))
                                            txEntry.cached().unswap(false);
                                        IgniteBiTuple<GridCacheOperation, CacheObject> res = applyTransformClosures(txEntry, false, ret);
                                        GridCacheOperation op = res.get1();
                                        CacheObject val = res.get2();
                                        GridCacheVersion explicitVer = txEntry.conflictVersion();
                                        if (explicitVer == null)
                                            explicitVer = writeVersion();
                                        if (txEntry.ttl() == CU.TTL_ZERO)
                                            op = DELETE;
                                        boolean conflictNeedResolve = cacheCtx.conflictNeedResolve();
                                        GridCacheVersionConflictContext conflictCtx = null;
                                        if (conflictNeedResolve) {
                                            IgniteBiTuple<GridCacheOperation, GridCacheVersionConflictContext> drRes = conflictResolve(op, txEntry, val, explicitVer, cached);
                                            assert drRes != null;
                                            conflictCtx = drRes.get2();
                                            if (conflictCtx.isUseOld())
                                                op = NOOP;
                                            else if (conflictCtx.isUseNew()) {
                                                txEntry.ttl(conflictCtx.ttl());
                                                txEntry.conflictExpireTime(conflictCtx.expireTime());
                                            } else if (conflictCtx.isMerge()) {
                                                op = drRes.get1();
                                                val = txEntry.context().toCacheObject(conflictCtx.mergeValue());
                                                explicitVer = writeVersion();
                                                txEntry.ttl(conflictCtx.ttl());
                                                txEntry.conflictExpireTime(conflictCtx.expireTime());
                                            }
                                        } else
                                            // Nullify explicit version so that innerSet/innerRemove will work as usual.
                                            explicitVer = null;
                                        GridCacheVersion dhtVer = cached.isNear() ? writeVersion() : null;
                                        if (!near() && cacheCtx.group().persistenceEnabled() && cacheCtx.group().walEnabled() && op != NOOP && op != RELOAD && (op != READ || cctx.snapshot().needTxReadLogging())) {
                                            if (dataEntries == null)
                                                dataEntries = new ArrayList<>(entries.size());
                                            dataEntries.add(new DataEntry(cacheCtx.cacheId(), txEntry.key(), val, op, nearXidVersion(), writeVersion(), 0, txEntry.key().partition(), txEntry.updateCounter()));
                                        }
                                        if (op == CREATE || op == UPDATE) {
                                            // Invalidate only for near nodes (backups cannot be invalidated).
                                            if (isSystemInvalidate() || (isInvalidate() && cacheCtx.isNear()))
                                                cached.innerRemove(this, eventNodeId(), nodeId, false, true, true, txEntry.keepBinary(), txEntry.hasOldValue(), txEntry.oldValue(), topVer, null, replicate ? DR_BACKUP : DR_NONE, near() ? null : explicitVer, CU.subjectId(this, cctx), resolveTaskName(), dhtVer, txEntry.updateCounter());
                                            else {
                                                assert val != null : txEntry;
                                                GridCacheUpdateTxResult updRes = cached.innerSet(this, eventNodeId(), nodeId, val, false, false, txEntry.ttl(), true, true, txEntry.keepBinary(), txEntry.hasOldValue(), txEntry.oldValue(), topVer, null, replicate ? DR_BACKUP : DR_NONE, txEntry.conflictExpireTime(), near() ? null : explicitVer, CU.subjectId(this, cctx), resolveTaskName(), dhtVer, txEntry.updateCounter());
                                                if (updRes.loggedPointer() != null)
                                                    ptr = updRes.loggedPointer();
                                                // Keep near entry up to date.
                                                if (nearCached != null) {
                                                    CacheObject val0 = cached.valueBytes();
                                                    nearCached.updateOrEvict(xidVer, val0, cached.expireTime(), cached.ttl(), nodeId, topVer);
                                                }
                                            }
                                        } else if (op == DELETE) {
                                            GridCacheUpdateTxResult updRes = cached.innerRemove(this, eventNodeId(), nodeId, false, true, true, txEntry.keepBinary(), txEntry.hasOldValue(), txEntry.oldValue(), topVer, null, replicate ? DR_BACKUP : DR_NONE, near() ? null : explicitVer, CU.subjectId(this, cctx), resolveTaskName(), dhtVer, txEntry.updateCounter());
                                            if (updRes.loggedPointer() != null)
                                                ptr = updRes.loggedPointer();
                                            // Keep near entry up to date.
                                            if (nearCached != null)
                                                nearCached.updateOrEvict(xidVer, null, 0, 0, nodeId, topVer);
                                        } else if (op == RELOAD) {
                                            CacheObject reloaded = cached.innerReload();
                                            if (nearCached != null) {
                                                nearCached.innerReload();
                                                nearCached.updateOrEvict(cached.version(), reloaded, cached.expireTime(), cached.ttl(), nodeId, topVer);
                                            }
                                        } else if (op == READ) {
                                            assert near();
                                            if (log.isDebugEnabled())
                                                log.debug("Ignoring READ entry when committing: " + txEntry);
                                        } else {
                                            // No-op.
                                            if (conflictCtx == null || !conflictCtx.isUseOld()) {
                                                if (txEntry.ttl() != CU.TTL_NOT_CHANGED)
                                                    cached.updateTtl(null, txEntry.ttl());
                                                if (nearCached != null) {
                                                    CacheObject val0 = cached.valueBytes();
                                                    nearCached.updateOrEvict(xidVer, val0, cached.expireTime(), cached.ttl(), nodeId, topVer);
                                                }
                                            }
                                        }
                                        // Assert after setting values, since we want to make sure
                                        // that the lock is still owned if we replaced removed entries.
                                        assert txEntry.op() == READ || onePhaseCommit() ||
                                            // If the lock candidate is not there, then the lock was explicit
                                            // and we simply allow the commit to proceed.
                                            !cached.hasLockCandidateUnsafe(xidVer) || cached.lockedByUnsafe(xidVer) : "Transaction does not own lock for commit [entry=" + cached + ", tx=" + this + ']';
                                        // Break out of while loop.
                                        break;
                                    } catch (GridCacheEntryRemovedException ignored) {
                                        if (log.isDebugEnabled())
                                            log.debug("Attempting to commit a removed entry (will retry): " + txEntry);
                                        // Renew cached entry.
                                        txEntry.cached(cacheCtx.cache().entryEx(txEntry.key(), topologyVersion()));
                                    }
                                }
                            } catch (Throwable ex) {
                                boolean nodeStopping = X.hasCause(ex, NodeStoppingException.class);
                                // In case of error, we still make the best effort to commit,
                                // as there is no way to rollback at this point.
                                err = new IgniteTxHeuristicCheckedException("Commit produced a runtime exception " + "(all transaction entries will be invalidated): " + CU.txString(this), ex);
                                if (nodeStopping) {
                                    U.warn(log, "Failed to commit transaction, node is stopping [tx=" + this + ", err=" + ex + ']');
                                } else
                                    U.error(log, "Commit failed.", err);
                                uncommit(nodeStopping);
                                state(UNKNOWN);
                                if (ex instanceof Error)
                                    throw (Error) ex;
                            }
                        }
                        if (!near() && !F.isEmpty(dataEntries) && cctx.wal() != null)
                            cctx.wal().log(new DataRecord(dataEntries));
                        if (ptr != null && !cctx.tm().logTxRecords())
                            cctx.wal().fsync(ptr);
                    } catch (StorageException e) {
                        throw new IgniteCheckedException("Failed to log transaction record " + "(transaction will be rolled back): " + this, e);
                    }
                } finally {
                    cctx.database().checkpointReadUnlock();
                    if (wrapper != null)
                        wrapper.initialize(ret);
                }
            }
            if (err != null) {
                state(UNKNOWN);
                throw err;
            }
            cctx.tm().commitTx(this);
            state(COMMITTED);
        }
    }
}
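Both retry loops in commitIfLocked follow the same pattern: an operation on a GridCacheEntryEx may fail with GridCacheEntryRemovedException, in which case the transaction entry is rebound to a fresh entry obtained from its GridCacheContext and the work is retried. The sketch below distills that pattern into a standalone shape for readability only; the helper name is hypothetical, and all calls are the ones already used in the method above, so it is meant to be read in the context of this class.

private void retryOnRemovedEntry(IgniteTxEntry txEntry) throws IgniteCheckedException {
    GridCacheContext cacheCtx = txEntry.context();
    while (true) {
        try {
            GridCacheEntryEx cached = txEntry.cached();

            // ... apply the commit-time update to 'cached' here ...

            // Success: leave the retry loop.
            break;
        } catch (GridCacheEntryRemovedException ignored) {
            // Entry was concurrently removed: renew it from the cache context and retry.
            txEntry.cached(cacheCtx.cache().entryEx(txEntry.key(), topologyVersion()));
        }
    }
}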
use of org.apache.ignite.internal.processors.cache.GridCacheContext in project ignite by apache.
the class GridDhtTxFinishResponse method finishUnmarshal.
/**
 * {@inheritDoc}
 */
@Override
public void finishUnmarshal(GridCacheSharedContext ctx, ClassLoader ldr) throws IgniteCheckedException {
    super.finishUnmarshal(ctx, ldr);
    if (checkCommittedErrBytes != null && checkCommittedErr == null)
        checkCommittedErr = U.unmarshal(ctx, checkCommittedErrBytes, U.resolveClassLoader(ldr, ctx.gridConfig()));
    if (retVal != null && retVal.cacheId() != 0) {
        GridCacheContext cctx = ctx.cacheContext(retVal.cacheId());
        assert cctx != null : retVal.cacheId();
        retVal.finishUnmarshal(cctx, ldr);
    }
}
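For completeness, the marshal-side counterpart of this method performs the same cache-id lookup before serialization. The snippet below is a hedged reconstruction, not verbatim Ignite code: the field and method names are taken from the fragment above, and the exact GridDhtTxFinishResponse.prepareMarshal implementation may differ in detail.

@Override
public void prepareMarshal(GridCacheSharedContext ctx) throws IgniteCheckedException {
    super.prepareMarshal(ctx);
    if (checkCommittedErr != null && checkCommittedErrBytes == null)
        checkCommittedErrBytes = U.marshal(ctx, checkCommittedErr);
    if (retVal != null && retVal.cacheId() != 0) {
        GridCacheContext cctx = ctx.cacheContext(retVal.cacheId());
        assert cctx != null : retVal.cacheId();
        retVal.prepareMarshal(cctx);
    }
}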