Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology in project ignite by apache: class GridCacheContext, method checkAndReservePartition.
/**
 * @param part Partition.
 * @param topVer Topology version.
 * @return {@code True} if partition is available locally.
 */
private boolean checkAndReservePartition(int part, AffinityTopologyVersion topVer) {
    assert affinityNode();

    GridDhtPartitionTopology top = topology();

    if (isReplicated() && !group().persistenceEnabled()) {
        boolean rebFinished = top.rebalanceFinished(topVer);

        if (rebFinished)
            return true;

        GridDhtLocalPartition locPart = top.localPartition(part, topVer, false, false);

        // No need to reserve a partition for a REPLICATED cache because this partition cannot be evicted.
        return locPart != null && locPart.state() == OWNING;
    }
    else {
        GridDhtLocalPartition locPart = top.localPartition(part, topVer, false, false);

        if (locPart != null && locPart.reserve()) {
            boolean canRead = true;

            try {
                canRead = locPart.state() == OWNING;

                return canRead;
            }
            finally {
                if (!canRead)
                    locPart.release();
            }
        }
        else
            return false;
    }
}
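The reserve-then-verify idiom in the else branch is the key pattern here: the partition is reserved first so it cannot be evicted, its state is then checked, and the reservation is released immediately if the check fails. Below is a minimal, self-contained sketch of that idiom using a hypothetical Partition class (not Ignite's internal GridDhtLocalPartition), so the control flow can be followed in isolation.

import java.util.concurrent.atomic.AtomicInteger;

// Hypothetical stand-in for GridDhtLocalPartition: a reservation counter plus a state flag.
final class Partition {
    enum State { OWNING, RENTING, LOST }

    private final AtomicInteger reservations = new AtomicInteger();

    private volatile State state = State.OWNING;

    /** Pins the partition so it cannot be evicted while reserved. */
    boolean reserve() {
        reservations.incrementAndGet();

        return true;
    }

    void release() {
        reservations.decrementAndGet();
    }

    State state() {
        return state;
    }

    /**
     * Mirrors the else branch of checkAndReservePartition(): reserve, verify the state,
     * and drop the reservation right away if the partition turned out to be unreadable.
     */
    static boolean checkAndReserve(Partition part) {
        if (part == null || !part.reserve())
            return false;

        boolean canRead = false;

        try {
            canRead = part.state() == State.OWNING;

            return canRead;
        }
        finally {
            if (!canRead)
                part.release();
        }
    }
}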
Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology in project ignite by apache: class IgniteTxLocalAdapter, method calculatePartitionUpdateCounters.
/**
 * Calculates partition update counters for the current transaction. Each partition is supplied with
 * an (init, delta) pair, where init is the initial update counter and delta is the number of updates
 * made by the current transaction for that partition.
 */
public void calculatePartitionUpdateCounters() throws IgniteTxRollbackCheckedException {
    TxCounters counters = txCounters(false);

    if (counters != null && F.isEmpty(counters.updateCounters())) {
        List<PartitionUpdateCountersMessage> cntrMsgs = new ArrayList<>();

        for (Map.Entry<Integer, Map<Integer, AtomicLong>> record : counters.accumulatedUpdateCounters().entrySet()) {
            int cacheId = record.getKey();
            Map<Integer, AtomicLong> partToCntrs = record.getValue();

            assert partToCntrs != null;

            if (F.isEmpty(partToCntrs))
                continue;

            PartitionUpdateCountersMessage msg = new PartitionUpdateCountersMessage(cacheId, partToCntrs.size());

            GridCacheContext ctx0 = cctx.cacheContext(cacheId);
            GridDhtPartitionTopology top = ctx0.topology();

            assert top != null;

            for (Map.Entry<Integer, AtomicLong> e : partToCntrs.entrySet()) {
                AtomicLong acc = e.getValue();

                assert acc != null;

                long cntr = acc.get();

                assert cntr >= 0;

                if (cntr != 0) {
                    int p = e.getKey();

                    GridDhtLocalPartition part = top.localPartition(p);

                    // Verify primary tx mapping.
                    // LOST state is possible if tx is started over LOST partition.
                    boolean valid = part != null &&
                        (part.state() == OWNING || part.state() == LOST) &&
                        part.primary(top.readyTopologyVersion());

                    if (!valid) {
                        // Local node is no longer primary for the partition, need to rollback a transaction.
                        if (part != null && !part.primary(top.readyTopologyVersion())) {
                            log.warning("Failed to prepare a transaction on outdated topology, rolling back " +
                                "[tx=" + CU.txString(this) +
                                ", readyTopVer=" + top.readyTopologyVersion() +
                                ", lostParts=" + top.lostPartitions() +
                                ", part=" + part.toString() + ']');

                            throw new IgniteTxRollbackCheckedException("Failed to prepare a transaction on outdated " +
                                "topology, please try again [timeout=" + timeout() + ", tx=" + CU.txString(this) + ']');
                        }

                        // Trigger error.
                        throw new AssertionError("Invalid primary mapping [tx=" + CU.txString(this) +
                            ", readyTopVer=" + top.readyTopologyVersion() +
                            ", lostParts=" + top.lostPartitions() +
                            ", part=" + (part == null ? "NULL" : part.toString()) + ']');
                    }

                    msg.add(p, part.getAndIncrementUpdateCounter(cntr), cntr);
                }
            }

            if (msg.size() > 0)
                cntrMsgs.add(msg);
        }

        counters.updateCounters(cntrMsgs);
    }
}
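The essential step above is collapsing the per-partition AtomicLong accumulators into (init, delta) ranges by atomically reserving a contiguous slice of each partition's update counter. A simplified, self-contained sketch of that step, using plain maps instead of Ignite's TxCounters and PartitionUpdateCountersMessage (all names below are hypothetical), might look like this:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;

// A minimal sketch (not Ignite code) of turning accumulated per-partition deltas
// into (init, delta) ranges, the way calculatePartitionUpdateCounters() does.
public class UpdateCounterRangesSketch {
    /** Hypothetical per-partition update counters (stand-in for the partition counters above). */
    static final Map<Integer, AtomicLong> PART_COUNTERS = new HashMap<>();

    /** One reserved range of a partition's update counter. */
    static class CounterRange {
        final int part;
        final long init;
        final long delta;

        CounterRange(int part, long init, long delta) {
            this.part = part;
            this.init = init;
            this.delta = delta;
        }

        @Override public String toString() {
            return "[part=" + part + ", init=" + init + ", delta=" + delta + ']';
        }
    }

    static List<CounterRange> collapse(Map<Integer, AtomicLong> accumulated) {
        List<CounterRange> res = new ArrayList<>();

        for (Map.Entry<Integer, AtomicLong> e : accumulated.entrySet()) {
            long delta = e.getValue().get();

            if (delta == 0)
                continue; // Partition was touched, but nothing was updated.

            // Reserve a contiguous range [init, init + delta) of the partition's update counter,
            // analogous to part.getAndIncrementUpdateCounter(cntr) in the snippet above.
            long init = PART_COUNTERS.computeIfAbsent(e.getKey(), p -> new AtomicLong()).getAndAdd(delta);

            res.add(new CounterRange(e.getKey(), init, delta));
        }

        return res;
    }

    public static void main(String[] args) {
        Map<Integer, AtomicLong> acc = new HashMap<>();
        acc.put(0, new AtomicLong(3)); // Three updates in partition 0 within this transaction.
        acc.put(1, new AtomicLong(0)); // Partition read, but not updated: skipped.

        System.out.println(collapse(acc)); // [[part=0, init=0, delta=3]]
    }
}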
Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology in project ignite by apache: class IgniteTxAdapter, method applyTxSizes.
/**
 * Makes cache size changes accumulated during the transaction visible outside of the transaction.
 */
protected void applyTxSizes() {
    TxCounters txCntrs = txCounters(false);

    if (txCntrs == null)
        return;

    Map<Integer, ? extends Map<Integer, AtomicLong>> sizeDeltas = txCntrs.sizeDeltas();

    for (Map.Entry<Integer, ? extends Map<Integer, AtomicLong>> entry : sizeDeltas.entrySet()) {
        Integer cacheId = entry.getKey();
        Map<Integer, AtomicLong> deltas = entry.getValue();

        assert !F.isEmpty(deltas);

        GridDhtPartitionTopology top = cctx.cacheContext(cacheId).topology();

        // Need to reserve on backups only.
        boolean reserve = dht() && remote();

        for (Map.Entry<Integer, AtomicLong> e : deltas.entrySet()) {
            boolean invalid = false;

            int p = e.getKey();
            long delta = e.getValue().get();

            try {
                GridDhtLocalPartition part = top.localPartition(p);

                if (!reserve || part != null && part.reserve()) {
                    assert part != null;

                    try {
                        if (part.state() != GridDhtPartitionState.RENTING)
                            part.dataStore().updateSize(cacheId, delta);
                        else
                            invalid = true;
                    }
                    finally {
                        if (reserve)
                            part.release();
                    }
                }
                else
                    invalid = true;
            }
            catch (GridDhtInvalidPartitionException e1) {
                invalid = true;
            }

            if (invalid) {
                assert reserve;

                if (log.isDebugEnabled())
                    log.debug("Trying to apply size delta for invalid partition: [cacheId=" + cacheId + ", part=" + p + "]");
            }
        }
    }
}
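Conceptually, a transaction keeps its own per-partition size deltas and publishes them in one pass when it completes; the subtlety above is only that backup copies reserve the partition first so a delta is never applied to a partition that is being evicted. The following self-contained sketch (hypothetical types, not Ignite code) shows the accumulate-then-publish half of that pattern:

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;

// A minimal sketch (not Ignite code) of the size-delta bookkeeping applyTxSizes() flushes:
// the transaction accumulates per-partition deltas locally and publishes them once, on commit.
public class TxSizeDeltasSketch {
    /** Globally visible partition sizes (stand-in for the per-partition data store sizes). */
    static final Map<Integer, AtomicLong> PART_SIZES = new HashMap<>();

    /** Per-transaction, not yet visible deltas: partition -> delta. */
    final Map<Integer, AtomicLong> sizeDeltas = new HashMap<>();

    void onEntryCreated(int part) {
        sizeDeltas.computeIfAbsent(part, p -> new AtomicLong()).incrementAndGet();
    }

    void onEntryRemoved(int part) {
        sizeDeltas.computeIfAbsent(part, p -> new AtomicLong()).decrementAndGet();
    }

    /** Makes accumulated deltas visible outside of the transaction, as applyTxSizes() does. */
    void applySizes() {
        for (Map.Entry<Integer, AtomicLong> e : sizeDeltas.entrySet()) {
            long delta = e.getValue().get();

            if (delta != 0)
                PART_SIZES.computeIfAbsent(e.getKey(), p -> new AtomicLong()).addAndGet(delta);
        }
    }

    public static void main(String[] args) {
        TxSizeDeltasSketch tx = new TxSizeDeltasSketch();

        tx.onEntryCreated(0);
        tx.onEntryCreated(0);
        tx.onEntryRemoved(7);
        tx.applySizes();

        System.out.println(PART_SIZES); // {0=2, 7=-1}
    }
}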
Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology in project ignite by apache: class IgniteTxHandler, method filterUpdateCountersForBackupNode.
/**
 * @param tx Transaction.
 * @param node Backup node.
 * @return Partition counters for the given backup node.
 */
@Nullable public List<PartitionUpdateCountersMessage> filterUpdateCountersForBackupNode(IgniteInternalTx tx, ClusterNode node) {
    TxCounters txCntrs = tx.txCounters(false);

    Collection<PartitionUpdateCountersMessage> updCntrs;

    if (txCntrs == null || F.isEmpty(updCntrs = txCntrs.updateCounters()))
        return null;

    List<PartitionUpdateCountersMessage> res = new ArrayList<>(updCntrs.size());

    AffinityTopologyVersion top = tx.topologyVersionSnapshot();

    for (PartitionUpdateCountersMessage partCntrs : updCntrs) {
        GridDhtPartitionTopology topology = ctx.cacheContext(partCntrs.cacheId()).topology();

        PartitionUpdateCountersMessage resCntrs = new PartitionUpdateCountersMessage(partCntrs.cacheId(), partCntrs.size());

        for (int i = 0; i < partCntrs.size(); i++) {
            int part = partCntrs.partition(i);

            // The owner list for a partition is ordered with the primary first, so a positive index
            // means the given node is a backup for this partition.
            if (topology.nodes(part, top).indexOf(node) > 0)
                resCntrs.add(part, partCntrs.initialCounter(i), partCntrs.updatesCount(i));
        }

        if (resCntrs.size() > 0)
            res.add(resCntrs);
    }

    return res;
}
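A toy, self-contained version of that filtering, with plain strings in place of ClusterNode and a hard-coded owner map in place of the partition topology (all names hypothetical), makes the primary-first ordering assumption concrete:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;

// A minimal sketch (not Ignite code) of filterUpdateCountersForBackupNode():
// keep only the partitions for which the given node is a backup (index > 0 in the owner list).
public class BackupCounterFilterSketch {
    public static void main(String[] args) {
        // Hypothetical partition -> ordered owner list; the first element is the primary.
        Map<Integer, List<String>> owners = Map.of(
            0, Arrays.asList("nodeA", "nodeB"),
            1, Arrays.asList("nodeB", "nodeA"),
            2, Arrays.asList("nodeA", "nodeC"));

        // Partitions updated by the transaction.
        List<Integer> updatedParts = Arrays.asList(0, 1, 2);

        String backup = "nodeB";

        List<Integer> res = new ArrayList<>();

        for (int part : updatedParts) {
            // Index 0 is the primary, so a positive index means the node owns the partition as a backup.
            if (owners.get(part).indexOf(backup) > 0)
                res.add(part);
        }

        System.out.println(res); // [0] -- nodeB is primary for partition 1 and not an owner of partition 2.
    }
}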
Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology in project ignite by apache: class IgniteTxHandler, method applyPartitionsUpdatesCounters.
/**
 * Applies partition counter updates for transactions.
 * <p>
 * Called after entries are written to the WAL on commit, or during rollback, to close gaps in the update counter sequence.
 * <p>
 * On rollback, counters should be applied on the primary only after the backup nodes. Otherwise, if the primary fails
 * before sending rollback requests to the backups, remote transactions can be committed by the recovery protocol, and
 * partition consistency will not be restored when the primary returns to the grid, because a RollbackRecord has already
 * been written (relevant for persistent mode only).
 *
 * @param counters Counter values to be updated.
 * @param rollback {@code True} if applied during rollback.
 * @param rollbackOnPrimary {@code True} if rollback happens on the primary node. Passed to the CQ engine.
 */
public void applyPartitionsUpdatesCounters(Iterable<PartitionUpdateCountersMessage> counters, boolean rollback,
    boolean rollbackOnPrimary) throws IgniteCheckedException {
    if (counters == null)
        return;

    WALPointer ptr = null;

    try {
        for (PartitionUpdateCountersMessage counter : counters) {
            GridCacheContext ctx0 = ctx.cacheContext(counter.cacheId());

            GridDhtPartitionTopology top = ctx0.topology();

            assert top != null;

            AffinityTopologyVersion topVer = top.readyTopologyVersion();

            for (int i = 0; i < counter.size(); i++) {
                boolean invalid = false;

                try {
                    GridDhtLocalPartition part = top.localPartition(counter.partition(i));

                    if (part != null && part.reserve()) {
                        try {
                            if (part.state() != RENTING) { // The check is relevant only for backup nodes.
                                long start = counter.initialCounter(i);
                                long delta = counter.updatesCount(i);

                                boolean updated = part.updateCounter(start, delta);

                                // Need to log the rolled back range for logical recovery.
                                if (updated && rollback) {
                                    CacheGroupContext grpCtx = part.group();

                                    if (grpCtx.persistenceEnabled() && grpCtx.walEnabled() && !grpCtx.mvccEnabled()) {
                                        RollbackRecord rec = new RollbackRecord(grpCtx.groupId(), part.id(), start, delta);

                                        ptr = ctx.wal().log(rec);
                                    }

                                    for (int cntr = 1; cntr <= delta; cntr++) {
                                        ctx0.continuousQueries().skipUpdateCounter(null, part.id(), start + cntr,
                                            topVer, rollbackOnPrimary);
                                    }
                                }
                            }
                            else
                                invalid = true;
                        }
                        finally {
                            part.release();
                        }
                    }
                    else
                        invalid = true;
                }
                catch (GridDhtInvalidPartitionException e) {
                    invalid = true;
                }

                if (log.isDebugEnabled() && invalid) {
                    log.debug("Received partition update counters message for invalid partition, ignoring: " +
                        "[cacheId=" + counter.cacheId() + ", part=" + counter.partition(i) + ']');
                }
            }
        }
    }
    finally {
        if (ptr != null)
            ctx.wal().flush(ptr, false);
    }
}
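The core mechanism here is part.updateCounter(start, delta): counter ranges produced by different transactions may be applied out of order, and rollback applies the missing ranges so the sequence below the counter's low-water mark stays contiguous. The following simplified, self-contained sketch of a gap-closing counter (hypothetical, not Ignite's partition update counter implementation) illustrates the idea:

import java.util.TreeMap;

// A minimal sketch (not Ignite code) of closing gaps in an update counter:
// ranges (start, delta) may be applied out of order, and the counter only advances
// over a prefix of the sequence that has no holes.
public class GapClosingCounterSketch {
    /** Highest counter value up to which the sequence is known to be gap free. */
    private long lwm;

    /** Out-of-order ranges waiting for the gap below them to be closed: start -> end. */
    private final TreeMap<Long, Long> pending = new TreeMap<>();

    /** Applies a range of updates [start, start + delta); returns false if it was already covered. */
    boolean update(long start, long delta) {
        long end = start + delta;

        if (end <= lwm)
            return false; // Range already covered.

        pending.put(start, end);

        // Advance the low-water mark over every contiguous pending range.
        while (!pending.isEmpty() && pending.firstKey() <= lwm) {
            long rangeEnd = pending.pollFirstEntry().getValue();

            if (rangeEnd > lwm)
                lwm = rangeEnd;
        }

        return true;
    }

    long get() {
        return lwm;
    }

    public static void main(String[] args) {
        GapClosingCounterSketch cntr = new GapClosingCounterSketch();

        cntr.update(2, 3); // Out of order: [2, 5) is buffered, the counter stays at 0.
        System.out.println(cntr.get()); // 0

        cntr.update(0, 2); // Closes the gap [0, 2); the counter jumps to 5.
        System.out.println(cntr.get()); // 5
    }
}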