Example use of org.apache.ignite.cache.PartitionLossPolicy in the Apache Ignite project:
the validateCache method of the GridDhtPartitionsExchangeFuture class.
/** {@inheritDoc} */
@Nullable
@Override
public Throwable validateCache(GridCacheContext cctx, boolean recovery, boolean read, @Nullable Object key, @Nullable Collection<?> keys) {
    assert isDone() : this;

    // An exchange-level failure trumps any cache-specific validation.
    Throwable exchErr = error();

    if (exchErr != null)
        return exchErr;

    // No cache operations are allowed while the cluster is inactive.
    if (!cctx.shared().kernalContext().state().active())
        return new CacheInvalidStateException("Failed to perform cache operation (cluster is not activated): " + cctx.name());

    PartitionLossPolicy lossPlc = cctx.config().getPartitionLossPolicy();

    // A read-only loss policy blocks writes while the cache awaits recovery,
    // unless the caller explicitly operates in recovery mode.
    boolean readOnlyLossPlc = lossPlc == READ_ONLY_SAFE || lossPlc == READ_ONLY_ALL;

    if (!read && !recovery && cctx.needsRecovery() && readOnlyLossPlc)
        return new IgniteCheckedException("Failed to write to cache (cache is moved to a read-only state): " + cctx.name());

    // Fine-grained validation applies only when the cache has lost partitions
    // or a topology validator is configured.
    if (!cctx.needsRecovery() && cctx.config().getTopologyValidator() == null)
        return null;

    CacheValidation valRes = cacheValidRes.get(cctx.cacheId());

    if (valRes == null)
        return null;

    // An invalid topology (per the topology validator) rejects writes only.
    if (!read && !valRes.valid)
        return new IgniteCheckedException("Failed to perform cache operation (cache topology is not valid): " + cctx.name());

    // Per-key lost-partition checks are skipped in recovery mode and for caches
    // that do not need recovery.
    if (recovery || !cctx.needsRecovery())
        return null;

    if (key != null) {
        CacheInvalidStateException e = validatePartitionOperation(
            cctx.name(), read, key, cctx.affinity().partition(key), valRes.lostParts, lossPlc);

        if (e != null)
            return e;
    }

    if (keys != null) {
        for (Object k : keys) {
            CacheInvalidStateException e = validatePartitionOperation(
                cctx.name(), read, k, cctx.affinity().partition(k), valRes.lostParts, lossPlc);

            if (e != null)
                return e;
        }
    }

    return null;
}
Example use of org.apache.ignite.cache.PartitionLossPolicy in the Apache Ignite project:
the detectLostPartitions method of the GridDhtPartitionTopologyImpl class.
/**
 * {@inheritDoc}
 *
 * Scans every cache partition after a discovery event and flags as lost any
 * partition that no longer has an OWNING copy on any node, applying the
 * cache's configured {@link PartitionLossPolicy}. Runs entirely under the
 * topology write lock.
 *
 * @param discoEvt Discovery event that triggered the check; used only to
 *      populate the EVT_CACHE_REBALANCE_PART_DATA_LOST event payload.
 * @return {@code true} if the state of at least one local partition changed.
 */
@Override
public boolean detectLostPartitions(DiscoveryEvent discoEvt) {
    lock.writeLock().lock();

    try {
        int parts = cctx.affinity().partitions();

        // Lazily allocated set of partition ids that have no OWNING copy anywhere.
        Collection<Integer> lost = null;

        for (int p = 0; p < parts; p++) {
            boolean foundOwner = false;

            Set<UUID> nodeIds = part2node.get(p);

            if (nodeIds != null) {
                for (UUID nodeId : nodeIds) {
                    GridDhtPartitionMap partMap = node2part.get(nodeId);

                    // NOTE(review): partMap is dereferenced without a null check; this
                    // assumes part2node and node2part stay consistent while the write
                    // lock is held — confirm that invariant.
                    GridDhtPartitionState state = partMap.get(p);

                    if (state == OWNING) {
                        foundOwner = true;

                        break;
                    }
                }
            }

            if (!foundOwner) {
                if (lost == null)
                    // Sized for the worst case: all remaining partitions are lost.
                    lost = new HashSet<>(parts - p, 1.0f);

                lost.add(p);
            }
        }

        boolean changed = false;

        if (lost != null) {
            PartitionLossPolicy plc = cctx.config().getPartitionLossPolicy();

            assert plc != null;

            // Update partition state on all nodes.
            for (Integer part : lost) {
                // Each state transition gets its own update sequence number.
                long updSeq = updateSeq.incrementAndGet();

                GridDhtLocalPartition locPart = localPartition(part, topVer, false);

                if (locPart != null) {
                    // IGNORE policy: silently re-own the partition; any other
                    // policy marks it LOST until explicitly reset.
                    boolean marked = plc == PartitionLossPolicy.IGNORE ? locPart.own() : locPart.markLost();

                    if (marked)
                        updateLocal(locPart.id(), locPart.state(), updSeq);

                    changed |= marked;
                }
                else // Update map for remote node.
                    // NOTE(review): 'changed' is not set on this remote-map path —
                    // presumably intentional (only local state counts); confirm.
                    if (plc != PartitionLossPolicy.IGNORE) {
                        Set<UUID> nodeIds = part2node.get(part);

                        if (nodeIds != null) {
                            for (UUID nodeId : nodeIds) {
                                GridDhtPartitionMap nodeMap = node2part.get(nodeId);

                                // NOTE(review): nodeMap could be null if node2part lacks
                                // nodeId — relies on the same consistency invariant as above.
                                if (nodeMap.get(part) != EVICTED)
                                    nodeMap.put(part, LOST);
                            }
                        }
                    }

                // A data-lost event is fired per partition regardless of policy.
                if (cctx.events().isRecordable(EventType.EVT_CACHE_REBALANCE_PART_DATA_LOST))
                    cctx.events().addPreloadEvent(part, EVT_CACHE_REBALANCE_PART_DATA_LOST, discoEvt.eventNode(), discoEvt.type(), discoEvt.timestamp());
            }

            // Any non-IGNORE policy puts the cache into recovery mode until the
            // lost partitions are explicitly reset.
            if (plc != PartitionLossPolicy.IGNORE)
                cctx.needsRecovery(true);
        }

        return changed;
    }
    finally {
        lock.writeLock().unlock();
    }
}
Aggregations