Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition in project ignite by apache.
The class GridCacheMapEntry, method nextPartCounter.
/**
 * @param topVer Topology version.
 * @return Update counter.
 */
private long nextPartCounter(AffinityTopologyVersion topVer) {
    long updateCntr;

    if (!cctx.isLocal() && !isNear()) {
        GridDhtLocalPartition locPart = cctx.topology().localPartition(partition(), topVer, false);

        if (locPart == null)
            return 0;

        updateCntr = locPart.nextUpdateCounter();
    } else
        updateCntr = 0;

    return updateCntr;
}
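The pattern above (look up the local partition for a topology version with create = false, then fall back to a default when it is absent) recurs throughout the DHT code. A minimal sketch of the same idea as a standalone helper follows; the helper name nextCounterOrZero is hypothetical, and a GridCacheContext field cctx plus a partition id are assumed to be in scope.

// Hypothetical helper mirroring nextPartCounter(); assumes a GridCacheContext field 'cctx'.
// Types used: org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion,
// org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition.
private long nextCounterOrZero(int partId, AffinityTopologyVersion topVer) {
    // 'create = false': do not allocate the partition if this node does not currently hold it.
    GridDhtLocalPartition locPart = cctx.topology().localPartition(partId, topVer, false);

    // No local partition for this topology version: report a zero counter instead of failing.
    return locPart == null ? 0 : locPart.nextUpdateCounter();
}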
Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition in project ignite by apache.
The class IgniteCacheOffheapManagerImpl, method entriesCount.
/** {@inheritDoc} */
@Override
public long entriesCount(boolean primary, boolean backup, AffinityTopologyVersion topVer) throws IgniteCheckedException {
    if (cctx.isLocal())
        return entriesCount(0);
    else {
        ClusterNode locNode = cctx.localNode();

        long cnt = 0;

        for (GridDhtLocalPartition locPart : cctx.topology().currentLocalPartitions()) {
            if (primary) {
                if (cctx.affinity().primaryByPartition(locNode, locPart.id(), topVer)) {
                    cnt += locPart.dataStore().size();

                    continue;
                }
            }

            if (backup) {
                if (cctx.affinity().backupByPartition(locNode, locPart.id(), topVer))
                    cnt += locPart.dataStore().size();
            }
        }

        return cnt;
    }
}
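The method walks every partition currently held on the local node and, depending on the primary and backup flags, adds the size of its data store. A reduced sketch that counts only primary partitions could look like the following; the helper name primaryEntriesCount is hypothetical and a GridCacheContext field cctx is assumed, as in the class above.

// Hypothetical helper: sum the data-store sizes of primary partitions only.
private long primaryEntriesCount(AffinityTopologyVersion topVer) {
    ClusterNode locNode = cctx.localNode();

    long cnt = 0;

    for (GridDhtLocalPartition locPart : cctx.topology().currentLocalPartitions()) {
        // Count the partition only if this node is its primary for the given topology version.
        if (cctx.affinity().primaryByPartition(locNode, locPart.id(), topVer))
            cnt += locPart.dataStore().size();
    }

    return cnt;
}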
Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition in project ignite by apache.
The class GridDhtPreloader, method assign.
/** {@inheritDoc} */
@Override
public GridDhtPreloaderAssignments assign(GridDhtPartitionsExchangeFuture exchFut) {
    // No assignments for disabled preloader.
    GridDhtPartitionTopology top = cctx.dht().topology();

    if (!cctx.rebalanceEnabled() || !cctx.shared().kernalContext().state().active())
        return new GridDhtPreloaderAssignments(exchFut, top.topologyVersion());

    int partCnt = cctx.affinity().partitions();

    assert exchFut.forcePreload() || exchFut.dummyReassign() || exchFut.exchangeId().topologyVersion().equals(top.topologyVersion()) :
        "Topology version mismatch [exchId=" + exchFut.exchangeId() + ", cache=" + cctx.name() + ", topVer=" + top.topologyVersion() + ']';

    GridDhtPreloaderAssignments assigns = new GridDhtPreloaderAssignments(exchFut, top.topologyVersion());

    AffinityTopologyVersion topVer = assigns.topologyVersion();

    for (int p = 0; p < partCnt; p++) {
        if (cctx.shared().exchange().hasPendingExchange()) {
            if (log.isDebugEnabled())
                log.debug("Skipping assignments creation, exchange worker has pending assignments: " + exchFut.exchangeId());

            assigns.cancelled(true);

            return assigns;
        }

        // If partition belongs to local node.
        if (cctx.affinity().partitionLocalNode(p, topVer)) {
            GridDhtLocalPartition part = top.localPartition(p, topVer, true);

            assert part != null;
            assert part.id() == p;

            if (part.state() != MOVING) {
                if (log.isDebugEnabled())
                    log.debug("Skipping partition assignment (state is not MOVING): " + part);

                continue; // For.
            }

            Collection<ClusterNode> picked = pickedOwners(p, topVer);

            if (picked.isEmpty()) {
                top.own(part);

                if (cctx.events().isRecordable(EVT_CACHE_REBALANCE_PART_DATA_LOST)) {
                    DiscoveryEvent discoEvt = exchFut.discoveryEvent();

                    cctx.events().addPreloadEvent(p, EVT_CACHE_REBALANCE_PART_DATA_LOST, discoEvt.eventNode(), discoEvt.type(), discoEvt.timestamp());
                }

                if (log.isDebugEnabled())
                    log.debug("Owning partition as there are no other owners: " + part);
            } else {
                ClusterNode n = F.rand(picked);

                GridDhtPartitionDemandMessage msg = assigns.get(n);

                if (msg == null) {
                    assigns.put(n, msg = new GridDhtPartitionDemandMessage(top.updateSequence(), exchFut.exchangeId().topologyVersion(), cctx.cacheId()));
                }

                msg.addPartition(p);
            }
        }
    }

    return assigns;
}
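For every MOVING partition that has at least one remote owner, assign() picks a supplier node and appends the partition to that node's GridDhtPartitionDemandMessage, creating the message lazily on first use. The sketch below isolates that get-or-create grouping with a plain java.util.HashMap; pickSupplier is a hypothetical stand-in for F.rand(pickedOwners(p, topVer)), and partCnt, top, topVer and cctx are assumed to be in scope as in the method above.

// Illustrative sketch of the per-node grouping, using java.util.Map/HashMap instead of
// GridDhtPreloaderAssignments. 'pickSupplier' is hypothetical; the other calls are the
// ones shown in assign() above.
Map<ClusterNode, GridDhtPartitionDemandMessage> demandByNode = new HashMap<>();

for (int p = 0; p < partCnt; p++) {
    ClusterNode supplier = pickSupplier(p); // E.g. a random node from pickedOwners(p, topVer).

    if (supplier == null)
        continue; // No remote owner: the partition is owned locally instead of being demanded.

    GridDhtPartitionDemandMessage msg = demandByNode.get(supplier);

    // Lazily create one demand message per supplier node.
    if (msg == null)
        demandByNode.put(supplier, msg = new GridDhtPartitionDemandMessage(top.updateSequence(), topVer, cctx.cacheId()));

    msg.addPartition(p);
}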
Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition in project ignite by apache.
The class GridDhtPreloader, method processForceKeysRequest0.
/**
 * @param node Node that originated the request.
 * @param msg Force keys message.
 */
private void processForceKeysRequest0(ClusterNode node, GridDhtForceKeysRequest msg) {
    if (!enterBusy())
        return;

    try {
        ClusterNode loc = cctx.localNode();

        GridDhtForceKeysResponse res = new GridDhtForceKeysResponse(cctx.cacheId(), msg.futureId(), msg.miniId(), cctx.deploymentEnabled());

        for (KeyCacheObject k : msg.keys()) {
            int p = cctx.affinity().partition(k);

            GridDhtLocalPartition locPart = top.localPartition(p, AffinityTopologyVersion.NONE, false);

            // If this node is no longer an owner.
            if (locPart == null && !top.owners(p).contains(loc)) {
                res.addMissed(k);

                continue;
            }

            GridCacheEntryEx entry = null;

            while (true) {
                try {
                    entry = cctx.dht().entryEx(k);

                    entry.unswap();

                    GridCacheEntryInfo info = entry.info();

                    if (info == null) {
                        assert entry.obsolete() : entry;

                        continue;
                    }

                    if (!info.isNew())
                        res.addInfo(info);

                    cctx.evicts().touch(entry, msg.topologyVersion());

                    break;
                } catch (GridCacheEntryRemovedException ignore) {
                    if (log.isDebugEnabled())
                        log.debug("Got removed entry: " + k);
                } catch (GridDhtInvalidPartitionException ignore) {
                    if (log.isDebugEnabled())
                        log.debug("Local node is no longer an owner: " + p);

                    res.addMissed(k);

                    break;
                }
            }
        }

        if (log.isDebugEnabled())
            log.debug("Sending force key response [node=" + node.id() + ", res=" + res + ']');

        cctx.io().send(node, res, cctx.ioPolicy());
    } catch (ClusterTopologyCheckedException ignore) {
        if (log.isDebugEnabled())
            log.debug("Received force key request from failed node (will ignore) [nodeId=" + node.id() + ", req=" + msg + ']');
    } catch (IgniteCheckedException e) {
        U.error(log, "Failed to reply to force key request [nodeId=" + node.id() + ", req=" + msg + ']', e);
    } finally {
        leaveBusy();
    }
}
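The inner while (true) loop is the usual Ignite idiom for working with cache entries that can be concurrently marked obsolete: GridCacheEntryRemovedException only means a fresh entry has to be obtained and the operation retried, while GridDhtInvalidPartitionException means the partition has left this node and the key should be given up. A stripped-down sketch of that retry idiom follows; readInfo is a hypothetical placeholder for the per-entry work (addInfo/addMissed above), and the enclosing method is assumed to handle IgniteCheckedException the same way processForceKeysRequest0 does.

// Retry until the per-entry work succeeds or the partition turns out to be invalid.
while (true) {
    try {
        GridCacheEntryEx entry = cctx.dht().entryEx(key);

        entry.unswap(); // Make sure the value is loaded before reading entry info.

        readInfo(entry); // Hypothetical placeholder for the per-entry work.

        break; // Done with this key.
    } catch (GridCacheEntryRemovedException ignore) {
        // Entry was concurrently removed/obsoleted: loop and obtain a fresh entry.
    } catch (GridDhtInvalidPartitionException ignore) {
        // Partition no longer belongs to this node: give up on this key.
        break;
    }
}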
Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition in project ignite by apache.
The class GridDhtPartitionSupplier, method handleDemandMessage.
/**
 * @param idx Index.
 * @param id Node UUID.
 * @param d Demand message.
 */
@SuppressWarnings("unchecked")
public void handleDemandMessage(int idx, UUID id, GridDhtPartitionDemandMessage d) {
    assert d != null;
    assert id != null;

    AffinityTopologyVersion cutTop = cctx.affinity().affinityTopologyVersion();
    AffinityTopologyVersion demTop = d.topologyVersion();

    T3<UUID, Integer, AffinityTopologyVersion> scId = new T3<>(id, idx, demTop);

    if (d.updateSequence() == -1) {
        // Demand node requested context cleanup.
        synchronized (scMap) {
            clearContext(scMap.remove(scId), log);

            return;
        }
    }

    if (cutTop.compareTo(demTop) > 0) {
        if (log.isDebugEnabled())
            log.debug("Demand request cancelled [current=" + cutTop + ", demanded=" + demTop + ", from=" + id + ", idx=" + idx + "]");

        return;
    }

    if (log.isDebugEnabled())
        log.debug("Demand request accepted [current=" + cutTop + ", demanded=" + demTop + ", from=" + id + ", idx=" + idx + "]");

    GridDhtPartitionSupplyMessage s = new GridDhtPartitionSupplyMessage(d.updateSequence(), cctx.cacheId(), d.topologyVersion(), cctx.deploymentEnabled());

    ClusterNode node = cctx.discovery().node(id);

    if (node == null)
        return; // Context will be cleaned at topology change.

    try {
        SupplyContext sctx;

        synchronized (scMap) {
            sctx = scMap.remove(scId);

            assert sctx == null || d.updateSequence() == sctx.updateSeq;
        }

        // Initial demand request should contain partitions list.
        if (sctx == null && d.partitions() == null)
            return;

        assert !(sctx != null && d.partitions() != null);

        long bCnt = 0;

        SupplyContextPhase phase = SupplyContextPhase.NEW;

        boolean newReq = true;

        long maxBatchesCnt = cctx.config().getRebalanceBatchesPrefetchCount();

        if (sctx != null) {
            phase = sctx.phase;

            maxBatchesCnt = 1;
        } else {
            if (log.isDebugEnabled())
                log.debug("Starting supplying rebalancing [cache=" + cctx.name() + ", fromNode=" + node.id() + ", partitionsCount=" + d.partitions().size() + ", topology=" + d.topologyVersion() + ", updateSeq=" + d.updateSequence() + ", idx=" + idx + "]");
        }

        Iterator<Integer> partIt = sctx != null ? sctx.partIt : d.partitions().iterator();

        while ((sctx != null && newReq) || partIt.hasNext()) {
            int part = sctx != null && newReq ? sctx.part : partIt.next();

            newReq = false;

            GridDhtLocalPartition loc;

            if (sctx != null && sctx.loc != null) {
                loc = sctx.loc;

                assert loc.reservations() > 0;
            } else {
                loc = top.localPartition(part, d.topologyVersion(), false);

                if (loc == null || loc.state() != OWNING || !loc.reserve()) {
                    // Reply with partition of "-1" to let sender know that
                    // this node is no longer an owner.
                    s.missed(part);

                    if (log.isDebugEnabled())
                        log.debug("Requested partition is not owned by local node [part=" + part + ", demander=" + id + ']');

                    continue;
                }
            }

            try {
                boolean partMissing = false;

                if (phase == SupplyContextPhase.NEW)
                    phase = SupplyContextPhase.OFFHEAP;

                if (phase == SupplyContextPhase.OFFHEAP) {
                    IgniteRebalanceIterator iter;

                    if (sctx == null || sctx.entryIt == null) {
                        iter = cctx.offheap().rebalanceIterator(part, d.topologyVersion(), d.partitionCounter(part));

                        if (!iter.historical())
                            s.clean(part);
                    } else
                        iter = (IgniteRebalanceIterator)sctx.entryIt;

                    while (iter.hasNext()) {
                        if (!cctx.affinity().partitionBelongs(node, part, d.topologyVersion())) {
                            // Demander no longer needs this partition,
                            // so we send '-1' partition and move on.
                            s.missed(part);

                            if (log.isDebugEnabled())
                                log.debug("Demanding node does not need requested partition [part=" + part + ", nodeId=" + id + ']');

                            partMissing = true;

                            if (sctx != null) {
                                sctx = new SupplyContext(phase, partIt, null, part, loc, d.updateSequence());
                            }

                            break;
                        }

                        if (s.messageSize() >= cctx.config().getRebalanceBatchSize()) {
                            if (++bCnt >= maxBatchesCnt) {
                                saveSupplyContext(scId, phase, partIt, part, iter, loc, d.topologyVersion(), d.updateSequence());

                                loc = null;

                                reply(node, d, s, scId);

                                return;
                            } else {
                                if (!reply(node, d, s, scId))
                                    return;

                                s = new GridDhtPartitionSupplyMessage(d.updateSequence(), cctx.cacheId(), d.topologyVersion(), cctx.deploymentEnabled());
                            }
                        }

                        CacheDataRow row = iter.next();

                        GridCacheEntryInfo info = new GridCacheEntryInfo();

                        info.key(row.key());
                        info.expireTime(row.expireTime());
                        info.version(row.version());
                        info.value(row.value());

                        if (preloadPred == null || preloadPred.apply(info))
                            s.addEntry0(part, info, cctx);
                        else {
                            if (log.isDebugEnabled())
                                log.debug("Rebalance predicate evaluated to false (will not send cache entry): " + info);

                            continue;
                        }

                        // Need to manually prepare cache message.
                        // TODO GG-11141.
//                        if (depEnabled && !prepared) {
//                            ClassLoader ldr = swapEntry.keyClassLoaderId() != null ?
//                                cctx.deploy().getClassLoader(swapEntry.keyClassLoaderId()) :
//                                swapEntry.valueClassLoaderId() != null ?
//                                    cctx.deploy().getClassLoader(swapEntry.valueClassLoaderId()) :
//                                    null;
//
//                            if (ldr == null)
//                                continue;
//
//                            if (ldr instanceof GridDeploymentInfo) {
//                                s.prepare((GridDeploymentInfo)ldr);
//
//                                prepared = true;
//                            }
//                        }
                    }

                    if (partMissing)
                        continue;
                }

                // Mark as last supply message.
                s.last(part);

                phase = SupplyContextPhase.NEW;

                sctx = null;
            } finally {
                if (loc != null)
                    loc.release();
            }
        }

        reply(node, d, s, scId);

        if (log.isDebugEnabled())
            log.debug("Finished supplying rebalancing [cache=" + cctx.name() + ", fromNode=" + node.id() + ", topology=" + d.topologyVersion() + ", updateSeq=" + d.updateSequence() + ", idx=" + idx + "]");
    } catch (IgniteCheckedException e) {
        U.error(log, "Failed to send partition supply message to node: " + id, e);
    } catch (IgniteSpiException e) {
        if (log.isDebugEnabled())
            log.debug("Failed to send message to node (current node is stopping?) [node=" + node.id() + ", msg=" + e.getMessage() + ']');
    }
}
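Note the reservation discipline around GridDhtLocalPartition in the supplier: a partition is streamed only if it is OWNING and reserve() succeeds, which pins it against concurrent eviction, and release() is always issued in the finally block unless the reservation was handed off to a saved supply context (loc = null). A minimal sketch of that discipline, assuming top, part, topVer and the OWNING state constant are in scope as in the method above:

GridDhtLocalPartition loc = top.localPartition(part, topVer, false);

// Stream data only from partitions this node actually owns, and pin them while reading.
if (loc == null || loc.state() != OWNING || !loc.reserve())
    return; // Not an owner (or eviction already started): nothing to supply.

try {
    // ... iterate partition data, e.g. via cctx.offheap().rebalanceIterator(part, topVer, cntr) ...
}
finally {
    loc.release(); // Unpin the partition so eviction can proceed again.
}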