Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionState in project ignite by apache.
From the class GridCacheDatabaseSharedManager, the readPartitionState method.
/**
 * @param grpCtx Group context.
 * @param partId Partition ID.
 * @return Partition state.
 */
public GridDhtPartitionState readPartitionState(CacheGroupContext grpCtx, int partId) {
    int grpId = grpCtx.groupId();

    PageMemoryEx pageMem = (PageMemoryEx)grpCtx.dataRegion().pageMemory();

    try {
        if (storeMgr.exists(grpId, partId)) {
            storeMgr.ensure(grpId, partId);

            if (storeMgr.pages(grpId, partId) > 1) {
                long partMetaId = pageMem.partitionMetaPageId(grpId, partId);
                long partMetaPage = pageMem.acquirePage(grpId, partMetaId);

                try {
                    long pageAddr = pageMem.readLock(grpId, partMetaId, partMetaPage);

                    try {
                        if (PageIO.getType(pageAddr) == PageIO.T_PART_META) {
                            PagePartitionMetaIO io = PagePartitionMetaIO.VERSIONS.forPage(pageAddr);

                            GridDhtPartitionState state = GridDhtPartitionState.fromOrdinal((int)io.getPartitionState(pageAddr));

                            if (state == null)
                                state = GridDhtPartitionState.MOVING;

                            return state;
                        }
                    }
                    finally {
                        pageMem.readUnlock(grpId, partMetaId, partMetaPage);
                    }
                }
                finally {
                    pageMem.releasePage(grpId, partMetaId, partMetaPage);
                }
            }
        }
    }
    catch (IgniteCheckedException e) {
        U.error(log, "Failed to read partition state (will default to MOVING) [grp=" + grpCtx + ", partId=" + partId + "]", e);
    }

    return GridDhtPartitionState.MOVING;
}
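
In readPartitionState, everything hinges on the fallback to MOVING: it is returned when the partition store does not exist, when the meta page has an unexpected type, when the stored ordinal does not map to a known state, and when an IgniteCheckedException is thrown. The null check after fromOrdinal implies that fromOrdinal returns null for an out-of-range ordinal rather than throwing. Below is a minimal, self-contained sketch of that defensive round-trip, using an illustrative PartState enum rather than Ignite's actual class:

/** Illustrative stand-in for GridDhtPartitionState's ordinal round-trip. */
enum PartState {
    MOVING, OWNING, RENTING, EVICTED, LOST;

    private static final PartState[] VALS = values();

    /** Returns null instead of throwing when the stored ordinal is invalid. */
    static PartState fromOrdinal(int ord) {
        return ord >= 0 && ord < VALS.length ? VALS[ord] : null;
    }
}

class PartStateDemo {
    public static void main(String[] args) {
        // A corrupted or future-format meta page could hold any value, so the
        // hypothetical ordinal below matches no constant.
        int storedOrdinal = 42;

        PartState state = PartState.fromOrdinal(storedOrdinal);

        if (state == null)
            state = PartState.MOVING;

        System.out.println(state); // prints MOVING
    }
}

Defaulting to MOVING is the safe choice: a MOVING partition will be rebalanced from current owners rather than served as if its data were up to date.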
Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionState in project ignite by apache.
From the class GridDhtPartitionsExchangeFuture, the assignPartitionStates method.
/**
 * @param top Topology to assign.
 */
private void assignPartitionStates(GridDhtPartitionTopology top) {
    Map<Integer, CounterWithNodes> maxCntrs = new HashMap<>();
    Map<Integer, Long> minCntrs = new HashMap<>();

    for (Map.Entry<UUID, GridDhtPartitionsSingleMessage> e : msgs.entrySet()) {
        CachePartitionPartialCountersMap nodeCntrs = e.getValue().partitionUpdateCounters(top.groupId(), top.partitions());

        assert nodeCntrs != null;

        for (int i = 0; i < nodeCntrs.size(); i++) {
            int p = nodeCntrs.partitionAt(i);

            UUID uuid = e.getKey();

            GridDhtPartitionState state = top.partitionState(uuid, p);

            if (state != GridDhtPartitionState.OWNING && state != GridDhtPartitionState.MOVING)
                continue;

            long cntr = state == GridDhtPartitionState.MOVING ? nodeCntrs.initialUpdateCounterAt(i) : nodeCntrs.updateCounterAt(i);

            Long minCntr = minCntrs.get(p);

            if (minCntr == null || minCntr > cntr)
                minCntrs.put(p, cntr);

            if (state != GridDhtPartitionState.OWNING)
                continue;

            CounterWithNodes maxCntr = maxCntrs.get(p);

            if (maxCntr == null || cntr > maxCntr.cnt)
                maxCntrs.put(p, new CounterWithNodes(cntr, uuid));
            else if (cntr == maxCntr.cnt)
                maxCntr.nodes.add(uuid);
        }
    }

    // Also must process counters from the local node.
    for (GridDhtLocalPartition part : top.currentLocalPartitions()) {
        GridDhtPartitionState state = top.partitionState(cctx.localNodeId(), part.id());

        if (state != GridDhtPartitionState.OWNING && state != GridDhtPartitionState.MOVING)
            continue;

        final long cntr = state == GridDhtPartitionState.MOVING ? part.initialUpdateCounter() : part.updateCounter();

        Long minCntr = minCntrs.get(part.id());

        if (minCntr == null || minCntr > cntr)
            minCntrs.put(part.id(), cntr);

        if (state != GridDhtPartitionState.OWNING)
            continue;

        CounterWithNodes maxCntr = maxCntrs.get(part.id());

        if (maxCntr == null && cntr == 0) {
            CounterWithNodes cntrObj = new CounterWithNodes(0, cctx.localNodeId());

            for (UUID nodeId : msgs.keySet()) {
                if (top.partitionState(nodeId, part.id()) == GridDhtPartitionState.OWNING)
                    cntrObj.nodes.add(nodeId);
            }

            maxCntrs.put(part.id(), cntrObj);
        }
        else if (maxCntr == null || cntr > maxCntr.cnt)
            maxCntrs.put(part.id(), new CounterWithNodes(cntr, cctx.localNodeId()));
        else if (cntr == maxCntr.cnt)
            maxCntr.nodes.add(cctx.localNodeId());
    }

    int entryLeft = maxCntrs.size();

    Map<Integer, Map<Integer, Long>> partHistReserved0 = partHistReserved;

    Map<Integer, Long> localReserved = partHistReserved0 != null ? partHistReserved0.get(top.groupId()) : null;

    Set<Integer> haveHistory = new HashSet<>();

    for (Map.Entry<Integer, Long> e : minCntrs.entrySet()) {
        int p = e.getKey();
        long minCntr = e.getValue();

        CounterWithNodes maxCntrObj = maxCntrs.get(p);

        long maxCntr = maxCntrObj != null ? maxCntrObj.cnt : 0;

        // If minimal counter is zero, do clean preloading.
        if (minCntr == 0 || minCntr == maxCntr)
            continue;

        if (localReserved != null) {
            Long localCntr = localReserved.get(p);

            if (localCntr != null && localCntr <= minCntr && maxCntrObj.nodes.contains(cctx.localNodeId())) {
                partHistSuppliers.put(cctx.localNodeId(), top.groupId(), p, minCntr);

                haveHistory.add(p);

                continue;
            }
        }

        for (Map.Entry<UUID, GridDhtPartitionsSingleMessage> e0 : msgs.entrySet()) {
            Long histCntr = e0.getValue().partitionHistoryCounters(top.groupId()).get(p);

            if (histCntr != null && histCntr <= minCntr && maxCntrObj.nodes.contains(e0.getKey())) {
                partHistSuppliers.put(e0.getKey(), top.groupId(), p, minCntr);

                haveHistory.add(p);

                break;
            }
        }
    }

    for (Map.Entry<Integer, CounterWithNodes> e : maxCntrs.entrySet()) {
        int p = e.getKey();
        long maxCntr = e.getValue().cnt;

        entryLeft--;

        if (entryLeft != 0 && maxCntr == 0)
            continue;

        Set<UUID> nodesToReload = top.setOwners(p, e.getValue().nodes, haveHistory.contains(p), entryLeft == 0);

        for (UUID nodeId : nodesToReload)
            partsToReload.put(nodeId, top.groupId(), p);
    }
}
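
assignPartitionStates elects partition owners by update counter. For every partition it tracks the maximum counter reported by any OWNING node (maxCntrs), together with all nodes that reported exactly that counter, and the minimum counter across OWNING and MOVING nodes (minCntrs), which decides between historical (WAL-based) and full rebalancing. The sketch below isolates the max-counter election, assuming only that CounterWithNodes pairs a counter value with the set of node IDs holding it, as the usage above suggests; the class and values here are illustrative, not Ignite's internals:

import java.util.*;

/** Illustrative stand-in for the internal CounterWithNodes holder. */
class CounterWithNodes {
    final long cnt;
    final Set<UUID> nodes = new HashSet<>();

    CounterWithNodes(long cnt, UUID firstNode) {
        this.cnt = cnt;
        nodes.add(firstNode);
    }
}

class MaxCounterElectionDemo {
    public static void main(String[] args) {
        // Hypothetical (nodeId, updateCounter) pairs for a single partition,
        // as if gathered from exchange single messages.
        Map<UUID, Long> counters = new LinkedHashMap<>();
        counters.put(UUID.randomUUID(), 10L);
        counters.put(UUID.randomUUID(), 25L);
        counters.put(UUID.randomUUID(), 25L);

        CounterWithNodes max = null;

        // Same shape as the loops above: a strictly greater counter replaces
        // the holder, an equal counter joins the node set.
        for (Map.Entry<UUID, Long> e : counters.entrySet()) {
            if (max == null || e.getValue() > max.cnt)
                max = new CounterWithNodes(e.getValue(), e.getKey());
            else if (e.getValue() == max.cnt)
                max.nodes.add(e.getKey());
        }

        System.out.println("max=" + max.cnt + ", owners=" + max.nodes.size()); // max=25, owners=2
    }
}

The node set collected at the maximum counter is what top.setOwners(...) receives; the nodes it returns are scheduled for reload, and a partition is rebalanced historically only if haveHistory recorded a supplier whose reserved history counter is at or below that partition's minimal counter.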