Use of org.apache.ignite.internal.processors.cache.CacheEntryInfoCollection in project ignite by apache.
The class GridDhtPartitionSupplyMessage, method finishUnmarshal.
/** {@inheritDoc} */
@SuppressWarnings("ForLoopReplaceableByForEach")
@Override public void finishUnmarshal(GridCacheSharedContext ctx, ClassLoader ldr) throws IgniteCheckedException {
    super.finishUnmarshal(ctx, ldr);

    GridCacheContext cacheCtx = ctx.cacheContext(cacheId);

    for (CacheEntryInfoCollection col : infos().values()) {
        List<GridCacheEntryInfo> entries = col.infos();

        for (int i = 0; i < entries.size(); i++)
            entries.get(i).unmarshal(cacheCtx, ldr);
    }
}
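The suppressed ForLoopReplaceableByForEach inspection is the tell here: the indexed loop is most likely chosen over for-each to avoid allocating an Iterator per collection on this hot unmarshal path. A minimal standalone sketch of the same iteration pattern, assuming only the infos() accessor demonstrated above (EntryCollectionWalker and forEachEntry are hypothetical names, not Ignite API):

    // Hypothetical sketch: walk a CacheEntryInfoCollection with an index-based
    // loop, mirroring finishUnmarshal above. No Iterator instance is created,
    // unlike an enhanced for loop.
    import java.util.List;
    import java.util.function.Consumer;

    import org.apache.ignite.internal.processors.cache.CacheEntryInfoCollection;
    import org.apache.ignite.internal.processors.cache.GridCacheEntryInfo;

    final class EntryCollectionWalker {
        /** Applies {@code action} to every entry using an index-based loop. */
        static void forEachEntry(CacheEntryInfoCollection col, Consumer<GridCacheEntryInfo> action) {
            List<GridCacheEntryInfo> entries = col.infos();

            for (int i = 0; i < entries.size(); i++)
                action.accept(entries.get(i));
        }
    }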
Use of org.apache.ignite.internal.processors.cache.CacheEntryInfoCollection in project ignite by apache.
The class GridDhtPartitionDemander, method handleSupplyMessage.
/**
 * @param idx Index.
 * @param id Node id.
 * @param supply Supply message.
 */
public void handleSupplyMessage(int idx, final UUID id, final GridDhtPartitionSupplyMessage supply) {
    AffinityTopologyVersion topVer = supply.topologyVersion();

    final RebalanceFuture fut = rebalanceFut;

    ClusterNode node = cctx.node(id);

    if (node == null)
        return;

    // Current future has another update sequence; the supply message is based on another future.
    if (!fut.isActual(supply.updateSequence()))
        return;

    // Topology already changed (for the future the supply message is based on).
    if (topologyChanged(fut))
        return;

    if (log.isDebugEnabled())
        log.debug("Received supply message: " + supply);

    // Check whether there were class loading errors on unmarshal.
    if (supply.classError() != null) {
        U.warn(log, "Rebalancing from node cancelled [node=" + id + "]. Class got undeployed during preloading: " + supply.classError());

        fut.cancel(id);

        return;
    }

    final GridDhtPartitionTopology top = cctx.dht().topology();

    try {
        // Preload.
        for (Map.Entry<Integer, CacheEntryInfoCollection> e : supply.infos().entrySet()) {
            int p = e.getKey();

            if (cctx.affinity().partitionLocalNode(p, topVer)) {
                GridDhtLocalPartition part = top.localPartition(p, topVer, true);

                assert part != null;

                boolean last = supply.last().contains(p);

                if (part.state() == MOVING) {
                    boolean reserved = part.reserve();

                    assert reserved : "Failed to reserve partition [igniteInstanceName=" + cctx.igniteInstanceName() + ", cacheName=" + cctx.name() + ", part=" + part + ']';

                    part.lock();

                    try {
                        // Loop through all received entries and try to preload them.
                        for (GridCacheEntryInfo entry : e.getValue().infos()) {
                            if (!part.preloadingPermitted(entry.key(), entry.version())) {
                                if (log.isDebugEnabled())
                                    log.debug("Preloading is not permitted for entry due to evictions [key=" + entry.key() + ", ver=" + entry.version() + ']');

                                continue;
                            }

                            if (!preloadEntry(node, p, entry, topVer)) {
                                if (log.isDebugEnabled())
                                    log.debug("Got entries for invalid partition during preloading (will skip) [p=" + p + ", entry=" + entry + ']');

                                break;
                            }
                        }

                        // If message was last for this partition, then we take ownership.
                        if (last) {
                            top.own(part);

                            fut.partitionDone(id, p);

                            if (log.isDebugEnabled())
                                log.debug("Finished rebalancing partition: " + part);
                        }
                    }
                    finally {
                        part.unlock();
                        part.release();
                    }
                }
                else {
                    if (last)
                        fut.partitionDone(id, p);

                    if (log.isDebugEnabled())
                        log.debug("Skipping rebalancing partition (state is not MOVING): " + part);
                }
            }
            else {
                fut.partitionDone(id, p);

                if (log.isDebugEnabled())
                    log.debug("Skipping rebalancing partition (it does not belong on current node): " + p);
            }
        }

        // Only request partitions based on latest topology version.
        for (Integer miss : supply.missed()) {
            if (cctx.affinity().partitionLocalNode(miss, topVer))
                fut.partitionMissed(id, miss);
        }

        for (Integer miss : supply.missed())
            fut.partitionDone(id, miss);

        GridDhtPartitionDemandMessage d = new GridDhtPartitionDemandMessage(supply.updateSequence(), supply.topologyVersion(), cctx.cacheId());

        d.timeout(cctx.config().getRebalanceTimeout());
        d.topic(rebalanceTopics.get(idx));

        if (!topologyChanged(fut) && !fut.isDone()) {
            // Send demand message.
            cctx.io().sendOrderedMessage(node, rebalanceTopics.get(idx), d, cctx.ioPolicy(), cctx.config().getRebalanceTimeout());
        }
    }
    catch (IgniteCheckedException e) {
        if (log.isDebugEnabled())
            log.debug("Node left during rebalancing [node=" + node.id() + ", msg=" + e.getMessage() + ']');
    }
    catch (IgniteSpiException e) {
        if (log.isDebugEnabled())
            log.debug("Failed to send message to node (current node is stopping?) [node=" + node.id() + ", msg=" + e.getMessage() + ']');
    }
}
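The reserve/lock bracket around the preload loop is the critical discipline in this method: reservation pins the partition so it cannot be evicted while supplied entries are applied, and the lock serializes updates against it. A condensed sketch of that bracket, using only the GridDhtLocalPartition calls that appear above (PartitionBracket and withPartition are hypothetical names, and the import path is an assumption, as the class has moved between packages across Ignite versions):

    // Hypothetical sketch of the reservation/locking bracket used while applying
    // supplied entries. The Runnable stands in for the preload loop; the
    // IllegalStateException stands in for the assertion in the original code.
    import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition;

    final class PartitionBracket {
        /** Runs {@code work} while the partition is reserved and locked. */
        static void withPartition(GridDhtLocalPartition part, Runnable work) {
            // Reservation keeps the partition from being evicted mid-preload.
            if (!part.reserve())
                throw new IllegalStateException("Failed to reserve partition: " + part);

            part.lock();

            try {
                work.run();
            }
            finally {
                // Release in reverse order of acquisition.
                part.unlock();
                part.release();
            }
        }
    }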
Use of org.apache.ignite.internal.processors.cache.CacheEntryInfoCollection in project ignite by apache.
The class GridDhtPartitionSupplyMessage, method addEntry0.
/**
 * @param p Partition.
 * @param info Entry to add.
 * @param ctx Cache context.
 * @throws IgniteCheckedException If failed.
 */
void addEntry0(int p, GridCacheEntryInfo info, GridCacheContext ctx) throws IgniteCheckedException {
    assert info != null;
    assert info.key() != null : info;
    assert info.value() != null : info;

    // Need to call this method to initialize info properly.
    marshalInfo(info, ctx);

    msgSize += info.marshalledSize(ctx);

    CacheEntryInfoCollection infoCol = infos().get(p);

    if (infoCol == null) {
        msgSize += 4;

        infos().put(p, infoCol = new CacheEntryInfoCollection());

        infoCol.init();
    }

    infoCol.add(info);
}
Use of org.apache.ignite.internal.processors.cache.CacheEntryInfoCollection in project ignite by apache.
The class GridDhtPartitionSupplyMessage, method last.
/**
 * @param p Partition which was fully sent.
 */
void last(int p) {
    if (last == null)
        last = new HashSet<>();

    if (last.add(p)) {
        msgSize += 4;

        // If partition is empty, we need to add it.
        if (!infos().containsKey(p)) {
            CacheEntryInfoCollection infoCol = new CacheEntryInfoCollection();

            infoCol.init();

            infos().put(p, infoCol);
        }
    }
}
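The empty placeholder matters on the receiving side: handleSupplyMessage above only visits partitions present in supply.infos(), and it takes ownership only when supply.last() contains the partition, so a fully-sent-but-empty partition must still appear in the map. A minimal sketch of that receive-side decision over the two structures these snippets expose (OwnablePartitions and ownable are hypothetical names):

    // Hypothetical sketch: decide which supplied partitions can be owned after
    // preloading, given the per-partition infos() map and the last() set of
    // fully-sent partitions from the supply message.
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    import org.apache.ignite.internal.processors.cache.CacheEntryInfoCollection;

    final class OwnablePartitions {
        /** Returns the partitions that were both delivered and reported fully sent. */
        static Set<Integer> ownable(Map<Integer, CacheEntryInfoCollection> infos, Set<Integer> last) {
            Set<Integer> res = new HashSet<>();

            for (Integer p : infos.keySet()) {
                // Empty partitions still appear in infos() thanks to last(p),
                // so fully-sent-but-empty partitions are not lost here.
                if (last.contains(p))
                    res.add(p);
            }

            return res;
        }
    }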