Use of org.apache.ignite.internal.processors.cache.GridCacheEntryInfo in project ignite by apache.
The class GridDhtGetSingleFuture, method toEntryInfo.
/**
 * @param map Map to convert.
 * @return Entry info, or {@code null} if the map is empty.
 */
private GridCacheEntryInfo toEntryInfo(Map<KeyCacheObject, EntryGetResult> map) {
    if (map.isEmpty())
        return null;

    EntryGetResult val = map.get(key);

    assert val != null;

    GridCacheEntryInfo info = new GridCacheEntryInfo();

    info.cacheId(cctx.cacheId());
    info.key(key);
    info.value(skipVals ? null : (CacheObject)val.value());
    info.version(val.version());
    info.expireTime(val.expireTime());
    info.ttl(val.ttl());

    return info;
}
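GridCacheEntryInfo follows Ignite's overloaded-accessor convention: the same method name works as a setter when given an argument and as a getter without one. A minimal usage sketch of a hypothetical helper (not part of Ignite) that reads the populated fields back through those no-arg accessors:

/** Hypothetical helper: dumps the fields set in toEntryInfo() above. */
private static String describe(GridCacheEntryInfo info) {
    return "key=" + info.key() +
        ", val=" + info.value() +
        ", ver=" + info.version() +
        ", ttl=" + info.ttl() +
        ", expireTime=" + info.expireTime();
}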
Use of org.apache.ignite.internal.processors.cache.GridCacheEntryInfo in project ignite by apache.
The class GridDhtPreloader, method processForceKeysRequest0.
/**
 * @param node Node that originated the request.
 * @param msg Force keys message.
 */
private void processForceKeysRequest0(ClusterNode node, GridDhtForceKeysRequest msg) {
    if (!enterBusy())
        return;

    try {
        ClusterNode loc = cctx.localNode();

        GridDhtForceKeysResponse res = new GridDhtForceKeysResponse(cctx.cacheId(),
            msg.futureId(),
            msg.miniId(),
            cctx.deploymentEnabled());

        for (KeyCacheObject k : msg.keys()) {
            int p = cctx.affinity().partition(k);

            GridDhtLocalPartition locPart = top.localPartition(p, AffinityTopologyVersion.NONE, false);

            // If this node is no longer an owner.
            if (locPart == null && !top.owners(p).contains(loc)) {
                res.addMissed(k);

                continue;
            }

            GridCacheEntryEx entry = null;

            while (true) {
                try {
                    entry = cctx.dht().entryEx(k);

                    entry.unswap();

                    GridCacheEntryInfo info = entry.info();

                    if (info == null) {
                        assert entry.obsolete() : entry;

                        continue;
                    }

                    if (!info.isNew())
                        res.addInfo(info);

                    cctx.evicts().touch(entry, msg.topologyVersion());

                    break;
                }
                catch (GridCacheEntryRemovedException ignore) {
                    if (log.isDebugEnabled())
                        log.debug("Got removed entry: " + k);
                }
                catch (GridDhtInvalidPartitionException ignore) {
                    if (log.isDebugEnabled())
                        log.debug("Local node is no longer an owner: " + p);

                    res.addMissed(k);

                    break;
                }
            }
        }

        if (log.isDebugEnabled())
            log.debug("Sending force key response [node=" + node.id() + ", res=" + res + ']');

        cctx.io().send(node, res, cctx.ioPolicy());
    }
    catch (ClusterTopologyCheckedException ignore) {
        if (log.isDebugEnabled())
            log.debug("Received force key request from failed node (will ignore) [nodeId=" + node.id() +
                ", req=" + msg + ']');
    }
    catch (IgniteCheckedException e) {
        U.error(log, "Failed to reply to force key request [nodeId=" + node.id() + ", req=" + msg + ']', e);
    }
    finally {
        leaveBusy();
    }
}
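The while (true) block above is the usual Ignite-internal idiom for reading an entry that may be concurrently removed: a GridCacheEntryRemovedException (or a null info for an obsolete entry) means the entry must be re-fetched. A minimal sketch of that idiom as a hypothetical helper, with exception signatures assumed from the snippet:

/** Hypothetical helper (not an Ignite API): retries until the entry is read without being removed. */
private GridCacheEntryInfo readInfoWithRetry(KeyCacheObject k) throws IgniteCheckedException {
    while (true) {
        try {
            GridCacheEntryEx entry = cctx.dht().entryEx(k);

            entry.unswap();

            GridCacheEntryInfo info = entry.info();

            if (info != null)
                return info;

            // Entry turned obsolete while reading; loop and fetch a fresh one.
        }
        catch (GridCacheEntryRemovedException ignore) {
            // Entry was removed between lookup and read; retry.
        }
    }
}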
Use of org.apache.ignite.internal.processors.cache.GridCacheEntryInfo in project ignite by apache.
The class GridDhtPartitionSupplier, method handleDemandMessage.
/**
 * @param idx Index.
 * @param id Node UUID.
 * @param d Demand message.
 */
@SuppressWarnings("unchecked")
public void handleDemandMessage(int idx, UUID id, GridDhtPartitionDemandMessage d) {
    assert d != null;
    assert id != null;

    AffinityTopologyVersion cutTop = cctx.affinity().affinityTopologyVersion();
    AffinityTopologyVersion demTop = d.topologyVersion();

    T3<UUID, Integer, AffinityTopologyVersion> scId = new T3<>(id, idx, demTop);

    if (d.updateSequence() == -1) {
        // Demand node requested context cleanup.
        synchronized (scMap) {
            clearContext(scMap.remove(scId), log);

            return;
        }
    }

    if (cutTop.compareTo(demTop) > 0) {
        if (log.isDebugEnabled())
            log.debug("Demand request cancelled [current=" + cutTop + ", demanded=" + demTop +
                ", from=" + id + ", idx=" + idx + "]");

        return;
    }

    if (log.isDebugEnabled())
        log.debug("Demand request accepted [current=" + cutTop + ", demanded=" + demTop +
            ", from=" + id + ", idx=" + idx + "]");

    GridDhtPartitionSupplyMessage s = new GridDhtPartitionSupplyMessage(d.updateSequence(),
        cctx.cacheId(), d.topologyVersion(), cctx.deploymentEnabled());

    ClusterNode node = cctx.discovery().node(id);

    if (node == null)
        // Context will be cleaned at topology change.
        return;

    try {
        SupplyContext sctx;

        synchronized (scMap) {
            sctx = scMap.remove(scId);

            assert sctx == null || d.updateSequence() == sctx.updateSeq;
        }

        // Initial demand request should contain partitions list.
        if (sctx == null && d.partitions() == null)
            return;

        assert !(sctx != null && d.partitions() != null);

        long bCnt = 0;

        SupplyContextPhase phase = SupplyContextPhase.NEW;

        boolean newReq = true;

        long maxBatchesCnt = cctx.config().getRebalanceBatchesPrefetchCount();

        if (sctx != null) {
            phase = sctx.phase;

            maxBatchesCnt = 1;
        }
        else {
            if (log.isDebugEnabled())
                log.debug("Starting supplying rebalancing [cache=" + cctx.name() +
                    ", fromNode=" + node.id() + ", partitionsCount=" + d.partitions().size() +
                    ", topology=" + d.topologyVersion() + ", updateSeq=" + d.updateSequence() +
                    ", idx=" + idx + "]");
        }

        Iterator<Integer> partIt = sctx != null ? sctx.partIt : d.partitions().iterator();

        while ((sctx != null && newReq) || partIt.hasNext()) {
            int part = sctx != null && newReq ? sctx.part : partIt.next();

            newReq = false;

            GridDhtLocalPartition loc;

            if (sctx != null && sctx.loc != null) {
                loc = sctx.loc;

                assert loc.reservations() > 0;
            }
            else {
                loc = top.localPartition(part, d.topologyVersion(), false);

                if (loc == null || loc.state() != OWNING || !loc.reserve()) {
                    // Reply with partition of "-1" to let sender know that
                    // this node is no longer an owner.
                    s.missed(part);

                    if (log.isDebugEnabled())
                        log.debug("Requested partition is not owned by local node [part=" + part +
                            ", demander=" + id + ']');

                    continue;
                }
            }

            try {
                boolean partMissing = false;

                if (phase == SupplyContextPhase.NEW)
                    phase = SupplyContextPhase.OFFHEAP;

                if (phase == SupplyContextPhase.OFFHEAP) {
                    IgniteRebalanceIterator iter;

                    if (sctx == null || sctx.entryIt == null) {
                        iter = cctx.offheap().rebalanceIterator(part, d.topologyVersion(), d.partitionCounter(part));

                        if (!iter.historical())
                            s.clean(part);
                    }
                    else
                        iter = (IgniteRebalanceIterator)sctx.entryIt;

                    while (iter.hasNext()) {
                        if (!cctx.affinity().partitionBelongs(node, part, d.topologyVersion())) {
                            // Demander no longer needs this partition,
                            // so we send '-1' partition and move on.
                            s.missed(part);

                            if (log.isDebugEnabled())
                                log.debug("Demanding node does not need requested partition " +
                                    "[part=" + part + ", nodeId=" + id + ']');

                            partMissing = true;

                            if (sctx != null) {
                                sctx = new SupplyContext(phase, partIt, null, part, loc, d.updateSequence());
                            }

                            break;
                        }

                        if (s.messageSize() >= cctx.config().getRebalanceBatchSize()) {
                            if (++bCnt >= maxBatchesCnt) {
                                saveSupplyContext(scId, phase, partIt, part, iter, loc,
                                    d.topologyVersion(), d.updateSequence());

                                loc = null;

                                reply(node, d, s, scId);

                                return;
                            }
                            else {
                                if (!reply(node, d, s, scId))
                                    return;

                                s = new GridDhtPartitionSupplyMessage(d.updateSequence(),
                                    cctx.cacheId(), d.topologyVersion(), cctx.deploymentEnabled());
                            }
                        }

                        CacheDataRow row = iter.next();

                        GridCacheEntryInfo info = new GridCacheEntryInfo();

                        info.key(row.key());
                        info.expireTime(row.expireTime());
                        info.version(row.version());
                        info.value(row.value());

                        if (preloadPred == null || preloadPred.apply(info))
                            s.addEntry0(part, info, cctx);
                        else {
                            if (log.isDebugEnabled())
                                log.debug("Rebalance predicate evaluated to false (will not send " +
                                    "cache entry): " + info);

                            continue;
                        }

                        // Need to manually prepare cache message.
                        // TODO GG-11141.
                        // if (depEnabled && !prepared) {
                        //     ClassLoader ldr = swapEntry.keyClassLoaderId() != null ?
                        //         cctx.deploy().getClassLoader(swapEntry.keyClassLoaderId()) :
                        //         swapEntry.valueClassLoaderId() != null ?
                        //             cctx.deploy().getClassLoader(swapEntry.valueClassLoaderId()) :
                        //             null;
                        //
                        //     if (ldr == null)
                        //         continue;
                        //
                        //     if (ldr instanceof GridDeploymentInfo) {
                        //         s.prepare((GridDeploymentInfo)ldr);
                        //
                        //         prepared = true;
                        //     }
                        // }
                    }

                    if (partMissing)
                        continue;
                }

                // Mark as last supply message.
                s.last(part);

                phase = SupplyContextPhase.NEW;

                sctx = null;
            }
            finally {
                if (loc != null)
                    loc.release();
            }
        }

        reply(node, d, s, scId);

        if (log.isDebugEnabled())
            log.debug("Finished supplying rebalancing [cache=" + cctx.name() +
                ", fromNode=" + node.id() + ", topology=" + d.topologyVersion() +
                ", updateSeq=" + d.updateSequence() + ", idx=" + idx + "]");
    }
    catch (IgniteCheckedException e) {
        U.error(log, "Failed to send partition supply message to node: " + id, e);
    }
    catch (IgniteSpiException e) {
        if (log.isDebugEnabled())
            log.debug("Failed to send message to node (current node is stopping?) [node=" + node.id() +
                ", msg=" + e.getMessage() + ']');
    }
}
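Inside the rebalance loop, each CacheDataRow returned by the off-heap rebalance iterator is copied field by field into a GridCacheEntryInfo before being added to the supply message. A hypothetical extraction of that copy (not an Ignite API), using only the accessors shown above:

/** Hypothetical helper: copies a data row into a transport-level entry info. */
private static GridCacheEntryInfo rowToInfo(CacheDataRow row) {
    GridCacheEntryInfo info = new GridCacheEntryInfo();

    info.key(row.key());
    info.expireTime(row.expireTime());
    info.version(row.version());
    info.value(row.value());

    return info;
}

Keeping the info as a plain holder lets the supply message decide later when to marshal the key and value (see prepareMarshal and finishUnmarshal below).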
Use of org.apache.ignite.internal.processors.cache.GridCacheEntryInfo in project ignite by apache.
The class GridDhtPartitionSupplyMessage, method finishUnmarshal.
/** {@inheritDoc} */
@SuppressWarnings("ForLoopReplaceableByForEach")
@Override public void finishUnmarshal(GridCacheSharedContext ctx, ClassLoader ldr) throws IgniteCheckedException {
    super.finishUnmarshal(ctx, ldr);

    GridCacheContext cacheCtx = ctx.cacheContext(cacheId);

    for (CacheEntryInfoCollection col : infos().values()) {
        List<GridCacheEntryInfo> entries = col.infos();

        for (int i = 0; i < entries.size(); i++)
            entries.get(i).unmarshal(cacheCtx, ldr);
    }
}
Use of org.apache.ignite.internal.processors.cache.GridCacheEntryInfo in project ignite by apache.
The class GridDhtForceKeysResponse, method prepareMarshal.
/** {@inheritDoc} */
@Override public void prepareMarshal(GridCacheSharedContext ctx) throws IgniteCheckedException {
    super.prepareMarshal(ctx);

    GridCacheContext cctx = ctx.cacheContext(cacheId);

    if (missedKeys != null)
        prepareMarshalCacheObjects(missedKeys, cctx);

    if (infos != null) {
        for (GridCacheEntryInfo info : infos)
            info.marshal(cctx);
    }

    if (err != null && errBytes == null)
        errBytes = U.marshal(ctx, err);
}
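prepareMarshal and finishUnmarshal are symmetric: the sender serializes each GridCacheEntryInfo before the message hits the wire, and the receiver restores it using the class loader resolved for the cache. A minimal sketch of that pairing on a single info, as a hypothetical round-trip helper (in practice the two calls run on different nodes):

/** Hypothetical illustration of the marshal/unmarshal symmetry used above. */
static void roundTrip(GridCacheEntryInfo info, GridCacheContext cctx, ClassLoader ldr)
    throws IgniteCheckedException {
    // Sender side: serialize key and value into their binary form.
    info.marshal(cctx);

    // Receiver side: restore them with the cache's class loader.
    info.unmarshal(cctx, ldr);
}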