Use of org.apache.ignite.internal.processors.cache.database.CacheDataRow in project ignite by apache.
The class GridPartitionedGetFuture, method localGet.
/**
 * @param key Key.
 * @param part Partition.
 * @param locVals Local values.
 * @return {@code True} if there is no need to further search value.
 */
private boolean localGet(KeyCacheObject key, int part, Map<K, V> locVals) {
    assert cctx.affinityNode() : this;

    GridDhtCacheAdapter<K, V> cache = cache();

    boolean readNoEntry = cctx.readNoEntry(expiryPlc, false);
    boolean evt = !skipVals;

    while (true) {
        try {
            boolean skipEntry = readNoEntry;

            EntryGetResult getRes = null;
            CacheObject v = null;
            GridCacheVersion ver = null;

            if (readNoEntry) {
                CacheDataRow row = cctx.offheap().read(key);

                if (row != null) {
                    long expireTime = row.expireTime();

                    if (expireTime == 0 || expireTime > U.currentTimeMillis()) {
                        v = row.value();

                        if (needVer)
                            ver = row.version();

                        if (evt) {
                            cctx.events().readEvent(key, null, row.value(), subjId, taskName, !deserializeBinary);
                        }
                    }
                    else
                        skipEntry = false;
                }
            }

            if (!skipEntry) {
                GridCacheEntryEx entry = cache.entryEx(key);

                // If our DHT cache does have the value, then we peek it.
                if (entry != null) {
                    boolean isNew = entry.isNewLocked();

                    if (needVer) {
                        getRes = entry.innerGetVersioned(null, null, /*update-metrics*/false, /*event*/evt,
                            subjId, null, taskName, expiryPlc, !deserializeBinary, null);

                        if (getRes != null) {
                            v = getRes.value();
                            ver = getRes.version();
                        }
                    }
                    else {
                        v = entry.innerGet(null, null, /*read-through*/false, /*update-metrics*/false,
                            /*event*/evt, subjId, null, taskName, expiryPlc, !deserializeBinary);
                    }

                    cache.context().evicts().touch(entry, topVer);

                    // Entry was not in memory or in swap, so we remove it from cache.
                    if (v == null) {
                        if (isNew && entry.markObsoleteIfEmpty(ver))
                            cache.removeEntry(entry);
                    }
                }
            }

            if (v != null) {
                cctx.addResult(locVals, key, v, skipVals, keepCacheObjects, deserializeBinary, true,
                    getRes, ver, 0, 0, needVer);

                return true;
            }

            boolean topStable = cctx.isReplicated() || topVer.equals(cctx.topology().topologyVersion());

            // Entry not found, do not continue search if topology did not change and there is no store.
            if (!cctx.readThroughConfigured() && (topStable || partitionOwned(part))) {
                if (!skipVals && cctx.config().isStatisticsEnabled())
                    cache.metrics0().onRead(false);

                return true;
            }

            return false;
        }
        catch (GridCacheEntryRemovedException ignored) {
            // No-op, will retry.
        }
        catch (GridDhtInvalidPartitionException ignored) {
            return false;
        }
        catch (IgniteCheckedException e) {
            onDone(e);

            return true;
        }
    }
}
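The read-no-entry branch above reduces to a simple TTL gate over the off-heap row: return the stored value only while the row is unexpired, otherwise fall through to the slower entry-based path. Below is a minimal self-contained sketch of that gate; DataRow and ReadNoEntry are hypothetical stand-ins for illustration, not Ignite types.

// Hypothetical stand-in for CacheDataRow: value plus expiration metadata.
interface DataRow<V> {
    V value();

    long expireTime(); // 0 means the row never expires.
}

final class ReadNoEntry {
    // Sketch of the read-no-entry fast path: a non-null result is the live value,
    // null means "expired or absent" and forces the entry-based fallback.
    static <V> V readIfAlive(DataRow<V> row, long nowMillis) {
        if (row == null)
            return null;

        long expireTime = row.expireTime();

        return expireTime == 0 || expireTime > nowMillis ? row.value() : null;
    }
}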
Use of org.apache.ignite.internal.processors.cache.database.CacheDataRow in project ignite by apache.
The class GridDhtPartitionSupplier, method handleDemandMessage.
/**
 * @param idx Index.
 * @param id Node UUID.
 * @param d Demand message.
 */
@SuppressWarnings("unchecked")
public void handleDemandMessage(int idx, UUID id, GridDhtPartitionDemandMessage d) {
    assert d != null;
    assert id != null;

    AffinityTopologyVersion cutTop = cctx.affinity().affinityTopologyVersion();
    AffinityTopologyVersion demTop = d.topologyVersion();

    T3<UUID, Integer, AffinityTopologyVersion> scId = new T3<>(id, idx, demTop);

    if (d.updateSequence() == -1) {
        // Demand node requested context cleanup.
        synchronized (scMap) {
            clearContext(scMap.remove(scId), log);

            return;
        }
    }

    if (cutTop.compareTo(demTop) > 0) {
        if (log.isDebugEnabled())
            log.debug("Demand request cancelled [current=" + cutTop + ", demanded=" + demTop +
                ", from=" + id + ", idx=" + idx + "]");

        return;
    }

    if (log.isDebugEnabled())
        log.debug("Demand request accepted [current=" + cutTop + ", demanded=" + demTop +
            ", from=" + id + ", idx=" + idx + "]");

    GridDhtPartitionSupplyMessage s = new GridDhtPartitionSupplyMessage(
        d.updateSequence(), cctx.cacheId(), d.topologyVersion(), cctx.deploymentEnabled());

    ClusterNode node = cctx.discovery().node(id);

    if (node == null)
        // Context will be cleaned at topology change.
        return;

    try {
        SupplyContext sctx;

        synchronized (scMap) {
            sctx = scMap.remove(scId);

            assert sctx == null || d.updateSequence() == sctx.updateSeq;
        }

        // Initial demand request should contain partitions list.
        if (sctx == null && d.partitions() == null)
            return;

        assert !(sctx != null && d.partitions() != null);

        long bCnt = 0;

        SupplyContextPhase phase = SupplyContextPhase.NEW;

        boolean newReq = true;

        long maxBatchesCnt = cctx.config().getRebalanceBatchesPrefetchCount();

        if (sctx != null) {
            phase = sctx.phase;

            maxBatchesCnt = 1;
        }
        else {
            if (log.isDebugEnabled())
                log.debug("Starting supplying rebalancing [cache=" + cctx.name() +
                    ", fromNode=" + node.id() + ", partitionsCount=" + d.partitions().size() +
                    ", topology=" + d.topologyVersion() + ", updateSeq=" + d.updateSequence() +
                    ", idx=" + idx + "]");
        }

        Iterator<Integer> partIt = sctx != null ? sctx.partIt : d.partitions().iterator();

        while ((sctx != null && newReq) || partIt.hasNext()) {
            int part = sctx != null && newReq ? sctx.part : partIt.next();

            newReq = false;

            GridDhtLocalPartition loc;

            if (sctx != null && sctx.loc != null) {
                loc = sctx.loc;

                assert loc.reservations() > 0;
            }
            else {
                loc = top.localPartition(part, d.topologyVersion(), false);

                if (loc == null || loc.state() != OWNING || !loc.reserve()) {
                    // Reply with partition of "-1" to let sender know that
                    // this node is no longer an owner.
                    s.missed(part);

                    if (log.isDebugEnabled())
                        log.debug("Requested partition is not owned by local node [part=" + part +
                            ", demander=" + id + ']');

                    continue;
                }
            }

            try {
                boolean partMissing = false;

                if (phase == SupplyContextPhase.NEW)
                    phase = SupplyContextPhase.OFFHEAP;

                if (phase == SupplyContextPhase.OFFHEAP) {
                    IgniteRebalanceIterator iter;

                    if (sctx == null || sctx.entryIt == null) {
                        iter = cctx.offheap().rebalanceIterator(part, d.topologyVersion(), d.partitionCounter(part));

                        if (!iter.historical())
                            s.clean(part);
                    }
                    else
                        iter = (IgniteRebalanceIterator)sctx.entryIt;

                    while (iter.hasNext()) {
                        if (!cctx.affinity().partitionBelongs(node, part, d.topologyVersion())) {
                            // Demander no longer needs this partition,
                            // so we send '-1' partition and move on.
                            s.missed(part);

                            if (log.isDebugEnabled())
                                log.debug("Demanding node does not need requested partition [part=" + part +
                                    ", nodeId=" + id + ']');

                            partMissing = true;

                            if (sctx != null)
                                sctx = new SupplyContext(phase, partIt, null, part, loc, d.updateSequence());

                            break;
                        }

                        if (s.messageSize() >= cctx.config().getRebalanceBatchSize()) {
                            if (++bCnt >= maxBatchesCnt) {
                                saveSupplyContext(scId, phase, partIt, part, iter, loc, d.topologyVersion(),
                                    d.updateSequence());

                                loc = null;

                                reply(node, d, s, scId);

                                return;
                            }
                            else {
                                if (!reply(node, d, s, scId))
                                    return;

                                s = new GridDhtPartitionSupplyMessage(
                                    d.updateSequence(), cctx.cacheId(), d.topologyVersion(), cctx.deploymentEnabled());
                            }
                        }

                        CacheDataRow row = iter.next();

                        GridCacheEntryInfo info = new GridCacheEntryInfo();

                        info.key(row.key());
                        info.expireTime(row.expireTime());
                        info.version(row.version());
                        info.value(row.value());

                        if (preloadPred == null || preloadPred.apply(info))
                            s.addEntry0(part, info, cctx);
                        else {
                            if (log.isDebugEnabled())
                                log.debug("Rebalance predicate evaluated to false (will not send cache entry): " + info);

                            continue;
                        }

                        // Need to manually prepare cache message.
                        // TODO GG-11141.
//                        if (depEnabled && !prepared) {
//                            ClassLoader ldr = swapEntry.keyClassLoaderId() != null ?
//                                cctx.deploy().getClassLoader(swapEntry.keyClassLoaderId()) :
//                                swapEntry.valueClassLoaderId() != null ?
//                                    cctx.deploy().getClassLoader(swapEntry.valueClassLoaderId()) :
//                                    null;
//
//                            if (ldr == null)
//                                continue;
//
//                            if (ldr instanceof GridDeploymentInfo) {
//                                s.prepare((GridDeploymentInfo)ldr);
//
//                                prepared = true;
//                            }
//                        }
                    }

                    if (partMissing)
                        continue;
                }

                // Mark as last supply message.
                s.last(part);

                phase = SupplyContextPhase.NEW;

                sctx = null;
            }
            finally {
                if (loc != null)
                    loc.release();
            }
        }

        reply(node, d, s, scId);

        if (log.isDebugEnabled())
            log.debug("Finished supplying rebalancing [cache=" + cctx.name() + ", fromNode=" + node.id() +
                ", topology=" + d.topologyVersion() + ", updateSeq=" + d.updateSequence() + ", idx=" + idx + "]");
    }
    catch (IgniteCheckedException e) {
        U.error(log, "Failed to send partition supply message to node: " + id, e);
    }
    catch (IgniteSpiException e) {
        if (log.isDebugEnabled())
            log.debug("Failed to send message to node (current node is stopping?) [node=" + node.id() +
                ", msg=" + e.getMessage() + ']');
    }
}
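Stripped of context bookkeeping, the supply loop copies each rebalance row into an entry-info DTO, filters it through the preload predicate, and flushes the outgoing message whenever it exceeds the configured batch size. Here is a simplified sketch of that batching; Row, EntryInfo and SupplyBatch are hypothetical stand-ins for CacheDataRow, GridCacheEntryInfo and the supply message, not Ignite APIs.

import java.util.Iterator;
import java.util.function.Predicate;

// Hypothetical stand-ins for CacheDataRow, GridCacheEntryInfo and the supply message.
record Row(Object key, Object val, long ver, long expireTime) { }
record EntryInfo(Object key, Object val, long ver, long expireTime) { }

interface SupplyBatch {
    int messageSize();        // Estimated serialized size so far.

    void add(EntryInfo info); // Append one entry to the batch.

    void flush();             // Send the batch and reset it.
}

final class SupplySketch {
    // Sketch: drain rows into batches, flushing whenever the current batch is full.
    static void supply(Iterator<Row> rows, SupplyBatch batch, int maxBatchBytes, Predicate<EntryInfo> preloadPred) {
        while (rows.hasNext()) {
            // Mirrors the s.messageSize() >= getRebalanceBatchSize() check above.
            if (batch.messageSize() >= maxBatchBytes)
                batch.flush();

            Row row = rows.next();

            EntryInfo info = new EntryInfo(row.key(), row.val(), row.ver(), row.expireTime());

            // Entries rejected by the preload predicate are silently skipped.
            if (preloadPred == null || preloadPred.test(info))
                batch.add(info);
        }

        batch.flush(); // Final partial batch, mirroring the trailing reply(...).
    }
}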
Use of org.apache.ignite.internal.processors.cache.database.CacheDataRow in project ignite by apache.
The class GridCacheQueryManager, method scanIterator.
/**
 * @param qry Query.
 * @param locNode Local node flag.
 * @return Full-scan row iterator.
 * @throws IgniteCheckedException If failed to get iterator.
 */
@SuppressWarnings({"unchecked"})
private GridCloseableIterator<IgniteBiTuple<K, V>> scanIterator(final GridCacheQueryAdapter<?> qry, boolean locNode)
    throws IgniteCheckedException {
    final IgniteBiPredicate<K, V> keyValFilter = qry.scanFilter();

    try {
        injectResources(keyValFilter);

        Integer part = qry.partition();

        if (cctx.isLocal())
            part = null;

        if (part != null && (part < 0 || part >= cctx.affinity().partitions()))
            return new GridEmptyCloseableIterator<>();

        final ExpiryPolicy plc = cctx.expiry();

        AffinityTopologyVersion topVer = GridQueryProcessor.getRequestAffinityTopologyVersion();

        if (topVer == null)
            topVer = cctx.affinity().affinityTopologyVersion();

        final boolean backups = qry.includeBackups() || cctx.isReplicated();

        final GridDhtLocalPartition locPart;
        final GridIterator<CacheDataRow> it;

        if (part != null) {
            final GridDhtCacheAdapter dht = cctx.isNear() ? cctx.near().dht() : cctx.dht();

            GridDhtLocalPartition locPart0 = dht.topology().localPartition(part, topVer, false);

            if (locPart0 == null || locPart0.state() != OWNING || !locPart0.reserve())
                throw new GridDhtUnreservedPartitionException(part, cctx.affinity().affinityTopologyVersion(),
                    "Partition can not be reserved");

            // Double-check under the reservation: the partition state may have changed concurrently.
            if (locPart0.state() != OWNING) {
                locPart0.release();

                throw new GridDhtUnreservedPartitionException(part, cctx.affinity().affinityTopologyVersion(),
                    "Partition can not be reserved");
            }

            locPart = locPart0;

            it = cctx.offheap().iterator(part);
        }
        else {
            locPart = null;

            it = cctx.offheap().iterator(true, backups, topVer);
        }

        return new PeekValueExpiryAwareIterator(it, plc, topVer, keyValFilter, qry.keepBinary(), locNode) {
            @Override protected void onClose() {
                super.onClose();

                if (locPart != null)
                    locPart.release();

                closeScanFilter(keyValFilter);
            }
        };
    }
    catch (IgniteCheckedException | RuntimeException e) {
        closeScanFilter(keyValFilter);

        throw e;
    }
}
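The subtle part of scanIterator is lifecycle, not iteration: a single-partition scan must hold a partition reservation for as long as the iterator is open and release it exactly once, on close. A generic sketch of that reserve-on-open, release-on-close discipline follows; Partition and ScanSketch are hypothetical stand-ins for illustration, not the GridDhtLocalPartition API itself.

import java.util.Iterator;
import java.util.function.Consumer;

// Hypothetical stand-in for a partition reservation API.
interface Partition {
    boolean reserve(); // Pins the partition so it cannot be evicted mid-scan.

    void release();
}

final class ScanSketch {
    // Sketch: run a scan while holding the reservation, releasing it exactly once
    // even if the consumer throws; mirrors onClose() in the anonymous iterator above.
    static <T> void scanReserved(Partition part, Iterator<T> rows, Consumer<T> consumer) {
        if (!part.reserve())
            throw new IllegalStateException("Partition can not be reserved");

        try {
            while (rows.hasNext())
                consumer.accept(rows.next());
        }
        finally {
            part.release();
        }
    }
}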
Use of org.apache.ignite.internal.processors.cache.database.CacheDataRow in project ignite by apache.
The class GridLocalAtomicCache, method getAllInternal.
/**
 * Entry point to all public API get methods.
 *
 * @param keys Keys to get.
 * @param storeEnabled Store enabled flag.
 * @param taskName Task name.
 * @param deserializeBinary Deserialize binary flag.
 * @param skipVals Skip values flag.
 * @param needVer Need version flag.
 * @return Key-value map.
 * @throws IgniteCheckedException If failed.
 */
@SuppressWarnings("ConstantConditions")
private Map<K, V> getAllInternal(@Nullable Collection<? extends K> keys, boolean storeEnabled, String taskName,
    boolean deserializeBinary, boolean skipVals, boolean needVer) throws IgniteCheckedException {
    ctx.checkSecurity(SecurityPermission.CACHE_READ);

    if (F.isEmpty(keys))
        return Collections.emptyMap();

    CacheOperationContext opCtx = ctx.operationContextPerCall();

    UUID subjId = ctx.subjectIdPerCall(null, opCtx);

    Map<K, V> vals = U.newHashMap(keys.size());

    if (keyCheck)
        validateCacheKeys(keys);

    final IgniteCacheExpiryPolicy expiry = expiryPolicy(opCtx != null ? opCtx.expiry() : null);

    boolean success = true;
    boolean readNoEntry = ctx.readNoEntry(expiry, false);
    final boolean evt = !skipVals;

    for (K key : keys) {
        if (key == null)
            throw new NullPointerException("Null key.");

        KeyCacheObject cacheKey = ctx.toCacheKeyObject(key);

        boolean skipEntry = readNoEntry;

        if (readNoEntry) {
            CacheDataRow row = ctx.offheap().read(cacheKey);

            if (row != null) {
                long expireTime = row.expireTime();

                if (expireTime == 0 || expireTime > U.currentTimeMillis()) {
                    ctx.addResult(vals, cacheKey, row.value(), skipVals, false, deserializeBinary, true,
                        null, row.version(), 0, 0, needVer);

                    if (configuration().isStatisticsEnabled() && !skipVals)
                        metrics0().onRead(true);

                    if (evt) {
                        ctx.events().readEvent(cacheKey, null, row.value(), subjId, taskName, !deserializeBinary);
                    }
                }
                else
                    skipEntry = false;
            }
            else
                success = false;
        }

        if (!skipEntry) {
            GridCacheEntryEx entry = null;

            while (true) {
                try {
                    entry = entryEx(cacheKey);

                    if (entry != null) {
                        CacheObject v;

                        if (needVer) {
                            EntryGetResult res = entry.innerGetVersioned(null, null, /*update-metrics*/false,
                                /*event*/evt, subjId, null, taskName, expiry, !deserializeBinary, null);

                            if (res != null)
                                ctx.addResult(vals, cacheKey, res, skipVals, false, deserializeBinary, true, needVer);
                            else
                                success = false;
                        }
                        else {
                            v = entry.innerGet(null, null, /*read-through*/false, /*update-metrics*/true,
                                /*event*/evt, subjId, null, taskName, expiry, !deserializeBinary);

                            if (v != null)
                                ctx.addResult(vals, cacheKey, v, skipVals, false, deserializeBinary, true, null, 0, 0);
                            else
                                success = false;
                        }
                    }
                    else {
                        if (!storeEnabled && configuration().isStatisticsEnabled() && !skipVals)
                            metrics0().onRead(false);

                        success = false;
                    }

                    break; // While.
                }
                catch (GridCacheEntryRemovedException ignored) {
                    // No-op, retry.
                }
                finally {
                    if (entry != null)
                        ctx.evicts().touch(entry, ctx.affinity().affinityTopologyVersion());
                }

                if (!success && storeEnabled)
                    break;
            }
        }
    }

    if (success || !storeEnabled)
        return vals;

    return getAllAsync(keys, null, opCtx == null || !opCtx.skipStore(), false, subjId, taskName, deserializeBinary,
        opCtx != null && opCtx.recovery(), /*force primary*/false, expiry, skipVals, /*can remap*/true, needVer).get();
}
Use of org.apache.ignite.internal.processors.cache.database.CacheDataRow in project ignite by apache.
The class CacheContinuousQueryManager, method executeQuery0.
/**
 * @param locLsnr Local listener.
 * @param clsr Closure creating the CacheContinuousQueryHandler.
 * @param bufSize Buffer size.
 * @param timeInterval Time interval.
 * @param autoUnsubscribe Auto unsubscribe flag.
 * @param internal Internal flag.
 * @param notifyExisting Notify existing flag.
 * @param loc Local flag.
 * @param keepBinary Keep binary flag.
 * @param onStart {@code True} if the query is started on cache start, in which case waiting for topology exchange is skipped.
 * @return Continuous routine ID.
 * @throws IgniteCheckedException In case of error.
 */
private UUID executeQuery0(CacheEntryUpdatedListener locLsnr, IgniteOutClosure<CacheContinuousQueryHandler> clsr,
    int bufSize, long timeInterval, boolean autoUnsubscribe, boolean internal, boolean notifyExisting,
    boolean loc, final boolean keepBinary, boolean onStart) throws IgniteCheckedException {
    cctx.checkSecurity(SecurityPermission.CACHE_READ);

    int taskNameHash = !internal && cctx.kernalContext().security().enabled() ?
        cctx.kernalContext().job().currentTaskNameHash() : 0;

    boolean skipPrimaryCheck = loc && cctx.config().getCacheMode() == CacheMode.REPLICATED && cctx.affinityNode();

    final CacheContinuousQueryHandler hnd = clsr.apply();

    hnd.taskNameHash(taskNameHash);
    hnd.skipPrimaryCheck(skipPrimaryCheck);
    hnd.notifyExisting(notifyExisting);
    hnd.internal(internal);
    hnd.keepBinary(keepBinary);
    hnd.localCache(cctx.isLocal());

    IgnitePredicate<ClusterNode> pred = (loc || cctx.config().getCacheMode() == CacheMode.LOCAL) ?
        F.nodeForNodeId(cctx.localNodeId()) : cctx.config().getNodeFilter();

    assert pred != null : cctx.config();

    UUID id = cctx.kernalContext().continuous().startRoutine(hnd, internal && loc, bufSize, timeInterval,
        autoUnsubscribe, pred).get();

    try {
        if (hnd.isQuery() && cctx.userCache() && !onStart)
            hnd.waitTopologyFuture(cctx.kernalContext());
    }
    catch (IgniteCheckedException e) {
        log.warning("Failed to start continuous query.", e);

        cctx.kernalContext().continuous().stopRoutine(id);

        throw new IgniteCheckedException("Failed to start continuous query.", e);
    }

    if (notifyExisting) {
        final Iterator<CacheDataRow> it = cctx.offheap().iterator(true, true, AffinityTopologyVersion.NONE);

        locLsnr.onUpdated(new Iterable<CacheEntryEvent>() {
            @Override public Iterator<CacheEntryEvent> iterator() {
                return new Iterator<CacheEntryEvent>() {
                    private CacheContinuousQueryEvent next;

                    {
                        advance();
                    }

                    @Override public boolean hasNext() {
                        return next != null;
                    }

                    @Override public CacheEntryEvent next() {
                        if (!hasNext())
                            throw new NoSuchElementException();

                        CacheEntryEvent next0 = next;

                        advance();

                        return next0;
                    }

                    @Override public void remove() {
                        throw new UnsupportedOperationException();
                    }

                    private void advance() {
                        next = null;

                        while (next == null) {
                            if (!it.hasNext())
                                break;

                            CacheDataRow e = it.next();

                            CacheContinuousQueryEntry entry = new CacheContinuousQueryEntry(cctx.cacheId(), CREATED,
                                e.key(), e.value(), null, keepBinary, 0, -1, null, (byte)0);

                            next = new CacheContinuousQueryEvent<>(
                                cctx.kernalContext().cache().jcache(cctx.name()), cctx, entry);

                            if (hnd.getEventFilter() != null && !hnd.getEventFilter().evaluate(next))
                                next = null;
                        }
                    }
                };
            }
        });
    }

    return id;
}
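The anonymous iterator above is the classic look-ahead shape: advance() eagerly finds the next element passing the filter, so hasNext() is a null check and next() hands out the precomputed element. The same shape extracted into a reusable generic class, as a sketch:

import java.util.Iterator;
import java.util.NoSuchElementException;
import java.util.function.Predicate;

// Look-ahead iterator: precomputes the next element that passes the filter,
// the same shape as the anonymous Iterator<CacheEntryEvent> above.
final class FilteringIterator<T> implements Iterator<T> {
    private final Iterator<T> src;
    private final Predicate<? super T> filter;

    private T next;

    FilteringIterator(Iterator<T> src, Predicate<? super T> filter) {
        this.src = src;
        this.filter = filter;

        advance();
    }

    @Override public boolean hasNext() {
        return next != null;
    }

    @Override public T next() {
        if (!hasNext())
            throw new NoSuchElementException();

        T next0 = next;

        advance();

        return next0;
    }

    private void advance() {
        next = null;

        while (next == null && src.hasNext()) {
            T candidate = src.next();

            if (filter.test(candidate))
                next = candidate;
        }
    }
}

For example, new FilteringIterator<>(rows.iterator(), r -> r != null) yields only the non-null rows of an underlying iterator, computing each answer at most one element ahead.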