Use of org.apache.ignite.internal.processors.cache.persistence.CacheDataRow in project ignite by apache.
In the class GridCacheAdapter, method getAllAsync0:
/**
* @param keys Keys.
* @param readerArgs Near cache reader will be added if not null.
* @param readThrough Read-through flag.
* @param checkTx Check local transaction flag.
* @param subjId Subject ID.
* @param taskName Task name.
* @param deserializeBinary Deserialize binary flag.
* @param expiry Expiry policy.
* @param skipVals Skip values flag.
* @param keepCacheObjects Keep cache objects.
* @param recovery Recovery mode flag.
* @param needVer If {@code true} returns values as tuples containing value and version.
* @return Future.
*/
protected final <K1, V1> IgniteInternalFuture<Map<K1, V1>> getAllAsync0(@Nullable final Collection<KeyCacheObject> keys, @Nullable final ReaderArguments readerArgs, final boolean readThrough, boolean checkTx, @Nullable final UUID subjId, final String taskName, final boolean deserializeBinary, @Nullable final IgniteCacheExpiryPolicy expiry, final boolean skipVals, final boolean keepCacheObjects, final boolean recovery, final boolean needVer) {
if (F.isEmpty(keys))
return new GridFinishedFuture<>(Collections.<K1, V1>emptyMap());
GridNearTxLocal tx = null;
if (checkTx) {
try {
checkJta();
} catch (IgniteCheckedException e) {
return new GridFinishedFuture<>(e);
}
tx = ctx.tm().threadLocalTx(ctx);
}
if (tx == null || tx.implicit()) {
Map<KeyCacheObject, EntryGetResult> misses = null;
Set<GridCacheEntryEx> newLocalEntries = null;
final AffinityTopologyVersion topVer = tx == null ? ctx.affinity().affinityTopologyVersion() : tx.topologyVersion();
try {
int keysSize = keys.size();
GridDhtTopologyFuture topFut = ctx.shared().exchange().lastFinishedFuture();
Throwable ex = topFut != null ? topFut.validateCache(ctx, recovery, /*read*/true, null, keys) : null;
if (ex != null)
return new GridFinishedFuture<>(ex);
final Map<K1, V1> map = keysSize == 1 ? (Map<K1, V1>) new IgniteBiTuple<>() : U.<K1, V1>newHashMap(keysSize);
final boolean storeEnabled = !skipVals && readThrough && ctx.readThrough();
boolean readNoEntry = ctx.readNoEntry(expiry, readerArgs != null);
for (KeyCacheObject key : keys) {
while (true) {
try {
EntryGetResult res = null;
boolean evt = !skipVals;
boolean updateMetrics = !skipVals;
GridCacheEntryEx entry = null;
boolean skipEntry = readNoEntry;
if (readNoEntry) {
CacheDataRow row = ctx.offheap().read(ctx, key);
if (row != null) {
long expireTime = row.expireTime();
if (expireTime != 0) {
if (expireTime > U.currentTimeMillis()) {
res = new EntryGetWithTtlResult(row.value(), row.version(), false, expireTime, 0);
} else
skipEntry = false;
} else
res = new EntryGetResult(row.value(), row.version(), false);
}
if (res != null) {
if (evt) {
ctx.events().readEvent(key, null, row.value(), subjId, taskName, !deserializeBinary);
}
if (updateMetrics && ctx.statisticsEnabled())
ctx.cache().metrics0().onRead(true);
} else if (storeEnabled)
skipEntry = false;
}
if (!skipEntry) {
boolean isNewLocalEntry = this.map.getEntry(ctx, key) == null;
entry = entryEx(key);
if (entry == null) {
if (!skipVals && ctx.statisticsEnabled())
ctx.cache().metrics0().onRead(false);
break;
}
if (isNewLocalEntry) {
if (newLocalEntries == null)
newLocalEntries = new HashSet<>();
newLocalEntries.add(entry);
}
if (storeEnabled) {
res = entry.innerGetAndReserveForLoad(updateMetrics, evt, subjId, taskName, expiry, !deserializeBinary, readerArgs);
assert res != null;
if (res.value() == null) {
if (misses == null)
misses = new HashMap<>();
misses.put(key, res);
res = null;
}
} else {
res = entry.innerGetVersioned(null, null, updateMetrics, evt, subjId, null, taskName, expiry, !deserializeBinary, readerArgs);
if (res == null)
ctx.evicts().touch(entry, topVer);
}
}
if (res != null) {
ctx.addResult(map, key, res, skipVals, keepCacheObjects, deserializeBinary, true, needVer);
if (entry != null && (tx == null || (!tx.implicit() && tx.isolation() == READ_COMMITTED)))
ctx.evicts().touch(entry, topVer);
if (keysSize == 1)
// Safe to return because no locks are required in READ_COMMITTED mode.
return new GridFinishedFuture<>(map);
}
break;
} catch (GridCacheEntryRemovedException ignored) {
if (log.isDebugEnabled())
log.debug("Got removed entry in getAllAsync(..) method (will retry): " + key);
}
}
}
if (storeEnabled && misses != null) {
final Map<KeyCacheObject, EntryGetResult> loadKeys = misses;
final IgniteTxLocalAdapter tx0 = tx;
final Collection<KeyCacheObject> loaded = new HashSet<>();
return new GridEmbeddedFuture(ctx.closures().callLocalSafe(ctx.projectSafe(new GPC<Map<K1, V1>>() {
@Override
public Map<K1, V1> call() throws Exception {
ctx.store().loadAll(null/*tx*/, loadKeys.keySet(), new CI2<KeyCacheObject, Object>() {
@Override
public void apply(KeyCacheObject key, Object val) {
EntryGetResult res = loadKeys.get(key);
if (res == null || val == null)
return;
loaded.add(key);
CacheObject cacheVal = ctx.toCacheObject(val);
while (true) {
GridCacheEntryEx entry = null;
try {
ctx.shared().database().ensureFreeSpace(ctx.dataRegion());
} catch (IgniteCheckedException e) {
// Wrap errors (will be unwrapped).
throw new GridClosureException(e);
}
ctx.shared().database().checkpointReadLock();
try {
entry = entryEx(key);
entry.unswap();
EntryGetResult verVal = entry.versionedValue(cacheVal, res.version(), null, expiry, readerArgs);
if (log.isDebugEnabled())
log.debug("Set value loaded from store into entry [" + "oldVer=" + res.version() + ", newVer=" + verVal.version() + ", " + "entry=" + entry + ']');
// Don't put key-value pair into result map if value is null.
if (verVal.value() != null) {
ctx.addResult(map, key, verVal, skipVals, keepCacheObjects, deserializeBinary, true, needVer);
}
if (tx0 == null || (!tx0.implicit() && tx0.isolation() == READ_COMMITTED))
ctx.evicts().touch(entry, topVer);
break;
} catch (GridCacheEntryRemovedException ignore) {
if (log.isDebugEnabled())
log.debug("Got removed entry during getAllAsync (will retry): " + entry);
} catch (IgniteCheckedException e) {
// Wrap errors (will be unwrapped).
throw new GridClosureException(e);
} finally {
ctx.shared().database().checkpointReadUnlock();
}
}
}
});
clearReservationsIfNeeded(topVer, loadKeys, loaded, tx0);
return map;
}
}), true), new C2<Map<K, V>, Exception, IgniteInternalFuture<Map<K, V>>>() {
@Override
public IgniteInternalFuture<Map<K, V>> apply(Map<K, V> map, Exception e) {
if (e != null) {
clearReservationsIfNeeded(topVer, loadKeys, loaded, tx0);
return new GridFinishedFuture<>(e);
}
if (tx0 == null || (!tx0.implicit() && tx0.isolation() == READ_COMMITTED)) {
Collection<KeyCacheObject> notFound = new HashSet<>(loadKeys.keySet());
notFound.removeAll(loaded);
// Touch entries that were not found in store.
for (KeyCacheObject key : notFound) {
GridCacheEntryEx entry = peekEx(key);
if (entry != null)
ctx.evicts().touch(entry, topVer);
}
}
// There were no misses.
return new GridFinishedFuture<>(Collections.<K, V>emptyMap());
}
}, new C2<Map<K1, V1>, Exception, Map<K1, V1>>() {
@Override
public Map<K1, V1> apply(Map<K1, V1> loaded, Exception e) {
if (e == null)
map.putAll(loaded);
return map;
}
});
} else {
// Misses can be non-zero only if store is enabled.
assert misses == null;
}
return new GridFinishedFuture<>(map);
} catch (RuntimeException | AssertionError e) {
if (misses != null) {
for (KeyCacheObject key0 : misses.keySet()) ctx.evicts().touch(peekEx(key0), topVer);
}
if (newLocalEntries != null) {
for (GridCacheEntryEx entry : newLocalEntries) removeEntry(entry);
}
return new GridFinishedFuture<>(e);
} catch (IgniteCheckedException e) {
return new GridFinishedFuture<>(e);
}
} else {
return asyncOp(tx, new AsyncOp<Map<K1, V1>>(keys) {
@Override
public IgniteInternalFuture<Map<K1, V1>> op(GridNearTxLocal tx, AffinityTopologyVersion readyTopVer) {
return tx.getAllAsync(ctx, readyTopVer, keys, deserializeBinary, skipVals, false, !readThrough, recovery, needVer);
}
}, ctx.operationContextPerCall(), /*retry*/false);
}
}
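The readNoEntry branch above is where CacheDataRow is consumed directly: the row is read from page memory and converted into an EntryGetResult without materializing a cache entry. A minimal sketch of that branch, extracted into a hypothetical helper (the name readRowNoEntry is invented; every call it makes appears in the snippet above, and it is assumed to sit inside GridCacheAdapter so the ctx field is in scope):
@Nullable private EntryGetResult readRowNoEntry(KeyCacheObject key) throws IgniteCheckedException {
// Read the row directly from page memory, bypassing entry creation.
CacheDataRow row = ctx.offheap().read(ctx, key);
if (row == null)
return null; // Nothing stored: caller falls back to entryEx(key).
long expireTime = row.expireTime();
if (expireTime == 0)
// No TTL: return the value and version as-is.
return new EntryGetResult(row.value(), row.version(), false);
if (expireTime > U.currentTimeMillis())
// TTL set but not yet expired: carry the expire time along.
return new EntryGetWithTtlResult(row.value(), row.version(), false, expireTime, 0);
return null; // Expired row: caller must go through the full entry path.
}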
Use of org.apache.ignite.internal.processors.cache.persistence.CacheDataRow in project ignite by apache.
In the class GridCacheMapEntry, method updateIndex:
/**
* {@inheritDoc}
*/
@Override
public void updateIndex(SchemaIndexCacheFilter filter, SchemaIndexCacheVisitorClosure clo) throws IgniteCheckedException, GridCacheEntryRemovedException {
lockEntry();
try {
if (isInternal())
return;
checkObsolete();
CacheDataRow row = cctx.offheap().read(this);
if (row != null && (filter == null || filter.apply(row)))
clo.apply(row);
} finally {
unlockEntry();
}
}
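Here the row read for this entry is handed to the filter and closure supplied by the schema index builder. A hedged sketch of how such arguments might look, assuming SchemaIndexCacheFilter and SchemaIndexCacheVisitorClosure are single-method interfaces taking a CacheDataRow (as the calls filter.apply(row) and clo.apply(row) suggest) and that entry is a GridCacheMapEntry instance; the lambda bodies are invented:
SchemaIndexCacheFilter filter = row -> row.value() != null; // skip rows without a value
SchemaIndexCacheVisitorClosure clo = row ->
System.out.println("Visiting row: key=" + row.key() + ", ver=" + row.version());
entry.updateIndex(filter, clo);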
Use of org.apache.ignite.internal.processors.cache.persistence.CacheDataRow in project ignite by apache.
In the class GridDhtAtomicCache, method getAllAsync0:
/**
* Entry point to all public API get methods.
*
* @param keys Keys.
* @param forcePrimary Force primary flag.
* @param subjId Subject ID.
* @param taskName Task name.
* @param deserializeBinary Deserialize binary flag.
* @param recovery Recovery mode flag.
* @param expiryPlc Expiry policy.
* @param skipVals Skip values flag.
* @param skipStore Skip store flag.
* @param needVer Need version.
* @return Get future.
*/
private IgniteInternalFuture<Map<K, V>> getAllAsync0(@Nullable Collection<KeyCacheObject> keys, boolean forcePrimary, UUID subjId, String taskName, boolean deserializeBinary, boolean recovery, @Nullable ExpiryPolicy expiryPlc, boolean skipVals, boolean skipStore, boolean needVer) {
AffinityTopologyVersion topVer = ctx.affinity().affinityTopologyVersion();
final IgniteCacheExpiryPolicy expiry = skipVals ? null : expiryPolicy(expiryPlc);
final boolean evt = !skipVals;
// Optimisation: try to resolve value locally and escape 'get future' creation.
if (!forcePrimary && ctx.affinityNode()) {
try {
Map<K, V> locVals = U.newHashMap(keys.size());
boolean success = true;
boolean readNoEntry = ctx.readNoEntry(expiry, false);
// Optimistically expect that all keys are available locally (avoid creation of get future).
for (KeyCacheObject key : keys) {
if (readNoEntry) {
CacheDataRow row = ctx.offheap().read(ctx, key);
if (row != null) {
long expireTime = row.expireTime();
if (expireTime == 0 || expireTime > U.currentTimeMillis()) {
ctx.addResult(locVals, key, row.value(), skipVals, false, deserializeBinary, true, null, row.version(), 0, 0, needVer);
if (evt) {
ctx.events().readEvent(key, null, row.value(), subjId, taskName, !deserializeBinary);
}
} else
success = false;
} else
success = false;
} else {
GridCacheEntryEx entry = null;
while (true) {
try {
entry = entryEx(key);
// If our DHT cache does have a value, then we peek it.
if (entry != null) {
boolean isNew = entry.isNewLocked();
EntryGetResult getRes = null;
CacheObject v = null;
GridCacheVersion ver = null;
if (needVer) {
getRes = entry.innerGetVersioned(null, null, /*update-metrics*/false, /*event*/evt, subjId, null, taskName, expiry, true, null);
if (getRes != null) {
v = getRes.value();
ver = getRes.version();
}
} else {
v = entry.innerGet(null, null, /*read-through*/false, /*update-metrics*/false, /*event*/evt, subjId, null, taskName, expiry, !deserializeBinary);
}
// Entry was not in memory or in swap, so we remove it from cache.
if (v == null) {
if (isNew && entry.markObsoleteIfEmpty(context().versions().next()))
removeEntry(entry);
success = false;
} else {
ctx.addResult(locVals, key, v, skipVals, false, deserializeBinary, true, getRes, ver, 0, 0, needVer);
}
} else
success = false;
break; // While.
} catch (GridCacheEntryRemovedException ignored) {
// No-op, retry.
} catch (GridDhtInvalidPartitionException ignored) {
success = false;
break; // While.
} finally {
if (entry != null)
ctx.evicts().touch(entry, topVer);
}
}
}
if (!success)
break;
else if (!skipVals && ctx.statisticsEnabled())
metrics0().onRead(true);
}
if (success) {
sendTtlUpdateRequest(expiry);
return new GridFinishedFuture<>(locVals);
}
} catch (IgniteCheckedException e) {
return new GridFinishedFuture<>(e);
}
}
if (expiry != null)
expiry.reset();
// Either reload or not all values are available locally.
GridPartitionedGetFuture<K, V> fut = new GridPartitionedGetFuture<>(ctx, keys, !skipStore, forcePrimary, subjId, taskName, deserializeBinary, recovery, expiry, skipVals, needVer, false);
fut.init(topVer);
return fut;
}
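As in GridCacheAdapter, the fast path tries each key against page memory (readNoEntry) or a cache entry; a single miss, expired row, or invalid partition aborts the whole optimisation and the batch is delegated to GridPartitionedGetFuture. A condensed sketch of that control flow, with the per-key lookup collapsed into a hypothetical resolveLocally helper:
Map<K, V> locVals = U.newHashMap(keys.size());
boolean success = true;
for (KeyCacheObject key : keys) {
// resolveLocally is invented: it stands for the CacheDataRow / entry lookup above
// and returns false on a miss, an expired row or an invalid partition.
if (!resolveLocally(key, locVals)) {
success = false;
break; // One unresolved key sends the whole batch through the get future.
}
}
if (success)
return new GridFinishedFuture<>(locVals);
// Otherwise fall through and init a GridPartitionedGetFuture for all keys.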
Use of org.apache.ignite.internal.processors.cache.persistence.CacheDataRow in project ignite by apache.
In the class GridDhtColocatedCache, method loadAsync:
/**
* @param keys Keys to load.
* @param readThrough Read through flag.
* @param forcePrimary Force get from primary node flag.
* @param topVer Topology version.
* @param subjId Subject ID.
* @param taskName Task name.
* @param deserializeBinary Deserialize binary flag.
* @param recovery Recovery mode flag.
* @param expiryPlc Expiry policy.
* @param skipVals Skip values flag.
* @param needVer If {@code true} returns values as tuples containing value and version.
* @param keepCacheObj Keep cache objects flag.
* @return Load future.
*/
public final IgniteInternalFuture<Map<K, V>> loadAsync(@Nullable Collection<KeyCacheObject> keys, boolean readThrough, boolean forcePrimary, AffinityTopologyVersion topVer, @Nullable UUID subjId, String taskName, boolean deserializeBinary, boolean recovery, @Nullable IgniteCacheExpiryPolicy expiryPlc, boolean skipVals, boolean needVer, boolean keepCacheObj) {
if (keys == null || keys.isEmpty())
return new GridFinishedFuture<>(Collections.<K, V>emptyMap());
if (expiryPlc == null)
expiryPlc = expiryPolicy(null);
// Optimisation: try to resolve value locally and escape 'get future' creation.
if (!forcePrimary && ctx.affinityNode()) {
try {
Map<K, V> locVals = null;
boolean success = true;
boolean readNoEntry = ctx.readNoEntry(expiryPlc, false);
boolean evt = !skipVals;
for (KeyCacheObject key : keys) {
if (readNoEntry) {
CacheDataRow row = ctx.offheap().read(ctx, key);
if (row != null) {
long expireTime = row.expireTime();
if (expireTime == 0 || expireTime > U.currentTimeMillis()) {
if (locVals == null)
locVals = U.newHashMap(keys.size());
ctx.addResult(locVals, key, row.value(), skipVals, keepCacheObj, deserializeBinary, true, null, row.version(), 0, 0, needVer);
if (evt) {
ctx.events().readEvent(key, null, row.value(), subjId, taskName, !deserializeBinary);
}
} else
success = false;
} else
success = false;
} else {
GridCacheEntryEx entry = null;
while (true) {
try {
entry = entryEx(key);
// If our DHT cache does have a value, then we peek it.
if (entry != null) {
boolean isNew = entry.isNewLocked();
EntryGetResult getRes = null;
CacheObject v = null;
GridCacheVersion ver = null;
if (needVer) {
getRes = entry.innerGetVersioned(null, null, /*update-metrics*/false, /*event*/evt, subjId, null, taskName, expiryPlc, !deserializeBinary, null);
if (getRes != null) {
v = getRes.value();
ver = getRes.version();
}
} else {
v = entry.innerGet(null, null, /*read-through*/false, /*update-metrics*/false, /*event*/evt, subjId, null, taskName, expiryPlc, !deserializeBinary);
}
// Entry was not in memory or in swap, so we remove it from cache.
if (v == null) {
GridCacheVersion obsoleteVer = context().versions().next();
if (isNew && entry.markObsoleteIfEmpty(obsoleteVer))
removeEntry(entry);
success = false;
} else {
if (locVals == null)
locVals = U.newHashMap(keys.size());
ctx.addResult(locVals, key, v, skipVals, keepCacheObj, deserializeBinary, true, getRes, ver, 0, 0, needVer);
}
} else
success = false;
break; // While.
} catch (GridCacheEntryRemovedException ignored) {
// No-op, retry.
} catch (GridDhtInvalidPartitionException ignored) {
success = false;
break; // While.
} finally {
if (entry != null)
context().evicts().touch(entry, topVer);
}
}
}
if (!success)
break;
else if (!skipVals && ctx.statisticsEnabled())
ctx.cache().metrics0().onRead(true);
}
if (success) {
sendTtlUpdateRequest(expiryPlc);
return new GridFinishedFuture<>(locVals);
}
} catch (IgniteCheckedException e) {
return new GridFinishedFuture<>(e);
}
}
if (expiryPlc != null)
expiryPlc.reset();
// Either reload or not all values are available locally.
GridPartitionedGetFuture<K, V> fut = new GridPartitionedGetFuture<>(ctx, keys, readThrough, forcePrimary, subjId, taskName, deserializeBinary, recovery, expiryPlc, skipVals, needVer, keepCacheObj);
fut.init(topVer);
return fut;
}
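A hypothetical call site (invented for illustration) showing the parameter order of loadAsync as declared above; keys is assumed to already be a Collection<KeyCacheObject>, read-through is enabled, and the current affinity topology version is used:
IgniteInternalFuture<Map<K, V>> fut = loadAsync(
keys,
/*readThrough*/ true,
/*forcePrimary*/ false,
ctx.affinity().affinityTopologyVersion(),
/*subjId*/ null,
/*taskName*/ null,
/*deserializeBinary*/ true,
/*recovery*/ false,
/*expiryPlc*/ null,
/*skipVals*/ false,
/*needVer*/ false,
/*keepCacheObj*/ false);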
Use of org.apache.ignite.internal.processors.cache.persistence.CacheDataRow in project ignite by apache.
In the class GridDhtLocalPartition, method clearAll:
/**
* Removes all entries and rows from this partition.
*
* @return Number of rows cleared from page memory.
* @throws NodeStoppingException If node stopping.
*/
protected long clearAll(EvictionContext evictionCtx) throws NodeStoppingException {
long order = clearVer;
GridCacheVersion clearVer = ctx.versions().startVersion();
GridCacheObsoleteEntryExtras extras = new GridCacheObsoleteEntryExtras(clearVer);
boolean rec = grp.eventRecordable(EVT_CACHE_REBALANCE_OBJECT_UNLOADED);
long cleared = 0;
int stopCntr = 0;
CacheMapHolder hld = grp.sharedGroup() ? null : singleCacheEntryMap;
boolean recoveryMode = ctx.kernalContext().recoveryMode();
try {
// A clearing start record is logged only for ATOMIC caches, because atomic and transactional caches use different partition counter implementations.
if (state() == MOVING && !recoveryMode && grp.walEnabled() && grp.config().getAtomicityMode() == ATOMIC)
ctx.wal().log(new PartitionClearingStartRecord(id, grp.groupId(), order));
GridIterator<CacheDataRow> it0 = grp.offheap().partitionIterator(id);
while (it0.hasNext()) {
if ((stopCntr = (stopCntr + 1) & 1023) == 0 && evictionCtx.shouldStop())
return cleared;
ctx.database().checkpointReadLock();
try {
CacheDataRow row = it0.next();
// Do not clear fresh rows in case of partition reloading.
// This is required because normal updates are possible to moving partition which is currently cleared.
// We can clean OWNING partition if a partition has been reset from lost state.
// In this case new updates must be preserved.
// Partition state can be switched from RENTING to MOVING and vice versa during clearing.
long order0 = row.version().order();
if ((state() == MOVING || recoveryMode) && (order0 == 0 /* Inserted by isolated updater. */ || order0 > order))
continue;
if (grp.sharedGroup() && (hld == null || hld.cctx.cacheId() != row.cacheId()))
hld = cacheMapHolder(ctx.cacheContext(row.cacheId()));
assert hld != null;
GridCacheMapEntry cached = putEntryIfObsoleteOrAbsent(hld, hld.cctx, grp.affinity().lastVersion(), row.key(), true, true);
assert cached != null : "Expecting the reservation " + this;
if (cached.deleted())
continue;
if (cached instanceof GridDhtCacheEntry && ((GridDhtCacheEntry) cached).clearInternal(clearVer, extras)) {
removeEntry(cached);
if (rec && !hld.cctx.config().isEventsDisabled()) {
hld.cctx.events().addEvent(cached.partition(), cached.key(), ctx.localNodeId(), null, null, null, EVT_CACHE_REBALANCE_OBJECT_UNLOADED, null, false, cached.rawGet(), cached.hasValue(), null, null, false);
}
cleared++;
}
} catch (GridDhtInvalidPartitionException e) {
assert isEmpty() && state() == EVICTED : "Invalid error [e=" + e + ", part=" + this + ']';
// Partition is already concurrently cleared and evicted.
break;
} finally {
ctx.database().checkpointReadUnlock();
}
}
if (forceTestCheckpointOnEviction) {
if (partWhereTestCheckpointEnforced == null && cleared >= fullSize()) {
ctx.database().forceCheckpoint("test").futureFor(FINISHED).get();
log.warning("Forced checkpoint by test reasons for partition: " + this);
partWhereTestCheckpointEnforced = id;
}
}
// Attempt to destroy.
if (!recoveryMode)
((GridDhtPreloader) grp.preloader()).tryFinishEviction(this);
} catch (NodeStoppingException e) {
if (log.isDebugEnabled())
log.debug("Failed to get iterator for evicted partition: " + id);
throw e;
} catch (IgniteCheckedException e) {
U.error(log, "Failed to get iterator for evicted partition: " + id, e);
}
return cleared;
}
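The clearing loop keeps any row that is newer than the clearing start order, so updates arriving while a MOVING partition is being cleared are preserved. A minimal sketch of that guard extracted into a hypothetical predicate (isFreshRow and clearStartOrder are invented names; row.version().order() comes from the snippet above):
private static boolean isFreshRow(CacheDataRow row, long clearStartOrder) {
long order0 = row.version().order();
// order0 == 0: row was inserted by the isolated (rebalance) updater.
// order0 > clearStartOrder: row was written after clearing started.
// Such rows must survive clearing of a MOVING (or recovering) partition.
return order0 == 0 || order0 > clearStartOrder;
}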