Use of org.apache.ignite.internal.processors.cache.database.CacheDataRow in project ignite by apache.
The class IgniteCacheOffheapManagerImpl, method read().
/** {@inheritDoc} */
@Nullable
@Override
public CacheDataRow read(KeyCacheObject key) throws IgniteCheckedException {
    CacheDataRow row;

    if (cctx.isLocal())
        row = locCacheDataStore.find(key);
    else {
        GridDhtLocalPartition part = cctx.topology().localPartition(cctx.affinity().partition(key), null, false);

        row = part != null ? dataStore(part).find(key) : null;
    }

    assert row == null || row.value() != null : row;

    return row;
}
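For context, a minimal usage sketch of this lookup follows. It assumes the manager is reached through the internal cache context via cctx.offheap() and that the raw key is converted with cctx.toCacheKeyObject(...); both accessors are internal Ignite APIs and are assumptions for illustration, not part of the snippet above.

// Hedged sketch: reading the off-heap data row for a key.
// cctx.offheap() and cctx.toCacheKeyObject(...) are assumed internal accessors
// and may differ between Ignite versions.
KeyCacheObject cacheKey = cctx.toCacheKeyObject("someKey");

CacheDataRow row = cctx.offheap().read(cacheKey);

if (row != null)
    // The assertion in read() guarantees a non-null value when a row is returned.
    log.info("Found row, version=" + row.version());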
Use of org.apache.ignite.internal.processors.cache.database.CacheDataRow in project ignite by apache.
The class IgniteCacheOffheapManagerImpl, method entriesIterator().
/**
 * @param primary {@code True} if need to return primary entries.
 * @param backup {@code True} if need to return backup entries.
 * @param topVer Topology version to use.
 * @param keepBinary Keep binary flag.
 * @return Entries iterator.
 * @throws IgniteCheckedException If failed.
 */
@SuppressWarnings("unchecked")
@Override
public <K, V> GridCloseableIterator<Cache.Entry<K, V>> entriesIterator(final boolean primary,
    final boolean backup,
    final AffinityTopologyVersion topVer,
    final boolean keepBinary) throws IgniteCheckedException {
    final Iterator<CacheDataRow> it = rowsIterator(primary, backup, topVer);

    return new GridCloseableIteratorAdapter<Cache.Entry<K, V>>() {
        /** */
        private CacheEntryImplEx next;

        @Override protected Cache.Entry<K, V> onNext() {
            CacheEntryImplEx ret = next;

            next = null;

            return ret;
        }

        @Override protected boolean onHasNext() {
            if (next != null)
                return true;

            CacheDataRow nextRow = null;

            if (it.hasNext())
                nextRow = it.next();

            if (nextRow != null) {
                KeyCacheObject key = nextRow.key();
                CacheObject val = nextRow.value();

                Object key0 = cctx.unwrapBinaryIfNeeded(key, keepBinary, false);
                Object val0 = cctx.unwrapBinaryIfNeeded(val, keepBinary, false);

                next = new CacheEntryImplEx(key0, val0, nextRow.version());

                return true;
            }

            return false;
        }
    };
}
Use of org.apache.ignite.internal.processors.cache.database.CacheDataRow in project ignite by apache.
The class IgniteCacheOffheapManagerImpl, method iterator().
/** {@inheritDoc} */
@Override
public GridIterator<CacheDataRow> iterator(int part) throws IgniteCheckedException {
    CacheDataStore data = partitionData(part);

    if (data == null)
        return new GridEmptyCloseableIterator<>();

    final GridCursor<? extends CacheDataRow> cur = data.cursor();

    return new GridCloseableIteratorAdapter<CacheDataRow>() {
        /** */
        private CacheDataRow next;

        @Override protected CacheDataRow onNext() {
            CacheDataRow res = next;

            next = null;

            return res;
        }

        @Override protected boolean onHasNext() throws IgniteCheckedException {
            if (next != null)
                return true;

            if (cur.next())
                next = cur.get();

            return next != null;
        }
    };
}
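A brief sketch of walking one partition with the iterator returned above; the partition id, the log field, and the cctx.offheap() accessor are illustrative assumptions.

// Hedged sketch: scanning all rows of a single partition.
// The hasNext()/next() contract used here matches the adapter above.
int partId = 0; // illustrative partition id

GridIterator<CacheDataRow> rows = cctx.offheap().iterator(partId);

while (rows.hasNext()) {
    CacheDataRow row = rows.next();

    log.info("key=" + row.key() + ", ver=" + row.version());
}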
Use of org.apache.ignite.internal.processors.cache.database.CacheDataRow in project ignite by apache.
The class IgniteCacheOffheapManagerImpl, method clear().
/**
 * Clears offheap entries.
 *
 * @param readers {@code True} to clear readers.
 */
@SuppressWarnings("unchecked")
@Override
public void clear(boolean readers) {
    GridCacheVersion obsoleteVer = null;

    GridIterator<CacheDataRow> it = rowsIterator(true, true, null);

    while (it.hasNext()) {
        KeyCacheObject key = it.next().key();

        try {
            if (obsoleteVer == null)
                obsoleteVer = cctx.versions().next();

            GridCacheEntryEx entry = cctx.cache().entryEx(key);

            entry.clear(obsoleteVer, readers);
        }
        catch (GridDhtInvalidPartitionException ignore) {
            // Ignore.
        }
        catch (IgniteCheckedException e) {
            U.error(log, "Failed to clear cache entry: " + key, e);
        }
    }
}
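The clear pattern above walks every row (primary and backup, hence rowsIterator(true, true, null)) and obsoletes the corresponding cache entry. A trivial hedged invocation sketch, again assuming the internal cctx.offheap() accessor:

// Hedged sketch: clearing all off-heap entries for this cache, also clearing
// reader information on DHT entries. cctx.offheap() is an assumed internal accessor.
cctx.offheap().clear(true);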
Use of org.apache.ignite.internal.processors.cache.database.CacheDataRow in project ignite by apache.
The class GridCacheMapEntry, method innerUpdateLocal().
/** {@inheritDoc} */
@SuppressWarnings("unchecked")
@Override
public GridTuple3<Boolean, Object, EntryProcessorResult<Object>> innerUpdateLocal(
    GridCacheVersion ver,
    GridCacheOperation op,
    @Nullable Object writeObj,
    @Nullable Object[] invokeArgs,
    boolean writeThrough,
    boolean readThrough,
    boolean retval,
    boolean keepBinary,
    @Nullable ExpiryPolicy expiryPlc,
    boolean evt,
    boolean metrics,
    @Nullable CacheEntryPredicate[] filter,
    boolean intercept,
    @Nullable UUID subjId,
    String taskName
) throws IgniteCheckedException, GridCacheEntryRemovedException {
    assert cctx.isLocal() && cctx.atomic();

    CacheObject old;

    boolean res = true;

    IgniteBiTuple<Boolean, ?> interceptorRes = null;

    EntryProcessorResult<Object> invokeRes = null;

    synchronized (this) {
        boolean internal = isInternal() || !context().userCache();

        Map<UUID, CacheContinuousQueryListener> lsnrCol =
            cctx.continuousQueries().updateListeners(internal, false);

        boolean needVal = retval || intercept || op == GridCacheOperation.TRANSFORM || !F.isEmpty(filter) || lsnrCol != null;

        checkObsolete();

        CacheDataRow oldRow = null;

        // Load and remove from swap if it is new.
        if (isNew())
            oldRow = unswap(retval, false);

        old = val;

        boolean readFromStore = false;

        Object old0 = null;

        if (readThrough && needVal && old == null &&
            (cctx.readThrough() && (op == GridCacheOperation.TRANSFORM || cctx.loadPreviousValue()))) {
            old0 = readThrough(null, key, false, subjId, taskName);

            old = cctx.toCacheObject(old0);

            long ttl = CU.TTL_ETERNAL;
            long expireTime = CU.EXPIRE_TIME_ETERNAL;

            if (expiryPlc != null && old != null) {
                ttl = CU.toTtl(expiryPlc.getExpiryForCreation());

                if (ttl == CU.TTL_ZERO) {
                    ttl = CU.TTL_MINIMUM;
                    expireTime = CU.expireTimeInPast();
                }
                else if (ttl == CU.TTL_NOT_CHANGED)
                    ttl = CU.TTL_ETERNAL;
                else
                    expireTime = CU.toExpireTime(ttl);
            }

            // Detach value before index update.
            old = cctx.kernalContext().cacheObjects().prepareForCache(old, cctx);

            if (old != null)
                storeValue(old, expireTime, ver, oldRow);
            else
                removeValue();

            update(old, expireTime, ttl, ver, true);
        }

        // Apply metrics.
        if (metrics && cctx.cache().configuration().isStatisticsEnabled() && needVal) {
            // PutIfAbsent methods mustn't update hit/miss statistics.
            if (op != GridCacheOperation.UPDATE || F.isEmpty(filter) || !cctx.putIfAbsentFilter(filter))
                cctx.cache().metrics0().onRead(old != null);
        }

        // Check filter inside of synchronization.
        if (!F.isEmpty(filter)) {
            boolean pass = cctx.isAllLocked(this, filter);

            if (!pass) {
                if (expiryPlc != null && !readFromStore && !cctx.putIfAbsentFilter(filter) && hasValueUnlocked())
                    updateTtl(expiryPlc);

                Object val = retval ?
                    cctx.cacheObjectContext().unwrapBinaryIfNeeded(CU.value(old, cctx, false), keepBinary, false) :
                    null;

                return new T3<>(false, val, null);
            }
        }

        String transformCloClsName = null;

        CacheObject updated;

        Object key0 = null;
        Object updated0 = null;

        // Calculate new value.
        if (op == GridCacheOperation.TRANSFORM) {
            transformCloClsName = EntryProcessorResourceInjectorProxy.unwrap(writeObj).getClass().getName();

            EntryProcessor<Object, Object, ?> entryProcessor = (EntryProcessor<Object, Object, ?>)writeObj;

            assert entryProcessor != null;

            CacheInvokeEntry<Object, Object> entry = new CacheInvokeEntry<>(key, old, version(), keepBinary, this);

            try {
                Object computed = entryProcessor.process(entry, invokeArgs);

                if (entry.modified()) {
                    updated0 = cctx.unwrapTemporary(entry.getValue());
                    updated = cctx.toCacheObject(updated0);
                }
                else
                    updated = old;

                key0 = entry.key();

                invokeRes = computed != null ? CacheInvokeResult.fromResult(cctx.unwrapTemporary(computed)) : null;
            }
            catch (Exception e) {
                updated = old;

                invokeRes = CacheInvokeResult.fromError(e);
            }

            if (!entry.modified()) {
                if (expiryPlc != null && !readFromStore && hasValueUnlocked())
                    updateTtl(expiryPlc);

                return new GridTuple3<>(false, null, invokeRes);
            }
        }
        else
            updated = (CacheObject)writeObj;

        op = updated == null ? GridCacheOperation.DELETE : GridCacheOperation.UPDATE;

        if (intercept) {
            CacheLazyEntry e;

            if (op == GridCacheOperation.UPDATE) {
                updated0 = value(updated0, updated, keepBinary, false);

                e = new CacheLazyEntry(cctx, key, key0, old, old0, keepBinary);

                Object interceptorVal = cctx.config().getInterceptor().onBeforePut(e, updated0);

                if (interceptorVal == null)
                    return new GridTuple3<>(false, cctx.unwrapTemporary(value(old0, old, keepBinary, false)), invokeRes);
                else {
                    updated0 = cctx.unwrapTemporary(interceptorVal);

                    updated = cctx.toCacheObject(updated0);
                }
            }
            else {
                e = new CacheLazyEntry(cctx, key, key0, old, old0, keepBinary);

                interceptorRes = cctx.config().getInterceptor().onBeforeRemove(e);

                if (cctx.cancelRemove(interceptorRes))
                    return new GridTuple3<>(false, cctx.unwrapTemporary(interceptorRes.get2()), invokeRes);
            }

            key0 = e.key();
            old0 = e.value();
        }

        boolean hadVal = hasValueUnlocked();

        long ttl = CU.TTL_ETERNAL;
        long expireTime = CU.EXPIRE_TIME_ETERNAL;

        if (op == GridCacheOperation.UPDATE) {
            if (expiryPlc != null) {
                ttl = CU.toTtl(hadVal ? expiryPlc.getExpiryForUpdate() : expiryPlc.getExpiryForCreation());

                if (ttl == CU.TTL_NOT_CHANGED) {
                    ttl = ttlExtras();
                    expireTime = expireTimeExtras();
                }
                else if (ttl != CU.TTL_ZERO)
                    expireTime = CU.toExpireTime(ttl);
            }
            else {
                ttl = ttlExtras();
                expireTime = expireTimeExtras();
            }
        }

        if (ttl == CU.TTL_ZERO)
            op = GridCacheOperation.DELETE;

        // Try write-through.
        if (op == GridCacheOperation.UPDATE) {
            // Detach value before index update.
            updated = cctx.kernalContext().cacheObjects().prepareForCache(updated, cctx);

            if (writeThrough)
                // Must persist inside synchronization in non-tx mode.
                cctx.store().put(null, key, updated, ver);

            storeValue(updated, expireTime, ver, oldRow);

            assert ttl != CU.TTL_ZERO;

            update(updated, expireTime, ttl, ver, true);

            if (evt) {
                CacheObject evtOld = null;

                if (transformCloClsName != null && cctx.events().isRecordable(EVT_CACHE_OBJECT_READ)) {
                    evtOld = cctx.unwrapTemporary(old);

                    cctx.events().addEvent(partition(), key, cctx.localNodeId(), null, (GridCacheVersion)null,
                        EVT_CACHE_OBJECT_READ, evtOld, evtOld != null || hadVal, evtOld, evtOld != null || hadVal,
                        subjId, transformCloClsName, taskName, keepBinary);
                }

                if (cctx.events().isRecordable(EVT_CACHE_OBJECT_PUT)) {
                    if (evtOld == null)
                        evtOld = cctx.unwrapTemporary(old);

                    cctx.events().addEvent(partition(), key, cctx.localNodeId(), null, (GridCacheVersion)null,
                        EVT_CACHE_OBJECT_PUT, updated, updated != null, evtOld, evtOld != null || hadVal,
                        subjId, null, taskName, keepBinary);
                }
            }
        }
        else {
            if (writeThrough)
                // Must persist inside synchronization in non-tx mode.
                cctx.store().remove(null, key);

            removeValue();

            update(null, CU.TTL_ETERNAL, CU.EXPIRE_TIME_ETERNAL, ver, true);

            if (evt) {
                CacheObject evtOld = null;

                if (transformCloClsName != null && cctx.events().isRecordable(EVT_CACHE_OBJECT_READ))
                    cctx.events().addEvent(partition(), key, cctx.localNodeId(), null, (GridCacheVersion)null,
                        EVT_CACHE_OBJECT_READ, evtOld, evtOld != null || hadVal, evtOld, evtOld != null || hadVal,
                        subjId, transformCloClsName, taskName, keepBinary);

                if (cctx.events().isRecordable(EVT_CACHE_OBJECT_REMOVED)) {
                    if (evtOld == null)
                        evtOld = cctx.unwrapTemporary(old);

                    cctx.events().addEvent(partition(), key, cctx.localNodeId(), null, (GridCacheVersion)null,
                        EVT_CACHE_OBJECT_REMOVED, null, false, evtOld, evtOld != null || hadVal,
                        subjId, null, taskName, keepBinary);
                }
            }

            res = hadVal;
        }

        if (res)
            updateMetrics(op, metrics);

        if (lsnrCol != null) {
            long updateCntr = nextPartCounter(AffinityTopologyVersion.NONE);

            cctx.continuousQueries().onEntryUpdated(lsnrCol, key, val, old, internal, partition(), true, false,
                updateCntr, null, AffinityTopologyVersion.NONE);

            onUpdateFinished(updateCntr);
        }

        cctx.dataStructures().onEntryUpdated(key, op == GridCacheOperation.DELETE, keepBinary);

        if (intercept) {
            if (op == GridCacheOperation.UPDATE)
                cctx.config().getInterceptor().onAfterPut(new CacheLazyEntry(cctx, key, key0, updated, updated0, keepBinary, 0L));
            else
                cctx.config().getInterceptor().onAfterRemove(new CacheLazyEntry(cctx, key, key0, old, old0, keepBinary, 0L));
        }
    }

    return new GridTuple3<>(res,
        cctx.unwrapTemporary(interceptorRes != null ?
            interceptorRes.get2() :
            cctx.cacheObjectContext().unwrapBinaryIfNeeded(old, keepBinary, false)),
        invokeRes);
}
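innerUpdateLocal(...) is invoked by the local atomic cache internals rather than by user code. The sketch below only illustrates, under that caveat, how the returned GridTuple3 is typically unpacked (success flag, previous or returned value, entry-processor result). The entry variable and all argument values are illustrative placeholders matching the signature shown above.

// Hedged sketch: interpreting the result of innerUpdateLocal(...).
// Argument values are placeholders; in Ignite this call is made internally
// (e.g. by the local atomic cache), not by application code.
GridTuple3<Boolean, Object, EntryProcessorResult<Object>> t = entry.innerUpdateLocal(
    ver,                           // new entry version
    GridCacheOperation.UPDATE,     // operation
    newVal,                        // value to write (or EntryProcessor for TRANSFORM)
    null,                          // invoke arguments
    false,                         // writeThrough
    false,                         // readThrough
    true,                          // retval
    false,                         // keepBinary
    null,                          // expiry policy
    true,                          // record events
    true,                          // update metrics
    null,                          // filter
    false,                         // intercept
    null,                          // subject id
    "task");                       // task name

boolean applied = t.get1();                       // whether the entry was actually changed
Object oldOrInterceptorVal = t.get2();            // previous value (or interceptor result)
EntryProcessorResult<Object> procRes = t.get3();  // non-null only for TRANSFORM operations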