Use of org.apache.ignite.internal.processors.cache.persistence.CacheDataRow in project ignite by apache.
The class GridPartitionedSingleGetFuture, method localGet.
/**
 * @param topVer Topology version.
 * @param key Key.
 * @param part Partition.
 * @return {@code True} if the future completed.
 */
private boolean localGet(AffinityTopologyVersion topVer, KeyCacheObject key, int part) {
    assert cctx.affinityNode() : this;

    GridDhtCacheAdapter colocated = cctx.dht();

    boolean readNoEntry = cctx.readNoEntry(expiryPlc, false);
    boolean evt = !skipVals;

    while (true) {
        cctx.shared().database().checkpointReadLock();

        try {
            CacheObject v = null;
            GridCacheVersion ver = null;

            boolean skipEntry = readNoEntry;

            if (readNoEntry) {
                KeyCacheObject key0 = (KeyCacheObject)cctx.cacheObjects().prepareForCache(key, cctx);

                CacheDataRow row = mvccSnapshot != null ?
                    cctx.offheap().mvccRead(cctx, key0, mvccSnapshot) :
                    cctx.offheap().read(cctx, key0);

                if (row != null) {
                    long expireTime = row.expireTime();

                    if (expireTime == 0 || expireTime > U.currentTimeMillis()) {
                        v = row.value();

                        if (needVer)
                            ver = row.version();

                        if (evt)
                            cctx.events().readEvent(key, null, txLbl, row.value(), taskName, !deserializeBinary);
                    }
                    else
                        skipEntry = false;
                }
            }

            if (!skipEntry) {
                GridCacheEntryEx entry = colocated.entryEx(key);

                // If our DHT cache does have the value, then we peek it.
                if (entry != null) {
                    boolean isNew = entry.isNewLocked();

                    if (needVer) {
                        EntryGetResult res = entry.innerGetVersioned(null, null,
                            /*update-metrics*/false,
                            /*event*/evt, null, taskName, expiryPlc, true, null);

                        if (res != null) {
                            v = res.value();
                            ver = res.version();
                        }
                    }
                    else {
                        v = entry.innerGet(null, null,
                            /*read-through*/false,
                            /*update-metrics*/false,
                            /*event*/evt, null, taskName, expiryPlc, true);
                    }

                    entry.touch();

                    // Entry was not in memory or in swap, so we remove it from cache.
                    if (v == null) {
                        if (isNew && entry.markObsoleteIfEmpty(ver))
                            colocated.removeEntry(entry);
                    }
                }
            }

            if (v != null) {
                if (!skipVals && cctx.statisticsEnabled())
                    cctx.cache().metrics0().onRead(true);

                if (!skipVals)
                    setResult(v, ver);
                else
                    setSkipValueResult(true, ver);

                return true;
            }

            boolean topStable = cctx.isReplicated() || topVer.equals(cctx.topology().lastTopologyChangeVersion());

            // Entry not found, complete future with null result if topology did not change and there is no store.
            if (!cctx.readThroughConfigured() && (topStable || partitionOwned(part))) {
                if (!skipVals && cctx.statisticsEnabled())
                    colocated.metrics0().onRead(false);

                if (skipVals)
                    setSkipValueResult(false, null);
                else
                    setResult(null, null);

                return true;
            }

            return false;
        }
        catch (GridCacheEntryRemovedException ignored) {
            // No-op, will retry.
        }
        catch (GridDhtInvalidPartitionException ignored) {
            return false;
        }
        catch (IgniteCheckedException e) {
            onDone(e);

            return true;
        }
        finally {
            cctx.shared().database().checkpointReadUnlock();
        }
    }
}
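The read-no-entry fast path above pulls a CacheDataRow straight from the off-heap store without materializing a cache entry, and serves the row only while it is alive: an expire time of 0 means the row never expires. A minimal sketch of that check as a standalone helper (the RowExpiry class and alive method are hypothetical; CacheDataRow and U are the Ignite internals used above):

import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow;
import org.apache.ignite.internal.util.typedef.internal.U;

final class RowExpiry {
    /** @return {@code true} if the row can still be served; an expire time of {@code 0} means it never expires. */
    static boolean alive(CacheDataRow row) {
        long expireTime = row.expireTime();

        return expireTime == 0 || expireTime > U.currentTimeMillis();
    }
}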
Use of org.apache.ignite.internal.processors.cache.persistence.CacheDataRow in project ignite by apache.
The class GridCacheMapEntry, method innerUpdate.
/** {@inheritDoc} */
@SuppressWarnings("unchecked")
@Override public GridCacheUpdateAtomicResult innerUpdate(
    final GridCacheVersion newVer,
    final UUID evtNodeId,
    final UUID affNodeId,
    final GridCacheOperation op,
    @Nullable final Object writeObj,
    @Nullable final Object[] invokeArgs,
    final boolean writeThrough,
    final boolean readThrough,
    final boolean retval,
    final boolean keepBinary,
    @Nullable final IgniteCacheExpiryPolicy expiryPlc,
    final boolean evt,
    final boolean metrics,
    final boolean primary,
    final boolean verCheck,
    final AffinityTopologyVersion topVer,
    @Nullable final CacheEntryPredicate[] filter,
    final GridDrType drType,
    final long explicitTtl,
    final long explicitExpireTime,
    @Nullable final GridCacheVersion conflictVer,
    final boolean conflictResolve,
    final boolean intercept,
    final String taskName,
    @Nullable final CacheObject prevVal,
    @Nullable final Long updateCntr,
    @Nullable final GridDhtAtomicAbstractUpdateFuture fut,
    boolean transformOp
) throws IgniteCheckedException, GridCacheEntryRemovedException, GridClosureException {
    assert cctx.atomic() && !detached();

    AtomicCacheUpdateClosure c;

    if (!primary && !isNear())
        ensureFreeSpace();

    lockListenerReadLock();
    lockEntry();

    try {
        checkObsolete();

        boolean internal = isInternal() || !context().userCache();

        Map<UUID, CacheContinuousQueryListener> lsnrs = cctx.continuousQueries().updateListeners(internal, false);

        boolean needVal = lsnrs != null || intercept || retval || op == GridCacheOperation.TRANSFORM || !F.isEmptyOrNulls(filter);

        // Possibly read value from store.
        boolean readFromStore = readThrough && needVal &&
            (cctx.readThrough() && (op == GridCacheOperation.TRANSFORM || cctx.loadPreviousValue()));

        c = new AtomicCacheUpdateClosure(this, topVer, newVer, op, writeObj, invokeArgs, readFromStore, writeThrough,
            keepBinary, expiryPlc, primary, verCheck, filter, explicitTtl, explicitExpireTime, conflictVer,
            conflictResolve, intercept, updateCntr, cctx.disableTriggeringCacheInterceptorOnConflict());

        key.valueBytes(cctx.cacheObjectContext());

        if (isNear()) {
            CacheDataRow dataRow = val != null ? new CacheDataRowAdapter(key, val, ver, expireTimeExtras()) : null;

            c.call(dataRow);
        }
        else
            cctx.offheap().invoke(cctx, key, localPartition(), c);

        GridCacheUpdateAtomicResult updateRes = c.updateRes;

        assert updateRes != null : c;

        // We should ignore an expired old row. The expired oldRow instance is needed only for
        // correct row replacement/deletion.
        CacheObject oldVal = c.oldRow != null && !c.oldRowExpiredFlag ? c.oldRow.value() : null;
        CacheObject updateVal = null;
        GridCacheVersion updateVer = c.newVer;

        boolean updateMetrics = metrics && cctx.statisticsEnabled();

        // Apply metrics.
        if (updateMetrics && updateRes.outcome().updateReadMetrics() && needVal)
            cctx.cache().metrics0().onRead(oldVal != null);

        if (updateMetrics && INVOKE_NO_OP.equals(updateRes.outcome()) && (transformOp || updateRes.transformed()))
            cctx.cache().metrics0().onReadOnlyInvoke(oldVal != null);
        else if (updateMetrics && REMOVE_NO_VAL.equals(updateRes.outcome()) && (transformOp || updateRes.transformed()))
            cctx.cache().metrics0().onInvokeRemove(oldVal != null);

        switch (updateRes.outcome()) {
            case VERSION_CHECK_FAILED: {
                if (!cctx.isNear()) {
                    CacheObject evtVal;

                    if (op == GridCacheOperation.TRANSFORM) {
                        EntryProcessor<Object, Object, ?> entryProcessor = (EntryProcessor<Object, Object, ?>)writeObj;

                        CacheInvokeEntry<Object, Object> entry = new CacheInvokeEntry<>(key, prevVal, version(), keepBinary, this);

                        IgniteThread.onEntryProcessorEntered(true);

                        try {
                            entryProcessor.process(entry, invokeArgs);

                            evtVal = entry.modified() ? cctx.toCacheObject(cctx.unwrapTemporary(entry.getValue())) : prevVal;
                        }
                        catch (Exception ignore) {
                            evtVal = prevVal;
                        }
                        finally {
                            IgniteThread.onEntryProcessorLeft();
                        }
                    }
                    else
                        evtVal = (CacheObject)writeObj;

                    assert !primary && updateCntr != null;

                    onUpdateFinished(updateCntr);

                    cctx.continuousQueries().onEntryUpdated(key, evtVal, prevVal, isInternal() || !context().userCache(),
                        partition(), primary, false, updateCntr, null, topVer);
                }

                return updateRes;
            }

            case CONFLICT_USE_OLD:
            case FILTER_FAILED:
            case INVOKE_NO_OP:
            case INTERCEPTOR_CANCEL:
                return updateRes;
        }

        assert updateRes.outcome() == UpdateOutcome.SUCCESS || updateRes.outcome() == UpdateOutcome.REMOVE_NO_VAL;

        CacheObject evtOld = null;

        if (evt && op == TRANSFORM && cctx.events().isRecordable(EVT_CACHE_OBJECT_READ)) {
            assert writeObj instanceof EntryProcessor : writeObj;

            evtOld = cctx.unwrapTemporary(oldVal);

            Object transformClo = EntryProcessorResourceInjectorProxy.unwrap(writeObj);

            cctx.events().addEvent(partition(), key, evtNodeId, null, null, updateVer, EVT_CACHE_OBJECT_READ,
                evtOld, evtOld != null, evtOld, evtOld != null, transformClo.getClass().getName(), taskName, keepBinary);
        }

        if (c.op == UPDATE) {
            updateVal = val;

            assert updateVal != null : c;

            drReplicate(drType, updateVal, updateVer, topVer);

            recordNodeId(affNodeId, topVer);

            if (evt && cctx.events().isRecordable(EVT_CACHE_OBJECT_PUT)) {
                if (evtOld == null)
                    evtOld = cctx.unwrapTemporary(oldVal);

                cctx.events().addEvent(partition(), key, evtNodeId, null, null, updateVer, EVT_CACHE_OBJECT_PUT,
                    updateVal, true, evtOld, evtOld != null, null, taskName, keepBinary);
            }
        }
        else {
            assert c.op == DELETE : c.op;

            clearReaders();

            drReplicate(drType, null, updateVer, topVer);

            recordNodeId(affNodeId, topVer);

            if (evt && cctx.events().isRecordable(EVT_CACHE_OBJECT_REMOVED)) {
                if (evtOld == null)
                    evtOld = cctx.unwrapTemporary(oldVal);

                cctx.events().addEvent(partition(), key, evtNodeId, null, null, updateVer, EVT_CACHE_OBJECT_REMOVED,
                    null, false, evtOld, evtOld != null, null, taskName, keepBinary);
            }
        }

        if (updateRes.success())
            updateMetrics(c.op, metrics, transformOp || updateRes.transformed(), oldVal != null);

        // Continuous query filter should be performed under lock.
        if (lsnrs != null) {
            CacheObject evtVal = cctx.unwrapTemporary(updateVal);
            CacheObject evtOldVal = cctx.unwrapTemporary(oldVal);

            cctx.continuousQueries().onEntryUpdated(lsnrs, key, evtVal, evtOldVal, internal, partition(), primary,
                false, c.updateRes.updateCounter(), fut, topVer);
        }

        if (intercept && c.wasIntercepted) {
            assert c.op == UPDATE || c.op == DELETE : c.op;

            Cache.Entry<?, ?> entry = new CacheLazyEntry<>(cctx, key, null, c.op == UPDATE ? updateVal : oldVal,
                null, keepBinary, c.updateRes.updateCounter());

            if (c.op == UPDATE)
                cctx.config().getInterceptor().onAfterPut(entry);
            else
                cctx.config().getInterceptor().onAfterRemove(entry);
        }

        updatePlatformCache(c.op == UPDATE ? updateVal : null, topVer);
    }
    finally {
        unlockEntry();
        unlockListenerReadLock();
    }

    onUpdateFinished(c.updateRes.updateCounter());

    return c.updateRes;
}
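Note how the near-cache branch produces its CacheDataRow: instead of reading from the off-heap store, it wraps the entry's in-memory state in a CacheDataRowAdapter so the same AtomicCacheUpdateClosure handles both cases. A minimal sketch of that wrapping, assuming the same constructor arguments used above (the NearRows class and wrapNearRow helper are hypothetical):

import org.apache.ignite.internal.processors.cache.CacheObject;
import org.apache.ignite.internal.processors.cache.KeyCacheObject;
import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow;
import org.apache.ignite.internal.processors.cache.persistence.CacheDataRowAdapter;
import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
import org.jetbrains.annotations.Nullable;

final class NearRows {
    /** Wraps in-memory entry state into a row; a {@code null} value means there is no row to feed the closure. */
    @Nullable static CacheDataRow wrapNearRow(KeyCacheObject key, @Nullable CacheObject val, GridCacheVersion ver, long expireTime) {
        return val != null ? new CacheDataRowAdapter(key, val, ver, expireTime) : null;
    }
}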
Use of org.apache.ignite.internal.processors.cache.persistence.CacheDataRow in project ignite by apache.
The class GridCacheMapEntry, method unswap.
/**
 * Unswaps an entry.
 *
 * @param row Already extracted cache data row, or {@code null} to read it from the off-heap store.
 * @param checkExpire If {@code true}, checks for expiration; as a result the entry can be obsoleted or marked deleted.
 * @return Cache data row or {@code null}.
 * @throws IgniteCheckedException If failed.
 * @throws GridCacheEntryRemovedException If entry was removed.
 */
@Nullable protected CacheDataRow unswap(@Nullable CacheDataRow row, boolean checkExpire)
    throws IgniteCheckedException, GridCacheEntryRemovedException {
    boolean obsolete = false;
    boolean deferred = false;
    GridCacheVersion ver0 = null;

    cctx.shared().database().checkpointReadLock();

    lockEntry();

    try {
        checkObsolete();

        if (isStartVersion() && ((flags & IS_UNSWAPPED_MASK) == 0)) {
            assert row == null || Objects.equals(row.key(), key) :
                "Unexpected row key [row.key=" + row.key() + ", cacheEntry.key=" + key + "]";

            CacheDataRow read = row == null ? cctx.offheap().read(this) : row;

            flags |= IS_UNSWAPPED_MASK;

            if (read != null) {
                CacheObject val = read.value();

                update(val, read.expireTime(), 0, read.version(), false);

                if (!(checkExpire && read.expireTime() > 0) || (read.expireTime() > U.currentTimeMillis()))
                    return read;
                else {
                    if (onExpired(this.val, null)) {
                        if (cctx.deferredDelete()) {
                            deferred = true;
                            ver0 = ver;
                        }
                        else
                            obsolete = true;
                    }
                }
            }
        }
    }
    finally {
        unlockEntry();

        cctx.shared().database().checkpointReadUnlock();
    }

    if (obsolete) {
        onMarkedObsolete();

        cctx.cache().removeEntry(this);
    }

    if (deferred) {
        assert ver0 != null;

        cctx.onDeferredDelete(this, ver0);
    }

    return null;
}
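unswap makes the lazy-load contract explicit: a caller that has already extracted the row passes it in to avoid a second off-heap lookup, while passing null forces cctx.offheap().read(this). A hedged usage sketch (the caller code, preloadedRow, and the process consumer are hypothetical):

// Reuse a previously extracted row if available; otherwise unswap reads it
// from the off-heap store. An expired or missing row yields null.
CacheDataRow row = entry.unswap(preloadedRow, /*checkExpire*/true);

if (row != null)
    process(row.key(), row.value(), row.version());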
Use of org.apache.ignite.internal.processors.cache.persistence.CacheDataRow in project ignite by apache.
The class GridCacheMapEntry, method allVersionsInfo.
/** {@inheritDoc} */
@Nullable @Override public List<GridCacheEntryInfo> allVersionsInfo() throws IgniteCheckedException {
    assert cctx.mvccEnabled();

    lockEntry();

    try {
        if (obsolete())
            return Collections.emptyList();

        GridCursor<? extends CacheDataRow> cur =
            cctx.offheap().dataStore(localPartition()).mvccAllVersionsCursor(cctx, key, NO_KEY);

        List<GridCacheEntryInfo> res = new ArrayList<>();

        while (cur.next()) {
            CacheDataRow row = cur.get();

            GridCacheMvccEntryInfo info = new GridCacheMvccEntryInfo();

            info.key(key);
            info.value(row.value());
            info.cacheId(cctx.cacheId());
            info.version(row.version());
            info.setNew(false);
            info.setDeleted(false);

            byte txState = row.mvccTxState() != TxState.NA ? row.mvccTxState() :
                MvccUtils.state(cctx, row.mvccCoordinatorVersion(), row.mvccCounter(), row.mvccOperationCounter());

            if (txState == TxState.ABORTED)
                continue;

            info.mvccVersion(row.mvccCoordinatorVersion(), row.mvccCounter(), row.mvccOperationCounter());
            info.mvccTxState(txState);

            byte newTxState = row.newMvccTxState() != TxState.NA ? row.newMvccTxState() :
                MvccUtils.state(cctx, row.newMvccCoordinatorVersion(), row.newMvccCounter(), row.newMvccOperationCounter());

            if (newTxState != TxState.ABORTED) {
                info.newMvccVersion(row.newMvccCoordinatorVersion(), row.newMvccCounter(), row.newMvccOperationCounter());
                info.newMvccTxState(newTxState);
            }

            long expireTime = row.expireTime();

            long ttl = expireTime == CU.EXPIRE_TIME_ETERNAL ? CU.TTL_ETERNAL : expireTime - U.currentTimeMillis();

            if (ttl < 0)
                ttl = CU.TTL_MINIMUM;

            info.ttl(ttl);
            info.expireTime(expireTime);

            res.add(info);
        }

        return res;
    }
    finally {
        unlockEntry();
    }
}
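The TTL derivation at the end of the loop maps an eternal expire time to an eternal TTL; otherwise it returns the remaining lifetime, clamped to the minimum once it has already passed. The same rule as a standalone helper (the Ttl class and remainingTtl name are hypothetical; CU and U are the Ignite utility classes used above):

import org.apache.ignite.internal.util.typedef.internal.CU;
import org.apache.ignite.internal.util.typedef.internal.U;

final class Ttl {
    /** Derives the remaining TTL from an absolute expire time, mirroring the loop above. */
    static long remainingTtl(long expireTime) {
        if (expireTime == CU.EXPIRE_TIME_ETERNAL)
            return CU.TTL_ETERNAL;

        long ttl = expireTime - U.currentTimeMillis();

        return ttl < 0 ? CU.TTL_MINIMUM : ttl;
    }
}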
Use of org.apache.ignite.internal.processors.cache.persistence.CacheDataRow in project ignite by apache.
The class IdleVerifyUtility, method calculatePartitionHash.
/**
 * @param partKey Partition key.
 * @param updCntr Partition update counter prior to the check.
 * @param consId Local node consistent ID.
 * @param state Partition state to check.
 * @param isPrimary {@code true} if the partition is primary.
 * @param partSize Partition size on disk.
 * @param it Iterator through partition data rows.
 * @throws IgniteCheckedException If failed.
 * @return Calculated partition hash record, or {@code null} if the partition state does not allow hash calculation.
 */
@Nullable public static PartitionHashRecordV2 calculatePartitionHash(
    PartitionKeyV2 partKey,
    long updCntr,
    Object consId,
    GridDhtPartitionState state,
    boolean isPrimary,
    long partSize,
    GridIterator<CacheDataRow> it
) throws IgniteCheckedException {
    if (state == GridDhtPartitionState.MOVING || state == GridDhtPartitionState.LOST) {
        return new PartitionHashRecordV2(partKey, isPrimary, consId, 0, updCntr,
            state == GridDhtPartitionState.MOVING ? PartitionHashRecordV2.MOVING_PARTITION_SIZE : 0,
            state == GridDhtPartitionState.MOVING ?
                PartitionHashRecordV2.PartitionState.MOVING : PartitionHashRecordV2.PartitionState.LOST);
    }

    if (state != GridDhtPartitionState.OWNING)
        return null;

    int partHash = 0;

    while (it.hasNextX()) {
        CacheDataRow row = it.nextX();

        partHash += row.key().hashCode();

        // Detects ABA problem.
        partHash += row.version().hashCode();

        // Object context is not required since the valueBytes have been read directly from the page.
        partHash += Arrays.hashCode(row.value().valueBytes(null));
    }

    return new PartitionHashRecordV2(partKey, isPrimary, consId, partHash, updCntr, partSize,
        PartitionHashRecordV2.PartitionState.OWNING);
}
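Each row contributes three components to the partition hash: the key hash, the version hash (which catches ABA-style rewrites where a key ends up with the same value through different updates), and the hash of the raw value bytes read directly from the page. That per-row contribution as a standalone helper (the PartitionHashes class and rowHash name are hypothetical):

import java.util.Arrays;
import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow;

final class PartitionHashes {
    /** A single row's contribution to the partition hash computed above. */
    static int rowHash(CacheDataRow row) throws IgniteCheckedException {
        int h = row.key().hashCode();

        // Including the version detects the ABA problem.
        h += row.version().hashCode();

        // No object context is needed: the value bytes were read directly from the page.
        h += Arrays.hashCode(row.value().valueBytes(null));

        return h;
    }
}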