Use of org.apache.ignite.internal.pagemem.wal.record.MvccDataEntry in project ignite by apache.
The readMvccDataEntry method of the class RecordDataV2Serializer.
/**
 * @param in Input to read from.
 * @return Read entry.
 */
private MvccDataEntry readMvccDataEntry(ByteBufferBackedDataInput in) throws IOException, IgniteCheckedException {
    int cacheId = in.readInt();

    int keySize = in.readInt();
    byte keyType = in.readByte();
    byte[] keyBytes = new byte[keySize];
    in.readFully(keyBytes);

    int valSize = in.readInt();

    byte valType = 0;
    byte[] valBytes = null;

    if (valSize >= 0) {
        valType = in.readByte();
        valBytes = new byte[valSize];
        in.readFully(valBytes);
    }

    byte ord = in.readByte();
    GridCacheOperation op = GridCacheOperation.fromOrdinal(ord & 0xFF);

    GridCacheVersion nearXidVer = readVersion(in, true);
    GridCacheVersion writeVer = readVersion(in, false);

    int partId = in.readInt();
    long partCntr = in.readLong();
    long expireTime = in.readLong();

    MvccVersion mvccVer = txRecordSerializer.readMvccVersion(in);

    GridCacheContext cacheCtx = cctx.cacheContext(cacheId);

    if (cacheCtx != null) {
        CacheObjectContext coCtx = cacheCtx.cacheObjectContext();

        KeyCacheObject key = co.toKeyCacheObject(coCtx, keyType, keyBytes);

        if (key.partition() == -1)
            key.partition(partId);

        CacheObject val = valBytes != null ? co.toCacheObject(coCtx, valType, valBytes) : null;

        return new MvccDataEntry(cacheId, key, val, op, nearXidVer, writeVer, expireTime, partId, partCntr, mvccVer);
    }
    else
        return new LazyMvccDataEntry(cctx, cacheId, keyType, keyBytes, valType, valBytes, op, nearXidVer, writeVer,
            expireTime, partId, partCntr, mvccVer);
}
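The field order consumed above is easier to see next to a write-side mirror. The sketch below is hypothetical and is not the Ignite writer (RecordDataV2Serializer has its own write path, and the version and MVCC encodings are variable-length structures that are omitted here); it only spells out, with plain java.io.DataOutput calls, the order and widths of the fields that readMvccDataEntry expects.

// Hypothetical mirror of the read path above, shown only to make the field order explicit.
// This is NOT the Ignite writer; version and MVCC encodings are abbreviated because they
// are variable-length in the real format.
static void writeEntrySketch(java.io.DataOutput out, int cacheId, byte keyType, byte[] keyBytes,
    byte valType, byte[] valBytes, int opOrdinal, int partId, long partCntr, long expireTime)
    throws java.io.IOException {
    out.writeInt(cacheId);

    out.writeInt(keyBytes.length);
    out.writeByte(keyType);
    out.write(keyBytes);

    if (valBytes == null)
        out.writeInt(-1);            // A negative size marks an absent value (e.g. DELETE).
    else {
        out.writeInt(valBytes.length);
        out.writeByte(valType);
        out.write(valBytes);
    }

    out.writeByte(opOrdinal);        // GridCacheOperation ordinal.

    // Near xid version and write version are variable-length structures written by the
    // cache version codec; omitted here.

    out.writeInt(partId);
    out.writeLong(partCntr);
    out.writeLong(expireTime);

    // The MVCC version follows last and is written by the tx record serializer; omitted here.
}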
Use of org.apache.ignite.internal.pagemem.wal.record.MvccDataEntry in project ignite by apache.
The initialValue method of the class GridCacheMapEntry.
/**
 * {@inheritDoc}
 */
@Override
public boolean initialValue(CacheObject val, GridCacheVersion ver, MvccVersion mvccVer, MvccVersion newMvccVer,
    byte mvccTxState, byte newMvccTxState, long ttl, long expireTime, boolean preload, AffinityTopologyVersion topVer,
    GridDrType drType, boolean fromStore, boolean primary, CacheDataRow row)
    throws IgniteCheckedException, GridCacheEntryRemovedException {
    assert !primary || !(preload || fromStore);

    ensureFreeSpace();

    boolean deferred = false;
    boolean obsolete = false;

    GridCacheVersion oldVer = null;

    lockListenerReadLock();
    lockEntry();

    try {
        checkObsolete();

        boolean walEnabled = !cctx.isNear() && cctx.group().persistenceEnabled() && cctx.group().walEnabled();

        long expTime = expireTime < 0 ? CU.toExpireTime(ttl) : expireTime;

        val = cctx.kernalContext().cacheObjects().prepareForCache(val, cctx);

        final boolean unswapped = ((flags & IS_UNSWAPPED_MASK) != 0);

        boolean update;

        IgnitePredicate<CacheDataRow> p = new IgnitePredicate<CacheDataRow>() {
            @Override public boolean apply(@Nullable CacheDataRow row) {
                boolean update0;

                GridCacheVersion currentVer = row != null ? row.version() : GridCacheMapEntry.this.ver;

                boolean isStartVer = cctx.shared().versions().isStartVersion(currentVer);

                if (cctx.group().persistenceEnabled()) {
                    if (!isStartVer) {
                        if (cctx.atomic())
                            update0 = ATOMIC_VER_COMPARATOR.compare(currentVer, ver) < 0;
                        else
                            update0 = currentVer.compareTo(ver) < 0;
                    }
                    else
                        update0 = true;
                }
                else
                    update0 = isStartVer;

                update0 |= (!preload && deletedUnlocked());

                return update0;
            }
        };

        if (unswapped) {
            update = p.apply(null);

            if (update) {
                // If entry is already unswapped and we are modifying it, we must run deletion callbacks for old value.
                long oldExpTime = expireTimeUnlocked();

                if (oldExpTime > 0 && oldExpTime < U.currentTimeMillis()) {
                    if (onExpired(this.val, null)) {
                        if (cctx.deferredDelete()) {
                            deferred = true;
                            oldVer = this.ver;
                        }
                        else if (val == null)
                            obsolete = true;
                    }
                }

                if (cctx.mvccEnabled()) {
                    assert !preload;

                    cctx.offheap().mvccInitialValue(this, val, ver, expTime, mvccVer, newMvccVer);
                }
                else
                    storeValue(val, expTime, ver, null, row);
            }
        }
        else {
            if (cctx.mvccEnabled()) {
                // Cannot identify on the fly whether the entry exists, so unswap it first.
                unswap(false);

                if (update = p.apply(null)) {
                    // If entry is already unswapped and we are modifying it, we must run deletion callbacks for old value.
                    long oldExpTime = expireTimeUnlocked();
                    long delta = (oldExpTime == 0 ? 0 : oldExpTime - U.currentTimeMillis());

                    if (delta < 0) {
                        if (onExpired(this.val, null)) {
                            if (cctx.deferredDelete()) {
                                deferred = true;
                                oldVer = this.ver;
                            }
                            else if (val == null)
                                obsolete = true;
                        }
                    }

                    assert !preload;

                    cctx.offheap().mvccInitialValue(this, val, ver, expTime, mvccVer, newMvccVer);
                }
            }
            else
                // Optimization to access storage only once.
                update = storeValue(val, expTime, ver, p, row);
        }

        if (update) {
            update(val, expTime, ttl, ver, true);

            boolean skipQryNtf = false;

            if (val == null) {
                skipQryNtf = true;

                if (cctx.deferredDelete() && !deletedUnlocked() && !isInternal())
                    deletedUnlocked(true);
            }
            else if (deletedUnlocked())
                deletedUnlocked(false);

            long updateCntr = 0;

            if (!preload)
                updateCntr = nextPartitionCounter(topVer, true, true, null);

            if (walEnabled) {
                if (cctx.mvccEnabled()) {
                    cctx.shared().wal().log(new MvccDataRecord(new MvccDataEntry(
                        cctx.cacheId(),
                        key,
                        val,
                        val == null ? DELETE : GridCacheOperation.CREATE,
                        null,
                        ver,
                        expireTime,
                        partition(),
                        updateCntr,
                        mvccVer == null ? MvccUtils.INITIAL_VERSION : mvccVer)));
                }
                else {
                    cctx.shared().wal().log(new DataRecord(new DataEntry(
                        cctx.cacheId(),
                        key,
                        val,
                        val == null ? DELETE : GridCacheOperation.CREATE,
                        null,
                        ver,
                        expireTime,
                        partition(),
                        updateCntr,
                        DataEntry.flags(primary, preload, fromStore))));
                }
            }

            drReplicate(drType, val, ver, topVer);

            if (!skipQryNtf) {
                cctx.continuousQueries().onEntryUpdated(key, val, null, this.isInternal() || !this.context().userCache(),
                    this.partition(), true, preload, updateCntr, null, topVer);
            }

            updatePlatformCache(val, topVer);

            onUpdateFinished(updateCntr);

            if (!fromStore && cctx.store().isLocal()) {
                if (val != null)
                    cctx.store().put(null, key, val, ver);
            }

            return true;
        }

        return false;
    }
    finally {
        unlockEntry();
        unlockListenerReadLock();

        if (obsolete) {
            onMarkedObsolete();

            cctx.cache().removeEntry(this);
        }

        if (deferred) {
            assert oldVer != null;

            cctx.onDeferredDelete(this, oldVer);
        }
    }
}
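The WAL branch of initialValue is the part that actually produces MvccDataEntry instances. As a minimal sketch, the logging decision can be read in isolation as below; the helper is hypothetical (it does not exist in GridCacheMapEntry) and simply restates the two branches above with the same calls and arguments.

// Hypothetical helper, not present in the Ignite source: it only restates the WAL-logging
// branch of initialValue() above. Argument names mirror the locals used there.
private void logInitialValue(CacheObject val, GridCacheVersion ver, MvccVersion mvccVer, long expireTime,
    long updateCntr, boolean primary, boolean preload, boolean fromStore) throws IgniteCheckedException {
    GridCacheOperation op = val == null ? DELETE : GridCacheOperation.CREATE;

    if (cctx.mvccEnabled()) {
        // MVCC caches log an MvccDataRecord; a missing MVCC version falls back to INITIAL_VERSION.
        cctx.shared().wal().log(new MvccDataRecord(new MvccDataEntry(cctx.cacheId(), key, val, op, null, ver,
            expireTime, partition(), updateCntr, mvccVer == null ? MvccUtils.INITIAL_VERSION : mvccVer)));
    }
    else {
        // Non-MVCC caches log a plain DataRecord whose entry carries the primary/preload/fromStore flags.
        cctx.shared().wal().log(new DataRecord(new DataEntry(cctx.cacheId(), key, val, op, null, ver,
            expireTime, partition(), updateCntr, DataEntry.flags(primary, preload, fromStore))));
    }
}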
Use of org.apache.ignite.internal.pagemem.wal.record.MvccDataEntry in project ignite by apache.
The entrySize method of the class RecordDataV1Serializer.
/**
 * @param entry Entry to get size for.
 * @return Entry size.
 * @throws IgniteCheckedException If failed to get key or value bytes length.
 */
protected int entrySize(DataEntry entry) throws IgniteCheckedException {
    GridCacheContext cctx = this.cctx.cacheContext(entry.cacheId());
    CacheObjectContext coCtx = cctx.cacheObjectContext();

    return /*cache ID*/4 +
        /*key*/entry.key().valueBytesLength(coCtx) +
        /*value*/(entry.value() == null ? 4 : entry.value().valueBytesLength(coCtx)) +
        /*op*/1 +
        /*near xid ver*/CacheVersionIO.size(entry.nearXidVersion(), true) +
        /*write ver*/CacheVersionIO.size(entry.writeVersion(), false) +
        /*part ID*/4 +
        /*expire Time*/8 +
        /*part cnt*/8 +
        /*primary*/(entry instanceof MvccDataEntry ? 0 : 1);
}
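A worked example makes the size formula concrete. The numbers below are invented for illustration only; real key, value and version lengths come from valueBytesLength(coCtx) and CacheVersionIO.size(...).

// Illustrative numbers, not taken from the Ignite source.
int keyLen = 16;       // entry.key().valueBytesLength(coCtx), assumed
int valLen = 32;       // entry.value().valueBytesLength(coCtx), assumed
int nearVerLen = 1;    // CacheVersionIO.size(nearXidVersion, true), assumed
int writeVerLen = 25;  // CacheVersionIO.size(writeVersion, false), assumed

// Plain DataEntry: the trailing one-byte /*primary*/ term is counted.
int plainSize = 4 + keyLen + valLen + 1 + nearVerLen + writeVerLen + 4 + 8 + 8 + 1;  // = 100

// MvccDataEntry: the '(entry instanceof MvccDataEntry ? 0 : 1)' term drops that byte; the MVCC
// version itself is handled by the MVCC-aware read/write path shown in readMvccDataEntry above.
int mvccSize = 4 + keyLen + valLen + 1 + nearVerLen + writeVerLen + 4 + 8 + 8;       // = 99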
Use of org.apache.ignite.internal.pagemem.wal.record.MvccDataEntry in project ignite by apache.
The checkDataWalEntries method of the class IgnitePdsCheckpointSimulationWithRealCpDisabledTest.
/**
 * @throws Exception if failed.
 */
private void checkDataWalEntries(boolean mvcc) throws Exception {
    IgniteEx ig = startGrid(0);

    ig.cluster().active(true);

    GridCacheSharedContext<Object, Object> sharedCtx = ig.context().cache().context();
    GridCacheContext<Object, Object> cctx = sharedCtx.cache().cache(mvcc ? MVCC_CACHE_NAME : CACHE_NAME).context();

    GridCacheDatabaseSharedManager db = (GridCacheDatabaseSharedManager)sharedCtx.database();
    IgniteWriteAheadLogManager wal = sharedCtx.wal();

    assertTrue(wal.isAlwaysWriteFullPages());

    db.enableCheckpoints(false).get();

    final int cnt = 10;

    List<DataEntry> entries = new ArrayList<>(cnt);

    for (int i = 0; i < cnt; i++) {
        GridCacheOperation op = i % 2 == 0 ? GridCacheOperation.UPDATE : GridCacheOperation.DELETE;

        KeyCacheObject key = cctx.toCacheKeyObject(i);

        CacheObject val = null;

        if (op != GridCacheOperation.DELETE)
            val = cctx.toCacheObject("value-" + i);

        entries.add(mvcc
            ? new MvccDataEntry(cctx.cacheId(), key, val, op, null, cctx.cache().nextVersion(), 0L,
                cctx.affinity().partition(i), i, new MvccVersionImpl(1000L, 10L, i + 1))
            : new DataEntry(cctx.cacheId(), key, val, op, null, cctx.cache().nextVersion(), 0L,
                cctx.affinity().partition(i), i, DataEntry.EMPTY_FLAGS));
    }

    UUID cpId = UUID.randomUUID();

    WALPointer start = wal.log(new CheckpointRecord(cpId, null));

    wal.flush(start, false);

    for (DataEntry entry : entries)
        wal.log(mvcc ? new MvccDataRecord((MvccDataEntry)entry) : new DataRecord(entry));

    // Data will not be written to the page store.
    stopAllGrids();

    ig = startGrid(0);

    ig.cluster().active(true);

    sharedCtx = ig.context().cache().context();
    cctx = sharedCtx.cache().cache(mvcc ? MVCC_CACHE_NAME : CACHE_NAME).context();

    db = (GridCacheDatabaseSharedManager)sharedCtx.database();
    wal = sharedCtx.wal();

    db.enableCheckpoints(false).get();

    try (PartitionMetaStateRecordExcludeIterator it = new PartitionMetaStateRecordExcludeIterator(wal.replay(start))) {
        IgniteBiTuple<WALPointer, WALRecord> cpRecordTup = it.next();

        assert cpRecordTup.get2() instanceof CheckpointRecord;

        assertEquals(start, cpRecordTup.get1());

        CheckpointRecord cpRec = (CheckpointRecord)cpRecordTup.get2();

        assertEquals(cpId, cpRec.checkpointId());
        assertNull(cpRec.checkpointMark());
        assertFalse(cpRec.end());

        int idx = 0;
        CacheObjectContext coctx = cctx.cacheObjectContext();

        while (idx < entries.size()) {
            IgniteBiTuple<WALPointer, WALRecord> dataRecTup = it.next();

            if (!mvcc)
                assert dataRecTup.get2() instanceof DataRecord;
            else
                assert dataRecTup.get2() instanceof MvccDataRecord;

            DataRecord dataRec = (DataRecord)dataRecTup.get2();

            DataEntry entry = entries.get(idx);

            assertEquals(1, dataRec.entryCount());

            DataEntry readEntry = dataRec.get(0);

            assertEquals(entry.cacheId(), readEntry.cacheId());
            assertEquals(entry.key().<Integer>value(coctx, true), readEntry.key().<Integer>value(coctx, true));
            assertEquals(entry.op(), readEntry.op());

            if (entry.op() == GridCacheOperation.UPDATE)
                assertEquals(entry.value().value(coctx, true), readEntry.value().value(coctx, true));
            else
                assertNull(entry.value());

            assertEquals(entry.writeVersion(), readEntry.writeVersion());
            assertEquals(entry.nearXidVersion(), readEntry.nearXidVersion());
            assertEquals(entry.partitionCounter(), readEntry.partitionCounter());

            if (mvcc) {
                assert entry instanceof MvccDataEntry;
                assert readEntry instanceof MvccDataEntry;

                assertEquals(((MvccDataEntry)entry).mvccVer(), ((MvccDataEntry)readEntry).mvccVer());
            }

            idx++;
        }
    }
}
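Condensed from the test above, here is a minimal sketch of building and logging a single MVCC entry. It assumes cctx and wal are obtained from a started node exactly as in the test, and the MVCC version values are illustrative.

// Minimal sketch, distilled from checkDataWalEntries(); not a complete test.
KeyCacheObject key = cctx.toCacheKeyObject(42);
CacheObject val = cctx.toCacheObject("value-42");

MvccDataEntry entry = new MvccDataEntry(
    cctx.cacheId(),
    key,
    val,
    GridCacheOperation.UPDATE,
    null,                                   // No near xid version.
    cctx.cache().nextVersion(),
    0L,                                     // No expire time.
    cctx.affinity().partition(42),
    1L,                                     // Partition counter (illustrative).
    new MvccVersionImpl(1000L, 10L, 1));    // Illustrative MVCC version, as in the test.

WALPointer ptr = wal.log(new MvccDataRecord(entry));
wal.flush(ptr, false);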
Use of org.apache.ignite.internal.pagemem.wal.record.MvccDataEntry in project ignite by apache.
The mvccSet method of the class GridCacheMapEntry.
/**
 * {@inheritDoc}
 */
@Override
public final GridCacheUpdateTxResult mvccSet(IgniteInternalTx tx, UUID affNodeId, CacheObject val,
    EntryProcessor entryProc, Object[] invokeArgs, long ttl0, AffinityTopologyVersion topVer, MvccSnapshot mvccVer,
    GridCacheOperation op, boolean needHistory, boolean noCreate, boolean needOldVal, CacheEntryPredicate filter,
    boolean retVal, boolean keepBinary) throws IgniteCheckedException, GridCacheEntryRemovedException {
    assert tx != null;

    final boolean valid = valid(tx.topologyVersion());
    final boolean invoke = entryProc != null;

    final GridCacheVersion newVer;

    WALPointer logPtr = null;

    ensureFreeSpace();

    lockEntry();

    MvccUpdateResult res;

    try {
        checkObsolete();

        newVer = tx.writeVersion();

        assert newVer != null : "Failed to get write version for tx: " + tx;

        // Determine new ttl and expire time.
        long expireTime, ttl = ttl0;

        if (ttl == -1L) {
            ttl = ttlExtras();
            expireTime = expireTimeExtras();
        }
        else
            expireTime = CU.toExpireTime(ttl);

        assert ttl >= 0 : ttl;
        assert expireTime >= 0 : expireTime;

        // Detach value before index update.
        val = cctx.kernalContext().cacheObjects().prepareForCache(val, cctx);

        assert val != null || invoke;

        res = cctx.offheap().mvccUpdate(this, val, newVer, expireTime, mvccVer, tx.local(), needHistory, noCreate,
            needOldVal, filter, retVal, keepBinary, entryProc, invokeArgs);

        assert res != null;

        // VERSION_FOUND is possible only for a local CREATE or for a non-local tx,
        // e.g. when updating the key which just has been rebalanced.
        assert res.resultType() != ResultType.VERSION_FOUND || op == CREATE && tx.local() || !tx.local();

        // PREV_NOT_NULL on CREATE is possible only on primary.
        assert res.resultType() != ResultType.PREV_NOT_NULL || op != CREATE || tx.local();

        if (res.resultType() == ResultType.VERSION_MISMATCH)
            throw serializationError();
        else if (res.resultType() == ResultType.FILTERED) {
            GridCacheUpdateTxResult updRes = new GridCacheUpdateTxResult(invoke);

            assert !invoke || res.invokeResult() != null;

            // No-op invoke happened.
            if (invoke)
                updRes.invokeResult(res.invokeResult());

            updRes.filtered(true);

            if (retVal)
                updRes.prevValue(res.oldValue());

            return updRes;
        }
        else if (noCreate && !invoke && res.resultType() == ResultType.PREV_NULL)
            return new GridCacheUpdateTxResult(false);
        else if (res.resultType() == ResultType.LOCKED) {
            unlockEntry();

            MvccVersion lockVer = res.resultVersion();

            GridFutureAdapter<GridCacheUpdateTxResult> resFut = new GridFutureAdapter<>();

            IgniteInternalFuture<?> lockFut = cctx.kernalContext().coordinators().waitForLock(cctx, mvccVer, lockVer);

            lockFut.listen(new MvccUpdateLockListener(tx, this, affNodeId, topVer, val, ttl0, mvccVer, op, needHistory,
                noCreate, resFut, needOldVal, filter, retVal, keepBinary, entryProc, invokeArgs));

            return new GridCacheUpdateTxResult(false, resFut);
        }
        else if (op == CREATE && tx.local() && (res.resultType() == ResultType.PREV_NOT_NULL ||
            res.resultType() == ResultType.VERSION_FOUND))
            throw new IgniteTxDuplicateKeyCheckedException("Duplicate key during INSERT [key=" + key + ']');

        if (cctx.deferredDelete() && deletedUnlocked() && !detached())
            deletedUnlocked(false);

        if (res.resultType() == ResultType.PREV_NULL) {
            TxCounters counters = tx.txCounters(true);

            if (compareIgnoreOpCounter(res.resultVersion(), mvccVer) == 0) {
                if (res.isKeyAbsentBefore())
                    counters.incrementUpdateCounter(cctx.cacheId(), partition());
            }
            else
                counters.incrementUpdateCounter(cctx.cacheId(), partition());

            counters.accumulateSizeDelta(cctx.cacheId(), partition(), 1);
        }
        else if (res.resultType() == ResultType.PREV_NOT_NULL && compareIgnoreOpCounter(res.resultVersion(), mvccVer) != 0) {
            TxCounters counters = tx.txCounters(true);

            counters.incrementUpdateCounter(cctx.cacheId(), partition());
        }
        else if (res.resultType() == ResultType.REMOVED_NOT_NULL) {
            TxCounters counters = tx.txCounters(true);

            if (compareIgnoreOpCounter(res.resultVersion(), mvccVer) == 0) {
                // Do not count own update removal.
                if (res.isKeyAbsentBefore())
                    counters.decrementUpdateCounter(cctx.cacheId(), partition());
            }
            else
                counters.incrementUpdateCounter(cctx.cacheId(), partition());

            counters.accumulateSizeDelta(cctx.cacheId(), partition(), -1);
        }

        if (cctx.group().persistenceEnabled() && cctx.group().walEnabled()) {
            logPtr = cctx.shared().wal().log(new MvccDataRecord(new MvccDataEntry(
                cctx.cacheId(),
                key,
                val,
                res.resultType() == ResultType.PREV_NULL ? CREATE :
                    (res.resultType() == ResultType.REMOVED_NOT_NULL) ? DELETE : UPDATE,
                tx.nearXidVersion(),
                newVer,
                expireTime,
                key.partition(),
                0L,
                mvccVer)));
        }

        update(val, expireTime, ttl, newVer, true);

        recordNodeId(affNodeId, topVer);
    }
    finally {
        if (lockedByCurrentThread()) {
            unlockEntry();

            cctx.evicts().touch(this);
        }
    }

    onUpdateFinished(0L);

    GridCacheUpdateTxResult updRes = valid ? new GridCacheUpdateTxResult(true, 0L, logPtr) :
        new GridCacheUpdateTxResult(false, logPtr);

    if (retVal && (res.resultType() == ResultType.PREV_NOT_NULL || res.resultType() == ResultType.VERSION_FOUND))
        updRes.prevValue(res.oldValue());

    if (needOldVal && compareIgnoreOpCounter(res.resultVersion(), mvccVer) != 0 &&
        (res.resultType() == ResultType.PREV_NOT_NULL || res.resultType() == ResultType.REMOVED_NOT_NULL))
        updRes.oldValue(res.oldValue());

    updRes.newValue(res.newValue());

    if (invoke && res.resultType() != ResultType.VERSION_FOUND) {
        assert res.invokeResult() != null;

        updRes.invokeResult(res.invokeResult());
    }

    updRes.mvccHistory(res.history());

    return updRes;
}
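One detail worth pulling out of the long method above is how the WAL operation is chosen from the MVCC update result. The helper below is hypothetical (the mapping is inlined in mvccSet itself, and CREATE/DELETE/UPDATE are the statically imported GridCacheOperation constants used there), but it restates that mapping exactly.

// Hypothetical helper, not in the Ignite source: restates the operation mapping used by
// mvccSet() when it builds the MvccDataEntry for the WAL.
private static GridCacheOperation walOperation(ResultType resType) {
    if (resType == ResultType.PREV_NULL)
        return CREATE;              // The key did not exist before: log a CREATE.

    if (resType == ResultType.REMOVED_NOT_NULL)
        return DELETE;              // An existing value was removed: log a DELETE.

    return UPDATE;                  // Everything else is logged as an UPDATE.
}

The MvccDataEntry logged by mvccSet combines this operation with the transaction's near xid version, the new write version and a zero partition counter, as shown at the wal().log(...) call above.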