Use of org.apache.ignite.internal.pagemem.wal.record.DataEntry in project ignite by apache.
The class IgniteTxLocalAdapter, method userCommit.
/**
* {@inheritDoc}
*/
@Override
public void userCommit() throws IgniteCheckedException {
TransactionState state = state();
if (state != COMMITTING) {
if (remainingTime() == -1)
throw new IgniteTxTimeoutCheckedException("Transaction timed out: " + this);
setRollbackOnly();
throw new IgniteCheckedException("Invalid transaction state for commit [state=" + state + ", tx=" + this + ']');
}
checkValid();
Collection<IgniteTxEntry> commitEntries = (near() || cctx.snapshot().needTxReadLogging()) ? allEntries() : writeEntries();
boolean empty = F.isEmpty(commitEntries) && !queryEnlisted();
// Register this transaction as completed prior to the write phase to ensure proper lock ordering
// for removed entries. The colocated transaction is added to the committed set even if it is empty,
// to correctly order locks on backup nodes.
if (!empty || colocated())
cctx.tm().addCommittedTx(this);
if (!empty) {
batchStoreCommit(writeEntries());
WALPointer ptr = null;
IgniteCheckedException err = null;
cctx.database().checkpointReadLock();
try {
cctx.tm().txContext(this);
AffinityTopologyVersion topVer = topologyVersion();
TxCounters txCounters = txCounters(false);
/*
* Commit to cache. Note that for a 'near' transaction we loop through all the entries.
*/
for (IgniteTxEntry txEntry : commitEntries) {
GridCacheContext cacheCtx = txEntry.context();
GridDrType drType = cacheCtx.isDrEnabled() ? DR_PRIMARY : DR_NONE;
UUID nodeId = txEntry.nodeId() == null ? this.nodeId : txEntry.nodeId();
while (true) {
try {
GridCacheEntryEx cached = txEntry.cached();
// Must try to evict near entries before committing from the
// transaction manager to make sure locks are held.
if (!evictNearEntry(txEntry, false)) {
if (cacheCtx.isNear() && cacheCtx.dr().receiveEnabled()) {
cached.markObsolete(xidVer);
break;
}
if (cached.detached())
break;
boolean updateNearCache = updateNearCache(cacheCtx, txEntry.key(), topVer);
boolean metrics = true;
if (!updateNearCache && cacheCtx.isNear() && txEntry.locallyMapped())
metrics = false;
boolean evt = !isNearLocallyMapped(txEntry, false);
if (!F.isEmpty(txEntry.entryProcessors()) || !F.isEmpty(txEntry.filters()))
txEntry.cached().unswap(false);
IgniteBiTuple<GridCacheOperation, CacheObject> res = applyTransformClosures(txEntry, true, null);
GridCacheVersion dhtVer = null;
// For near-local transactions we must record the DHT version in order to keep
// near entries on backup nodes until the backup remote transaction completes.
if (cacheCtx.isNear()) {
if (txEntry.op() == CREATE || txEntry.op() == UPDATE || txEntry.op() == DELETE || txEntry.op() == TRANSFORM)
dhtVer = txEntry.dhtVersion();
if ((txEntry.op() == CREATE || txEntry.op() == UPDATE) && txEntry.conflictExpireTime() == CU.EXPIRE_TIME_CALCULATE) {
ExpiryPolicy expiry = cacheCtx.expiryForTxEntry(txEntry);
if (expiry != null) {
txEntry.cached().unswap(false);
Duration duration = cached.hasValue() ? expiry.getExpiryForUpdate() : expiry.getExpiryForCreation();
txEntry.ttl(CU.toTtl(duration));
}
}
}
GridCacheOperation op = res.get1();
CacheObject val = res.get2();
// Deal with conflicts.
GridCacheVersion explicitVer = txEntry.conflictVersion() != null ? txEntry.conflictVersion() : writeVersion();
if ((op == CREATE || op == UPDATE) && txEntry.conflictExpireTime() == CU.EXPIRE_TIME_CALCULATE) {
ExpiryPolicy expiry = cacheCtx.expiryForTxEntry(txEntry);
if (expiry != null) {
Duration duration = cached.hasValue() ? expiry.getExpiryForUpdate() : expiry.getExpiryForCreation();
long ttl = CU.toTtl(duration);
txEntry.ttl(ttl);
if (ttl == CU.TTL_ZERO)
op = DELETE;
}
}
boolean conflictNeedResolve = cacheCtx.conflictNeedResolve();
GridCacheVersionConflictContext<?, ?> conflictCtx = null;
if (conflictNeedResolve) {
IgniteBiTuple<GridCacheOperation, GridCacheVersionConflictContext> conflictRes = conflictResolve(op, txEntry, val, explicitVer, cached);
assert conflictRes != null;
conflictCtx = conflictRes.get2();
if (conflictCtx.isUseOld())
op = NOOP;
else if (conflictCtx.isUseNew()) {
txEntry.ttl(conflictCtx.ttl());
txEntry.conflictExpireTime(conflictCtx.expireTime());
} else {
assert conflictCtx.isMerge();
op = conflictRes.get1();
val = txEntry.context().toCacheObject(conflictCtx.mergeValue());
explicitVer = writeVersion();
txEntry.ttl(conflictCtx.ttl());
txEntry.conflictExpireTime(conflictCtx.expireTime());
}
} else
// Nullify explicit version so that innerSet/innerRemove will work as usual.
explicitVer = null;
if (sndTransformedVals || conflictNeedResolve) {
assert sndTransformedVals && cacheCtx.isReplicated() || conflictNeedResolve;
txEntry.value(val, true, false);
txEntry.op(op);
txEntry.entryProcessors(null);
txEntry.conflictVersion(explicitVer);
}
if (dhtVer == null)
dhtVer = explicitVer != null ? explicitVer : writeVersion();
if (op == CREATE || op == UPDATE) {
assert val != null : txEntry;
GridCacheUpdateTxResult updRes = cached.innerSet(this, eventNodeId(), txEntry.nodeId(), val,
    false, false, txEntry.ttl(), evt, metrics, txEntry.keepBinary(), txEntry.hasOldValue(),
    txEntry.oldValue(), topVer, null, cached.detached() ? DR_NONE : drType,
    txEntry.conflictExpireTime(), cached.isNear() ? null : explicitVer, resolveTaskName(),
    dhtVer, null);
if (updRes.success())
txEntry.updateCounter(updRes.updateCounter());
if (updRes.loggedPointer() != null)
ptr = updRes.loggedPointer();
if (updRes.success() && updateNearCache) {
final CacheObject val0 = val;
final boolean metrics0 = metrics;
final GridCacheVersion dhtVer0 = dhtVer;
updateNearEntrySafely(cacheCtx, txEntry.key(), entry -> entry.innerSet(null, eventNodeId(), nodeId,
    val0, false, false, txEntry.ttl(), false, metrics0, txEntry.keepBinary(), txEntry.hasOldValue(),
    txEntry.oldValue(), topVer, CU.empty0(), DR_NONE, txEntry.conflictExpireTime(), null,
    resolveTaskName(), dhtVer0, null));
}
} else if (op == DELETE) {
GridCacheUpdateTxResult updRes = cached.innerRemove(this, eventNodeId(), txEntry.nodeId(), false,
    evt, metrics, txEntry.keepBinary(), txEntry.hasOldValue(), txEntry.oldValue(), topVer, null,
    cached.detached() ? DR_NONE : drType, cached.isNear() ? null : explicitVer, resolveTaskName(),
    dhtVer, null);
if (updRes.success())
txEntry.updateCounter(updRes.updateCounter());
if (updRes.loggedPointer() != null)
ptr = updRes.loggedPointer();
if (updRes.success() && updateNearCache) {
final boolean metrics0 = metrics;
final GridCacheVersion dhtVer0 = dhtVer;
updateNearEntrySafely(cacheCtx, txEntry.key(), entry -> entry.innerRemove(null, eventNodeId(), nodeId,
    false, false, metrics0, txEntry.keepBinary(), txEntry.hasOldValue(), txEntry.oldValue(), topVer,
    CU.empty0(), DR_NONE, null, resolveTaskName(), dhtVer0, null));
}
} else if (op == RELOAD) {
cached.innerReload();
if (updateNearCache)
updateNearEntrySafely(cacheCtx, txEntry.key(), entry -> entry.innerReload());
} else if (op == READ) {
CacheGroupContext grp = cacheCtx.group();
if (grp.persistenceEnabled() && grp.walEnabled() && cctx.snapshot().needTxReadLogging()) {
ptr = cctx.wal().log(new DataRecord(new DataEntry(cacheCtx.cacheId(), txEntry.key(), val, op,
    nearXidVersion(), writeVersion(), 0, txEntry.key().partition(), txEntry.updateCounter(),
    DataEntry.flags(CU.txOnPrimary(this)))));
}
ExpiryPolicy expiry = cacheCtx.expiryForTxEntry(txEntry);
if (expiry != null) {
Duration duration = expiry.getExpiryForAccess();
if (duration != null)
cached.updateTtl(null, CU.toTtl(duration));
}
if (log.isDebugEnabled())
log.debug("Ignoring READ entry when committing: " + txEntry);
} else {
assert ownsLock(txEntry.cached()) : "Transaction does not own lock for group lock entry during commit [tx=" + this + ", txEntry=" + txEntry + ']';
if (conflictCtx == null || !conflictCtx.isUseOld()) {
if (txEntry.ttl() != CU.TTL_NOT_CHANGED)
cached.updateTtl(null, txEntry.ttl());
}
if (log.isDebugEnabled())
log.debug("Ignoring NOOP entry when committing: " + txEntry);
}
}
// Check commit locks after the set, to make sure we are not changing obsolete entries
// (innerSet and innerRemove will throw an exception if an entry is obsolete).
if (txEntry.op() != READ)
checkCommitLocks(cached);
// Break out of while loop.
break;
}
// If the entry cached within the transaction got removed, refresh it and retry.
catch (GridCacheEntryRemovedException ignored) {
if (log.isDebugEnabled())
log.debug("Got removed entry during transaction commit (will retry): " + txEntry);
txEntry.cached(entryEx(cacheCtx, txEntry.txKey(), topologyVersion()));
}
}
}
if (!txState.mvccEnabled() && txCounters != null) {
cctx.tm().txHandler().applyPartitionsUpdatesCounters(txCounters.updateCounters());
for (IgniteTxEntry entry : commitEntries) {
if (entry.cqNotifyClosure() != null)
entry.cqNotifyClosure().applyx();
}
}
// Apply cache sizes only on primary nodes. Update counters were already applied during the prepare phase.
applyTxSizes();
cctx.mvccCaching().onTxFinished(this, true);
if (ptr != null)
cctx.wal().flush(ptr, false);
} catch (Throwable ex) {
// We are about to initiate a transaction rollback even though the tx has already started committing.
// Need to remove the version from the committed list.
cctx.tm().removeCommittedTx(this);
if (X.hasCause(ex, NodeStoppingException.class)) {
U.warn(log, "Failed to commit transaction, node is stopping [tx=" + CU.txString(this) + ", err=" + ex + ']');
boolean persistenceEnabled = CU.isPersistenceEnabled(cctx.kernalContext().config());
if (persistenceEnabled) {
GridCacheDatabaseSharedManager dbManager = (GridCacheDatabaseSharedManager) cctx.database();
dbManager.getCheckpointer().skipCheckpointOnNodeStop(true);
}
throw ex;
}
err = heuristicException(ex);
COMMIT_ERR_UPD.compareAndSet(this, null, err);
state(UNKNOWN);
try {
uncommit();
} catch (Throwable e) {
err.addSuppressed(e);
}
throw err;
} finally {
cctx.database().checkpointReadUnlock();
cctx.tm().resetContext();
}
}
// Do not unlock transaction entries if one-phase commit.
if (!onePhaseCommit()) {
if (DONE_FLAG_UPD.compareAndSet(this, 0, 1)) {
// Unlock all locks.
cctx.tm().commitTx(this);
boolean needsCompletedVersions = needsCompletedVersions();
assert !needsCompletedVersions || completedBase != null;
assert !needsCompletedVersions || committedVers != null;
assert !needsCompletedVersions || rolledbackVers != null;
}
}
}
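For orientation, below is a minimal, hedged sketch of the public-API path that leads into this method: committing an explicit transaction on a transactional cache drives the userCommit() flow shown above on the local node. The class name, cache name, and configuration are illustrative, not taken from the Ignite sources.
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.CacheAtomicityMode;
import org.apache.ignite.configuration.CacheConfiguration;
import org.apache.ignite.transactions.Transaction;

public class TxCommitSketch {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            // A TRANSACTIONAL cache is required for explicit transactions ("tx-cache" is illustrative).
            IgniteCache<Integer, String> cache = ignite.getOrCreateCache(
                new CacheConfiguration<Integer, String>("tx-cache")
                    .setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL));

            try (Transaction tx = ignite.transactions().txStart()) {
                cache.put(1, "value-1");

                // commit() eventually reaches IgniteTxLocalAdapter.userCommit() on this node.
                tx.commit();
            }
        }
    }
}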
Use of org.apache.ignite.internal.pagemem.wal.record.DataEntry in project ignite by apache.
The class IgnitePdsSporadicDataRecordsOnBackupTest, method findSporadicDataRecords.
/**
 * Returns the number of {@link DataRecord}s that do not have a near xid version, in other words,
 * the number of records that were created by GridCacheUtils#createBackupPostProcessingClosure().
 *
 * @param nodeFolderName Node folder name.
 * @throws IgniteCheckedException If failed.
 */
private long findSporadicDataRecords(String nodeFolderName) throws IgniteCheckedException {
File dbDir = new File(U.defaultWorkDirectory(), "db");
File commonWalDir = new File(dbDir, "wal");
File walDir = new File(commonWalDir, nodeFolderName);
File walArchiveDir = new File(new File(commonWalDir, "archive"), nodeFolderName);
assertTrue(walDir.exists());
assertTrue(walArchiveDir.exists());
IteratorParametersBuilder params = new IteratorParametersBuilder();
params.bufferSize(1024 * 1024);
params.filesOrDirs(walDir, walArchiveDir);
params.filter((type, pointer) -> type == WALRecord.RecordType.DATA_RECORD_V2);
int cacheId = CU.cacheId(TX_CACHE_NAME);
long createOpCnt = 0;
Predicate<DataEntry> filter = e -> e.cacheId() == cacheId && GridCacheOperation.CREATE == e.op() && e.nearXidVersion() == null;
try (WALIterator itr = new IgniteWalIteratorFactory().iterator(params)) {
while (itr.hasNext()) {
IgniteBiTuple<WALPointer, WALRecord> walEntry = itr.next();
assertTrue(walEntry.get2() instanceof DataRecord);
DataRecord rec = (DataRecord) walEntry.get2();
for (int i = 0; i < rec.entryCount(); i++) {
if (filter.test(rec.get(i)))
createOpCnt++;
}
}
}
return createOpCnt;
}
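A hedged usage sketch for the helper above. Resolving the node folder name through the PDS folder resolver is an assumption for illustration; the actual test may obtain it differently.
// Hypothetical call site (not from the test body shown above):
IgniteEx node = grid(0);
String nodeFolderName = node.context().pdsFolderResolver().resolveFolders().folderName();

// Expect no xid-less CREATE records produced by the backup post-processing closure.
assertEquals(0, findSporadicDataRecords(nodeFolderName));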
Use of org.apache.ignite.internal.pagemem.wal.record.DataEntry in project ignite by apache.
The class IgnitePdsCheckpointSimulationWithRealCpDisabledTest, method checkDataWalEntries.
/**
* @throws Exception if failed.
*/
private void checkDataWalEntries(boolean mvcc) throws Exception {
IgniteEx ig = startGrid(0);
ig.cluster().active(true);
GridCacheSharedContext<Object, Object> sharedCtx = ig.context().cache().context();
GridCacheContext<Object, Object> cctx = sharedCtx.cache().cache(mvcc ? MVCC_CACHE_NAME : CACHE_NAME).context();
GridCacheDatabaseSharedManager db = (GridCacheDatabaseSharedManager) sharedCtx.database();
IgniteWriteAheadLogManager wal = sharedCtx.wal();
assertTrue(wal.isAlwaysWriteFullPages());
db.enableCheckpoints(false).get();
final int cnt = 10;
List<DataEntry> entries = new ArrayList<>(cnt);
for (int i = 0; i < cnt; i++) {
GridCacheOperation op = i % 2 == 0 ? GridCacheOperation.UPDATE : GridCacheOperation.DELETE;
KeyCacheObject key = cctx.toCacheKeyObject(i);
CacheObject val = null;
if (op != GridCacheOperation.DELETE)
val = cctx.toCacheObject("value-" + i);
entries.add(mvcc
    ? new MvccDataEntry(cctx.cacheId(), key, val, op, null, cctx.cache().nextVersion(), 0L,
        cctx.affinity().partition(i), i, new MvccVersionImpl(1000L, 10L, i + 1))
    : new DataEntry(cctx.cacheId(), key, val, op, null, cctx.cache().nextVersion(), 0L,
        cctx.affinity().partition(i), i, DataEntry.EMPTY_FLAGS));
}
UUID cpId = UUID.randomUUID();
WALPointer start = wal.log(new CheckpointRecord(cpId, null));
wal.flush(start, false);
for (DataEntry entry : entries)
    wal.log(mvcc ? new MvccDataRecord((MvccDataEntry) entry) : new DataRecord(entry));
// Data will not be written to the page store.
stopAllGrids();
ig = startGrid(0);
ig.cluster().active(true);
sharedCtx = ig.context().cache().context();
cctx = sharedCtx.cache().cache(mvcc ? MVCC_CACHE_NAME : CACHE_NAME).context();
db = (GridCacheDatabaseSharedManager) sharedCtx.database();
wal = sharedCtx.wal();
db.enableCheckpoints(false).get();
try (PartitionMetaStateRecordExcludeIterator it = new PartitionMetaStateRecordExcludeIterator(wal.replay(start))) {
IgniteBiTuple<WALPointer, WALRecord> cpRecordTup = it.next();
assert cpRecordTup.get2() instanceof CheckpointRecord;
assertEquals(start, cpRecordTup.get1());
CheckpointRecord cpRec = (CheckpointRecord) cpRecordTup.get2();
assertEquals(cpId, cpRec.checkpointId());
assertNull(cpRec.checkpointMark());
assertFalse(cpRec.end());
int idx = 0;
CacheObjectContext coctx = cctx.cacheObjectContext();
while (idx < entries.size()) {
IgniteBiTuple<WALPointer, WALRecord> dataRecTup = it.next();
if (!mvcc)
assert dataRecTup.get2() instanceof DataRecord;
else
assert dataRecTup.get2() instanceof MvccDataRecord;
DataRecord dataRec = (DataRecord) dataRecTup.get2();
DataEntry entry = entries.get(idx);
assertEquals(1, dataRec.entryCount());
DataEntry readEntry = dataRec.get(0);
assertEquals(entry.cacheId(), readEntry.cacheId());
assertEquals(entry.key().<Integer>value(coctx, true), readEntry.key().<Integer>value(coctx, true));
assertEquals(entry.op(), readEntry.op());
if (entry.op() == GridCacheOperation.UPDATE)
assertEquals(entry.value().value(coctx, true), readEntry.value().value(coctx, true));
else
assertNull(entry.value());
assertEquals(entry.writeVersion(), readEntry.writeVersion());
assertEquals(entry.nearXidVersion(), readEntry.nearXidVersion());
assertEquals(entry.partitionCounter(), readEntry.partitionCounter());
if (mvcc) {
assert entry instanceof MvccDataEntry;
assert readEntry instanceof MvccDataEntry;
assertEquals(((MvccDataEntry) entry).mvccVer(), ((MvccDataEntry) readEntry).mvccVer());
}
idx++;
}
}
}
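For reference, the ten-argument DataEntry constructor used when building the entries above, with the role of each parameter annotated. The roles are inferred from the call in the loop; the values mirror it.
// Parameter roles inferred from the entries.add(...) call above:
DataEntry entry = new DataEntry(
    cctx.cacheId(),               // cache ID
    key,                          // key
    val,                          // value (null for DELETE operations)
    op,                           // cache operation (UPDATE or DELETE here)
    null,                         // near xid version (none for these entries)
    cctx.cache().nextVersion(),   // write version
    0L,                           // expire time
    cctx.affinity().partition(i), // partition
    i,                            // partition update counter
    DataEntry.EMPTY_FLAGS);       // flags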
Use of org.apache.ignite.internal.pagemem.wal.record.DataEntry in project ignite by apache.
The class IgniteWalReaderTest, method runRemoveOperationTest.
/**
 * Tests that the DELETE operation can be found in the WAL after mixed cache operations including remove().
 *
 * @param mode Cache atomicity mode.
 * @throws Exception If failed.
 */
private void runRemoveOperationTest(CacheAtomicityMode mode) throws Exception {
Ignite ignite = startGrid();
ignite.cluster().active(true);
createCache2(ignite, mode);
ignite.cluster().active(false);
String subfolderName = genDbSubfolderName(ignite, 0);
stopGrid();
String workDir = U.defaultWorkDirectory();
IgniteWalIteratorFactory factory = new IgniteWalIteratorFactory(log);
IteratorParametersBuilder params = createIteratorParametersBuilder(workDir, subfolderName);
params.filesOrDirs(workDir);
StringBuilder sb = new StringBuilder();
Map<GridCacheOperation, Integer> operationsFound = new EnumMap<>(GridCacheOperation.class);
scanIterateAndCount(factory, params, 0, 0, null, dataRecord -> {
sb.append("{");
for (int i = 0; i < dataRecord.entryCount(); i++) {
DataEntry entry = dataRecord.get(i);
GridCacheOperation op = entry.op();
Integer cnt = operationsFound.get(op);
operationsFound.put(op, cnt == null ? 1 : (cnt + 1));
if (entry instanceof UnwrapDataEntry) {
UnwrapDataEntry entry1 = (UnwrapDataEntry) entry;
sb.append(entry1.op()).append(" for ").append(entry1.unwrappedKey());
GridCacheVersion ver = entry.nearXidVersion();
sb.append(", ");
if (ver != null)
sb.append("tx=").append(ver).append(", ");
}
}
sb.append("}\n");
});
final Integer deletesFound = operationsFound.get(DELETE);
if (log.isInfoEnabled())
log.info(sb.toString());
assertTrue("Delete operations should be found in log: " + operationsFound, deletesFound != null && deletesFound > 0);
}
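The createCache2(ignite, mode) helper is not shown on this page. Below is a hedged sketch of the kind of mixed load it presumably performs, ending in remove() calls so that DELETE data records land in the WAL; the cache name and counts are illustrative.
IgniteCache<Integer, String> cache = ignite.getOrCreateCache(
    new CacheConfiguration<Integer, String>("cache2") // Illustrative name; the real helper may differ.
        .setAtomicityMode(mode));

for (int i = 0; i < 100; i++)
    cache.put(i, "v-" + i);

// remove() is what produces the DELETE data records the assertion looks for.
for (int i = 0; i < 100; i += 2)
    cache.remove(i);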
Use of org.apache.ignite.internal.pagemem.wal.record.DataEntry in project ignite by apache.
The class IgniteWalReaderTest, method testRebalanceFlag.
/**
 * Tests transaction generation in the WAL for a putAll cache operation and verifies the rebalance
 * flag on data records written to a node that joins via rebalancing.
 *
 * @throws Exception If failed.
 */
@Test
@WithSystemProperty(key = IgniteSystemProperties.IGNITE_DISABLE_WAL_DURING_REBALANCING, value = "false")
public void testRebalanceFlag() throws Exception {
backupCnt = 1;
IgniteEx ignite = startGrid("node0");
Ignite ignite1 = startGrid(1);
ignite.cluster().state(ACTIVE);
IgniteCache<Integer, IndexedObject> cache = ignite.cache(CACHE_NAME);
int cntEntries = 100;
List<Integer> keys = findKeys(ignite.localNode(), cache, cntEntries, 0, 0);
Map<Integer, IndexedObject> map = new TreeMap<>();
for (Integer key : keys) map.putIfAbsent(key, new IndexedObject(key));
cache.putAll(map);
Ignite ignite2 = startGrid(2);
ignite.cluster().setBaselineTopology(ignite2.cluster().topologyVersion());
backupCnt = 0;
awaitPartitionMapExchange(false, true, null);
String subfolderName1 = genDbSubfolderName(ignite, 0);
String subfolderName2 = genDbSubfolderName(ignite1, 1);
String subfolderName3 = genDbSubfolderName(ignite2, 2);
stopAllGrids();
String workDir = U.defaultWorkDirectory();
IgniteWalIteratorFactory factory = new IgniteWalIteratorFactory(log);
Map<GridCacheOperation, Integer> operationsFound = new EnumMap<>(GridCacheOperation.class);
IgniteInClosure<DataRecord> drHnd = dataRecord -> {
for (int i = 0; i < dataRecord.entryCount(); i++) {
DataEntry entry = dataRecord.get(i);
GridCacheOperation op = entry.op();
Integer cnt = operationsFound.get(op);
operationsFound.put(op, cnt == null ? 1 : (cnt + 1));
}
};
scanIterateAndCount(factory,
    createIteratorParametersBuilder(workDir, subfolderName1)
        .filesOrDirs(workDir + "/db/wal/" + subfolderName1, workDir + "/db/wal/archive/" + subfolderName1),
    1, 1, null, drHnd);
primary = false;
scanIterateAndCount(factory,
    createIteratorParametersBuilder(workDir, subfolderName2)
        .filesOrDirs(workDir + "/db/wal/" + subfolderName2, workDir + "/db/wal/archive/" + subfolderName2),
    1, 1, null, drHnd);
rebalance = true;
scanIterateAndCount(factory,
    createIteratorParametersBuilder(workDir, subfolderName3)
        .filesOrDirs(workDir + "/db/wal/" + subfolderName3, workDir + "/db/wal/archive/" + subfolderName3),
    1, 0, null, drHnd);
}
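The primary and rebalance fields toggled above presumably steer per-entry flag checks inside scanIterateAndCount(...), whose body is not shown. A hedged sketch of such a check, assuming the PRIMARY_FLAG and PRELOAD_FLAG constants from the same DataEntry flags API seen in the first example (DataEntry.flags(...), DataEntry.EMPTY_FLAGS):
IgniteInClosure<DataRecord> flagsHnd = dataRecord -> {
    for (int i = 0; i < dataRecord.entryCount(); i++) {
        DataEntry entry = dataRecord.get(i);

        // Assumed: set when the entry was logged on the primary node for its partition.
        boolean onPrimary = (entry.flags() & DataEntry.PRIMARY_FLAG) != 0;

        // Assumed: set when the entry was logged by rebalancing (preloading), as on node2 here.
        boolean preloaded = (entry.flags() & DataEntry.PRELOAD_FLAG) != 0;

        assertEquals(primary, onPrimary);
        assertEquals(rebalance, preloaded);
    }
};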