Usage example of org.apache.ignite.internal.processors.cache.GridCacheOperation in the Apache Ignite project.
From the class IgniteWalReaderTest, method runRemoveOperationTest:
/**
 * Verifies that DELETE operations show up in the WAL after a mix of cache
 * operations that includes remove().
 *
 * @param mode Cache atomicity mode used to create the cache.
 * @throws Exception If failed.
 */
private void runRemoveOperationTest(CacheAtomicityMode mode) throws Exception {
    final Ignite grid = startGrid("node0");

    grid.active(true);
    createCache2(grid, mode);
    grid.active(false);

    final String subfolder = genDbSubfolderName(grid, 0);

    stopGrid("node0");

    final String workDir = U.defaultWorkDirectory();
    final IgniteWalIteratorFactory factory = createWalIteratorFactory(workDir, subfolder);

    // Human-readable dump of every data record, built up while scanning.
    final StringBuilder dump = new StringBuilder();

    // Number of times each cache operation was seen in the WAL.
    final Map<GridCacheOperation, Integer> opCounts = new EnumMap<>(GridCacheOperation.class);

    scanIterateAndCount(factory, workDir, subfolder, 0, 0, null, new IgniteInClosure<DataRecord>() {
        @Override
        public void apply(DataRecord rec) {
            dump.append("{");

            for (DataEntry entry : rec.writeEntries()) {
                final GridCacheOperation op = entry.op();
                final Integer prev = opCounts.get(op);

                if (prev == null)
                    opCounts.put(op, 1);
                else
                    opCounts.put(op, prev + 1);

                if (entry instanceof UnwrapDataEntry) {
                    final UnwrapDataEntry unwrapped = (UnwrapDataEntry) entry;

                    dump.append(unwrapped.op()).append(" for ").append(unwrapped.unwrappedKey());

                    final GridCacheVersion ver = entry.nearXidVersion();

                    dump.append(", ");

                    if (ver != null)
                        dump.append("tx=").append(ver).append(", ");
                }
            }

            dump.append("}\n");
        }
    });

    final Integer delCnt = opCounts.get(DELETE);

    if (log.isInfoEnabled())
        log.info(dump.toString());

    assertTrue("Delete operations should be found in log: " + opCounts, delCnt != null && delCnt > 0);
}
Usage example of org.apache.ignite.internal.processors.cache.GridCacheOperation in the Apache Ignite project.
From the class RecordDataV1Serializer, method readDataEntry:
/**
 * Reads a single data entry from a WAL record payload. The read order here
 * defines the on-disk entry layout and must mirror the corresponding writer.
 *
 * @param in Input to read from.
 * @return Read entry; a {@link LazyDataEntry} when the target cache is not started locally.
 * @throws IOException If reading from the input fails.
 * @throws IgniteCheckedException If key or value unmarshalling fails.
 */
DataEntry readDataEntry(ByteBufferBackedDataInput in) throws IOException, IgniteCheckedException {
int cacheId = in.readInt();
int keySize = in.readInt();
byte keyType = in.readByte();
byte[] keyBytes = new byte[keySize];
in.readFully(keyBytes);
int valSize = in.readInt();
// A negative value size marks an absent value: type byte and value bytes are not present.
byte valType = 0;
byte[] valBytes = null;
if (valSize >= 0) {
valType = in.readByte();
valBytes = new byte[valSize];
in.readFully(valBytes);
}
// Operation is stored as a single byte; mask to treat it as an unsigned ordinal.
byte ord = in.readByte();
GridCacheOperation op = GridCacheOperation.fromOrdinal(ord & 0xFF);
GridCacheVersion nearXidVer = readVersion(in, true);
GridCacheVersion writeVer = readVersion(in, false);
int partId = in.readInt();
long partCntr = in.readLong();
long expireTime = in.readLong();
GridCacheContext cacheCtx = cctx.cacheContext(cacheId);
if (cacheCtx != null) {
// Cache context is available locally: deserialize key/value eagerly.
CacheObjectContext coCtx = cacheCtx.cacheObjectContext();
KeyCacheObject key = co.toKeyCacheObject(coCtx, keyType, keyBytes);
CacheObject val = valBytes != null ? co.toCacheObject(coCtx, valType, valBytes) : null;
return new DataEntry(cacheId, key, val, op, nearXidVer, writeVer, expireTime, partId, partCntr);
} else
// No local cache context: keep raw bytes and defer deserialization.
return new LazyDataEntry(cctx, cacheId, keyType, keyBytes, valType, valBytes, op, nearXidVer, writeVer, expireTime, partId, partCntr);
}
Usage example of org.apache.ignite.internal.processors.cache.GridCacheOperation in the Apache Ignite project.
From the class IgniteTxAdapter, method batchStoreCommit:
/**
 * Performs batch database operations. This commit must be called
 * before cache update. This way if there is a DB failure,
 * cache transaction can still be rolled back.
 * <p>
 * Consecutive puts and removes are accumulated into batches ({@code putMap} /
 * {@code rmvCol}) for the current store and flushed whenever the operation kind
 * or the target store changes, and once more at the end of the write set.
 *
 * @param writeEntries Transaction write set (may be {@code null}).
 * @throws IgniteCheckedException If batch update failed.
 */
@SuppressWarnings({ "CatchGenericClass" })
protected final void batchStoreCommit(Iterable<IgniteTxEntry> writeEntries) throws IgniteCheckedException {
if (!storeEnabled() || internal() || // No need to work with local store at GridNearTxRemote.
(!local() && near()))
return;
Collection<CacheStoreManager> stores = txState().stores(cctx);
if (stores == null || stores.isEmpty())
return;
assert isWriteToStoreFromDhtValid(stores) : "isWriteToStoreFromDht can't be different within one transaction";
CacheStoreManager first = F.first(stores);
boolean isWriteToStoreFromDht = first.isWriteToStoreFromDht();
if ((local() || first.isLocal()) && (near() || isWriteToStoreFromDht)) {
try {
if (writeEntries != null) {
// Pending put batch (key -> value/version) and pending remove batch for the current store.
Map<KeyCacheObject, IgniteBiTuple<? extends CacheObject, GridCacheVersion>> putMap = null;
List<KeyCacheObject> rmvCol = null;
CacheStoreManager writeStore = null;
boolean skipNonPrimary = near() && isWriteToStoreFromDht;
for (IgniteTxEntry e : writeEntries) {
boolean skip = e.skipStore();
if (!skip && skipNonPrimary) {
// Skip near/detached entries and partitions this node is not primary for.
skip = e.cached().isNear() || e.cached().detached() || !e.context().affinity().primaryByPartition(e.cached().partition(), topologyVersion()).isLocal();
}
if (// Update local store at backups only if needed.
!skip && !local() && cctx.localStorePrimaryOnly())
skip = true;
if (skip)
continue;
boolean intercept = e.context().config().getInterceptor() != null;
// Interceptors and entry processors need the previous value to be available.
if (intercept || !F.isEmpty(e.entryProcessors()))
e.cached().unswap(false);
IgniteBiTuple<GridCacheOperation, CacheObject> res = applyTransformClosures(e, false, null);
GridCacheContext cacheCtx = e.context();
GridCacheOperation op = res.get1();
KeyCacheObject key = e.key();
CacheObject val = res.get2();
GridCacheVersion ver = writeVersion();
if (op == CREATE || op == UPDATE) {
// Batch-process all removes if needed.
if (rmvCol != null && !rmvCol.isEmpty()) {
assert writeStore != null;
writeStore.removeAll(this, rmvCol);
// Reset.
rmvCol.clear();
writeStore = null;
}
// Batch-process puts if cache ID has changed.
if (writeStore != null && writeStore != cacheCtx.store()) {
if (putMap != null && !putMap.isEmpty()) {
writeStore.putAll(this, putMap);
// Reset.
putMap.clear();
}
writeStore = null;
}
if (intercept) {
Object interceptorVal = cacheCtx.config().getInterceptor().onBeforePut(new CacheLazyEntry(cacheCtx, key, e.cached().rawGet(), e.keepBinary()), cacheCtx.cacheObjectContext().unwrapBinaryIfNeeded(val, e.keepBinary(), false));
// Null result from the interceptor cancels this put.
if (interceptorVal == null)
continue;
val = cacheCtx.toCacheObject(cacheCtx.unwrapTemporary(interceptorVal));
}
if (writeStore == null)
writeStore = cacheCtx.store();
if (writeStore.isWriteThrough()) {
if (putMap == null)
putMap = new LinkedHashMap<>(writeMap().size(), 1.0f);
putMap.put(key, F.t(val, ver));
}
} else if (op == DELETE) {
// Batch-process all puts if needed.
if (putMap != null && !putMap.isEmpty()) {
assert writeStore != null;
writeStore.putAll(this, putMap);
// Reset.
putMap.clear();
writeStore = null;
}
if (writeStore != null && writeStore != cacheCtx.store()) {
if (rmvCol != null && !rmvCol.isEmpty()) {
writeStore.removeAll(this, rmvCol);
// Reset.
rmvCol.clear();
}
writeStore = null;
}
if (intercept) {
IgniteBiTuple<Boolean, Object> t = cacheCtx.config().getInterceptor().onBeforeRemove(new CacheLazyEntry(cacheCtx, key, e.cached().rawGet(), e.keepBinary()));
if (cacheCtx.cancelRemove(t))
continue;
}
if (writeStore == null)
writeStore = cacheCtx.store();
if (writeStore.isWriteThrough()) {
if (rmvCol == null)
rmvCol = new ArrayList<>();
rmvCol.add(key);
}
} else if (log.isDebugEnabled())
log.debug("Ignoring NOOP entry for batch store commit: " + e);
}
// Flush whatever is left in the final batches.
if (putMap != null && !putMap.isEmpty()) {
assert rmvCol == null || rmvCol.isEmpty();
assert writeStore != null;
// Batch put at the end of transaction.
writeStore.putAll(this, putMap);
}
if (rmvCol != null && !rmvCol.isEmpty()) {
assert putMap == null || putMap.isEmpty();
assert writeStore != null;
// Batch remove at the end of transaction.
writeStore.removeAll(this, rmvCol);
}
}
// Commit while locks are held.
sessionEnd(stores, true);
} catch (IgniteCheckedException ex) {
commitError(ex);
errorWhenCommitting();
// Safe to remove transaction from committed tx list because nothing was committed yet.
cctx.tm().removeCommittedTx(this);
throw ex;
} catch (Throwable ex) {
commitError(ex);
errorWhenCommitting();
// Safe to remove transaction from committed tx list because nothing was committed yet.
cctx.tm().removeCommittedTx(this);
if (ex instanceof Error)
throw (Error) ex;
throw new IgniteCheckedException("Failed to commit transaction to database: " + this, ex);
} finally {
// End the store session without committing if the tx was marked rollback-only.
if (isRollbackOnly())
sessionEnd(stores, false);
}
} else
sessionEnd(stores, true);
}
Usage example of org.apache.ignite.internal.processors.cache.GridCacheOperation in the Apache Ignite project.
From the class GridNearAtomicCache, method processNearAtomicUpdateResponse:
/**
 * Applies an atomic update response to the local near cache entry, retrying
 * while the entry is concurrently removed.
 *
 * @param ver Version.
 * @param key Key.
 * @param val Value ({@code null} turns the update into a DELETE).
 * @param ttl TTL.
 * @param expireTime Expire time.
 * @param keepBinary Keep binary flag.
 * @param nodeId Node ID.
 * @param subjId Subject ID.
 * @param taskName Task name.
 * @throws IgniteCheckedException If failed.
 */
private void processNearAtomicUpdateResponse(GridCacheVersion ver, KeyCacheObject key, @Nullable CacheObject val, long ttl, long expireTime, boolean keepBinary, UUID nodeId, UUID subjId, String taskName) throws IgniteCheckedException {
try {
// Retry until the update succeeds on a live entry.
while (true) {
GridCacheEntryEx entry = null;
AffinityTopologyVersion topVer = ctx.affinity().affinityTopologyVersion();
try {
entry = entryEx(key, topVer);
// Null value in the response means the key was removed.
GridCacheOperation op = val != null ? UPDATE : DELETE;
GridCacheUpdateAtomicResult updRes = entry.innerUpdate(ver, nodeId, nodeId, op, val, null, /*write-through*/
false, /*read-through*/
false, /*retval*/
false, keepBinary, /*expiry policy*/
null, /*event*/
true, /*metrics*/
true, /*primary*/
false, /*check version*/
true, topVer, CU.empty0(), DR_NONE, ttl, expireTime, null, false, false, subjId, taskName, null, null, null);
if (updRes.removeVersion() != null)
ctx.onDeferredDelete(entry, updRes.removeVersion());
// While.
break;
} catch (GridCacheEntryRemovedException ignored) {
if (log.isDebugEnabled())
log.debug("Got removed entry while updating near cache value (will retry): " + key);
// Reset so the obsolete entry is not touched in the finally block.
entry = null;
} finally {
if (entry != null)
ctx.evicts().touch(entry, topVer);
}
}
} catch (GridDhtInvalidPartitionException ignored) {
// Ignore.
}
}
Usage example of org.apache.ignite.internal.processors.cache.GridCacheOperation in the Apache Ignite project.
From the class IgniteTxLocalAdapter, method userCommit:
/**
 * {@inheritDoc}
 *
 * <p>Commits the transaction's entries to cache: flushes the write-through
 * store first (so a store failure can still roll back), then applies each
 * entry via innerSet/innerRemove/innerReload, resolving conflicts and expiry
 * along the way. Any failure mid-commit results in a heuristic failure and
 * an attempt to uncommit.
 */
@SuppressWarnings({ "CatchGenericClass" })
@Override
public void userCommit() throws IgniteCheckedException {
TransactionState state = state();
// Commit is only legal from the COMMITTING state.
if (state != COMMITTING) {
if (remainingTime() == -1)
throw new IgniteTxTimeoutCheckedException("Transaction timed out: " + this);
setRollbackOnly();
throw new IgniteCheckedException("Invalid transaction state for commit [state=" + state + ", tx=" + this + ']');
}
checkValid();
// Near transactions (or tx-read logging) commit read entries as well as writes.
Collection<IgniteTxEntry> commitEntries = (near() || cctx.snapshot().needTxReadLogging()) ? allEntries() : writeEntries();
boolean empty = F.isEmpty(commitEntries);
// locks on backup nodes.
if (!empty || colocated())
cctx.tm().addCommittedTx(this);
if (!empty) {
// Write-through store is committed before cache update so a DB failure can still roll back.
batchStoreCommit(writeEntries());
WALPointer ptr = null;
cctx.database().checkpointReadLock();
try {
cctx.tm().txContext(this);
AffinityTopologyVersion topVer = topologyVersion();
/*
 * Commit to cache. Note that for 'near' transaction we loop through all the entries.
 */
for (IgniteTxEntry txEntry : commitEntries) {
GridCacheContext cacheCtx = txEntry.context();
GridDrType drType = cacheCtx.isDrEnabled() ? DR_PRIMARY : DR_NONE;
UUID nodeId = txEntry.nodeId() == null ? this.nodeId : txEntry.nodeId();
try {
// Retry loop: re-acquire the entry if it was concurrently removed.
while (true) {
try {
GridCacheEntryEx cached = txEntry.cached();
// transaction manager to make sure locks are held.
if (!evictNearEntry(txEntry, false)) {
if (cacheCtx.isNear() && cacheCtx.dr().receiveEnabled()) {
cached.markObsolete(xidVer);
break;
}
if (cached.detached())
break;
GridCacheEntryEx nearCached = null;
boolean metrics = true;
if (updateNearCache(cacheCtx, txEntry.key(), topVer))
nearCached = cacheCtx.dht().near().peekEx(txEntry.key());
else if (cacheCtx.isNear() && txEntry.locallyMapped())
metrics = false;
boolean evt = !isNearLocallyMapped(txEntry, false);
if (!F.isEmpty(txEntry.entryProcessors()) || !F.isEmpty(txEntry.filters()))
txEntry.cached().unswap(false);
// Apply entry processors to get the final operation and value for this entry.
IgniteBiTuple<GridCacheOperation, CacheObject> res = applyTransformClosures(txEntry, true, null);
GridCacheVersion dhtVer = null;
// backup remote transaction completes.
if (cacheCtx.isNear()) {
if (txEntry.op() == CREATE || txEntry.op() == UPDATE || txEntry.op() == DELETE || txEntry.op() == TRANSFORM)
dhtVer = txEntry.dhtVersion();
if ((txEntry.op() == CREATE || txEntry.op() == UPDATE) && txEntry.conflictExpireTime() == CU.EXPIRE_TIME_CALCULATE) {
ExpiryPolicy expiry = cacheCtx.expiryForTxEntry(txEntry);
if (expiry != null) {
txEntry.cached().unswap(false);
Duration duration = cached.hasValue() ? expiry.getExpiryForUpdate() : expiry.getExpiryForCreation();
txEntry.ttl(CU.toTtl(duration));
}
}
}
GridCacheOperation op = res.get1();
CacheObject val = res.get2();
// Deal with conflicts.
GridCacheVersion explicitVer = txEntry.conflictVersion() != null ? txEntry.conflictVersion() : writeVersion();
if ((op == CREATE || op == UPDATE) && txEntry.conflictExpireTime() == CU.EXPIRE_TIME_CALCULATE) {
ExpiryPolicy expiry = cacheCtx.expiryForTxEntry(txEntry);
if (expiry != null) {
Duration duration = cached.hasValue() ? expiry.getExpiryForUpdate() : expiry.getExpiryForCreation();
long ttl = CU.toTtl(duration);
txEntry.ttl(ttl);
// Zero TTL means immediate expiration, so turn the write into a remove.
if (ttl == CU.TTL_ZERO)
op = DELETE;
}
}
boolean conflictNeedResolve = cacheCtx.conflictNeedResolve();
GridCacheVersionConflictContext<?, ?> conflictCtx = null;
if (conflictNeedResolve) {
IgniteBiTuple<GridCacheOperation, GridCacheVersionConflictContext> conflictRes = conflictResolve(op, txEntry, val, explicitVer, cached);
assert conflictRes != null;
conflictCtx = conflictRes.get2();
// Resolution outcome: keep old value (NOOP), use new value, or merge.
if (conflictCtx.isUseOld())
op = NOOP;
else if (conflictCtx.isUseNew()) {
txEntry.ttl(conflictCtx.ttl());
txEntry.conflictExpireTime(conflictCtx.expireTime());
} else {
assert conflictCtx.isMerge();
op = conflictRes.get1();
val = txEntry.context().toCacheObject(conflictCtx.mergeValue());
explicitVer = writeVersion();
txEntry.ttl(conflictCtx.ttl());
txEntry.conflictExpireTime(conflictCtx.expireTime());
}
} else
// Nullify explicit version so that innerSet/innerRemove will work as usual.
explicitVer = null;
if (sndTransformedVals || conflictNeedResolve) {
assert sndTransformedVals && cacheCtx.isReplicated() || conflictNeedResolve;
txEntry.value(val, true, false);
txEntry.op(op);
txEntry.entryProcessors(null);
txEntry.conflictVersion(explicitVer);
}
if (dhtVer == null)
dhtVer = explicitVer != null ? explicitVer : writeVersion();
// Dispatch on the resolved operation.
if (op == CREATE || op == UPDATE) {
assert val != null : txEntry;
GridCacheUpdateTxResult updRes = cached.innerSet(this, eventNodeId(), txEntry.nodeId(), val, false, false, txEntry.ttl(), evt, metrics, txEntry.keepBinary(), txEntry.hasOldValue(), txEntry.oldValue(), topVer, null, cached.detached() ? DR_NONE : drType, txEntry.conflictExpireTime(), cached.isNear() ? null : explicitVer, CU.subjectId(this, cctx), resolveTaskName(), dhtVer, null);
if (updRes.success())
txEntry.updateCounter(updRes.updatePartitionCounter());
if (updRes.loggedPointer() != null)
ptr = updRes.loggedPointer();
// Mirror the update into the near cache entry if present.
if (nearCached != null && updRes.success()) {
nearCached.innerSet(null, eventNodeId(), nodeId, val, false, false, txEntry.ttl(), false, metrics, txEntry.keepBinary(), txEntry.hasOldValue(), txEntry.oldValue(), topVer, CU.empty0(), DR_NONE, txEntry.conflictExpireTime(), null, CU.subjectId(this, cctx), resolveTaskName(), dhtVer, null);
}
} else if (op == DELETE) {
GridCacheUpdateTxResult updRes = cached.innerRemove(this, eventNodeId(), txEntry.nodeId(), false, evt, metrics, txEntry.keepBinary(), txEntry.hasOldValue(), txEntry.oldValue(), topVer, null, cached.detached() ? DR_NONE : drType, cached.isNear() ? null : explicitVer, CU.subjectId(this, cctx), resolveTaskName(), dhtVer, null);
if (updRes.success())
txEntry.updateCounter(updRes.updatePartitionCounter());
if (updRes.loggedPointer() != null)
ptr = updRes.loggedPointer();
// Mirror the removal into the near cache entry if present.
if (nearCached != null && updRes.success()) {
nearCached.innerRemove(null, eventNodeId(), nodeId, false, false, metrics, txEntry.keepBinary(), txEntry.hasOldValue(), txEntry.oldValue(), topVer, CU.empty0(), DR_NONE, null, CU.subjectId(this, cctx), resolveTaskName(), dhtVer, null);
}
} else if (op == RELOAD) {
cached.innerReload();
if (nearCached != null)
nearCached.innerReload();
} else if (op == READ) {
CacheGroupContext grp = cacheCtx.group();
// Log the read entry to WAL only when tx read logging is enabled for a persistent group.
if (grp.persistenceEnabled() && grp.walEnabled() && cctx.snapshot().needTxReadLogging()) {
ptr = cctx.wal().log(new DataRecord(new DataEntry(cacheCtx.cacheId(), txEntry.key(), val, op, nearXidVersion(), writeVersion(), 0, txEntry.key().partition(), txEntry.updateCounter())));
}
ExpiryPolicy expiry = cacheCtx.expiryForTxEntry(txEntry);
if (expiry != null) {
Duration duration = expiry.getExpiryForAccess();
if (duration != null)
cached.updateTtl(null, CU.toTtl(duration));
}
if (log.isDebugEnabled())
log.debug("Ignoring READ entry when committing: " + txEntry);
} else {
assert ownsLock(txEntry.cached()) : "Transaction does not own lock for group lock entry during commit [tx=" + this + ", txEntry=" + txEntry + ']';
if (conflictCtx == null || !conflictCtx.isUseOld()) {
if (txEntry.ttl() != CU.TTL_NOT_CHANGED)
cached.updateTtl(null, txEntry.ttl());
}
if (log.isDebugEnabled())
log.debug("Ignoring NOOP entry when committing: " + txEntry);
}
}
// if an entry is obsolete).
if (txEntry.op() != READ)
checkCommitLocks(cached);
// Break out of while loop.
break;
}// If entry cached within transaction got removed.
catch (GridCacheEntryRemovedException ignored) {
if (log.isDebugEnabled())
log.debug("Got removed entry during transaction commit (will retry): " + txEntry);
txEntry.cached(entryEx(cacheCtx, txEntry.txKey(), topologyVersion()));
}
}
} catch (Throwable ex) {
// We are about to initiate transaction rollback when tx has started to committing.
// Need to remove version from committed list.
cctx.tm().removeCommittedTx(this);
if (X.hasCause(ex, GridCacheIndexUpdateException.class) && cacheCtx.cache().isMongoDataCache()) {
if (log.isDebugEnabled())
log.debug("Failed to update mongo document index (transaction entry will " + "be ignored): " + txEntry);
// Set operation to NOOP.
txEntry.op(NOOP);
errorWhenCommitting();
throw ex;
} else {
boolean nodeStopping = X.hasCause(ex, NodeStoppingException.class);
IgniteCheckedException err = new IgniteTxHeuristicCheckedException("Failed to locally write to cache " + "(all transaction entries will be invalidated, however there was a window when " + "entries for this transaction were visible to others): " + this, ex);
if (nodeStopping) {
U.warn(log, "Failed to commit transaction, node is stopping " + "[tx=" + this + ", err=" + ex + ']');
} else
U.error(log, "Heuristic transaction failure.", err);
COMMIT_ERR_UPD.compareAndSet(this, null, err);
state(UNKNOWN);
try {
// Courtesy to minimize damage.
uncommit(nodeStopping);
} catch (Throwable ex1) {
U.error(log, "Failed to uncommit transaction: " + this, ex1);
if (ex1 instanceof Error)
throw ex1;
}
if (ex instanceof Error)
throw ex;
throw err;
}
}
}
if (ptr != null && !cctx.tm().logTxRecords())
cctx.wal().fsync(ptr);
} catch (StorageException e) {
throw new IgniteCheckedException("Failed to log transaction record " + "(transaction will be rolled back): " + this, e);
} finally {
cctx.database().checkpointReadUnlock();
cctx.tm().resetContext();
}
}
// Do not unlock transaction entries if one-phase commit.
if (!onePhaseCommit()) {
if (DONE_FLAG_UPD.compareAndSet(this, 0, 1)) {
// Unlock all locks.
cctx.tm().commitTx(this);
boolean needsCompletedVersions = needsCompletedVersions();
assert !needsCompletedVersions || completedBase != null;
assert !needsCompletedVersions || committedVers != null;
assert !needsCompletedVersions || rolledbackVers != null;
}
}
}
Aggregations