Use of org.apache.ignite.transactions.TransactionState in project ignite by apache.
The class TxRecordSerializer, method read.
/**
 * Reads {@link TxRecord} from the given input.
 *
 * @param in Input to read from.
 * @return Deserialized TxRecord.
 * @throws IOException In case of an I/O failure.
 * @throws IgniteCheckedException In case of a deserialization failure.
 */
public TxRecord read(ByteBufferBackedDataInput in) throws IOException, IgniteCheckedException {
    byte txState = in.readByte();
    TransactionState state = TransactionState.fromOrdinal(txState);

    GridCacheVersion nearXidVer = RecordV1Serializer.readVersion(in, true);
    GridCacheVersion writeVer = RecordV1Serializer.readVersion(in, true);

    int participatingNodesSize = in.readInt();
    Map<Short, Collection<Short>> participatingNodes = U.newHashMap(participatingNodesSize);

    for (int i = 0; i < participatingNodesSize; i++) {
        short primaryNode = in.readShort();

        int backupNodesSize = in.readInt();
        Collection<Short> backupNodes = new ArrayList<>(backupNodesSize);

        for (int j = 0; j < backupNodesSize; j++) {
            short backupNode = in.readShort();

            backupNodes.add(backupNode);
        }

        participatingNodes.put(primaryNode, backupNodes);
    }

    long ts = in.readLong();

    return new TxRecord(state, nearXidVer, writeVer, participatingNodes, ts);
}
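For orientation, the write side has to emit the same fields in the same order: the state ordinal byte, two versions, the participating-nodes map, and the timestamp. Below is a minimal sketch of that layout using plain java.io, assuming TxRecord exposes getters matching the constructor arguments used above; writeVersion(...) is a hypothetical stand-in, not the real RecordV1Serializer write API, and the null-map case is not handled.

// Illustrative only: mirrors the field order read above.
private void writeTxRecord(TxRecord rec, DataOutput out) throws IOException {
    out.writeByte((byte)rec.state().ordinal());

    writeVersion(out, rec.nearXidVersion()); // Hypothetical helper.
    writeVersion(out, rec.writeVersion());   // Hypothetical helper.

    Map<Short, Collection<Short>> nodes = rec.participatingNodes();

    out.writeInt(nodes.size());

    for (Map.Entry<Short, Collection<Short>> e : nodes.entrySet()) {
        out.writeShort(e.getKey());

        out.writeInt(e.getValue().size());

        for (short backup : e.getValue())
            out.writeShort(backup);
    }

    out.writeLong(rec.timestamp());
}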
Use of org.apache.ignite.transactions.TransactionState in project ignite by apache.
The class IgniteTxLocalAdapter, method addEntry.
/**
 * @param op Cache operation.
 * @param val Value.
 * @param entryProcessor Entry processor.
 * @param invokeArgs Optional arguments for the entry processor.
 * @param entry Cache entry.
 * @param expiryPlc Explicitly specified expiry policy.
 * @param filter Filter.
 * @param filtersSet {@code True} if the filter should be marked as set.
 * @param drTtl DR TTL (if any).
 * @param drExpireTime DR expire time (if any).
 * @param drVer DR version.
 * @param skipStore Skip store flag.
 * @param keepBinary Keep binary flag.
 * @param addReader Add reader flag.
 * @return Transaction entry.
 */
protected final IgniteTxEntry addEntry(
    GridCacheOperation op,
    @Nullable CacheObject val,
    @Nullable EntryProcessor entryProcessor,
    Object[] invokeArgs,
    GridCacheEntryEx entry,
    @Nullable ExpiryPolicy expiryPlc,
    CacheEntryPredicate[] filter,
    boolean filtersSet,
    long drTtl,
    long drExpireTime,
    @Nullable GridCacheVersion drVer,
    boolean skipStore,
    boolean keepBinary,
    boolean addReader
) {
    assert invokeArgs == null || op == TRANSFORM;

    IgniteTxKey key = entry.txKey();

    checkInternal(key);

    TransactionState state = state();

    assert state == TransactionState.ACTIVE || remainingTime() == -1 :
        "Invalid tx state for adding entry [op=" + op + ", val=" + val + ", entry=" + entry +
        ", filter=" + Arrays.toString(filter) + ", txCtx=" + cctx.tm().txContextVersion() +
        ", tx=" + this + ']';

    IgniteTxEntry old = entry(key);

    // Keep old filter if already have one (empty filter is always overridden).
    if (!filtersSet || !F.isEmptyOrNulls(filter)) {
        // Replace filter if previous filter failed.
        if (old != null && old.filtersSet())
            filter = old.filters();
    }

    IgniteTxEntry txEntry;

    if (old != null) {
        if (entryProcessor != null) {
            assert val == null;
            assert op == TRANSFORM;

            // Will change the op.
            old.addEntryProcessor(entryProcessor, invokeArgs);
        }
        else {
            assert old.op() != TRANSFORM;

            old.op(op);
            old.value(val, op == CREATE || op == UPDATE || op == DELETE, op == READ);
        }

        // Keep old ttl value.
        old.cached(entry);
        old.filters(filter);

        // Keep old skipStore and keepBinary flags.
        old.skipStore(skipStore);
        old.keepBinary(keepBinary);

        // Update ttl if specified.
        if (drTtl >= 0L) {
            assert drExpireTime >= 0L;

            entryTtlDr(key, drTtl, drExpireTime);
        }
        else
            entryExpiry(key, expiryPlc);

        txEntry = old;

        if (log.isDebugEnabled())
            log.debug("Updated transaction entry: " + txEntry);
    }
    else {
        boolean hasDrTtl = drTtl >= 0;

        txEntry = new IgniteTxEntry(
            entry.context(),
            this,
            op,
            val,
            EntryProcessorResourceInjectorProxy.wrap(cctx.kernalContext(), entryProcessor),
            invokeArgs,
            hasDrTtl ? drTtl : -1L,
            entry,
            filter,
            drVer,
            skipStore,
            keepBinary,
            addReader);

        txEntry.conflictExpireTime(drExpireTime);

        if (!hasDrTtl)
            txEntry.expiry(expiryPlc);

        txState.addEntry(txEntry);

        if (log.isDebugEnabled())
            log.debug("Created transaction entry: " + txEntry);
    }

    txEntry.filtersSet(filtersSet);

    while (true) {
        try {
            updateExplicitVersion(txEntry, entry);

            return txEntry;
        }
        catch (GridCacheEntryRemovedException ignore) {
            if (log.isDebugEnabled())
                log.debug("Got removed entry in transaction newEntry method (will retry): " + entry);

            entry = entryEx(entry.context(), txEntry.txKey(), topologyVersion());

            txEntry.cached(entry);
        }
    }
}
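For context, a plain transactional put that involves no entry processor and no DR metadata would reach this method with roughly the following argument shape. This is an illustrative call only, not an actual Ignite call site; the variable names are placeholders.

// Hypothetical, simplified call shape for a plain put inside a transaction.
IgniteTxEntry txEntry = addEntry(
    UPDATE,                       // op: plain update.
    cacheVal,                     // val: value to write (CacheObject).
    null,                         // entryProcessor: none for a plain put.
    null,                         // invokeArgs: none.
    cached,                       // entry: GridCacheEntryEx for the key.
    expiryPlc,                    // expiryPlc: explicit expiry policy, may be null.
    new CacheEntryPredicate[0],   // filter: no filters.
    false,                        // filtersSet.
    -1L,                          // drTtl: no DR TTL.
    -1L,                          // drExpireTime: no DR expire time.
    null,                         // drVer: no DR version.
    false,                        // skipStore.
    false,                        // keepBinary.
    false);                       // addReader.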
Use of org.apache.ignite.transactions.TransactionState in project ignite by apache.
The class TxRecoveryWithConcurrentRollbackTest, method testRecoveryNotBreakingTxAtomicityOnNearFail.
/**
 * The test enforces a specific order of message processing during concurrent tx rollback and tx recovery
 * triggered by a node leaving the topology.
 * <p>
 * Expected result: both DHT transactions produce the same COMMITTED state on tx finish.
 */
@Test
public void testRecoveryNotBreakingTxAtomicityOnNearFail() throws Exception {
    backups = 1;
    persistence = false;

    final IgniteEx node0 = startGrids(3);

    node0.cluster().state(ACTIVE);

    final Ignite client = startGrid("client");

    final IgniteCache<Object, Object> cache = client.cache(DEFAULT_CACHE_NAME);

    final List<Integer> g0Keys = primaryKeys(grid(0).cache(DEFAULT_CACHE_NAME), 100);
    final List<Integer> g1Keys = primaryKeys(grid(1).cache(DEFAULT_CACHE_NAME), 100);
    final List<Integer> g2BackupKeys = backupKeys(grid(2).cache(DEFAULT_CACHE_NAME), 100, 0);

    Integer k1 = null;
    Integer k2 = null;

    for (Integer key : g2BackupKeys) {
        if (g0Keys.contains(key))
            k1 = key;
        else if (g1Keys.contains(key))
            k2 = key;

        if (k1 != null && k2 != null)
            break;
    }

    assertNotNull(k1);
    assertNotNull(k2);

    List<IgniteInternalTx> txs0 = null;
    List<IgniteInternalTx> txs1 = null;

    CountDownLatch stripeBlockLatch = new CountDownLatch(1);

    int[] stripeHolder = new int[1];

    try (final Transaction tx = client.transactions().txStart(PESSIMISTIC, READ_COMMITTED)) {
        cache.put(k1, Boolean.TRUE);
        cache.put(k2, Boolean.TRUE);

        TransactionProxyImpl p = (TransactionProxyImpl)tx;

        p.tx().prepare(true);

        txs0 = txs(grid(0));
        txs1 = txs(grid(1));

        List<IgniteInternalTx> txs2 = txs(grid(2));

        assertTrue(txs0.size() == 1);
        assertTrue(txs1.size() == 1);
        assertTrue(txs2.size() == 2);

        // Prevent the recovery request for the grid1 tx branch from reaching grid0.
        spi(grid(1)).blockMessages(GridCacheTxRecoveryRequest.class, grid(0).name());

        // Prevent finish(false) request processing on node0.
        spi(client).blockMessages(GridNearTxFinishRequest.class, grid(0).name());

        int stripe = U.safeAbs(p.tx().xidVersion().hashCode());

        stripeHolder[0] = stripe;

        // Block stripe processing for the rollback request on node1.
        grid(1).context().pools().getStripedExecutorService().execute(stripe, () -> U.awaitQuiet(stripeBlockLatch));

        // Dummy task to ensure the message is processed.
        grid(1).context().pools().getStripedExecutorService().execute(stripe, () -> {
        });

        runAsync(() -> {
            spi(client).waitForBlocked();

            client.close();

            return null;
        });

        tx.rollback();

        fail();
    }
    catch (Exception ignored) {
        // Expected.
    }

    // Wait until tx0 is committed by recovery on node0.
    assertNotNull(txs0);

    try {
        txs0.get(0).finishFuture().get(3_000);
    }
    catch (IgniteFutureTimeoutCheckedException e) {
        // If the timeout happens, the recovery message from g0 to g1 is mapped to the same stripe
        // as the near finish request. Complete the latch to allow sequential processing.
        stripeBlockLatch.countDown();

        // Wait until sequential processing is finished.
        assertTrue("sequential processing", GridTestUtils.waitForCondition(
            () -> grid(1).context().pools().getStripedExecutorService().queueStripeSize(stripeHolder[0]) == 0, 5_000));

        // Unblock the recovery message from g1 to g0: the tx is in RECOVERY_FINISH state and waits for recovery end.
        spi(grid(1)).stopBlock();

        txs0.get(0).finishFuture().get();
        txs1.get(0).finishFuture().get();

        final TransactionState s1 = txs0.get(0).state();
        final TransactionState s2 = txs1.get(0).state();

        assertEquals(s1, s2);

        return;
    }

    // Release rollback request processing, triggering an attempt to roll back the transaction during recovery.
    stripeBlockLatch.countDown();

    // Wait until the finish message is processed.
    assertTrue("concurrent processing", GridTestUtils.waitForCondition(
        () -> grid(1).context().pools().getStripedExecutorService().queueStripeSize(stripeHolder[0]) == 0, 5_000));

    // Proceed with recovery on grid1 -> grid0. Tx0 is committed, so tx1 should also be committed.
    spi(grid(1)).stopBlock();

    assertNotNull(txs1);

    txs1.get(0).finishFuture().get();

    final TransactionState s1 = txs0.get(0).state();
    final TransactionState s2 = txs1.get(0).state();

    assertEquals(s1, s2);
}
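The txs(...) helper above is defined by the test class and is not shown here. A plausible sketch, assuming it simply snapshots the node's active internal transactions from the transaction manager:

// Assumed shape of the helper: copy the currently active internal transactions
// of the given node into a list for later state assertions.
private static List<IgniteInternalTx> txs(IgniteEx grid) {
    return new ArrayList<>(grid.context().cache().context().tm().activeTransactions());
}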
Use of org.apache.ignite.transactions.TransactionState in project ignite by apache.
The class WalStat, method registerTxRecord.
/**
 * @param txRecord TX record to handle.
 */
private void registerTxRecord(TxRecord txRecord) {
    final TransactionState state = txRecord.state();

    incrementStat(state.toString(), txRecord, txRecordAct);

    int totalNodes = 0;

    final Map<Short, Collection<Short>> map = txRecord.participatingNodes();

    if (map != null) {
        incrementStat(map.size(), txRecord, txRecordPrimNodesCnt);

        final HashSet<Object> set = new HashSet<>(150);

        for (Map.Entry<Short, Collection<Short>> next : map.entrySet()) {
            set.add(next.getKey());
            set.addAll(next.getValue());
        }

        totalNodes = set.size();

        incrementStat(totalNodes, txRecord, txRecordNodesCnt);
    }

    final GridCacheVersion ver = txRecord.nearXidVersion();

    if (ver != null) {
        switch (state) {
            case PREPARING:
            case PREPARED:
                txStat.onTxPrepareStart(ver, map != null ? map.size() : 0, totalNodes);

                break;

            case COMMITTED:
                txStat.onTxEnd(ver, true);

                break;

            default:
                txStat.onTxEnd(ver, false);
        }
    }
}
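For context, registerTxRecord is only meaningful for TxRecord entries encountered while scanning a WAL. A minimal dispatch sketch that could sit alongside it inside WalStat, assuming a WAL record iterator has already been obtained elsewhere (how it is created is out of scope here):

// Illustrative only: route TxRecord entries from a WAL record stream into the tx statistics.
private void collectTxStats(Iterator<WALRecord> walRecords) {
    while (walRecords.hasNext()) {
        WALRecord rec = walRecords.next();

        if (rec instanceof TxRecord)
            registerTxRecord((TxRecord)rec);
    }
}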
Use of org.apache.ignite.transactions.TransactionState in project ignite by apache.
The class IgniteTxAdapter, method errorWhenCommitting.
/** {@inheritDoc} */
@Override
public final void errorWhenCommitting() {
    synchronized (this) {
        TransactionState prev = state;

        assert prev == COMMITTING : prev;

        state = MARKED_ROLLBACK;

        if (log.isDebugEnabled())
            log.debug("Changed transaction state [prev=" + prev + ", new=" + this.state + ", tx=" + this + ']');

        notifyAll();
    }
}
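Conceptually, this hook belongs to the commit path: when applying entries fails mid-commit, the state is flipped from COMMITTING to MARKED_ROLLBACK so the transaction can still be rolled back. A hedged sketch of that caller pattern (not the actual Ignite commit code; commitEntries is a hypothetical step):

// Hypothetical caller shape around errorWhenCommitting().
try {
    commitEntries(tx); // Hypothetical: apply tx entries to the affected partitions/stores.
}
catch (IgniteCheckedException e) {
    // Commit failed mid-way: mark the tx for rollback (COMMITTING -> MARKED_ROLLBACK) ...
    tx.errorWhenCommitting();

    // ... and roll it back.
    tx.rollbackAsync().get();
}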