Use of org.apache.ignite.internal.visor.tx.VisorTxInfo in project ignite by apache.
Class TxCommands, method printTransactionMappings.
/**
 * Prints transaction mappings for a specific cluster node to the output.
 *
 * @param indent Indentation prefix for log lines.
 * @param entry Per-node map entry holding the VisorTxTask result.
 */
private void printTransactionMappings(String indent, Map.Entry<ClusterNode, VisorTxTaskResult> entry) {
    for (VisorTxInfo info : entry.getValue().getInfos()) {
        TxVerboseInfo verboseInfo = info.getTxVerboseInfo();

        if (verboseInfo != null) {
            logger.info(indent + "Mapping [type=" + verboseInfo.txMappingType() + "]:");

            printTransactionMapping(indent + DOUBLE_INDENT, info, verboseInfo);
        }
        else {
            logger.info(indent + "Mapping [type=HISTORICAL]:");
            logger.info(indent + DOUBLE_INDENT + "State: " + info.getState());
        }
    }
}
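For context, this helper is typically driven from a loop over the per-node results of a VisorTxTask execution. A minimal caller sketch, assuming a res map of type Map<ClusterNode, VisorTxTaskResult> and the same logger and DOUBLE_INDENT constant are in scope (illustrative, not the verbatim caller):

// Illustrative caller: print verbose mappings for every node in the result map.
for (Map.Entry<ClusterNode, VisorTxTaskResult> entry : res.entrySet()) {
    logger.info("Transaction mappings for node [id=" + entry.getKey().id() + "]:");

    printTransactionMappings(DOUBLE_INDENT, entry);
}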
Use of org.apache.ignite.internal.visor.tx.VisorTxInfo in project ignite by apache.
Class TransactionsMXBeanImpl, method getActiveTransactions.
/**
* {@inheritDoc}
*/
@Override public String getActiveTransactions(Long minDuration, Integer minSize, String prj, String consistentIds,
    String xid, String lbRegex, Integer limit, String order, boolean detailed, boolean kill) {
    try {
        IgniteCompute compute = ctx.cluster().get().compute();

        VisorTxProjection proj = null;

        if (prj != null) {
            if ("clients".equals(prj))
                proj = VisorTxProjection.CLIENT;
            else if ("servers".equals(prj))
                proj = VisorTxProjection.SERVER;
        }

        List<String> consIds = null;

        if (consistentIds != null)
            consIds = Arrays.stream(consistentIds.split(",")).collect(Collectors.toList());

        VisorTxSortOrder sortOrder = null;

        if (order != null)
            sortOrder = VisorTxSortOrder.valueOf(order.toUpperCase());

        // Convert the minimum duration from seconds to milliseconds.
        VisorTxTaskArg arg = new VisorTxTaskArg(kill ? VisorTxOperation.KILL : VisorTxOperation.LIST, limit,
            minDuration == null ? null : minDuration * 1000, minSize, null, proj, consIds, xid, lbRegex, sortOrder, null);

        Map<ClusterNode, VisorTxTaskResult> res = compute.execute(new VisorTxTask(),
            new VisorTaskArgument<>(ctx.cluster().get().localNode().id(), arg, false));

        if (detailed) {
            StringWriter sw = new StringWriter();
            PrintWriter w = new PrintWriter(sw);

            for (Map.Entry<ClusterNode, VisorTxTaskResult> entry : res.entrySet()) {
                if (entry.getValue().getInfos().isEmpty())
                    continue;

                ClusterNode key = entry.getKey();

                w.println(key.toString());

                for (VisorTxInfo info : entry.getValue().getInfos())
                    w.println(info.toUserString());
            }

            w.flush();

            return sw.toString();
        }
        else {
            int cnt = 0;

            for (VisorTxTaskResult result : res.values())
                cnt += result.getInfos().size();

            return Integer.toString(cnt);
        }
    }
    catch (Exception e) {
        // Keep the original cause so the stack trace is not lost.
        throw new RuntimeException(e.getMessage(), e);
    }
}
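A minimal sketch of calling this method through JMX from the same JVM. The ObjectName below is an assumption: the actual registered name depends on the Ignite instance name and JMX configuration of the deployment, so verify it (e.g. in jconsole) before use.

import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.MBeanServerInvocationHandler;
import javax.management.ObjectName;

public class TxMXBeanClient {
    public static void main(String[] args) throws Exception {
        MBeanServer srv = ManagementFactory.getPlatformMBeanServer();

        // Hypothetical name: check the actual ObjectName in your deployment.
        ObjectName name = new ObjectName("org.apache:group=Transactions,name=TransactionsMXBeanImpl");

        TransactionsMXBean txBean = MBeanServerInvocationHandler.newProxyInstance(
            srv, name, TransactionsMXBean.class, false);

        // List transactions running longer than 60 seconds with per-transaction
        // details (detailed=true) and without killing them (kill=false).
        System.out.println(txBean.getActiveTransactions(60L, /*minSize*/ null, /*prj*/ null,
            /*consistentIds*/ null, /*xid*/ null, /*lbRegex*/ null, /*limit*/ null, /*order*/ null,
            /*detailed*/ true, /*kill*/ false));
    }
}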
Use of org.apache.ignite.internal.visor.tx.VisorTxInfo in project ignite by apache.
Class GridCommandHandlerTest, method testKillHangingRemoteTransactions.
/**
 * Simulates uncommitted backup transactions and tests rolling them back with the control utility.
 */
@Test
public void testKillHangingRemoteTransactions() throws Exception {
    final int cnt = 3;

    startGridsMultiThreaded(cnt);

    Ignite[] clients = new Ignite[] {
        startGrid("client1"), startGrid("client2"), startGrid("client3"), startGrid("client4")
    };

    clients[0].getOrCreateCache(new CacheConfiguration<>(DEFAULT_CACHE_NAME)
        .setBackups(2)
        .setAtomicityMode(TRANSACTIONAL)
        .setWriteSynchronizationMode(FULL_SYNC)
        .setAffinity(new RendezvousAffinityFunction(false, 64)));

    awaitPartitionMapExchange();

    for (Ignite client : clients) {
        assertTrue(client.configuration().isClientMode());
        assertNotNull(client.cache(DEFAULT_CACHE_NAME));
    }

    LongAdder progress = new LongAdder();
    AtomicInteger idx = new AtomicInteger();
    int tc = clients.length;

    CountDownLatch lockLatch = new CountDownLatch(1);
    CountDownLatch commitLatch = new CountDownLatch(1);

    Ignite prim = primaryNode(0L, DEFAULT_CACHE_NAME);

    TestRecordingCommunicationSpi primSpi = TestRecordingCommunicationSpi.spi(prim);

    // Block all tx finish requests arriving at the primary node.
    primSpi.blockMessages(new IgniteBiPredicate<ClusterNode, Message>() {
        @Override public boolean apply(ClusterNode node, Message message) {
            return message instanceof GridDhtTxFinishRequest;
        }
    });

    Set<IgniteUuid> xidSet = new GridConcurrentHashSet<>();

    IgniteInternalFuture<?> fut = multithreadedAsync(new Runnable() {
        @Override public void run() {
            int id = idx.getAndIncrement();

            Ignite client = clients[id];

            try (Transaction tx = client.transactions().txStart(PESSIMISTIC, READ_COMMITTED, 0, 1)) {
                xidSet.add(tx.xid());

                IgniteCache<Long, Long> cache = client.cache(DEFAULT_CACHE_NAME);

                if (id != 0)
                    U.awaitQuiet(lockLatch);

                cache.invoke(0L, new IncrementClosure(), null);

                if (id == 0) {
                    lockLatch.countDown();

                    U.awaitQuiet(commitLatch);

                    // Wait until lock candidates are enqueued.
                    doSleep(500);
                }

                tx.commit();
            }
            catch (Exception e) {
                assertTrue(X.hasCause(e, TransactionTimeoutException.class));
            }

            progress.increment();
        }
    }, tc, "invoke-thread");

    U.awaitQuiet(lockLatch);

    commitLatch.countDown();

    primSpi.waitForBlocked(clients.length);

    // Unblock finish messages only for clients 2 through 4.
    primSpi.stopBlock(true, blockedMsg -> {
        GridIoMessage iom = blockedMsg.ioMessage();

        Message m = iom.message();

        if (m instanceof GridDhtTxFinishRequest) {
            GridDhtTxFinishRequest r = (GridDhtTxFinishRequest)m;

            return !r.nearNodeId().equals(clients[0].cluster().localNode().id());
        }

        return true;
    });

    // Wait until the lock queue is stable on every server node.
    for (Ignite ignite : G.allGrids()) {
        if (ignite.configuration().isClientMode())
            continue;

        Collection<IgniteInternalTx> txs = ((IgniteEx)ignite).context().cache().context().tm().activeTransactions();

        waitForCondition(new GridAbsPredicate() {
            @Override public boolean apply() {
                for (IgniteInternalTx tx : txs) {
                    if (!tx.local()) {
                        IgniteTxEntry entry = tx.writeEntries().iterator().next();

                        GridCacheEntryEx cached = entry.cached();

                        Collection<GridCacheMvccCandidate> candidates = cached.remoteMvccSnapshot();

                        if (candidates.size() != clients.length)
                            return false;
                    }
                }

                return true;
            }
        }, 10_000);
    }

    CommandHandler h = new CommandHandler();

    // Check listing.
    validate(h, map -> {
        for (int i = 0; i < cnt; i++) {
            IgniteEx grid = grid(i);

            // Skip the primary node.
            if (grid.localNode().id().equals(prim.cluster().localNode().id()))
                continue;

            VisorTxTaskResult res = map.get(grid.localNode());

            List<VisorTxInfo> infos = res.getInfos().stream()
                .filter(info -> xidSet.contains(info.getNearXid()))
                .collect(Collectors.toList());

            // Validate the queue length on backups.
            assertEquals(clients.length, infos.size());
        }
    }, "--tx");

    // Check kill.
    validate(h, map -> {
        // No-op.
    }, "--tx", "--kill");

    // Wait for all remote txs to finish.
    for (Ignite ignite : G.allGrids()) {
        if (ignite.configuration().isClientMode())
            continue;

        Collection<IgniteInternalTx> txs = ((IgniteEx)ignite).context().cache().context().tm().activeTransactions();

        for (IgniteInternalTx tx : txs) {
            if (!tx.local())
                tx.finishFuture().get();
        }
    }

    // Unblock the finish message from client1.
    primSpi.stopBlock(true);

    fut.get();

    Long cur = (Long)clients[0].cache(DEFAULT_CACHE_NAME).get(0L);

    assertEquals(tc - 1, cur.longValue());

    checkUserFutures();
}
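The IncrementClosure used by cache.invoke(...) above is defined elsewhere in the test class and is not shown here. A plausible sketch, assuming it is a javax.cache.processor.EntryProcessor that seeds an absent value so that N successful commits leave the counter at N (consistent with the tc - 1 assertion at the end of the test):

// Hypothetical reconstruction of IncrementClosure, not the verbatim test code.
// Requires javax.cache.processor.EntryProcessor and MutableEntry.
private static class IncrementClosure implements EntryProcessor<Long, Long, Void> {
    @Override public Void process(MutableEntry<Long, Long> entry, Object... args) {
        // Seed an absent entry with 1, otherwise increment by one.
        entry.setValue(entry.exists() ? entry.getValue() + 1 : 1L);

        return null;
    }
}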
Use of org.apache.ignite.internal.visor.tx.VisorTxInfo in project ignite by apache.
Class GridCommandHandlerTest, method testActiveTransactions.
/**
 * Tests listing, filtering, ordering and killing of active transactions via the control utility.
 *
 * @throws Exception If failed.
 */
@Test
public void testActiveTransactions() throws Exception {
    Ignite ignite = startGridsMultiThreaded(2);

    ignite.cluster().active(true);

    Ignite client = startGrid("client");

    client.getOrCreateCache(new CacheConfiguration<>(DEFAULT_CACHE_NAME)
        .setAtomicityMode(TRANSACTIONAL)
        .setWriteSynchronizationMode(FULL_SYNC));

    for (Ignite ig : G.allGrids())
        assertNotNull(ig.cache(DEFAULT_CACHE_NAME));

    CountDownLatch lockLatch = new CountDownLatch(1);
    CountDownLatch unlockLatch = new CountDownLatch(1);

    IgniteInternalFuture<?> fut = startTransactions("testActiveTransactions", lockLatch, unlockLatch, true);

    U.awaitQuiet(lockLatch);

    doSleep(5000);

    CommandHandler h = new CommandHandler();

    final VisorTxInfo[] toKill = { null };

    // Basic test.
    validate(h, map -> {
        VisorTxTaskResult res = map.get(grid(0).cluster().localNode());

        for (VisorTxInfo info : res.getInfos()) {
            if (info.getSize() == 100) {
                // Store for later use.
                toKill[0] = info;

                break;
            }
        }

        assertEquals(3, map.size());
    }, "--tx");

    assertNotNull(toKill[0]);

    // Test filtering by label.
    validate(h, map -> {
        ClusterNode node = grid(0).cluster().localNode();

        for (Map.Entry<ClusterNode, VisorTxTaskResult> entry : map.entrySet())
            assertEquals(entry.getKey().equals(node) ? 1 : 0, entry.getValue().getInfos().size());
    }, "--tx", "--label", "label1");

    // Test filtering by label regex.
    validate(h, map -> {
        ClusterNode node1 = grid(0).cluster().localNode();
        ClusterNode node2 = grid("client").cluster().localNode();

        for (Map.Entry<ClusterNode, VisorTxTaskResult> entry : map.entrySet()) {
            if (entry.getKey().equals(node1)) {
                assertEquals(1, entry.getValue().getInfos().size());
                assertEquals("label1", entry.getValue().getInfos().get(0).getLabel());
            }
            else if (entry.getKey().equals(node2)) {
                assertEquals(1, entry.getValue().getInfos().size());
                assertEquals("label2", entry.getValue().getInfos().get(0).getLabel());
            }
            else
                assertTrue(entry.getValue().getInfos().isEmpty());
        }
    }, "--tx", "--label", "^label[0-9]");

    // Test filtering by empty label.
    validate(h, map -> {
        VisorTxTaskResult res = map.get(grid(0).localNode());

        for (VisorTxInfo info : res.getInfos())
            assertNull(info.getLabel());
    }, "--tx", "--label", "null");

    // Test filtering by minimum size.
    int minSize = 10;

    validate(h, map -> {
        VisorTxTaskResult res = map.get(grid(0).localNode());

        assertNotNull(res);

        for (VisorTxInfo txInfo : res.getInfos())
            assertTrue(txInfo.getSize() >= minSize);
    }, "--tx", "--min-size", Integer.toString(minSize));

    // Test ordering by size.
    validate(h, map -> {
        VisorTxTaskResult res = map.get(grid(0).localNode());

        assertTrue(res.getInfos().get(0).getSize() >= res.getInfos().get(1).getSize());
    }, "--tx", "--order", "SIZE");

    // Test ordering by duration.
    validate(h, map -> {
        VisorTxTaskResult res = map.get(grid(0).localNode());

        assertTrue(res.getInfos().get(0).getDuration() >= res.getInfos().get(1).getDuration());
    }, "--tx", "--order", "DURATION");

    // Test ordering by start time.
    validate(h, map -> {
        VisorTxTaskResult res = map.get(grid(0).localNode());

        for (int i = res.getInfos().size() - 1; i > 1; i--)
            assertTrue(res.getInfos().get(i - 1).getStartTime() >= res.getInfos().get(i).getStartTime());
    }, "--tx", "--order", "START_TIME");

    // Trigger a topology change and test the connection.
    IgniteInternalFuture<?> startFut = multithreadedAsync(() -> {
        try {
            startGrid(2);
        }
        catch (Exception e) {
            fail();
        }
    }, 1, "start-node-thread");

    // Give enough time to reach the exchange future.
    doSleep(5000);

    assertEquals(EXIT_CODE_OK, execute(h, "--tx"));

    // Test kill by xid.
    validate(h, map -> {
        assertEquals(1, map.size());

        Map.Entry<ClusterNode, VisorTxTaskResult> killedEntry = map.entrySet().iterator().next();

        VisorTxInfo info = killedEntry.getValue().getInfos().get(0);

        assertEquals(toKill[0].getXid(), info.getXid());
    }, "--tx", "--kill",
        "--xid", toKill[0].getXid().toString(), // Use the xid saved during the first run.
        "--nodes", grid(0).localNode().consistentId().toString());

    unlockLatch.countDown();

    startFut.get();

    fut.get();

    awaitPartitionMapExchange();

    checkUserFutures();
}
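The validate(...) helper used throughout both tests belongs to the test class rather than the public API. A plausible sketch of its contract, assuming CommandHandler exposes the last operation result, that execute(h, args) returns the exit code as in the snippets above, and using java.util.function.Consumer (a reconstruction, not the verbatim helper):

// Hypothetical sketch: run the command, assert success, then pass the
// per-node result map to the supplied check.
private void validate(CommandHandler h, Consumer<Map<ClusterNode, VisorTxTaskResult>> validator, String... args) {
    assertEquals(EXIT_CODE_OK, execute(h, args));

    validator.accept(h.getLastOperationResult());
}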