Use of org.apache.ignite.cluster.ClusterNode in project ignite by apache.
Example from class GridClusterStateProcessor, method start().
    /** {@inheritDoc} */
    @Override
    public void start(boolean activeOnStart) throws IgniteCheckedException {
        super.start(activeOnStart);
        globalState = activeOnStart ? ACTIVE : INACTIVE;
        cacheProc = ctx.cache();
        sharedCtx = cacheProc.context();
        sharedCtx.io().addHandler(0, GridChangeGlobalStateMessageResponse.class, new CI2<UUID, GridChangeGlobalStateMessageResponse>() {
            @Override
            public void apply(UUID nodeId, GridChangeGlobalStateMessageResponse msg) {
                processChangeGlobalStateResponse(nodeId, msg);
            }
        });
        ctx.discovery().setCustomEventListener(ChangeGlobalStateMessage.class, new CustomEventListener<ChangeGlobalStateMessage>() {
            @Override
            public void onCustomEvent(AffinityTopologyVersion topVer, ClusterNode snd, ChangeGlobalStateMessage msg) {
                assert topVer != null;
                assert snd != null;
                assert msg != null;
                boolean activate = msg.activate();
                ChangeGlobalStateContext actx = lastCgsCtx;
                if (actx != null && globalState == TRANSITION) {
                    GridChangeGlobalStateFuture f = cgsLocFut.get();
                    if (log.isDebugEnabled())
                        log.debug("Concurrent " + prettyStr(activate) + " [id=" + ctx.localNodeId() + " topVer=" + topVer + " actx=" + actx + ", msg=" + msg + "]");
                    if (f != null && f.requestId.equals(msg.requestId()))
                        f.onDone(new IgniteCheckedException("Concurrent change state, now in progress=" + (activate) + ", initiatingNodeId=" + actx.initiatingNodeId + ", you try=" + (prettyStr(activate)) + ", locNodeId=" + ctx.localNodeId()));
                    msg.concurrentChangeState();
                } else {
                    if (log.isInfoEnabled())
                        log.info("Create " + prettyStr(activate) + " context [id=" + ctx.localNodeId() + " topVer=" + topVer + ", reqId=" + msg.requestId() + ", initiatingNodeId=" + msg.initiatorNodeId() + "]");
                    lastCgsCtx = new ChangeGlobalStateContext(msg.requestId(), msg.initiatorNodeId(), msg.getDynamicCacheChangeBatch(), msg.activate());
                    globalState = TRANSITION;
                }
            }
        });
        ctx.event().addLocalEventListener(lsr, EVT_NODE_LEFT, EVT_NODE_FAILED);
    }
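For context, the state transition handled by this processor is normally triggered through the public API rather than by sending a ChangeGlobalStateMessage directly. Below is a minimal, hedged sketch of activating a cluster from user code; the configuration file path is hypothetical, and on newer Ignite versions ClusterState-based methods may be preferred over active(boolean).

import org.apache.ignite.Ignite;
import org.apache.ignite.Ignition;

public class ActivateClusterExample {
    public static void main(String[] args) {
        // Start (or connect as) a node; the configuration path is made up for illustration.
        try (Ignite ignite = Ignition.start("config/example-ignite.xml")) {
            // Requesting activation internally results in a ChangeGlobalStateMessage
            // that is processed by GridClusterStateProcessor on every node.
            if (!ignite.cluster().active())
                ignite.cluster().active(true);

            System.out.println("Cluster active: " + ignite.cluster().active());
        }
    }
}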
Use of org.apache.ignite.cluster.ClusterNode in project ignite by apache.
Example from class TcpCommunicationSpi, method safeHandshake().
    /**
     * Performs handshake in a timeout-safe way.
     *
     * @param client Client.
     * @param recovery Recovery descriptor if the recovery handshake is used, otherwise {@code null}.
     * @param rmtNodeId Remote node ID.
     * @param timeout Timeout for handshake.
     * @param sslMeta Session meta.
     * @param handshakeConnIdx Non-null connection index if it needs to be sent in the handshake.
     * @throws IgniteCheckedException If handshake failed or wasn't completed within the timeout.
     * @return Handshake response.
     */
    @SuppressWarnings("ThrowFromFinallyBlock")
    private <T> long safeHandshake(T client, @Nullable GridNioRecoveryDescriptor recovery, UUID rmtNodeId, long timeout, GridSslMeta sslMeta, @Nullable Integer handshakeConnIdx) throws IgniteCheckedException {
        HandshakeTimeoutObject<T> obj = new HandshakeTimeoutObject<>(client, U.currentTimeMillis() + timeout);
        addTimeoutObject(obj);
        long rcvCnt = 0;
        try {
            if (client instanceof GridCommunicationClient)
                ((GridCommunicationClient) client).doHandshake(new HandshakeClosure(rmtNodeId));
            else {
                SocketChannel ch = (SocketChannel) client;
                boolean success = false;
                try {
                    BlockingSslHandler sslHnd = null;
                    ByteBuffer buf;
                    // Read the remote node ID message, decrypting it first if SSL is enabled.
                    if (isSslEnabled()) {
                        assert sslMeta != null;
                        sslHnd = new BlockingSslHandler(sslMeta.sslEngine(), ch, directBuf, ByteOrder.nativeOrder(), log);
                        if (!sslHnd.handshake())
                            throw new IgniteCheckedException("SSL handshake is not completed.");
                        ByteBuffer handBuff = sslHnd.applicationBuffer();
                        if (handBuff.remaining() < NodeIdMessage.MESSAGE_FULL_SIZE) {
                            buf = ByteBuffer.allocate(1000);
                            int read = ch.read(buf);
                            if (read == -1)
                                throw new IgniteCheckedException("Failed to read remote node ID (connection closed).");
                            buf.flip();
                            buf = sslHnd.decode(buf);
                        } else
                            buf = handBuff;
                    } else {
                        buf = ByteBuffer.allocate(NodeIdMessage.MESSAGE_FULL_SIZE);
                        for (int i = 0; i < NodeIdMessage.MESSAGE_FULL_SIZE; ) {
                            int read = ch.read(buf);
                            if (read == -1)
                                throw new IgniteCheckedException("Failed to read remote node ID (connection closed).");
                            i += read;
                        }
                    }
                    UUID rmtNodeId0 = U.bytesToUuid(buf.array(), Message.DIRECT_TYPE_SIZE);
                    if (!rmtNodeId.equals(rmtNodeId0))
                        throw new IgniteCheckedException("Remote node ID is not as expected [expected=" + rmtNodeId + ", rcvd=" + rmtNodeId0 + ']');
                    else if (log.isDebugEnabled())
                        log.debug("Received remote node ID: " + rmtNodeId0);
                    // Send the Ignite header, then either a handshake message (recovery case) or the local node ID.
                    if (isSslEnabled()) {
                        assert sslHnd != null;
                        ch.write(sslHnd.encrypt(ByteBuffer.wrap(U.IGNITE_HEADER)));
                    } else
                        ch.write(ByteBuffer.wrap(U.IGNITE_HEADER));
                    ClusterNode locNode = getLocalNode();
                    if (locNode == null)
                        throw new IgniteCheckedException("Local node has not been started or " + "fully initialized [isStopping=" + getSpiContext().isStopping() + ']');
                    if (recovery != null) {
                        HandshakeMessage msg;
                        int msgSize = HandshakeMessage.MESSAGE_FULL_SIZE;
                        if (handshakeConnIdx != null) {
                            msg = new HandshakeMessage2(locNode.id(), recovery.incrementConnectCount(), recovery.received(), handshakeConnIdx);
                            msgSize += 4;
                        } else {
                            msg = new HandshakeMessage(locNode.id(), recovery.incrementConnectCount(), recovery.received());
                        }
                        if (log.isDebugEnabled())
                            log.debug("Writing handshake message [locNodeId=" + locNode.id() + ", rmtNode=" + rmtNodeId + ", msg=" + msg + ']');
                        buf = ByteBuffer.allocate(msgSize);
                        buf.order(ByteOrder.nativeOrder());
                        boolean written = msg.writeTo(buf, null);
                        assert written;
                        buf.flip();
                        if (isSslEnabled()) {
                            assert sslHnd != null;
                            ch.write(sslHnd.encrypt(buf));
                        } else
                            ch.write(buf);
                    } else {
                        if (isSslEnabled()) {
                            assert sslHnd != null;
                            ch.write(sslHnd.encrypt(ByteBuffer.wrap(nodeIdMessage().nodeIdBytesWithType)));
                        } else
                            ch.write(ByteBuffer.wrap(nodeIdMessage().nodeIdBytesWithType));
                    }
                    // In the recovery case, wait for the acknowledgement carrying the received-message count.
                    if (recovery != null) {
                        if (log.isDebugEnabled())
                            log.debug("Waiting for handshake [rmtNode=" + rmtNodeId + ']');
                        if (isSslEnabled()) {
                            assert sslHnd != null;
                            buf = ByteBuffer.allocate(1000);
                            buf.order(ByteOrder.nativeOrder());
                            ByteBuffer decode = ByteBuffer.allocate(2 * buf.capacity());
                            decode.order(ByteOrder.nativeOrder());
                            for (int i = 0; i < RecoveryLastReceivedMessage.MESSAGE_FULL_SIZE; ) {
                                int read = ch.read(buf);
                                if (read == -1)
                                    throw new IgniteCheckedException("Failed to read remote node recovery handshake " + "(connection closed).");
                                buf.flip();
                                ByteBuffer decode0 = sslHnd.decode(buf);
                                i += decode0.remaining();
                                decode = appendAndResizeIfNeeded(decode, decode0);
                                buf.clear();
                            }
                            decode.flip();
                            rcvCnt = decode.getLong(Message.DIRECT_TYPE_SIZE);
                            if (decode.limit() > RecoveryLastReceivedMessage.MESSAGE_FULL_SIZE) {
                                decode.position(RecoveryLastReceivedMessage.MESSAGE_FULL_SIZE);
                                sslMeta.decodedBuffer(decode);
                            }
                            ByteBuffer inBuf = sslHnd.inputBuffer();
                            if (inBuf.position() > 0)
                                sslMeta.encodedBuffer(inBuf);
                        } else {
                            buf = ByteBuffer.allocate(RecoveryLastReceivedMessage.MESSAGE_FULL_SIZE);
                            buf.order(ByteOrder.nativeOrder());
                            for (int i = 0; i < RecoveryLastReceivedMessage.MESSAGE_FULL_SIZE; ) {
                                int read = ch.read(buf);
                                if (read == -1)
                                    throw new IgniteCheckedException("Failed to read remote node recovery handshake " + "(connection closed).");
                                i += read;
                            }
                            rcvCnt = buf.getLong(Message.DIRECT_TYPE_SIZE);
                        }
                        if (log.isDebugEnabled())
                            log.debug("Received handshake message [rmtNode=" + rmtNodeId + ", rcvCnt=" + rcvCnt + ']');
                        if (rcvCnt == -1) {
                            if (log.isDebugEnabled())
                                log.debug("Connection rejected, will retry client creation [rmtNode=" + rmtNodeId + ']');
                        } else
                            success = true;
                    } else
                        success = true;
                } catch (IOException e) {
                    if (log.isDebugEnabled())
                        log.debug("Failed to read from channel: " + e);
                    throw new IgniteCheckedException("Failed to read from channel.", e);
                } finally {
                    if (!success)
                        U.closeQuiet(ch);
                }
            }
        } finally {
            boolean cancelled = obj.cancel();
            if (cancelled)
                removeTimeoutObject(obj);
            // Ignoring whatever happened after timeout - reporting only timeout event.
            if (!cancelled)
                throw new HandshakeTimeoutException("Failed to perform handshake due to timeout (consider increasing " + "'connectionTimeout' configuration property).");
        }
        return rcvCnt;
    }
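The method is built around a timeout object that is registered before the blocking I/O and cancelled afterwards; if cancellation fails, only the timeout is reported, regardless of what else happened. Below is a simplified, self-contained sketch of that pattern using only the JDK; the class and method names are illustrative, not Ignite APIs.

import java.util.concurrent.Callable;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;

public class TimeoutGuardedOperation {
    private static final ScheduledExecutorService TIMER = Executors.newSingleThreadScheduledExecutor();

    /** Runs a blocking operation and fails with a timeout error if it does not finish in time. */
    public static <T> T runWithTimeout(Callable<T> op, long timeoutMs, AutoCloseable resourceToCloseOnTimeout) throws Exception {
        AtomicBoolean done = new AtomicBoolean();

        // Analogue of HandshakeTimeoutObject: closes the underlying resource when the timeout fires,
        // which unblocks the I/O performed by the operation.
        ScheduledFuture<?> guard = TIMER.schedule(() -> {
            if (done.compareAndSet(false, true)) {
                try {
                    resourceToCloseOnTimeout.close();
                }
                catch (Exception ignored) {
                    // No-op.
                }
            }
        }, timeoutMs, TimeUnit.MILLISECONDS);

        try {
            T res = op.call();

            // Analogue of obj.cancel(): if we lose the race with the timer, report only the timeout.
            if (!done.compareAndSet(false, true))
                throw new TimeoutException("Operation timed out after " + timeoutMs + " ms");

            return res;
        }
        finally {
            guard.cancel(false);
        }
    }
}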
Use of org.apache.ignite.cluster.ClusterNode in project ignite by apache.
Example from class TxOptimisticDeadlockDetectionTest, method doTestDeadlock().
    /**
     * @throws Exception If failed.
     */
    private void doTestDeadlock(final int txCnt, final boolean loc, boolean lockPrimaryFirst, final boolean clientTx, final IgniteClosure<Integer, Object> transformer) throws Exception {
        log.info(">>> Test deadlock [txCnt=" + txCnt + ", loc=" + loc + ", lockPrimaryFirst=" + lockPrimaryFirst + ", clientTx=" + clientTx + ", transformer=" + transformer.getClass().getName() + ']');
        TestCommunicationSpi.init(txCnt);
        final AtomicInteger threadCnt = new AtomicInteger();
        final CyclicBarrier barrier = new CyclicBarrier(txCnt);
        final AtomicReference<TransactionDeadlockException> deadlockErr = new AtomicReference<>();
        final List<List<Integer>> keySets = generateKeys(txCnt, loc, !lockPrimaryFirst);
        final Set<Integer> involvedKeys = new GridConcurrentHashSet<>();
        final Set<Integer> involvedLockedKeys = new GridConcurrentHashSet<>();
        final Set<IgniteInternalTx> involvedTxs = new GridConcurrentHashSet<>();
        IgniteInternalFuture<Long> fut = GridTestUtils.runMultiThreadedAsync(new Runnable() {
            @Override
            public void run() {
                int threadNum = threadCnt.incrementAndGet();
                Ignite ignite = loc ? ignite(0) : ignite(clientTx ? threadNum - 1 + txCnt : threadNum - 1);
                IgniteCache<Object, Integer> cache = ignite.cache(CACHE_NAME);
                List<Integer> keys = keySets.get(threadNum - 1);
                int txTimeout = 500 + txCnt * 100;
                try (Transaction tx = ignite.transactions().txStart(OPTIMISTIC, REPEATABLE_READ, txTimeout, 0)) {
                    IgniteInternalTx tx0 = ((TransactionProxyImpl) tx).tx();
                    involvedTxs.add(tx0);
                    Integer key = keys.get(0);
                    involvedKeys.add(key);
                    Object k;
                    log.info(">>> Performs put [node=" + ((IgniteKernal) ignite).localNode().id() + ", tx=" + tx.xid() + ", key=" + transformer.apply(key) + ']');
                    cache.put(transformer.apply(key), 0);
                    involvedLockedKeys.add(key);
                    barrier.await();
                    key = keys.get(1);
                    ClusterNode primaryNode = ((IgniteCacheProxy) cache).context().affinity().primaryByKey(key, NONE);
                    List<Integer> primaryKeys = primaryKeys(grid(primaryNode).cache(CACHE_NAME), 5, key + (100 * threadNum));
                    Map<Object, Integer> entries = new HashMap<>();
                    involvedKeys.add(key);
                    entries.put(transformer.apply(key), 0);
                    for (Integer i : primaryKeys) {
                        involvedKeys.add(i);
                        entries.put(transformer.apply(i), 1);
                        k = transformer.apply(i + 13);
                        involvedKeys.add(i + 13);
                        entries.put(k, 2);
                    }
                    log.info(">>> Performs put [node=" + ((IgniteKernal) ignite).localNode().id() + ", tx=" + tx.xid() + ", entries=" + entries + ']');
                    cache.putAll(entries);
                    tx.commit();
                } catch (Throwable e) {
                    log.info("Expected exception: " + e);
                    e.printStackTrace(System.out);
                    // At least one stack trace should contain TransactionDeadlockException.
                    if (hasCause(e, TransactionTimeoutException.class) && hasCause(e, TransactionDeadlockException.class)) {
                        if (deadlockErr.compareAndSet(null, cause(e, TransactionDeadlockException.class))) {
                            log.info("At least one stack trace should contain " + TransactionDeadlockException.class.getSimpleName());
                            e.printStackTrace(System.out);
                        }
                    }
                }
            }
        }, loc ? 2 : txCnt, "tx-thread");
        try {
            fut.get();
        } catch (IgniteCheckedException e) {
            U.error(null, "Unexpected exception", e);
            fail();
        }
        U.sleep(1000);
        TransactionDeadlockException deadlockE = deadlockErr.get();
        assertNotNull("Failed to detect deadlock", deadlockE);
        boolean fail = false;
        // Check transactions, futures and entry locks state.
        for (int i = 0; i < NODES_CNT * 2; i++) {
            Ignite ignite = ignite(i);
            int cacheId = ((IgniteCacheProxy) ignite.cache(CACHE_NAME)).context().cacheId();
            GridCacheSharedContext<Object, Object> cctx = ((IgniteKernal) ignite).context().cache().context();
            IgniteTxManager txMgr = cctx.tm();
            Collection<IgniteInternalTx> activeTxs = txMgr.activeTransactions();
            for (IgniteInternalTx tx : activeTxs) {
                Collection<IgniteTxEntry> entries = tx.allEntries();
                for (IgniteTxEntry entry : entries) {
                    if (entry.cacheId() == cacheId) {
                        fail = true;
                        U.error(log, "Transaction still exists: " + "\n" + tx.xidVersion() + "\n" + tx.nearXidVersion() + "\n nodeId=" + cctx.localNodeId() + "\n tx=" + tx);
                    }
                }
            }
            Collection<IgniteInternalFuture<?>> futs = txMgr.deadlockDetectionFutures();
            assertTrue(futs.isEmpty());
            GridCacheAdapter<Object, Integer> intCache = internalCache(i, CACHE_NAME);
            GridCacheConcurrentMap map = intCache.map();
            for (Integer key : involvedKeys) {
                Object key0 = transformer.apply(key);
                KeyCacheObject keyCacheObj = intCache.context().toCacheKeyObject(key0);
                GridCacheMapEntry entry = map.getEntry(keyCacheObj);
                if (entry != null)
                    assertNull("Entry still has locks " + entry, entry.mvccAllLocal());
            }
        }
        if (fail)
            fail("Some transactions still exist");
        // Check deadlock report.
        String msg = deadlockE.getMessage();
        for (IgniteInternalTx tx : involvedTxs)
            assertTrue(msg.contains("[txId=" + tx.xidVersion() + ", nodeId=" + tx.nodeId() + ", threadId=" + tx.threadId() + ']'));
        for (Integer key : involvedKeys) {
            if (involvedLockedKeys.contains(key))
                assertTrue(msg.contains("[key=" + transformer.apply(key) + ", cache=" + CACHE_NAME + ']'));
            else
                assertFalse(msg.contains("[key=" + transformer.apply(key)));
        }
    }
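The test relies on the detected deadlock surfacing as a TransactionDeadlockException in the cause chain of the exception thrown from the cache operation. A minimal, hedged sketch of checking for that from application code with the public API follows; the class, method, and key values are made up for illustration.

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.transactions.Transaction;
import org.apache.ignite.transactions.TransactionDeadlockException;

import static org.apache.ignite.transactions.TransactionConcurrency.OPTIMISTIC;
import static org.apache.ignite.transactions.TransactionIsolation.REPEATABLE_READ;

public class DeadlockAwarePut {
    /** Returns true if any cause in the chain is a TransactionDeadlockException. */
    static boolean causedByDeadlock(Throwable e) {
        for (Throwable t = e; t != null; t = t.getCause()) {
            if (t instanceof TransactionDeadlockException)
                return true;
        }
        return false;
    }

    static void putWithDeadlockCheck(Ignite ignite, IgniteCache<Integer, Integer> cache, int key) {
        // A non-zero timeout (here 500 ms) is what triggers deadlock detection when it expires.
        try (Transaction tx = ignite.transactions().txStart(OPTIMISTIC, REPEATABLE_READ, 500, 0)) {
            cache.put(key, 0);
            tx.commit();
        }
        catch (RuntimeException e) {
            if (causedByDeadlock(e))
                System.out.println("Deadlock detected: " + e.getMessage());
            else
                throw e;
        }
    }
}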
Use of org.apache.ignite.cluster.ClusterNode in project ignite by apache.
Example from class TxPessimisticDeadlockDetectionTest, method doTestDeadlock().
    /**
     * @throws Exception If failed.
     */
    private void doTestDeadlock(final int txCnt, final boolean loc, boolean lockPrimaryFirst, final boolean clientTx, final IgniteClosure<Integer, Object> transformer) throws Exception {
        log.info(">>> Test deadlock [txCnt=" + txCnt + ", loc=" + loc + ", lockPrimaryFirst=" + lockPrimaryFirst + ", clientTx=" + clientTx + ", transformer=" + transformer.getClass().getName() + ']');
        final AtomicInteger threadCnt = new AtomicInteger();
        final CyclicBarrier barrier = new CyclicBarrier(txCnt);
        final AtomicReference<TransactionDeadlockException> deadlockErr = new AtomicReference<>();
        final List<List<Integer>> keySets = generateKeys(txCnt, loc, !lockPrimaryFirst);
        final Set<Integer> involvedKeys = new GridConcurrentHashSet<>();
        final Set<Integer> involvedLockedKeys = new GridConcurrentHashSet<>();
        final Set<IgniteInternalTx> involvedTxs = new GridConcurrentHashSet<>();
        IgniteInternalFuture<Long> fut = GridTestUtils.runMultiThreadedAsync(new Runnable() {
            @Override
            public void run() {
                int threadNum = threadCnt.incrementAndGet();
                Ignite ignite = loc ? ignite(0) : ignite(clientTx ? threadNum - 1 + txCnt : threadNum - 1);
                IgniteCache<Object, Integer> cache = ignite.cache(CACHE_NAME);
                List<Integer> keys = keySets.get(threadNum - 1);
                int txTimeout = 500 + txCnt * 100;
                try (Transaction tx = ignite.transactions().txStart(PESSIMISTIC, REPEATABLE_READ, txTimeout, 0)) {
                    involvedTxs.add(((TransactionProxyImpl) tx).tx());
                    Integer key = keys.get(0);
                    involvedKeys.add(key);
                    Object k;
                    log.info(">>> Performs put [node=" + ((IgniteKernal) ignite).localNode() + ", tx=" + tx + ", key=" + transformer.apply(key) + ']');
                    cache.put(transformer.apply(key), 0);
                    involvedLockedKeys.add(key);
                    barrier.await();
                    key = keys.get(1);
                    ClusterNode primaryNode = ((IgniteCacheProxy) cache).context().affinity().primaryByKey(key, NONE);
                    List<Integer> primaryKeys = primaryKeys(grid(primaryNode).cache(CACHE_NAME), 5, key + (100 * threadNum));
                    Map<Object, Integer> entries = new HashMap<>();
                    involvedKeys.add(key);
                    entries.put(transformer.apply(key), 0);
                    for (Integer i : primaryKeys) {
                        involvedKeys.add(i);
                        entries.put(transformer.apply(i), 1);
                        k = transformer.apply(i + 13);
                        involvedKeys.add(i + 13);
                        entries.put(k, 2);
                    }
                    log.info(">>> Performs put [node=" + ((IgniteKernal) ignite).localNode() + ", tx=" + tx + ", entries=" + entries + ']');
                    cache.putAll(entries);
                    tx.commit();
                } catch (Throwable e) {
                    // At least one stack trace should contain TransactionDeadlockException.
                    if (hasCause(e, TransactionTimeoutException.class) && hasCause(e, TransactionDeadlockException.class)) {
                        if (deadlockErr.compareAndSet(null, cause(e, TransactionDeadlockException.class)))
                            U.error(log, "At least one stack trace should contain " + TransactionDeadlockException.class.getSimpleName(), e);
                    }
                }
            }
        }, loc ? 2 : txCnt, "tx-thread");
        try {
            fut.get();
        } catch (IgniteCheckedException e) {
            U.error(null, "Unexpected exception", e);
            fail();
        }
        U.sleep(1000);
        TransactionDeadlockException deadlockE = deadlockErr.get();
        assertNotNull(deadlockE);
        boolean fail = false;
        // Check transactions, futures and entry locks state.
        for (int i = 0; i < NODES_CNT * 2; i++) {
            Ignite ignite = ignite(i);
            int cacheId = ((IgniteCacheProxy) ignite.cache(CACHE_NAME)).context().cacheId();
            GridCacheSharedContext<Object, Object> cctx = ((IgniteKernal) ignite).context().cache().context();
            IgniteTxManager txMgr = cctx.tm();
            Collection<IgniteInternalTx> activeTxs = txMgr.activeTransactions();
            for (IgniteInternalTx tx : activeTxs) {
                Collection<IgniteTxEntry> entries = tx.allEntries();
                for (IgniteTxEntry entry : entries) {
                    if (entry.cacheId() == cacheId) {
                        fail = true;
                        U.error(log, "Transaction still exists: " + "\n" + tx.xidVersion() + "\n" + tx.nearXidVersion() + "\n nodeId=" + cctx.localNodeId() + "\n tx=" + tx);
                    }
                }
            }
            Collection<IgniteInternalFuture<?>> futs = txMgr.deadlockDetectionFutures();
            assertTrue(futs.isEmpty());
            GridCacheAdapter<Object, Integer> intCache = internalCache(i, CACHE_NAME);
            GridCacheConcurrentMap map = intCache.map();
            for (Integer key : involvedKeys) {
                Object key0 = transformer.apply(key);
                KeyCacheObject keyCacheObj = intCache.context().toCacheKeyObject(key0);
                GridCacheMapEntry entry = map.getEntry(keyCacheObj);
                if (entry != null)
                    assertNull("Entry still has locks " + entry, entry.mvccAllLocal());
            }
        }
        if (fail)
            fail("Some transactions still exist");
        // Check deadlock report.
        String msg = deadlockE.getMessage();
        for (IgniteInternalTx tx : involvedTxs)
            assertTrue(msg.contains("[txId=" + tx.xidVersion() + ", nodeId=" + tx.nodeId() + ", threadId=" + tx.threadId() + ']'));
        for (Integer key : involvedKeys) {
            if (involvedLockedKeys.contains(key))
                assertTrue(msg.contains("[key=" + transformer.apply(key) + ", cache=" + CACHE_NAME + ']'));
            else
                assertFalse(msg.contains("[key=" + transformer.apply(key)));
        }
    }
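Both deadlock tests locate the primary node of a key through internal affinity machinery (affinity().primaryByKey). With the public API the same information is available through the Affinity interface; a short hedged sketch follows, where the class and method names are illustrative.

import org.apache.ignite.Ignite;
import org.apache.ignite.cache.affinity.Affinity;
import org.apache.ignite.cluster.ClusterNode;

public class PrimaryNodeLookup {
    /** Prints the primary node and partition for the given key. */
    static void printPrimary(Ignite ignite, String cacheName, Object key) {
        Affinity<Object> aff = ignite.affinity(cacheName);

        // Primary node currently responsible for the key (null if the cache has no data nodes).
        ClusterNode primary = aff.mapKeyToNode(key);

        int part = aff.partition(key);

        System.out.println("key=" + key + ", partition=" + part +
            ", primaryNodeId=" + (primary == null ? "none" : primary.id()));
    }
}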
Use of org.apache.ignite.cluster.ClusterNode in project ignite by apache.
Example from class CacheUtils, method foreach().
    /**
     * @param cacheName Cache name.
     * @param fun An operation that accepts a cache entry and processes it.
     * @param keyFilter Cache keys filter.
     * @param <K> Cache key object type.
     * @param <V> Cache value object type.
     */
    public static <K, V> void foreach(String cacheName, IgniteConsumer<CacheEntry<K, V>> fun, IgnitePredicate<K> keyFilter) {
        bcast(cacheName, () -> {
            Ignite ignite = Ignition.localIgnite();
            IgniteCache<K, V> cache = ignite.getOrCreateCache(cacheName);
            int partsCnt = ignite.affinity(cacheName).partitions();
            // Use affinity in the scan query filter; otherwise the consumer would be applied on every node, which is wrong.
            Affinity affinity = ignite.affinity(cacheName);
            ClusterNode locNode = ignite.cluster().localNode();
            // Iterate over all partitions. Some of them will be stored on this local node.
            for (int part = 0; part < partsCnt; part++) {
                int p = part;
                // The query returns an empty cursor if this partition is not stored on this node.
                for (Cache.Entry<K, V> entry : cache.query(new ScanQuery<K, V>(part, (k, v) -> affinity.mapPartitionToNode(p) == locNode && (keyFilter == null || keyFilter.apply(k)))))
                    fun.accept(new CacheEntry<>(entry, cache));
            }
        });
    }
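The helper above scans every partition and relies on an affinity check inside the query filter to skip non-local data. A hedged sketch of the same idea expressed with only the public API is shown below: ask affinity for the partitions the local node is primary for and run a local scan query per partition. The class name and the Integer/Double key and value types are assumptions made for illustration.

import javax.cache.Cache;
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.cache.query.QueryCursor;
import org.apache.ignite.cache.query.ScanQuery;
import org.apache.ignite.cluster.ClusterNode;

public class LocalPartitionIteration {
    /** Processes only the entries whose primary copies live on the local node. */
    static void forEachLocalPrimaryEntry(Ignite ignite, String cacheName) {
        IgniteCache<Integer, Double> cache = ignite.cache(cacheName);
        ClusterNode locNode = ignite.cluster().localNode();

        // Ask affinity which partitions this node is primary for, instead of
        // filtering every partition inside the query as the snippet above does.
        for (int part : ignite.affinity(cacheName).primaryPartitions(locNode)) {
            ScanQuery<Integer, Double> qry = new ScanQuery<>(part);
            qry.setLocal(true);

            try (QueryCursor<Cache.Entry<Integer, Double>> cur = cache.query(qry)) {
                for (Cache.Entry<Integer, Double> e : cur)
                    System.out.println("part=" + part + ", key=" + e.getKey() + ", val=" + e.getValue());
            }
        }
    }
}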