Use of org.apache.ignite.IgniteClientDisconnectedException in project ignite by apache.
The class GridAbstractTest, method waitForTopology.
/**
 * @param expSize Expected nodes number.
 * @throws Exception If failed.
 */
protected void waitForTopology(final int expSize) throws Exception {
    assertTrue(GridTestUtils.waitForCondition(new GridAbsPredicate() {
        @Override public boolean apply() {
            List<Ignite> nodes = G.allGrids();

            if (nodes.size() != expSize) {
                info("Wait all nodes [size=" + nodes.size() + ", exp=" + expSize + ']');

                return false;
            }

            for (Ignite node : nodes) {
                try {
                    IgniteFuture<?> reconnectFut = node.cluster().clientReconnectFuture();

                    if (reconnectFut != null && !reconnectFut.isDone()) {
                        info("Wait for size on node, reconnect is in progress [node=" + node.name() + ']');

                        return false;
                    }

                    int sizeOnNode = node.cluster().nodes().size();

                    if (sizeOnNode != expSize) {
                        info("Wait for size on node [node=" + node.name() + ", size=" + sizeOnNode + ", exp=" + expSize + ']');

                        return false;
                    }
                }
                catch (IgniteClientDisconnectedException e) {
                    info("Wait for size on node, node disconnected [node=" + node.name() + ']');

                    return false;
                }
            }

            return true;
        }
    }, 30_000));
}
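For context, a hedged sketch of how a test built on GridAbstractTest might use this helper. The start-helper names and node counts below are illustrative assumptions, not taken from the snippet above:

// Hypothetical test body in a GridAbstractTest subclass: bring up two
// servers and one client, then wait until all three nodes agree on topology.
startGrid("server-1"); // assumed helper from GridAbstractTest
startGrid("server-2");
startClientGrid("client-1"); // assumed client-mode start helper

waitForTopology(3); // polls for up to 30 seconds, tolerating client reconnects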
Use of org.apache.ignite.IgniteClientDisconnectedException in project ignite by apache.
The class ZookeeperDiscoveryImpl, method sendCustomMessage.
/**
 * @param msg Message.
 */
public void sendCustomMessage(DiscoverySpiCustomMessage msg) {
    assert msg != null;

    List<ClusterNode> nodes = rtState.top.topologySnapshot();

    boolean hasServerNode = false;

    for (int i = 0, size = nodes.size(); i < size; i++) {
        ClusterNode node = nodes.get(i);

        if (!node.isClient())
            hasServerNode = true;
    }

    if (!hasServerNode)
        throw new IgniteException("Failed to send custom message: no server nodes in topology.");

    byte[] msgBytes;

    try {
        msgBytes = marshalZip(msg);
    }
    catch (IgniteCheckedException e) {
        throw new IgniteSpiException("Failed to marshal custom message: " + msg, e);
    }

    while (!busyLock.enterBusy())
        checkState();

    try {
        ZookeeperClient zkClient = rtState.zkClient;

        saveCustomMessage(zkClient, msgBytes);
    }
    catch (ZookeeperClientFailedException e) {
        if (clientReconnectEnabled)
            throw new IgniteClientDisconnectedException(null, "Client is disconnected.");

        throw new IgniteException(e);
    }
    catch (InterruptedException e) {
        Thread.currentThread().interrupt();

        throw new IgniteInterruptedException(e);
    }
    finally {
        busyLock.leaveBusy();
    }
}
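A minimal caller sketch, assuming a ZookeeperDiscoveryImpl instance named discovery and a prepared msg. Note that this particular snippet constructs the exception with a null reconnect future, so a null check is needed before waiting:

// Hypothetical caller: retry the custom discovery message after a reconnect.
try {
    discovery.sendCustomMessage(msg);
}
catch (IgniteClientDisconnectedException e) {
    IgniteFuture<?> reconnectFut = e.reconnectFuture();

    if (reconnectFut != null)
        reconnectFut.get(); // block until the client rejoins the cluster

    discovery.sendCustomMessage(msg); // single retry after reconnect
}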
Use of org.apache.ignite.IgniteClientDisconnectedException in project ignite by apache.
The class TcpClientDiscoverySpiSelfTest, method testForceClientReconnect.
/**
 * @throws Exception If failed.
 */
@Test
public void testForceClientReconnect() throws Exception {
    startServerNodes(1);

    startClientNodes(1);

    Ignite srv = G.ignite("server-0");

    IgniteKernal client = (IgniteKernal)G.ignite("client-0");

    UUID clientId = F.first(clientNodeIds);

    final CountDownLatch latch = new CountDownLatch(1);

    srv.events().enableLocal(EVT_NODE_JOINED);

    srv.events().localListen(new IgnitePredicate<Event>() {
        @Override public boolean apply(Event evt) {
            latch.countDown();

            return false;
        }
    }, EVT_NODE_JOINED);

    client.context().discovery().reconnect();

    assert latch.await(10, TimeUnit.SECONDS);

    while (true) {
        try {
            UUID newId = client.localNode().id();

            assert !clientId.equals(newId) : clientId;

            break;
        }
        catch (IgniteClientDisconnectedException e) {
            e.reconnectFuture().get(10_000);
        }
    }
}
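The while loop above is the general client-reconnect idiom: attempt the operation, and on IgniteClientDisconnectedException wait on the reconnect future before retrying. A minimal sketch applying it to an ordinary cache operation; the ignite instance and cache name are assumptions for illustration:

// Retry a cache put across a client disconnect.
IgniteCache<Integer, String> cache = ignite.getOrCreateCache("demo");

while (true) {
    try {
        cache.put(1, "value");

        break;
    }
    catch (IgniteClientDisconnectedException e) {
        // Wait (bounded) for the client to reconnect, then retry the operation.
        e.reconnectFuture().get(10_000);
    }
}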
Use of org.apache.ignite.IgniteClientDisconnectedException in project ignite by apache.
The class WebSessionFilter, method handleCacheOperationException.
/**
 * Handles cache operation exception.
 *
 * @param e Exception.
 */
void handleCacheOperationException(Exception e) {
    IgniteFuture<?> retryFut = null;

    if (e instanceof IllegalStateException) {
        initCache();

        return;
    }
    else if (X.hasCause(e, IgniteClientDisconnectedException.class)) {
        IgniteClientDisconnectedException cause = X.cause(e, IgniteClientDisconnectedException.class);

        assert cause != null : e;

        retryFut = cause.reconnectFuture();
    }
    else if (X.hasCause(e, ClusterTopologyException.class)) {
        ClusterTopologyException cause = X.cause(e, ClusterTopologyException.class);

        assert cause != null : e;

        retryFut = cause.retryReadyFuture();
    }

    if (retryFut != null) {
        try {
            retryFut.get(retriesTimeout);
        }
        catch (IgniteException retryErr) {
            throw new IgniteException("Failed to wait for retry: " + retryErr);
        }
    }
}
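A hedged sketch of the kind of retry loop that might surround a session-cache read and delegate to this handler. The retries, cache, and sesId names are placeholders, not fields from the snippet above:

// Hypothetical retry loop around a session-cache read, in the spirit of how
// WebSessionFilter wraps its cache operations.
WebSession ses = null;

for (int i = 0; i < retries && ses == null; i++) {
    try {
        ses = cache.get(sesId);
    }
    catch (IgniteException | IllegalStateException e) {
        // Waits on the reconnect/retry-ready future; the loop then retries.
        handleCacheOperationException(e);
    }
}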
Use of org.apache.ignite.IgniteClientDisconnectedException in project ignite by apache.
The class GridReduceQueryExecutor, method query.
/**
 * @param qryId Query ID.
 * @param schemaName Schema name.
 * @param qry Query.
 * @param keepBinary Keep binary.
 * @param enforceJoinOrder Enforce join order of tables.
 * @param timeoutMillis Timeout in milliseconds.
 * @param cancel Query cancel.
 * @param params Query parameters.
 * @param parts Partitions.
 * @param lazy Lazy execution flag.
 * @param mvccTracker Query tracker.
 * @param dataPageScanEnabled If data page scan is enabled.
 * @param pageSize Page size.
 * @return Rows iterator.
 */
@SuppressWarnings("IfMayBeConditional")
public Iterator<List<?>> query(
    long qryId,
    String schemaName,
    final GridCacheTwoStepQuery qry,
    boolean keepBinary,
    boolean enforceJoinOrder,
    int timeoutMillis,
    GridQueryCancel cancel,
    Object[] params,
    int[] parts,
    boolean lazy,
    MvccQueryTracker mvccTracker,
    Boolean dataPageScanEnabled,
    int pageSize
) {
    assert !qry.mvccEnabled() || mvccTracker != null;

    if (pageSize <= 0)
        pageSize = Query.DFLT_PAGE_SIZE;

    // If explicit partitions are set, but there are no real tables, ignore.
    if (!qry.hasCacheIds() && parts != null)
        parts = null;

    // Partitions are not supported for queries over all replicated caches.
    if (parts != null && qry.isReplicatedOnly())
        throw new CacheException("Partitions are not supported for replicated caches");

    try {
        if (qry.mvccEnabled())
            checkActive(tx(ctx));
    }
    catch (IgniteTxAlreadyCompletedCheckedException e) {
        throw new TransactionAlreadyCompletedException(e.getMessage(), e);
    }

    final boolean singlePartMode = parts != null && parts.length == 1;

    if (F.isEmpty(params))
        params = EMPTY_PARAMS;

    List<Integer> cacheIds = qry.cacheIds();

    List<GridCacheSqlQuery> mapQueries = prepareMapQueries(qry, params, singlePartMode);

    final boolean skipMergeTbl = !qry.explain() && qry.skipMergeTable() || singlePartMode;

    final long retryTimeout = retryTimeout(timeoutMillis);

    final long qryStartTime = U.currentTimeMillis();

    ReduceQueryRun lastRun = null;

    for (int attempt = 0; ; attempt++) {
        ensureQueryNotCancelled(cancel);

        if (attempt > 0) {
            throttleOnRetry(lastRun, qryStartTime, retryTimeout, attempt);

            ensureQueryNotCancelled(cancel);
        }

        AffinityTopologyVersion topVer = h2.readyTopologyVersion();

        // Check if topology has changed while retrying on locked topology.
        if (h2.serverTopologyChanged(topVer) && ctx.cache().context().lockedTopologyVersion(null) != null) {
            throw new CacheException(new TransactionException("Server topology is changed during query " +
                "execution inside a transaction. It's recommended to rollback and retry transaction."));
        }

        ReducePartitionMapResult mapping = createMapping(qry, parts, cacheIds, topVer);

        if (mapping == null) // Can't map query.
            continue; // Retry.

        final Collection<ClusterNode> nodes = mapping.nodes();

        final Map<ClusterNode, Integer> nodeToSegmentsCnt = createNodeToSegmentsCountMapping(qry, mapping);

        assert !F.isEmpty(nodes);

        H2PooledConnection conn = h2.connections().connection(schemaName);

        final long qryReqId = qryReqIdGen.incrementAndGet();

        h2.runningQueryManager().trackRequestId(qryReqId);

        boolean release = true;

        try {
            final ReduceQueryRun r = createReduceQueryRun(conn, mapQueries, nodes, pageSize,
                nodeToSegmentsCnt, skipMergeTbl, qry.explain(), dataPageScanEnabled);

            runs.put(qryReqId, r);

            try {
                cancel.add(() -> send(nodes, new GridQueryCancelRequest(qryReqId), null, true));

                GridH2QueryRequest req = new GridH2QueryRequest()
                    .queryId(qryId)
                    .requestId(qryReqId)
                    .topologyVersion(topVer)
                    .pageSize(pageSize)
                    .caches(qry.cacheIds())
                    .tables(qry.distributedJoins() ? qry.tables() : null)
                    .partitions(convert(mapping.partitionsMap()))
                    .queries(mapQueries)
                    .parameters(params)
                    .flags(queryFlags(qry, enforceJoinOrder, lazy, dataPageScanEnabled))
                    .timeout(timeoutMillis)
                    .explicitTimeout(true)
                    .schemaName(schemaName);

                if (mvccTracker != null)
                    req.mvccSnapshot(mvccTracker.snapshot());

                final C2<ClusterNode, Message, Message> spec = parts == null ? null :
                    new ReducePartitionsSpecializer(mapping.queryPartitionsMap());

                boolean retry = false;

                if (send(nodes, req, spec, false)) {
                    awaitAllReplies(r, nodes, cancel);

                    if (r.hasErrorOrRetry()) {
                        CacheException err = r.exception();

                        if (err != null) {
                            if (err.getCause() instanceof IgniteClientDisconnectedException)
                                throw err;
                            else if (QueryUtils.wasCancelled(err))
                                throw new QueryCancelledException(); // Throw correct exception.

                            throw err;
                        }

                        // If remote node asks us to retry then we have outdated full partition map.
                        h2.awaitForReadyTopologyVersion(r.retryTopologyVersion());

                        retry = true;
                    }
                }
                else
                    retry = true;

                if (retry) {
                    lastRun = runs.get(qryReqId);

                    assert lastRun != null;

                    continue; // Retry.
                }

                Iterator<List<?>> resIter;

                if (skipMergeTbl) {
                    resIter = new ReduceIndexIterator(this, nodes, r, qryReqId, qry.distributedJoins(),
                        mvccTracker, ctx.tracing());

                    release = false;

                    U.close(conn, log);
                }
                else {
                    ensureQueryNotCancelled(cancel);

                    QueryContext qctx = new QueryContext(0, null, null, null, null, true);

                    H2Utils.setupConnection(conn, qctx, false, enforceJoinOrder);

                    if (qry.explain())
                        return explainPlan(conn, qry, params);

                    GridCacheSqlQuery rdc = qry.reduceQuery();

                    final PreparedStatement stmt = conn.prepareStatementNoCache(rdc.query());

                    H2Utils.bindParameters(stmt, F.asList(rdc.parameters(params)));

                    ReduceH2QueryInfo qryInfo = new ReduceH2QueryInfo(stmt, qry.originalSql(),
                        ctx.localNodeId(), qryId, qryReqId);

                    ResultSet res = h2.executeSqlQueryWithTimer(stmt, conn, rdc.query(), timeoutMillis,
                        cancel, dataPageScanEnabled, qryInfo);

                    resIter = new H2FieldsIterator(res, mvccTracker, conn, r.pageSize(), log, h2,
                        qryInfo, ctx.tracing());

                    conn = null; // To prevent callback inside finally block.

                    mvccTracker = null; // To prevent callback inside finally block.
                }

                return new GridQueryCacheObjectsIterator(resIter, h2.objectContext(), keepBinary);
            }
            catch (IgniteCheckedException | RuntimeException e) {
                release = true;

                if (e instanceof CacheException) {
                    if (QueryUtils.wasCancelled(e))
                        throw new CacheException("Failed to run reduce query locally.", new QueryCancelledException());

                    throw (CacheException)e;
                }

                Throwable cause = e;

                if (e instanceof IgniteCheckedException) {
                    Throwable disconnectedErr = ((IgniteCheckedException)e).getCause(IgniteClientDisconnectedException.class);

                    if (disconnectedErr != null)
                        cause = disconnectedErr;
                }

                throw new CacheException("Failed to run reduce query locally. " + cause.getMessage(), cause);
            }
            finally {
                if (release) {
                    releaseRemoteResources(nodes, r, qryReqId, qry.distributedJoins(), mvccTracker);

                    if (!skipMergeTbl) {
                        // Drop all merge tables.
                        for (int i = 0, mapQrys = mapQueries.size(); i < mapQrys; i++)
                            fakeTable(null, i).innerTable(null);
                    }
                }
            }
        }
        finally {
            if (conn != null && release)
                U.close(conn, log);
        }
    }
}
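Note that the catch block above surfaces a client disconnect as the cause of the thrown CacheException. A hedged sketch of how a thick-client caller might unwrap and handle it; the cache instance and SQL text are illustrative assumptions:

// Hypothetical caller: re-run a SQL query after the client reconnects.
try {
    cache.query(new SqlFieldsQuery("SELECT id, name FROM Person")).getAll();
}
catch (CacheException e) {
    if (e.getCause() instanceof IgniteClientDisconnectedException) {
        ((IgniteClientDisconnectedException)e.getCause()).reconnectFuture().get();

        // Retry once the client has rejoined the cluster.
        cache.query(new SqlFieldsQuery("SELECT id, name FROM Person")).getAll();
    }
    else
        throw e;
}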