Use of javax.cache.CacheException in project ignite by apache.
The class IgniteClientReconnectFailoverAbstractTest, method reconnectFailover.
/**
* @param c Test closure.
* @throws Exception If failed.
*/
protected final void reconnectFailover(final Callable<Void> c) throws Exception {
final Ignite client = grid(serverCount());
assertTrue(client.cluster().localNode().isClient());
Ignite srv = clientRouter(client);
TestTcpDiscoverySpi srvSpi = spi(srv);
final AtomicBoolean stop = new AtomicBoolean(false);
final IgniteInternalFuture<Long> fut = GridTestUtils.runMultiThreadedAsync(new Callable<Object>() {
@Override
public Object call() throws Exception {
try {
int iter = 0;
while (!stop.get()) {
try {
c.call();
} catch (CacheException e) {
checkAndWait(e);
} catch (IgniteClientDisconnectedException e) {
checkAndWait(e);
}
if (++iter % 100 == 0)
log.info("Iteration: " + iter);
if (barrier != null)
barrier.await();
}
return null;
} catch (Throwable e) {
log.error("Unexpected error in operation thread: " + e, e);
stop.set(true);
throw e;
}
}
}, THREADS, "test-operation-thread");
final AtomicReference<CountDownLatch> disconnected = new AtomicReference<>();
final AtomicReference<CountDownLatch> reconnected = new AtomicReference<>();
IgnitePredicate<Event> p = new IgnitePredicate<Event>() {
@Override
public boolean apply(Event evt) {
if (evt.type() == EVT_CLIENT_NODE_RECONNECTED) {
info("Reconnected: " + evt);
CountDownLatch latch = reconnected.get();
assertNotNull(latch);
assertEquals(1, latch.getCount());
latch.countDown();
} else if (evt.type() == EVT_CLIENT_NODE_DISCONNECTED) {
info("Disconnected: " + evt);
CountDownLatch latch = disconnected.get();
assertNotNull(latch);
assertEquals(1, latch.getCount());
latch.countDown();
}
return true;
}
};
client.events().localListen(p, EVT_CLIENT_NODE_DISCONNECTED, EVT_CLIENT_NODE_RECONNECTED);
try {
long stopTime = System.currentTimeMillis() + TEST_TIME;
String err = null;
while (System.currentTimeMillis() < stopTime && !fut.isDone()) {
U.sleep(500);
CountDownLatch disconnectLatch = new CountDownLatch(1);
CountDownLatch reconnectLatch = new CountDownLatch(1);
disconnected.set(disconnectLatch);
reconnected.set(reconnectLatch);
UUID nodeId = client.cluster().localNode().id();
log.info("Fail client: " + nodeId);
srvSpi.failNode(nodeId, null);
if (!disconnectLatch.await(10_000, MILLISECONDS)) {
err = "Failed to wait for disconnect";
break;
}
if (!reconnectLatch.await(10_000, MILLISECONDS)) {
err = "Failed to wait for reconnect";
break;
}
barrier = new CyclicBarrier(THREADS + 1, new Runnable() {
@Override
public void run() {
barrier = null;
}
});
try {
barrier.await(10, SECONDS);
} catch (TimeoutException ignored) {
err = "Operations hang or fail with unexpected error.";
break;
}
}
if (err != null) {
log.error(err);
U.dumpThreads(log);
CyclicBarrier barrier0 = barrier;
if (barrier0 != null)
barrier0.reset();
stop.set(true);
fut.get();
fail(err);
}
stop.set(true);
fut.get();
} finally {
client.events().stopLocalListen(p);
stop.set(true);
}
}
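The pattern above leans on checkAndWait(e), which unwraps the thrown CacheException and blocks until the client rejoins the cluster. Below is a minimal sketch of that unwrap-and-wait idea against the public Ignite API; the class and method names are assumptions for illustration, not the test's actual helper.

import javax.cache.CacheException;
import org.apache.ignite.IgniteClientDisconnectedException;

// Minimal sketch (assumption): mirrors what a checkAndWait-style helper does.
final class ReconnectWaiter {
    /** Unwraps a CacheException and blocks until the client node reconnects. */
    static void awaitReconnect(CacheException e) {
        if (e.getCause() instanceof IgniteClientDisconnectedException) {
            IgniteClientDisconnectedException cause = (IgniteClientDisconnectedException)e.getCause();

            // reconnectFuture() completes once the client node rejoins the cluster.
            cause.reconnectFuture().get();
        }
        else
            throw e; // Unrelated cache error: rethrow to the operation thread.
    }
}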
Use of javax.cache.CacheException in project ignite by apache.
The class IgniteClientReconnectFailoverTest, method testReconnectTxCache.
/**
* @throws Exception If failed.
*/
public void testReconnectTxCache() throws Exception {
final Ignite client = grid(serverCount());
final IgniteCache<Integer, Integer> cache = client.cache(TX_CACHE);
assertNotNull(cache);
assertEquals(TRANSACTIONAL, cache.getConfiguration(CacheConfiguration.class).getAtomicityMode());
final IgniteTransactions txs = client.transactions();
reconnectFailover(new Callable<Void>() {
@Override
public Void call() throws Exception {
try {
TreeMap<Integer, Integer> map = new TreeMap<>();
ThreadLocalRandom rnd = ThreadLocalRandom.current();
for (int i = 0; i < 5; i++) {
Integer key = rnd.nextInt(0, 100_000);
cache.put(key, key);
assertEquals(key, cache.get(key));
map.put(key, key);
}
for (TransactionConcurrency txConcurrency : TransactionConcurrency.values()) {
try (Transaction tx = txs.txStart(txConcurrency, REPEATABLE_READ)) {
for (Map.Entry<Integer, Integer> e : map.entrySet()) {
cache.put(e.getKey(), e.getValue());
assertNotNull(cache.get(e.getKey()));
}
tx.commit();
}
}
cache.putAll(map);
Map<Integer, Integer> res = cache.getAll(map.keySet());
assertEquals(map, res);
} catch (IgniteClientDisconnectedException e) {
throw e;
} catch (IgniteException e) {
log.info("Ignore error: " + e);
} catch (CacheException e) {
if (e.getCause() instanceof IgniteClientDisconnectedException)
throw e;
else
log.info("Ignore error: " + e);
}
return null;
}
});
}
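The closure exercises explicit transactions for every concurrency mode. Below is a minimal standalone sketch of the same transactional put pattern; the cache name "txCache" and the single key/value are assumptions for illustration.

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.CacheAtomicityMode;
import org.apache.ignite.configuration.CacheConfiguration;
import org.apache.ignite.transactions.Transaction;

import static org.apache.ignite.transactions.TransactionConcurrency.PESSIMISTIC;
import static org.apache.ignite.transactions.TransactionIsolation.REPEATABLE_READ;

// Minimal sketch: a single put/get inside an explicit transaction on a TRANSACTIONAL cache.
public class TxPutExample {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            CacheConfiguration<Integer, Integer> ccfg = new CacheConfiguration<>("txCache");

            ccfg.setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL);

            IgniteCache<Integer, Integer> cache = ignite.getOrCreateCache(ccfg);

            try (Transaction tx = ignite.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
                cache.put(1, 1);

                assert cache.get(1) == 1;

                tx.commit();
            }
        }
    }
}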
Use of javax.cache.CacheException in project ignite by apache.
The class IgniteClientReconnectStreamerTest, method testStreamerReconnectInProgress.
/**
* @throws Exception If failed.
*/
public void testStreamerReconnectInProgress() throws Exception {
Ignite client = grid(serverCount());
assertTrue(client.cluster().localNode().isClient());
Ignite srv = clientRouter(client);
final IgniteCache<Object, Object> srvCache = srv.cache(CACHE_NAME);
final IgniteDataStreamer<Integer, Integer> streamer = client.dataStreamer(CACHE_NAME);
BlockTcpCommunicationSpi commSpi = commSpi(srv);
commSpi.blockMessage(DataStreamerResponse.class);
final IgniteInternalFuture<Object> fut = GridTestUtils.runAsync(new Callable<Object>() {
@Override
public Object call() throws Exception {
try {
for (int i = 0; i < 50; i++) streamer.addData(i, i);
streamer.flush();
} catch (CacheException e) {
checkAndWait(e);
return true;
} finally {
streamer.close();
}
return false;
}
});
// Check that the client is waiting for the operation to complete.
GridTestUtils.assertThrows(log, new Callable<Object>() {
@Override
public Object call() throws Exception {
return fut.get(200);
}
}, IgniteFutureTimeoutCheckedException.class, null);
assertNotDone(fut);
commSpi.unblockMessage();
reconnectClientNode(client, srv, null);
assertTrue((Boolean) fut.get(2, TimeUnit.SECONDS));
checkStreamerClosed(streamer);
IgniteDataStreamer<Integer, Integer> streamer2 = client.dataStreamer(CACHE_NAME);
for (int i = 0; i < 50; i++) streamer2.addData(i, i);
streamer2.close();
GridTestUtils.waitForCondition(new GridAbsPredicate() {
@Override
public boolean apply() {
return srvCache.localSize() == 50;
}
}, 2000L);
assertEquals(50, srvCache.localSize());
}
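After the reconnect the old streamer is expected to be closed, so a fresh one has to be created before streaming again. Below is a minimal sketch of streaming data and recovering from the disconnect the test provokes; the cache name "myCache" is an assumption for illustration.

import javax.cache.CacheException;
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteClientDisconnectedException;
import org.apache.ignite.IgniteDataStreamer;

// Minimal sketch: stream entries and, on a client disconnect, wait for the reconnect
// future so the caller can retry with a new streamer instance.
final class StreamerReconnectExample {
    static void streamOnce(Ignite client) {
        try (IgniteDataStreamer<Integer, Integer> streamer = client.dataStreamer("myCache")) {
            for (int i = 0; i < 50; i++)
                streamer.addData(i, i);

            streamer.flush();
        }
        catch (CacheException e) {
            if (e.getCause() instanceof IgniteClientDisconnectedException) {
                // Block until the client node rejoins; a retry must use a new streamer.
                ((IgniteClientDisconnectedException)e.getCause()).reconnectFuture().get();
            }
            else
                throw e;
        }
    }
}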
Use of javax.cache.CacheException in project ignite by apache.
The class CachePutEventListenerErrorSelfTest, method doTest.
/**
* @param cacheMode Cache mode.
* @param atomicityMode Atomicity mode.
* @throws Exception If failed.
*/
private void doTest(CacheMode cacheMode, CacheAtomicityMode atomicityMode) throws Exception {
Ignite ignite = grid("client");
try {
CacheConfiguration<Integer, Integer> cfg = defaultCacheConfiguration();
cfg.setName("cache");
cfg.setCacheMode(cacheMode);
cfg.setAtomicityMode(atomicityMode);
IgniteCache<Integer, Integer> cache = ignite.createCache(cfg);
IgniteFuture f = cache.putAsync(0, 0);
try {
f.get(2000);
assert false : "Exception was not thrown";
} catch (CacheException e) {
info("Caught expected exception: " + e);
}
} finally {
ignite.destroyCache("cache");
}
}
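The assertion relies on a failing put-event listener being propagated to the caller as a CacheException from the async put future. Below is a minimal sketch of checking an async put for that failure; the helper name and the pre-existing cache instance are assumptions for illustration.

import javax.cache.CacheException;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.lang.IgniteFuture;

// Minimal sketch: wait on an async put with a timeout and treat CacheException
// as the failure signal raised by the listener.
final class AsyncPutCheck {
    static boolean putFailed(IgniteCache<Integer, Integer> cache, int key, int val) {
        IgniteFuture<Void> fut = cache.putAsync(key, val);

        try {
            fut.get(2000); // Timeout in milliseconds, matching the test above.

            return false;
        }
        catch (CacheException e) {
            return true; // The listener error reached the caller as expected.
        }
    }
}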
Use of javax.cache.CacheException in project ignite by apache.
The class IgniteH2Indexing, method queryDistributedSqlFields.
/** {@inheritDoc} */
@Override
public FieldsQueryCursor<List<?>> queryDistributedSqlFields(String schemaName, SqlFieldsQuery qry, boolean keepBinary, GridQueryCancel cancel, @Nullable Integer mainCacheId) {
final String sqlQry = qry.getSql();
Connection c = connectionForSchema(schemaName);
final boolean enforceJoinOrder = qry.isEnforceJoinOrder();
final boolean distributedJoins = qry.isDistributedJoins();
final boolean grpByCollocated = qry.isCollocated();
final DistributedJoinMode distributedJoinMode = distributedJoinMode(qry.isLocal(), distributedJoins);
GridCacheTwoStepQuery twoStepQry = null;
List<GridQueryFieldMetadata> meta;
final H2TwoStepCachedQueryKey cachedQryKey = new H2TwoStepCachedQueryKey(schemaName, sqlQry, grpByCollocated, distributedJoins, enforceJoinOrder, qry.isLocal());
H2TwoStepCachedQuery cachedQry = twoStepCache.get(cachedQryKey);
if (cachedQry != null) {
twoStepQry = cachedQry.query().copy();
meta = cachedQry.meta();
} else {
final UUID locNodeId = ctx.localNodeId();
// Here we will just parse the statement, no need to optimize it at all.
H2Utils.setupConnection(c, /*distributedJoins*/false, /*enforceJoinOrder*/true);
GridH2QueryContext.set(new GridH2QueryContext(locNodeId, locNodeId, 0, PREPARE).distributedJoinMode(distributedJoinMode));
PreparedStatement stmt = null;
Prepared prepared;
boolean cachesCreated = false;
try {
try {
while (true) {
try {
// Do not cache this statement because the whole query object will be cached later on.
stmt = prepareStatement(c, sqlQry, false);
break;
} catch (SQLException e) {
if (!cachesCreated && (e.getErrorCode() == ErrorCode.SCHEMA_NOT_FOUND_1 || e.getErrorCode() == ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1 || e.getErrorCode() == ErrorCode.INDEX_NOT_FOUND_1)) {
try {
ctx.cache().createMissingQueryCaches();
} catch (IgniteCheckedException ignored) {
throw new CacheException("Failed to create missing caches.", e);
}
cachesCreated = true;
} else
throw new IgniteSQLException("Failed to parse query: " + sqlQry, IgniteQueryErrorCode.PARSING, e);
}
}
prepared = GridSqlQueryParser.prepared(stmt);
if (qry instanceof JdbcSqlFieldsQuery && ((JdbcSqlFieldsQuery) qry).isQuery() != prepared.isQuery())
throw new IgniteSQLException("Given statement type does not match that declared by JDBC driver", IgniteQueryErrorCode.STMT_TYPE_MISMATCH);
if (prepared.isQuery()) {
bindParameters(stmt, F.asList(qry.getArgs()));
twoStepQry = GridSqlQuerySplitter.split((JdbcPreparedStatement) stmt, qry.getArgs(), grpByCollocated, distributedJoins, enforceJoinOrder, this);
assert twoStepQry != null;
}
} finally {
GridH2QueryContext.clearThreadLocal();
}
// It is a DML statement if we did not create a twoStepQuery.
if (twoStepQry == null) {
if (DmlStatementsProcessor.isDmlStatement(prepared)) {
try {
return dmlProc.updateSqlFieldsDistributed(schemaName, stmt, qry, cancel);
} catch (IgniteCheckedException e) {
throw new IgniteSQLException("Failed to execute DML statement [stmt=" + sqlQry + ", params=" + Arrays.deepToString(qry.getArgs()) + "]", e);
}
}
if (DdlStatementsProcessor.isDdlStatement(prepared)) {
try {
return ddlProc.runDdlStatement(sqlQry, stmt);
} catch (IgniteCheckedException e) {
throw new IgniteSQLException("Failed to execute DDL statement [stmt=" + sqlQry + ']', e);
}
}
}
LinkedHashSet<Integer> caches0 = new LinkedHashSet<>();
assert twoStepQry != null;
int tblCnt = twoStepQry.tablesCount();
if (mainCacheId != null)
caches0.add(mainCacheId);
if (tblCnt > 0) {
for (QueryTable tblKey : twoStepQry.tables()) {
GridH2Table tbl = dataTable(tblKey);
int cacheId = CU.cacheId(tbl.cacheName());
caches0.add(cacheId);
}
}
if (caches0.isEmpty())
twoStepQry.local(true);
else {
// Prohibit usage of indices with different numbers of segments in the same query.
List<Integer> cacheIds = new ArrayList<>(caches0);
checkCacheIndexSegmentation(cacheIds);
twoStepQry.cacheIds(cacheIds);
twoStepQry.local(qry.isLocal());
}
meta = H2Utils.meta(stmt.getMetaData());
} catch (IgniteCheckedException e) {
throw new CacheException("Failed to bind parameters: [qry=" + sqlQry + ", params=" + Arrays.deepToString(qry.getArgs()) + "]", e);
} catch (SQLException e) {
throw new IgniteSQLException(e);
} finally {
U.close(stmt, log);
}
}
if (log.isDebugEnabled())
log.debug("Parsed query: `" + sqlQry + "` into two step query: " + twoStepQry);
twoStepQry.pageSize(qry.getPageSize());
if (cancel == null)
cancel = new GridQueryCancel();
int[] partitions = qry.getPartitions();
if (partitions == null && twoStepQry.derivedPartitions() != null) {
try {
partitions = calculateQueryPartitions(twoStepQry.derivedPartitions(), qry.getArgs());
} catch (IgniteCheckedException e) {
throw new CacheException("Failed to calculate derived partitions: [qry=" + sqlQry + ", params=" + Arrays.deepToString(qry.getArgs()) + "]", e);
}
}
QueryCursorImpl<List<?>> cursor = new QueryCursorImpl<>(runQueryTwoStep(schemaName, twoStepQry, keepBinary, enforceJoinOrder, qry.getTimeout(), cancel, qry.getArgs(), partitions), cancel);
cursor.fieldsMeta(meta);
if (cachedQry == null && !twoStepQry.explain()) {
cachedQry = new H2TwoStepCachedQuery(meta, twoStepQry.copy());
twoStepCache.putIfAbsent(cachedQryKey, cachedQry);
}
return cursor;
}
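From the caller's point of view, the CacheException instances built in this method (missing caches, parameter binding, derived-partition calculation) surface from the public IgniteCache.query() call. Below is a minimal sketch of that call site; the table, columns and argument are assumptions for illustration.

import java.util.List;
import javax.cache.CacheException;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.cache.query.FieldsQueryCursor;
import org.apache.ignite.cache.query.SqlFieldsQuery;

// Minimal sketch: run a distributed SQL fields query and handle the CacheException
// that queryDistributedSqlFields may throw under the hood.
final class SqlFieldsQueryExample {
    static void runQuery(IgniteCache<?, ?> cache) {
        SqlFieldsQuery qry = new SqlFieldsQuery("select id, name from Person where id > ?").setArgs(10);

        try (FieldsQueryCursor<List<?>> cursor = cache.query(qry)) {
            for (List<?> row : cursor)
                System.out.println(row);
        }
        catch (CacheException e) {
            // Parsing, binding or partition-calculation failures arrive here.
            System.err.println("Query failed: " + e.getMessage());
        }
    }
}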