Use of javax.cache.CacheException in project ignite by apache.
In class IgniteCacheDistributedQueryCancelSelfTest, method testQueryResponseFailCode:
/** */
public void testQueryResponseFailCode() throws Exception {
    try (Ignite client = startGrid("client")) {
        CacheConfiguration<Integer, Integer> cfg = new CacheConfiguration<>(DEFAULT_CACHE_NAME);

        cfg.setSqlFunctionClasses(Functions.class);
        cfg.setIndexedTypes(Integer.class, Integer.class);
        cfg.setName("test");

        IgniteCache<Integer, Integer> cache = client.getOrCreateCache(cfg);

        cache.put(1, 1);

        QueryCursor<List<?>> qry = cache.query(new SqlFieldsQuery("select fail() from Integer"));

        try {
            qry.getAll();

            fail();
        }
        catch (Exception e) {
            assertTrue(e.getCause() instanceof CacheException);
        }
    }
}
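The fail() SQL function used in the query above comes from the Functions class registered through setSqlFunctionClasses(). That class is not shown in this snippet; a minimal sketch of what it plausibly looks like (the exception type and message are assumptions) is:

/** Holder for test SQL functions; registered via cfg.setSqlFunctionClasses(Functions.class). */
public static class Functions {
    /** Invoked per row by "select fail() from Integer"; always throws, failing the query remotely. */
    @QuerySqlFunction
    public static int fail() {
        throw new IllegalArgumentException("Intentional failure for the test.");
    }
}

The @QuerySqlFunction annotation (org.apache.ignite.cache.query.annotations.QuerySqlFunction) is what exposes a public static method to Ignite SQL.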
Use of javax.cache.CacheException in project ignite by apache.
In class IgniteClientReconnectCacheTest, method checkOperationInProgressFails:
/**
* @param client Client.
* @param ccfg Cache configuration.
* @param msgToBlock Message to block.
* @param c Cache operation closure.
* @throws Exception If failed.
*/
private void checkOperationInProgressFails(IgniteEx client,
    final CacheConfiguration<Object, Object> ccfg,
    Class<?> msgToBlock,
    final IgniteInClosure<IgniteCache<Object, Object>> c) throws Exception {
    Ignite srv = clientRouter(client);

    TestTcpDiscoverySpi srvSpi = spi(srv);

    final IgniteCache<Object, Object> cache = client.getOrCreateCache(ccfg);

    for (int i = 0; i < SRV_CNT; i++) {
        TestCommunicationSpi srvCommSpi = (TestCommunicationSpi)grid(i).configuration().getCommunicationSpi();

        srvCommSpi.blockMessages(msgToBlock, client.localNode().id());
    }

    IgniteInternalFuture<?> fut = GridTestUtils.runAsync(new Callable<Object>() {
        @Override public Object call() throws Exception {
            IgniteClientDisconnectedException e0 = null;

            try {
                c.apply(cache);

                fail();
            }
            catch (IgniteClientDisconnectedException e) {
                log.info("Expected exception: " + e);

                e0 = e;
            }
            catch (CacheException e) {
                log.info("Expected exception: " + e);

                assertTrue("Unexpected cause: " + e.getCause(), e.getCause() instanceof IgniteClientDisconnectedException);

                e0 = (IgniteClientDisconnectedException)e.getCause();
            }

            assertNotNull(e0);
            assertNotNull(e0.reconnectFuture());

            e0.reconnectFuture().get();

            c.apply(cache);

            return null;
        }
    });

    Thread.sleep(1000);

    assertNotDone(fut);

    log.info("Fail client: " + client.localNode().id());

    srvSpi.failNode(client.localNode().id(), null);

    try {
        fut.get();
    }
    finally {
        for (int i = 0; i < SRV_CNT; i++)
            ((TestCommunicationSpi)grid(i).configuration().getCommunicationSpi()).stopBlock(false);
    }

    cache.put(1, 1);

    GridTestUtils.waitForCondition(new GridAbsPredicate() {
        @Override public boolean apply() {
            return cache.get(1) != null;
        }
    }, 5000);

    assertEquals(1, cache.get(1));
}
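For illustration, a caller could exercise this helper as below; the blocked message class and the closure body are assumptions here, since the real test invokes it with several different internal response classes:

// Hypothetical call: block the update responses sent from servers to the client
// so the put stays in flight, then verify it fails with
// IgniteClientDisconnectedException and succeeds again after reconnect.
checkOperationInProgressFails(client, ccfg, GridNearAtomicUpdateResponse.class,
    new IgniteInClosure<IgniteCache<Object, Object>>() {
        @Override public void apply(IgniteCache<Object, Object> cache) {
            cache.put(1, 1);
        }
    });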
Use of javax.cache.CacheException in project ignite by apache.
In class IgniteCacheP2pUnmarshallingRebalanceErrorTest, method testResponseMessageOnUnmarshallingFailed:
/** {@inheritDoc} */
@Override public void testResponseMessageOnUnmarshallingFailed() throws Exception {
    // GridDhtPartitionSupplyMessage unmarshalling failed test.
    readCnt.set(Integer.MAX_VALUE);

    for (int i = 0; i <= 20; i++)
        jcache(0).put(new TestKey(String.valueOf(++key)), "");

    readCnt.set(1);

    startGrid(3);

    // GridDhtPartitionSupplyMessage unmarshalling fails, but the IO manager does not hang up.
    Thread.sleep(1000);

    // GridDhtForceKeysRequest unmarshalling failed test.
    stopGrid(3);

    readCnt.set(Integer.MAX_VALUE);

    for (int i = 0; i <= 100; i++)
        jcache(0).put(new TestKey(String.valueOf(++key)), "");

    // Custom rebalanceDelay set at cfg.
    startGrid(10);

    Affinity<Object> aff = affinity(grid(10).cache(DEFAULT_CACHE_NAME));

    GridCacheContext cctx = grid(10).context().cache().cache(DEFAULT_CACHE_NAME).context();

    List<List<ClusterNode>> affAssign = cctx.affinity().assignment(cctx.affinity().affinityTopologyVersion()).idealAssignment();

    Integer part = null;

    ClusterNode node = grid(10).localNode();

    for (int p = 0; p < aff.partitions(); p++) {
        if (affAssign.get(p).get(0).equals(node)) {
            part = p;

            break;
        }
    }

    assertNotNull(part);

    long stopTime = U.currentTimeMillis() + 5000;

    while (!part.equals(aff.partition(new TestKey(String.valueOf(key))))) {
        --key;

        if (U.currentTimeMillis() > stopTime)
            fail();
    }

    readCnt.set(1);

    try {
        jcache(10).get(new TestKey(String.valueOf(key)));

        assert false : "p2p marshalling failed, but error response was not sent";
    }
    catch (CacheException e) {
        assert X.hasCause(e, IOException.class);
    }
}
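TestKey and the static readCnt counter are defined in the parent test class and are not shown here; a sketch of how such a key can force unmarshalling failures (field name and message are assumptions) is:

/** Key whose deserialization fails once the shared read budget is exhausted. */
public static class TestKey implements Externalizable {
    /** Remaining successful reads; the test sets this to make the next unmarshal throw. */
    protected static final AtomicInteger readCnt = new AtomicInteger();

    /** */
    private String field;

    /** Required by Externalizable. */
    public TestKey() {
    }

    /** */
    public TestKey(String field) {
        this.field = field;
    }

    @Override public void writeExternal(ObjectOutput out) throws IOException {
        out.writeObject(field);
    }

    @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
        field = (String)in.readObject();

        // Deliberately fail deserialization when the budget runs out.
        if (readCnt.decrementAndGet() <= 0)
            throw new IOException("Class can not be unmarshalled.");
    }

    @Override public boolean equals(Object o) {
        if (this == o)
            return true;

        if (o == null || getClass() != o.getClass())
            return false;

        return field.equals(((TestKey)o).field);
    }

    @Override public int hashCode() {
        return field.hashCode();
    }
}

With readCnt.set(1), the very next readExternal() decrements the counter to zero and throws IOException, which is exactly the cause that the CacheException assertion at the end of the test checks for.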
Use of javax.cache.CacheException in project ignite by apache.
In class GridReduceQueryExecutor, method stableDataNodes:
/**
* @param isReplicatedOnly If we must only have replicated caches.
* @param topVer Topology version.
* @param cacheIds Participating cache IDs.
* @param parts Partitions.
* @return Data nodes or {@code null} if repartitioning started and we need to retry.
*/
private Map<ClusterNode, IntArray> stableDataNodes(boolean isReplicatedOnly, AffinityTopologyVersion topVer, List<Integer> cacheIds, int[] parts) {
    GridCacheContext<?, ?> cctx = cacheContext(cacheIds.get(0));

    Map<ClusterNode, IntArray> map = stableDataNodesMap(topVer, cctx, parts);

    // Note: this is a live key-set view, so retaining on it below also shrinks the map.
    Set<ClusterNode> nodes = map.keySet();

    if (F.isEmpty(map))
        throw new CacheException("Failed to find data nodes for cache: " + cctx.name());

    for (int i = 1; i < cacheIds.size(); i++) {
        GridCacheContext<?, ?> extraCctx = cacheContext(cacheIds.get(i));

        String extraCacheName = extraCctx.name();

        if (extraCctx.isLocal())
            // No consistency guarantees for local caches.
            continue;

        if (isReplicatedOnly && !extraCctx.isReplicated())
            throw new CacheException("Queries running on replicated cache should not contain JOINs " +
                "with partitioned tables [replicatedCache=" + cctx.name() + ", partitionedCache=" + extraCacheName + "]");

        Set<ClusterNode> extraNodes = stableDataNodesMap(topVer, extraCctx, parts).keySet();

        if (F.isEmpty(extraNodes))
            throw new CacheException("Failed to find data nodes for cache: " + extraCacheName);

        if (isReplicatedOnly && extraCctx.isReplicated()) {
            nodes.retainAll(extraNodes);

            if (map.isEmpty()) {
                if (isPreloadingActive(cacheIds))
                    // Retry.
                    return null;
                else
                    throw new CacheException("Caches have distinct sets of data nodes [cache1=" + cctx.name() +
                        ", cache2=" + extraCacheName + "]");
            }
        }
        else if (!isReplicatedOnly && extraCctx.isReplicated()) {
            if (!extraNodes.containsAll(nodes))
                if (isPreloadingActive(cacheIds))
                    // Retry.
                    return null;
                else
                    throw new CacheException("Caches have distinct sets of data nodes [cache1=" + cctx.name() +
                        ", cache2=" + extraCacheName + "]");
        }
        else if (!isReplicatedOnly && !extraCctx.isReplicated()) {
            if (!extraNodes.equals(nodes))
                if (isPreloadingActive(cacheIds))
                    // Retry.
                    return null;
                else
                    throw new CacheException("Caches have distinct sets of data nodes [cache1=" + cctx.name() +
                        ", cache2=" + extraCacheName + "]");
        }
        else
            throw new IllegalStateException();
    }

    return map;
}
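A null result is a retry signal rather than an error; a caller honoring that contract might loop roughly as below (readyTopologyVersion() is an assumed placeholder for however the surrounding code obtains the current topology version):

// Hedged sketch of the retry contract: null means repartitioning is in
// progress, so refresh the topology version and ask again.
Map<ClusterNode, IntArray> nodes;

for (;;) {
    AffinityTopologyVersion topVer = readyTopologyVersion(); // Assumed helper.

    nodes = stableDataNodes(isReplicatedOnly, topVer, cacheIds, parts);

    if (nodes != null)
        break;

    Thread.sleep(10); // Brief backoff while preloading settles (enclosing method must allow InterruptedException).
}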
Use of javax.cache.CacheException in project ignite by apache.
In class GridReduceQueryExecutor, method partitionedUnstableDataNodes:
/**
* Calculates partition mapping for partitioned cache on unstable topology.
*
* @param cacheIds Cache IDs.
* @return Partition mapping or {@code null} if we can't calculate it due to repartitioning and we need to retry.
*/
@SuppressWarnings("unchecked")
private Map<ClusterNode, IntArray> partitionedUnstableDataNodes(List<Integer> cacheIds) {
    // If the main cache is replicated, just replace it with the first partitioned one.
    GridCacheContext<?, ?> cctx = findFirstPartitioned(cacheIds);

    final int partsCnt = cctx.affinity().partitions();

    if (cacheIds.size() > 1) {
        // Check correct number of partitions for partitioned caches.
        for (Integer cacheId : cacheIds) {
            GridCacheContext<?, ?> extraCctx = cacheContext(cacheId);

            if (extraCctx.isReplicated() || extraCctx.isLocal())
                continue;

            int parts = extraCctx.affinity().partitions();

            if (parts != partsCnt)
                throw new CacheException("Number of partitions must be the same for correct collocation [cache1=" +
                    cctx.name() + ", parts1=" + partsCnt + ", cache2=" + extraCctx.name() + ", parts2=" + parts + "]");
        }
    }

    Set<ClusterNode>[] partLocs = new Set[partsCnt];

    // Fill partition locations for the main cache.
    for (int p = 0; p < partsCnt; p++) {
        List<ClusterNode> owners = cctx.topology().owners(p);

        if (F.isEmpty(owners)) {
            // Handle special case: no mapping is configured for a partition.
            if (F.isEmpty(cctx.affinity().assignment(NONE).get(p))) {
                // Mark unmapped partition.
                partLocs[p] = UNMAPPED_PARTS;

                continue;
            }
            else if (!F.isEmpty(dataNodes(cctx.name(), NONE)))
                // Retry.
                return null;

            throw new CacheException("Failed to find data nodes [cache=" + cctx.name() + ", part=" + p + "]");
        }

        partLocs[p] = new HashSet<>(owners);
    }

    if (cacheIds.size() > 1) {
        // We need this for logical collocation between different partitioned caches with the same affinity.
        for (Integer cacheId : cacheIds) {
            GridCacheContext<?, ?> extraCctx = cacheContext(cacheId);

            // This is possible if we have replaced a replicated cache with a partitioned one earlier.
            if (cctx == extraCctx)
                continue;

            if (extraCctx.isReplicated() || extraCctx.isLocal())
                continue;

            for (int p = 0, parts = extraCctx.affinity().partitions(); p < parts; p++) {
                List<ClusterNode> owners = extraCctx.topology().owners(p);

                if (partLocs[p] == UNMAPPED_PARTS)
                    // Skip unmapped partitions.
                    continue;

                if (F.isEmpty(owners)) {
                    if (!F.isEmpty(dataNodes(extraCctx.name(), NONE)))
                        // Retry.
                        return null;

                    throw new CacheException("Failed to find data nodes [cache=" + extraCctx.name() + ", part=" + p + "]");
                }

                if (partLocs[p] == null)
                    partLocs[p] = new HashSet<>(owners);
                else {
                    // Intersection of owners.
                    partLocs[p].retainAll(owners);

                    if (partLocs[p].isEmpty())
                        // Intersection is empty -> retry.
                        return null;
                }
            }
        }

        // Filter out nodes where not all the replicated caches are loaded.
        for (Integer cacheId : cacheIds) {
            GridCacheContext<?, ?> extraCctx = cacheContext(cacheId);

            if (!extraCctx.isReplicated())
                continue;

            Set<ClusterNode> dataNodes = replicatedUnstableDataNodes(extraCctx);

            if (F.isEmpty(dataNodes))
                // Retry.
                return null;

            for (Set<ClusterNode> partLoc : partLocs) {
                if (partLoc == UNMAPPED_PARTS)
                    // Skip unmapped partition.
                    continue;

                partLoc.retainAll(dataNodes);

                if (partLoc.isEmpty())
                    // Retry.
                    return null;
            }
        }
    }

    // Collect the final partition mapping.
    Map<ClusterNode, IntArray> res = new HashMap<>();

    // Partitions in each IntArray will be added in ascending order; this ordering is important.
    for (int p = 0; p < partLocs.length; p++) {
        Set<ClusterNode> pl = partLocs[p];

        // Skip unmapped partitions.
        if (pl == UNMAPPED_PARTS)
            continue;

        assert !F.isEmpty(pl) : pl;

        ClusterNode n = pl.size() == 1 ? F.first(pl) : F.rand(pl);

        IntArray parts = res.get(n);

        if (parts == null)
            res.put(n, parts = new IntArray());

        parts.add(p);
    }

    return res;
}
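The UNMAPPED_PARTS checks above rely on identity comparison (==) against a dedicated sentinel set, so a genuinely empty owner set can never be confused with "unmapped". A standalone sketch of that pattern (all names here are illustrative, not from the Ignite source):

import java.util.HashSet;
import java.util.Set;

/** Illustrative sentinel-set pattern, mirroring how UNMAPPED_PARTS is used above. */
final class SentinelSketch {
    /** Dedicated instance; compared by identity, never mutated or handed out. */
    private static final Set<String> UNMAPPED = new HashSet<>();

    public static void main(String[] args) {
        @SuppressWarnings("unchecked")
        Set<String>[] slots = new Set[] {UNMAPPED, new HashSet<>()};

        for (Set<String> slot : slots) {
            if (slot == UNMAPPED) // Identity, not equals(): an empty real set is still "mapped".
                System.out.println("unmapped slot, skipping");
            else
                System.out.println("real slot with " + slot.size() + " owners");
        }
    }
}

Using a dedicated new HashSet<>() instance matters: a shared value such as Collections.emptySet() could be produced by unrelated code and would defeat the identity check.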