Use of org.apache.ignite.cache.query.Query in project camel by apache.
The class IgniteCacheProducer, method doQuery.
/**
 * Executes a cache query and streams the results into the out message.
 * <p>
 * The query is taken from the {@code IGNITE_CACHE_QUERY} header if present,
 * otherwise from the mandatory message body. The resulting cursor's iterator
 * is set as the out body, and the cursor is closed when the exchange finishes
 * (on success or failure) via an on-completion callback.
 *
 * @param in Incoming message carrying the query (header or body).
 * @param out Outgoing message that receives the result iterator.
 * @param exchange Exchange; receives the exception if no query body can be extracted.
 */
@SuppressWarnings("unchecked")
private void doQuery(Message in, Message out, Exchange exchange) {
    Query<Object> qry = in.getHeader(IgniteConstants.IGNITE_CACHE_QUERY, Query.class);

    if (qry == null) {
        try {
            qry = in.getMandatoryBody(Query.class);
        } catch (InvalidPayloadException ipe) {
            // No usable query anywhere on the message: fail the exchange and stop.
            exchange.setException(ipe);
            return;
        }
    }

    final QueryCursor<Object> resultCursor = cache.query(qry);

    out.setBody(resultCursor.iterator());

    // The iterator is consumed lazily downstream, so the cursor must stay open
    // until the exchange is done; close it in both completion outcomes.
    exchange.addOnCompletion(new Synchronization() {
        @Override
        public void onComplete(Exchange exchange) {
            resultCursor.close();
        }

        @Override
        public void onFailure(Exchange exchange) {
            resultCursor.close();
        }
    });
}
Use of org.apache.ignite.cache.query.Query in project ignite by apache.
The class IgniteRepositoryQuery, method execute.
/**
 * {@inheritDoc}
 */
@Override
public Object execute(Object[] prmtrs) {
    // Build the query from the invocation parameters, run it against the cache,
    // and adapt the cursor to the repository method's declared return type.
    return transformQueryCursor(prmtrs, cache.query(prepareQuery(prmtrs)));
}
Use of org.apache.ignite.cache.query.Query in project ignite by apache.
The class ReliabilityTest, method testFailover.
/**
 * Thin client failover: verifies that simple and composite cache operations
 * keep working while cluster nodes restart, and that the client fails with
 * ClientConnectionException once the whole cluster is down.
 */
@Test
public void testFailover() throws Exception {
// Partition-aware clients route each request by key, so the single-channel
// failover behavior exercised here does not apply — skip.
if (isPartitionAware())
return;
final int CLUSTER_SIZE = 3;
try (LocalIgniteCluster cluster = LocalIgniteCluster.start(CLUSTER_SIZE);
IgniteClient client = Ignition.startClient(getClientConfiguration().setReconnectThrottlingRetries(// Disable throttling.
0).setAddresses(cluster.clientAddresses().toArray(new String[CLUSTER_SIZE])))) {
final Random rnd = new Random();
final ClientCache<Integer, String> cache = client.getOrCreateCache(new ClientCacheConfiguration().setName("testFailover").setCacheMode(CacheMode.REPLICATED));
// Simple operation failover: put/get must succeed while nodes are restarted.
assertOnUnstableCluster(cluster, () -> {
Integer key = rnd.nextInt();
String val = key.toString();
cachePut(cache, key, val);
String cachedVal = cache.get(key);
assertEquals(val, cachedVal);
});
cache.clear();
// Composite operation failover: query
Map<Integer, String> data = IntStream.rangeClosed(1, 1000).boxed().collect(Collectors.toMap(i -> i, i -> String.format("String %s", i)));
assertOnUnstableCluster(cluster, () -> {
cache.putAll(data);
// Small page size forces multiple round-trips per cursor, widening the
// window in which a node restart can hit an in-flight query.
Query<Cache.Entry<Integer, String>> qry = new ScanQuery<Integer, String>().setPageSize(data.size() / 10);
try {
try (QueryCursor<Cache.Entry<Integer, String>> cur = cache.query(qry)) {
List<Cache.Entry<Integer, String>> res = cur.getAll();
assertEquals("Unexpected number of entries", data.size(), res.size());
Map<Integer, String> act = res.stream().collect(Collectors.toMap(Cache.Entry::getKey, Cache.Entry::getValue));
assertEquals("Unexpected entries", data, act);
}
} catch (ClientConnectionException ignored) {
// QueryCursor.getAll always executes on the same channel where the cursor is open,
// so failover is not possible, and the call will fail when connection drops.
}
});
// Client fails if all nodes go down.
cluster.close();
boolean igniteUnavailable = false;
try {
cachePut(cache, 1, "1");
} catch (ClientConnectionException ex) {
igniteUnavailable = true;
// The client should have tried every other address before giving up,
// recording each failed attempt as a suppressed connection exception.
Throwable[] suppressed = ex.getSuppressed();
assertEquals(CLUSTER_SIZE - 1, suppressed.length);
assertTrue(Stream.of(suppressed).allMatch(t -> t instanceof ClientConnectionException));
}
assertTrue(igniteUnavailable);
}
}
Use of org.apache.ignite.cache.query.Query in project ignite by apache.
The class GridReduceQueryExecutor, method createReduceQueryRun.
/**
 * Query run factory method: builds a {@link ReduceQueryRun} with one reducer
 * per map query, wiring each reducer to its source nodes/segments.
 *
 * @param conn H2 connection.
 * @param mapQueries Map queries.
 * @param nodes Target nodes.
 * @param pageSize Page size.
 * @param nodeToSegmentsCnt Segments per-index.
 * @param skipMergeTbl Skip merge table flag.
 * @param explain Explain query flag.
 * @param dataPageScanEnabled DataPage scan enabled flag.
 * @return Reduce query run.
 */
@NotNull
private ReduceQueryRun createReduceQueryRun(H2PooledConnection conn, List<GridCacheSqlQuery> mapQueries, Collection<ClusterNode> nodes, int pageSize, Map<ClusterNode, Integer> nodeToSegmentsCnt, boolean skipMergeTbl, boolean explain, Boolean dataPageScanEnabled) {
    final ReduceQueryRun run = new ReduceQueryRun(mapQueries.size(), pageSize, dataPageScanEnabled);

    int fakeTblIdx = 0;
    int replicatedCnt = 0;

    for (GridCacheSqlQuery mapQry : mapQueries) {
        final Reducer rdc;

        if (skipMergeTbl) {
            // No merge table needed: stream results straight through.
            rdc = UnsortedOneWayReducer.createDummy(ctx);
        }
        else {
            ReduceTable mergeTbl;

            try {
                mergeTbl = createMergeTable(conn, mapQry, explain);
            }
            catch (IgniteCheckedException e) {
                throw new IgniteException(e);
            }

            rdc = mergeTbl.getReducer();

            fakeTable(conn, fakeTblIdx++).innerTable(mergeTbl);
        }

        if (mapQry.isPartitioned())
            rdc.setSources(nodeToSegmentsCnt);
        else {
            // If the query has only replicated tables, we have to run it on a single node only.
            ClusterNode singleNode = F.rand(nodes);

            mapQry.node(singleNode.id());

            replicatedCnt++;

            // Replicated tables can have only 1 segment.
            rdc.setSources(singletonMap(singleNode, 1));
        }

        rdc.setPageSize(run.pageSize());

        run.reducers().add(rdc);
    }

    // Expected result streams: partitioned queries read every segment on every
    // node, replicated ones contribute exactly one stream each.
    int totalSegments = nodeToSegmentsCnt.values().stream().mapToInt(Integer::intValue).sum();

    run.init((run.reducers().size() - replicatedCnt) * totalSegments + replicatedCnt);

    return run;
}
Use of org.apache.ignite.cache.query.Query in project ignite by apache.
The class PerformanceStatisticsQueryTest, method runQueryAndCheck.
/**
 * Runs the given query via the configured client type (server node, thick
 * client, or thin client) and checks that exactly one query event, with the
 * expected type/text/timing, plus matching read statistics were recorded.
 *
 * @param expType Expected query type in the recorded statistics.
 * @param qry Query to execute against the default cache.
 * @param expText Expected query text in the recorded statistics.
 * @param hasLogicalReads Whether logical reads are expected on every server node.
 * @param hasPhysicalReads Whether physical (page-store) reads are expected.
 * @throws Exception If statistics collection fails.
 */
private void runQueryAndCheck(GridCacheQueryType expType, Query<?> qry, String expText, boolean hasLogicalReads, boolean hasPhysicalReads) throws Exception {
long startTime = U.currentTimeMillis();
cleanPerformanceStatisticsDir();
startCollectStatistics();
// Nodes on which the query event itself is expected to be recorded.
Collection<UUID> expNodeIds = new ArrayList<>();
if (clientType == SERVER) {
srv.cache(DEFAULT_CACHE_NAME).query(qry).getAll();
expNodeIds.add(srv.localNode().id());
} else if (clientType == CLIENT) {
client.cache(DEFAULT_CACHE_NAME).query(qry).getAll();
expNodeIds.add(client.localNode().id());
} else if (clientType == THIN_CLIENT) {
thinClient.cache(DEFAULT_CACHE_NAME).query(qry).getAll();
// Thin-client queries are recorded on the server nodes, not on the client.
expNodeIds.addAll(F.nodeIds(client.cluster().forServers().nodes()));
}
// If logical reads are expected, every server node must report them;
// nodes are removed from this set as their reads events arrive.
Set<UUID> readsNodes = new HashSet<>();
if (hasLogicalReads)
srv.cluster().forServers().nodes().forEach(node -> readsNodes.add(node.id()));
AtomicInteger queryCnt = new AtomicInteger();
AtomicInteger readsCnt = new AtomicInteger();
// Query ids seen across both event kinds; they must all refer to one query.
HashSet<Long> qryIds = new HashSet<>();
stopCollectStatisticsAndRead(new TestHandler() {
// Invoked once per recorded query event.
@Override
public void query(UUID nodeId, GridCacheQueryType type, String text, long id, long queryStartTime, long duration, boolean success) {
queryCnt.incrementAndGet();
qryIds.add(id);
assertTrue(expNodeIds.contains(nodeId));
assertEquals(expType, type);
assertEquals(expText, text);
assertTrue(queryStartTime >= startTime);
assertTrue(duration >= 0);
assertTrue(success);
}
// Invoked once per recorded query-reads event.
@Override
public void queryReads(UUID nodeId, GridCacheQueryType type, UUID queryNodeId, long id, long logicalReads, long physicalReads) {
readsCnt.incrementAndGet();
qryIds.add(id);
readsNodes.remove(nodeId);
assertTrue(expNodeIds.contains(queryNodeId));
assertEquals(expType, type);
assertTrue(logicalReads > 0);
assertTrue(hasPhysicalReads ? physicalReads > 0 : physicalReads == 0);
}
});
assertEquals(1, queryCnt.get());
assertTrue("Query reads expected on nodes: " + readsNodes, readsNodes.isEmpty());
// All events must share a single query id.
assertEquals(1, qryIds.size());
}
Aggregations