use of org.apache.ignite.cache.query.SqlFieldsQuery in project ignite by apache.
the class IgniteCacheProxy method convertToBinary.
/**
 * Convert query arguments to BinaryObjects if the binary marshaller is used.
 *
 * @param qry Query.
 */
private void convertToBinary(final Query qry) {
    if (ctx.binaryMarshaller()) {
        if (qry instanceof SqlQuery) {
            final SqlQuery sqlQry = (SqlQuery)qry;

            convertToBinary(sqlQry.getArgs());
        }
        else if (qry instanceof SpiQuery) {
            final SpiQuery spiQry = (SpiQuery)qry;

            convertToBinary(spiQry.getArgs());
        }
        else if (qry instanceof SqlFieldsQuery) {
            final SqlFieldsQuery fieldsQry = (SqlFieldsQuery)qry;

            convertToBinary(fieldsQry.getArgs());
        }
    }
}
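For context, here is a minimal, self-contained sketch of the user-side call that reaches this hook. It is not taken from the Ignite sources; the cache name, indexed types and SQL text are illustrative assumptions. The cache proxy intercepts the query before execution, and the argument array set via setArgs(...) is what convertToBinary(...) receives.

// Sketch only: shows where the convertToBinary(...) hook sits on the call path.
// Cache name, indexed types and SQL are assumptions for illustration.
import java.util.List;

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.query.SqlFieldsQuery;
import org.apache.ignite.configuration.CacheConfiguration;

public class ConvertToBinaryUsage {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            CacheConfiguration<Integer, Integer> ccfg = new CacheConfiguration<>("demo");

            ccfg.setIndexedTypes(Integer.class, Integer.class);

            IgniteCache<Integer, Integer> cache = ignite.getOrCreateCache(ccfg);

            cache.put(1, 42);

            // The argument array set below is passed through convertToBinary(...)
            // before execution; POJO arguments (not shown here) are the ones that
            // actually get rewrapped as BinaryObjects.
            SqlFieldsQuery qry = new SqlFieldsQuery("select _val from Integer where _val > ?")
                .setArgs(10);

            List<List<?>> rows = cache.query(qry).getAll();

            System.out.println("Rows: " + rows);
        }
    }
}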
use of org.apache.ignite.cache.query.SqlFieldsQuery in project ignite by apache.
the class OdbcRequestHandler method executeQuery.
/**
 * {@link OdbcQueryExecuteRequest} command handler.
 *
 * @param req Execute query request.
 * @return Response.
 */
private SqlListenerResponse executeQuery(OdbcQueryExecuteRequest req) {
    int cursorCnt = qryCursors.size();

    if (maxCursors > 0 && cursorCnt >= maxCursors)
        return new OdbcResponse(SqlListenerResponse.STATUS_FAILED, "Too many opened cursors (either close other " +
            "opened cursors or increase the limit through OdbcConfiguration.setMaxOpenCursors()) " +
            "[maximum=" + maxCursors + ", current=" + cursorCnt + ']');

    long qryId = QRY_ID_GEN.getAndIncrement();

    try {
        String sql = OdbcEscapeUtils.parse(req.sqlQuery());

        if (log.isDebugEnabled())
            log.debug("ODBC query parsed [reqId=" + req.requestId() + ", original=" + req.sqlQuery() +
                ", parsed=" + sql + ']');

        SqlFieldsQuery qry = new SqlFieldsQuery(sql);

        qry.setArgs(req.arguments());
        qry.setDistributedJoins(distributedJoins);
        qry.setEnforceJoinOrder(enforceJoinOrder);

        IgniteCache<Object, Object> cache0 = ctx.grid().cache(req.cacheName());

        if (cache0 == null)
            return new OdbcResponse(SqlListenerResponse.STATUS_FAILED,
                "Cache doesn't exist (did you configure it?): " + req.cacheName());

        IgniteCache<Object, Object> cache = cache0.withKeepBinary();

        if (cache == null)
            return new OdbcResponse(SqlListenerResponse.STATUS_FAILED,
                "Can not get cache with keep binary: " + req.cacheName());

        QueryCursor qryCur = cache.query(qry);

        qryCursors.put(qryId, new IgniteBiTuple<QueryCursor, Iterator>(qryCur, null));

        List<?> fieldsMeta = ((QueryCursorImpl)qryCur).fieldsMeta();

        OdbcQueryExecuteResult res = new OdbcQueryExecuteResult(qryId, convertMetadata(fieldsMeta));

        return new OdbcResponse(res);
    }
    catch (Exception e) {
        qryCursors.remove(qryId);

        U.error(log, "Failed to execute SQL query [reqId=" + req.requestId() + ", req=" + req + ']', e);

        return new OdbcResponse(SqlListenerResponse.STATUS_FAILED, e.toString());
    }
}
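ODBC-specific plumbing aside, the core of this handler is: build a SqlFieldsQuery from the raw SQL, apply the arguments and join settings, run it against the keep-binary view of the cache, and keep the open cursor under a generated id. Below is a simplified sketch of that flow using only public API; the class name and the hard-coded join flags are assumptions, not values from the Ignite sources.

// Simplified sketch of the handler's core flow, with the ODBC plumbing removed.
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicLong;

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.cache.query.QueryCursor;
import org.apache.ignite.cache.query.SqlFieldsQuery;

public class FieldsQueryExecutor {
    /** Query id generator, mirroring QRY_ID_GEN in the handler. */
    private final AtomicLong qryIdGen = new AtomicLong();

    /** Open cursors by query id. */
    private final ConcurrentMap<Long, QueryCursor<List<?>>> cursors = new ConcurrentHashMap<>();

    private final Ignite ignite;

    public FieldsQueryExecutor(Ignite ignite) {
        this.ignite = ignite;
    }

    /** Executes the SQL and returns the id under which the cursor is tracked. */
    public long execute(String cacheName, String sql, Object... args) {
        long qryId = qryIdGen.getAndIncrement();

        SqlFieldsQuery qry = new SqlFieldsQuery(sql)
            .setArgs(args)
            .setDistributedJoins(true)
            .setEnforceJoinOrder(false);

        // Query the keep-binary view so values are not deserialized on the server.
        IgniteCache<Object, Object> cache = ignite.cache(cacheName).withKeepBinary();

        QueryCursor<List<?>> cur = cache.query(qry);

        cursors.put(qryId, cur);

        return qryId;
    }

    /** Closes and forgets a previously opened cursor. */
    public void close(long qryId) {
        QueryCursor<List<?>> cur = cursors.remove(qryId);

        if (cur != null)
            cur.close();
    }
}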
use of org.apache.ignite.cache.query.SqlFieldsQuery in project ignite by apache.
the class H2IndexingAbstractGeoSelfTest method createDynamicIndex.
/**
 * Create dynamic index.
 *
 * @param cache Cache.
 * @param entity Entity.
 * @param idx Index.
 * @throws Exception If failed.
 */
private void createDynamicIndex(IgniteCache cache, QueryEntity entity, QueryIndex idx) throws Exception {
    boolean spatial = idx.getIndexType() == QueryIndexType.GEOSPATIAL;

    GridStringBuilder sb = new SB("CREATE ")
        .a(spatial ? "SPATIAL " : "")
        .a("INDEX ")
        .a("\"" + idx.getName() + "\"")
        .a(" ON ")
        .a(QueryUtils.tableName(entity))
        .a(" (");

    boolean first = true;

    for (Map.Entry<String, Boolean> fieldEntry : idx.getFields().entrySet()) {
        if (first)
            first = false;
        else
            sb.a(", ");

        String name = fieldEntry.getKey();
        boolean asc = fieldEntry.getValue();

        sb.a("\"" + name + "\"").a(" ").a(asc ? "ASC" : "DESC");
    }

    sb.a(')');

    String sql = sb.toString();

    cache.query(new SqlFieldsQuery(sql)).getAll();
}
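The same DDL can be assembled without Ignite's internal GridStringBuilder/SB and QueryUtils helpers. The sketch below uses a plain StringBuilder and submits the statement through SqlFieldsQuery; the example table name "EnemyCamp" and column "coords" are assumptions for illustration, not values taken from the test.

// Standalone variant using only public API.
import java.util.LinkedHashMap;
import java.util.Map;

import org.apache.ignite.IgniteCache;
import org.apache.ignite.cache.query.SqlFieldsQuery;

public class DynamicIndexDdl {
    /** Builds and executes a CREATE [SPATIAL] INDEX statement via SqlFieldsQuery. */
    public static void createIndex(IgniteCache<?, ?> cache, String idxName, String tblName,
        boolean spatial, LinkedHashMap<String, Boolean> fields) {
        StringBuilder sb = new StringBuilder("CREATE ")
            .append(spatial ? "SPATIAL " : "")
            .append("INDEX \"").append(idxName).append("\" ON ")
            .append(tblName).append(" (");

        boolean first = true;

        for (Map.Entry<String, Boolean> e : fields.entrySet()) {
            if (first)
                first = false;
            else
                sb.append(", ");

            sb.append('"').append(e.getKey()).append('"')
                .append(e.getValue() ? " ASC" : " DESC");
        }

        sb.append(')');

        // DDL statements go through the same query API as SELECT/DML.
        cache.query(new SqlFieldsQuery(sb.toString())).getAll();
    }

    /** Example: a spatial index over the assumed "coords" column of the EnemyCamp table. */
    public static void example(IgniteCache<?, ?> cache) {
        LinkedHashMap<String, Boolean> fields = new LinkedHashMap<>();
        fields.put("coords", true);

        createIndex(cache, "EnemyCamp_coords_idx", "EnemyCamp", true, fields);
    }
}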
use of org.apache.ignite.cache.query.SqlFieldsQuery in project ignite by apache.
the class H2IndexingAbstractGeoSelfTest method checkGeo.
/**
 * Check geo-index (dynamic).
 *
 * @param dynamic Whether index should be created dynamically.
 * @throws Exception If failed.
 */
@SuppressWarnings({"unchecked", "ConstantConditions"})
private void checkGeo(boolean dynamic) throws Exception {
    IgniteCache<Integer, EnemyCamp> cache = createCache("camp", true, Integer.class, EnemyCamp.class, dynamic);

    try {
        WKTReader r = new WKTReader();

        cache.getAndPut(0, new EnemyCamp(r.read("POINT(25 75)"), "A"));
        cache.getAndPut(1, new EnemyCamp(r.read("POINT(70 70)"), "B"));
        cache.getAndPut(2, new EnemyCamp(r.read("POINT(70 30)"), "C"));
        cache.getAndPut(3, new EnemyCamp(r.read("POINT(75 25)"), "D"));

        SqlQuery<Integer, EnemyCamp> qry = new SqlQuery(EnemyCamp.class, "coords && ?");

        Collection<Cache.Entry<Integer, EnemyCamp>> res =
            cache.query(qry.setArgs(r.read("POLYGON((5 70, 5 80, 30 80, 30 70, 5 70))"))).getAll();

        checkPoints(res, "A");

        res = cache.query(qry.setArgs(r.read("POLYGON((10 5, 10 35, 70 30, 75 25, 10 5))"))).getAll();

        checkPoints(res, "C", "D");

        // Move B to the first polygon.
        cache.getAndPut(1, new EnemyCamp(r.read("POINT(20 75)"), "B"));

        res = cache.query(qry.setArgs(r.read("POLYGON((5 70, 5 80, 30 80, 30 70, 5 70))"))).getAll();

        checkPoints(res, "A", "B");

        // Move B to the second polygon.
        cache.getAndPut(1, new EnemyCamp(r.read("POINT(30 30)"), "B"));

        res = cache.query(qry.setArgs(r.read("POLYGON((10 5, 10 35, 70 30, 75 25, 10 5))"))).getAll();

        checkPoints(res, "B", "C", "D");

        // Remove B.
        cache.getAndRemove(1);

        res = cache.query(qry.setArgs(r.read("POLYGON((5 70, 5 80, 30 80, 30 70, 5 70))"))).getAll();

        checkPoints(res, "A");

        res = cache.query(qry.setArgs(r.read("POLYGON((10 5, 10 35, 70 30, 75 25, 10 5))"))).getAll();

        checkPoints(res, "C", "D");

        // Check explain request.
        String plan = cache.query(new SqlFieldsQuery("explain select * from EnemyCamp " +
            "where coords && 'POINT(25 75)'")).getAll().get(0).get(0).toString().toLowerCase();

        assertTrue("__ explain: " + plan, plan.contains("coords_idx"));

        if (dynamic)
            cache.query(new SqlFieldsQuery("DROP INDEX \"EnemyCamp_coords_idx\"")).getAll();
    }
    finally {
        destroy(cache, grid(0), dynamic);
    }
}
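The explain-based index check at the end of the test is reusable on its own. Below is a minimal helper sketch of that pattern; the caller supplies whatever SELECT statement and index name it set up, and the helper only generalizes the lines already shown above.

// Minimal helper extracted from the pattern above.
import java.util.List;

import org.apache.ignite.IgniteCache;
import org.apache.ignite.cache.query.SqlFieldsQuery;

public class ExplainPlanCheck {
    /** Returns true if the query plan for the given SELECT mentions the expected index. */
    public static boolean usesIndex(IgniteCache<?, ?> cache, String select, String idxName) {
        List<List<?>> rows = cache.query(new SqlFieldsQuery("explain " + select)).getAll();

        // As in the test above, the first column of the first row holds the textual plan.
        String plan = rows.get(0).get(0).toString().toLowerCase();

        return plan.contains(idxName.toLowerCase());
    }
}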
use of org.apache.ignite.cache.query.SqlFieldsQuery in project ignite by apache.
the class IgniteCacheJoinPartitionedAndReplicatedTest method checkQuery.
/**
 * @param sql SQL.
 * @param cache Cache.
 * @param expSize Expected results size.
 * @param args Arguments.
 */
private void checkQuery(String sql, IgniteCache<Object, Object> cache, int expSize, Object... args) {
    String plan = (String)cache.query(new SqlFieldsQuery("explain " + sql)).getAll().get(0).get(0);

    log.info("Plan: " + plan);

    SqlFieldsQuery qry = new SqlFieldsQuery(sql);

    qry.setArgs(args);

    QueryCursor<List<?>> cur = cache.query(qry);

    List<List<?>> res = cur.getAll();

    if (expSize != res.size())
        log.info("Results: " + res);

    assertEquals(expSize, res.size());
}
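A standalone variant of this helper, sketched for use outside a JUnit test class: it logs the EXPLAIN output and returns the rows so the caller can make its own assertion. The class and method names are assumptions, not part of the Ignite sources.

// Standalone sketch of the explain-then-run pattern used by checkQuery(...).
import java.util.List;

import org.apache.ignite.IgniteCache;
import org.apache.ignite.cache.query.SqlFieldsQuery;

public class QueryChecker {
    /** Prints the query plan, then runs the parameterized query and returns all rows. */
    public static List<List<?>> explainAndRun(IgniteCache<Object, Object> cache, String sql, Object... args) {
        String plan = (String)cache.query(new SqlFieldsQuery("explain " + sql)).getAll().get(0).get(0);

        System.out.println("Plan: " + plan);

        SqlFieldsQuery qry = new SqlFieldsQuery(sql).setArgs(args);

        return cache.query(qry).getAll();
    }
}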