Use of org.apache.ignite.cache.query.SqlFieldsQuery in project ignite by apache.
The class DmlStatementsProcessor, method executeUpdateStatement.
/**
 * Actually perform SQL DML operation locally.
 *
 * @param schemaName Schema name.
 * @param cctx Cache context.
 * @param prepStmt Prepared statement for DML query.
 * @param fieldsQry Fields query.
 * @param loc Local query flag.
 * @param filters Cache name and key filter.
 * @param cancel Query cancel state holder.
 * @param failedKeys Keys to restrict UPDATE and DELETE operations with. Null or empty array means no restriction.
 * @return Pair [number of successfully processed items; keys that have failed to be processed].
 * @throws IgniteCheckedException if failed.
 */
@SuppressWarnings({ "ConstantConditions", "unchecked" })
private UpdateResult executeUpdateStatement(String schemaName, final GridCacheContext cctx, PreparedStatement prepStmt, SqlFieldsQuery fieldsQry, boolean loc, IndexingQueryFilter filters, GridQueryCancel cancel, Object[] failedKeys) throws IgniteCheckedException {
int mainCacheId = CU.cacheId(cctx.name());
Integer errKeysPos = null;
UpdatePlan plan = getPlanForStatement(schemaName, prepStmt, errKeysPos);
if (plan.fastUpdateArgs != null) {
assert F.isEmpty(failedKeys) && errKeysPos == null;
return doFastUpdate(plan, fieldsQry.getArgs());
}
assert !F.isEmpty(plan.selectQry);
QueryCursorImpl<List<?>> cur;
// subquery and not some dummy stuff like "select 1, 2, 3;"
if (!loc && !plan.isLocSubqry) {
SqlFieldsQuery newFieldsQry = new SqlFieldsQuery(plan.selectQry, fieldsQry.isCollocated()).setArgs(fieldsQry.getArgs()).setDistributedJoins(fieldsQry.isDistributedJoins()).setEnforceJoinOrder(fieldsQry.isEnforceJoinOrder()).setLocal(fieldsQry.isLocal()).setPageSize(fieldsQry.getPageSize()).setTimeout(fieldsQry.getTimeout(), TimeUnit.MILLISECONDS);
cur = (QueryCursorImpl<List<?>>) idx.queryDistributedSqlFields(schemaName, newFieldsQry, true, cancel, mainCacheId);
} else {
final GridQueryFieldsResult res = idx.queryLocalSqlFields(schemaName, plan.selectQry, F.asList(fieldsQry.getArgs()), filters, fieldsQry.isEnforceJoinOrder(), fieldsQry.getTimeout(), cancel);
cur = new QueryCursorImpl<>(new Iterable<List<?>>() {
@Override
public Iterator<List<?>> iterator() {
try {
return new GridQueryCacheObjectsIterator(res.iterator(), idx.objectContext(), true);
} catch (IgniteCheckedException e) {
throw new IgniteException(e);
}
}
}, cancel);
}
    int pageSize = loc ? 0 : fieldsQry.getPageSize();
    switch (plan.mode) {
        case MERGE:
            return new UpdateResult(doMerge(plan, cur, pageSize), X.EMPTY_OBJECT_ARRAY);
        case INSERT:
            return new UpdateResult(doInsert(plan, cur, pageSize), X.EMPTY_OBJECT_ARRAY);
        case UPDATE:
            return doUpdate(plan, cur, pageSize);
        case DELETE:
            return doDelete(cctx, cur, pageSize);
        default:
            throw new IgniteSQLException("Unexpected DML operation [mode=" + plan.mode + ']',
                IgniteQueryErrorCode.UNEXPECTED_OPERATION);
    }
}
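For context, a minimal caller-side sketch of the DML path handled above: DML statements are submitted as ordinary SqlFieldsQuery instances through the cache API. The cache name and the Person value class below are hypothetical, used only for illustration; a simple update by _key may be served by the fast-update branch, while an arbitrary WHERE clause goes through the SELECT-based update plan.

// Hypothetical cache name and Person value class (with queryable fields), for illustration only.
IgniteCache<Integer, Person> cache = ignite.cache("personCache");
// Simple update by key: may be handled by the fast-update branch (plan.fastUpdateArgs != null).
cache.query(new SqlFieldsQuery("update Person set name = ? where _key = ?")
    .setArgs("John", 1)).getAll();
// Update with an arbitrary WHERE clause: rewritten into a SELECT that feeds the update plan.
cache.query(new SqlFieldsQuery("update Person set salary = salary * ? where salary < ?")
    .setArgs(1.1, 50_000)).getAll();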
Use of org.apache.ignite.cache.query.SqlFieldsQuery in project ignite by apache.
The class StreamTransformerExample, method main.
public static void main(String[] args) throws Exception {
    // Mark this cluster member as client.
    Ignition.setClientMode(true);
    try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) {
        if (!ExamplesUtils.hasServerNodes(ignite))
            return;
        CacheConfiguration<Integer, Long> cfg = new CacheConfiguration<>(CACHE_NAME);
        // Index key and value.
        cfg.setIndexedTypes(Integer.class, Long.class);
        // Auto-close cache at the end of the example.
        try (IgniteCache<Integer, Long> stmCache = ignite.getOrCreateCache(cfg)) {
            try (IgniteDataStreamer<Integer, Long> stmr = ignite.dataStreamer(stmCache.getName())) {
                // Allow data updates.
                stmr.allowOverwrite(true);
                // Configure data transformation to count random numbers added to the stream.
                stmr.receiver(StreamTransformer.from((e, arg) -> {
                    // Get current count.
                    Long val = e.getValue();
                    // Increment count by 1.
                    e.setValue(val == null ? 1L : val + 1);
                    return null;
                }));
                // Stream 10 million random numbers into the streamer cache.
                for (int i = 1; i <= 10_000_000; i++) {
                    stmr.addData(RAND.nextInt(RANGE), 1L);
                    if (i % 500_000 == 0)
                        System.out.println("Number of tuples streamed into Ignite: " + i);
                }
            }
            // Query the top 10 most popular numbers.
            SqlFieldsQuery top10Qry = new SqlFieldsQuery("select _key, _val from Long order by _val desc limit 10");
            // Execute the query.
            List<List<?>> top10 = stmCache.query(top10Qry).getAll();
            System.out.println("Top 10 most popular numbers:");
            // Print the top 10 numbers and their counts.
            ExamplesUtils.printQueryResults(top10);
        }
        finally {
            // A distributed cache can be removed from the cluster only via a destroyCache() call.
            ignite.destroyCache(CACHE_NAME);
        }
    }
}
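The example above hard-codes the query and materializes the result with getAll(). A small sketch of the same query with a parameterized cutoff, run against the same stmCache while it is still open; the 100L threshold is an arbitrary value chosen for illustration, and rows are iterated directly since QueryCursor is Iterable.

// Only numbers seen more than a given number of times; the threshold is illustrative.
SqlFieldsQuery qry = new SqlFieldsQuery(
    "select _key, _val from Long where _val > ? order by _val desc limit 10")
    .setArgs(100L);
// QueryCursor is Iterable, so rows can be streamed without collecting them first.
for (List<?> row : stmCache.query(qry))
    System.out.println("number=" + row.get(0) + ", count=" + row.get(1));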
Use of org.apache.ignite.cache.query.SqlFieldsQuery in project ignite by apache.
The class StreamVisitorExample, method main.
public static void main(String[] args) throws Exception {
    // Mark this cluster member as client.
    Ignition.setClientMode(true);
    try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) {
        if (!ExamplesUtils.hasServerNodes(ignite))
            return;
        // Market data cache with default configuration.
        CacheConfiguration<String, Double> mktDataCfg = new CacheConfiguration<>("marketTicks");
        // Financial instrument cache configuration.
        CacheConfiguration<String, Instrument> instCfg = new CacheConfiguration<>("instCache");
        // Index key and value for querying financial instruments.
        // Note that the Instrument class has @QuerySqlField annotations for secondary field indexing.
        instCfg.setIndexedTypes(String.class, Instrument.class);
        // Auto-close caches at the end of the example.
        try (IgniteCache<String, Double> mktCache = ignite.getOrCreateCache(mktDataCfg);
            IgniteCache<String, Instrument> instCache = ignite.getOrCreateCache(instCfg)) {
            try (IgniteDataStreamer<String, Double> mktStmr = ignite.dataStreamer(mktCache.getName())) {
                // Note that we receive market data, but do not populate 'mktCache' (it remains empty).
                // Instead we update the instruments in 'instCache'.
                // Since both 'instCache' and 'mktCache' use the same key, updates are collocated.
                mktStmr.receiver(StreamVisitor.from((cache, e) -> {
                    String symbol = e.getKey();
                    Double tick = e.getValue();
                    Instrument inst = instCache.get(symbol);
                    if (inst == null)
                        inst = new Instrument(symbol);
                    // Don't populate the market cache, as we don't use it for querying.
                    // Update the cached instrument based on the latest market tick.
                    inst.update(tick);
                    instCache.put(symbol, inst);
                }));
                // Stream 10 million market data ticks into the system.
                for (int i = 1; i <= 10_000_000; i++) {
                    int idx = RAND.nextInt(INSTRUMENTS.length);
                    // Use a Gaussian distribution so that offsets closer to 0 have higher probability,
                    // keeping generated prices near the initial price.
                    double price = round2(INITIAL_PRICES[idx] + RAND.nextGaussian());
                    mktStmr.addData(INSTRUMENTS[idx], price);
                    if (i % 500_000 == 0)
                        System.out.println("Number of tuples streamed into Ignite: " + i);
                }
            }
            // Select the top 3 best performing instruments.
            SqlFieldsQuery top3qry = new SqlFieldsQuery("select symbol, (latest - open) from Instrument order by (latest - open) desc limit 3");
            // Execute the query.
            List<List<?>> top3 = instCache.query(top3qry).getAll();
            System.out.println("Top performing financial instruments: ");
            // Print the top 3 instruments.
            ExamplesUtils.printQueryResults(top3);
        }
        finally {
            // Distributed caches can be removed from the cluster only via destroyCache() calls.
            ignite.destroyCache(mktDataCfg.getName());
            ignite.destroyCache(instCfg.getName());
        }
    }
}
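The query above relies on the Instrument value class exposing symbol, open and latest as queryable fields. A minimal sketch of what such a class could look like; the field names are inferred from the SELECT and the real example class may differ in detail.

import org.apache.ignite.cache.query.annotations.QuerySqlField;

public class Instrument {
    /** Instrument symbol (also used as the cache key). */
    @QuerySqlField(index = true)
    private final String symbol;

    /** Price of the first tick received. */
    @QuerySqlField
    private double open;

    /** Price of the most recent tick. */
    @QuerySqlField
    private double latest;

    public Instrument(String symbol) {
        this.symbol = symbol;
    }

    /** Updates the instrument with a new market tick. */
    public void update(double price) {
        if (open == 0)
            open = price;
        latest = price;
    }
}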
Use of org.apache.ignite.cache.query.SqlFieldsQuery in project ignite by apache.
The class IgniteCacheProxy, method convertToBinary.
/**
 * Convert query arguments to BinaryObjects if the binary marshaller is used.
 *
 * @param qry Query.
 */
private void convertToBinary(final Query qry) {
    if (ctx.binaryMarshaller()) {
        if (qry instanceof SqlQuery) {
            final SqlQuery sqlQry = (SqlQuery) qry;
            convertToBinary(sqlQry.getArgs());
        }
        else if (qry instanceof SpiQuery) {
            final SpiQuery spiQry = (SpiQuery) qry;
            convertToBinary(spiQry.getArgs());
        }
        else if (qry instanceof SqlFieldsQuery) {
            final SqlFieldsQuery fieldsQry = (SqlFieldsQuery) qry;
            convertToBinary(fieldsQry.getArgs());
        }
    }
}
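A small caller-side sketch of why this conversion matters. The Person and Address types and the cache variable below are hypothetical: when the binary marshaller is enabled, an object passed as a query argument has to be converted to a BinaryObject so it can be matched against data kept in binary form in the cache and its indexes.

// Hypothetical query; 'addr' is assumed to be a queryable field of type Address.
SqlFieldsQuery qry = new SqlFieldsQuery("select name from Person where addr = ?")
    .setArgs(new Address("Main St", 1));
// The cache proxy converts the Address argument to a BinaryObject before executing the query.
List<List<?>> rows = cache.query(qry).getAll();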
Use of org.apache.ignite.cache.query.SqlFieldsQuery in project ignite by apache.
The class IgnitePdsMultiNodePutGetRestartTest, method checkPutGetSql.
/**
 * Checks put/get and SQL queries against the cache.
 *
 * @param ig Ignite instance.
 * @param write Whether to populate the cache through a data streamer before querying.
 */
private void checkPutGetSql(IgniteEx ig, boolean write) {
    IgniteCache<Integer, DbValue> cache = ig.cache(null);
    if (write) {
        try (IgniteDataStreamer<Object, Object> streamer = ig.dataStreamer(null)) {
            for (int i = 0; i < 10_000; i++)
                streamer.addData(i, new DbValue(i, "value-" + i, i));
        }
    }
    List<List<?>> res = cache.query(new SqlFieldsQuery("select ival from dbvalue where ival < ? order by ival asc").setArgs(10_000)).getAll();
    assertEquals(10_000, res.size());
    for (int i = 0; i < 10_000; i++) {
        assertEquals(1, res.get(i).size());
        assertEquals(i, res.get(i).get(0));
    }
    assertEquals(1, cache.query(new SqlFieldsQuery("select lval from dbvalue where ival = 7899")).getAll().size());
    assertEquals(5000, cache.query(new SqlFieldsQuery("select lval from dbvalue where ival >= 5000 and ival < 10000")).getAll().size());
    for (int i = 0; i < 10_000; i++)
        assertEquals(new DbValue(i, "value-" + i, i), cache.get(i));
}
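The queries above address the dbvalue table through columns ival and lval. A minimal sketch of what the DbValue test class could look like; the field names and annotations are inferred from the SQL and the constructor calls, the actual test class may differ, and equals/hashCode are needed for the final cache.get assertion.

import java.io.Serializable;
import org.apache.ignite.cache.query.annotations.QuerySqlField;

public class DbValue implements Serializable {
    /** Indexed integer value queried as 'ival'. */
    @QuerySqlField(index = true)
    private final int iVal;

    /** String value (not queried directly above). */
    @QuerySqlField
    private final String sVal;

    /** Long value queried as 'lval'. */
    @QuerySqlField
    private final long lVal;

    public DbValue(int iVal, String sVal, long lVal) {
        this.iVal = iVal;
        this.sVal = sVal;
        this.lVal = lVal;
    }

    /** {@inheritDoc} */
    @Override public boolean equals(Object o) {
        if (this == o)
            return true;
        if (!(o instanceof DbValue))
            return false;
        DbValue other = (DbValue) o;
        return iVal == other.iVal && lVal == other.lVal &&
            (sVal != null ? sVal.equals(other.sVal) : other.sVal == null);
    }

    /** {@inheritDoc} */
    @Override public int hashCode() {
        return 31 * (31 * iVal + (sVal != null ? sVal.hashCode() : 0)) + (int) lVal;
    }
}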