Use of org.apache.ignite.cache.query.QueryCursor in project ignite by apache.
The class CustomersClusterizationExample, method computeMeanEntropy.
/**
 * Computes mean entropy in clusters.
 *
 * @param cache Dataset cache.
 * @param filter Test dataset filter.
 * @param vectorizer Upstream vectorizer.
 * @param mdl KMeans model.
 * @return Mean entropy over the clusters.
 */
private static double computeMeanEntropy(IgniteCache<Integer, Vector> cache,
    IgniteBiPredicate<Integer, Vector> filter,
    Vectorizer<Integer, Vector, Integer, Double> vectorizer,
    KMeansModel mdl) {
    Map<Integer, Map<Integer, AtomicInteger>> clusterUniqueLbCounts = new HashMap<>();

    try (QueryCursor<Cache.Entry<Integer, Vector>> cursor = cache.query(new ScanQuery<>(filter))) {
        for (Cache.Entry<Integer, Vector> ent : cursor) {
            LabeledVector<Double> vec = vectorizer.apply(ent.getKey(), ent.getValue());

            int cluster = mdl.predict(vec.features());

            int ch = vec.label().intValue();

            if (!clusterUniqueLbCounts.containsKey(cluster))
                clusterUniqueLbCounts.put(cluster, new HashMap<>());

            if (!clusterUniqueLbCounts.get(cluster).containsKey(ch))
                clusterUniqueLbCounts.get(cluster).put(ch, new AtomicInteger());

            clusterUniqueLbCounts.get(cluster).get(ch).incrementAndGet();
        }
    }

    double sumOfClusterEntropies = 0.0;

    for (Integer cluster : clusterUniqueLbCounts.keySet()) {
        Map<Integer, AtomicInteger> lbCounters = clusterUniqueLbCounts.get(cluster);

        int sizeOfCluster = lbCounters.values().stream().mapToInt(AtomicInteger::get).sum();

        double entropyInCluster = lbCounters.values().stream()
            .mapToDouble(AtomicInteger::get)
            .map(lblsCount -> lblsCount / sizeOfCluster)
            .map(lblProb -> -lblProb * Math.log(lblProb))
            .sum();

        sumOfClusterEntropies += entropyInCluster;
    }

    return sumOfClusterEntropies / clusterUniqueLbCounts.size();
}
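For reference, a minimal, self-contained sketch of the same ScanQuery/QueryCursor idiom used above: the cursor is opened in try-with-resources so it is always closed, and the optional IgniteBiPredicate filter is evaluated on the server nodes. The cache name, sample values, and filter below are illustrative assumptions, not part of the original example.

import javax.cache.Cache;
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.query.QueryCursor;
import org.apache.ignite.cache.query.ScanQuery;
import org.apache.ignite.lang.IgniteBiPredicate;

public class ScanQuerySketch {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            // Hypothetical cache name and contents.
            IgniteCache<Integer, Double> cache = ignite.getOrCreateCache("customers");

            cache.put(1, 0.5);
            cache.put(2, 1.5);

            // Hypothetical server-side filter, analogous to the test-dataset filter above.
            IgniteBiPredicate<Integer, Double> filter = (key, val) -> key % 2 == 0;

            // The cursor holds server-side resources; try-with-resources guarantees it is closed.
            try (QueryCursor<Cache.Entry<Integer, Double>> cursor = cache.query(new ScanQuery<>(filter))) {
                for (Cache.Entry<Integer, Double> e : cursor)
                    System.out.println(e.getKey() + " -> " + e.getValue());
            }
        }
    }
}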
Use of org.apache.ignite.cache.query.QueryCursor in project ignite by apache.
The class JdbcQueryMultipleStatementsTask, method call.
/**
 * {@inheritDoc}
 */
@Override
public List<JdbcStatementResultInfo> call() throws Exception {
    SqlFieldsQuery qry = (isQry != null ? new SqlFieldsQueryEx(sql, isQry) : new SqlFieldsQuery(sql)).setArgs(args);

    qry.setPageSize(fetchSize);
    qry.setLocal(locQry);
    qry.setCollocated(collocatedQry);
    qry.setDistributedJoins(distributedJoins);
    qry.setEnforceJoinOrder(enforceJoinOrder);
    qry.setLazy(lazy);
    qry.setSchema(schemaName);

    if (!F.isEmpty(queryInitiatorId()))
        qry.setQueryInitiatorId(queryInitiatorId());

    GridKernalContext ctx = ((IgniteKernal) ignite).context();

    List<FieldsQueryCursor<List<?>>> curs = ctx.query().querySqlFields(qry, true, !allowMultipleStatements());

    List<JdbcStatementResultInfo> resultsInfo = new ArrayList<>(curs.size());

    for (FieldsQueryCursor<List<?>> cur0 : curs) {
        if (cur0 instanceof BulkLoadContextCursor) {
            curs.forEach(QueryCursor::close);

            throw new SQLException("COPY command is currently supported only in thin JDBC driver.");
        }

        QueryCursorImpl<List<?>> cur = (QueryCursorImpl<List<?>>) cur0;

        long updCnt = -1;

        UUID qryId = null;

        if (!cur.isQuery()) {
            List<List<?>> items = cur.getAll();

            assert items != null && items.size() == 1 && items.get(0).size() == 1 && items.get(0).get(0) instanceof Long :
                "Invalid result set for not-SELECT query. [qry=" + sql + ", res=" + S.toString(List.class, items) + ']';

            updCnt = (Long) items.get(0).get(0);

            cur.close();
        }
        else {
            qryId = UUID.randomUUID();

            JdbcQueryTask.Cursor jdbcCur = new JdbcQueryTask.Cursor(cur, cur.iterator());

            JdbcQueryTask.addCursor(qryId, jdbcCur);

            if (!loc)
                JdbcQueryTask.scheduleRemoval(qryId);
        }

        JdbcStatementResultInfo resInfo = new JdbcStatementResultInfo(cur.isQuery(), qryId, updCnt);

        resultsInfo.add(resInfo);
    }

    return resultsInfo;
}
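For reference, the cursors produced by querySqlFields here are the same FieldsQueryCursor objects that a regular SqlFieldsQuery returns through the public cache API. A minimal single-statement sketch of that API follows; the cache name and SQL text are assumptions for illustration.

import java.util.List;
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.query.FieldsQueryCursor;
import org.apache.ignite.cache.query.SqlFieldsQuery;

public class SqlFieldsQuerySketch {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            // Any cache instance can run a SqlFieldsQuery; the name is hypothetical.
            IgniteCache<?, ?> cache = ignite.getOrCreateCache("sketch");

            // Positional parameters are bound via setArgs, as in the JDBC task above.
            SqlFieldsQuery qry = new SqlFieldsQuery("select 1 + ?").setArgs(41);

            qry.setPageSize(1024);
            qry.setLazy(true);

            // FieldsQueryCursor extends QueryCursor<List<?>>; close it (or use try-with-resources) when done.
            try (FieldsQueryCursor<List<?>> cur = cache.query(qry)) {
                for (List<?> row : cur)
                    System.out.println(row);
            }
        }
    }
}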
Use of org.apache.ignite.cache.query.QueryCursor in project ignite by apache.
The class ClientCacheScanQueryRequest, method process.
/**
 * {@inheritDoc}
 */
@Override
public ClientResponse process(ClientConnectionContext ctx) {
    IgniteCache cache = filterPlatform == ClientPlatform.JAVA && !isKeepBinary() ? rawCache(ctx) : cache(ctx);

    ScanQuery qry = new ScanQuery()
        .setLocal(loc)
        .setPageSize(pageSize)
        .setPartition(part)
        .setFilter(createFilter(ctx.kernalContext(), filterObj, filterPlatform));

    ctx.incrementCursors();

    try {
        QueryCursor cur = cache.query(qry);

        ClientCacheEntryQueryCursor cliCur = new ClientCacheEntryQueryCursor(cur, pageSize, ctx);

        long cursorId = ctx.resources().put(cliCur);

        cliCur.id(cursorId);

        return new ClientCacheQueryResponse(requestId(), cliCur);
    }
    catch (Exception e) {
        ctx.decrementCursors();

        throw e;
    }
}
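On the other end of this request, the Java thin client sends the same ScanQuery and iterates a QueryCursor whose pages are served by the server-side cursor registered above. A minimal thin-client sketch follows; the address, cache name, and sample data are assumptions.

import javax.cache.Cache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.query.QueryCursor;
import org.apache.ignite.cache.query.ScanQuery;
import org.apache.ignite.client.ClientCache;
import org.apache.ignite.client.IgniteClient;
import org.apache.ignite.configuration.ClientConfiguration;

public class ThinClientScanQuerySketch {
    public static void main(String[] args) {
        // Assumed server address; adjust to the actual cluster endpoint.
        ClientConfiguration cfg = new ClientConfiguration().setAddresses("127.0.0.1:10800");

        try (IgniteClient client = Ignition.startClient(cfg)) {
            // Hypothetical cache name.
            ClientCache<Integer, String> cache = client.getOrCreateCache("people");

            cache.put(1, "Alice");
            cache.put(2, "Bob");

            // Each page of up to pageSize entries is fetched from the server-side cursor;
            // closing the client-side cursor releases that server resource.
            try (QueryCursor<Cache.Entry<Integer, String>> cur =
                cache.query(new ScanQuery<Integer, String>().setPageSize(100))) {
                for (Cache.Entry<Integer, String> e : cur)
                    System.out.println(e.getKey() + " -> " + e.getValue());
            }
        }
    }
}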
Use of org.apache.ignite.cache.query.QueryCursor in project ignite by apache.
The class ClientCacheQueryContinuousRequest, method process.
/**
 * {@inheritDoc}
 */
@Override
public ClientResponse process(ClientConnectionContext ctx) {
    qry.setRemoteFilterFactory(getFilterFactory(ctx));

    ctx.incrementCursors();

    try {
        IgniteCache cache = filterPlatform == ClientPlatform.JAVA && !isKeepBinary() ? rawCache(ctx) : cache(ctx);

        ClientCacheQueryContinuousHandle handle = new ClientCacheQueryContinuousHandle(ctx);

        qry.setLocalListener(handle);

        QueryCursor cur = cache.query(qry);

        long cursorId = ctx.resources().put(handle);

        handle.setCursor(cur);

        return new ClientCacheQueryContinuousResponse(requestId(), handle, cursorId);
    }
    catch (Exception e) {
        ctx.decrementCursors();

        throw e;
    }
}
Use of org.apache.ignite.cache.query.QueryCursor in project ignite by apache.
The class IgniteCacheGroupsTest, method continuousQuery.
/**
 * @param cacheMode Cache mode.
 * @param atomicityMode Cache atomicity mode.
 * @throws Exception If failed.
 */
private void continuousQuery(CacheMode cacheMode, CacheAtomicityMode atomicityMode) throws Exception {
    final int keys = 10_000;

    Integer[] data1 = generateData(keys);
    Integer[] data2 = generateData(keys);

    boolean loc = cacheMode == LOCAL;

    if (loc)
        startGrid(0);
    else
        startGridsMultiThreaded(4);

    Ignite srv0 = ignite(0);

    srv0.createCache(cacheConfiguration(GROUP1, CACHE1, cacheMode, atomicityMode, 2, false));
    srv0.createCache(cacheConfiguration(GROUP1, CACHE2, cacheMode, atomicityMode, 2, false));

    final AtomicInteger cntr1 = new AtomicInteger();
    final AtomicInteger cntr2 = new AtomicInteger();

    CacheEntryUpdatedListener lsnr1 = new CacheEntryUpdatedListener<Integer, Integer>() {
        @Override public void onUpdated(Iterable<CacheEntryEvent<? extends Integer, ? extends Integer>> evts) {
            for (CacheEntryEvent<? extends Integer, ? extends Integer> ignored : evts)
                cntr1.incrementAndGet();
        }
    };

    CacheEntryUpdatedListener lsnr2 = new CacheEntryUpdatedListener<Integer, Integer>() {
        @Override public void onUpdated(Iterable<CacheEntryEvent<? extends Integer, ? extends Integer>> evts) {
            for (CacheEntryEvent<? extends Integer, ? extends Integer> ignored : evts)
                cntr2.incrementAndGet();
        }
    };

    QueryCursor qry1 = ignite(loc ? 0 : 2).cache(CACHE1).query(new ContinuousQuery<>().setLocalListener(lsnr1));
    QueryCursor qry2 = ignite(loc ? 0 : 3).cache(CACHE2).query(new ContinuousQuery<>().setLocalListener(lsnr2));

    if (atomicityMode == TRANSACTIONAL) {
        Ignite ignite = ignite(loc ? 0 : 1);

        IgniteCache<Integer, Integer> cache1 = ignite.cache(CACHE1);
        IgniteCache<Integer, Integer> cache2 = ignite.cache(CACHE2);

        try (Transaction tx = ignite.transactions().txStart()) {
            for (int i = 0; i < keys; i++) {
                cache1.put(i, data1[i]);
                cache2.put(i, data2[i]);
            }

            tx.commit();
        }
    }
    else {
        int ldrs = 4;

        List<Callable<?>> cls = new ArrayList<>(ldrs * 2);

        for (int i = 0; i < ldrs; i++) {
            cls.add(putOperation(loc ? 0 : 1, ldrs, i, CACHE1, data1));
            cls.add(putOperation(loc ? 0 : 2, ldrs, i, CACHE2, data2));
        }

        GridTestUtils.runMultiThreaded(cls, "loaders");
    }

    GridTestUtils.waitForCondition(new PA() {
        @Override public boolean apply() {
            return cntr1.get() == keys && cntr2.get() == keys;
        }
    }, 2000);

    assertEquals(cntr1.get(), keys);
    assertEquals(cntr2.get(), keys);

    qry1.close();

    Map<Integer, Integer> map = generateDataMap(10);

    srv0.cache(CACHE1).putAll(map);
    srv0.cache(CACHE2).putAll(map);

    GridTestUtils.waitForCondition(new PA() {
        @Override public boolean apply() {
            return cntr2.get() == keys + 10;
        }
    }, 2000);

    assertEquals(keys + 10, cntr2.get());
    assertEquals(keys, cntr1.get());

    qry2.close();
}
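Distilled from the test above, the core ContinuousQuery pattern is: register a local listener, keep the returned QueryCursor as the registration handle, and close it to stop receiving notifications. A minimal single-node sketch follows; the cache name, sample values, and the crude sleep are assumptions for illustration.

import java.util.concurrent.atomic.AtomicInteger;
import javax.cache.event.CacheEntryEvent;
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.query.ContinuousQuery;
import org.apache.ignite.cache.query.QueryCursor;

public class ContinuousQuerySketch {
    public static void main(String[] args) throws Exception {
        try (Ignite ignite = Ignition.start()) {
            // Hypothetical cache name.
            IgniteCache<Integer, Integer> cache = ignite.getOrCreateCache("events");

            AtomicInteger cntr = new AtomicInteger();

            ContinuousQuery<Integer, Integer> qry = new ContinuousQuery<>();

            // The local listener is called on this node for every matching update.
            qry.setLocalListener(evts -> {
                for (CacheEntryEvent<? extends Integer, ? extends Integer> ignored : evts)
                    cntr.incrementAndGet();
            });

            // The returned cursor is the registration handle.
            QueryCursor<?> cur = cache.query(qry);

            cache.put(1, 1);
            cache.put(2, 2);

            // Notifications are delivered asynchronously; a crude wait for this sketch only.
            Thread.sleep(500);

            System.out.println("Updates observed: " + cntr.get());

            // Closing the cursor deregisters the listener; later updates are no longer delivered.
            cur.close();
        }
    }
}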