Use of org.apache.ignite.internal.processors.query.h2.sql.GridSqlOperationType.IN in project ignite by apache.
The class DmlStatementsProcessor, method doInsert.
/**
 * Execute INSERT statement plan.
 * @param plan Update plan describing the INSERT.
 * @param cursor Cursor to take inserted data from.
 * @param pageSize Batch size for streaming, anything <= 0 for single page operations.
 * @return Number of items affected.
 * @throws IgniteCheckedException If failed, particularly in case of duplicate keys.
 */
@SuppressWarnings({"unchecked", "ConstantConditions"})
private long doInsert(UpdatePlan plan, Iterable<List<?>> cursor, int pageSize) throws IgniteCheckedException {
    GridCacheContext cctx = plan.cacheContext();

    // If we have just one item to put, just do so.
    if (plan.rowCount() == 1) {
        IgniteBiTuple t = plan.processRow(cursor.iterator().next());

        if (cctx.cache().putIfAbsent(t.getKey(), t.getValue()))
            return 1;
        else
            throw new IgniteSQLException("Duplicate key during INSERT [key=" + t.getKey() + ']', DUPLICATE_KEY);
    }
    else {
        // The batch sender tracks keys that fail to INSERT due to duplication.
        DmlBatchSender sender = new DmlBatchSender(cctx, pageSize, 1);

        for (List<?> row : cursor) {
            final IgniteBiTuple keyValPair = plan.processRow(row);

            sender.add(keyValPair.getKey(), new InsertEntryProcessor(keyValPair.getValue()), 0);
        }

        sender.flush();

        SQLException resEx = sender.error();

        if (!F.isEmpty(sender.failedKeys())) {
            String msg = "Failed to INSERT some keys because they are already in cache [keys=" +
                sender.failedKeys() + ']';

            SQLException dupEx = new SQLException(msg, SqlStateCode.CONSTRAINT_VIOLATION);

            if (resEx == null)
                resEx = dupEx;
            else
                resEx.setNextException(dupEx);
        }

        if (resEx != null)
            throw new IgniteSQLException(resEx);

        return sender.updateCount();
    }
}
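DmlBatchSender pairs each key with an InsertEntryProcessor so the batch path keeps the same put-if-absent semantics as the single-row branch. Below is a minimal sketch of such a processor against the standard javax.cache EntryProcessor contract; the class name and exact shape of Ignite's internal InsertEntryProcessor may differ.

import javax.cache.processor.EntryProcessor;
import javax.cache.processor.EntryProcessorException;
import javax.cache.processor.MutableEntry;

/** Sets the value only if the entry is absent; returns false on a duplicate key (illustrative sketch). */
class InsertEntryProcessorSketch implements EntryProcessor<Object, Object, Boolean> {
    /** Value to insert. */
    private final Object val;

    InsertEntryProcessorSketch(Object val) {
        this.val = val;
    }

    @Override public Boolean process(MutableEntry<Object, Object> entry, Object... args)
        throws EntryProcessorException {
        if (entry.exists())
            return false; // Duplicate key: leave the existing value intact.

        entry.setValue(val);

        return true;
    }
}

Returning false instead of throwing keeps the batch going; the sender can then collect all duplicate keys and report them together in a single SQLException, as the method above does.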
Use of org.apache.ignite.internal.processors.query.h2.sql.GridSqlOperationType.IN in project ignite by apache.
The class SqlFieldsQuerySelfTest, method testQueryCaching.
/**
 * @throws Exception If failed.
 */
public void testQueryCaching() throws Exception {
    startGrid(0);

    PreparedStatement stmt = null;

    for (int i = 0; i < 2; i++) {
        createAndFillCache();

        PreparedStatement stmt0 = grid(0).context().query().prepareNativeStatement("person", INSERT);

        // Statement should be parsed anew on the first iteration and re-parsed after the schema change...
        assertTrue(stmt != stmt0);

        stmt = stmt0;

        // ...and the re-parsed statement should compile cleanly against the current schema.
        new GridSqlQueryParser(false).parse(GridSqlQueryParser.prepared(stmt));

        destroyCache();
    }

    stmt = null;

    createAndFillCache();

    // Now let's do the same without restarting the cache.
    for (int i = 0; i < 2; i++) {
        PreparedStatement stmt0 = grid(0).context().query().prepareNativeStatement("person", INSERT);

        // Statement should either be parsed once or taken from the cache, as no schema changes occurred...
        assertTrue(stmt == null || stmt == stmt0);

        stmt = stmt0;

        // ...and should still compile cleanly against the unchanged schema.
        new GridSqlQueryParser(false).parse(GridSqlQueryParser.prepared(stmt));
    }

    destroyCache();
}
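What the test exercises is a statement cache keyed by schema state: a schema change must invalidate cached statements, while an unchanged schema may hand back the same PreparedStatement twice. A stripped-down sketch of that idea follows; it is illustrative only, as Ignite's real cache lives inside its H2 indexing layer and is considerably more involved.

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

/** Illustrative per-schema statement cache: a schema change bumps the version and orphans old entries. */
class StatementCacheSketch {
    private final Map<String, PreparedStatement> cache = new ConcurrentHashMap<>();

    private volatile long schemaVer;

    PreparedStatement prepare(Connection conn, String schema, String sql) throws SQLException {
        String key = schemaVer + "|" + schema + "|" + sql;

        PreparedStatement stmt = cache.get(key);

        if (stmt == null) {
            stmt = conn.prepareStatement(sql); // Re-parse against the current schema.

            cache.put(key, stmt);
        }

        return stmt;
    }

    /** Called on CREATE/DROP so stale statements are never returned. */
    void onSchemaChange() {
        schemaVer++;

        cache.clear();
    }
}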
Use of org.apache.ignite.internal.processors.query.h2.sql.GridSqlOperationType.IN in project ignite by apache.
The class GridMapQueryExecutor, method onQueryRequest0.
/**
 * @param node Node that authored the request.
 * @param reqId Request ID.
 * @param segmentId Index segment ID.
 * @param schemaName Schema name.
 * @param qrys Queries to execute.
 * @param cacheIds Caches which will be affected by these queries.
 * @param topVer Topology version.
 * @param partsMap Partitions map for unstable topology.
 * @param parts Explicit partitions for current node.
 * @param pageSize Page size.
 * @param distributedJoinMode Query distributed join mode.
 * @param enforceJoinOrder Enforce join order flag.
 * @param replicated Replicated-only query flag.
 * @param timeout Query timeout.
 * @param params Query parameters.
 * @param lazy Lazy execution flag.
 */
private void onQueryRequest0(
    final ClusterNode node,
    final long reqId,
    final int segmentId,
    final String schemaName,
    final Collection<GridCacheSqlQuery> qrys,
    final List<Integer> cacheIds,
    final AffinityTopologyVersion topVer,
    final Map<UUID, int[]> partsMap,
    final int[] parts,
    final int pageSize,
    final DistributedJoinMode distributedJoinMode,
    final boolean enforceJoinOrder,
    final boolean replicated,
    final int timeout,
    final Object[] params,
    boolean lazy
) {
    if (lazy && MapQueryLazyWorker.currentWorker() == null) {
        // Lazy queries must be re-submitted to dedicated workers.
        MapQueryLazyWorkerKey key = new MapQueryLazyWorkerKey(node.id(), reqId, segmentId);
        MapQueryLazyWorker worker = new MapQueryLazyWorker(ctx.igniteInstanceName(), key, log, this);

        worker.submit(new Runnable() {
            @Override public void run() {
                onQueryRequest0(node, reqId, segmentId, schemaName, qrys, cacheIds, topVer, partsMap, parts,
                    pageSize, distributedJoinMode, enforceJoinOrder, replicated, timeout, params, true);
            }
        });

        if (lazyWorkerBusyLock.enterBusy()) {
            try {
                MapQueryLazyWorker oldWorker = lazyWorkers.put(key, worker);

                if (oldWorker != null)
                    oldWorker.stop();

                IgniteThread thread = new IgniteThread(worker);

                thread.start();
            }
            finally {
                lazyWorkerBusyLock.leaveBusy();
            }
        }
        else
            log.info("Ignored query request (node is stopping) [nodeId=" + node.id() + ", reqId=" + reqId + ']');

        return;
    }

    // Prepare to run queries.
    GridCacheContext<?, ?> mainCctx =
        !F.isEmpty(cacheIds) ? ctx.cache().context().cacheContext(cacheIds.get(0)) : null;

    MapNodeResults nodeRess = resultsForNode(node.id());

    MapQueryResults qr = null;

    List<GridReservable> reserved = new ArrayList<>();

    try {
        if (topVer != null) {
            // Reserve primary for topology version or explicit partitions.
            if (!reservePartitions(cacheIds, topVer, parts, reserved)) {
                // Unregister the lazy worker because the retry may never reach this node again.
                if (lazy)
                    stopAndUnregisterCurrentLazyWorker();

                sendRetry(node, reqId, segmentId);

                return;
            }
        }

        qr = new MapQueryResults(h2, reqId, qrys.size(), mainCctx, MapQueryLazyWorker.currentWorker());

        if (nodeRess.put(reqId, segmentId, qr) != null)
            throw new IllegalStateException();

        // Prepare query context.
        GridH2QueryContext qctx = new GridH2QueryContext(ctx.localNodeId(), node.id(), reqId, segmentId,
            replicated ? REPLICATED : MAP)
            .filter(h2.backupFilter(topVer, parts))
            .partitionsMap(partsMap)
            .distributedJoinMode(distributedJoinMode)
            .pageSize(pageSize)
            .topologyVersion(topVer)
            .reservations(reserved);

        Connection conn = h2.connectionForSchema(schemaName);

        H2Utils.setupConnection(conn, distributedJoinMode != OFF, enforceJoinOrder);

        GridH2QueryContext.set(qctx);

        // qctx is set, so reservations will be released through it from now on.
        reserved = null;

        try {
            if (nodeRess.cancelled(reqId)) {
                GridH2QueryContext.clear(ctx.localNodeId(), node.id(), reqId, qctx.type());

                nodeRess.cancelRequest(reqId);

                throw new QueryCancelledException();
            }

            // Run queries.
            int qryIdx = 0;

            boolean evt = mainCctx != null && mainCctx.events().isRecordable(EVT_CACHE_QUERY_EXECUTED);

            for (GridCacheSqlQuery qry : qrys) {
                ResultSet rs = null;

                // If we are not the target node for this replicated query, just ignore it.
                if (qry.node() == null || (segmentId == 0 && qry.node().equals(ctx.localNodeId()))) {
                    rs = h2.executeSqlQueryWithTimer(conn, qry.query(), F.asList(qry.parameters(params)),
                        true, timeout, qr.queryCancel(qryIdx));

                    if (evt) {
                        ctx.event().record(new CacheQueryExecutedEvent<>(node, "SQL query executed.",
                            EVT_CACHE_QUERY_EXECUTED, CacheQueryType.SQL.name(), mainCctx.name(), null,
                            qry.query(), null, null, params, node.id(), null));
                    }

                    assert rs instanceof JdbcResultSet : rs.getClass();
                }

                qr.addResult(qryIdx, qry, node.id(), rs, params);

                if (qr.cancelled()) {
                    qr.result(qryIdx).close();

                    throw new QueryCancelledException();
                }

                // Send the first page.
                sendNextPage(nodeRess, node, qr, qryIdx, segmentId, pageSize);

                qryIdx++;
            }

            // All results are already in memory in the result sets, so it is safe to release partitions.
            if (!lazy)
                releaseReservations();
        }
        catch (Throwable e) {
            releaseReservations();

            throw e;
        }
    }
    catch (Throwable e) {
        if (qr != null) {
            nodeRess.remove(reqId, segmentId, qr);

            qr.cancel(false);
        }

        // Unregister the worker after possible cancellation.
        if (lazy)
            stopAndUnregisterCurrentLazyWorker();

        if (X.hasCause(e, GridH2RetryException.class))
            sendRetry(node, reqId, segmentId);
        else {
            U.error(log, "Failed to execute local query.", e);

            sendError(node, reqId, e);

            if (e instanceof Error)
                throw (Error)e;
        }
    }
    finally {
        if (reserved != null) {
            // Release reserved partitions.
            for (int i = 0; i < reserved.size(); i++)
                reserved.get(i).release();
        }
    }
}
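The block at the top of the method is a re-submission idiom: when the call is not already running on the dedicated lazy worker, it wraps itself in a Runnable, hands itself to the worker, and returns. A stripped-down sketch of the same idiom with a plain single-thread executor follows; it is illustrative only, as MapQueryLazyWorker additionally handles registration, busy-locking, and stop semantics.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

/** Illustrative re-submission: heavy work always ends up on a dedicated worker thread. */
class LazyResubmitSketch {
    private final ExecutorService worker = Executors.newSingleThreadExecutor();

    private final ThreadLocal<Boolean> onWorker = ThreadLocal.withInitial(() -> false);

    void handleRequest(long reqId) {
        if (!onWorker.get()) {
            // Not on the worker thread yet: re-submit the same call and return immediately.
            worker.submit(() -> {
                onWorker.set(true);

                handleRequest(reqId);
            });

            return;
        }

        // From here on we are guaranteed to be on the dedicated worker.
        System.out.println("Executing request " + reqId + " on " + Thread.currentThread().getName());
    }
}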
Use of org.apache.ignite.internal.processors.query.h2.sql.GridSqlOperationType.IN in project ignite by apache.
The class DmlAstUtils, method injectKeysFilterParam.
/**
 * Append an additional condition to WHERE so that it selects only specific keys.
 *
 * @param where Initial condition.
 * @param keyCol Column to base the new condition on.
 * @param paramIdx Index of the query parameter that holds the key array.
 * @return New condition.
 */
private static GridSqlElement injectKeysFilterParam(GridSqlElement where, GridSqlColumn keyCol, int paramIdx) {
    // A subquery is required for "WHERE _key IN ?" to accept an array parameter without dirty query rewriting.
    GridSqlSelect sel = new GridSqlSelect();

    GridSqlFunction from = new GridSqlFunction(GridSqlFunctionType.TABLE);

    sel.from(from);

    GridSqlColumn col = new GridSqlColumn(null, from, null, "TABLE", "_IGNITE_ERR_KEYS");

    sel.addColumn(col, true);

    GridSqlAlias alias = new GridSqlAlias("_IGNITE_ERR_KEYS", new GridSqlParameter(paramIdx));

    alias.resultType(keyCol.resultType());

    from.addChild(alias);

    GridSqlElement e = new GridSqlOperation(GridSqlOperationType.IN, keyCol, new GridSqlSubquery(sel));

    if (where == null)
        return e;

    return new GridSqlOperation(GridSqlOperationType.AND, where, e);
}
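The trick relies on H2's TABLE() function, which exposes an array bound to a single JDBC parameter as a one-column table, so the IN condition stays a subquery instead of requiring per-key query rewriting. Below is a self-contained sketch against plain H2 showing the same SQL shape; it assumes the H2 driver is on the classpath, and the table and column names are illustrative.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;

/** Demonstrates the H2 TABLE() trick that makes "key IN ?" work with an array parameter. */
public class InArrayParamDemo {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:demo")) {
            try (PreparedStatement st = conn.prepareStatement(
                "CREATE TABLE person(id INT PRIMARY KEY, name VARCHAR)")) {
                st.execute();
            }

            try (PreparedStatement st = conn.prepareStatement("INSERT INTO person VALUES (?, ?)")) {
                for (int i = 1; i <= 5; i++) {
                    st.setInt(1, i);
                    st.setString(2, "name" + i);
                    st.addBatch();
                }

                st.executeBatch();
            }

            // The same shape DmlAstUtils builds: IN over a subquery against TABLE(col type = ?).
            try (PreparedStatement st = conn.prepareStatement(
                "SELECT id, name FROM person WHERE id IN (SELECT K FROM TABLE(K INT = ?))")) {
                st.setObject(1, new Object[] {2, 4});

                try (ResultSet rs = st.executeQuery()) {
                    while (rs.next())
                        System.out.println(rs.getInt(1) + " -> " + rs.getString(2));
                }
            }
        }
    }
}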
Use of org.apache.ignite.internal.processors.query.h2.sql.GridSqlOperationType.IN in project ignite by apache.
The class SystemViewCommandTest, method testPagesList.
/**
*/
@Test
public void testPagesList() throws Exception {
    String cacheName = "cacheFL";

    IgniteCache<Integer, byte[]> cache = ignite0.getOrCreateCache(new CacheConfiguration<Integer, byte[]>()
        .setName(cacheName)
        .setAffinity(new RendezvousAffinityFunction().setPartitions(1)));

    GridCacheDatabaseSharedManager dbMgr =
        (GridCacheDatabaseSharedManager)ignite0.context().cache().context().database();

    int pageSize = dbMgr.pageSize();

    try {
        dbMgr.enableCheckpoints(false).get();

        int key = 0;

        // Fill up different free-list buckets.
        for (int j = 0; j < pageSize / 2; j++)
            cache.put(key++, new byte[j + 1]);

        // Put some pages into one bucket to overflow the pages cache.
        for (int j = 0; j < 1000; j++)
            cache.put(key++, new byte[pageSize / 2]);

        List<List<String>> cacheGrpView = systemView(ignite0, CACHE_GRP_PAGE_LIST_VIEW);

        List<List<String>> dataRegionView = systemView(ignite0, DATA_REGION_PAGE_LIST_VIEW);

        String cacheId = Integer.toString(cacheId(cacheName));

        // Test filtering by 3 columns.
        assertFalse(cacheGrpView.stream().noneMatch(
            row -> cacheId.equals(row.get(0)) && "0".equals(row.get(1)) && "0".equals(row.get(3))));

        // Test filtering with invalid cache group id.
        assertTrue(cacheGrpView.stream().noneMatch(row -> "-1".equals(row.get(0))));

        // Test filtering with invalid partition id.
        assertTrue(cacheGrpView.stream().noneMatch(row -> "-1".equals(row.get(1))));

        // Test filtering with invalid bucket number.
        assertTrue(cacheGrpView.stream().noneMatch(row -> "-1".equals(row.get(3))));

        assertFalse(cacheGrpView.stream().noneMatch(
            row -> Integer.parseInt(row.get(4)) > 0 && cacheId.equals(row.get(0))));
        assertFalse(cacheGrpView.stream().noneMatch(
            row -> Integer.parseInt(row.get(5)) > 0 && cacheId.equals(row.get(0))));
        assertFalse(cacheGrpView.stream().noneMatch(
            row -> Integer.parseInt(row.get(6)) > 0 && cacheId.equals(row.get(0))));

        assertFalse(dataRegionView.stream().noneMatch(row -> row.get(0).startsWith(DATA_REGION_NAME)));
        assertTrue(dataRegionView.stream().noneMatch(
            row -> row.get(0).startsWith(DATA_REGION_NAME) && Integer.parseInt(row.get(4)) > 0));
    }
    finally {
        dbMgr.enableCheckpoints(true).get();
    }

    ignite0.cluster().state(INACTIVE);
    ignite0.cluster().state(ACTIVE);

    IgniteCache<Integer, Integer> cacheInMemory = ignite0.getOrCreateCache(
        new CacheConfiguration<Integer, Integer>().setName("cacheFLInMemory").setDataRegionName(DATA_REGION_NAME));

    cacheInMemory.put(0, 0);

    // After deactivation/activation a new view for data region page lists should be created;
    // check that the new view correctly reflects changes in the free lists.
    assertTrue(systemView(ignite0, DATA_REGION_PAGE_LIST_VIEW).stream().noneMatch(
        row -> row.get(0).startsWith(DATA_REGION_NAME) && Integer.parseInt(row.get(4)) > 0));
}
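The systemView helper used above reads the view through the test's management API; the same page-list views are also exposed in the SYS SQL schema. Here is a sketch of reading one via SQL; the view name CACHE_GROUP_PAGE_LISTS is an assumption based on Ignite's usual view-naming convention, so verify it against your version.

import java.util.List;
import org.apache.ignite.Ignite;
import org.apache.ignite.cache.query.SqlFieldsQuery;

/** Illustrative SQL access to the same page-list system views (view name assumed). */
class PageListViewQuerySketch {
    static void printPageLists(Ignite ignite) {
        // CACHE_GROUP_PAGE_LISTS is the assumed SQL name backing CACHE_GRP_PAGE_LIST_VIEW.
        List<List<?>> rows = ignite.cache("cacheFL")
            .query(new SqlFieldsQuery("SELECT * FROM SYS.CACHE_GROUP_PAGE_LISTS"))
            .getAll();

        for (List<?> row : rows)
            System.out.println(row);
    }
}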