Use of org.h2.store.Page in the Apache Ignite project.
Class GridMapQueryExecutor, method onQueryRequest0.
/**
 * Executes a map-side batch of SQL queries on behalf of a remote reduce node: reserves
 * partitions, snapshots table indexes, runs every query and sends back the first result
 * page, handling the retry / cancellation / error paths along the way.
 *
 * @param node Node that authored the request.
 * @param reqId Request ID.
 * @param segmentId Index segment ID.
 * @param schemaName Schema name.
 * @param qrys Queries to execute.
 * @param cacheIds Caches which will be affected by these queries.
 * @param topVer Topology version ({@code null} means no partition reservation is performed).
 * @param partsMap Partitions map for unstable topology.
 * @param parts Explicit partitions for current node.
 * @param tbls Tables whose indexes are snapshotted for the duration of the request.
 * @param pageSize Page size.
 * @param distributedJoinMode Query distributed join mode.
 * @param enforceJoinOrder Enforce join order flag for the H2 connection.
 * @param replicated Whether this request targets replicated caches only.
 * @param timeout Query timeout (presumably milliseconds — TODO confirm against executeSqlQueryWithTimer).
 * @param params Query parameters.
 */
private void onQueryRequest0(ClusterNode node, long reqId, int segmentId, String schemaName, Collection<GridCacheSqlQuery> qrys, List<Integer> cacheIds, AffinityTopologyVersion topVer, Map<UUID, int[]> partsMap, int[] parts, Collection<QueryTable> tbls, int pageSize, DistributedJoinMode distributedJoinMode, boolean enforceJoinOrder, boolean replicated, int timeout, Object[] params) {
// Prepare to run queries.
// Main cache context is used for result naming and event recording only; may be null.
GridCacheContext<?, ?> mainCctx = !F.isEmpty(cacheIds) ? ctx.cache().context().cacheContext(cacheIds.get(0)) : null;
// Per-node results container, keyed by (reqId, segmentId).
NodeResults nodeRess = resultsForNode(node.id());
QueryResults qr = null;
// Partitions reserved so far. Ownership is transferred to the query context below,
// at which point this reference is nulled so the outer finally does not release twice.
List<GridReservable> reserved = new ArrayList<>();
try {
if (topVer != null) {
// Reserve primary for topology version or explicit partitions.
if (!reservePartitions(cacheIds, topVer, parts, reserved)) {
// Reservation failed (e.g. topology is changing): ask the reducer to retry.
sendRetry(node, reqId, segmentId);
return;
}
}
qr = new QueryResults(reqId, qrys.size(), mainCctx != null ? mainCctx.name() : null);
// A duplicate (reqId, segmentId) registration indicates a protocol violation.
if (nodeRess.put(reqId, segmentId, qr) != null)
throw new IllegalStateException();
// Prepare query context.
GridH2QueryContext qctx = new GridH2QueryContext(ctx.localNodeId(), node.id(), reqId, segmentId, replicated ? REPLICATED : MAP).filter(h2.backupFilter(topVer, parts)).partitionsMap(partsMap).distributedJoinMode(distributedJoinMode).pageSize(pageSize).topologyVersion(topVer).reservations(reserved);
List<GridH2Table> snapshotedTbls = null;
if (!F.isEmpty(tbls)) {
snapshotedTbls = new ArrayList<>(tbls.size());
for (QueryTable tbl : tbls) {
GridH2Table h2Tbl = h2.dataTable(tbl);
Objects.requireNonNull(h2Tbl, tbl.toString());
// Snapshot indexes so the query sees a stable view; released in the finally below.
h2Tbl.snapshotIndexes(qctx, segmentId);
snapshotedTbls.add(h2Tbl);
}
}
Connection conn = h2.connectionForSchema(schemaName);
H2Utils.setupConnection(conn, distributedJoinMode != OFF, enforceJoinOrder);
// Bind the context to the current thread for the duration of query execution.
GridH2QueryContext.set(qctx);
// qctx is set, we have to release reservations inside of it.
reserved = null;
try {
// The request may have been cancelled while we were setting up.
if (nodeRess.cancelled(reqId)) {
GridH2QueryContext.clear(ctx.localNodeId(), node.id(), reqId, qctx.type());
nodeRess.cancelRequest(reqId);
throw new QueryCancelledException();
}
// Run queries.
int qryIdx = 0;
boolean evt = mainCctx != null && ctx.event().isRecordable(EVT_CACHE_QUERY_EXECUTED);
for (GridCacheSqlQuery qry : qrys) {
ResultSet rs = null;
// If we are not the target node for this replicated query, just ignore it.
if (qry.node() == null || (segmentId == 0 && qry.node().equals(ctx.localNodeId()))) {
rs = h2.executeSqlQueryWithTimer(conn, qry.query(), F.asList(qry.parameters(params)), true, timeout, qr.cancels[qryIdx]);
if (evt) {
assert mainCctx != null;
ctx.event().record(new CacheQueryExecutedEvent<>(node, "SQL query executed.", EVT_CACHE_QUERY_EXECUTED, CacheQueryType.SQL.name(), mainCctx.name(), null, qry.query(), null, null, params, node.id(), null));
}
assert rs instanceof JdbcResultSet : rs.getClass();
}
// Register the result; rs may be null for a skipped replicated query.
qr.addResult(qryIdx, qry, node.id(), rs, params);
if (qr.canceled) {
qr.result(qryIdx).close();
throw new QueryCancelledException();
}
// Send the first page.
sendNextPage(nodeRess, node, qr, qryIdx, segmentId, pageSize);
qryIdx++;
}
} finally {
// Unbind the thread-local context; subsequent pages are served by fetch requests.
GridH2QueryContext.clearThreadLocal();
if (distributedJoinMode == OFF)
qctx.clearContext(false);
// Release the index snapshots taken above.
if (!F.isEmpty(snapshotedTbls)) {
for (GridH2Table dataTbl : snapshotedTbls) dataTbl.releaseSnapshots();
}
}
} catch (Throwable e) {
if (qr != null) {
nodeRess.remove(reqId, segmentId, qr);
qr.cancel(false);
}
// A retryable topology-change error results in a retry message instead of a failure.
if (X.hasCause(e, GridH2RetryException.class))
sendRetry(node, reqId, segmentId);
else {
U.error(log, "Failed to execute local query.", e);
sendError(node, reqId, e);
// Errors (OOM, assertion failures, ...) must not be swallowed.
if (e instanceof Error)
throw (Error) e;
}
} finally {
// Only non-null if ownership was never transferred to the query context above.
if (reserved != null) {
// Release reserved partitions.
for (int i = 0; i < reserved.size(); i++) reserved.get(i).release();
}
}
}
Use of org.h2.store.Page in the Apache Ignite project.
Class MapQueryResult, method fetchNextPage.
/**
 * Fetches up to {@code pageSize} rows from the underlying H2 result set into {@code rows}.
 * Must be called either with no lazy worker or on the owning lazy worker thread
 * (enforced by the assertion below).
 *
 * @param rows Collection to fetch into.
 * @param pageSize Page size.
 * @return {@code true} if there are no more rows available.
 */
synchronized boolean fetchNextPage(List<Value[]> rows, int pageSize) {
assert lazyWorker == null || lazyWorker == MapQueryLazyWorker.currentWorker();
// A closed result produces no more rows.
if (closed)
return true;
boolean readEvt = cctx != null && cctx.name() != null && cctx.events().isRecordable(EVT_CACHE_QUERY_OBJECT_READ);
page++;
for (int i = 0; i < pageSize; i++) {
if (!res.next())
return true;
Value[] row = res.currentRow();
if (cpNeeded) {
// Wrap cache-object values so deserialization yields copies (copy-on-read semantics).
boolean copied = false;
for (int j = 0; j < row.length; j++) {
Value val = row[j];
if (val instanceof GridH2ValueCacheObject) {
GridH2ValueCacheObject valCacheObj = (GridH2ValueCacheObject) val;
row[j] = new GridH2ValueCacheObject(valCacheObj.getCacheObject(), h2.objectContext()) {
@Override
public Object getObject() {
// Force a copy when the object is deserialized.
return getObject(true);
}
};
copied = true;
}
}
// If the very first row contained no cache objects, no later row will either,
// so skip the copy check for the remainder of this result.
if (i == 0 && !copied)
// No copy on read caches, skip next checks.
cpNeeded = false;
}
assert row != null;
if (readEvt) {
GridKernalContext ctx = h2.kernalContext();
ctx.event().record(new CacheQueryReadEvent<>(ctx.discovery().localNode(), "SQL fields query result set row read.", EVT_CACHE_QUERY_OBJECT_READ, CacheQueryType.SQL.name(), cctx.name(), null, qry.query(), null, null, params, qrySrcNodeId, null, null, null, null, row(row)));
}
// NOTE(review): adds res.currentRow() rather than the locally wrapped 'row' array —
// presumably currentRow() returns the same (already mutated) array instance; verify
// against the result-set implementation, otherwise the wrapping above is lost.
rows.add(res.currentRow());
}
return false;
}
Use of org.h2.store.Page in the Apache Ignite project.
Class InlineIndexHelper, method compareAsString.
/**
 * Compares an inlined (possibly truncated) UTF-8 string stored at the given page address
 * against the given value, decoding UTF-8 bytes manually to avoid materializing a String.
 *
 * @param pageAddr Page address.
 * @param off Offset.
 * @param v Value to compare.
 * @param ignoreCase {@code True} if a case-insensitive comparison should be used.
 * @return Compare result ({@code -2} means we can't compare, i.e. the inlined prefix is
 *      not sufficient to decide the ordering).
 */
private int compareAsString(long pageAddr, int off, Value v, boolean ignoreCase) {
String s = v.getString();
// Length is kept in the low 15 bits; the high bit presumably flags truncation
// (see isValueFull) — TODO confirm against the inline value layout.
int len1 = PageUtils.getShort(pageAddr, off + 1) & 0x7FFF;
int len2 = s.length();
int c, c2, c3, c4, cntr1 = 0, cntr2 = 0;
char v1, v2;
// Skip length and type byte.
long addr = pageAddr + off + 3;
// Try reading ASCII.
// Fast path: compare byte-per-char while the stored bytes stay in the ASCII range.
while (cntr1 < len1 && cntr2 < len2) {
c = (int) GridUnsafe.getByte(addr) & 0xFF;
if (c > 127)
break;
cntr1++;
addr++;
v1 = (char) c;
v2 = s.charAt(cntr2++);
if (ignoreCase) {
v1 = Character.toUpperCase(v1);
v2 = Character.toUpperCase(v2);
}
if (v1 != v2)
return fixSort(Integer.signum(v1 - v2), sortType());
}
// Slow path: decode multi-byte UTF-8 sequences. The high nibble of the lead byte
// determines the sequence length.
while (cntr1 < len1 && cntr2 < len2) {
c = (int) GridUnsafe.getByte(addr++) & 0xFF;
switch(c >> 4) {
case 0:
case 1:
case 2:
case 3:
case 4:
case 5:
case 6:
case 7:
/* 0xxxxxxx*/
cntr1++;
v1 = (char) c;
break;
case 12:
case 13:
/* 110x xxxx 10xx xxxx*/
cntr1 += 2;
if (cntr1 > len1)
throw new IllegalStateException("Malformed input (partial character at the end).");
c2 = (int) GridUnsafe.getByte(addr++) & 0xFF;
if ((c2 & 0xC0) != 0x80)
throw new IllegalStateException("Malformed input around byte: " + (cntr1 - 2));
c = c & 0x1F;
c = (c << 6) | (c2 & 0x3F);
v1 = (char) c;
break;
case 14:
/* 1110 xxxx 10xx xxxx 10xx xxxx */
cntr1 += 3;
if (cntr1 > len1)
throw new IllegalStateException("Malformed input (partial character at the end).");
c2 = (int) GridUnsafe.getByte(addr++) & 0xFF;
c3 = (int) GridUnsafe.getByte(addr++) & 0xFF;
if (((c2 & 0xC0) != 0x80) || ((c3 & 0xC0) != 0x80))
throw new IllegalStateException("Malformed input around byte: " + (cntr1 - 3));
c = c & 0x0F;
c = (c << 6) | (c2 & 0x3F);
c = (c << 6) | (c3 & 0x3F);
v1 = (char) c;
break;
case 15:
/* 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx */
// Supplementary character: decodes to a surrogate pair, compared inline below.
// NOTE(review): ignoreCase is NOT applied on this branch — surrogate halves are
// compared verbatim; confirm this matches the intended collation semantics.
cntr1 += 4;
if (cntr1 > len1)
throw new IllegalStateException("Malformed input (partial character at the end).");
c2 = (int) GridUnsafe.getByte(addr++) & 0xFF;
c3 = (int) GridUnsafe.getByte(addr++) & 0xFF;
c4 = (int) GridUnsafe.getByte(addr++) & 0xFF;
if (((c & 0xF8) != 0xf0) || ((c2 & 0xC0) != 0x80) || ((c3 & 0xC0) != 0x80) || ((c4 & 0xC0) != 0x80))
throw new IllegalStateException("Malformed input around byte: " + (cntr1 - 4));
c = c & 0x07;
c = (c << 6) | (c2 & 0x3F);
c = (c << 6) | (c3 & 0x3F);
c = (c << 6) | (c4 & 0x3F);
// Subtract 0x010000, c is now 0..fffff (20 bits)
c = c - 0x010000;
// High surrogate.
v1 = (char) (0xD800 + ((c >> 10) & 0x7FF));
v2 = s.charAt(cntr2++);
if (v1 != v2)
return fixSort(Integer.signum(v1 - v2), sortType());
if (cntr2 == len2)
// The string ended in the middle of this character, so the stored value is longer.
// Finish comparison here.
return fixSort(1, sortType());
// Low surrogate.
v1 = (char) (0xDC00 + (c & 0x3FF));
v2 = s.charAt(cntr2++);
if (v1 != v2)
return fixSort(Integer.signum(v1 - v2), sortType());
continue;
default:
/* 10xx xxxx */
// A continuation byte in lead position is malformed input.
throw new IllegalStateException("Malformed input around byte: " + cntr1);
}
v2 = s.charAt(cntr2++);
if (ignoreCase) {
v1 = Character.toUpperCase(v1);
v2 = Character.toUpperCase(v2);
}
if (v1 != v2)
return fixSort(Integer.signum(v1 - v2), sortType());
}
// All compared characters matched: order by which side was exhausted first.
int res = cntr1 == len1 && cntr2 == len2 ? 0 : cntr1 == len1 ? -1 : 1;
if (isValueFull(pageAddr, off))
return fixSort(res, sortType());
// The stored value is truncated: the prefix comparison is only conclusive when the
// prefix alone is already >= the other string.
if (res >= 0)
// b) Even truncated current value is longer, so that it's bigger.
return fixSort(1, sortType());
return -2;
}
Use of org.h2.store.Page in the Apache Ignite project.
Class InlineIndexHelper, method compare.
/**
 * Compares an inlined value stored at the given page address with the given value.
 * First tries a type-specific optimized comparison; otherwise materializes the inlined
 * value and delegates to the supplied comparator. The result is only trusted when the
 * inlined value is complete (or fixed-size), or when a partial compare is conclusive.
 *
 * @param pageAddr Page address.
 * @param off Offset.
 * @param maxSize Maximum size to read.
 * @param v Value to compare.
 * @param comp Comparator.
 * @return Compare result ({@code -2} means we can't compare).
 */
public int compare(long pageAddr, int off, int maxSize, Value v, Comparator<Value> comp) {
    // Optimized path signals "not applicable" with Integer.MIN_VALUE.
    int res = tryCompareOptimized(pageAddr, off, maxSize, v);

    if (res != Integer.MIN_VALUE)
        return res;

    Value inlined = get(pageAddr, off, maxSize);

    // Nothing inlined to compare against.
    if (inlined == null)
        return -2;

    int sign = Integer.signum(comp.compare(inlined, v));

    // Fixed-size types (size > 0) are always stored fully, so the comparator verdict is
    // authoritative; otherwise it is trusted only for a complete value or when a partial
    // compare is provably conclusive.
    if (size > 0 || isValueFull(pageAddr, off) || canRelyOnCompare(sign, inlined, v))
        return fixSort(sign, sortType());

    return -2;
}
Use of org.h2.store.Page in the Apache Ignite project.
Class GridMapQueryExecutor, method onQueryRequest0.
/**
 * Map-side query execution entry point (lazy-capable variant). When {@code lazy} is set
 * and this call is not already running on a lazy worker thread, the request is re-submitted
 * to a dedicated MapQueryLazyWorker; otherwise queries are executed inline: partitions are
 * reserved, each query is run, and the first result page is sent back to the reducer.
 *
 * @param node Node that authored the request.
 * @param reqId Request ID.
 * @param segmentId Index segment ID.
 * @param schemaName Schema name.
 * @param qrys Queries to execute.
 * @param cacheIds Caches which will be affected by these queries.
 * @param topVer Topology version ({@code null} means no partition reservation is performed).
 * @param partsMap Partitions map for unstable topology.
 * @param parts Explicit partitions for current node.
 * @param pageSize Page size.
 * @param distributedJoinMode Query distributed join mode.
 * @param enforceJoinOrder Enforce join order flag for the H2 connection.
 * @param replicated Whether this request targets replicated caches only.
 * @param timeout Query timeout (presumably milliseconds — TODO confirm against executeSqlQueryWithTimer).
 * @param params Query parameters.
 * @param lazy Streaming flag.
 */
private void onQueryRequest0(final ClusterNode node, final long reqId, final int segmentId, final String schemaName, final Collection<GridCacheSqlQuery> qrys, final List<Integer> cacheIds, final AffinityTopologyVersion topVer, final Map<UUID, int[]> partsMap, final int[] parts, final int pageSize, final DistributedJoinMode distributedJoinMode, final boolean enforceJoinOrder, final boolean replicated, final int timeout, final Object[] params, boolean lazy) {
if (lazy && MapQueryLazyWorker.currentWorker() == null) {
// Lazy queries must be re-submitted to dedicated workers.
MapQueryLazyWorkerKey key = new MapQueryLazyWorkerKey(node.id(), reqId, segmentId);
MapQueryLazyWorker worker = new MapQueryLazyWorker(ctx.igniteInstanceName(), key, log, this);
// NOTE(review): the task is submitted before the worker thread is started below —
// presumably the worker queues tasks until its thread runs; confirm in MapQueryLazyWorker.
worker.submit(new Runnable() {
@Override
public void run() {
// Re-entrant call: currentWorker() is now non-null, so execution proceeds inline.
onQueryRequest0(node, reqId, segmentId, schemaName, qrys, cacheIds, topVer, partsMap, parts, pageSize, distributedJoinMode, enforceJoinOrder, replicated, timeout, params, true);
}
});
// Register and start the worker only while the node is not stopping.
if (lazyWorkerBusyLock.enterBusy()) {
try {
MapQueryLazyWorker oldWorker = lazyWorkers.put(key, worker);
if (oldWorker != null)
oldWorker.stop();
IgniteThread thread = new IgniteThread(worker);
thread.start();
} finally {
lazyWorkerBusyLock.leaveBusy();
}
} else
log.info("Ignored query request (node is stopping) [nodeId=" + node.id() + ", reqId=" + reqId + ']');
return;
}
// Prepare to run queries.
// Main cache context is used for result naming and event recording only; may be null.
GridCacheContext<?, ?> mainCctx = !F.isEmpty(cacheIds) ? ctx.cache().context().cacheContext(cacheIds.get(0)) : null;
MapNodeResults nodeRess = resultsForNode(node.id());
MapQueryResults qr = null;
// Partitions reserved so far. Ownership is transferred to the query context below,
// at which point this reference is nulled so the outer finally does not release twice.
List<GridReservable> reserved = new ArrayList<>();
try {
if (topVer != null) {
// Reserve primary for topology version or explicit partitions.
if (!reservePartitions(cacheIds, topVer, parts, reserved)) {
// Unregister lazy worker because re-try may never reach this node again.
if (lazy)
stopAndUnregisterCurrentLazyWorker();
sendRetry(node, reqId, segmentId);
return;
}
}
qr = new MapQueryResults(h2, reqId, qrys.size(), mainCctx, MapQueryLazyWorker.currentWorker());
// A duplicate (reqId, segmentId) registration indicates a protocol violation.
if (nodeRess.put(reqId, segmentId, qr) != null)
throw new IllegalStateException();
// Prepare query context.
GridH2QueryContext qctx = new GridH2QueryContext(ctx.localNodeId(), node.id(), reqId, segmentId, replicated ? REPLICATED : MAP).filter(h2.backupFilter(topVer, parts)).partitionsMap(partsMap).distributedJoinMode(distributedJoinMode).pageSize(pageSize).topologyVersion(topVer).reservations(reserved);
Connection conn = h2.connectionForSchema(schemaName);
H2Utils.setupConnection(conn, distributedJoinMode != OFF, enforceJoinOrder);
// Bind the context to the current thread for the duration of query execution.
GridH2QueryContext.set(qctx);
// qctx is set, we have to release reservations inside of it.
reserved = null;
try {
// The request may have been cancelled while we were setting up.
if (nodeRess.cancelled(reqId)) {
GridH2QueryContext.clear(ctx.localNodeId(), node.id(), reqId, qctx.type());
nodeRess.cancelRequest(reqId);
throw new QueryCancelledException();
}
// Run queries.
int qryIdx = 0;
boolean evt = mainCctx != null && mainCctx.events().isRecordable(EVT_CACHE_QUERY_EXECUTED);
for (GridCacheSqlQuery qry : qrys) {
ResultSet rs = null;
// If we are not the target node for this replicated query, just ignore it.
if (qry.node() == null || (segmentId == 0 && qry.node().equals(ctx.localNodeId()))) {
rs = h2.executeSqlQueryWithTimer(conn, qry.query(), F.asList(qry.parameters(params)), true, timeout, qr.queryCancel(qryIdx));
if (evt) {
// evt implies mainCctx != null (checked above), so mainCctx.name() is safe here.
ctx.event().record(new CacheQueryExecutedEvent<>(node, "SQL query executed.", EVT_CACHE_QUERY_EXECUTED, CacheQueryType.SQL.name(), mainCctx.name(), null, qry.query(), null, null, params, node.id(), null));
}
assert rs instanceof JdbcResultSet : rs.getClass();
}
// Register the result; rs may be null for a skipped replicated query.
qr.addResult(qryIdx, qry, node.id(), rs, params);
if (qr.cancelled()) {
qr.result(qryIdx).close();
throw new QueryCancelledException();
}
// Send the first page.
sendNextPage(nodeRess, node, qr, qryIdx, segmentId, pageSize);
qryIdx++;
}
// All request results are in the memory in result set already, so it's ok to release partitions.
// For lazy execution reservations are kept until the lazy worker finishes streaming.
if (!lazy)
releaseReservations();
} catch (Throwable e) {
// On any failure release reservations immediately and let the outer catch report it.
releaseReservations();
throw e;
}
} catch (Throwable e) {
if (qr != null) {
nodeRess.remove(reqId, segmentId, qr);
qr.cancel(false);
}
// Unregister worker after possible cancellation.
if (lazy)
stopAndUnregisterCurrentLazyWorker();
// A retryable topology-change error results in a retry message instead of a failure.
if (X.hasCause(e, GridH2RetryException.class))
sendRetry(node, reqId, segmentId);
else {
U.error(log, "Failed to execute local query.", e);
sendError(node, reqId, e);
// Errors (OOM, assertion failures, ...) must not be swallowed.
if (e instanceof Error)
throw (Error) e;
}
} finally {
// Only non-null if ownership was never transferred to the query context above.
if (reserved != null) {
// Release reserved partitions.
for (int i = 0; i < reserved.size(); i++) reserved.get(i).release();
}
}
}
Aggregations