Use of org.h2.schema.Schema in project ignite by apache.
The class GridReduceQueryExecutor, method update.
/**
* @param schemaName Schema name.
* @param cacheIds Cache ids.
* @param selectQry Select query.
* @param params SQL parameters.
* @param enforceJoinOrder Enforce join order of tables.
* @param pageSize Page size.
* @param timeoutMillis Timeout.
* @param parts Partitions.
* @param isReplicatedOnly Whether query uses only replicated caches.
* @param cancel Cancel state.
* @return Update result, or {@code null} when some map node doesn't support distributed DML.
*/
public UpdateResult update(String schemaName, List<Integer> cacheIds, String selectQry, Object[] params,
    boolean enforceJoinOrder, int pageSize, int timeoutMillis, final int[] parts, boolean isReplicatedOnly,
    GridQueryCancel cancel) {
    AffinityTopologyVersion topVer = h2.readyTopologyVersion();

    NodesForPartitionsResult nodesParts = nodesForPartitions(cacheIds, topVer, parts, isReplicatedOnly);

    final long reqId = qryIdGen.incrementAndGet();

    final GridRunningQueryInfo qryInfo = new GridRunningQueryInfo(reqId, selectQry, GridCacheQueryType.SQL_FIELDS,
        schemaName, U.currentTimeMillis(), cancel, false);

    Collection<ClusterNode> nodes = nodesParts.nodes();

    if (nodes == null)
        throw new CacheException("Failed to determine nodes participating in the update. " +
            "Retry the update once the topology recovers.");

    if (isReplicatedOnly) {
        ClusterNode locNode = ctx.discovery().localNode();

        if (nodes.contains(locNode))
            nodes = singletonList(locNode);
        else
            nodes = singletonList(F.rand(nodes));
    }

    for (ClusterNode n : nodes) {
        if (!n.version().greaterThanEqual(2, 3, 0)) {
            log.warning("Server-side DML optimization is skipped because a map node does not support it. " +
                "Falling back to normal DML. [node=" + n.id() + ", v=" + n.version() + "].");

            return null;
        }
    }

    final DistributedUpdateRun r = new DistributedUpdateRun(nodes.size(), qryInfo);

    int flags = enforceJoinOrder ? GridH2QueryRequest.FLAG_ENFORCE_JOIN_ORDER : 0;

    if (isReplicatedOnly)
        flags |= GridH2QueryRequest.FLAG_REPLICATED;

    GridH2DmlRequest req = new GridH2DmlRequest()
        .requestId(reqId)
        .topologyVersion(topVer)
        .caches(cacheIds)
        .schemaName(schemaName)
        .query(selectQry)
        .pageSize(pageSize)
        .parameters(params)
        .timeout(timeoutMillis)
        .flags(flags);

    updRuns.put(reqId, r);

    boolean release = false;

    try {
        Map<ClusterNode, IntArray> partsMap = (nodesParts.queryPartitionsMap() != null) ?
            nodesParts.queryPartitionsMap() : nodesParts.partitionsMap();

        ExplicitPartitionsSpecializer partsSpec = (parts == null) ? null :
            new ExplicitPartitionsSpecializer(partsMap);

        final Collection<ClusterNode> finalNodes = nodes;

        cancel.set(new Runnable() {
            @Override public void run() {
                r.future().onCancelled();

                send(finalNodes, new GridQueryCancelRequest(reqId), null, false);
            }
        });

        // send() logs the debug message.
        if (send(nodes, req, partsSpec, false))
            return r.future().get();

        throw new CacheException("Failed to send update request to participating nodes.");
    }
    catch (IgniteCheckedException | RuntimeException e) {
        release = true;

        U.error(log, "Error during update [localNodeId=" + ctx.localNodeId() + "]", e);

        throw new CacheException("Failed to run update. " + e.getMessage(), e);
    }
    finally {
        if (release)
            send(nodes, new GridQueryCancelRequest(reqId), null, false);

        if (!updRuns.remove(reqId, r))
            U.warn(log, "Update run was already removed: " + reqId);
    }
}
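A note on the cancellation hook: cancel.set(...) installs a Runnable that both fails the local future and broadcasts GridQueryCancelRequest to the map nodes. The standalone sketch below mirrors that pattern with plain java.util.concurrent types (AtomicReference and CompletableFuture are stand-ins for GridQueryCancel and the update run's future, not Ignite API):

import java.util.concurrent.CancellationException;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicReference;

public class CancelHookSketch {
    public static void main(String[] args) {
        // Stand-in for GridQueryCancel: a slot holding the cancel action.
        AtomicReference<Runnable> cancelHook = new AtomicReference<>();

        // Stand-in for the distributed update run's future.
        CompletableFuture<String> fut = new CompletableFuture<>();

        // Register the hook before sending the request, as update() does.
        cancelHook.set(() -> {
            fut.completeExceptionally(new CancellationException("Update cancelled"));
            System.out.println("Would send GridQueryCancelRequest to participating nodes here.");
        });

        // Later, a cancelling thread fires the hook exactly once.
        Runnable hook = cancelHook.getAndSet(null);
        if (hook != null)
            hook.run();

        System.out.println("Future cancelled: " + fut.isCompletedExceptionally());
    }
}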
Use of org.h2.schema.Schema in project ignite by apache.
The class GridH2Table, method removeChildrenAndResources.
/** {@inheritDoc} */
@SuppressWarnings("ThrowableResultOfMethodCallIgnored")
@Override
public void removeChildrenAndResources(Session ses) {
    lock(true);

    try {
        super.removeChildrenAndResources(ses);

        // Clear all user indexes registered in the schema.
        while (idxs.size() > sysIdxsCnt) {
            Index idx = idxs.get(sysIdxsCnt);

            if (idx.getName() != null && idx.getSchema().findIndex(ses, idx.getName()) == idx) {
                // This call implicitly removes both idx and its proxy, if any, from idxs.
                database.removeSchemaObject(ses, idx);

                // Call destroy only if we are the one who removed this index from the table.
                if (idx instanceof GridH2IndexBase)
                    ((GridH2IndexBase)idx).destroy(rmIndex);
            }
        }

        if (SysProperties.CHECK) {
            for (SchemaObject obj : database.getAllSchemaObjects(DbObject.INDEX)) {
                Index idx = (Index)obj;

                if (idx.getTable() == this)
                    DbException.throwInternalError("index not dropped: " + idx.getName());
            }
        }

        database.removeMeta(ses, getId());

        invalidate();
    }
    finally {
        unlock(true);
    }
}
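The while loop above advances only through the side effect of database.removeSchemaObject(ses, idx), which removes the index from idxs, so the code re-reads the fixed position sysIdxsCnt rather than iterating. A standalone sketch of that drain-from-a-fixed-index idiom (the list contents and the explicit remove() call are simplified stand-ins for the schema machinery):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class DrainSketch {
    public static void main(String[] args) {
        List<String> idxs = new ArrayList<>(Arrays.asList("_key_PK", "_val_idx", "NAME_IDX", "AGE_IDX"));

        // The first two entries stand in for system indexes and must survive.
        int sysIdxsCnt = 2;

        // Re-read the same position: each removal shifts the next user index into it.
        while (idxs.size() > sysIdxsCnt) {
            String idx = idxs.get(sysIdxsCnt);
            idxs.remove(sysIdxsCnt); // In GridH2Table this happens inside removeSchemaObject().
            System.out.println("Dropped user index: " + idx);
        }

        System.out.println("Remaining system indexes: " + idxs);
    }
}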
Use of org.h2.schema.Schema in project ignite by apache.
The class GridSqlFunction, method getSQL.
/** {@inheritDoc} */
@Override
public String getSQL() {
    StatementBuilder buff = new StatementBuilder();

    if (schema != null)
        buff.append(Parser.quoteIdentifier(schema)).append('.');

    // There is no need to quote the identifier, because H2 never quotes function names when generating plan SQL.
    // On the other hand, quoting identifiers that also serve as keywords (like CURRENT_DATE() and CURRENT_DATE)
    // turns CURRENT_DATE() into "CURRENT_DATE"(), which is not good.
    buff.append(name);

    if (type == CASE) {
        buff.append(' ').append(child().getSQL());

        for (int i = 1, len = size() - 1; i < len; i += 2) {
            buff.append(" WHEN ").append(child(i).getSQL());
            buff.append(" THEN ").append(child(i + 1).getSQL());
        }

        // An even child count means the last child is the ELSE expression.
        if ((size() & 1) == 0)
            buff.append(" ELSE ").append(child(size() - 1).getSQL());

        return buff.append(" END").toString();
    }

    buff.append('(');

    switch (type) {
        case CAST:
        case CONVERT:
            assert size() == 1;

            String castType = resultType().sql();

            assert !F.isEmpty(castType) : castType;

            buff.append(child().getSQL());
            buff.append(type == CAST ? " AS " : ",");
            buff.append(castType);

            break;

        case EXTRACT:
            ValueString v = (ValueString)((GridSqlConst)child(0)).value();

            buff.append(v.getString()).append(" FROM ").append(child(1).getSQL());

            break;

        case TABLE:
            for (int i = 0; i < size(); i++) {
                buff.appendExceptFirst(", ");

                GridSqlElement e = child(i);

                // E.g. id int = ?, name varchar = ('aaa', 'bbb')
                buff.append(Parser.quoteIdentifier(((GridSqlAlias)e).alias()))
                    .append(' ').append(e.resultType().sql()).append('=').append(e.child().getSQL());
            }

            break;

        default:
            for (int i = 0; i < size(); i++) {
                buff.appendExceptFirst(", ");

                buff.append(child(i).getSQL());
            }
    }

    return buff.append(')').toString();
}
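StatementBuilder here is H2's internal helper (org.h2.util.StatementBuilder in the 1.4.x line that Ignite builds against; an assumption worth checking for other H2 versions); its appendExceptFirst() skips the separator on the first call only, which is what produces the comma-separated argument lists above. A minimal sketch:

import org.h2.util.StatementBuilder;

public class StatementBuilderSketch {
    public static void main(String[] args) {
        // Build "MY_FUNC(a, b, c)" the same way getSQL() renders the default case.
        StatementBuilder buff = new StatementBuilder();
        buff.append("MY_FUNC").append('(');

        for (String arg : new String[] {"a", "b", "c"}) {
            buff.appendExceptFirst(", "); // No-op on the first iteration only.
            buff.append(arg);
        }

        System.out.println(buff.append(')').toString()); // MY_FUNC(a, b, c)
    }
}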
Use of org.h2.schema.Schema in project ignite by apache.
The class GridMapQueryExecutor, method onQueryRequest0.
/**
 * @param node Node that authored the request.
 * @param reqId Request ID.
 * @param segmentId Index segment ID.
 * @param schemaName Schema name.
 * @param qrys Queries to execute.
 * @param cacheIds Caches which will be affected by these queries.
 * @param topVer Topology version.
 * @param partsMap Partitions map for unstable topology.
 * @param parts Explicit partitions for current node.
 * @param pageSize Page size.
 * @param distributedJoinMode Query distributed join mode.
 * @param enforceJoinOrder Enforce join order of tables.
 * @param replicated Replicated-only query flag.
 * @param timeout Timeout.
 * @param params Query parameters.
 * @param lazy Lazy execution flag.
 */
private void onQueryRequest0(final ClusterNode node, final long reqId, final int segmentId, final String schemaName,
    final Collection<GridCacheSqlQuery> qrys, final List<Integer> cacheIds, final AffinityTopologyVersion topVer,
    final Map<UUID, int[]> partsMap, final int[] parts, final int pageSize,
    final DistributedJoinMode distributedJoinMode, final boolean enforceJoinOrder, final boolean replicated,
    final int timeout, final Object[] params, boolean lazy) {
    if (lazy && MapQueryLazyWorker.currentWorker() == null) {
        // Lazy queries must be re-submitted to dedicated workers.
        MapQueryLazyWorkerKey key = new MapQueryLazyWorkerKey(node.id(), reqId, segmentId);
        MapQueryLazyWorker worker = new MapQueryLazyWorker(ctx.igniteInstanceName(), key, log, this);

        worker.submit(new Runnable() {
            @Override public void run() {
                onQueryRequest0(node, reqId, segmentId, schemaName, qrys, cacheIds, topVer, partsMap, parts,
                    pageSize, distributedJoinMode, enforceJoinOrder, replicated, timeout, params, true);
            }
        });

        if (lazyWorkerBusyLock.enterBusy()) {
            try {
                MapQueryLazyWorker oldWorker = lazyWorkers.put(key, worker);

                if (oldWorker != null)
                    oldWorker.stop();

                IgniteThread thread = new IgniteThread(worker);

                thread.start();
            }
            finally {
                lazyWorkerBusyLock.leaveBusy();
            }
        }
        else
            log.info("Ignored query request (node is stopping) [nodeId=" + node.id() + ", reqId=" + reqId + ']');

        return;
    }

    // Prepare to run queries.
    GridCacheContext<?, ?> mainCctx =
        !F.isEmpty(cacheIds) ? ctx.cache().context().cacheContext(cacheIds.get(0)) : null;

    MapNodeResults nodeRess = resultsForNode(node.id());

    MapQueryResults qr = null;

    List<GridReservable> reserved = new ArrayList<>();

    try {
        if (topVer != null) {
            // Reserve primary partitions for the topology version or the explicit partition set.
            if (!reservePartitions(cacheIds, topVer, parts, reserved)) {
                // Unregister the lazy worker because the retry may never reach this node again.
                if (lazy)
                    stopAndUnregisterCurrentLazyWorker();

                sendRetry(node, reqId, segmentId);

                return;
            }
        }

        qr = new MapQueryResults(h2, reqId, qrys.size(), mainCctx, MapQueryLazyWorker.currentWorker());

        if (nodeRess.put(reqId, segmentId, qr) != null)
            throw new IllegalStateException();

        // Prepare query context.
        GridH2QueryContext qctx = new GridH2QueryContext(ctx.localNodeId(), node.id(), reqId, segmentId,
            replicated ? REPLICATED : MAP)
            .filter(h2.backupFilter(topVer, parts))
            .partitionsMap(partsMap)
            .distributedJoinMode(distributedJoinMode)
            .pageSize(pageSize)
            .topologyVersion(topVer)
            .reservations(reserved);

        Connection conn = h2.connectionForSchema(schemaName);

        H2Utils.setupConnection(conn, distributedJoinMode != OFF, enforceJoinOrder);

        GridH2QueryContext.set(qctx);

        // qctx is set, so the reservations will be released through it from now on.
        reserved = null;

        try {
            if (nodeRess.cancelled(reqId)) {
                GridH2QueryContext.clear(ctx.localNodeId(), node.id(), reqId, qctx.type());

                nodeRess.cancelRequest(reqId);

                throw new QueryCancelledException();
            }

            // Run queries.
            int qryIdx = 0;

            boolean evt = mainCctx != null && mainCctx.events().isRecordable(EVT_CACHE_QUERY_EXECUTED);

            for (GridCacheSqlQuery qry : qrys) {
                ResultSet rs = null;

                // If we are not the target node for this replicated query, just ignore it.
                if (qry.node() == null || (segmentId == 0 && qry.node().equals(ctx.localNodeId()))) {
                    rs = h2.executeSqlQueryWithTimer(conn, qry.query(), F.asList(qry.parameters(params)),
                        true, timeout, qr.queryCancel(qryIdx));

                    if (evt) {
                        ctx.event().record(new CacheQueryExecutedEvent<>(node, "SQL query executed.",
                            EVT_CACHE_QUERY_EXECUTED, CacheQueryType.SQL.name(), mainCctx.name(), null,
                            qry.query(), null, null, params, node.id(), null));
                    }

                    assert rs instanceof JdbcResultSet : rs.getClass();
                }

                qr.addResult(qryIdx, qry, node.id(), rs, params);

                if (qr.cancelled()) {
                    qr.result(qryIdx).close();

                    throw new QueryCancelledException();
                }

                // Send the first page.
                sendNextPage(nodeRess, node, qr, qryIdx, segmentId, pageSize);

                qryIdx++;
            }

            // All the results are already in memory in the result sets, so it is safe to release the partitions.
            if (!lazy)
                releaseReservations();
        }
        catch (Throwable e) {
            releaseReservations();

            throw e;
        }
    }
    catch (Throwable e) {
        if (qr != null) {
            nodeRess.remove(reqId, segmentId, qr);

            qr.cancel(false);
        }

        // Unregister the worker after a possible cancellation.
        if (lazy)
            stopAndUnregisterCurrentLazyWorker();

        if (X.hasCause(e, GridH2RetryException.class))
            sendRetry(node, reqId, segmentId);
        else {
            U.error(log, "Failed to execute local query.", e);

            sendError(node, reqId, e);

            if (e instanceof Error)
                throw (Error)e;
        }
    }
    finally {
        if (reserved != null) {
            // Release the reserved partitions.
            for (int i = 0; i < reserved.size(); i++)
                reserved.get(i).release();
        }
    }
}
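The lazy branch at the top re-submits the entire request to a dedicated MapQueryLazyWorker and returns; the nested call then passes the currentWorker() check and executes on that thread. The sketch below reproduces the re-submission pattern with a ThreadLocal marker and a plain queue-draining thread standing in for Ignite's worker machinery:

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

public class LazyResubmitSketch {
    private static final ThreadLocal<Boolean> ON_WORKER = ThreadLocal.withInitial(() -> false);
    private static final BlockingQueue<Runnable> TASKS = new LinkedBlockingQueue<>();

    static void onQueryRequest(String qry, boolean lazy) {
        if (lazy && !ON_WORKER.get()) {
            // Not on the dedicated worker yet: re-submit ourselves and bail out.
            TASKS.add(() -> onQueryRequest(qry, true));
            return;
        }
        System.out.println("Executing '" + qry + "' on " + Thread.currentThread().getName());
    }

    public static void main(String[] args) throws InterruptedException {
        Thread worker = new Thread(() -> {
            ON_WORKER.set(true); // Mark this thread as the dedicated worker.
            try {
                while (true)
                    TASKS.take().run();
            }
            catch (InterruptedException ignored) {
                // Worker stopped.
            }
        }, "map-query-lazy-worker");
        worker.start();

        onQueryRequest("SELECT 1", true); // Re-submitted; runs on the worker thread.

        Thread.sleep(200);
        worker.interrupt();
    }
}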
Use of org.h2.schema.Schema in project elastic-core-maven by OrdinaryDude.
The class FullTextTrigger, method search.
/**
* Search the Lucene index
*
* The result set will have the following columns:
* SCHEMA - Schema name (String)
* TABLE - Table name (String)
* COLUMNS - Primary key column names (String[]) - this is always DB_ID
* KEYS - Primary key values (Long[]) - this is always the DB_ID value for the table row
* SCORE - Lucene score (Float)
*
* @param conn SQL connection
* @param schema Schema name
* @param table Table name
* @param queryText Query expression
* @param limit Number of rows to return
* @param offset Offset within the result set
* @return Search results
* @throws SQLException Unable to search the index
*/
public static ResultSet search(Connection conn, String schema, String table, String queryText, int limit, int offset)
    throws SQLException {
    //
    // Get Lucene index access
    //
    getIndexAccess(conn);
    //
    // Create the result set columns
    //
    SimpleResultSet result = new SimpleResultSet();
    result.addColumn("SCHEMA", Types.VARCHAR, 0, 0);
    result.addColumn("TABLE", Types.VARCHAR, 0, 0);
    result.addColumn("COLUMNS", Types.ARRAY, 0, 0);
    result.addColumn("KEYS", Types.ARRAY, 0, 0);
    result.addColumn("SCORE", Types.FLOAT, 0, 0);
    //
    // Perform the search
    //
    // The _QUERY field contains the table and row identification (schema.table;keyName;keyValue)
    // The _TABLE field is used to limit the search results to the current table
    // The _DATA field contains the indexed row data (this is the default search field)
    // The _MODIFIED field contains the row modification time (YYYYMMDDhhmmss) in GMT
    //
    indexLock.readLock().lock();
    try {
        QueryParser parser = new QueryParser("_DATA", analyzer);
        parser.setDateResolution("_MODIFIED", DateTools.Resolution.SECOND);
        parser.setDefaultOperator(QueryParser.Operator.AND);
        Query query = parser.parse("_TABLE:" + schema.toUpperCase() + "." + table.toUpperCase() +
            " AND (" + queryText + ")");
        TopDocs documents = indexSearcher.search(query, limit);
        ScoreDoc[] hits = documents.scoreDocs;
        int resultCount = Math.min(hits.length, (limit == 0 ? hits.length : limit));
        int resultOffset = Math.min(offset, resultCount);
        for (int i = resultOffset; i < resultCount; i++) {
            Document document = indexSearcher.doc(hits[i].doc);
            String[] indexParts = document.get("_QUERY").split(";");
            String[] nameParts = indexParts[0].split("\\.");
            result.addRow(nameParts[0], nameParts[1], new String[] {indexParts[1]},
                new Long[] {Long.parseLong(indexParts[2])}, hits[i].score);
        }
    } catch (ParseException exc) {
        Logger.logDebugMessage("Lucene parse exception for query: " + queryText + "\n" + exc.getMessage());
        throw new SQLException("Lucene parse exception for query: " + queryText + "\n" + exc.getMessage());
    } catch (IOException exc) {
        Logger.logErrorMessage("Unable to search Lucene index", exc);
        throw new SQLException("Unable to search Lucene index", exc);
    } finally {
        indexLock.readLock().unlock();
    }
    return result;
}
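SimpleResultSet is part of H2's public API (org.h2.tools.SimpleResultSet), so the five-column result shape documented above can be exercised standalone. A minimal sketch with one made-up hit row (the PUBLIC/ACCOUNT/DB_ID values are illustrative only):

import java.sql.SQLException;
import java.sql.Types;
import org.h2.tools.SimpleResultSet;

public class SearchResultSketch {
    public static void main(String[] args) throws SQLException {
        SimpleResultSet result = new SimpleResultSet();
        result.addColumn("SCHEMA", Types.VARCHAR, 0, 0);
        result.addColumn("TABLE", Types.VARCHAR, 0, 0);
        result.addColumn("COLUMNS", Types.ARRAY, 0, 0);
        result.addColumn("KEYS", Types.ARRAY, 0, 0);
        result.addColumn("SCORE", Types.FLOAT, 0, 0);

        // One hit, shaped like a parsed "_QUERY" field: schema.table;keyName;keyValue.
        result.addRow("PUBLIC", "ACCOUNT", new String[] {"DB_ID"}, new Long[] {42L}, 0.87f);

        while (result.next()) {
            System.out.println(result.getString("SCHEMA") + "." + result.getString("TABLE")
                + " key=" + ((Object[])result.getObject("KEYS"))[0]
                + " score=" + result.getFloat("SCORE"));
        }
    }
}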