Use of org.h2.util.Cache in project elastic-core-maven by OrdinaryDude.
The class DbShellServlet, method doPost.
@Override
protected void doPost(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
    resp.setHeader("Cache-Control", "no-cache, no-store, must-revalidate, private");
    resp.setHeader("Pragma", "no-cache");
    resp.setDateHeader("Expires", 0);
    if (!API.isAllowed(req.getRemoteHost())) {
        resp.sendError(HttpServletResponse.SC_FORBIDDEN);
        return;
    }
    String body = null;
    if (!API.disableAdminPassword) {
        if (API.adminPassword.isEmpty()) {
            body = errorNoPasswordIsConfigured;
        } else {
            try {
                API.verifyPassword(req);
                if ("true".equals(req.getParameter("showShell"))) {
                    body = form.replace("{adminPassword}", URLEncoder.encode(req.getParameter("adminPassword"), "UTF-8"));
                }
            } catch (ParameterException exc) {
                String desc = (String) ((JSONObject) JSONValue.parse(JSON.toString(exc.getErrorResponse()))).get("errorDescription");
                body = String.format(passwordFormTemplate, "<p style=\"color:red\">" + desc + "</p>");
            }
        }
    }
    if (body != null) {
        try (PrintStream out = new PrintStream(resp.getOutputStream())) {
            out.print(header);
            out.print(body);
            out.print(footer);
        }
        return;
    }
    String line = Convert.nullToEmpty(req.getParameter("line"));
    try (PrintStream out = new PrintStream(resp.getOutputStream())) {
        out.println("\n> " + line);
        try {
            Shell shell = new Shell();
            shell.setErr(out);
            shell.setOut(out);
            shell.runTool(Db.db.getConnection(), "-sql", line);
        } catch (SQLException e) {
            out.println(e.toString());
        }
    }
}
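A minimal standalone sketch of the same org.h2.tools.Shell usage (the in-memory JDBC URL and the sample query are assumptions, not part of the servlet): runTool(connection, "-sql", ...) executes a single statement against an existing connection and writes the result to the streams configured with setOut/setErr.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import org.h2.tools.Shell;

public class ShellSketch {
    public static void main(String[] args) throws SQLException {
        // Hypothetical in-memory database; the servlet uses the node's own connection instead.
        try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:demo")) {
            Shell shell = new Shell();
            // Route normal and error output to the same stream, as the servlet does with its PrintStream.
            shell.setOut(System.out);
            shell.setErr(System.out);
            shell.runTool(conn, "-sql", "SELECT 1 AS ONE");
        }
    }
}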
Use of org.h2.util.Cache in project ignite by apache.
The class GridReduceQueryExecutor, method stableDataNodesMap.
/**
* @param topVer Topology version.
* @param cctx Cache context.
* @param parts Partitions.
*/
private Map<ClusterNode, IntArray> stableDataNodesMap(AffinityTopologyVersion topVer, final GridCacheContext<?, ?> cctx, @Nullable final int[] parts) {
    Map<ClusterNode, IntArray> mapping = new HashMap<>();
    // Explicit partitions mapping is not applicable to replicated cache.
    if (cctx.isReplicated()) {
        for (ClusterNode clusterNode : cctx.affinity().assignment(topVer).nodes())
            mapping.put(clusterNode, null);
        return mapping;
    }
    List<List<ClusterNode>> assignment = cctx.affinity().assignment(topVer).assignment();
    boolean needPartsFilter = parts != null;
    GridIntIterator iter = needPartsFilter ? new GridIntList(parts).iterator() : U.forRange(0, cctx.affinity().partitions());
    while (iter.hasNext()) {
        int partId = iter.next();
        List<ClusterNode> partNodes = assignment.get(partId);
        if (!partNodes.isEmpty()) {
            ClusterNode prim = partNodes.get(0);
            if (!needPartsFilter) {
                mapping.put(prim, null);
                continue;
            }
            IntArray partIds = mapping.get(prim);
            if (partIds == null) {
                partIds = new IntArray();
                mapping.put(prim, partIds);
            }
            partIds.add(partId);
        }
    }
    return mapping;
}
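The map built above pairs each primary node with either an explicit list of partitions or null, where null means "route the whole node, no partition filter". A simplified, self-contained sketch of that bucketing idea, using plain JDK types as stand-ins (string node IDs and integer lists are assumptions replacing Ignite's ClusterNode and H2's IntArray):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

final class PartitionBucketingSketch {
    /** assignment: partition -> owning nodes, primary first; parts: explicit partitions, or null for all. */
    static Map<String, List<Integer>> bucketByPrimary(List<List<String>> assignment, int[] parts) {
        Map<String, List<Integer>> mapping = new HashMap<>();
        boolean needPartsFilter = parts != null;
        int[] partIds = needPartsFilter ? parts : range(assignment.size());
        for (int partId : partIds) {
            List<String> owners = assignment.get(partId);
            if (owners.isEmpty())
                continue; // partition has no owner at this topology version
            String primary = owners.get(0);
            if (!needPartsFilter) {
                mapping.put(primary, null); // null = no explicit partition filter for this node
                continue;
            }
            mapping.computeIfAbsent(primary, n -> new ArrayList<>()).add(partId);
        }
        return mapping;
    }

    private static int[] range(int n) {
        int[] r = new int[n];
        for (int i = 0; i < n; i++)
            r[i] = i;
        return r;
    }
}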
Use of org.h2.util.Cache in project ignite by apache.
The class GridReduceQueryExecutor, method query.
/**
* @param schemaName Schema name.
* @param qry Query.
* @param keepBinary Keep binary.
* @param enforceJoinOrder Enforce join order of tables.
* @param timeoutMillis Timeout in milliseconds.
* @param cancel Query cancel.
* @param params Query parameters.
* @param parts Partitions.
* @param lazy Lazy execution flag.
* @return Rows iterator.
*/
public Iterator<List<?>> query(String schemaName, final GridCacheTwoStepQuery qry, boolean keepBinary, boolean enforceJoinOrder, int timeoutMillis, GridQueryCancel cancel, Object[] params, final int[] parts, boolean lazy) {
    if (F.isEmpty(params))
        params = EMPTY_PARAMS;
    final boolean isReplicatedOnly = qry.isReplicatedOnly();
    // Fail if all caches are replicated and explicit partitions are set.
    for (int attempt = 0; ; attempt++) {
        if (attempt != 0) {
            try {
                // Wait for exchange.
                Thread.sleep(attempt * 10);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                throw new CacheException("Query was interrupted.", e);
            }
        }
        final long qryReqId = qryIdGen.incrementAndGet();
        final ReduceQueryRun r = new ReduceQueryRun(qryReqId, qry.originalSql(), schemaName, h2.connectionForSchema(schemaName), qry.mapQueries().size(), qry.pageSize(), U.currentTimeMillis(), cancel);
        AffinityTopologyVersion topVer = h2.readyTopologyVersion();
        // Check if topology is changed while retrying on locked topology.
        if (h2.serverTopologyChanged(topVer) && ctx.cache().context().lockedTopologyVersion(null) != null) {
            throw new CacheException(new TransactionException("Server topology is changed during query " + "execution inside a transaction. It's recommended to rollback and retry transaction."));
        }
        List<Integer> cacheIds = qry.cacheIds();
        Collection<ClusterNode> nodes;
        // Explicit partition mapping for unstable topology.
        Map<ClusterNode, IntArray> partsMap = null;
        // Explicit partitions mapping for query.
        Map<ClusterNode, IntArray> qryMap = null;
        // Partitions are not supported for queries over all replicated caches.
        if (parts != null) {
            boolean replicatedOnly = true;
            for (Integer cacheId : cacheIds) {
                if (!cacheContext(cacheId).isReplicated()) {
                    replicatedOnly = false;
                    break;
                }
            }
            if (replicatedOnly)
                throw new CacheException("Partitions are not supported for replicated caches");
        }
        if (qry.isLocal())
            nodes = singletonList(ctx.discovery().localNode());
        else {
            NodesForPartitionsResult nodesParts = nodesForPartitions(cacheIds, topVer, parts, isReplicatedOnly);
            nodes = nodesParts.nodes();
            partsMap = nodesParts.partitionsMap();
            qryMap = nodesParts.queryPartitionsMap();
            if (nodes == null)
                // Retry.
                continue;
            assert !nodes.isEmpty();
            if (isReplicatedOnly || qry.explain()) {
                ClusterNode locNode = ctx.discovery().localNode();
                // Always prefer local node if possible.
                if (nodes.contains(locNode))
                    nodes = singletonList(locNode);
                else {
                    // Select random data node to run query on a replicated data or
                    // get EXPLAIN PLAN from a single node.
                    nodes = singletonList(F.rand(nodes));
                }
            }
        }
        int tblIdx = 0;
        final boolean skipMergeTbl = !qry.explain() && qry.skipMergeTable();
        final int segmentsPerIndex = qry.explain() || isReplicatedOnly ? 1 : findFirstPartitioned(cacheIds).config().getQueryParallelism();
        int replicatedQrysCnt = 0;
        final Collection<ClusterNode> finalNodes = nodes;
        for (GridCacheSqlQuery mapQry : qry.mapQueries()) {
            GridMergeIndex idx;
            if (!skipMergeTbl) {
                GridMergeTable tbl;
                try {
                    tbl = createMergeTable(r.connection(), mapQry, qry.explain());
                } catch (IgniteCheckedException e) {
                    throw new IgniteException(e);
                }
                idx = tbl.getMergeIndex();
                fakeTable(r.connection(), tblIdx++).innerTable(tbl);
            } else
                idx = GridMergeIndexUnsorted.createDummy(ctx);
            // If the query has only replicated tables, we have to run it on a single node only.
            if (!mapQry.isPartitioned()) {
                ClusterNode node = F.rand(nodes);
                mapQry.node(node.id());
                replicatedQrysCnt++;
                // Replicated tables can have only 1 segment.
                idx.setSources(singletonList(node), 1);
            } else
                idx.setSources(nodes, segmentsPerIndex);
            idx.setPageSize(r.pageSize());
            r.indexes().add(idx);
        }
        r.latch(new CountDownLatch(isReplicatedOnly ? 1 : (r.indexes().size() - replicatedQrysCnt) * nodes.size() * segmentsPerIndex + replicatedQrysCnt));
        runs.put(qryReqId, r);
        boolean release = true;
        try {
            cancel.checkCancelled();
            if (ctx.clientDisconnected()) {
                throw new CacheException("Query was cancelled, client node disconnected.", new IgniteClientDisconnectedException(ctx.cluster().clientReconnectFuture(), "Client node disconnected."));
            }
            List<GridCacheSqlQuery> mapQrys = qry.mapQueries();
            if (qry.explain()) {
                mapQrys = new ArrayList<>(qry.mapQueries().size());
                for (GridCacheSqlQuery mapQry : qry.mapQueries())
                    mapQrys.add(new GridCacheSqlQuery("EXPLAIN " + mapQry.query()).parameterIndexes(mapQry.parameterIndexes()));
            }
            final boolean distributedJoins = qry.distributedJoins();
            cancel.set(new Runnable() {
                @Override
                public void run() {
                    send(finalNodes, new GridQueryCancelRequest(qryReqId), null, false);
                }
            });
            boolean retry = false;
            // Always enforce join order on map side to have consistent behavior.
            int flags = GridH2QueryRequest.FLAG_ENFORCE_JOIN_ORDER;
            if (distributedJoins)
                flags |= GridH2QueryRequest.FLAG_DISTRIBUTED_JOINS;
            if (qry.isLocal())
                flags |= GridH2QueryRequest.FLAG_IS_LOCAL;
            if (qry.explain())
                flags |= GridH2QueryRequest.FLAG_EXPLAIN;
            if (isReplicatedOnly)
                flags |= GridH2QueryRequest.FLAG_REPLICATED;
            if (lazy && mapQrys.size() == 1)
                flags |= GridH2QueryRequest.FLAG_LAZY;
            GridH2QueryRequest req = new GridH2QueryRequest().requestId(qryReqId).topologyVersion(topVer).pageSize(r.pageSize()).caches(qry.cacheIds()).tables(distributedJoins ? qry.tables() : null).partitions(convert(partsMap)).queries(mapQrys).parameters(params).flags(flags).timeout(timeoutMillis).schemaName(schemaName);
            if (send(nodes, req, parts == null ? null : new ExplicitPartitionsSpecializer(qryMap), false)) {
                awaitAllReplies(r, nodes, cancel);
                Object state = r.state();
                if (state != null) {
                    if (state instanceof CacheException) {
                        CacheException err = (CacheException) state;
                        if (err.getCause() instanceof IgniteClientDisconnectedException)
                            throw err;
                        if (wasCancelled(err))
                            // Throw correct exception.
                            throw new QueryCancelledException();
                        throw new CacheException("Failed to run map query remotely." + err.getMessage(), err);
                    }
                    if (state instanceof AffinityTopologyVersion) {
                        retry = true;
                        // If remote node asks us to retry then we have outdated full partition map.
                        h2.awaitForReadyTopologyVersion((AffinityTopologyVersion) state);
                    }
                }
            } else
                // Send failed.
                retry = true;
            Iterator<List<?>> resIter = null;
            if (!retry) {
                if (skipMergeTbl) {
                    resIter = new GridMergeIndexIterator(this, finalNodes, r, qryReqId, qry.distributedJoins());
                    release = false;
                } else {
                    cancel.checkCancelled();
                    UUID locNodeId = ctx.localNodeId();
                    H2Utils.setupConnection(r.connection(), false, enforceJoinOrder);
                    GridH2QueryContext.set(new GridH2QueryContext(locNodeId, locNodeId, qryReqId, REDUCE).pageSize(r.pageSize()).distributedJoinMode(OFF));
                    try {
                        if (qry.explain())
                            return explainPlan(r.connection(), qry, params);
                        GridCacheSqlQuery rdc = qry.reduceQuery();
                        // The statement will cache some extra thread local objects.
                        ResultSet res = h2.executeSqlQueryWithTimer(r.connection(), rdc.query(), F.asList(rdc.parameters(params)), false, timeoutMillis, cancel);
                        resIter = new H2FieldsIterator(res);
                    } finally {
                        GridH2QueryContext.clearThreadLocal();
                    }
                }
            }
            if (retry) {
                if (Thread.currentThread().isInterrupted())
                    throw new IgniteInterruptedCheckedException("Query was interrupted.");
                continue;
            }
            return new GridQueryCacheObjectsIterator(resIter, h2.objectContext(), keepBinary);
        } catch (IgniteCheckedException | RuntimeException e) {
            release = true;
            U.closeQuiet(r.connection());
            if (e instanceof CacheException) {
                if (wasCancelled((CacheException) e))
                    throw new CacheException("Failed to run reduce query locally.", new QueryCancelledException());
                throw (CacheException) e;
            }
            Throwable cause = e;
            if (e instanceof IgniteCheckedException) {
                Throwable disconnectedErr = ((IgniteCheckedException) e).getCause(IgniteClientDisconnectedException.class);
                if (disconnectedErr != null)
                    cause = disconnectedErr;
            }
            throw new CacheException("Failed to run reduce query locally.", cause);
        } finally {
            if (release) {
                releaseRemoteResources(finalNodes, r, qryReqId, qry.distributedJoins());
                if (!skipMergeTbl) {
                    // Drop all merge tables.
                    for (int i = 0, mapQrys = qry.mapQueries().size(); i < mapQrys; i++)
                        fakeTable(null, i).innerTable(null);
                }
            }
        }
    }
}
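The body of query() is one large retry loop: whenever a remote node reports a newer AffinityTopologyVersion or the request cannot be sent, the whole attempt is repeated after a short, linearly growing pause, and thread interruption is converted into an exception. A minimal sketch of that control flow (the names and the RetryException sentinel are assumptions, not Ignite API):

import java.util.concurrent.Callable;

final class RetryOnTopologyChangeSketch {
    /** Sentinel an attempt throws to request another pass of the loop. */
    static final class RetryException extends RuntimeException { }

    static <T> T runWithRetry(Callable<T> attemptBody) throws Exception {
        for (int attempt = 0; ; attempt++) {
            if (attempt != 0) {
                try {
                    Thread.sleep(attempt * 10L); // linear backoff between attempts, mirroring query()
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    throw new IllegalStateException("Query was interrupted.", e);
                }
            }
            try {
                return attemptBody.call();
            } catch (RetryException ignored) {
                // Topology changed or send failed: fall through and try again.
            }
        }
    }
}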
Use of org.h2.util.Cache in project ignite by apache.
The class IgniteH2Indexing, method stop.
/**
* {@inheritDoc}
*/
@Override
public void stop() throws IgniteCheckedException {
    if (log.isDebugEnabled())
        log.debug("Stopping cache query index...");
    mapQryExec.cancelLazyWorkers();
    for (Connection c : conns.values())
        U.close(c, log);
    conns.clear();
    schemas.clear();
    cacheName2schema.clear();
    try (Connection c = DriverManager.getConnection(dbUrl);
         Statement s = c.createStatement()) {
        s.execute("SHUTDOWN");
    } catch (SQLException e) {
        U.error(log, "Failed to shutdown database.", e);
    }
    if (stmtCacheCleanupTask != null)
        stmtCacheCleanupTask.close();
    if (connCleanupTask != null)
        connCleanupTask.close();
    GridH2QueryContext.clearLocalNodeStop(nodeId);
    if (log.isDebugEnabled())
        log.debug("Cache query index stopped.");
}
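An in-memory H2 database that is kept alive across connections (for example with DB_CLOSE_DELAY=-1, one plausible ingredient of the DB_OPTIONS used below in start()) remains in memory until SHUTDOWN is executed or the JVM exits, which is why stop() opens one last connection just to issue that statement. A minimal standalone sketch (the URL and options are assumptions):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

public class ShutdownSketch {
    public static void main(String[] args) {
        String dbUrl = "jdbc:h2:mem:example;DB_CLOSE_DELAY=-1"; // hypothetical URL
        try (Connection c = DriverManager.getConnection(dbUrl);
             Statement s = c.createStatement()) {
            s.execute("SHUTDOWN"); // closes the in-memory database and frees its memory
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }
}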
Use of org.h2.util.Cache in project ignite by apache.
The class IgniteH2Indexing, method start.
/**
* {@inheritDoc}
*/
@SuppressWarnings({ "NonThreadSafeLazyInitialization", "deprecation" })
@Override
public void start(GridKernalContext ctx, GridSpinBusyLock busyLock) throws IgniteCheckedException {
    if (log.isDebugEnabled())
        log.debug("Starting cache query index...");
    this.busyLock = busyLock;
    qryIdGen = new AtomicLong();
    if (SysProperties.serializeJavaObject) {
        U.warn(log, "Serialization of Java objects in H2 was enabled.");
        SysProperties.serializeJavaObject = false;
    }
    String dbName = (ctx != null ? ctx.localNodeId() : UUID.randomUUID()).toString();
    dbUrl = "jdbc:h2:mem:" + dbName + DB_OPTIONS;
    org.h2.Driver.load();
    try {
        if (getString(IGNITE_H2_DEBUG_CONSOLE) != null) {
            Connection c = DriverManager.getConnection(dbUrl);
            int port = getInteger(IGNITE_H2_DEBUG_CONSOLE_PORT, 0);
            WebServer webSrv = new WebServer();
            Server web = new Server(webSrv, "-webPort", Integer.toString(port));
            web.start();
            String url = webSrv.addSession(c);
            U.quietAndInfo(log, "H2 debug console URL: " + url);
            try {
                Server.openBrowser(url);
            } catch (Exception e) {
                U.warn(log, "Failed to open browser: " + e.getMessage());
            }
        }
    } catch (SQLException e) {
        throw new IgniteCheckedException(e);
    }
    if (ctx == null) {
        // This is allowed in some tests.
        nodeId = UUID.randomUUID();
        marshaller = new JdkMarshaller();
    } else {
        this.ctx = ctx;
        schemas.put(QueryUtils.DFLT_SCHEMA, new H2Schema(QueryUtils.DFLT_SCHEMA));
        valCtx = new CacheQueryObjectValueContext(ctx);
        nodeId = ctx.localNodeId();
        marshaller = ctx.config().getMarshaller();
        mapQryExec = new GridMapQueryExecutor(busyLock);
        rdcQryExec = new GridReduceQueryExecutor(qryIdGen, busyLock);
        mapQryExec.start(ctx, this);
        rdcQryExec.start(ctx, this);
        stmtCacheCleanupTask = ctx.timeout().schedule(new Runnable() {
            @Override
            public void run() {
                cleanupStatementCache();
            }
        }, CLEANUP_STMT_CACHE_PERIOD, CLEANUP_STMT_CACHE_PERIOD);
        dmlProc = new DmlStatementsProcessor();
        ddlProc = new DdlStatementsProcessor();
        dmlProc.start(ctx, this);
        ddlProc.start(ctx, this);
    }
    if (JdbcUtils.serializer != null)
        U.warn(log, "Custom H2 serialization is already configured, will override.");
    JdbcUtils.serializer = h2Serializer();
    connCleanupTask = ctx.timeout().schedule(new Runnable() {
        @Override
        public void run() {
            cleanupConnections();
        }
    }, CLEANUP_CONNECTIONS_PERIOD, CLEANUP_CONNECTIONS_PERIOD);
}
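The debug-console branch in start() wires up an H2 WebServer by hand; a similar effect can be had through H2's public org.h2.tools.Server API. A minimal sketch (the port choice and the standalone setup are assumptions, not the Ignite configuration):

import org.h2.tools.Server;

public class DebugConsoleSketch {
    public static void main(String[] args) throws Exception {
        org.h2.Driver.load();
        // "-webPort 0" lets the OS pick a free port; remote access stays disabled by default.
        Server web = Server.createWebServer("-webPort", "0").start();
        System.out.println("H2 console URL: " + web.getURL());
        // Log in from the browser with the node's jdbc:h2:mem:... URL (visible only within the same JVM).
        web.stop();
    }
}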