Use of org.h2.command.dml.Explain in project ignite by apache.
The class GridSqlDelete, method getSQL:
/**
 * {@inheritDoc}
 */
@Override
public String getSQL() {
    StatementBuilder buff = new StatementBuilder(explain() ? "EXPLAIN " : "");

    buff.append("DELETE").append("\nFROM ").append(from.getSQL());

    if (where != null)
        buff.append("\nWHERE ").append(StringUtils.unEnclose(where.getSQL()));

    if (limit != null)
        buff.append("\nLIMIT (").append(StringUtils.unEnclose(limit.getSQL())).append(')');

    return buff.toString();
}
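For illustration, here is a minimal standalone sketch of the same builder calls applied to plain strings, showing how the explain() flag only prefixes the generated statement. The buildDeleteSql helper and the PERSON table are invented for the example; org.h2.util.StatementBuilder and StringUtils are the H2 utilities already used above (present in the H2 builds this code targets).

// Standalone sketch, not Ignite code: buildDeleteSql and the PERSON table are hypothetical.
import org.h2.util.StatementBuilder;
import org.h2.util.StringUtils;

public class DeleteSqlSketch {
    static String buildDeleteSql(boolean explain, String from, String where, String limit) {
        StatementBuilder buff = new StatementBuilder(explain ? "EXPLAIN " : "");
        buff.append("DELETE").append("\nFROM ").append(from);
        if (where != null)
            buff.append("\nWHERE ").append(StringUtils.unEnclose(where));
        if (limit != null)
            buff.append("\nLIMIT (").append(StringUtils.unEnclose(limit)).append(')');
        return buff.toString();
    }

    public static void main(String[] args) {
        // Prints the three-line statement: EXPLAIN DELETE / FROM PERSON / WHERE ID = 1
        System.out.println(buildDeleteSql(true, "PERSON", "(ID = 1)", null));
    }
}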
Use of org.h2.command.dml.Explain in project ignite by apache.
The class GridSqlQuerySplitter, method split:
/**
 * @param conn Connection.
 * @param prepared Prepared.
 * @param params Parameters.
 * @param collocatedGrpBy Whether the query has collocated GROUP BY keys.
 * @param distributedJoins If distributed joins enabled.
 * @param enforceJoinOrder Enforce join order.
 * @param h2 Indexing.
 * @return Two step query.
 * @throws SQLException If failed.
 * @throws IgniteCheckedException If failed.
 */
public static GridCacheTwoStepQuery split(Connection conn, Prepared prepared, Object[] params, boolean collocatedGrpBy,
    boolean distributedJoins, boolean enforceJoinOrder, IgniteH2Indexing h2) throws SQLException, IgniteCheckedException {
    if (params == null)
        params = GridCacheSqlQuery.EMPTY_PARAMS;

    // Here we will just do initial query parsing. Do not use optimized
    // subqueries because we do not have unique FROM aliases yet.
    GridSqlQuery qry = parse(prepared, false);

    String originalSql = qry.getSQL();

    // debug("ORIGINAL", originalSql);
    final boolean explain = qry.explain();

    qry.explain(false);

    GridSqlQuerySplitter splitter = new GridSqlQuerySplitter(params, collocatedGrpBy, h2.kernalContext());

    // Normalization will generate unique aliases for all the table filters in FROM.
    // Also it will collect all tables and schemas from the query.
    splitter.normalizeQuery(qry);

    // debug("NORMALIZED", qry.getSQL());
    // Here we will have correct normalized AST with optimized join order.
    // The distributedJoins parameter is ignored because it is not relevant for
    // the REDUCE query optimization.
    qry = parse(optimize(h2, conn, qry.getSQL(), params, false, enforceJoinOrder), true);

    // Do the actual query split. We will update the original query AST, need to be careful.
    splitter.splitQuery(qry);

    // We must have at least one map query.
    assert !F.isEmpty(splitter.mapSqlQrys) : "map";

    // We must have a reduce query.
    assert splitter.rdcSqlQry != null : "rdc";

    // If distributed joins are enabled, optimize all MAP side queries to get a correct join order
    // and check whether we actually need distributed joins at all.
    if (distributedJoins) {
        boolean allCollocated = true;

        for (GridCacheSqlQuery mapSqlQry : splitter.mapSqlQrys) {
            Prepared prepared0 = optimize(h2, conn, mapSqlQry.query(), mapSqlQry.parameters(params), true, enforceJoinOrder);

            allCollocated &= isCollocated((Query)prepared0);

            mapSqlQry.query(parse(prepared0, true).getSQL());
        }

        // We do not need distributed joins if all MAP queries are collocated.
        if (allCollocated)
            distributedJoins = false;
    }

    // Setup resulting two step query and return it.
    GridCacheTwoStepQuery twoStepQry = new GridCacheTwoStepQuery(originalSql, splitter.tbls);

    twoStepQry.reduceQuery(splitter.rdcSqlQry);

    for (GridCacheSqlQuery mapSqlQry : splitter.mapSqlQrys)
        twoStepQry.addMapQuery(mapSqlQry);

    twoStepQry.skipMergeTable(splitter.rdcQrySimple);
    twoStepQry.explain(explain);
    twoStepQry.distributedJoins(distributedJoins);

    // all map queries must have non-empty derivedPartitions to use this feature.
    twoStepQry.derivedPartitions(mergePartitionsFromMultipleQueries(twoStepQry.mapQueries()));

    return twoStepQry;
}
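The distributed-joins branch is the subtle part: joins stay distributed only if at least one MAP query turns out to be non-collocated after optimization. Below is a small self-contained sketch of that decision; the MapQuery interface and the needDistributedJoins helper are hypothetical stand-ins, not Ignite types.

// Hypothetical sketch, not Ignite API: MapQuery and needDistributedJoins stand in for
// GridCacheSqlQuery plus the optimize()/isCollocated() calls made in the loop above.
import java.util.List;

public class CollocationCheckSketch {
    interface MapQuery {
        boolean isCollocated();
    }

    static boolean needDistributedJoins(boolean distributedJoins, List<MapQuery> mapQrys) {
        if (!distributedJoins)
            return false;

        boolean allCollocated = true;

        for (MapQuery qry : mapQrys)
            allCollocated &= qry.isCollocated();

        // If every MAP query is collocated, distributed joins add nothing and are switched off.
        return !allCollocated;
    }

    public static void main(String[] args) {
        // One non-collocated MAP query is enough to keep distributed joins enabled: prints true.
        System.out.println(needDistributedJoins(true, List.<MapQuery>of(() -> true, () -> false)));
    }
}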
Use of org.h2.command.dml.Explain in project ignite by apache.
The class GridSqlSelect, method getSQL:
/**
 * {@inheritDoc}
 */
@Override
public String getSQL() {
    StatementBuilder buff = new StatementBuilder(explain() ? "EXPLAIN SELECT" : "SELECT");

    if (distinct)
        buff.append(" DISTINCT");

    for (GridSqlAst expression : columns(true)) {
        buff.appendExceptFirst(",");
        buff.append('\n');
        buff.append(expression.getSQL());
    }

    if (from != null)
        buff.append("\nFROM ").append(from.getSQL());

    if (where != null)
        buff.append("\nWHERE ").append(StringUtils.unEnclose(where.getSQL()));

    if (grpCols != null) {
        buff.append("\nGROUP BY ");

        buff.resetCount();

        for (int grpCol : grpCols) {
            buff.appendExceptFirst(", ");
            addAlias(buff, cols.get(grpCol));
        }
    }

    if (havingCol >= 0) {
        buff.append("\nHAVING ");
        addAlias(buff, cols.get(havingCol));
    }

    getSortLimitSQL(buff);

    return buff.toString();
}
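The separator handling relies on two small StatementBuilder helpers: appendExceptFirst() skips the separator before the first element, and resetCount() restarts that behaviour for the next clause. Here is a standalone sketch of how the SELECT list and the GROUP BY list come out; the column and table names are invented, only the H2 utility class is real.

// Standalone illustration, not Ignite code: shows the appendExceptFirst()/resetCount() pattern
// used above, where the separator is skipped for the first element of each clause.
import org.h2.util.StatementBuilder;

public class SelectSqlSketch {
    public static void main(String[] args) {
        StatementBuilder buff = new StatementBuilder("EXPLAIN SELECT");

        for (String col : new String[] {"NAME", "COUNT(*)"}) {
            buff.appendExceptFirst(",");
            buff.append('\n').append(col);
        }

        buff.append("\nFROM PERSON");
        buff.append("\nGROUP BY ");
        buff.resetCount();

        for (String grpCol : new String[] {"NAME"}) {
            buff.appendExceptFirst(", ");
            buff.append(grpCol);
        }

        // Prints: EXPLAIN SELECT / NAME, / COUNT(*) / FROM PERSON / GROUP BY NAME
        System.out.println(buff.toString());
    }
}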
Use of org.h2.command.dml.Explain in project h2database by h2database.
The class CommandRemote, method sendParameters:
private void sendParameters(Transfer transfer) throws IOException {
    int len = parameters.size();
    transfer.writeInt(len);
    for (ParameterInterface p : parameters) {
        Value pVal = p.getParamValue();
        // An unbound parameter is only acceptable for an EXPLAIN; send it as SQL NULL.
        if (pVal == null && cmdType == EXPLAIN) {
            pVal = ValueNull.INSTANCE;
        }
        transfer.writeValue(pVal);
    }
}
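The only EXPLAIN-specific behaviour here is the NULL substitution: an EXPLAIN can be sent while its parameters were never bound, so a null value is shipped as SQL NULL instead of failing. A tiny sketch of that rule in isolation; the outgoingValue helper and the EXPLAIN constant value are hypothetical, only ValueNull is the real H2 class.

// Minimal sketch of the substitution rule, not H2 API: outgoingValue() and the EXPLAIN constant
// are stand-ins for CommandRemote's internals; only org.h2.value.ValueNull is the real class.
import org.h2.value.Value;
import org.h2.value.ValueNull;

public class ExplainParamSketch {
    static final int EXPLAIN = 1; // placeholder for the command type constant used above

    static Value outgoingValue(Value paramVal, int cmdType) {
        if (paramVal == null && cmdType == EXPLAIN)
            return ValueNull.INSTANCE; // unbound parameter of an EXPLAIN goes out as SQL NULL
        return paramVal;               // any other command must have the parameter bound already
    }

    public static void main(String[] args) {
        // Prints NULL: the unset parameter is still representable on the wire for EXPLAIN.
        System.out.println(outgoingValue(null, EXPLAIN).getSQL());
    }
}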
Use of org.h2.command.dml.Explain in project h2database by h2database.
The class Parser, method parseExplain:
private Explain parseExplain() {
    Explain command = new Explain(session);
    if (readIf("ANALYZE")) {
        command.setExecuteCommand(true);
    } else {
        if (readIf("PLAN")) {
            readIf("FOR");
        }
    }
    if (isToken("SELECT") || isToken("FROM") || isToken("(") || isToken("WITH")) {
        Query query = parseSelect();
        query.setNeverLazy(true);
        command.setCommand(query);
    } else if (readIf("DELETE")) {
        command.setCommand(parseDelete());
    } else if (readIf("UPDATE")) {
        command.setCommand(parseUpdate());
    } else if (readIf("INSERT")) {
        command.setCommand(parseInsert());
    } else if (readIf("MERGE")) {
        command.setCommand(parseMerge());
    } else {
        throw getSyntaxError();
    }
    return command;
}
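A quick usage sketch of the syntax variants these branches accept (EXPLAIN, EXPLAIN PLAN FOR, and EXPLAIN ANALYZE), run against a throwaway in-memory database; the PERSON table and the statements are invented to exercise the parser paths, and the H2 driver is assumed to be on the classpath.

// Usage sketch, assuming an in-memory H2 database; table and statements are invented.
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class ExplainSyntaxSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:explain");
             Statement stmt = conn.createStatement()) {
            stmt.execute("CREATE TABLE PERSON(ID INT PRIMARY KEY, NAME VARCHAR)");

            String[] variants = {
                "EXPLAIN SELECT * FROM PERSON WHERE ID = 1",     // isToken("SELECT") branch
                "EXPLAIN PLAN FOR UPDATE PERSON SET NAME = 'A'", // optional PLAN [FOR] prefix
                "EXPLAIN ANALYZE SELECT COUNT(*) FROM PERSON"    // setExecuteCommand(true) branch
            };

            for (String sql : variants) {
                try (ResultSet rs = stmt.executeQuery(sql)) {
                    while (rs.next())
                        System.out.println(rs.getString(1)); // the textual query plan
                }
            }
        }
    }
}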