Use of org.h2.table.Plan in project ignite by apache.
Class GridReduceQueryExecutor, method query.
/**
* @param schemaName Schema name.
* @param qry Query.
* @param keepBinary Keep binary.
* @param enforceJoinOrder Enforce join order of tables.
* @param timeoutMillis Timeout in milliseconds.
* @param cancel Query cancel.
* @param params Query parameters.
* @param parts Partitions.
* @param lazy Lazy execution flag.
* @return Rows iterator.
*/
public Iterator<List<?>> query(String schemaName, final GridCacheTwoStepQuery qry, boolean keepBinary, boolean enforceJoinOrder, int timeoutMillis, GridQueryCancel cancel, Object[] params, final int[] parts, boolean lazy) {
if (F.isEmpty(params))
params = EMPTY_PARAMS;
final boolean isReplicatedOnly = qry.isReplicatedOnly();
// Fail if all caches are replicated and explicit partitions are set.
for (int attempt = 0; ; attempt++) {
if (attempt != 0) {
try {
// Wait for exchange.
Thread.sleep(attempt * 10);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new CacheException("Query was interrupted.", e);
}
}
final long qryReqId = qryIdGen.incrementAndGet();
final ReduceQueryRun r = new ReduceQueryRun(qryReqId, qry.originalSql(), schemaName, h2.connectionForSchema(schemaName), qry.mapQueries().size(), qry.pageSize(), U.currentTimeMillis(), cancel);
AffinityTopologyVersion topVer = h2.readyTopologyVersion();
// Check if topology is changed while retrying on locked topology.
if (h2.serverTopologyChanged(topVer) && ctx.cache().context().lockedTopologyVersion(null) != null) {
throw new CacheException(new TransactionException("Server topology is changed during query " + "execution inside a transaction. It's recommended to rollback and retry transaction."));
}
List<Integer> cacheIds = qry.cacheIds();
Collection<ClusterNode> nodes;
// Explicit partition mapping for unstable topology.
Map<ClusterNode, IntArray> partsMap = null;
// Explicit partitions mapping for query.
Map<ClusterNode, IntArray> qryMap = null;
// Partitions are not supported for queries over all replicated caches.
if (parts != null) {
boolean replicatedOnly = true;
for (Integer cacheId : cacheIds) {
if (!cacheContext(cacheId).isReplicated()) {
replicatedOnly = false;
break;
}
}
if (replicatedOnly)
throw new CacheException("Partitions are not supported for replicated caches");
}
if (qry.isLocal())
nodes = singletonList(ctx.discovery().localNode());
else {
NodesForPartitionsResult nodesParts = nodesForPartitions(cacheIds, topVer, parts, isReplicatedOnly);
nodes = nodesParts.nodes();
partsMap = nodesParts.partitionsMap();
qryMap = nodesParts.queryPartitionsMap();
if (nodes == null)
// Retry.
continue;
assert !nodes.isEmpty();
if (isReplicatedOnly || qry.explain()) {
ClusterNode locNode = ctx.discovery().localNode();
// Always prefer local node if possible.
if (nodes.contains(locNode))
nodes = singletonList(locNode);
else {
// Select random data node to run query on a replicated data or
// get EXPLAIN PLAN from a single node.
nodes = singletonList(F.rand(nodes));
}
}
}
int tblIdx = 0;
final boolean skipMergeTbl = !qry.explain() && qry.skipMergeTable();
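// One merge index segment per query-parallelism segment of the first partitioned cache;
// EXPLAIN and replicated-only queries always use a single segment.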
final int segmentsPerIndex = qry.explain() || isReplicatedOnly ? 1 : findFirstPartitioned(cacheIds).config().getQueryParallelism();
int replicatedQrysCnt = 0;
final Collection<ClusterNode> finalNodes = nodes;
for (GridCacheSqlQuery mapQry : qry.mapQueries()) {
GridMergeIndex idx;
if (!skipMergeTbl) {
GridMergeTable tbl;
try {
tbl = createMergeTable(r.connection(), mapQry, qry.explain());
} catch (IgniteCheckedException e) {
throw new IgniteException(e);
}
idx = tbl.getMergeIndex();
fakeTable(r.connection(), tblIdx++).innerTable(tbl);
} else
idx = GridMergeIndexUnsorted.createDummy(ctx);
// If this map query touches only replicated tables, run it on a single node only.
if (!mapQry.isPartitioned()) {
ClusterNode node = F.rand(nodes);
mapQry.node(node.id());
replicatedQrysCnt++;
// Replicated tables can have only 1 segment.
idx.setSources(singletonList(node), 1);
} else
idx.setSources(nodes, segmentsPerIndex);
idx.setPageSize(r.pageSize());
r.indexes().add(idx);
}
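// Expect one reply per node per segment for each partitioned map query, plus one reply
// per replicated-only map query; a fully replicated query needs just a single reply.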
r.latch(new CountDownLatch(isReplicatedOnly ? 1 : (r.indexes().size() - replicatedQrysCnt) * nodes.size() * segmentsPerIndex + replicatedQrysCnt));
runs.put(qryReqId, r);
boolean release = true;
try {
cancel.checkCancelled();
if (ctx.clientDisconnected()) {
throw new CacheException("Query was cancelled, client node disconnected.", new IgniteClientDisconnectedException(ctx.cluster().clientReconnectFuture(), "Client node disconnected."));
}
List<GridCacheSqlQuery> mapQrys = qry.mapQueries();
if (qry.explain()) {
mapQrys = new ArrayList<>(qry.mapQueries().size());
for (GridCacheSqlQuery mapQry : qry.mapQueries())
    mapQrys.add(new GridCacheSqlQuery("EXPLAIN " + mapQry.query()).parameterIndexes(mapQry.parameterIndexes()));
}
final boolean distributedJoins = qry.distributedJoins();
cancel.set(new Runnable() {
@Override
public void run() {
send(finalNodes, new GridQueryCancelRequest(qryReqId), null, false);
}
});
boolean retry = false;
// Always enforce join order on map side to have consistent behavior.
int flags = GridH2QueryRequest.FLAG_ENFORCE_JOIN_ORDER;
if (distributedJoins)
flags |= GridH2QueryRequest.FLAG_DISTRIBUTED_JOINS;
if (qry.isLocal())
flags |= GridH2QueryRequest.FLAG_IS_LOCAL;
if (qry.explain())
flags |= GridH2QueryRequest.FLAG_EXPLAIN;
if (isReplicatedOnly)
flags |= GridH2QueryRequest.FLAG_REPLICATED;
if (lazy && mapQrys.size() == 1)
flags |= GridH2QueryRequest.FLAG_LAZY;
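// Build a single map request shared by all nodes; when explicit partitions were passed,
// ExplicitPartitionsSpecializer (used in send() below) narrows the partition set per target node.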
GridH2QueryRequest req = new GridH2QueryRequest()
    .requestId(qryReqId)
    .topologyVersion(topVer)
    .pageSize(r.pageSize())
    .caches(qry.cacheIds())
    .tables(distributedJoins ? qry.tables() : null)
    .partitions(convert(partsMap))
    .queries(mapQrys)
    .parameters(params)
    .flags(flags)
    .timeout(timeoutMillis)
    .schemaName(schemaName);
if (send(nodes, req, parts == null ? null : new ExplicitPartitionsSpecializer(qryMap), false)) {
awaitAllReplies(r, nodes, cancel);
Object state = r.state();
if (state != null) {
if (state instanceof CacheException) {
CacheException err = (CacheException) state;
if (err.getCause() instanceof IgniteClientDisconnectedException)
throw err;
if (wasCancelled(err))
// Throw correct exception.
throw new QueryCancelledException();
throw new CacheException("Failed to run map query remotely." + err.getMessage(), err);
}
if (state instanceof AffinityTopologyVersion) {
retry = true;
// If remote node asks us to retry then we have outdated full partition map.
h2.awaitForReadyTopologyVersion((AffinityTopologyVersion) state);
}
}
} else
// Send failed.
retry = true;
Iterator<List<?>> resIter = null;
if (!retry) {
if (skipMergeTbl) {
resIter = new GridMergeIndexIterator(this, finalNodes, r, qryReqId, qry.distributedJoins());
release = false;
} else {
cancel.checkCancelled();
UUID locNodeId = ctx.localNodeId();
H2Utils.setupConnection(r.connection(), false, enforceJoinOrder);
GridH2QueryContext.set(new GridH2QueryContext(locNodeId, locNodeId, qryReqId, REDUCE).pageSize(r.pageSize()).distributedJoinMode(OFF));
try {
if (qry.explain())
return explainPlan(r.connection(), qry, params);
GridCacheSqlQuery rdc = qry.reduceQuery();
ResultSet res = h2.executeSqlQueryWithTimer(r.connection(), rdc.query(), F.asList(rdc.parameters(params)),
    false, // The statement will cache some extra thread local objects.
    timeoutMillis, cancel);
resIter = new H2FieldsIterator(res);
} finally {
GridH2QueryContext.clearThreadLocal();
}
}
}
if (retry) {
if (Thread.currentThread().isInterrupted())
throw new IgniteInterruptedCheckedException("Query was interrupted.");
continue;
}
return new GridQueryCacheObjectsIterator(resIter, h2.objectContext(), keepBinary);
} catch (IgniteCheckedException | RuntimeException e) {
release = true;
U.closeQuiet(r.connection());
if (e instanceof CacheException) {
if (wasCancelled((CacheException) e))
throw new CacheException("Failed to run reduce query locally.", new QueryCancelledException());
throw (CacheException) e;
}
Throwable cause = e;
if (e instanceof IgniteCheckedException) {
Throwable disconnectedErr = ((IgniteCheckedException) e).getCause(IgniteClientDisconnectedException.class);
if (disconnectedErr != null)
cause = disconnectedErr;
}
throw new CacheException("Failed to run reduce query locally.", cause);
} finally {
if (release) {
releaseRemoteResources(finalNodes, r, qryReqId, qry.distributedJoins());
if (!skipMergeTbl) {
for (int i = 0, mapQrys = qry.mapQueries().size(); i < mapQrys; i++) // Drop all merge tables.
fakeTable(null, i).innerTable(null);
}
}
}
}
}
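For context, a hedged sketch of how this reduce path is typically reached from the public API: a SqlFieldsQuery issued against a cache is split into map queries and reduced by the method above. The cache and table names are illustrative assumptions, and the setters shown map onto the arguments of query(...) (enforceJoinOrder, timeoutMillis, lazy, parts).

import java.util.List;
import java.util.concurrent.TimeUnit;
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.query.FieldsQueryCursor;
import org.apache.ignite.cache.query.SqlFieldsQuery;

public class ReduceQueryUsageSketch {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            // Assumed pre-configured cache backing the Person table.
            IgniteCache<Integer, Object> cache = ignite.cache("Person");

            SqlFieldsQuery qry = new SqlFieldsQuery("SELECT name, age FROM Person WHERE age > ?")
                .setArgs(30)
                .setEnforceJoinOrder(true)                 // -> enforceJoinOrder argument.
                .setTimeout(5_000, TimeUnit.MILLISECONDS)  // -> timeoutMillis argument.
                .setLazy(true)                             // -> lazy flag (only honored for a single map query).
                .setPartitions(0, 1, 2);                   // -> parts argument (partitioned caches only).

            try (FieldsQueryCursor<List<?>> cur = cache.query(qry)) {
                for (List<?> row : cur)
                    System.out.println(row);
            }
        }
    }
}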
Use of org.h2.table.Plan in project ignite by apache.
Class GridReduceQueryExecutor, method planColumns.
/**
* @return Columns.
*/
private static ArrayList<Column> planColumns() {
ArrayList<Column> res = new ArrayList<>(1);
res.add(new Column("PLAN", Value.STRING));
return res;
}
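Reusing the cache handle from the sketch above, a hedged illustration of where this single PLAN column surfaces: an EXPLAIN issued through the SQL API comes back as rows whose only column is the plan text (the table name is an assumption).

// Each returned row carries the H2 plan text in the single PLAN column built above.
FieldsQueryCursor<List<?>> plan = cache.query(
    new SqlFieldsQuery("EXPLAIN SELECT name FROM Person WHERE age > ?").setArgs(30));

for (List<?> row : plan)
    System.out.println(row.get(0));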
Use of org.h2.table.Plan in project ignite by apache.
Class UpdatePlanBuilder, method planForBulkLoad.
/**
* Prepare update plan for COPY command (AKA bulk load).
*
* @param cmd Bulk load command.
* @param tbl Table to load data into.
* @return The update plan for this command.
* @throws IgniteCheckedException if failed.
*/
@SuppressWarnings("ConstantConditions")
public static UpdatePlan planForBulkLoad(SqlBulkLoadCommand cmd, GridH2Table tbl) throws IgniteCheckedException {
GridH2RowDescriptor desc = tbl.rowDescriptor();
if (desc == null)
throw new IgniteSQLException("Row descriptor undefined for table '" + tbl.getName() + "'", IgniteQueryErrorCode.NULL_TABLE_DESCRIPTOR);
GridCacheContext<?, ?> cctx = desc.context();
List<String> cols = cmd.columns();
if (cols == null)
throw new IgniteSQLException("Columns are not defined", IgniteQueryErrorCode.NULL_TABLE_DESCRIPTOR);
String[] colNames = new String[cols.size()];
int[] colTypes = new int[cols.size()];
int keyColIdx = -1;
int valColIdx = -1;
boolean hasKeyProps = false;
boolean hasValProps = false;
for (int i = 0; i < cols.size(); i++) {
String colName = cols.get(i);
colNames[i] = colName;
Column h2Col = tbl.getColumn(colName);
colTypes[i] = h2Col.getType();
int colId = h2Col.getColumnId();
if (desc.isKeyColumn(colId)) {
keyColIdx = i;
continue;
}
if (desc.isValueColumn(colId)) {
valColIdx = i;
continue;
}
GridQueryProperty prop = desc.type().property(colName);
assert prop != null : "Property '" + colName + "' not found.";
if (prop.key())
hasKeyProps = true;
else
hasValProps = true;
}
KeyValueSupplier keySupplier = createSupplier(cctx, desc.type(), keyColIdx, hasKeyProps, true, false);
KeyValueSupplier valSupplier = createSupplier(cctx, desc.type(), valColIdx, hasValProps, false, false);
return new UpdatePlan(UpdateMode.BULK_LOAD, tbl, colNames, colTypes, keySupplier, valSupplier, keyColIdx, valColIdx, null, true, null, 0, null, null);
}
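A hedged sketch of the SQL that ends up in planForBulkLoad: the COPY command submitted through the JDBC thin driver. The connection URL, file path, table and column names are illustrative assumptions.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class BulkLoadSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1/");
             Statement stmt = conn.createStatement()) {
            // Each CSV row is converted to a key-value pair by the suppliers
            // created in planForBulkLoad and streamed into the Person cache.
            int loaded = stmt.executeUpdate(
                "COPY FROM '/data/people.csv' INTO Person (id, name, city) FORMAT CSV");

            System.out.println("Rows loaded: " + loaded);
        }
    }
}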
Use of org.h2.table.Plan in project ignite by apache.
Class UpdatePlanBuilder, method planForUpdate.
/**
* Prepare update plan for UPDATE or DELETE.
*
* @param stmt UPDATE or DELETE statement.
* @param loc Local query flag.
* @param idx Indexing.
* @param conn Connection.
* @param fieldsQuery Original query.
* @param errKeysPos Index at which to inject the parameter for re-run keys, or null if this is not a re-run plan.
* @return Update plan.
* @throws IgniteCheckedException if failed.
*/
private static UpdatePlan planForUpdate(GridSqlStatement stmt, boolean loc, IgniteH2Indexing idx, @Nullable Connection conn, @Nullable SqlFieldsQuery fieldsQuery, @Nullable Integer errKeysPos) throws IgniteCheckedException {
GridSqlElement target;
FastUpdate fastUpdate;
UpdateMode mode;
if (stmt instanceof GridSqlUpdate) {
// Let's verify that user is not trying to mess with key's columns directly
verifyUpdateColumns(stmt);
GridSqlUpdate update = (GridSqlUpdate) stmt;
target = update.target();
fastUpdate = DmlAstUtils.getFastUpdateArgs(update);
mode = UpdateMode.UPDATE;
} else if (stmt instanceof GridSqlDelete) {
GridSqlDelete del = (GridSqlDelete) stmt;
target = del.from();
fastUpdate = DmlAstUtils.getFastDeleteArgs(del);
mode = UpdateMode.DELETE;
} else
throw new IgniteSQLException("Unexpected DML operation [cls=" + stmt.getClass().getName() + ']', IgniteQueryErrorCode.UNEXPECTED_OPERATION);
GridSqlTable tbl = DmlAstUtils.gridTableForElement(target);
GridH2Table h2Tbl = tbl.dataTable();
GridH2RowDescriptor desc = h2Tbl.rowDescriptor();
if (desc == null)
throw new IgniteSQLException("Row descriptor undefined for table '" + h2Tbl.getName() + "'", IgniteQueryErrorCode.NULL_TABLE_DESCRIPTOR);
if (fastUpdate != null) {
return new UpdatePlan(mode, h2Tbl, null, fastUpdate, null);
} else {
GridSqlSelect sel;
if (stmt instanceof GridSqlUpdate) {
List<GridSqlColumn> updatedCols = ((GridSqlUpdate) stmt).cols();
int valColIdx = -1;
String[] colNames = new String[updatedCols.size()];
int[] colTypes = new int[updatedCols.size()];
for (int i = 0; i < updatedCols.size(); i++) {
colNames[i] = updatedCols.get(i).columnName();
colTypes[i] = updatedCols.get(i).resultType().type();
Column col = updatedCols.get(i).column();
if (desc.isValueColumn(col.getColumnId()))
valColIdx = i;
}
boolean hasNewVal = (valColIdx != -1);
// Statement updates distinct properties if it does not have _val in the updated columns list,
// or if the list contains columns other than _val (i.e. has more than one element).
boolean hasProps = !hasNewVal || updatedCols.size() > 1;
// Index of new _val in results of SELECT
if (hasNewVal)
valColIdx += 2;
int newValColIdx = (hasNewVal ? valColIdx : 1);
KeyValueSupplier valSupplier = createSupplier(desc.context(), desc.type(), newValColIdx, hasProps, false, true);
sel = DmlAstUtils.selectForUpdate((GridSqlUpdate) stmt, errKeysPos);
String selectSql = sel.getSQL();
DmlDistributedPlanInfo distributed = F.isEmpty(selectSql) ? null : checkPlanCanBeDistributed(idx, conn, fieldsQuery, loc, selectSql, tbl.dataTable().cacheName());
return new UpdatePlan(UpdateMode.UPDATE, h2Tbl, colNames, colTypes, null, valSupplier, -1, valColIdx, selectSql, false, null, 0, null, distributed);
} else {
sel = DmlAstUtils.selectForDelete((GridSqlDelete) stmt, errKeysPos);
String selectSql = sel.getSQL();
DmlDistributedPlanInfo distributed = F.isEmpty(selectSql) ? null : checkPlanCanBeDistributed(idx, conn, fieldsQuery, loc, selectSql, tbl.dataTable().cacheName());
return new UpdatePlan(UpdateMode.DELETE, h2Tbl, selectSql, null, distributed);
}
}
}
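For orientation, a hedged sketch (reusing the cache handle from the first example) of the two statement shapes this method distinguishes: a fast update keyed directly on _key, for which DmlAstUtils.getFastUpdateArgs returns a non-null FastUpdate and no SELECT is generated, versus a general update rewritten into a SELECT that feeds the plan. Table and column names are assumptions.

// Fast path: equality filter on _key, handled without building a re-run SELECT.
cache.query(new SqlFieldsQuery(
    "UPDATE Person SET name = ? WHERE _key = ?").setArgs("Ann", 1)).getAll();

// General path: rewritten by DmlAstUtils.selectForUpdate into a SELECT driving the
// UpdatePlan (and possibly distributed if checkPlanCanBeDistributed allows it).
cache.query(new SqlFieldsQuery(
    "UPDATE Person SET name = ? WHERE age > ?").setArgs("Ann", 30)).getAll();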
Use of org.h2.table.Plan in project ignite by apache.
Class GridH2CollocationModel, method joinedWithCollocated.
/**
* @param f Filter.
* @return Affinity join type.
*/
@SuppressWarnings("ForLoopReplaceableByForEach")
private Affinity joinedWithCollocated(int f) {
TableFilter tf = childFilters[f];
GridH2Table tbl = (GridH2Table) tf.getTable();
if (validate) {
if (tbl.rowDescriptor().context().customAffinityMapper())
throw customAffinityError(tbl.cacheName());
if (F.isEmpty(tf.getIndexConditions())) {
throw new CacheException("Failed to prepare distributed join query: " + "join condition does not use index [joinedCache=" + tbl.cacheName() + ", plan=" + tf.getSelect().getPlanSQL() + ']');
}
}
IndexColumn affCol = tbl.getAffinityKeyColumn();
boolean affKeyCondFound = false;
if (affCol != null) {
ArrayList<IndexCondition> idxConditions = tf.getIndexConditions();
int affColId = affCol.column.getColumnId();
for (int i = 0; i < idxConditions.size(); i++) {
IndexCondition c = idxConditions.get(i);
int colId = c.getColumn().getColumnId();
int cmpType = c.getCompareType();
if ((cmpType == Comparison.EQUAL || cmpType == Comparison.EQUAL_NULL_SAFE) && (colId == affColId || tbl.rowDescriptor().isKeyColumn(colId)) && c.isEvaluatable()) {
affKeyCondFound = true;
Expression exp = c.getExpression();
exp = exp.getNonAliasExpression();
if (exp instanceof ExpressionColumn) {
ExpressionColumn expCol = (ExpressionColumn) exp;
// This is one of our previous joins.
TableFilter prevJoin = expCol.getTableFilter();
if (prevJoin != null) {
GridH2CollocationModel cm = child(indexOf(prevJoin), true);
// If the previous joined filter is a subquery (view), the matched column can not be trusted:
// a view may select different affinity columns from different tables.
if (cm != null && !cm.view) {
Type t = cm.type(true);
if (t.isPartitioned() && t.isCollocated() && isAffinityColumn(prevJoin, expCol, validate))
return Affinity.COLLOCATED_JOIN;
}
}
}
}
}
}
return affKeyCondFound ? Affinity.HAS_AFFINITY_CONDITION : Affinity.NONE;
}
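A hedged example of a query that passes through this validation (table and column names are assumptions): with distributed joins enabled, an indexed equality between a column and the other table's key or affinity column is classified as a collocated join, while a join whose condition uses no index fails with the "join condition does not use index" error thrown above.

SqlFieldsQuery join = new SqlFieldsQuery(
    "SELECT p.name, c.name FROM Person p JOIN Company c ON p.companyId = c._key")
    .setDistributedJoins(true); // Triggers collocation analysis on the map nodes.

cache.query(join).getAll();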