
Example 16 with Plan

use of org.h2.table.Plan in project ignite by apache.

In class GridReduceQueryExecutor, the method query:

/**
 * @param schemaName Schema name.
 * @param qry Query.
 * @param keepBinary Keep binary.
 * @param enforceJoinOrder Enforce join order of tables.
 * @param timeoutMillis Timeout in milliseconds.
 * @param cancel Query cancel.
 * @param params Query parameters.
 * @param parts Partitions.
 * @param lazy Lazy execution flag.
 * @return Rows iterator.
 */
public Iterator<List<?>> query(String schemaName, final GridCacheTwoStepQuery qry, boolean keepBinary, boolean enforceJoinOrder, int timeoutMillis, GridQueryCancel cancel, Object[] params, final int[] parts, boolean lazy) {
    if (F.isEmpty(params))
        params = EMPTY_PARAMS;
    final boolean isReplicatedOnly = qry.isReplicatedOnly();
    // Fail if all caches are replicated and explicit partitions are set.
    for (int attempt = 0; ; attempt++) {
        if (attempt != 0) {
            try {
                // Wait for exchange.
                Thread.sleep(attempt * 10);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                throw new CacheException("Query was interrupted.", e);
            }
        }
        final long qryReqId = qryIdGen.incrementAndGet();
        final ReduceQueryRun r = new ReduceQueryRun(qryReqId, qry.originalSql(), schemaName, h2.connectionForSchema(schemaName), qry.mapQueries().size(), qry.pageSize(), U.currentTimeMillis(), cancel);
        AffinityTopologyVersion topVer = h2.readyTopologyVersion();
        // Check if topology is changed while retrying on locked topology.
        if (h2.serverTopologyChanged(topVer) && ctx.cache().context().lockedTopologyVersion(null) != null) {
            throw new CacheException(new TransactionException("Server topology is changed during query execution inside a transaction. It's recommended to rollback and retry transaction."));
        }
        List<Integer> cacheIds = qry.cacheIds();
        Collection<ClusterNode> nodes;
        // Explicit partition mapping for unstable topology.
        Map<ClusterNode, IntArray> partsMap = null;
        // Explicit partitions mapping for query.
        Map<ClusterNode, IntArray> qryMap = null;
        // Partitions are not supported for queries over all replicated caches.
        if (parts != null) {
            boolean replicatedOnly = true;
            for (Integer cacheId : cacheIds) {
                if (!cacheContext(cacheId).isReplicated()) {
                    replicatedOnly = false;
                    break;
                }
            }
            if (replicatedOnly)
                throw new CacheException("Partitions are not supported for replicated caches");
        }
        if (qry.isLocal())
            nodes = singletonList(ctx.discovery().localNode());
        else {
            NodesForPartitionsResult nodesParts = nodesForPartitions(cacheIds, topVer, parts, isReplicatedOnly);
            nodes = nodesParts.nodes();
            partsMap = nodesParts.partitionsMap();
            qryMap = nodesParts.queryPartitionsMap();
            if (nodes == null)
                // Retry.
                continue;
            assert !nodes.isEmpty();
            if (isReplicatedOnly || qry.explain()) {
                ClusterNode locNode = ctx.discovery().localNode();
                // Always prefer local node if possible.
                if (nodes.contains(locNode))
                    nodes = singletonList(locNode);
                else {
                    // Select random data node to run query on a replicated data or
                    // get EXPLAIN PLAN from a single node.
                    nodes = singletonList(F.rand(nodes));
                }
            }
        }
        int tblIdx = 0;
        final boolean skipMergeTbl = !qry.explain() && qry.skipMergeTable();
        final int segmentsPerIndex = qry.explain() || isReplicatedOnly ? 1 : findFirstPartitioned(cacheIds).config().getQueryParallelism();
        int replicatedQrysCnt = 0;
        final Collection<ClusterNode> finalNodes = nodes;
        for (GridCacheSqlQuery mapQry : qry.mapQueries()) {
            GridMergeIndex idx;
            if (!skipMergeTbl) {
                GridMergeTable tbl;
                try {
                    tbl = createMergeTable(r.connection(), mapQry, qry.explain());
                } catch (IgniteCheckedException e) {
                    throw new IgniteException(e);
                }
                idx = tbl.getMergeIndex();
                fakeTable(r.connection(), tblIdx++).innerTable(tbl);
            } else
                idx = GridMergeIndexUnsorted.createDummy(ctx);
            // If the query has only replicated tables, we have to run it on a single node only.
            if (!mapQry.isPartitioned()) {
                ClusterNode node = F.rand(nodes);
                mapQry.node(node.id());
                replicatedQrysCnt++;
                // Replicated tables can have only 1 segment.
                idx.setSources(singletonList(node), 1);
            } else
                idx.setSources(nodes, segmentsPerIndex);
            idx.setPageSize(r.pageSize());
            r.indexes().add(idx);
        }
        r.latch(new CountDownLatch(isReplicatedOnly ? 1 : (r.indexes().size() - replicatedQrysCnt) * nodes.size() * segmentsPerIndex + replicatedQrysCnt));
        runs.put(qryReqId, r);
        boolean release = true;
        try {
            cancel.checkCancelled();
            if (ctx.clientDisconnected()) {
                throw new CacheException("Query was cancelled, client node disconnected.", new IgniteClientDisconnectedException(ctx.cluster().clientReconnectFuture(), "Client node disconnected."));
            }
            List<GridCacheSqlQuery> mapQrys = qry.mapQueries();
            if (qry.explain()) {
                mapQrys = new ArrayList<>(qry.mapQueries().size());
                for (GridCacheSqlQuery mapQry : qry.mapQueries())
                    mapQrys.add(new GridCacheSqlQuery("EXPLAIN " + mapQry.query()).parameterIndexes(mapQry.parameterIndexes()));
            }
            final boolean distributedJoins = qry.distributedJoins();
            cancel.set(new Runnable() {

                @Override
                public void run() {
                    send(finalNodes, new GridQueryCancelRequest(qryReqId), null, false);
                }
            });
            boolean retry = false;
            // Always enforce join order on map side to have consistent behavior.
            int flags = GridH2QueryRequest.FLAG_ENFORCE_JOIN_ORDER;
            if (distributedJoins)
                flags |= GridH2QueryRequest.FLAG_DISTRIBUTED_JOINS;
            if (qry.isLocal())
                flags |= GridH2QueryRequest.FLAG_IS_LOCAL;
            if (qry.explain())
                flags |= GridH2QueryRequest.FLAG_EXPLAIN;
            if (isReplicatedOnly)
                flags |= GridH2QueryRequest.FLAG_REPLICATED;
            if (lazy && mapQrys.size() == 1)
                flags |= GridH2QueryRequest.FLAG_LAZY;
            GridH2QueryRequest req = new GridH2QueryRequest()
                .requestId(qryReqId)
                .topologyVersion(topVer)
                .pageSize(r.pageSize())
                .caches(qry.cacheIds())
                .tables(distributedJoins ? qry.tables() : null)
                .partitions(convert(partsMap))
                .queries(mapQrys)
                .parameters(params)
                .flags(flags)
                .timeout(timeoutMillis)
                .schemaName(schemaName);
            if (send(nodes, req, parts == null ? null : new ExplicitPartitionsSpecializer(qryMap), false)) {
                awaitAllReplies(r, nodes, cancel);
                Object state = r.state();
                if (state != null) {
                    if (state instanceof CacheException) {
                        CacheException err = (CacheException) state;
                        if (err.getCause() instanceof IgniteClientDisconnectedException)
                            throw err;
                        if (wasCancelled(err))
                            // Throw correct exception.
                            throw new QueryCancelledException();
                        throw new CacheException("Failed to run map query remotely." + err.getMessage(), err);
                    }
                    if (state instanceof AffinityTopologyVersion) {
                        retry = true;
                        // If remote node asks us to retry then we have outdated full partition map.
                        h2.awaitForReadyTopologyVersion((AffinityTopologyVersion) state);
                    }
                }
            } else
                // Send failed.
                retry = true;
            Iterator<List<?>> resIter = null;
            if (!retry) {
                if (skipMergeTbl) {
                    resIter = new GridMergeIndexIterator(this, finalNodes, r, qryReqId, qry.distributedJoins());
                    release = false;
                } else {
                    cancel.checkCancelled();
                    UUID locNodeId = ctx.localNodeId();
                    H2Utils.setupConnection(r.connection(), false, enforceJoinOrder);
                    GridH2QueryContext.set(new GridH2QueryContext(locNodeId, locNodeId, qryReqId, REDUCE).pageSize(r.pageSize()).distributedJoinMode(OFF));
                    try {
                        if (qry.explain())
                            return explainPlan(r.connection(), qry, params);
                        GridCacheSqlQuery rdc = qry.reduceQuery();
                        ResultSet res = h2.executeSqlQueryWithTimer(r.connection(), rdc.query(), F.asList(rdc.parameters(params)),
                            // The statement will cache some extra thread local objects.
                            false, timeoutMillis, cancel);
                        resIter = new H2FieldsIterator(res);
                    } finally {
                        GridH2QueryContext.clearThreadLocal();
                    }
                }
            }
            if (retry) {
                if (Thread.currentThread().isInterrupted())
                    throw new IgniteInterruptedCheckedException("Query was interrupted.");
                continue;
            }
            return new GridQueryCacheObjectsIterator(resIter, h2.objectContext(), keepBinary);
        } catch (IgniteCheckedException | RuntimeException e) {
            release = true;
            U.closeQuiet(r.connection());
            if (e instanceof CacheException) {
                if (wasCancelled((CacheException) e))
                    throw new CacheException("Failed to run reduce query locally.", new QueryCancelledException());
                throw (CacheException) e;
            }
            Throwable cause = e;
            if (e instanceof IgniteCheckedException) {
                Throwable disconnectedErr = ((IgniteCheckedException) e).getCause(IgniteClientDisconnectedException.class);
                if (disconnectedErr != null)
                    cause = disconnectedErr;
            }
            throw new CacheException("Failed to run reduce query locally.", cause);
        } finally {
            if (release) {
                releaseRemoteResources(finalNodes, r, qryReqId, qry.distributedJoins());
                if (!skipMergeTbl) {
                    // Drop all merge tables.
                    for (int i = 0, mapQrys = qry.mapQueries().size(); i < mapQrys; i++)
                        fakeTable(null, i).innerTable(null);
                }
            }
        }
    }
}
Also used : GridQueryCancelRequest(org.apache.ignite.internal.processors.query.h2.twostep.messages.GridQueryCancelRequest) CacheException(javax.cache.CacheException) H2FieldsIterator(org.apache.ignite.internal.processors.query.h2.H2FieldsIterator) GridQueryCacheObjectsIterator(org.apache.ignite.internal.processors.query.GridQueryCacheObjectsIterator) IgniteInterruptedCheckedException(org.apache.ignite.internal.IgniteInterruptedCheckedException) TransactionException(org.apache.ignite.transactions.TransactionException) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) IntArray(org.h2.util.IntArray) IgniteException(org.apache.ignite.IgniteException) ResultSet(java.sql.ResultSet) Collections.singletonList(java.util.Collections.singletonList) GridIntList(org.apache.ignite.internal.util.GridIntList) List(java.util.List) ArrayList(java.util.ArrayList) GridCacheSqlQuery(org.apache.ignite.internal.processors.cache.query.GridCacheSqlQuery) UUID(java.util.UUID) GridH2QueryContext(org.apache.ignite.internal.processors.query.h2.opt.GridH2QueryContext) ClusterNode(org.apache.ignite.cluster.ClusterNode) AffinityTopologyVersion(org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion) IgniteClientDisconnectedException(org.apache.ignite.IgniteClientDisconnectedException) GridH2QueryRequest(org.apache.ignite.internal.processors.query.h2.twostep.msg.GridH2QueryRequest) CountDownLatch(java.util.concurrent.CountDownLatch) QueryCancelledException(org.apache.ignite.cache.query.QueryCancelledException)
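
The method above follows a common pattern: assemble a bitmask of per-request flags, fan the map queries out to the data nodes, and retry with a short linear backoff (attempt * 10 ms) when the topology changed or the send failed. Below is a minimal, self-contained sketch of that flag-and-retry pattern; the flag constants and the attemptSucceeded callback are hypothetical stand-ins for illustration, not the Ignite or H2 API.

import java.util.concurrent.ThreadLocalRandom;
import java.util.function.IntPredicate;

public class ReduceQuerySketch {
    // Hypothetical flag constants, mirroring how GridH2QueryRequest.FLAG_* values are combined above.
    static final int FLAG_ENFORCE_JOIN_ORDER = 1;
    static final int FLAG_DISTRIBUTED_JOINS = 1 << 1;
    static final int FLAG_EXPLAIN = 1 << 2;
    static final int FLAG_REPLICATED = 1 << 3;
    static final int FLAG_LAZY = 1 << 4;

    /** Builds the request flag bitmask the same way the reducer does. */
    static int requestFlags(boolean distributedJoins, boolean explain, boolean replicatedOnly, boolean lazy) {
        int flags = FLAG_ENFORCE_JOIN_ORDER; // Join order is always enforced on the map side.
        if (distributedJoins)
            flags |= FLAG_DISTRIBUTED_JOINS;
        if (explain)
            flags |= FLAG_EXPLAIN;
        if (replicatedOnly)
            flags |= FLAG_REPLICATED;
        if (lazy)
            flags |= FLAG_LAZY;
        return flags;
    }

    /** Retries an attempt with the same linear backoff (attempt * 10 ms) used above. */
    static boolean runWithRetry(IntPredicate attemptSucceeded, int maxAttempts) throws InterruptedException {
        for (int attempt = 0; attempt < maxAttempts; attempt++) {
            if (attempt != 0)
                Thread.sleep(attempt * 10L); // Give the topology a moment to settle before retrying.
            if (attemptSucceeded.test(attempt))
                return true;
        }
        return false;
    }

    public static void main(String[] args) throws InterruptedException {
        int flags = requestFlags(true, false, false, true);
        System.out.println("flags = " + Integer.toBinaryString(flags));

        boolean ok = runWithRetry(attempt -> ThreadLocalRandom.current().nextInt(3) == 0, 10);
        System.out.println("succeeded = " + ok);
    }
}

Keeping the flags in a single int, as GridH2QueryRequest does, keeps the request compact on the wire and makes new options cheap to add.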

Example 17 with Plan

use of org.h2.table.Plan in project ignite by apache.

In class GridReduceQueryExecutor, the method planColumns:

/**
 * @return Columns.
 */
private static ArrayList<Column> planColumns() {
    ArrayList<Column> res = new ArrayList<>(1);
    res.add(new Column("PLAN", Value.STRING));
    return res;
}
Also used : GridSqlSortColumn(org.apache.ignite.internal.processors.query.h2.sql.GridSqlSortColumn) Column(org.h2.table.Column) ArrayList(java.util.ArrayList)
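
planColumns() declares the single STRING column, named PLAN, that holds EXPLAIN output on the reduce side. As a rough illustration of what such a one-column result can look like, here is a small sketch that builds one with H2's SimpleResultSet (which appears in the aggregations below); the plan text in main is made up for the example.

import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Types;

import org.h2.tools.SimpleResultSet;

public class PlanColumnSketch {
    /** Builds a one-column "PLAN" result set with one row per line of EXPLAIN output. */
    static ResultSet planResult(String... planRows) {
        SimpleResultSet rs = new SimpleResultSet();
        rs.addColumn("PLAN", Types.VARCHAR, Integer.MAX_VALUE, 0);
        for (String row : planRows)
            rs.addRow(row);
        return rs;
    }

    public static void main(String[] args) throws SQLException {
        ResultSet rs = planResult("SELECT A.ID FROM PUBLIC.A /* PUBLIC.A.__SCAN_ */");
        while (rs.next())
            System.out.println(rs.getString("PLAN"));
    }
}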

Example 18 with Plan

use of org.h2.table.Plan in project ignite by apache.

In class UpdatePlanBuilder, the method planForBulkLoad:

/**
 * Prepare update plan for COPY command (AKA bulk load).
 *
 * @param cmd Bulk load command.
 * @param tbl Table to load data into.
 * @return The update plan for this command.
 * @throws IgniteCheckedException if failed.
 */
@SuppressWarnings("ConstantConditions")
public static UpdatePlan planForBulkLoad(SqlBulkLoadCommand cmd, GridH2Table tbl) throws IgniteCheckedException {
    GridH2RowDescriptor desc = tbl.rowDescriptor();
    if (desc == null)
        throw new IgniteSQLException("Row descriptor undefined for table '" + tbl.getName() + "'", IgniteQueryErrorCode.NULL_TABLE_DESCRIPTOR);
    GridCacheContext<?, ?> cctx = desc.context();
    List<String> cols = cmd.columns();
    if (cols == null)
        throw new IgniteSQLException("Columns are not defined", IgniteQueryErrorCode.NULL_TABLE_DESCRIPTOR);
    String[] colNames = new String[cols.size()];
    int[] colTypes = new int[cols.size()];
    int keyColIdx = -1;
    int valColIdx = -1;
    boolean hasKeyProps = false;
    boolean hasValProps = false;
    for (int i = 0; i < cols.size(); i++) {
        String colName = cols.get(i);
        colNames[i] = colName;
        Column h2Col = tbl.getColumn(colName);
        colTypes[i] = h2Col.getType();
        int colId = h2Col.getColumnId();
        if (desc.isKeyColumn(colId)) {
            keyColIdx = i;
            continue;
        }
        if (desc.isValueColumn(colId)) {
            valColIdx = i;
            continue;
        }
        GridQueryProperty prop = desc.type().property(colName);
        assert prop != null : "Property '" + colName + "' not found.";
        if (prop.key())
            hasKeyProps = true;
        else
            hasValProps = true;
    }
    KeyValueSupplier keySupplier = createSupplier(cctx, desc.type(), keyColIdx, hasKeyProps, true, false);
    KeyValueSupplier valSupplier = createSupplier(cctx, desc.type(), valColIdx, hasValProps, false, false);
    return new UpdatePlan(UpdateMode.BULK_LOAD, tbl, colNames, colTypes, keySupplier, valSupplier, keyColIdx, valColIdx, null, true, null, 0, null, null);
}
Also used : GridQueryProperty(org.apache.ignite.internal.processors.query.GridQueryProperty) GridH2RowDescriptor(org.apache.ignite.internal.processors.query.h2.opt.GridH2RowDescriptor) Column(org.h2.table.Column) GridSqlColumn(org.apache.ignite.internal.processors.query.h2.sql.GridSqlColumn) IgniteSQLException(org.apache.ignite.internal.processors.query.IgniteSQLException)
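
The loop above classifies every column named in the COPY command exactly once: a column is either the whole key (_KEY), the whole value (_VAL), a property of the key object, or a property of the value object. A simplified sketch of that classification step, written against a hypothetical RowDescriptor interface (these types are illustrative, not the Ignite API):

import java.util.List;

public class BulkLoadColumnsSketch {
    /** Hypothetical stand-in for the row descriptor consulted above. */
    interface RowDescriptor {
        boolean isKeyColumn(String col);
        boolean isValueColumn(String col);
        boolean isKeyProperty(String col);
    }

    /** Result of classifying the COPY column list. */
    static final class ColumnPlan {
        int keyColIdx = -1;
        int valColIdx = -1;
        boolean hasKeyProps;
        boolean hasValProps;
    }

    static ColumnPlan classify(List<String> cols, RowDescriptor desc) {
        ColumnPlan plan = new ColumnPlan();

        for (int i = 0; i < cols.size(); i++) {
            String col = cols.get(i);

            if (desc.isKeyColumn(col)) {       // The whole key is supplied directly.
                plan.keyColIdx = i;
                continue;
            }

            if (desc.isValueColumn(col)) {     // The whole value is supplied directly.
                plan.valColIdx = i;
                continue;
            }

            if (desc.isKeyProperty(col))       // A field of the key object.
                plan.hasKeyProps = true;
            else                               // Otherwise a field of the value object.
                plan.hasValProps = true;
        }

        return plan;
    }
}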

Example 19 with Plan

use of org.h2.table.Plan in project ignite by apache.

In class UpdatePlanBuilder, the method planForUpdate:

/**
 * Prepare update plan for UPDATE or DELETE.
 *
 * @param stmt UPDATE or DELETE statement.
 * @param loc Local query flag.
 * @param idx Indexing.
 * @param conn Connection.
 * @param fieldsQuery Original query.
 * @param errKeysPos Index at which to inject the parameter for re-run keys, or {@code null} if this is not a re-run plan.
 * @return Update plan.
 * @throws IgniteCheckedException if failed.
 */
private static UpdatePlan planForUpdate(GridSqlStatement stmt, boolean loc, IgniteH2Indexing idx, @Nullable Connection conn, @Nullable SqlFieldsQuery fieldsQuery, @Nullable Integer errKeysPos) throws IgniteCheckedException {
    GridSqlElement target;
    FastUpdate fastUpdate;
    UpdateMode mode;
    if (stmt instanceof GridSqlUpdate) {
        // Let's verify that user is not trying to mess with key's columns directly
        verifyUpdateColumns(stmt);
        GridSqlUpdate update = (GridSqlUpdate) stmt;
        target = update.target();
        fastUpdate = DmlAstUtils.getFastUpdateArgs(update);
        mode = UpdateMode.UPDATE;
    } else if (stmt instanceof GridSqlDelete) {
        GridSqlDelete del = (GridSqlDelete) stmt;
        target = del.from();
        fastUpdate = DmlAstUtils.getFastDeleteArgs(del);
        mode = UpdateMode.DELETE;
    } else
        throw new IgniteSQLException("Unexpected DML operation [cls=" + stmt.getClass().getName() + ']', IgniteQueryErrorCode.UNEXPECTED_OPERATION);
    GridSqlTable tbl = DmlAstUtils.gridTableForElement(target);
    GridH2Table h2Tbl = tbl.dataTable();
    GridH2RowDescriptor desc = h2Tbl.rowDescriptor();
    if (desc == null)
        throw new IgniteSQLException("Row descriptor undefined for table '" + h2Tbl.getName() + "'", IgniteQueryErrorCode.NULL_TABLE_DESCRIPTOR);
    if (fastUpdate != null) {
        return new UpdatePlan(mode, h2Tbl, null, fastUpdate, null);
    } else {
        GridSqlSelect sel;
        if (stmt instanceof GridSqlUpdate) {
            List<GridSqlColumn> updatedCols = ((GridSqlUpdate) stmt).cols();
            int valColIdx = -1;
            String[] colNames = new String[updatedCols.size()];
            int[] colTypes = new int[updatedCols.size()];
            for (int i = 0; i < updatedCols.size(); i++) {
                colNames[i] = updatedCols.get(i).columnName();
                colTypes[i] = updatedCols.get(i).resultType().type();
                Column col = updatedCols.get(i).column();
                if (desc.isValueColumn(col.getColumnId()))
                    valColIdx = i;
            }
            boolean hasNewVal = (valColIdx != -1);
            // The statement updates distinct properties if _val is not among the updated columns,
            // or if the updated columns list contains more than just _val.
            boolean hasProps = !hasNewVal || updatedCols.size() > 1;
            // Index of new _val in results of SELECT
            if (hasNewVal)
                valColIdx += 2;
            int newValColIdx = (hasNewVal ? valColIdx : 1);
            KeyValueSupplier valSupplier = createSupplier(desc.context(), desc.type(), newValColIdx, hasProps, false, true);
            sel = DmlAstUtils.selectForUpdate((GridSqlUpdate) stmt, errKeysPos);
            String selectSql = sel.getSQL();
            DmlDistributedPlanInfo distributed = F.isEmpty(selectSql) ? null : checkPlanCanBeDistributed(idx, conn, fieldsQuery, loc, selectSql, tbl.dataTable().cacheName());
            return new UpdatePlan(UpdateMode.UPDATE, h2Tbl, colNames, colTypes, null, valSupplier, -1, valColIdx, selectSql, false, null, 0, null, distributed);
        } else {
            sel = DmlAstUtils.selectForDelete((GridSqlDelete) stmt, errKeysPos);
            String selectSql = sel.getSQL();
            DmlDistributedPlanInfo distributed = F.isEmpty(selectSql) ? null : checkPlanCanBeDistributed(idx, conn, fieldsQuery, loc, selectSql, tbl.dataTable().cacheName());
            return new UpdatePlan(UpdateMode.DELETE, h2Tbl, selectSql, null, distributed);
        }
    }
}
Also used : GridSqlDelete(org.apache.ignite.internal.processors.query.h2.sql.GridSqlDelete) GridSqlSelect(org.apache.ignite.internal.processors.query.h2.sql.GridSqlSelect) GridH2RowDescriptor(org.apache.ignite.internal.processors.query.h2.opt.GridH2RowDescriptor) Column(org.h2.table.Column) GridSqlColumn(org.apache.ignite.internal.processors.query.h2.sql.GridSqlColumn) GridSqlTable(org.apache.ignite.internal.processors.query.h2.sql.GridSqlTable) GridH2Table(org.apache.ignite.internal.processors.query.h2.opt.GridH2Table) GridSqlColumn(org.apache.ignite.internal.processors.query.h2.sql.GridSqlColumn) IgniteSQLException(org.apache.ignite.internal.processors.query.IgniteSQLException) GridSqlElement(org.apache.ignite.internal.processors.query.h2.sql.GridSqlElement) GridSqlUpdate(org.apache.ignite.internal.processors.query.h2.sql.GridSqlUpdate)
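
A subtle detail above is the valColIdx += 2 adjustment. The SELECT generated for an UPDATE is assumed to return _KEY and _VAL before the updated columns, so a value found at position i in the updated-columns list shows up at position i + 2 in each SELECT row; when _VAL is not updated at all, the plan falls back to the existing value at position 1. A small sketch of that index arithmetic (the row layout is inferred from the comments in the example, not verified against the Ignite source):

import java.util.List;

public class UpdateSelectIndexSketch {
    /**
     * Given the position of _VAL inside the updated-columns list (or -1 if absent),
     * returns its position in the generated SELECT row, whose assumed layout is:
     * [_KEY, _VAL, updatedCol0, updatedCol1, ...].
     */
    static int newValIndexInSelect(int valColIdxInUpdateList) {
        boolean hasNewVal = valColIdxInUpdateList != -1;

        // Shift past the leading _KEY and _VAL columns; otherwise fall back to the
        // existing _VAL at position 1, as the example does.
        return hasNewVal ? valColIdxInUpdateList + 2 : 1;
    }

    public static void main(String[] args) {
        List<String> updatedCols = List.of("NAME", "_VAL", "AGE");

        int idxInList = updatedCols.indexOf("_VAL");          // 1
        System.out.println(newValIndexInSelect(idxInList));   // 3: after _KEY and _VAL
        System.out.println(newValIndexInSelect(-1));          // 1: reuse the current _VAL
    }
}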

Example 20 with Plan

use of org.h2.table.Plan in project ignite by apache.

In class GridH2CollocationModel, the method joinedWithCollocated:

/**
 * @param f Child filter index.
 * @return Affinity join type.
 */
@SuppressWarnings("ForLoopReplaceableByForEach")
private Affinity joinedWithCollocated(int f) {
    TableFilter tf = childFilters[f];
    GridH2Table tbl = (GridH2Table) tf.getTable();
    if (validate) {
        if (tbl.rowDescriptor().context().customAffinityMapper())
            throw customAffinityError(tbl.cacheName());
        if (F.isEmpty(tf.getIndexConditions())) {
            throw new CacheException("Failed to prepare distributed join query: " + "join condition does not use index [joinedCache=" + tbl.cacheName() + ", plan=" + tf.getSelect().getPlanSQL() + ']');
        }
    }
    IndexColumn affCol = tbl.getAffinityKeyColumn();
    boolean affKeyCondFound = false;
    if (affCol != null) {
        ArrayList<IndexCondition> idxConditions = tf.getIndexConditions();
        int affColId = affCol.column.getColumnId();
        for (int i = 0; i < idxConditions.size(); i++) {
            IndexCondition c = idxConditions.get(i);
            int colId = c.getColumn().getColumnId();
            int cmpType = c.getCompareType();
            if ((cmpType == Comparison.EQUAL || cmpType == Comparison.EQUAL_NULL_SAFE) && (colId == affColId || tbl.rowDescriptor().isKeyColumn(colId)) && c.isEvaluatable()) {
                affKeyCondFound = true;
                Expression exp = c.getExpression();
                exp = exp.getNonAliasExpression();
                if (exp instanceof ExpressionColumn) {
                    ExpressionColumn expCol = (ExpressionColumn) exp;
                    // This is one of our previous joins.
                    TableFilter prevJoin = expCol.getTableFilter();
                    if (prevJoin != null) {
                        GridH2CollocationModel cm = child(indexOf(prevJoin), true);
                        // Skip views (subqueries): they may combine different affinity columns from different tables.
                        if (cm != null && !cm.view) {
                            Type t = cm.type(true);
                            if (t.isPartitioned() && t.isCollocated() && isAffinityColumn(prevJoin, expCol, validate))
                                return Affinity.COLLOCATED_JOIN;
                        }
                    }
                }
            }
        }
    }
    return affKeyCondFound ? Affinity.HAS_AFFINITY_CONDITION : Affinity.NONE;
}
Also used : TableFilter(org.h2.table.TableFilter) CacheException(javax.cache.CacheException) Expression(org.h2.expression.Expression) IndexCondition(org.h2.index.IndexCondition) IndexColumn(org.h2.table.IndexColumn) ExpressionColumn(org.h2.expression.ExpressionColumn)
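
joinedWithCollocated scans the index conditions attached to a join filter, looking for an equality predicate on the affinity column (or on the key column) whose expression is evaluatable; only then does it inspect the other side of the join. A stripped-down sketch of that predicate test over hypothetical condition objects (the real code works with H2's IndexCondition and Comparison types; everything below is illustrative):

import java.util.List;

public class AffinityConditionSketch {
    enum CompareType { EQUAL, EQUAL_NULL_SAFE, BIGGER, SMALLER }

    /** Hypothetical index condition, standing in for H2's IndexCondition. */
    static final class Condition {
        final int columnId;
        final CompareType cmp;
        final boolean evaluatable;

        Condition(int columnId, CompareType cmp, boolean evaluatable) {
            this.columnId = columnId;
            this.cmp = cmp;
            this.evaluatable = evaluatable;
        }
    }

    /**
     * True if some condition is an equality on the affinity column (or the key column)
     * whose expression can be evaluated, which is the same test the example applies
     * before looking at the joined table.
     */
    static boolean hasAffinityKeyCondition(List<Condition> conds, int affColId, int keyColId) {
        for (Condition c : conds) {
            boolean eq = c.cmp == CompareType.EQUAL || c.cmp == CompareType.EQUAL_NULL_SAFE;

            if (eq && (c.columnId == affColId || c.columnId == keyColId) && c.evaluatable)
                return true;
        }
        return false;
    }

    public static void main(String[] args) {
        List<Condition> conds = List.of(
            new Condition(3, CompareType.EQUAL, true),
            new Condition(1, CompareType.BIGGER, true));

        System.out.println(hasAffinityKeyCondition(conds, 3, 0)); // true
    }
}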

Aggregations

PreparedStatement (java.sql.PreparedStatement): 15
ResultSet (java.sql.ResultSet): 14
Connection (java.sql.Connection): 13
Statement (java.sql.Statement): 13
SimpleResultSet (org.h2.tools.SimpleResultSet): 9
IgniteSQLException (org.apache.ignite.internal.processors.query.IgniteSQLException): 7
Column (org.h2.table.Column): 7
StatementBuilder (org.h2.util.StatementBuilder): 6
ValueString (org.h2.value.ValueString): 6
ArrayList (java.util.ArrayList): 5
IgniteCheckedException (org.apache.ignite.IgniteCheckedException): 4
IgniteException (org.apache.ignite.IgniteException): 4
GridH2RowDescriptor (org.apache.ignite.internal.processors.query.h2.opt.GridH2RowDescriptor): 4
Prepared (org.h2.command.Prepared): 4
Expression (org.h2.expression.Expression): 4
LinkedHashMap (java.util.LinkedHashMap): 3
List (java.util.List): 3
GridQueryCacheObjectsIterator (org.apache.ignite.internal.processors.query.GridQueryCacheObjectsIterator): 3
UpdatePlan (org.apache.ignite.internal.processors.query.h2.dml.UpdatePlan): 3
GridSqlColumn (org.apache.ignite.internal.processors.query.h2.sql.GridSqlColumn): 3