
Example 1 with Update

Use of org.h2.command.dml.Update in the Apache Ignite project.

Class DmlStatementsProcessor, method getPlanForStatement.

/**
     * Generate the SELECT statement used to retrieve the data for the modification, or find fast UPDATE or DELETE
     * arguments, if available.
     *
     * @param schema Schema.
     * @param prepStmt JDBC statement.
     * @param errKeysPos Position of the parameter holding the keys of a re-run, or {@code null} if this is not a re-run.
     * @return Update plan.
     */
@SuppressWarnings({ "unchecked", "ConstantConditions" })
private UpdatePlan getPlanForStatement(String schema, PreparedStatement prepStmt, @Nullable Integer errKeysPos) throws IgniteCheckedException {
    Prepared p = GridSqlQueryParser.prepared(prepStmt);
    H2DmlPlanKey planKey = new H2DmlPlanKey(schema, p.getSQL());
    UpdatePlan res = (errKeysPos == null ? planCache.get(planKey) : null);
    if (res != null)
        return res;
    res = UpdatePlanBuilder.planForStatement(p, errKeysPos);
    // Don't cache re-runs
    if (errKeysPos == null)
        return U.firstNotNull(planCache.putIfAbsent(planKey, res), res);
    else
        return res;
}
Also used: Prepared (org.h2.command.Prepared), UpdatePlan (org.apache.ignite.internal.processors.query.h2.dml.UpdatePlan)
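The interesting part of this method is the caching idiom at the end: plans are computed once per schema and SQL text, and concurrent races are resolved with putIfAbsent, keeping whichever plan was stored first. Below is a minimal, self-contained sketch of that idiom; the class and method names are made up for illustration and only mirror the pattern above, not Ignite's actual types.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.function.Function;

/** Minimal sketch of the plan-caching idiom above; all names here are hypothetical. */
class PlanCacheSketch<K, P> {
    private final ConcurrentMap<K, P> cache = new ConcurrentHashMap<>();

    /** Returns the cached plan for the key, building and caching it on first use. */
    P plan(K key, Function<K, P> builder, boolean cacheable) {
        if (cacheable) {
            P cached = cache.get(key);

            if (cached != null)
                return cached;
        }

        P built = builder.apply(key);

        // Re-runs (errKeysPos != null in the original method) are never cached.
        if (!cacheable)
            return built;

        // Equivalent of U.firstNotNull(planCache.putIfAbsent(planKey, res), res):
        // if another thread stored a plan first, prefer that one.
        P prev = cache.putIfAbsent(key, built);

        return prev != null ? prev : built;
    }
}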

Example 2 with Update

Use of org.h2.command.dml.Update in the Apache Ignite project.

Class DmlStatementsProcessor, method streamUpdateQuery.

/**
     * Perform the given statement against the given data streamer. Only rows-based INSERT and MERGE are supported,
     * as well as key-bound UPDATE and DELETE (those with a {@code WHERE _key = ?} filter).
     *
     * @param streamer Streamer to feed data to.
     * @param stmt Statement.
     * @param args Statement arguments.
     * @return Number of rows in the given statement for INSERT and MERGE, {@code 1} otherwise.
     * @throws IgniteCheckedException if failed.
     */
@SuppressWarnings({ "unchecked", "ConstantConditions" })
long streamUpdateQuery(IgniteDataStreamer streamer, PreparedStatement stmt, Object[] args) throws IgniteCheckedException {
    args = U.firstNotNull(args, X.EMPTY_OBJECT_ARRAY);
    Prepared p = GridSqlQueryParser.prepared(stmt);
    assert p != null;
    UpdatePlan plan = UpdatePlanBuilder.planForStatement(p, null);
    if (!F.eq(streamer.cacheName(), plan.tbl.rowDescriptor().context().name()))
        throw new IgniteSQLException("Cross cache streaming is not supported, please specify cache explicitly" + " in connection options", IgniteQueryErrorCode.UNSUPPORTED_OPERATION);
    if (plan.mode == UpdateMode.INSERT && plan.rowsNum > 0) {
        assert plan.isLocSubqry;
        final GridCacheContext cctx = plan.tbl.rowDescriptor().context();
        QueryCursorImpl<List<?>> cur;
        final ArrayList<List<?>> data = new ArrayList<>(plan.rowsNum);
        final GridQueryFieldsResult res = idx.queryLocalSqlFields(idx.schema(cctx.name()), plan.selectQry, F.asList(args), null, false, 0, null);
        QueryCursorImpl<List<?>> stepCur = new QueryCursorImpl<>(new Iterable<List<?>>() {

            @Override
            public Iterator<List<?>> iterator() {
                try {
                    return new GridQueryCacheObjectsIterator(res.iterator(), idx.objectContext(), cctx.keepBinary());
                } catch (IgniteCheckedException e) {
                    throw new IgniteException(e);
                }
            }
        }, null);
        data.addAll(stepCur.getAll());
        cur = new QueryCursorImpl<>(new Iterable<List<?>>() {

            @Override
            public Iterator<List<?>> iterator() {
                return data.iterator();
            }
        }, null);
        if (plan.rowsNum == 1) {
            IgniteBiTuple t = rowToKeyValue(cctx, cur.iterator().next(), plan);
            streamer.addData(t.getKey(), t.getValue());
            return 1;
        }
        Map<Object, Object> rows = new LinkedHashMap<>(plan.rowsNum);
        for (List<?> row : cur) {
            final IgniteBiTuple t = rowToKeyValue(cctx, row, plan);
            rows.put(t.getKey(), t.getValue());
        }
        streamer.addData(rows);
        return rows.size();
    } else
        throw new IgniteSQLException("Only tuple based INSERT statements are supported in streaming mode", IgniteQueryErrorCode.UNSUPPORTED_OPERATION);
}
Also used: GridCacheContext (org.apache.ignite.internal.processors.cache.GridCacheContext), IgniteBiTuple (org.apache.ignite.lang.IgniteBiTuple), Prepared (org.h2.command.Prepared), ArrayList (java.util.ArrayList), QueryCursorImpl (org.apache.ignite.internal.processors.cache.QueryCursorImpl), GridQueryCacheObjectsIterator (org.apache.ignite.internal.processors.query.GridQueryCacheObjectsIterator), GridQueryFieldsResult (org.apache.ignite.internal.processors.query.GridQueryFieldsResult), LinkedHashMap (java.util.LinkedHashMap), GridBoundedConcurrentLinkedHashMap (org.apache.ignite.internal.util.GridBoundedConcurrentLinkedHashMap), IgniteCheckedException (org.apache.ignite.IgniteCheckedException), IgniteException (org.apache.ignite.IgniteException), IgniteSQLException (org.apache.ignite.internal.processors.query.IgniteSQLException), IgniteSingletonIterator (org.apache.ignite.internal.util.lang.IgniteSingletonIterator), Iterator (java.util.Iterator), List (java.util.List), BinaryObject (org.apache.ignite.binary.BinaryObject), UpdatePlan (org.apache.ignite.internal.processors.query.h2.dml.UpdatePlan)
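At the public API level, the batching behavior above (accumulate key/value pairs, then push them to the streamer in one call) can be reproduced with IgniteDataStreamer directly. A hedged sketch follows, assuming a cache named "myCache" with Integer keys and String values; the cache name and types are illustrative, not taken from the example.

import java.util.LinkedHashMap;
import java.util.Map;
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteDataStreamer;

/** Sketch of the batching pattern used by streamUpdateQuery; cache name and types are assumptions. */
class StreamBatchSketch {
    static void streamRows(Ignite ignite, Iterable<Map.Entry<Integer, String>> rows) {
        // IgniteDataStreamer is AutoCloseable; closing it flushes any remaining data.
        try (IgniteDataStreamer<Integer, String> streamer = ignite.dataStreamer("myCache")) {
            Map<Integer, String> batch = new LinkedHashMap<>();

            for (Map.Entry<Integer, String> row : rows)
                batch.put(row.getKey(), row.getValue());

            // The original method likewise collects rows into a LinkedHashMap and
            // hands the whole map to addData(Map) in a single call.
            streamer.addData(batch);
        }
    }
}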

Example 3 with Update

Use of org.h2.command.dml.Update in the Apache Ignite project.

Class GridSqlQuerySplitter, method split.

/**
     * @param stmt Prepared statement.
     * @param params Parameters.
     * @param collocatedGrpBy Whether the query has collocated GROUP BY keys.
     * @param distributedJoins Whether distributed joins are enabled.
     * @param enforceJoinOrder Whether to enforce join order.
     * @param h2 Indexing.
     * @return Two step query.
     * @throws SQLException If failed.
     * @throws IgniteCheckedException If failed.
     */
public static GridCacheTwoStepQuery split(JdbcPreparedStatement stmt, Object[] params, boolean collocatedGrpBy, boolean distributedJoins, boolean enforceJoinOrder, IgniteH2Indexing h2) throws SQLException, IgniteCheckedException {
    if (params == null)
        params = GridCacheSqlQuery.EMPTY_PARAMS;
    // Here we will just do initial query parsing. Do not use optimized
    // subqueries because we do not have unique FROM aliases yet.
    GridSqlQuery qry = parse(prepared(stmt), false);
    String originalSql = qry.getSQL();
    final boolean explain = qry.explain();
    qry.explain(false);
    GridSqlQuerySplitter splitter = new GridSqlQuerySplitter(params, collocatedGrpBy, h2.kernalContext());
    // Normalization will generate unique aliases for all the table filters in FROM.
    // Also it will collect all tables and schemas from the query.
    splitter.normalizeQuery(qry);
    Connection conn = stmt.getConnection();
    // Here we will have correct normalized AST with optimized join order.
    // The distributedJoins parameter is ignored because it is not relevant for
    // the REDUCE query optimization.
    qry = parse(optimize(h2, conn, qry.getSQL(), params, false, enforceJoinOrder), true);
    // Do the actual query split. We will update the original query AST, need to be careful.
    splitter.splitQuery(qry);
    // We must have at least one map query.
    assert !F.isEmpty(splitter.mapSqlQrys) : "map";
    // We must have a reduce query.
    assert splitter.rdcSqlQry != null : "rdc";
    // If distributed joins are requested, re-optimize every MAP query and check
    // whether we actually need distributed joins at all.
    if (distributedJoins) {
        boolean allCollocated = true;
        for (GridCacheSqlQuery mapSqlQry : splitter.mapSqlQrys) {
            Prepared prepared = optimize(h2, conn, mapSqlQry.query(), mapSqlQry.parameters(params), true, enforceJoinOrder);
            allCollocated &= isCollocated((Query) prepared);
            mapSqlQry.query(parse(prepared, true).getSQL());
        }
        // We do not need distributed joins if all MAP queries are collocated.
        if (allCollocated)
            distributedJoins = false;
    }
    // Setup resulting two step query and return it.
    GridCacheTwoStepQuery twoStepQry = new GridCacheTwoStepQuery(originalSql, splitter.tbls);
    twoStepQry.reduceQuery(splitter.rdcSqlQry);
    for (GridCacheSqlQuery mapSqlQry : splitter.mapSqlQrys) twoStepQry.addMapQuery(mapSqlQry);
    twoStepQry.skipMergeTable(splitter.rdcQrySimple);
    twoStepQry.explain(explain);
    twoStepQry.distributedJoins(distributedJoins);
    // all map queries must have non-empty derivedPartitions to use this feature.
    twoStepQry.derivedPartitions(mergePartitionsFromMultipleQueries(twoStepQry.mapQueries()));
    return twoStepQry;
}
Also used: GridCacheTwoStepQuery (org.apache.ignite.internal.processors.cache.query.GridCacheTwoStepQuery), GridCacheSqlQuery (org.apache.ignite.internal.processors.cache.query.GridCacheSqlQuery), Query (org.h2.command.dml.Query), Connection (java.sql.Connection), Prepared (org.h2.command.Prepared)
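The split() flags are internal, but they roughly correspond to options a user sets on a SqlFieldsQuery before the query ever reaches the splitter. Here is a hedged sketch of that public-API side; the SQL text is made up, and the mapping of setCollocated to the collocatedGrpBy argument is an assumption.

import java.util.List;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.cache.query.SqlFieldsQuery;

/** Sketch of the public-API flags that feed into split(); table and column names are hypothetical. */
class SplitFlagsSketch {
    static List<List<?>> run(IgniteCache<?, ?> cache) {
        SqlFieldsQuery qry = new SqlFieldsQuery(
            "SELECT p.name, c.name FROM Person p JOIN Company c ON p.companyId = c.id")
            .setDistributedJoins(true)  // corresponds to the distributedJoins argument
            .setEnforceJoinOrder(false) // corresponds to the enforceJoinOrder argument
            .setCollocated(false);      // presumably corresponds to collocatedGrpBy

        return cache.query(qry).getAll();
    }
}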

Example 4 with Update

Use of org.h2.command.dml.Update in the Apache Ignite project.

Class DmlAstUtils, method getFastUpdateArgs.

/**
     * @param update UPDATE statement.
     * @return Fast update arguments if the given statement directly updates the {@code _val} column with a literal
     * or parameter value and filters by a single non-expression key (and, optionally, a single non-expression value);
     * {@code null} otherwise.
     */
public static FastUpdateArguments getFastUpdateArgs(GridSqlUpdate update) {
    IgnitePair<GridSqlElement> filter = findKeyValueEqualityCondition(update.where());
    if (filter == null)
        return null;
    if (update.cols().size() != 1)
        return null;
    Table tbl = update.cols().get(0).column().getTable();
    if (!(tbl instanceof GridH2Table))
        return null;
    GridH2RowDescriptor desc = ((GridH2Table) tbl).rowDescriptor();
    if (!desc.isValueColumn(update.cols().get(0).column().getColumnId()))
        return null;
    GridSqlElement set = update.set().get(update.cols().get(0).columnName());
    if (!(set instanceof GridSqlConst || set instanceof GridSqlParameter))
        return null;
    return new FastUpdateArguments(operandForElement(filter.getKey()), operandForElement(filter.getValue()), operandForElement(set));
}
Also used: GridH2Table (org.apache.ignite.internal.processors.query.h2.opt.GridH2Table), Table (org.h2.table.Table), GridH2RowDescriptor (org.apache.ignite.internal.processors.query.h2.opt.GridH2RowDescriptor), FastUpdateArguments (org.apache.ignite.internal.processors.query.h2.dml.FastUpdateArguments)
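To make the eligibility rules concrete, the hypothetical SQL statements below show which UPDATE shapes the method accepts (non-null result) and which fall back to the regular path; the table and column names are made up.

/** Hypothetical SQL shapes illustrating when getFastUpdateArgs returns a non-null result. */
class FastUpdateSqlExamples {
    // Eligible: _val is assigned a parameter and the filter is a plain key equality.
    static final String FAST = "UPDATE MyTable SET _val = ? WHERE _key = ?";

    // Eligible: an additional non-expression value equality in the filter is allowed too.
    static final String FAST_WITH_VAL = "UPDATE MyTable SET _val = ? WHERE _key = ? AND _val = ?";

    // Not eligible: updates an individual field rather than the _val column directly.
    static final String SLOW_FIELD = "UPDATE MyTable SET someField = ? WHERE _key = ?";

    // Not eligible: the new value is an expression, not a literal or parameter.
    static final String SLOW_EXPR = "UPDATE MyTable SET _val = _val || 'x' WHERE _key = ?";

    // Not eligible: the filter is not a simple key equality.
    static final String SLOW_FILTER = "UPDATE MyTable SET _val = ? WHERE _key > ?";
}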

Example 5 with Update

Use of org.h2.command.dml.Update in the Apache Ignite project.

Class DmlAstUtils, method selectForUpdate.

/**
     * Generate SQL SELECT based on UPDATE's WHERE, LIMIT, etc.
     *
     * @param update Update statement.
     * @param keysParamIdx Index of the new parameter for the keys array.
     * @return SELECT statement.
     */
public static GridSqlSelect selectForUpdate(GridSqlUpdate update, @Nullable Integer keysParamIdx) {
    GridSqlSelect mapQry = new GridSqlSelect();
    mapQry.from(update.target());
    Set<GridSqlTable> tbls = new HashSet<>();
    collectAllGridTablesInTarget(update.target(), tbls);
    assert tbls.size() == 1 : "Failed to determine target table for UPDATE";
    GridSqlTable tbl = tbls.iterator().next();
    GridH2Table gridTbl = tbl.dataTable();
    assert gridTbl != null : "Failed to determine target grid table for UPDATE";
    Column h2KeyCol = gridTbl.getColumn(GridH2AbstractKeyValueRow.KEY_COL);
    Column h2ValCol = gridTbl.getColumn(GridH2AbstractKeyValueRow.VAL_COL);
    GridSqlColumn keyCol = new GridSqlColumn(h2KeyCol, tbl, h2KeyCol.getName());
    keyCol.resultType(GridSqlType.fromColumn(h2KeyCol));
    GridSqlColumn valCol = new GridSqlColumn(h2ValCol, tbl, h2ValCol.getName());
    valCol.resultType(GridSqlType.fromColumn(h2ValCol));
    mapQry.addColumn(keyCol, true);
    mapQry.addColumn(valCol, true);
    for (GridSqlColumn c : update.cols()) {
        String newColName = Parser.quoteIdentifier("_upd_" + c.columnName());
        // We have to use aliases to cover cases when the user
        // wants to update _val field directly (if it's a literal)
        GridSqlAlias alias = new GridSqlAlias(newColName, elementOrDefault(update.set().get(c.columnName()), c), true);
        alias.resultType(c.resultType());
        mapQry.addColumn(alias, true);
    }
    GridSqlElement where = update.where();
    if (keysParamIdx != null)
        where = injectKeysFilterParam(where, keyCol, keysParamIdx);
    mapQry.where(where);
    mapQry.limit(update.limit());
    return mapQry;
}
Also used: Column (org.h2.table.Column), GridH2Table (org.apache.ignite.internal.processors.query.h2.opt.GridH2Table), ValueString (org.h2.value.ValueString), HashSet (java.util.HashSet)
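For intuition, the pair of strings below sketches what the generated map query looks like for a simple UPDATE; the table name, column names, and exact identifier quoting are illustrative approximations, not output captured from Ignite.

/** Hypothetical before/after pair showing the shape of the SELECT built by selectForUpdate. */
class SelectForUpdateShape {
    // Original DML statement.
    static final String UPDATE_SQL = "UPDATE Person SET name = ? WHERE age > ? LIMIT 10";

    // Approximate generated map query: _KEY and _VAL come first, then one aliased
    // "_upd_<column>" entry per updated column, with the original WHERE and LIMIT kept.
    // When keysParamIdx is non-null, an extra key-filter parameter is injected into WHERE.
    static final String GENERATED_SELECT =
        "SELECT _KEY, _VAL, ? AS \"_upd_NAME\" FROM Person WHERE age > ? LIMIT 10";
}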

Aggregations

SQLException (java.sql.SQLException): 44
DbException (org.h2.message.DbException): 40
Database (org.h2.engine.Database): 39
Connection (java.sql.Connection): 37
PreparedStatement (java.sql.PreparedStatement): 35
Value (org.h2.value.Value): 34
ResultSet (java.sql.ResultSet): 32
Statement (java.sql.Statement): 31
Column (org.h2.table.Column): 30
Table (org.h2.table.Table): 23
JdbcConnection (org.h2.jdbc.JdbcConnection): 22
Expression (org.h2.expression.Expression): 19
StatementBuilder (org.h2.util.StatementBuilder): 14
ValueExpression (org.h2.expression.ValueExpression): 13
ValueString (org.h2.value.ValueString): 13
ArrayList (java.util.ArrayList): 10
Constraint (org.h2.constraint.Constraint): 10
Index (org.h2.index.Index): 10
IndexColumn (org.h2.table.IndexColumn): 10
Task (org.h2.util.Task): 10